Example #1
GPU_PERF_TEST(AlphaComp, cv::gpu::DeviceInfo, cv::Size, MatType, AlphaOp)
{
    cv::gpu::DeviceInfo devInfo = GET_PARAM(0);
    cv::gpu::setDevice(devInfo.deviceID());

    cv::Size size = GET_PARAM(1);
    int type = GET_PARAM(2);
    int alpha_op = GET_PARAM(3);

    cv::Mat img1_host(size, type);
    fill(img1_host, 0, 255);

    cv::Mat img2_host(size, type);
    fill(img2_host, 0, 255);

    cv::gpu::GpuMat img1(img1_host);
    cv::gpu::GpuMat img2(img2_host);
    cv::gpu::GpuMat dst;

    cv::gpu::alphaComp(img1, img2, dst, alpha_op);

    TEST_CYCLE()
    {
        cv::gpu::alphaComp(img1, img2, dst, alpha_op);
    }
}
Example #2
// Compare images:
bool KOFilter::imageCompare(const QString &dir1File, const QString &dir2File)
{
    // Check src file:
    QFileInfo fi1(dir1File);

    // Check dst file:
    QFileInfo fi2(dir2File);

    // Check files:
    if (!fi1.exists() || !fi2.exists())
        return false;

    QImage img1(dir1File);
    QImage img2(dir2File);
    if (img1.size() != img2.size())
        return false;

    for (int j=0; j<img1.height(); j++)
        for (int i=0; i<img1.width(); i++)
        {
            QRgb color1 = img1.pixel(i, j);
            QRgb color2 = img2.pixel(i, j);
            if (color1 != color2)
                return false;
        }

    return true;
}
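A hedged follow-up sketch (not part of KOFilter): the same pixel loop can be relaxed to a per-channel tolerance using Qt's qRed/qGreen/qBlue accessors. The helper name imagesAlmostEqual and the tol parameter below are hypothetical, purely for illustration.

#include <QColor>
#include <QImage>
#include <QString>
#include <QtGlobal>

// Hypothetical helper: per-channel comparison with a tolerance (sketch only).
static bool imagesAlmostEqual(const QString &file1, const QString &file2, int tol = 0)
{
    QImage img1(file1);
    QImage img2(file2);
    if (img1.isNull() || img2.isNull() || img1.size() != img2.size())
        return false;

    for (int j = 0; j < img1.height(); ++j)
        for (int i = 0; i < img1.width(); ++i) {
            const QRgb c1 = img1.pixel(i, j);
            const QRgb c2 = img2.pixel(i, j);
            if (qAbs(qRed(c1)   - qRed(c2))   > tol ||
                qAbs(qGreen(c1) - qGreen(c2)) > tol ||
                qAbs(qBlue(c1)  - qBlue(c2))  > tol)
                return false;   // channels differ by more than the tolerance
        }
    return true;
}

Per-pixel access through QImage::pixel() is simple but slow; for large images, walking rows via QImage::scanLine() is the usual optimization.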
Example #3
GPU_PERF_TEST(BlendLinear, cv::gpu::DeviceInfo, cv::Size, MatType)
{
    cv::gpu::DeviceInfo devInfo = GET_PARAM(0);
    cv::gpu::setDevice(devInfo.deviceID());

    cv::Size size = GET_PARAM(1);
    int type = GET_PARAM(2);

    cv::Mat img1_host(size, type);
    fill(img1_host, 0, 255);

    cv::Mat img2_host(size, type);
    fill(img2_host, 0, 255);

    cv::gpu::GpuMat img1(img1_host);
    cv::gpu::GpuMat img2(img2_host);
    cv::gpu::GpuMat weights1(size, CV_32FC1, cv::Scalar::all(0.5));
    cv::gpu::GpuMat weights2(size, CV_32FC1, cv::Scalar::all(0.5));
    cv::gpu::GpuMat dst;

    cv::gpu::blendLinear(img1, img2, weights1, weights2, dst);

    TEST_CYCLE()
    {
        cv::gpu::blendLinear(img1, img2, weights1, weights2, dst);
    }
}
Example #4
TEST_F(for_each_channel_accumulate_test, should_throw_when_image_dimensions_dont_match)
{
    rgb16_image_t img1(1, 2), img2(1, 3), img3(2, 2);

    auto zero = [](channel16_t, channel16_t) { return 0; };
    ASSERT_THROW(for_each_channel_accumulate(const_view(img1), const_view(img2), 0, zero), std::invalid_argument);
    ASSERT_THROW(for_each_channel_accumulate(const_view(img1), const_view(img3), 0, zero), std::invalid_argument);
}
Example #5
int main(int argc, const char* argv[])
{
	cv::gpu::GpuMat img1(cv::imread("../../Pictures/move1.jpg", CV_LOAD_IMAGE_GRAYSCALE)); 
	cv::gpu::GpuMat img2(cv::imread("../../Pictures/move2.jpg", CV_LOAD_IMAGE_GRAYSCALE)); 

	// Keypoint containers
	std::vector< cv::KeyPoint> keypoints1, keypoints2;
	cv::gpu::GpuMat keypoints1GPU, keypoints2GPU; 
	// Descriptor containers
	std::vector< float> descriptors1, descriptors2;
	cv::gpu::GpuMat descriptors1GPU, descriptors2GPU; 
	// orb detector and extractor
	cv::gpu::ORB_GPU orb(2000);
	// Create Object of DescriptorMatcher
	cv::gpu::BruteForceMatcher_GPU< cv::Hamming > matcher;
	// match results (k-NN, k = 2)
	std::vector< std::vector< cv::DMatch> > matches; 

	cv::TickMeter gpumeter;
	gpumeter.start();

	// detect keypoints and compute descriptors
	orb(img1, cv::gpu::GpuMat(), keypoints1GPU, descriptors1GPU);
	orb(img2, cv::gpu::GpuMat(), keypoints2GPU, descriptors2GPU);

	// matching
	matcher.knnMatch(descriptors1GPU, descriptors2GPU, matches, 2);
	// threshold 
	std::vector< cv::DMatch > good_matches;
	for(int k = 0; k < std::min(descriptors1GPU.rows-1,(int) matches.size()); k++) 
	{
		if((matches[k][0].distance < 0.6*(matches[k][1].distance)) && ((int) matches[k].size()<=2 && (int) matches[k].size()>0))
		{
			good_matches.push_back(matches[k][0]);
		}
	}    

	gpumeter.stop();

	// show result
	std::cout << "ORB (GPU): " << gpumeter.getTimeMilli() << "ms" << std::endl;
	std::cout << "FOUND " << keypoints1GPU.cols << " keypoints on first image" << std::endl; 
	std::cout << "FOUND " << keypoints2GPU.cols << " keypoints on second image" << std::endl; 
	orb.downloadKeyPoints(keypoints1GPU, keypoints1);
	orb.downloadKeyPoints(keypoints2GPU, keypoints2);
	std::cout << "keypoint1 :" << keypoints1.size() << "  keypoint2 :" << keypoints2.size() << std::endl;

	cv::Mat img_matches; 
	cv::Mat img1_cpu, img2_cpu;
	img1.download(img1_cpu);
	img2.download(img2_cpu);
	cv::drawMatches(img1_cpu, keypoints1, img2_cpu, keypoints2, good_matches, img_matches);
	cv::namedWindow("matches", 0);
	cv::imshow("matches", img_matches);
	cv::waitKey(0);

	return 0;
}
Example #6
// Main procedure
//---------------
int main(int argc,char **argv) {

  // Read and check command line parameters.
  cimg_usage("Compute a linear fading between two 2D images");
  const char *file_i1 = cimg_option("-i1",cimg_imagepath "sh0r.pgm","Input Image 1");
  const char *file_i2 = cimg_option("-i2",cimg_imagepath "milla.bmp","Input Image 2");
  const char *file_o  = cimg_option("-o",(char*)0,"Output Image");
  const bool visu     = cimg_option("-visu",true,"Visualization mode");
  const double pmin   = cimg_option("-min",40.0,"Begin of the fade (in %)")/100.0;
  const double pmax   = cimg_option("-max",60.0,"End of the fade (in %)")/100.0;
  const double angle  = cimg_option("-angle",0.0,"Fade angle")*cil::cimg::PI/180;

  // Init images.
  cil::CImg<unsigned char> img1(file_i1), img2(file_i2);
  if (!img2.is_sameXYZC(img1)) {
    int
      dx = std::max(img1.width(),img2.width()),
      dy = std::max(img1.height(),img2.height()),
      dz = std::max(img1.depth(),img2.depth()),
      dv = std::max(img1.spectrum(),img2.spectrum());
    img1.resize(dx,dy,dz,dv,3);
    img2.resize(dx,dy,dz,dv,3);
  }
  cil::CImg<unsigned char> dest(img1);

  // Compute the faded image.
  const double ca = std::cos(angle), sa = std::sin(angle);
  double alpha;
  cimg_forXYZC(dest,x,y,z,k) {
    const double X = ((double)x/img1.width() - 0.5)*ca + ((double)y/img1.height() - 0.5)*sa;
    if (X + 0.5<pmin) alpha = 0; else {
      if (X + 0.5>pmax) alpha = 1; else
        alpha = (X + 0.5 - pmin)/(pmax - pmin);
    }
    dest(x,y,z,k) = (unsigned char)((1 - alpha)*img1(x,y,z,k) + alpha*img2(x,y,z,k));
  }

  // Save and exit
  if (file_o) dest.save(file_o);
  if (visu) dest.display("Image fading");
  return 0;
}
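For reference, the fade computed in the loop above amounts to, per channel value (a direct restatement of the code, with W, H the width and height of img1 and θ the fade angle):

\[
X = \left(\frac{x}{W} - 0.5\right)\cos\theta + \left(\frac{y}{H} - 0.5\right)\sin\theta,
\qquad
\alpha = \operatorname{clamp}\!\left(\frac{X + 0.5 - p_{\min}}{p_{\max} - p_{\min}},\; 0,\; 1\right),
\]
\[
\mathrm{dest}(x,y,z,k) = (1 - \alpha)\,\mathrm{img1}(x,y,z,k) + \alpha\,\mathrm{img2}(x,y,z,k).
\]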
Example #7
void
PNGTests::testWriter() {
  static const int width  = 256;
  static const int height = 256;

  // create an image and fill it with random data
  auto_ptr<Image> image(CreateImage(width, height, PF_R8G8B8A8));
  setRandomBytes((byte*)image->getPixels(), width * height * 4);

  // generate filename
  char* filename = tmpnam(0);
  CPPUNIT_ASSERT_MESSAGE("opening temporary file", filename != 0);

  // save image
  CPPUNIT_ASSERT(SaveImage(filename, FF_PNG, image.get()) == true);

  // load it back
  auto_ptr<Image> img2(OpenImage(filename, PF_R8G8B8A8));
  CPPUNIT_ASSERT_MESSAGE("reloading image file", img2.get() != 0);

  AssertImagesEqual(
    "comparing saved with loaded",
    image.get(),
    img2.get());

  // force pixel format conversion (don't destroy the old image)
  auto_ptr<Image> img3(OpenImage(filename, PF_R8G8B8));
  CPPUNIT_ASSERT(SaveImage(filename, FF_PNG, img3.get()) == true);

  remove(filename);


  //== PALETTIZED SAVING TEST ==
  // disabled until loading palettized PNGs with a correct palette format
  // is implemented.
#if 0
  char* plt_filename = tmpnam(0);
  CPPUNIT_ASSERT_MESSAGE("opening temporary file (palette)", plt_filename != 0);
  auto_ptr<Image> plt(CreateImage(256, 256, PF_I8, 256, PF_R8G8B8));
  setRandomBytes((byte*)plt->getPixels(), 256 * 256);
  setRandomBytes((byte*)plt->getPalette(), 256);

  CPPUNIT_ASSERT(SaveImage(plt_filename, FF_PNG, plt.get()) == true);

  auto_ptr<Image> plt2(OpenImage(plt_filename, FF_PNG));
  CPPUNIT_ASSERT_MESSAGE("reloading palettized image", plt2.get() != 0);
  CPPUNIT_ASSERT(plt2->getPaletteSize() == 256);
  CPPUNIT_ASSERT(plt2->getPaletteFormat() == PF_R8G8B8);
  CPPUNIT_ASSERT(plt2->getFormat() == PF_I8);
  AssertImagesEqual("Comparing palettized image", plt.get(), plt2.get());

  remove(plt_filename);
#endif
}
Example #8
void testImage2Grey()
{
    Image2Grey img(2,2);
    img.setPixel(0,0,0);
    img.setPixel(255,1,0);
    img.setPixel(0,0,1);
    img.setPixel(255,1,1);
    img.save("testSave1.pgm");

    Image2Grey img2("testSave1.pgm");
    img2.save("testSave2.pgm");
}
Example #9
TEST_F(for_each_channel_accumulate_test, should_include_the_initial_value)
{
    rgb16_image_t img1(1, 1), img2(1, 1);

    view(img1)(0, 0) = { 1, 0, 0 };
    view(img2)(0, 0) = { 1, 0, 0 };

    ASSERT_EQ(2 + 8, for_each_channel_accumulate(const_view(img1), const_view(img2), 8, [&](channel16_t p1, channel16_t p2)
    {
        return p1 + p2;
    }));
}
Example #10
static void callback_method(MknnKmeansAlgorithm *kmeans, int64_t num_iteration,
		bool is_last_iteration, void *state_pointer) {
	//if (!is_last_iteration)
	//return;
	State *state = reinterpret_cast<State*>(state_pointer);
	int64_t *assign = mknn_kmeans_getAssignations(kmeans, false);
	MknnDataset *centers = mknn_kmeans_getCentroids(kmeans, false);
	cv::Mat img2(state->image_size, state->image_size, CV_8UC3);
	cv::rectangle(img2, cv::Point(0, 0), cv::Point(img2.cols, img2.rows),
			cv::Scalar(0, 0, 66), -1);
	for (int i = 0; i < mknn_dataset_getNumObjects(state->dataset); ++i) {
		int *vec = reinterpret_cast<int*>(mknn_dataset_getObject(state->dataset,
				i));
		double val = (180.0 * assign[i]) / mknn_dataset_getNumObjects(centers);
		cv::Scalar col = cv::Scalar(std::round(val), 255, 255);
		if (assign[i] < 0)
			col = cv::Scalar(0, 0, 200);
		cv::Point point = cv::Point(vec[0], vec[1]);
		cv::circle(img2, point, 2, col, -1);
		if (false) {
			std::string ss = my::toString::intValue(i);
			int fontFace = cv::FONT_HERSHEY_SCRIPT_SIMPLEX;
			double fontScale = 0.3;
			int thickness = 1;
			cv::putText(img2, ss, point, fontFace, fontScale, col, thickness);
		}
		if (false && i > 0) {
			int *pvec = reinterpret_cast<int*>(mknn_dataset_getObject(
					state->dataset, i - 1));
			cv::Point ppoint = cv::Point(pvec[0], pvec[1]);
			cv::line(img2, ppoint, point, cv::Scalar(30, 100, 200), 1);
		}
	}
	for (int i = 0; i < mknn_dataset_getNumObjects(centers); ++i) {
		float *vec =
				reinterpret_cast<float*>(mknn_dataset_getObject(centers, i));
		cv::Scalar col(30, 220, 255);
		cv::Point point = cv::Point(vec[0], vec[1]);
		cv::circle(img2, point, 5, col, -1);
		std::string ss = my::toString::intValue(i);
		int fontFace = cv::FONT_HERSHEY_SCRIPT_SIMPLEX;
		double fontScale = 0.5;
		int thickness = 1;
		cv::putText(img2, ss, point, fontFace, fontScale, col, thickness);
	}
	cv::Mat img3;
	cv::cvtColor(img2, img3, CV_HSV2BGR);
	std::cout << (is_last_iteration ? "[END] " : "") << "iteration "
			<< num_iteration << std::endl;
	cv::imshow("iteration", img3);
	cv::waitKey(0);
}
Example #11
void TextureSplatting::onPluginLoad()
{
  // VS
  m_pVs = new QGLShader(QGLShader::Vertex, this);
  m_pVs->compileSourceFile(":/splat.vert");
  
  // FS
  m_pFs = new QGLShader(QGLShader::Fragment, this);
  m_pFs->compileSourceFile(":/splat.frag");
  
  // Program  
  m_pProgram = new QGLShaderProgram(this);
  m_pProgram->addShader(m_pVs);
  m_pProgram->addShader(m_pFs);
  m_pProgram->link();
  
  // Load Texture 1
  glActiveTexture(GL_TEXTURE0);
  QString filename = QFileDialog::getOpenFileName(0, "Open Image", "/assig/grau-g/Textures", "Image file (*.png *.jpg)");	
  QImage img0(filename);	
  QImage im0 = QGLWidget::convertToGLFormat(img0);
  glGenTextures( 1, &m_textureId0);
  glBindTexture(GL_TEXTURE_2D, m_textureId0);
  glTexImage2D( GL_TEXTURE_2D, 0, GL_RGB, im0.width(), im0.height(), 0, GL_RGBA, GL_UNSIGNED_BYTE, im0.bits());
  glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
  glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
  glBindTexture(GL_TEXTURE_2D, 0);
  
  // Load Texture 2
  glActiveTexture(GL_TEXTURE1);
  QString filename2 = QFileDialog::getOpenFileName(0, "Open Image 2", "/assig/grau-g/Textures", "Image file (*.png *.jpg)");	
  QImage img1(filename2);	
  QImage im1 = QGLWidget::convertToGLFormat(img1);
  glGenTextures( 1, &m_textureId1);
  glBindTexture(GL_TEXTURE_2D, m_textureId1);
  glTexImage2D( GL_TEXTURE_2D, 0, GL_RGB, im1.width(), im1.height(), 0, GL_RGBA, GL_UNSIGNED_BYTE, im1.bits());
  glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
  glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
  glBindTexture(GL_TEXTURE_2D, 0);
  
  // Load Texture 3
  glActiveTexture(GL_TEXTURE3);
  QString filename3 = QFileDialog::getOpenFileName(0, "Open Image 3", "/assig/grau-g/Textures", "Image file (*.png *.jpg)");	
  QImage img2(filename3);	
  QImage im2 = QGLWidget::convertToGLFormat(img2);
  glGenTextures( 1, &m_textureId2);
  glBindTexture(GL_TEXTURE_2D, m_textureId2);
  glTexImage2D( GL_TEXTURE_2D, 0, GL_RGB, im2.width(), im2.height(), 0, GL_RGBA, GL_UNSIGNED_BYTE, im2.bits());
  glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
  glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
  glBindTexture(GL_TEXTURE_2D, 0);
}
Example #12
TEST_F(for_each_channel_accumulate_test, should_call_functor_for_each_channel_in_parallel_in_both_images_and_sum_the_results)
{
    rgb16_image_t img1(1, 1);
    rgb8_image_t img2(1, 1);

    view(img1)(0, 0) = { 1025, 18, 36 };
    view(img2)(0, 0) = { 1, 2, 4 };

    ASSERT_EQ(1024 + 16 + 32, for_each_channel_accumulate(const_view(img1), const_view(img2), 0, [&](channel16_t p1, channel8_t p2)
    {
        return p1 - p2;
    })) << "should sum all results";
}
Example #13
TEST_F(for_each_channel_accumulate_test, should_call_functor_for_each_pixel_in_parallel_in_both_images)
{
    rgb16_image_t img1(3, 2), img2(3, 2);
    auto v1 = view(img1), v2 = view(img2);

    v1(0, 0) = { 1, 0, 0 }; v1(1, 0) = { 2, 0, 0 };  v1(2, 0) = { 4, 0, 0 };
    v1(0, 1) = { 8, 0, 0 }; v1(1, 1) = { 16, 0, 0 }; v1(2, 1) = { 32, 0, 0 };
    v2(0, 0) = { 64, 0, 0 };  v2(1, 0) = { 128, 0, 0 };  v2(2, 0) = { 256, 0, 0 };
    v2(0, 1) = { 512, 0, 0 }; v2(1, 1) = { 1024, 0, 0 }; v2(2, 1) = { 2048, 0, 0 };

    ASSERT_EQ(4095, for_each_channel_accumulate(const_view(img1), const_view(img2), 0, [&](channel16_t p1, channel16_t p2)
    {
        return p1 + p2;
    })) << "should call functor for each pixel";
}
Example #14
bool compareTwoImageTolerance(const FileName &fn1, const FileName &fn2, double tolerance, size_t index1, size_t index2)
{
    double min_val, max_val, avg, stddev;
    Image<double> img1;
    Image<double> img2;
    if (index1)
        img1.read(fn1, DATA, index1);
    else
        img1.read(fn1);
    if (index2)
        img2.read(fn2, DATA, index2);
    else
        img2.read(fn2);
    img1() -= img2();
    img1().computeStats(avg, stddev, min_val, max_val);
    //return true if equal, false if different
    return !(fabs(max_val - avg) > tolerance || fabs(avg - min_val) > tolerance);
}
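Equivalently, writing D = img1 − img2 for the difference image computed above and d_min, d_max, μ for its minimum, maximum and average, the function reports the images as equal exactly when

\[
|d_{\max} - \mu| \le \text{tolerance} \quad \text{and} \quad |\mu - d_{\min}| \le \text{tolerance}.
\]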
Example #15
void Rgbimage::extractKeypoints()
{
    int procWidth = 600;
    int procHeight = 600;
    float denseSpacing = 10;
    float denseSize = 20;

    for(int x = denseSpacing; x <= procWidth-denseSpacing; x+=denseSpacing)
        for(int y = denseSpacing; y <= procHeight-denseSpacing; y+=denseSpacing)
        {
            cv::KeyPoint kp(x, y, denseSize);
            denseKeyPoints_.push_back(kp);
        }
    cv::Mat img2(600,600,CV_8UC4);
    cv::drawKeypoints( image_, denseKeyPoints_, img2, cv::Scalar::all(-1), cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS );
    imshow("asd", img2);
    cv::waitKey(0);
}
Example #16
//Made templates for test calibration
void CalibrationMain::on_MakeTemplateB_clicked()
{
    QGraphicsScene * temp1 = new QGraphicsScene;
    QGraphicsScene * temp2 = new QGraphicsScene;
    QFileInfo ourFile(currentFile);
    QString targetFile =  QApplication::applicationDirPath() + "/calibrator/templates/" + ourFile.baseName() + ".gif";
    QString targetFile2 =  QApplication::applicationDirPath() + "/calibrator/templates/" + ourFile.baseName() + "m.gif";

    temp1->setBackgroundBrush(QBrush(Qt::black));
    temp2->setBackgroundBrush(QBrush(Qt::white));

    for(int i=0; i<10; i++)
        for(int j=0; j<10; j++)
        {
            if(framesX[i][j].used)
            {
                temp1->addRect(framesX[i][j].offsetX + 100*i, framesX[i][j].offsetY + 100 * j,
                              framesX[i][j].W-1, framesX[i][j].H-1, QPen(Qt::yellow, 1),Qt::transparent);
                temp2->addRect(framesX[i][j].offsetX + 100*i, framesX[i][j].offsetY + 100 * j,
                              framesX[i][j].W-1, framesX[i][j].H-1, QPen(Qt::black, 1),Qt::transparent);
            }
        }

    QImage img1(1000,1000,QImage::Format_ARGB32_Premultiplied);
    QImage img2(1000,1000,QImage::Format_ARGB32_Premultiplied);
    QPainter p1(&img1);
    QPainter p2(&img2);
    temp1->render(&p1, QRectF(0,0,1000,1000),QRectF(0,0,1000,1000));
    temp2->render(&p2, QRectF(0,0,1000,1000),QRectF(0,0,1000,1000));
    p1.end();
    p2.end();

    QApplication::setOverrideCursor(Qt::WaitCursor);
    Graphics::toGif(img1, targetFile);
    Graphics::toGif(img2, targetFile2);
    //img1.save(targetFile);
    //img2.save(targetFile2);
    QApplication::restoreOverrideCursor();

    QMessageBox::information(this, tr("Saved"), tr("Sprite drawing templates saved in:\n")
                             +targetFile+"\n"+targetFile2);

}
Example #17
// Set the button images.
void cBitmap2ButtonEx::SetButton2Bitmap(const wxString &fileName)
{
	if (fileName.IsEmpty())
		return;

	const wxString ext = GetFileExt();

	wxImage img1(fileName + _("_0") + ext);
	wxImage img2(fileName + _("_1") + ext);
	if (!img1.IsOk() || !img2.IsOk())
		return;

	m_btnImage[BTN3_STATE::NORMAL] = wxBitmap(img1);
	m_btnImage[BTN3_STATE::PRESSED] = wxBitmap(img1);
	m_btnImage[BTN3_STATE::HOVER] = wxBitmap(img2);

	Refresh();
	Layout();
	Fit();
}
Example #18
ObjectSettings
Background::get_settings() {
  ObjectSettings result = GameObject::get_settings();
  result.options.push_back( ObjectOption(MN_INTFIELD, _("Z-pos"), &layer, "z-pos"));
  ObjectOption align(MN_STRINGSELECT, _("Alignment"), &alignment);
  align.select.push_back(_("none"));
  align.select.push_back(_("left"));
  align.select.push_back(_("right"));
  align.select.push_back(_("top"));
  align.select.push_back(_("bottom"));
  result.options.push_back(align);
  result.options.push_back( ObjectOption(MN_NUMFIELD, _("Scroll offset x"),
                                         &scroll_offset.x, "scroll-offset-x"));
  result.options.push_back( ObjectOption(MN_NUMFIELD, _("Scroll offset y"),
                                         &scroll_offset.y, "scroll-offset-y"));
  result.options.push_back( ObjectOption(MN_NUMFIELD, _("Scroll speed x"),
                                         &scroll_speed.x, "scroll-speed-x"));
  result.options.push_back( ObjectOption(MN_NUMFIELD, _("Scroll speed y"),
                                         &scroll_speed.y, "scroll-speed-y"));
  result.options.push_back( ObjectOption(MN_NUMFIELD, _("Speed x"), &speed, "speed"));
  result.options.push_back( ObjectOption(MN_NUMFIELD, _("Speed y"), &speed_y));

  ObjectOption img(MN_FILE, _("Top image"), &imagefile_top, "image-top", (OPTION_VISIBLE));
  img.select.push_back(".png");
  img.select.push_back(".jpg");
  img.select.push_back(".gif");
  img.select.push_back(".bmp");
  result.options.push_back(img);
  ObjectOption img2(MN_FILE, _("Image"), &imagefile, "image");
  img2.select = img.select;
  ObjectOption img3(MN_FILE, _("Bottom image"), &imagefile_bottom, "image-bottom", (OPTION_VISIBLE));
  img3.select = img.select;
  result.options.push_back(img2);
  result.options.push_back(img3);

  result.options.push_back( ObjectOption(MN_REMOVE, "", NULL));
  return result;
}
Example #19
void tst_QVolatileImage::copy()
{
    QVolatileImage img(100, 100, QImage::Format_RGB32);
    img.beginDataAccess();
    img.imageRef().fill(QColor(Qt::green).rgba());
    QPainter p(&img.imageRef());
    p.drawRect(10, 10, 50, 50);
    p.end();
    img.endDataAccess();

    QVolatileImage img2(100, 100, QImage::Format_RGB32);
    img2.copyFrom(&img, QRect());
    QImage imgA = img.toImage();
    QImage imgB = img2.toImage();
    QCOMPARE(imgA.size(), imgB.size());
    QVERIFY(fuzzyCompareImages(imgA, imgB, 0));

    img2 = QVolatileImage(20, 20, QImage::Format_RGB32);
    img2.copyFrom(&img, QRect(5, 5, 20, 20));
    imgA = img.toImage().copy(5, 5, 20, 20);
    imgB = img2.toImage();
    QCOMPARE(imgA.size(), imgB.size());
    QVERIFY(fuzzyCompareImages(imgA, imgB, 0));
}
Example #20
QString AFormatter::normalizeBody (const QString& body, const QString& nick)
{
	QString data = body;

	// strip "[tagline]" and "[moderator]"
	QRegExp tagline("\\[tagline\\](.+)\\[/tagline\\]",       Qt::CaseInsensitive);
	QRegExp moderator("\\[moderator\\](.+)\\[/moderator\\]", Qt::CaseInsensitive);

	tagline.setMinimal(true);
	moderator.setMinimal(true);

	data.replace(tagline,   "");
	data.replace(moderator, "");

	// remove img tags carrying data instead of a link (e.g. see http://www.rsdn.ru/forum/flame.comp/4077971.1.aspx)
	QRegExp img1("\\[img\\]data:(\\S+)\\[/img\\]", Qt::CaseInsensitive);
	img1.setMinimal(true);
	data.replace(img1, "");

	// replace [img]...[/img] with a link
	QRegExp img2("\\[img\\](\\S+)\\[/img\\]", Qt::CaseInsensitive);
	img2.setMinimal(true);
	data.replace(img2, QString::fromUtf8("[url=\\1]\\1[/url]"));

	// shorten overly long links (e.g. see http://www.rsdn.ru/forum/web/4086359.1.aspx)
	QRegExp url1("\\[url=data:(\\S+)\\](.+)\\[/url\\]", Qt::CaseInsensitive);
	url1.setMinimal(true);
	data.replace(url1, "[url=bad%20link]\\2[/url]");

	// remove the quoting tag
	QRegExp q1("(^|[^\\[])\\[q\\]", Qt::CaseInsensitive);
	q1.setMinimal(true);
	data.replace(q1, "\\1");

	QRegExp q2("(^|[^\\[])\\[/q\\]", Qt::CaseInsensitive);
	q2.setMinimal(true);
	data.replace(q2, "\\1");

	// remove tables from the quote
	QRegExp table("(^|[^\\[])\\[t\\](.+)\\[/t\\]", Qt::CaseInsensitive);
	table.setMinimal(true);
	data.replace(table, "\\1");

	// remove [h1]..[h6] tags from the quote
	for (int i = 1; i < 7; i++)
	{
		QRegExp h1("(^|[^\\[])\\[h" + QString::number(i) + "\\]", Qt::CaseInsensitive);
		h1.setMinimal(true);
		data.replace(h1, "\\1");

		QRegExp h2("(^|[^\\[])\\[/h" + QString::number(i) + "\\]", Qt::CaseInsensitive);
		h2.setMinimal(true);
		data.replace(h2, "\\1");
	}

	data = data.trimmed();

	// split into lines
	QStringList source = data.split("\n");

	QString nick_3 = AFormatter::nick3(nick);

	// prepend the quoting prefix to lines
	for (int i = 0; i < source.size(); i++)
		if (source[i].trimmed().length())
			source[i] = nick_3 + "> " + source[i];

	// remove duplicated empty lines
	int index = 0;

	// regexp for greetings inside quoting
	QRegExp hello(QString::fromUtf8(">\\s{0,}Здравствуйте,\\s.+,\\sВы писали:"));

	while (index < source.size() - 1)
	{
		if (source.at(index).trimmed().length() == 0 && source.at(index + 1).trimmed().length() == 0)
		{
			source.removeAt(index);
			continue;
		}
		else if (source.at(index).indexOf(hello) != -1)
		{
			source.removeAt(index);

			if (index > 0)
				index--;

			continue;
		}

		index++;
	}

	// compute the quoting level
	QList<int> quoting_level;

	int size = source.size();

	for (int i = 0; i < size; i++)
	{
		QString temp = source.at(i).trimmed();

		int level = 0;

		for (int j = 0; j < temp.length() - 3; j++)
		{
			if (temp[j] == ' ' && !(temp[j + 1] == '&' || temp[j + 2] == '&' || temp[j + 3] == '&'))
				break;
			else if (temp[j] == '&' && temp[j + 1] == 'g' && temp[j + 2] == 't' && temp[j + 3] == ';')
			{
				level++;
				j += 3;
			}
			else if (temp[j] == '&' && temp[j + 1] == 'l' && temp[j + 2] == 't' && temp[j + 3] == ';')
				break;
		}

		quoting_level.append(level);

		source[i] = temp;
	}

	// insert empty lines between different quoting levels
	for (int i = 0; i < size - 1; i++)
	{
		if (source[i].length() == 0)
			continue;

		if (quoting_level.at(i) != quoting_level.at(i + 1) && source[i + 1].length() /*&& (quoting_level.at(i) == 0 || quoting_level.at(i + 1) == 0)*/)
			source[i] = source[i] + "\r\n";
	}

	data = source.join("\r\n").trimmed();

	// replace HTML special characters
	data.replace("&gt;",  ">");
	data.replace("&lt;",  "<");
	data.replace("&amp;", "&");

	return data;
}
Example #21
// Zero-order (nearest-neighbor) resize
void imgraw::resize_zero(float Ratio){
    //=========================================
    float rat = (float)Ratio;
    int Pic_x = this->width;
    int Pic_y = this->high;
    imgraw img2(Pic_y*Ratio, Pic_x*Ratio);

    float rat_r = floor(rat);
    int white=0; // how many new points (white) to insert after each original point (black)
    int black=0; // handles the fractional part of the scale: one extra point every `black` original points
    int limx=0,limy=0, limx_c=0,limy_c=0;
    // int debug=0;

    if(rat>1) {
        white = (rat_r)-1;
        if((rat-rat_r) > 0) {
            black = 1/(rat-rat_r);
            cout << "   **" << (rat-rat_r) <<endl;
            cout << "   black = " << black <<endl;
            limx = Pic_x * (rat-rat_r);
            limy = Pic_y * (rat-rat_r);
        }

        // per-pixel processing
        for(int j=0, yp=0; j < Pic_y; ++j) {
            limx_c=0;
            for(int i=0, xp=0; i < Pic_x; ++i) {
                // copy the original pixel
                img2.point_write(j+yp, i+xp, this->point_read(j, i));
                // padding (integer part): new points inserted after each original point
                for(int k = 0; k < white; ++k) {
                    // k = offset of the padded point, not of the copied point
                    img2.point_write(j+yp, (i+xp)+k+1, this->point_read(j, i));
                } xp+=white; // advance the write offset
                // padding 2 (fractional scale): insert one extra point every few original points
                if(black!=0 && (i+1)%black==0) { // indices start at 0, hence the +1
                    // debug++;
                    if(limx_c<limx) {
                        limx_c+=1;
                        img2.point_write(j+yp, (i+xp)+1, this->point_read(j, i));
                        xp+=1;
                    }
                }
            }

            // pad rows (integer part)
            for(int i = 0; i < white; ++i) {
                for(int k=0; k < Pic_x*rat; ++k)
                    img2.point_write(j+yp+1+i, k, img2.point_read(j+yp, k));
                // debug+=1;
            } yp+=white; // advance the row write offset
            // pad rows 2 (fractional scale)
            if(black!=0 && (j+1)%black==0) { // indices start at 0, hence the +1
                if(limy_c<limy) {
                    if(white==0) { // scale factor between 1 and 2
                        if(limy_c<limy) {
                            limy_c+=1;
                            for(int k=0; k < Pic_x*rat; ++k)
                                img2.point_write(j+yp+1, k, img2.point_read(j+yp, k));
                        }
                    }
                    else {
                        for(int i = 0; i < white; ++i) {
                            if(limy_c<limy) {
                                limy_c+=1;
                                for(int k=0; k < Pic_x*rat; ++k)
                                    img2.point_write(j+yp+1+i, k, img2.point_read(j+yp, k));
                            }
                            // debug+=1;
                        }
                    }
                    yp+=1; // advance the row write offset
                }
            }
        }
    } else { // shrinking
        int jmp=0; // skip one point for every jmp points read
        jmp = (1 / (1-rat));
        cout << "jmp=" << jmp << endl;
        limx = floor(Pic_x*rat);
        limy = floor(Pic_y*rat);
        for(int j = 0, yp = 0; j < Pic_y; ++j){
            // columns
            limx_c=0;
            for(int i = 0,xp = 0; i < Pic_x; ++i){
                if ((i+1)%jmp!=0){ // skip every jmp-th point
                    img2.point_write(yp, xp, this->point_read(j, i));
                    limx_c+=1;
                    xp+=1;
                    // too few iterations here
                }
            }
            // rows
            if ((j+1)%jmp!=0 && limy_c< floor(Pic_x*rat)-1){
                // if (limy_c<limy){
                    limy_c+=1;
                    yp+=1;
                // }
            }
        }
    }

    cout << "pix = " << floor(Pic_x*Ratio) << endl;
    // cout << "Pic_x = "   << Pic_x   << endl;
    // cout << "white = "   << white   << endl;
    // cout << "black = "   << black   << endl;
    // cout << "limx = "   << limx   << endl;
    // cout << "limy = "   << limy   << endl;
    // cout << "limx_c = "   << limx_c   << endl;
    // cout << "limy_c = "   << limy_c   << endl;
    // cout << "debug = "   << debug   << endl;
    // cout << "t = "   << ((float)Ratio-(int)Ratio)   << endl;

    // write the temporary buffer back into this object
    this->img_data=img2.img_data;
    this->filesize=img2.filesize;
    this->width=img2.width;
    this->high=img2.high;
}
Example #22
int main(int argc, char** argv)
{
	// Call to a constructor of the class Image by giving it the name of the picture file.
	
	Image img1 ("../../Multimedia/huge.jpg"); // Image "__name__" ("__image_path__");
	Image img2 ("../../Multimedia/huge.jpg"); // Image "__name__" ("__image_path__");
	
	//It is possible to display the image created before:
	img1.display("imagen 1");
	img2.display("imagen 2");
	
	//MPI only transfers buffers of primitive types, so we copy the images
	//into vectors of integers (or another primitive type)
	//if we want to parallelize the operation.
	
	int size=img1.get_width()*img1.get_height()*img1.get_depth()*img1.get_spectrum(); //define the size of the image result
	
	//As the images can be huge, we use malloc to reserve the memory block for each vector.
	//Note that MPI requires these memory blocks to be contiguous, hence the use of malloc.
	int *matrix= (int*)malloc(size*sizeof(int)); //img1
	int *matrix2= (int*)malloc(size*sizeof(int)); //img2
	int *mat_result= (int*)malloc(size*sizeof(int)); //result matrix
	
	Image result(img1.get_width(), img1.get_height(), img1.get_depth(), img1.get_spectrum(), 0);  //This is the constructor of the result image
	
	//We need to declare the variables of the function, the num of processes to use, the id of each process and the local parts of the matrix
	int x,y,z,c, procs, id, local_size, i,*matrix_local, *matrix2_local, *result_local;
	i=0;

	double time;

	MPI_Init(&argc, &argv); //This function is used to initialize the parallel section
	MPI_Comm_rank(MPI_COMM_WORLD, &id); // This function obtains the identifier of each process
	MPI_Comm_size(MPI_COMM_WORLD, &procs); // This function obtains the number of processes used by the communicator.
	time=MPI_Wtime(); //This function measures the execution time
	
	//Set the local size of each chunk to be send to each process
	local_size=size/procs;

	//Fill the vector with the pixel values of the images.
		for(c=0; c< img1.get_spectrum();++c)
		{
			for(z=0; z< img1.get_depth(); ++z)
			{	
				for(x=0; x<img1.get_width();++x)
				{					
					for(y=0; y< img1.get_height();++y)
					{	

						matrix[img1.get_height()*x+y+img1.get_height()*img1.get_width()*z+img1.get_height()*img1.get_width()*img1.get_depth()*c]=img1.get_pixel_value(x,y,z,c);
						matrix2[img1.get_height()*x+y+img1.get_height()*img1.get_width()*z+img1.get_height()*img1.get_width()*img1.get_depth()*c]=img2.get_pixel_value(x,y,z,c);

					}	
				}
			}
		}		
		//Create the local matrices, and the local result
		matrix2_local=(int*)malloc(local_size*sizeof(int));
		matrix_local=(int*)malloc(local_size*sizeof(int));
		result_local=(int*)malloc(local_size*sizeof(int));
	}	
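The example above is cut off right after the local buffers are allocated. Purely as a hedged illustration (not the original program), here is a minimal, self-contained sketch of the MPI_Scatter / local add / MPI_Gather pattern it appears to be building toward; the element count and fill values are hypothetical, and the total size is assumed to divide evenly by the number of processes.

#include <mpi.h>
#include <vector>

int main(int argc, char **argv)
{
	MPI_Init(&argc, &argv);
	int id = 0, procs = 1;
	MPI_Comm_rank(MPI_COMM_WORLD, &id);
	MPI_Comm_size(MPI_COMM_WORLD, &procs);

	const int size = 1024;                 // hypothetical total element count
	const int local_size = size / procs;   // assumes size % procs == 0

	std::vector<int> matrix, matrix2, result;
	if (id == 0) {                         // only the root holds the full data
		matrix.assign(size, 1);
		matrix2.assign(size, 2);
		result.resize(size);
	}

	std::vector<int> local1(local_size), local2(local_size), local_res(local_size);
	// distribute one chunk of each input to every process
	MPI_Scatter(matrix.data(),  local_size, MPI_INT, local1.data(), local_size, MPI_INT, 0, MPI_COMM_WORLD);
	MPI_Scatter(matrix2.data(), local_size, MPI_INT, local2.data(), local_size, MPI_INT, 0, MPI_COMM_WORLD);

	// each process adds its own chunk
	for (int i = 0; i < local_size; ++i)
		local_res[i] = local1[i] + local2[i];

	// collect the partial results back on the root
	MPI_Gather(local_res.data(), local_size, MPI_INT, result.data(), local_size, MPI_INT, 0, MPI_COMM_WORLD);

	MPI_Finalize();
	return 0;
}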
Example #23
void RenderImage::run(){
    QString path_to_file;
    QString path;
    QString file_begin = "img_0_",
            file_end = ".jpg", file_name;

    QString separator = QDir::separator();

    QPixmap img_full;
    QPixmap img_small;

    int result_w = 0,
        result_h = 0;

    int width, height, counter = 0;

    double coeff_h, coeff_w;
    float step_generate = 0.0;

    step_generate = ( 101.0 / (float)data_class->progress_value );

    for(int h=0; h <= data_class->size_category[current_directory][1] && do_run; h++ ){
        for(int w=0; w <= data_class->size_category[current_directory][0] && do_run; w++){

            path = data_class->getRoute(current_directory);
            path_to_file = path + file_begin + QString::number(h) + QString("_") + QString::number(w) + file_end;

            //qDebug() << path_to_file;
            //path for small image
            if(data_class->hash_name_status == true){
                file_name = data_class->hash_names_array[current_directory + "_small"][counter] + file_end;
            } else {
                file_name = "img_0_"+ QString::number(h) + QString("_") + QString::number(w) + file_end;
            }

            path = data_class->basic_catalog + separator + current_directory + separator + QString("small") + data_class->separator + file_name;

            QImage img2(path_to_file);

            width = img2.width();
            height = img2.height();

            coeff_h = (float)width / (float)height;
            coeff_w = (float)height / (float)width;

            width = (int)width;
            height = (int)height;

            if(width != 600 || height !=600){
                if(width > height) {
                    result_w = 600;
                    result_h = 600 * coeff_w;
                } else if(width < height) {
                    result_w = 600 * coeff_h;
                    result_h = 600;
                } else {
                    result_h = 600;
                    result_w = 600;
                }
             } else {
                result_h = 600;
                result_w = 600;
             }


            img_small = img_small.fromImage(img2.scaled(result_w, result_h, Qt::IgnoreAspectRatio, Qt::SmoothTransformation));
            QFile file2(path);
            file2.open(QIODevice::WriteOnly);
            img_small.save(&file2, "jpeg", data_class->compress_value);
            file2.close();

            if(width != 1800 || height != 1800){
                if(width > height) {
                    result_w = 1800;
                    result_h = 1800 * coeff_w;
                } else if(width < height) {
                    result_w = 1800 * coeff_h;
                    result_h = 1800;
                } else {
                    result_h = 1800;
                    result_w = 1800;
                }
            } else {
                result_h = 1800;
                result_w = 1800;
            }

            if(data_class->hash_name_status == true){
                file_name = data_class->hash_names_array[current_directory + "_full"][counter] + file_end;
            } else {
                file_name = "img_0_"+ QString::number(h) + QString("_") + QString::number(w) + file_end;
            }

            path = data_class->basic_catalog + separator + current_directory + separator + QString("full") + data_class->separator + file_name;

            QImage img(path_to_file);
            img_full = img_full.fromImage(img.scaled(result_w, result_h, Qt::IgnoreAspectRatio, Qt::SmoothTransformation));
            QFile file(path);
            file.open(QIODevice::WriteOnly);
            img_full.save(&file, "jpeg", data_class->compress_value);
            file.close();

            do_run = data_class->setProgressValue(step_generate);
            counter ++;
        }
    }
}
Example #24
// TODO: the following function should go somewhere else, perhaps in ImageIO.cpp
// Make sure the image has the smallest number of bands before writing.
// That is, if it's 4 bands with full alpha, reduce to 3 bands.  
// If it's 3 bands with constant colors, make it 1-band.
CByteImage removeRedundantBands(CByteImage img)
{
    CShape sh = img.Shape();
	int w = sh.width, h = sh.height, nB = sh.nBands;
	int x, y;
	if (nB < 3)
		return img;

	// check if full alpha if alpha channel present
	bool fullAlpha = true;
	if (nB == 4) {
		for (y = 0; y < h && fullAlpha; y++) {
			uchar *pix = &img.Pixel(0, y, 0);
			for (x = 0; x < w; x++) {
				if (pix[3] != 255) {
					fullAlpha = false;
					break;
				}
				pix += nB;
			}
		}
	}
	if (!fullAlpha)
		return img;

	// check for equal colors
	bool equalColors = true;
	for (y = 0; y < h && equalColors; y++) {
		uchar *pix = &img.Pixel(0, y, 0);
		for (x = 0; x < w; x++) {
			if (pix[0] != pix[1] ||
				pix[0] != pix[2] ||
				pix[1] != pix[2]) {
					equalColors = false;
					break;
				}
				pix += nB;
		}
	}
	// at this point, if nB == 4 we can reduce to at least 3 bands,
	// and if equalColors we can reduce to 1 band.
	if (! equalColors && nB < 4)
		return img;

	int newNB = equalColors ? 1 : 3;

	if (DEBUG_ImageIOpng)
		fprintf(stderr, "reducing from %d to %d bands\n", nB, newNB);

	CShape sh2(w, h, newNB);
	CByteImage img2(sh2);
	
	for (y = 0; y < h; y++) {
		uchar *pix = &img.Pixel(0, y, 0);
		uchar *pix2 = &img2.Pixel(0, y, 0);
		for (x = 0; x < w; x++) {
			for (int b = 0; b < newNB; b++) {
				pix2[b] = pix[b];
			}
			pix += nB;
			pix2 += newNB;
		}
	}

	return img2;
}
Example #25
//--------------------------------------------------------------
void testApp::setup()
{
	ofImage imageOf1, imageOf2;			//Load openFrameworks' images
	imageOf1.loadImage("crater1.png");
	imageOf2.loadImage("crater2.png");

	color1.setFromPixels( imageOf1 );	//Convert to ofxCv images
	color2.setFromPixels( imageOf2 );

	float decimate = 0.3;              //Decimate images to 30%
	ofxCvColorImage imageDecimated1;
	imageDecimated1.allocate( color1.width * decimate, 
                          color1.height * decimate );
	//High-quality resize
	imageDecimated1.scaleIntoMe( color1, CV_INTER_AREA );
	gray1 = imageDecimated1;

	ofxCvColorImage imageDecimated2;
	imageDecimated2.allocate( color2.width * decimate,
		                      color2.height * decimate );
	//High-quality resize
	imageDecimated2.scaleIntoMe( color2, CV_INTER_AREA );
	gray2 = imageDecimated2;
	

	Mat img1( gray1.getCvImage() );  //Create OpenCV images
	Mat img2( gray2.getCvImage() );
	Mat flow;                        //Image for flow
	//Computing optical flow
	  calcOpticalFlowFarneback( img1, img2, flow, 0.7, 3, 11, 5, 5, 1.1, 0 );
	//Split flow into separate images
	vector<Mat> flowPlanes;
	split( flow, flowPlanes );
	//Copy float planes to ofxCv images flowX and flowY
	IplImage iplX( flowPlanes[0] );
	flowX = &iplX;
	IplImage iplY( flowPlanes[1] );
	flowY = &iplY;

	//--------------------------------------------------------------------------
	//ATTENTION: Lines flowX = &iplX; and flowY = &iplY; can raise runtime error, 
	//caused by small bug in ofxOpenCV. 
	//So before running the example, fix it, as it described in testApp.h file
	//--------------------------------------------------------------------------

	w = gray1.width;
	h = gray1.height;

	//Flow image
	planeX = flowX;
	planeY = flowY;

	//create idX, idy
	idX.allocate( w, h );
	idY.allocate( w, h );
	for (int y=0; y<h; y++) {
		for (int x=0; x<w; x++) {
			idX.getPixelsAsFloats()[ x + w * y ] = x;
			idY.getPixelsAsFloats()[ x + w * y ] = y;
		}
	}

	//Load checkerboard image
	ofImage imageTest;
	imageTest.loadImage("checkerBoard.png");
	colorTest.setFromPixels( imageTest );

	//Do the morphing for the first time
	morphValue = 0;
	morphImageIndex = 1;
	updateMorph( morphValue, morphImageIndex );
}
Example #26
void
LinErodedBinaryImage::erodeAll(BinaryImage* anImg,
			       unsigned int anEroSize)

  throw(QgarErrorDomain)

{
  int linsize =  (2 * anEroSize) + 1;  // size of segment
  int diagSize = (2 * linsize) / 3;
  int width   = anImg->width();
  int height  = anImg->height();

  if ((diagSize > height) || (diagSize > width))
    {
      std::ostringstream os;
      os << "All-orientations erosion size ("
	 << anEroSize
	 << " -> "
	 << linsize
	 << ") too large for image "
	 << width
	 << " X "
	 << height;
      throw QgarErrorDomain(__FILE__, __LINE__,
			    "void qgar::LinErodedBinaryImage::erodeAll(qgar::BinaryImage*, unsigned int)",
			    os.str());
    }

  BinaryImage img2(*anImg);

  // Allocate a table for current input rows
  BinaryImage::pointer crow = new BinaryImage::value_type [width * linsize];

  // Allocate a table for current output row
  BinaryImage::pointer orow = new BinaryImage::value_type [width];
  
  BinaryImage::pointer p;
  BinaryImage::pointer q;
  
  // Now loop on all possible lines

  int i = 0;   // current line number in input image
  int l = anEroSize; // current line number in output image

  for (/* VOID */ ; l < height - (int)anEroSize ; ++l, ++i)
    {
      int ii = i; // current line number while loading current rows

      for (q = crow ; ii < i + linsize ; ++ii, q += width)
	{
	  anImg->row(ii, q); // load rows into crow
	}

      int j = anEroSize;
      anImg->row(l, orow); // initialize orow with old values
      
      for (p = orow + anEroSize, q = crow + anEroSize ;
	   j < width - (int)anEroSize ; ++j, ++q)
	{              // On all columns which can be processed
	  BinaryImage::value_type curhoriz = 1; // start with minimum for all
	  BinaryImage::value_type curverti = 1;
	  BinaryImage::value_type curpdiag = 1;
	  BinaryImage::value_type curndiag = 1;

	  BinaryImage::pointer sl = q;
	  int k = 0;
	  
	  for (/* VOID */ ; k < linsize ; ++k, sl += width)
	    {
	      if (*(sl - anEroSize + k) == 0)
		{
		  curndiag = 0;
		}
	      if (*sl == 0)
		{
		  curverti = 0;
		}
	      if (*(sl + anEroSize -k) == 0)
		{
		  curpdiag = 0;
		}
	      if (k == (int)anEroSize) // if we are on the medium line
		{
		  BinaryImage::pointer ssl = sl - anEroSize;
		  int kk = 0;
		  for (/* VOID */ ; kk < linsize ; ++kk, ++ssl)
		    {
		      if (*ssl == 0)
			{
			  curhoriz = 0;	// found a zero
			  break; // no need to continue
			}
		    } // END for kk
		}
	    } // END for k

          // Write result
	  *p++ = ((curndiag == 0) && (curpdiag == 0) &&
		  (curhoriz == 0) && (curverti == 0)) ? 0 : 1;
	}

      // Write result
      img2.setRow(l, orow);
    }

  for (i = 0; i < height; ++i)
    {
      img2.row(i, orow);
      anImg->setRow(i, orow);
    }

  // Clean up
  delete [] crow;
  delete [] orow;
}
Example #27
	exit_t main()
	{
		const char *equation = "v";
		std::string format = "ARGB";
		int iterations = 1;

		MagickCore::MagickCoreGenesis(*argv, Magick::MagickFalse);

		switch(argc)
		{
			case 4:
				iterations = atoi(argv[3]);
			case 3:
				format = argv[2];
				assert_always(format.length()==4, "Format must consist of 4 chars.");
				// TODO: only RGBACYMK allowed
			case 2:
				equation = argv[1];
				break;
			case 1:
			default:
				exit_usage();
		}
		// big endian -> reverse format for user
		std::reverse(format.begin(), format.end());

		std::vector<char> content = get_file_contents();

		try {

			// this is all much copying,
			// but it seems to be the only way...
			Magick::Blob blob(content.data(), content.size());
			Magick::Image img(blob);

			const dimension dim(img.size().width(),
				img.size().height());
			grid_t grid(dimension(dim.height(), dim.width()), 0);
			img.write(0, 0, dim.width(), dim.height(), format,
				Magick::CharPixel, grid.data().data());


			using ca_sim_t = sca::ca::simulator_t<
				sca::ca::eqsolver_t, def_coord_traits, def_cell_traits>;
			ca_sim_t sim(equation);
			sim.grid() = grid;
			sim.finalize();


			for(int i = 0; i < iterations; ++i)
			 sim.run_once(ca_sim_t::synchronous());

			Magick::Blob blob2;
			Magick::Image img2(dim.width(), dim.height(), format,
				Magick::CharPixel, sim.grid().data().data());
			// needed, otherwise we write "format-less":
			img2.magick(img.magick());
			img2.write(&blob2);

			std::cout.write(reinterpret_cast<const char*>(blob2.data()), blob2.length());
		}
		catch ( Magick::Exception & error) {
			std::cerr << "Caught Magick++ exception: "
				<< error.what() << std::endl;
		}

		return exit_t::success;
	}
Example #28
void Chi2LibFFTW::getChiImage(MyMatrix<double> *kernel, MyMatrix<double> *img, MyMatrix<double> *out, bool use_thread){
	MyLogger::log()->debug("[Chi2LibFFTW][getChiImage] Using cache");
	if(Chi2LibFFTWCache::empty(cached_kernel)){ //Kernel
		MyMatrix<double> *kernel_img = new MyMatrix<double>(kernel->sX(), kernel->sY());
		Chi2LibMatrix::copy(kernel, kernel_img);
		Chi2LibFFTWCache::cache(cached_kernel, kernel_img);
		MyLogger::log()->debug("[Chi2LibFFTW][getChiImage] Kernel Cached");
	}
	if(Chi2LibFFTWCache::empty(cached_kernel2)){ //Kernel2
		MyMatrix<double> *kernel_img2 = new MyMatrix<double>(kernel->sX(), kernel->sY());
		Chi2LibMatrix::copy(kernel, kernel_img2);
		Chi2LibMatrix::squareIt(kernel_img2);
		Chi2LibFFTWCache::cache(cached_kernel2, kernel_img2);
		MyLogger::log()->debug("[Chi2LibFFTW][getChiImage] Kernel^2 Cached");
	}
	if(Chi2LibFFTWCache::empty(cached_first_term)){ //First Term -> conv2d_fft( normaldata, ipf*ipf )
		MyMatrix<double> *first_term = new MyMatrix<double>(img->sX()+kernel->sX()-1, img->sY()+kernel->sY()-1);
		Chi2LibFFTWCache::cache(cached_first_term, first_term);
		MyLogger::log()->debug("[Chi2LibFFTW][getChiImage] First Term Cached");
	}
	if(Chi2LibFFTWCache::empty(cached_second_term)){ //Second Term -> conv2d_fft( normaldata*normaldata, ipf )
		MyMatrix<double> *second_term = new MyMatrix<double>(img->sX()+kernel->sX()-1, img->sY()+kernel->sY()-1);
		Chi2LibFFTWCache::cache(cached_second_term, second_term);
		MyLogger::log()->debug("[Chi2LibFFTW][getChiImage] Second Term Cached");
	}
	if(Chi2LibFFTWCache::empty(cached_third_term)){ //Third Term -> conv2d_fft( blank, ipf*ipf*ipf )
		MyMatrix<double> *third_term = new MyMatrix<double>(img->sX()+kernel->sX()-1, img->sY()+kernel->sY()-1);
		Chi2LibFFTWCache::cache(cached_third_term, third_term);
		MyLogger::log()->debug("[Chi2LibFFTW][getChiImage] Third Term Cached");
	}

	if(use_thread){
		PartitionFFT p1;
		p1.img = img;
		p1.kernel_img = Chi2LibFFTWCache::cache(cached_kernel2);
		p1.output = Chi2LibFFTWCache::cache(cached_first_term);

		PartitionFFT p2;
		MyMatrix<double> img2(img->sX(), img->sY());
		Chi2LibMatrix::copy(img, &img2);
		Chi2LibMatrix::squareIt(&img2);
		p2.img = &img2;
		p2.kernel_img = Chi2LibFFTWCache::cache(cached_kernel);
		p2.output = Chi2LibFFTWCache::cache(cached_second_term);

		pthread_t thread1, thread2, thread3;
		MyLogger::log()->debug("[Chi2LibFFTW] Generating First Convolution");
		pthread_create(&thread1, NULL, conv2d_fftThread, (void *)&p1);
		MyLogger::log()->debug("[Chi2LibFFTW] Generating Second Convolution");
		pthread_create(&thread2, NULL, conv2d_fftThread, (void *)&p2);

		if(!Chi2LibFFTWCache::lock(cached_third_term)){
			PartitionFFT p3;
			
			MyMatrix<double> *blank = new MyMatrix<double>(img->sX(), img->sY(), 1.0);
			p3.img = blank;
			
			MyMatrix<double> *kernel_img3 = new MyMatrix<double>(kernel->sX(), kernel->sY());
			Chi2LibMatrix::copy(kernel, kernel_img3);
			Chi2LibMatrix::cubeIt(kernel_img3);
			p3.kernel_img = kernel_img3;
			p3.output = Chi2LibFFTWCache::cache(cached_third_term);

			MyLogger::log()->debug("[Chi2LibFFTW] Generating Third Convolution");
			pthread_create(&thread3, NULL, conv2d_fftThread, (void *)&p3);

			pthread_join(thread3, NULL);
			
			delete blank;
			delete kernel_img3;
			Chi2LibFFTWCache::lock(cached_third_term, true);
		}

		pthread_join(thread1, NULL);
		pthread_join(thread2, NULL);

	}else{
		MyLogger::log()->debug("[Chi2LibFFTW] Generating First Convolution");
		conv2d_fft(img, Chi2LibFFTWCache::cache(cached_kernel2), Chi2LibFFTWCache::cache(cached_first_term));	// ~200 milliseconds

		MyLogger::log()->debug("[Chi2LibFFTW] Generating Second Convolution");
		MyMatrix<double> img2(img->sX(), img->sY());
		Chi2LibMatrix::copy(img, &img2);
		Chi2LibMatrix::squareIt(&img2);
		conv2d_fft(&img2, Chi2LibFFTWCache::cache(cached_kernel), Chi2LibFFTWCache::cache(cached_second_term)); // ~180 milliseconds

		if(!Chi2LibFFTWCache::lock(cached_third_term)){
			MyLogger::log()->debug("[Chi2LibFFTW] Generating Third Convolution");
			MyMatrix<double> *blank = new MyMatrix<double>(img->sX(), img->sY(), 1.0);
			MyMatrix<double> *kernel_img3 = new MyMatrix<double>(kernel->sX(), kernel->sY());
			Chi2LibMatrix::copy(kernel, kernel_img3);
			Chi2LibMatrix::cubeIt(kernel_img3);
			
			conv2d_fft(blank, kernel_img3, Chi2LibFFTWCache::cache(cached_third_term)); // ~170 milliseconds
			
			delete blank;
			delete kernel_img3;
			Chi2LibFFTWCache::lock(cached_third_term, true);
		}
	}

	MyMatrix<double> *first_term = Chi2LibFFTWCache::cache(cached_first_term);
	MyMatrix<double> *second_term = Chi2LibFFTWCache::cache(cached_second_term);
	MyMatrix<double> *third_term = Chi2LibFFTWCache::cache(cached_third_term);

	MyLogger::log()->debug("[Chi2LibFFTW] Computing result");
	for(unsigned int x=0; x < first_term->sX(); ++x)
		for(unsigned int y=0; y < first_term->sY(); ++y){
			out->at(x,y) = 1.0/(1.0+ (-2.0*first_term->getValue(x,y) +second_term->getValue(x,y))/third_term->getValue(x,y));
		}
	MyLogger::log()->debug("[Chi2LibFFTW] Result Computed");
}
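Restating the final loop in formula form (derived only from the code and its cached-term comments, with A the first term conv2d_fft(I, K²), B the second term conv2d_fft(I², K) and C the third term conv2d_fft(1, K³)):

\[
\mathrm{out}(x,y) = \frac{1}{\,1 + \dfrac{-2\,A(x,y) + B(x,y)}{C(x,y)}\,}.
\]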