Example 1
// Assumes OpenCV built with OpenGL support; SCALE_FACTOR and get_time()
// are defined elsewhere in the original source.
#include <cstdio>
#include <vector>
#include <opencv2/opencv.hpp>
#include <opencv2/core/opengl.hpp>

int main(int argc, char **argv) {
    std::vector<cv::ogl::Texture2D> images;
    if (argc == 1) {
        fprintf(stderr, "usage: %s <images ...>\n"
                "   cycle between given images\n",
                argv[0]);
        return -1;
    }
    cv::namedWindow("img", cv::WINDOW_OPENGL);

    images.resize(argc - 1);
    for (int i = 1; i < argc; i ++) {
        auto img = cv::imread(argv[i], CV_LOAD_IMAGE_COLOR);
        if (img.empty()) {
            fprintf(stderr, "failed to load %s: %m\n", argv[i]);
            return -1;
        }
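        // Pad the image with a 1-pixel black border and mark the corners with
        // reference colors (BGR) so the texture's orientation stays visible.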
        cv::Mat img1(img.rows + 2, img.cols + 2, CV_8UC3, cv::Scalar{0});
        img1.at<cv::Vec3b>(0, 0) = {0, 0, 255};
        img1.at<cv::Vec3b>(0, img.cols + 1) = {0, 255, 0};
        img1.at<cv::Vec3b>(img.rows + 1, 0) = {255, 0, 0};
        img1.at<cv::Vec3b>(img.rows + 1, img.cols + 1) = {255, 0, 0};
        img.copyTo(img1(cv::Rect(1, 1, img.cols, img.rows)));
        img = img1;
        cv::resize(img, img, {0, 0}, SCALE_FACTOR, SCALE_FACTOR,
                cv::INTER_NEAREST);
        images[i - 1].copyFrom(img);
    }
    cv::resizeWindow("img", images[0].cols(), images[0].rows());

    int nr_frame = 0;
    size_t idx = 0;
    double fps_time = get_time();
    for (; ; ) {
        nr_frame ++;
        if (nr_frame >= 100) {
            auto now = get_time();
            printf("fps: %.2f\n", nr_frame / (now - fps_time));
            fps_time = now;
            nr_frame = 0;
        }
        cv::imshow("img", images[idx ++]);
        if (idx == images.size())
            idx = 0;
        if ((cv::waitKey(1) & 0xFF) == 'q')
            break;
    }
    cv::destroyWindow("img");
}
Example 2
// Compare images:
bool KOFilter::imageCompare(const QString &dir1File, const QString &dir2File)
{
    // Check src file:
    QFileInfo fi1(dir1File);

    // Check dst file:
    QFileInfo fi2(dir2File);

    // Check files:
    if (!fi1.exists() || !fi2.exists())
        return false;

    QImage img1(dir1File);
    QImage img2(dir2File);
    if (img1.size() != img2.size())
        return false;

    for (int j=0; j<img1.height(); j++)
        for (int i=0; i<img1.width(); i++)
        {
            QRgb color1 = img1.pixel(i, j);
            QRgb color2 = img2.pixel(i, j);
            if (color1 != color2)
                return false;
        }

    return true;
}
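For reference, Qt already offers a deep pixel comparison: QImage::operator== compares image contents pixel by pixel, so the hand-written loop above can be collapsed. A minimal sketch under the same existence checks (imageCompareQt is illustrative, not part of KOFilter):

bool imageCompareQt(const QString &dir1File, const QString &dir2File)
{
    QImage img1(dir1File);
    QImage img2(dir2File);
    // isNull() covers missing files and unreadable formats;
    // operator== then performs the pixel-level comparison.
    return !img1.isNull() && !img2.isNull() && img1 == img2;
}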
Example 3
void tst_QVolatileImage::sharing()
{
    QVolatileImage img1(100, 100, QImage::Format_ARGB32);
    QVolatileImage img2 = img1;
    img1.beginDataAccess();
    img2.beginDataAccess();
    QVERIFY(img1.constBits() == img2.constBits());
    img2.endDataAccess();
    img1.endDataAccess();
    img1.imageRef(); // non-const call, should detach
    img1.beginDataAccess();
    img2.beginDataAccess();
    QVERIFY(img1.constBits() != img2.constBits());
    img2.endDataAccess();
    img1.endDataAccess();

    // toImage() should return a copy of the internal QImage.
    // imageRef() is a reference to the internal QImage.
    QVERIFY(img1.imageRef().constBits() != img1.toImage().constBits());

#ifdef Q_OS_SYMBIAN
    CFbsBitmap *bmp = new CFbsBitmap;
    QVERIFY(bmp->Create(TSize(100, 50), EColor16MAP) == KErrNone);
    QVolatileImage bmpimg(bmp);
    QVolatileImage bmpimg2;
    bmpimg2 = bmpimg;
    QCOMPARE(bmpimg.constBits(), (const uchar *) bmp->DataAddress());
    QCOMPARE(bmpimg2.constBits(), (const uchar *) bmp->DataAddress());
    // Now force a detach, which should copy the pixel data under-the-hood.
    bmpimg.imageRef();
    QVERIFY(bmpimg.constBits() != (const uchar *) bmp->DataAddress());
    QCOMPARE(bmpimg2.constBits(), (const uchar *) bmp->DataAddress());
    delete bmp;
#endif
}
Example 4
GPU_PERF_TEST(AlphaComp, cv::gpu::DeviceInfo, cv::Size, MatType, AlphaOp)
{
    cv::gpu::DeviceInfo devInfo = GET_PARAM(0);
    cv::gpu::setDevice(devInfo.deviceID());

    cv::Size size = GET_PARAM(1);
    int type = GET_PARAM(2);
    int alpha_op = GET_PARAM(3);

    cv::Mat img1_host(size, type);
    fill(img1_host, 0, 255);

    cv::Mat img2_host(size, type);
    fill(img2_host, 0, 255);

    cv::gpu::GpuMat img1(img1_host);
    cv::gpu::GpuMat img2(img2_host);
    cv::gpu::GpuMat dst;

    cv::gpu::alphaComp(img1, img2, dst, alpha_op);

    TEST_CYCLE()
    {
        cv::gpu::alphaComp(img1, img2, dst, alpha_op);
    }
}
Example 5
GPU_PERF_TEST(BlendLinear, cv::gpu::DeviceInfo, cv::Size, MatType)
{
    cv::gpu::DeviceInfo devInfo = GET_PARAM(0);
    cv::gpu::setDevice(devInfo.deviceID());

    cv::Size size = GET_PARAM(1);
    int type = GET_PARAM(2);

    cv::Mat img1_host(size, type);
    fill(img1_host, 0, 255);

    cv::Mat img2_host(size, type);
    fill(img2_host, 0, 255);

    cv::gpu::GpuMat img1(img1_host);
    cv::gpu::GpuMat img2(img2_host);
    cv::gpu::GpuMat weights1(size, CV_32FC1, cv::Scalar::all(0.5));
    cv::gpu::GpuMat weights2(size, CV_32FC1, cv::Scalar::all(0.5));
    cv::gpu::GpuMat dst;

    cv::gpu::blendLinear(img1, img2, weights1, weights2, dst);

    TEST_CYCLE()
    {
        cv::gpu::blendLinear(img1, img2, weights1, weights2, dst);
    }
}
Example 6
TEST_F(for_each_channel_accumulate_test, should_throw_when_image_dimensions_dont_match)
{
    rgb16_image_t img1(1, 2), img2(1, 3), img3(2, 2);

    auto zero = [](channel16_t, channel16_t) { return 0; };
    ASSERT_THROW(for_each_channel_accumulate(const_view(img1), const_view(img2), 0, zero), std::invalid_argument);
    ASSERT_THROW(for_each_channel_accumulate(const_view(img1), const_view(img3), 0, zero), std::invalid_argument);
}
Example 7
int main(int argc, const char* argv[])
{
	cv::gpu::GpuMat img1(cv::imread("../../Pictures/move1.jpg", CV_LOAD_IMAGE_GRAYSCALE)); 
	cv::gpu::GpuMat img2(cv::imread("../../Pictures/move2.jpg", CV_LOAD_IMAGE_GRAYSCALE)); 

	// Value for keypoints
	std::vector< cv::KeyPoint> keypoints1, keypoints2;
	cv::gpu::GpuMat keypoints1GPU, keypoints2GPU; 
	// Value for descriptor
	std::vector< float> descriptors1, descriptors2;
	cv::gpu::GpuMat descriptors1GPU, descriptors2GPU; 
	// orb detector and extractor
	cv::gpu::ORB_GPU orb(2000);
	// Create Object of DescriptorMatcher
	cv::gpu::BruteForceMatcher_GPU< cv::Hamming > matcher;
	// match results (k nearest neighbours per descriptor)
	std::vector< std::vector< cv::DMatch> > matches; 

	cv::TickMeter gpumeter;
	gpumeter.start();

	// get features
	orb(img1, cv::gpu::GpuMat(), keypoints1GPU, descriptors1GPU);
	orb(img2, cv::gpu::GpuMat(), keypoints2GPU, descriptors2GPU);

	// matching
	matcher.knnMatch(descriptors1GPU, descriptors2GPU, matches, 2);
	// keep only matches that pass Lowe's ratio test;
	// check the neighbour count before indexing into matches[k]
	std::vector< cv::DMatch > good_matches;
	for(int k = 0; k < std::min(descriptors1GPU.rows-1,(int) matches.size()); k++) 
	{
		if(matches[k].size() == 2 && matches[k][0].distance < 0.6*matches[k][1].distance)
		{
			good_matches.push_back(matches[k][0]);
		}
	}

	gpumeter.stop();

	// show result
	std::cout << "ORB (GPU): " << gpumeter.getTimeMilli() << "ms" << std::endl;
	std::cout << "FOUND " << keypoints1GPU.cols << " keypoints on first image" << std::endl; 
	std::cout << "FOUND " << keypoints2GPU.cols << " keypoints on second image" << std::endl; 
	orb.downloadKeyPoints(keypoints1GPU, keypoints1);
	orb.downloadKeyPoints(keypoints2GPU, keypoints2);
	std::cout << "keypoint1 :" << keypoints1.size() << "  keypoint2 :" << keypoints2.size() << std::endl;

	cv::Mat img_matches; 
	cv::Mat img1_cpu, img2_cpu;
	img1.download(img1_cpu);
	img2.download(img2_cpu);
	cv::drawMatches(img1_cpu, keypoints1, img2_cpu, keypoints2, good_matches, img_matches);
	cv::namedWindow("matches", 0);
	cv::imshow("matches", img_matches);
	cv::waitKey(0);

	return 0;
}
Example 8
bool compareTwoImageTolerance(const FileName &fn1, const FileName &fn2, double tolerance, size_t index1, size_t index2)
{
    double min_val, max_val, avg, stddev;
    Image<double> img1;
    Image<double> img2;
    if (index1)
        img1.read(fn1, DATA, index1);
    else
        img1.read(fn1);
    if (index2)
        img2.read(fn2, DATA, index2);
    else
        img2.read(fn2);
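    // Difference the two images, then check that the extreme deviations from
    // the mean difference stay within the requested tolerance.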
    img1() -= img2();
    img1().computeStats(avg, stddev, min_val, max_val);
    //return true if equal, false if different
    return !(fabs(max_val - avg) > tolerance || fabs(avg - min_val) > tolerance);
}
Example 9
// Main procedure
//---------------
int main(int argc,char **argv) {

  // Read and check command line parameters.
  cimg_usage("Compute a linear fading between two 2D images");
  const char *file_i1 = cimg_option("-i1",cimg_imagepath "sh0r.pgm","Input Image 1");
  const char *file_i2 = cimg_option("-i2",cimg_imagepath "milla.bmp","Input Image 2");
  const char *file_o  = cimg_option("-o",(char*)0,"Output Image");
  const bool visu     = cimg_option("-visu",true,"Visualization mode");
  const double pmin   = cimg_option("-min",40.0,"Beginning of the fade (in %)")/100.0;
  const double pmax   = cimg_option("-max",60.0,"End of the fade (in %)")/100.0;
  const double angle  = cimg_option("-angle",0.0,"Fade angle")*cil::cimg::PI/180;

  // Init images.
  cil::CImg<unsigned char> img1(file_i1), img2(file_i2);
  if (!img2.is_sameXYZC(img1)) {
    int
      dx = std::max(img1.width(),img2.width()),
      dy = std::max(img1.height(),img2.height()),
      dz = std::max(img1.depth(),img2.depth()),
      dv = std::max(img1.spectrum(),img2.spectrum());
    img1.resize(dx,dy,dz,dv,3);
    img2.resize(dx,dy,dz,dv,3);
  }
  cil::CImg<unsigned char> dest(img1);

  // Compute the faded image.
  const double ca = std::cos(angle), sa = std::sin(angle);
  double alpha;
  cimg_forXYZC(dest,x,y,z,k) {
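    // Project the normalized pixel position onto the fade axis (rotated by 'angle').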
    const double X = ((double)x/img1.width() - 0.5)*ca + ((double)y/img1.height() - 0.5)*sa;
    if (X + 0.5<pmin) alpha = 0; else {
      if (X + 0.5>pmax) alpha = 1; else
        alpha = (X + 0.5 - pmin)/(pmax - pmin);
    }
    dest(x,y,z,k) = (unsigned char)((1 - alpha)*img1(x,y,z,k) + alpha*img2(x,y,z,k));
  }

  // Save and exit
  if (file_o) dest.save(file_o);
  if (visu) dest.display("Image fading");
  return 0;
}
Example 10
AutoCorr::AutoCorr(Texture *t)
{
    //convert texture to cv::Mat
    cv::Mat img1(cv::Size(t->m_width, t->m_height), CV_MAKETYPE(t->m_numBytesPerChan * 8, t->m_numChannels), t->m_data);
    //make input gray scale
    cv::Mat img;
    cv::cvtColor(img1, img, CV_RGB2GRAY);

    m_image = img1;
    autocorrDFT(img, m_autocorr);

}
Example 11
TEST_F(for_each_channel_accumulate_test, should_include_the_initial_value)
{
    rgb16_image_t img1(1, 1), img2(1, 1);

    view(img1)(0, 0) = { 1, 0, 0 };
    view(img2)(0, 0) = { 1, 0, 0 };

    ASSERT_EQ(2 + 8, for_each_channel_accumulate(const_view(img1), const_view(img2), 8, [&](channel16_t p1, channel16_t p2)
    {
        return p1 + p2;
    }));
}
Example 12
void TextureSplatting::onPluginLoad()
{
  // VS
  m_pVs = new QGLShader(QGLShader::Vertex, this);
  m_pVs->compileSourceFile(":/splat.vert");
  
  // FS
  m_pFs = new QGLShader(QGLShader::Fragment, this);
  m_pFs->compileSourceFile(":/splat.frag");
  
  // Program  
  m_pProgram = new QGLShaderProgram(this);
  m_pProgram->addShader(m_pVs);
  m_pProgram->addShader(m_pFs);
  m_pProgram->link();
  
  // Load Texture 1
  glActiveTexture(GL_TEXTURE0);
  QString filename = QFileDialog::getOpenFileName(0, "Open Image", "/assig/grau-g/Textures", "Image file (*.png *.jpg)");	
  QImage img0(filename);	
  QImage im0 = QGLWidget::convertToGLFormat(img0);
  glGenTextures( 1, &m_textureId0);
  glBindTexture(GL_TEXTURE_2D, m_textureId0);
  glTexImage2D( GL_TEXTURE_2D, 0, GL_RGB, im0.width(), im0.height(), 0, GL_RGBA, GL_UNSIGNED_BYTE, im0.bits());
  glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
  glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
  glBindTexture(GL_TEXTURE_2D, 0);
  
  // Load Texture 2
  glActiveTexture(GL_TEXTURE1);
  QString filename2 = QFileDialog::getOpenFileName(0, "Open Image 2", "/assig/grau-g/Textures", "Image file (*.png *.jpg)");	
  QImage img1(filename2);	
  QImage im1 = QGLWidget::convertToGLFormat(img1);
  glGenTextures( 1, &m_textureId1);
  glBindTexture(GL_TEXTURE_2D, m_textureId1);
  glTexImage2D( GL_TEXTURE_2D, 0, GL_RGB, im1.width(), im1.height(), 0, GL_RGBA, GL_UNSIGNED_BYTE, im1.bits());
  glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
  glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
  glBindTexture(GL_TEXTURE_2D, 0);
  
  // Load Texture 3
  glActiveTexture(GL_TEXTURE3);
  QString filename3 = QFileDialog::getOpenFileName(0, "Open Image 3", "/assig/grau-g/Textures", "Image file (*.png *.jpg)");	
  QImage img2(filename3);	
  QImage im2 = QGLWidget::convertToGLFormat(img2);
  glGenTextures( 1, &m_textureId2);
  glBindTexture(GL_TEXTURE_2D, m_textureId2);
  glTexImage2D( GL_TEXTURE_2D, 0, GL_RGB, im2.width(), im2.height(), 0, GL_RGBA, GL_UNSIGNED_BYTE, im2.bits());
  glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
  glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
  glBindTexture(GL_TEXTURE_2D, 0);
}
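The three texture loads above differ only in the dialog caption and the target id. A possible refactoring into a helper (loadTexture is illustrative, not part of the original plugin):

static GLuint loadTexture(const QString &caption)
{
  // Ask the user for an image, convert it to GL layout and upload it.
  QString filename = QFileDialog::getOpenFileName(0, caption, "/assig/grau-g/Textures", "Image file (*.png *.jpg)");
  QImage im = QGLWidget::convertToGLFormat(QImage(filename));
  GLuint id = 0;
  glGenTextures(1, &id);
  glBindTexture(GL_TEXTURE_2D, id);
  glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, im.width(), im.height(), 0, GL_RGBA, GL_UNSIGNED_BYTE, im.bits());
  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
  glBindTexture(GL_TEXTURE_2D, 0);
  return id;
}

onPluginLoad() would then reduce to three calls such as m_textureId0 = loadTexture("Open Image");.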
Example 13
TEST_F(for_each_channel_accumulate_test, should_call_functor_for_each_channel_in_parallel_in_both_images_and_sum_the_results)
{
    rgb16_image_t img1(1, 1);
    rgb8_image_t img2(1, 1);

    view(img1)(0, 0) = { 1025, 18, 36 };
    view(img2)(0, 0) = { 1, 2, 4 };

    ASSERT_EQ(1024 + 16 + 32, for_each_channel_accumulate(const_view(img1), const_view(img2), 0, [&](channel16_t p1, channel8_t p2)
    {
        return p1 - p2;
    })) << "should sum all results";
}
Example 14
void TextureSplatting::onPluginLoad(){
	// VS + FS
  vs=new QGLShader(QGLShader::Vertex, this);
  vs->compileSourceFile("/home/llop/Llop/FIB/2015-2016QT/G/Lab/NewViewer/plugins/texture-splatting/texture-splatting.vert");
  fs=new QGLShader(QGLShader::Fragment, this);
  fs->compileSourceFile("/home/llop/Llop/FIB/2015-2016QT/G/Lab/NewViewer/plugins/texture-splatting/texture-splatting.frag");
  
	// Program  
	program=new QGLShaderProgram(this);
  program->addShader(vs);
	program->addShader(fs);
	program->link();

	// Load noise
	glActiveTexture(GL_TEXTURE0);
	QString noiseFilename = QFileDialog::getOpenFileName(0, "Open Noise Image", "/assig/grau-g/Textures", "Image file (*.png *.jpg)");	
	QImage img3(noiseFilename);	
	QImage im3=QGLWidget::convertToGLFormat(img3);
	glGenTextures( 1, &noiseId);
	glBindTexture(GL_TEXTURE_2D, noiseId);
	glTexImage2D( GL_TEXTURE_2D, 0, GL_RGB, im3.width(), im3.height(), 0, GL_RGBA, GL_UNSIGNED_BYTE, im3.bits());
	glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
	glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
	glBindTexture(GL_TEXTURE_2D, 0);

	// Load Texture 1
	glActiveTexture(GL_TEXTURE0);
	QString filename = QFileDialog::getOpenFileName(0, "Open Rock Image", "/assig/grau-g/Textures", "Image file (*.png *.jpg)");	
	QImage img0(filename);	
	QImage im0 = QGLWidget::convertToGLFormat(img0);
	glGenTextures( 1, &textureId0);
	glBindTexture(GL_TEXTURE_2D, textureId0);
	glTexImage2D( GL_TEXTURE_2D, 0, GL_RGB, im0.width(), im0.height(), 0, GL_RGBA, GL_UNSIGNED_BYTE, im0.bits());
	glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
	glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
	glBindTexture(GL_TEXTURE_2D, 0);

	// Load Texture 2
	glActiveTexture(GL_TEXTURE1);
	QString filename2 = QFileDialog::getOpenFileName(0, "Open Grass Image", "/assig/grau-g/Textures", "Image file (*.png *.jpg)");	
	QImage img1(filename2);	
	QImage im1 = QGLWidget::convertToGLFormat(img1);
	glGenTextures( 1, &textureId1);
	glBindTexture(GL_TEXTURE_2D, textureId1);
	glTexImage2D( GL_TEXTURE_2D, 0, GL_RGB, im1.width(), im1.height(), 0, GL_RGBA, GL_UNSIGNED_BYTE, im1.bits());
	glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
	glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
	glBindTexture(GL_TEXTURE_2D, 0);
}
Example 15
TEST_F(for_each_channel_accumulate_test, should_call_functor_for_each_pixel_in_parallel_in_both_images)
{
    rgb16_image_t img1(3, 2), img2(3, 2);
    auto v1 = view(img1), v2 = view(img2);

    v1(0, 0) = { 1, 0, 0 }; v1(1, 0) = { 2, 0, 0 };  v1(2, 0) = { 4, 0, 0 };
    v1(0, 1) = { 8, 0, 0 }; v1(1, 1) = { 16, 0, 0 }; v1(2, 1) = { 32, 0, 0 };
    v2(0, 0) = { 64, 0, 0 };  v2(1, 0) = { 128, 0, 0 };  v2(2, 0) = { 256, 0, 0 };
    v2(0, 1) = { 512, 0, 0 }; v2(1, 1) = { 1024, 0, 0 }; v2(2, 1) = { 2048, 0, 0 };

    ASSERT_EQ(4095, for_each_channel_accumulate(const_view(img1), const_view(img2), 0, [&](channel16_t p1, channel16_t p2)
    {
        return p1 + p2;
    })) << "should call functor for each pixel";
}
Example 16
int main(void)
{
	GetHistogram gh;
	ObjectFinder of;
	cv::Mat img1, img2;
	cv::Mat imgROI;
	cv::Mat histogram;
	cv::Mat backProject;

	img1 = cv::imread("..\\..\\..\\source\\1.jpg");
	if(!img1.data)
		return 0;

	imgROI = img1(cv::Rect(80,300,35,40));
	cv::rectangle(img1,cv::Rect(110,260,35,40),cv::Scalar(0,0,255));
	cv::namedWindow("img1");
	cv::imshow("img1", img1);
	// Get the hue histogram of imgROI
	histogram = gh.getHueHistogram(imgROI, 65);
	cv::normalize(histogram,histogram,1.0);
	of.setHistogram(histogram);
	// Load and process the second image
	cv::Mat hsv; 
	std::vector<cv::Mat> hue;
	img2 = cv::imread("..\\..\\..\\source\\2.jpg");
	cv::cvtColor(img2, hsv, CV_BGR2HSV);
	cv::split(hsv, hue);
	cv::threshold(hue[1], hue[1], 65, 255, cv::THRESH_BINARY); // mask out low-saturation pixels from the back-projection result
	// Back-project the second image using the histogram of the first image's ROI
	backProject = of.finder(img2);
	cv::bitwise_and(backProject, hue[1], backProject);
	cv::namedWindow("backProject");
	cv::imshow("backProject",backProject);
	// Mean shift
	cv::Rect rect(110,260,35,40);
	cv::rectangle(img2, rect, cv::Scalar(0,0,255));
	cv::TermCriteria criteria(cv::TermCriteria::MAX_ITER,10,0.01); // at most 10 iterations, shift threshold 0.01
	cv::meanShift(backProject,rect,criteria);
	cv::rectangle(img2, rect, cv::Scalar(0,255,0));
	cv::namedWindow("img2");
	cv::imshow("img2",img2);

	cv::waitKey(0);
	return 0;
}
Example 17
// Make templates for test calibration
void CalibrationMain::on_MakeTemplateB_clicked()
{
    QGraphicsScene * temp1 = new QGraphicsScene;
    QGraphicsScene * temp2 = new QGraphicsScene;
    QFileInfo ourFile(currentFile);
    QString targetFile =  QApplication::applicationDirPath() + "/calibrator/templates/" + ourFile.baseName() + ".gif";
    QString targetFile2 =  QApplication::applicationDirPath() + "/calibrator/templates/" + ourFile.baseName() + "m.gif";

    temp1->setBackgroundBrush(QBrush(Qt::black));
    temp2->setBackgroundBrush(QBrush(Qt::white));

    for(int i=0; i<10; i++)
        for(int j=0; j<10; j++)
        {
            if(framesX[i][j].used)
            {
                temp1->addRect(framesX[i][j].offsetX + 100*i, framesX[i][j].offsetY + 100 * j,
                              framesX[i][j].W-1, framesX[i][j].H-1, QPen(Qt::yellow, 1),Qt::transparent);
                temp2->addRect(framesX[i][j].offsetX + 100*i, framesX[i][j].offsetY + 100 * j,
                              framesX[i][j].W-1, framesX[i][j].H-1, QPen(Qt::black, 1),Qt::transparent);
            }
        }

    QImage img1(1000,1000,QImage::Format_ARGB32_Premultiplied);
    QImage img2(1000,1000,QImage::Format_ARGB32_Premultiplied);
    QPainter p1(&img1);
    QPainter p2(&img2);
    temp1->render(&p1, QRectF(0,0,1000,1000),QRectF(0,0,1000,1000));
    temp2->render(&p2, QRectF(0,0,1000,1000),QRectF(0,0,1000,1000));
    p1.end();
    p2.end();

    QApplication::setOverrideCursor(Qt::WaitCursor);
    Graphics::toGif(img1, targetFile);
    Graphics::toGif(img2, targetFile2);
    //img1.save(targetFile);
    //img2.save(targetFile2);
    QApplication::restoreOverrideCursor();

    QMessageBox::information(this, tr("Saved"), tr("Sprite drawing templates saved in:\n")
                             +targetFile+"\n"+targetFile2);

}
Example 19
// Set the button bitmaps.
void cBitmap2ButtonEx::SetButton2Bitmap(const wxString &fileName)
{
	if (fileName.IsEmpty())
		return;

	const wxString ext = GetFileExt();

	wxImage img1(fileName + _("_0") + ext);
	wxImage img2(fileName + _("_1") + ext);
	if (!img1.IsOk() || !img2.IsOk())
		return;

	m_btnImage[BTN3_STATE::NORMAL] = wxBitmap(img1);
	m_btnImage[BTN3_STATE::PRESSED] = wxBitmap(img1);
	m_btnImage[BTN3_STATE::HOVER] = wxBitmap(img2);

	Refresh();
	Layout();
	Fit();
}
Example 20
Mat oneStr2oneImg (const string& text, int fontFace, int thickness, Scalar color)
{
	double fontScale = 1;
	int baseline = 0;
	Size textSize = getTextSize(text, fontFace,fontScale, thickness, &baseline);
	// scale so the text height becomes roughly 1000 px
	fontScale *= 1000.0 / textSize.height;
	baseline += thickness;
	textSize = getTextSize(text, fontFace,fontScale, thickness, &baseline);
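	// note: img1 below is used only to compute a centered text origin; the text itself is drawn on 'img'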
	Mat img1(textSize.height*1.1, textSize.width*1.2, CV_8UC3, Scalar(0,0,0));
	// center the text
	Point textOrg((img1.cols - textSize.width)/2,(img1.rows + textSize.height)/2);
	Mat img(textSize.height*1.5, textSize.width*1.2, CV_8UC3, Scalar(0,0,0));
	// then put the text itself
	putText(img, text, textOrg, fontFace, fontScale,color, thickness, 8);
	/*
	namedWindow( "gg window", WINDOW_AUTOSIZE );
	imshow( "gg window", img );
	waitKey(0);
	*/
	return img;
}
Example 21
//--------------------------------------------------------------
void testApp::setup()
{
	ofImage imageOf1, imageOf2;			//Load openFrameworks' images
	imageOf1.loadImage("crater1.png");
	imageOf2.loadImage("crater2.png");

	color1.setFromPixels( imageOf1 );	//Convert to ofxCv images
	color2.setFromPixels( imageOf2 );

	float decimate = 0.3;              //Decimate images to 30%
	ofxCvColorImage imageDecimated1;
	imageDecimated1.allocate( color1.width * decimate, 
                          color1.height * decimate );
	//High-quality resize
	imageDecimated1.scaleIntoMe( color1, CV_INTER_AREA );
	gray1 = imageDecimated1;

	ofxCvColorImage imageDecimated2;
	imageDecimated2.allocate( color2.width * decimate,
		                      color2.height * decimate );
	//High-quality resize
	imageDecimated2.scaleIntoMe( color2, CV_INTER_AREA );
	gray2 = imageDecimated2;
	

	Mat img1( gray1.getCvImage() );  //Create OpenCV images
	Mat img2( gray2.getCvImage() );
	Mat flow;                        //Image for flow
	//Computing optical flow
	  calcOpticalFlowFarneback( img1, img2, flow, 0.7, 3, 11, 5, 5, 1.1, 0 );
	//Split flow into separate images
	vector<Mat> flowPlanes;
	split( flow, flowPlanes );
	//Copy float planes to ofxCv images flowX and flowY
	IplImage iplX( flowPlanes[0] );
	flowX = &iplX;
	IplImage iplY( flowPlanes[1] );
	flowY = &iplY;

	//--------------------------------------------------------------------------
	//ATTENTION: Lines flowX = &iplX; and flowY = &iplY; can raise runtime error, 
	//caused by small bug in ofxOpenCV. 
	//So before running the example, fix it, as it described in testApp.h file
	//--------------------------------------------------------------------------

	w = gray1.width;
	h = gray1.height;

	//Flow image
	planeX = flowX;
	planeY = flowY;

	//create idX, idy
	idX.allocate( w, h );
	idY.allocate( w, h );
	for (int y=0; y<h; y++) {
		for (int x=0; x<w; x++) {
			idX.getPixelsAsFloats()[ x + w * y ] = x;
			idY.getPixelsAsFloats()[ x + w * y ] = y;
		}
	}

	//Load checkerboard image
	ofImage imageTest;
	imageTest.loadImage("checkerBoard.png");
	colorTest.setFromPixels( imageTest );

	//Make morphing at first time
	morphValue = 0;
	morphImageIndex = 1;
	updateMorph( morphValue, morphImageIndex );
}
Example 22
vector<Rect> visionUtils::segmentLineBoxFit(Mat img0, int minPixelSize, int maxSegments, Mat *returnMask, std::vector<std::vector<cv::Point> > *returnContours, vector<RotatedRect> *rotatedBoundingBox, bool displayFaces)
{
    // Segments items in gray image (img0)
    // minPixelSize= pixels, threshold for removing smaller regions, with less than minPixelSize pixels
    // 0, returns all detected segments
    // maxSegments = max no segments to return, 0 = all
    RNG rng(12345);


    int padPixels=15;
    // Rect border added at start...
    Rect tempRect;
    tempRect.x=padPixels;
    tempRect.y=padPixels;
    tempRect.width=img0.cols;
    tempRect.height=img0.rows;
    Mat img1 = Mat::zeros(img0.rows+(padPixels*2), img0.cols+(padPixels*2), CV_8UC1);
    img0.copyTo(img1(tempRect));

    // find the contours
    std::vector<std::vector<cv::Point> > contours;
    vector<Vec4i> hierarchy;

    findContours(img1, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);

    // Mask for segmented region
    Mat mask = Mat::zeros(img1.rows, img1.cols, CV_8UC3);

    vector<double> areas(contours.size());

    // Case for using minimum pixel size
    Vec4f lines;
    Scalar color;

    // sort contours
    std::sort(contours.begin(), contours.end(), compareContourAreas);

    // grab contours
    vector<Rect> boundingBox;

    // LB testing
    vector<RotatedRect> tempRotatedBoundingBox;

    std::vector<std::vector<cv::Point> > tempReturnContours;
    int maxIterations = 0;

    if( contours.size() > 0 )
    {
        if (maxSegments==0)// return all contours..
            maxIterations = contours.size();
        else if((int)contours.size() >= maxSegments)
            maxIterations = maxSegments;
        else
            maxIterations = 1;    // LB: need to check this is correct!
        int contourCount=0;

        for (int j = 1; j < maxIterations+1; j++)
        {
            int i = contours.size()-j;
            if (contourArea(Mat(contours[i]))>minPixelSize)
            {
                // Fit rotated rect to contour
                tempRotatedBoundingBox.push_back(minAreaRect( Mat(contours[i]) ));

                Point2f rectCentre=tempRotatedBoundingBox[contourCount].center;
                rectCentre.x=rectCentre.x-padPixels;
                rectCentre.y=rectCentre.y-padPixels;
                tempRotatedBoundingBox[contourCount].center=rectCentre;

                // Find line limits....
                boundingBox.push_back(boundingRect(Mat(contours[i])));

                // Remove edge padding effects....
                boundingBox[contourCount].x=boundingBox[contourCount].x-padPixels;
                boundingBox[contourCount].y=boundingBox[contourCount].y-padPixels;
                boundingBox[contourCount]=checkRoiInImage(img0, boundingBox[contourCount]);

                contourCount++;

                tempReturnContours.push_back(contours[i]);
            }
        }
        // Return contours
        returnContours->resize(tempReturnContours.size());
        *returnContours = tempReturnContours;
        // Return rotated rects
        rotatedBoundingBox->resize(tempRotatedBoundingBox.size());
        *rotatedBoundingBox = tempRotatedBoundingBox;

        // normalize so imwrite(...)/imshow(...) shows the mask correctly!
        cv::normalize(mask.clone(), mask, 0.0, 255.0, CV_MINMAX, CV_8UC1);
        // To Remove border added at start...
        *returnMask=mask(tempRect);
        // show the images
        if (displayFaces)   imshow("Seg line utils: Img in", img0);
        if (displayFaces)   imshow("Seg line utils: Mask", *returnMask);
        if (displayFaces)   imshow("Seg line utils: Output", img1);
    }
    return boundingBox;
}
Example 23
int main(int argc, char * argv[])
{
    system("bash dataprep.sh");

    #pragma omp parallel for
    for(int n=1; n<argc; n++)
    {
        Mat srcImg=imread(argv[n],CV_LOAD_IMAGE_GRAYSCALE);
        Mat img=imread(argv[n],CV_LOAD_IMAGE_COLOR);
        string str=argv[n];
        cout<<"\n\n processing the image: "<<str<<endl;
        if (!srcImg.data)
        {
            cout<<"failed to load the image!\n";
            // return 0;
            continue;
        }

        for(int i=0; i< srcImg.rows; i++)
        {
            for(int j=0; j<5; j++)
            {
                srcImg.at<uchar>(i,j)=uchar(0);
                srcImg.at<uchar>(i,srcImg.cols-j-1)=uchar(0);
            }
        }

        for(int i=0; i<srcImg.cols; i++)
        {
            for(int j=0; j<5; j++)
            {
                srcImg.at<uchar>(j,i)=uchar(0);
                srcImg.at<uchar>(srcImg.rows-j-1,i)=uchar(0);
            }
        }

        //detect lsd lines in an input image;
        int cols=srcImg.rows;
        int rows=srcImg.cols;
        double*  lsd_srcImg=new double[cols*rows];
        for (int i=0; i<cols; i++)
        {
            for (int j=0; j<rows; j++)
            {
                lsd_srcImg[i+j*cols]=static_cast<double>(srcImg.at<uchar>(i,j));
            }
        }
        double* lsd_dstImg;
        int nLines=0;  // distinct name: 'n' would shadow the OpenMP loop variable
        lsd_dstImg=lsd(&nLines,lsd_srcImg,cols,rows);
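        // lsd() returns one 7-tuple per detected segment: the two endpoint
        // coordinates, the line width, the angle precision p, and -log10(NFA).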

        cout<<"finished the lsd detection!\n";
        vector<LSDline> lsdLine;
        for (int i=0; i<nLines; i++)
        {
            LSDline lsdLine_tmp;
            lsdLine_tmp.lineBegin.y=lsd_dstImg[i*7+0];
            lsdLine_tmp.lineBegin.x=lsd_dstImg[i*7+1];
            lsdLine_tmp.lineEnd.y=lsd_dstImg[i*7+2];
            lsdLine_tmp.lineEnd.x=lsd_dstImg[i*7+3];
            lsdLine_tmp.width=lsd_dstImg[i*7+4];
            lsdLine_tmp.p=lsd_dstImg[i*7+5];
            lsdLine_tmp.log_nfa=lsd_dstImg[i*7+6];
            lsdLine_tmp.tagBegin=1;
            lsdLine_tmp.tagEnd=1;
            cout<<lsdLine_tmp.lineBegin.x<<" "<<lsdLine_tmp.lineBegin.y<<" "<<lsdLine_tmp.lineEnd.x<<"  "<<lsdLine_tmp.lineEnd.y<<endl;
            float distThreshold=12;
            if(sqrt((lsdLine_tmp.lineBegin.x-lsdLine_tmp.lineEnd.x)*(lsdLine_tmp.lineBegin.x-lsdLine_tmp.lineEnd.x)+
                    (lsdLine_tmp.lineBegin.y-lsdLine_tmp.lineEnd.y)*(lsdLine_tmp.lineBegin.y-lsdLine_tmp.lineEnd.y))>distThreshold)
            {
                lsdLine.push_back(lsdLine_tmp);
            }
        }

        cout<<"the detected lsd lines' number is: "<<lsdLine.size()<<endl;
        //define the img1 to display the detected LSD lines and junctions;
        Mat img1(img.size(),CV_8UC3,Scalar::all(0));
        delete[] lsd_srcImg;


        displayLSDline(lsdLine,img1);
        //imwrite("img1.bmp",img1);

        vector<Ljunct> Jlist;
        vector<LsdJunction> lsdJunction;



        if(LSD2Junct(lsdLine,Jlist,lsdJunction,search_distance,img))
        {
            cout<<"transform successfully!\n";
        }
        else
        {
            cout<<"cannot form L-junctions from LSD lines!\n";
            //for processing, we also need to write the the detect result;
            char c='_';
            int name_end=str.find(c,0);
            string ori_name_tmp1=str.substr(7,name_end-7);
            // char* ch2=".bmp";
            // int location=str.find(ch2,0);
            // string ori_name_tmp1 = str.substr(7,location-7);
            string dst_tmp = "./DetectResultOri/"+ori_name_tmp1;
            string ori_name_tmp="./OrigImg/"+ori_name_tmp1;

            // Mat oriImg_tmp = imread(ori_name_tmp.c_str(),CV_LOAD_IMAGE_COLOR);
            // imwrite(dst_tmp,oriImg_tmp);
            string filestring_tmp="./dstFile/"+str.substr(srcImgDir.size(),str.size()-4)+".jpg.txt";
            ofstream file_out(filestring_tmp.c_str());
            if(!file_out.is_open())
            {
                cout<<"cannot open the txt file!\n";
            }
            string imageName=str.substr(srcImgDir.size(),str.size()-4)+".jpg";
            file_out<<imageName<<"\t"<<img.cols<<"\t"<<img.rows<<endl;

            continue;
        }
        //vector<string> code_string1;
        vector<Ljunct> Jlist_coding;
        vector<codeStringBoundingBox> code_string;
        code_string=encodingFromLsdJunction(lsdJunction, Jlist_coding,srcImg);
        classifyRoadMarking(code_string,srcImg);

        string str_tmp=str.substr(srcImgDir.size(),str.size());
        cout<<"!!!!!the Jlist_coding size is: "<<Jlist_coding.size()<<endl<<endl;
        
        displayLjunct(Jlist_coding,img1,str_tmp);

        DrawBoundingBox(code_string,img,str_tmp);

        //drawing the bounding box in original image;
        char c='_';
        int name_end=str.find(c,0);
      //  string ori_name=str.substr(7,name_end-7);
        
         char* ch=".bmp";
         int location=str.find(ch,0);
         cout<<"the find .bmp in "<<str<<" is in "<<location<<endl;
         string ori_name=str.substr(7,location-7);
        
         cout<<ori_name<<endl;
         string ori_img="./OrigImg/"+ori_name+".JPG";

         Mat oriImg=imread(ori_img.c_str(),CV_LOAD_IMAGE_COLOR);
         if(!oriImg.data)
         {
             cout<<"cannot load the original image!\n";
             //return 0;
             char ch;
             cin.get(ch);
             continue;
         }
         
        /*
        Point2f imgP1=Point2f(219,668);
        Point2f imgP2=Point2f(452,469);
        Point2f imgP3=Point2f(622,472);
        Point2f imgP4=Point2f(882,681);
        Point2f imgP5=Point2f(388,520);
        Point2f imgP6=Point2f(688,523);
        Point2f imgP7=Point2f(454,538);
        Point2f imgP8=Point2f(645,539);
        Point2f imgP9=Point2f(508,486);
        Point2f imgP10=Point2f(573,509);

        Point2f imgP[10]= {imgP1,imgP2,imgP3,imgP4,imgP5,imgP6,imgP7,imgP8,imgP9,imgP10};
        Point2f objP1=Point2f(250,900);
        Point2f objP2=Point2f(250,100);
        Point2f objP3=Point2f(800,100);
        Point2f objP4=Point2f(800,900);
        Point2f objP5=Point2f(250,550);
        Point2f objP6=Point2f(800,550);
        Point2f objP7=Point2f(400,625);
        Point2f objP8=Point2f(650,625);
        Point2f objP9=Point2f(450,300);
        Point2f objP10=Point2f(600,475);



        Point2f objP[10]= {objP1,objP2,objP3,objP4,objP5,objP6,objP7,objP8,objP9,objP10};
        */
        vector<Point2f> imgP;
        imgP.push_back(Point2f(300,450));
        imgP.push_back(Point2f(700,450));
        imgP.push_back(Point2f(465,450));
        imgP.push_back(Point2f(535,450));
        imgP.push_back(Point2f(260,820));
        imgP.push_back(Point2f(740,820));

        vector<Point2f> objP;
        objP.push_back(Point2f(0,0));
        objP.push_back(Point2f(1000,0));
        objP.push_back(Point2f(400,0));
        objP.push_back(Point2f(600,0));
        objP.push_back(Point2f(400,1000));
        objP.push_back(Point2f(600,1000));

        //Mat H=getPerspectiveTransform(objP,imgP);
        Mat H=findHomography(objP,imgP,CV_RANSAC);
        DrawBoundingBox_Ori(code_string,oriImg,ori_name,H,str_tmp);
    }
    return 0;
}
Example 24
int main(int argc, char** argv)
{


    Point2f cp;
    cv::initModule_nonfree();

    // Read the VIDEO
    VideoCapture cap("video1.avi");
    if( !cap.isOpened() )
    { cout << "Could not initialize capturing...\n"; return 0;}

    //Initialize Video Writer
    //writeOut.open("MStrack_3.avi", CV_FOURCC('M', 'J', 'P', 'G'), 15, Size(640,480), 1 );

    cv::SURF mySURF;    mySURF.extended = 0;
    Ptr<FeatureDetector> detector = FeatureDetector::create( "SURF"); // SURF,SIFT ,MSER
    Ptr<DescriptorExtractor> descriptorExtractor = DescriptorExtractor::create( "SURF" );
    Ptr<DescriptorMatcher> descriptorMatcher = DescriptorMatcher::create( "FlannBased" ); // FlannBased , BruteForce
    int matcherFilterType = getMatcherFilterType( "CrossCheckFilter" );

    // Get the first frame and select the ROI to be tracked in the subsequent frames
    Mat frame, img1, img2;
    cap >> frame;

    if( frame.empty() )
        return -1;
    else
        img1 = frame.clone() ;

    Mat temp = img1.clone() ;

    if(img1.empty())
    {
        cout << "Exiting as the input image is empty" << endl;
        exit(-1);
    }

    const char* name = "Initiate_ROI";
    box = cvRect(-1,-1,0,0);


    cvNamedWindow( name );

    // Set up the callback
    cvSetMouseCallback( name, my_mouse_callback);

    // Wait until ROI is selected by the user
    while( 1 )
    {
        img1.copyTo(temp);

        if( drawing_box )
            draw_box( temp, box );
        cv::imshow(name,temp) ;

        cvWaitKey( 15 );
        if(rect_drawn)
            break;
    }

    // storing the initial selected Box, as "box" variable changes in consecutive matching
    boxOrg = box;

    Mat img1ROI, labels1, clusters1, descriptors1, descriptors2;
    vector<int> reprojections; // number of reprojections per keypoint, size same as keypoint (increasing)
    vector<KeyPoint> keypoints1, keypoints2;

    //human aspect ratio (not used)
    double aspectRatio = (double)box.width / box.height;


    // Compute SURF features within the *selected* ROI

    img1ROI = img1(boxOrg);
    mySURF.detect(img1ROI, keypoints1 );
    mySURF.compute(img1ROI, keypoints1, descriptors1 );


    // Create a Template Pool that contains both descriptors as well as Keypoints (local & Global)

    Mat tpDescriptors;
    vector<KeyPoint> tpKeypoints;
    vector<float> tpWeights;

   int tpMaxSize = 1000;

    //Initially copy of the descriptor of Ist image ROI into it.
    descriptors1.copyTo(tpDescriptors);
    tpWeights.resize(tpDescriptors.rows,2.0); // Initial values of all weights is 2.0

    for(uint i = 0; i < keypoints1.size(); ++i)
        tpKeypoints.push_back(keypoints1.at(i));


    //==========================================
    // Main Iteration Loop starts here : Tracking
    //============================================

    int MP, count;
    struct timeval t1, t2;
    //Rect msBox; // Box obtained from mean-shift tracker

    // Loop over all images
    for(int k=1;;k++) //int i=2;i<1002;i+=1)
    {
        gettimeofday(&t1, NULL);

        //create clusters in the SURF descriptor space
        // clusters are created in the template pool
        cv::kmeans(tpDescriptors, NOC, labels1,
                   TermCriteria( CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 50, 1.0 ), 1,
                   /*KMEANS_PP_CENTERS*/KMEANS_RANDOM_CENTERS, clusters1);


        // img1 - source image
        // img2 - Destination image
        // Mean-shift algorithm returns the window on Destination image given the source ROI in boxOrg.


        //Capture a New frame
        cap >> frame;
        if( frame.empty() )
            return -1;
        else
            img2 = frame.clone() ;

        temp = img2.clone();

        if(img2.empty() )
        {
            cout<< "Could not open image: " << img2 << endl ;
            //continue ;
            exit(-1);
        }

        int flag=1;  // what is this flag ??
        MP=0; count=0;  // ??

        //Call the mean-shift tracker
        vector<int> queryIdxs, trainIdxs;
        meanShift(img1, img2, descriptorMatcher, matcherFilterType, keypoints1, descriptors1,
                  keypoints2, descriptors2, clusters1, reprojections,  cp, flag, count, MP,
                  temp, queryIdxs, trainIdxs);



        DivideAndComputeScaling(img1, img2);

//        box.height = (int)(scaleValue * box.height);
//        box.width = (int)(aspectRatio * box.height);
//        box.x = cp.x - box.width/2.0;
//        box.y = cp.y - box.height/2.0;

        //cout << "Scale Value = " << scaleValue << endl;


         // Add the target ROI descriptors into the template pool.
         for(int i=0;i< descriptors2.rows;i++)
         {
             tpDescriptors.push_back(descriptors2.row(i));
             tpKeypoints.push_back(keypoints2.at(i));
         }

         // If the size of template pool exceeds max size, remove that many number of points from top
         Mat tempMat;
         if(tpDescriptors.rows > tpMaxSize)
         {
             //cout << "Time to Truncate Template Pool" << endl;
             uint dLength = tpDescriptors.rows - tpMaxSize;
             tempMat = tpDescriptors.rowRange(Range(dLength, tpDescriptors.rows));
             tpKeypoints.erase(tpKeypoints.begin(), tpKeypoints.begin()+dLength);

             //tpDescriptors.release(); tpDescriptors = tempMat;
             tpDescriptors = tempMat;
         }
         tempMat.release();
         //cout << "Template Pool size =" << tpDescriptors.rows << endl;

         // Current target image becomes the source image for the next iteration
         img1=img2.clone();
         boxOrg = box;

         // source descriptors and keypoints are taken from the template pool
         keypoints1 = tpKeypoints;
         descriptors1 = tpDescriptors;



        gettimeofday(&t2, NULL);
        double diff = (float)((t2.tv_sec * 1000000 + t2.tv_usec) - (t1.tv_sec * 1000000 + t1.tv_usec));
        diff = diff/1000;
        cout << k << "\tTime taken in mili sec \t" <<  diff<< endl;
        //f1 <<  k << "\t" << MP << "\t"   << count  << "\t"   << diff << "\n";



        cv::circle(temp, cp, 2, Scalar(0,255,255), 2);
        //=======================================


        imshow("main", temp);
        //imshow("img2", img2);


        char c = (char)waitKey(10);
        if( c == '\x1b' ) // esc
        {
            cout << "Exiting ..." << endl;
            break;
        }

        waitKey(5);

    }
    return 0;
}
Example 25
int main(int argc, char** argv)
{
	Image img1 (argv[1]);
	img1.display("imagen 1");
	int size=img1.get_width()*img1.get_height()*img1.get_depth()*img1.get_spectrum();
	int *matrix= (int*)malloc(size*sizeof(int));
	int *mat_result= (int*)malloc(size*sizeof(int));
	Image result(img1.get_width(), img1.get_height(), img1.get_depth(), img1.get_spectrum(), 0); 
	int x,y,z,c, procs, id, local_size, i,*matrix_local, *result_local;
	i=0;
	// Build an integer vector from the image (flattened as h*x + y + h*w*z + h*w*d*c)

	double time;

	MPI_Init(&argc, &argv);
	MPI_Comm_rank(MPI_COMM_WORLD, &id);
	MPI_Comm_size(MPI_COMM_WORLD, &procs);
	time=MPI_Wtime();
	
	local_size=size/procs;
	
	//if(id==0)
	{
	
		for(c=0; c< img1.get_spectrum();++c)
		{
			for(z=0; z< img1.get_depth(); ++z)
			{	
				for(x=0; x<img1.get_width();++x)
				{					
					for(y=0; y< img1.get_height();++y)
					{	

						matrix[img1.get_height()*x+y+img1.get_height()*img1.get_width()*z+img1.get_height()*img1.get_width()*img1.get_depth()*c]=img1.get_pixel_value(x,y,z,c);
						
					}	
				}
			}
		}		
		// Create the local chunks each process will work on
		matrix_local=(int*)malloc(local_size*sizeof(int));
		result_local=(int*)malloc(local_size*sizeof(int));
	}	
	
	MPI_Barrier(MPI_COMM_WORLD);
	cout<<local_size<<"   "<<size<<endl;
	MPI_Scatter(matrix, local_size , MPI_INT, matrix_local, local_size, MPI_INT, 0, MPI_COMM_WORLD);
	free(matrix);
	
	for(i=0; i<local_size; ++i)
	{
		result_local[i] = abs(255-matrix_local[i]);
	
	}
	free(matrix_local);
	MPI_Gather(result_local, local_size, MPI_INT, mat_result, local_size, MPI_INT, 0, MPI_COMM_WORLD);
	MPI_Barrier(MPI_COMM_WORLD);
	free(result_local);
	time=MPI_Wtime()-time;
	MPI_Finalize();
	// This just rebuilds the result image from the gathered vector

	for(c=0; c< img1.get_spectrum();++c)
	{
		for(z=0; z< img1.get_depth(); ++z)
		{	
			for(x=0; x<img1.get_width();++x)
			{					
				for(y=0; y< img1.get_height();++y)
				{	

					result.set_pixel_value(x,y,z,c,static_cast<unsigned char>(mat_result[img1.get_height()*x+y+img1.get_height()*img1.get_width()*z+img1.get_height()*img1.get_width()*img1.get_depth()*c]));
				}	
			}
		}
	}
	free(mat_result);
	cout<<"Tiempo de ejecución con "<<procs<<" procesadores: "<< time <<endl;
	result.display("disp");
	result.save("inversempi.jpg");
	return 0;
}
Example 26
int main(int argc, char** argv)
{
	// Call to a constructor of the class Image by giving it the name of the picture file.
	
	Image img1 ("../../Multimedia/huge.jpg"); // Image "__name__" ("__image_path__");
	Image img2 ("../../Multimedia/huge.jpg"); // Image "__name__" ("__image_path__");
	
	//It is possible to display the image created before:
	img1.display("imagen 1");
	img2.display("imagen 2");
	
	//OpenMPI works only with primitive types so we need to pass 
	//the images to a vector of integers or other type,
	// if we want to parallelize the operation.
	
	int size=img1.get_width()*img1.get_height()*img1.get_depth()*img1.get_spectrum(); //define the size of the image result
	
	//As the images could be huge, we need to use malloc to apart the block of memory of the vector. 
	//Note that to use MPI the blocks of memory needs to be continuous so we use malloc.
	int *matrix= (int*)malloc(size*sizeof(int)); //img1
	int *matrix2= (int*)malloc(size*sizeof(int)); //img2
	int *mat_result= (int*)malloc(size*sizeof(int)); //result matrix
	
	Image result(img1.get_width(), img1.get_height(), img1.get_depth(), img1.get_spectrum(), 0);  //This is the constructor of the result image
	
	//We need to declare the variables of the function, the num of processes to use, the id of each process and the local parts of the matrix
	int x,y,z,c, procs, id, local_size, i,*matrix_local, *matrix2_local, *result_local;
	i=0;

	double time;

	MPI_Init(&argc, &argv); //This function is used to initialize the parallel section
	MPI_Comm_rank(MPI_COMM_WORLD, &id);// This function obtains the identifier of each process
	MPI_Comm_size(MPI_COMM_WORLD, &procs); // This function obtains the number of processes that be used in the communicator.
	time=MPI_Wtime(); //This function measure the time of the execution
	
	//Set the local size of each chunk to be send to each process
	local_size=size/procs;

	//Fill the vector with the pixel values of the images.
		for(c=0; c< img1.get_spectrum();++c)
		{
			for(z=0; z< img1.get_depth(); ++z)
			{	
				for(x=0; x<img1.get_width();++x)
				{					
					for(y=0; y< img1.get_height();++y)
					{	

						matrix[img1.get_height()*x+y+img1.get_height()*img1.get_width()*z+img1.get_height()*img1.get_width()*img1.get_depth()*c]=img1.get_pixel_value(x,y,z,c);
						matrix2[img1.get_height()*x+y+img1.get_height()*img1.get_width()*z+img1.get_height()*img1.get_width()*img1.get_depth()*c]=img2.get_pixel_value(x,y,z,c);

					}	
				}
			}
		}		
		//Create the local matrices, and the local result
		matrix2_local=(int*)malloc(local_size*sizeof(int));
		matrix_local=(int*)malloc(local_size*sizeof(int));
		result_local=(int*)malloc(local_size*sizeof(int));
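	// NOTE: the original snippet is truncated at this point. What follows is a
	// plausible completion mirroring Example 25's scatter/compute/gather
	// structure; the per-pixel operation (averaging the two input images) is
	// an assumption, not part of the original.
	MPI_Barrier(MPI_COMM_WORLD);
	MPI_Scatter(matrix, local_size, MPI_INT, matrix_local, local_size, MPI_INT, 0, MPI_COMM_WORLD);
	MPI_Scatter(matrix2, local_size, MPI_INT, matrix2_local, local_size, MPI_INT, 0, MPI_COMM_WORLD);
	free(matrix);
	free(matrix2);

	// Combine the local chunks of the two images (assumed: pixel-wise average)
	for(i=0; i<local_size; ++i)
		result_local[i] = (matrix_local[i] + matrix2_local[i]) / 2;
	free(matrix_local);
	free(matrix2_local);

	MPI_Gather(result_local, local_size, MPI_INT, mat_result, local_size, MPI_INT, 0, MPI_COMM_WORLD);
	free(result_local);
	time=MPI_Wtime()-time;
	MPI_Finalize();

	// Rebuild the result image from the gathered vector (same indexing as above)
	for(c=0; c<img1.get_spectrum(); ++c)
		for(z=0; z<img1.get_depth(); ++z)
			for(x=0; x<img1.get_width(); ++x)
				for(y=0; y<img1.get_height(); ++y)
					result.set_pixel_value(x,y,z,c, static_cast<unsigned char>(
						mat_result[img1.get_height()*x+y
						+img1.get_height()*img1.get_width()*z
						+img1.get_height()*img1.get_width()*img1.get_depth()*c]));
	free(mat_result);

	cout<<"Execution time with "<<procs<<" processes: "<<time<<endl;
	result.display("result");
	return 0;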
	}	
Example 27
Mat visionUtils::cannySegmentation(Mat img0, int minPixelSize, bool displayFaces)
{
    // Segments items in gray image (img0)
    // minPixelSize=
    // -1, returns largest region only
    // pixels, threshold for removing smaller regions, with less than minPixelSize pixels
    // 0, returns all detected segments


    // LB: Zero pad image to remove edge effects when getting regions....
    int padPixels=20;
    // Rect border added at start...
    Rect tempRect;
    tempRect.x=padPixels;
    tempRect.y=padPixels;
    tempRect.width=img0.cols;
    tempRect.height=img0.rows;

    Mat img1 = Mat::zeros(img0.rows+(padPixels*2), img0.cols+(padPixels*2), CV_8UC1);
    img0.copyTo(img1(tempRect));


    if (useGPU)// converted to GPU -> NOT tested to speed up here!
    {
        GpuMat imgGPU;
        imgGPU.upload(img1);
#if CV_MAJOR_VERSION == 2
        gpu::Canny(imgGPU, imgGPU, 100, 200, 3); //100, 200, 3);
#elif CV_MAJOR_VERSION == 3
        cv::Ptr<cv::cuda::CannyEdgeDetector> canny = cv::cuda::createCannyEdgeDetector(100, 200, 3);
        canny->detect(imgGPU, imgGPU);
#endif
        imgGPU.download(img1);
    }
    else
    {
        Canny(img1, img1, 100, 200, 3); //100, 200, 3);
    }


    // find the contours
    vector< vector<Point> > contours;
    findContours(img1, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);

    // Mask for segmented regions
    Mat mask = Mat::zeros(img1.rows, img1.cols, CV_8UC1);

    vector<double> areas(contours.size());

    if (minPixelSize==-1)
    {   // Case of taking largest region
        for(int i = 0; i < (int)contours.size(); i++)
            areas[i] = contourArea(Mat(contours[i]));
        double max;
        Point maxPosition;
        cv::minMaxLoc(Mat(areas),0,&max,0,&maxPosition);
        drawContours(mask, contours, maxPosition.y, Scalar(1), CV_FILLED);
    }
    else
    {   // Case for using minimum pixel size
        for (int i = 0; i < (int)contours.size(); i++)
        {
            if (contourArea(Mat(contours[i]))>minPixelSize)
                drawContours(mask, contours, i, Scalar(1), CV_FILLED);
        }
    }
    // normalize so imwrite(...)/imshow(...) shows the mask correctly!
    cv::normalize(mask.clone(), mask, 0.0, 255.0, CV_MINMAX, CV_8UC1);

    Mat returnMask;
    returnMask=mask(tempRect);

    // show the images
    if (displayFaces)   imshow("Canny: Img in", img0);
    if (displayFaces)   imshow("Canny: Mask", returnMask);
    if (displayFaces)   imshow("Canny: Output", img1);

    return returnMask;
}
Example 28
void drawKdenliveTitle( producer_ktitle self, mlt_frame frame, int width, int height, double position, int force_refresh )
{
  	// Obtain the producer 
	mlt_producer producer = &self->parent;
	mlt_profile profile = mlt_service_profile ( MLT_PRODUCER_SERVICE( producer ) ) ;
	mlt_properties producer_props = MLT_PRODUCER_PROPERTIES( producer );

	// Obtain properties of frame
	mlt_properties properties = MLT_FRAME_PROPERTIES( frame );
        
        pthread_mutex_lock( &self->mutex );
	
	// Check if user wants us to reload the image
	if ( mlt_properties_get( producer_props, "_animated" ) != NULL || force_refresh == 1 || width != self->current_width || height != self->current_height || mlt_properties_get( producer_props, "_endrect" ) != NULL )
	{
		//mlt_cache_item_close( self->image_cache );
		self->current_image = NULL;
		mlt_properties_set_data( producer_props, "cached_image", NULL, 0, NULL, NULL );
		mlt_properties_set_int( producer_props, "force_reload", 0 );
	}
	
	if (self->current_image == NULL) {
		// restore QGraphicsScene
		QGraphicsScene *scene = static_cast<QGraphicsScene *> (mlt_properties_get_data( producer_props, "qscene", NULL ));

		if ( force_refresh == 1 && scene )
		{
			scene = NULL;
			mlt_properties_set_data( producer_props, "qscene", NULL, 0, NULL, NULL );
		}

		if ( scene == NULL )
		{
			int argc = 1;
			char* argv[1];
			argv[0] = (char*) "xxx";
			
			// Warning: all Qt graphic objects (QRect, ...) must be initialized AFTER
			// the QApplication is created, otherwise they will be NULL
			
			if ( app == NULL ) {
				if ( qApp ) {
					app = qApp;
				}
				else {
#ifdef linux
					if ( getenv("DISPLAY") == 0 )
					{
						mlt_log_panic( MLT_PRODUCER_SERVICE( producer ), "Error, cannot render titles without an X11 environment.\nPlease either run melt from an X session or use a fake X server like xvfb:\nxvfb-run -a melt (...)\n" );
						pthread_mutex_unlock( &self->mutex );
						return;
					}
#endif
					app = new QApplication( argc, argv );				
					const char *localename = mlt_properties_get_lcnumeric( MLT_SERVICE_PROPERTIES( MLT_PRODUCER_SERVICE( producer ) ) );
					QLocale::setDefault( QLocale( localename ) );
				}
				qRegisterMetaType<QTextCursor>( "QTextCursor" );
			}
			scene = new QGraphicsScene();
			scene->setItemIndexMethod( QGraphicsScene::NoIndex );
                        scene->setSceneRect(0, 0, mlt_properties_get_int( properties, "width" ), mlt_properties_get_int( properties, "height" ));
			if ( mlt_properties_get( producer_props, "resource" ) && mlt_properties_get( producer_props, "resource" )[0] != '\0' )
			{
				// The title has a resource property, so we read all properties from the resource.
				// Do not serialize the xmldata
				loadFromXml( producer, scene, mlt_properties_get( producer_props, "_xmldata" ), mlt_properties_get( producer_props, "templatetext" ) );
			}
			else
			{
				// The title has no resource, all data should be serialized
				loadFromXml( producer, scene, mlt_properties_get( producer_props, "xmldata" ), mlt_properties_get( producer_props, "templatetext" ) );
			  
			}
			mlt_properties_set_data( producer_props, "qscene", scene, 0, ( mlt_destructor )qscene_delete, NULL );
		}
                
                QRectF start = stringToRect( QString( mlt_properties_get( producer_props, "_startrect" ) ) );
                QRectF end = stringToRect( QString( mlt_properties_get( producer_props, "_endrect" ) ) );
	
		int originalWidth = mlt_properties_get_int( producer_props, "_original_width" );
		int originalHeight= mlt_properties_get_int( producer_props, "_original_height" );
		const QRectF source( 0, 0, width, height );
		if (start.isNull()) {
		    start = QRectF( 0, 0, originalWidth, originalHeight );
		}

		// Effects
		QList <QGraphicsItem *> items = scene->items();
		QGraphicsTextItem *titem = NULL;
		for (int i = 0; i < items.count(); i++) {
		    titem = static_cast <QGraphicsTextItem*> ( items.at( i ) );
		    if (titem && !titem->data( 0 ).isNull()) {
			    QStringList params = titem->data( 0 ).toStringList();
			    if (params.at( 0 ) == "typewriter" ) {
				    // typewriter effect has 2 param values:
				    // the keystroke delay and a start offset, both in frames
				    QStringList values = params.at( 2 ).split( ";" );
				    int interval = qMax( 0, ( ( int ) position - values.at( 1 ).toInt()) / values.at( 0 ).toInt() );
				    QTextCursor cursor = titem->textCursor();
				    cursor.movePosition(QTextCursor::EndOfBlock);
				    // get the font format
				    QTextCharFormat format = cursor.charFormat();
				    cursor.select(QTextCursor::Document);
				    QString txt = params.at( 1 ).left( interval );
				    // If the string to insert is empty, insert a space / linebreak so that we don't lose
				    // formatting info for the next iterations
				    int lines = params.at( 1 ).count( '\n' );
				    QString empty = " ";
				    for (int i = 0; i < lines; i++)
					    empty.append( "\n " );
				    cursor.insertText( txt.isEmpty() ? empty : txt, format );
				    if ( !titem->data( 1 ).isNull() )
					  titem->setTextWidth( titem->data( 1 ).toDouble() );
			    }
		    }
		}

		//must be extracted from kdenlive title
		QImage img( width, height, QImage::Format_ARGB32 );
		img.fill( 0 );
		QPainter p1;
		p1.begin( &img );
		p1.setRenderHints( QPainter::Antialiasing | QPainter::TextAntialiasing | QPainter::HighQualityAntialiasing );
		//| QPainter::SmoothPixmapTransform );
                mlt_position anim_out = mlt_properties_get_position( producer_props, "_animation_out" );

		if (end.isNull())
		{
			scene->render( &p1, source, start, Qt::IgnoreAspectRatio );
		}
		else if ( position > anim_out ) {
                        scene->render( &p1, source, end, Qt::IgnoreAspectRatio );
                }
		else {
                        double percentage = 0;
			if ( position && anim_out )
				percentage = position / anim_out;
			QPointF topleft = start.topLeft() + ( end.topLeft() - start.topLeft() ) * percentage;
			QPointF bottomRight = start.bottomRight() + ( end.bottomRight() - start.bottomRight() ) * percentage;
			const QRectF r1( topleft, bottomRight );
			scene->render( &p1, source, r1, Qt::IgnoreAspectRatio );
			if ( profile && !profile->progressive ){
				int line=0;
				double percentage_next_field = ( position + 0.5 ) / anim_out;
				QPointF topleft_next_field = start.topLeft() + ( end.topLeft() - start.topLeft() ) * percentage_next_field;
				QPointF bottomRight_next_field = start.bottomRight() + ( end.bottomRight() - start.bottomRight() ) * percentage_next_field;
				const QRectF r2( topleft_next_field, bottomRight_next_field );
				QImage img1( width, height, QImage::Format_ARGB32 );
				img1.fill( 0 );
				QPainter p2;
				p2.begin(&img1);
				p2.setRenderHints( QPainter::Antialiasing | QPainter::TextAntialiasing | QPainter::HighQualityAntialiasing );
				scene->render(&p2,source,r2,  Qt::IgnoreAspectRatio );
				p2.end();
				int next_field_line = (  mlt_properties_get_int( producer_props, "top_field_first" ) ? 1 : 0 );
				for (line = next_field_line ;line<height;line+=2){
						memcpy(img.scanLine(line),img1.scanLine(line),img.bytesPerLine());
				}

			}
		}
		p1.end();

		int size = width * height * 4;
		uint8_t *pointer=img.bits();
		QRgb* src = ( QRgb* ) pointer;
		self->current_image = ( uint8_t * )mlt_pool_alloc( size );
		uint8_t *dst = self->current_image;
	
		for ( int i = 0; i < width * height * 4; i += 4 )
		{
			*dst++=qRed( *src );
			*dst++=qGreen( *src );
			*dst++=qBlue( *src );
			*dst++=qAlpha( *src );
			src++;
		}

		mlt_properties_set_data( producer_props, "cached_image", self->current_image, size, mlt_pool_release, NULL );
		self->current_width = width;
		self->current_height = height;
	}

	pthread_mutex_unlock( &self->mutex );
	mlt_properties_set_int( properties, "width", self->current_width );
	mlt_properties_set_int( properties, "height", self->current_height );
}
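
The loop at the end of this producer unpacks each QRgb (stored as 0xAARRGGBB) into the packed R,G,B,A byte order the consumer expects. A minimal standalone sketch of that conversion, assuming plain Qt and a caller-owned buffer (the toRgba name is ours, not MLT's):

// Sketch: convert a QImage::Format_ARGB32 image into a tightly packed
// RGBA byte vector, mirroring what the producer above does for its cache.
#include <QImage>
#include <cstdint>
#include <vector>

std::vector<uint8_t> toRgba( const QImage &img )
{
    std::vector<uint8_t> out;
    out.reserve( img.width() * img.height() * 4 );
    for ( int y = 0; y < img.height(); y++ )
    {
        const QRgb *src = ( const QRgb * ) img.constScanLine( y );
        for ( int x = 0; x < img.width(); x++ )
        {
            out.push_back( qRed( src[x] ) );
            out.push_back( qGreen( src[x] ) );
            out.push_back( qBlue( src[x] ) );
            out.push_back( qAlpha( src[x] ) );
        }
    }
    return out;
}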
Esempio n. 29
0
Mat skinDetector::cannySegmentation(Mat img0, int minPixelSize)
{
	// Segments items in the gray image (img0).
	// minPixelSize:
	//   -1 -> return the largest region only
	//   >0 -> drop regions with fewer than minPixelSize pixels
	//    0 -> return all detected segments

    // LB: Zero pad image to remove edge effects when getting regions....	
    int padPixels=20;
    // Rect border added at start...
    Rect tempRect;
    tempRect.x=padPixels;
    tempRect.y=padPixels;
    tempRect.width=img0.cols;
    tempRect.height=img0.rows;
    Mat img1 = Mat::zeros(img0.rows+(padPixels*2), img0.cols+(padPixels*2), CV_8UC1);
    img0.copyTo(img1(tempRect));
    
    // Run the Canny edge detector on the padded image
    Canny(img1, img1, 100, 200, 3);

    // find the contours
    vector< vector<Point> > contours;
    findContours(img1, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
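    // Note: findContours modifies its input, so img1 now holds contour residue rather than clean edges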

    // Mask for segmented regions
    Mat mask = Mat::zeros(img1.rows, img1.cols, CV_8UC1);

    vector<double> areas(contours.size());

	if (minPixelSize==-1)
	{ // Take the largest region only
		for (int i = 0; i < contours.size(); i++)
			areas[i] = contourArea(Mat(contours[i]));
		double max;
		Point maxPosition;
		minMaxLoc(Mat(areas), 0, &max, 0, &maxPosition);
		// areas is an Nx1 matrix, so the row (y) of the maximum is the contour index
		drawContours(mask, contours, maxPosition.y, Scalar(1), CV_FILLED);
	}
	else
	{ // Keep only regions with at least minPixelSize pixels
		for (int i = 0; i < contours.size(); i++)
		{
			if (contourArea(Mat(contours[i])) > minPixelSize)
				drawContours(mask, contours, i, Scalar(1), CV_FILLED);
		}
	}
    // Normalize so imwrite(...)/imshow(...) shows the mask correctly
    normalize(mask.clone(), mask, 0.0, 255.0, CV_MINMAX, CV_8UC1);

    // Crop off the zero padding before returning
    Mat returnMask = mask(tempRect);

    // Show the intermediate images when verbose output is enabled
    if (verboseOutput)	imshow("Canny Skin: Img in", img0);
    if (verboseOutput)	imshow("Canny Skin: Mask", returnMask);
    if (verboseOutput)	imshow("Canny Skin: Output", img1);

    return returnMask;
}
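
A hypothetical usage sketch for cannySegmentation (the input file name is an assumption; skinDetector and verboseOutput come from the surrounding project):

#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    // Hypothetical input image; any single-channel frame works
    Mat gray = imread( "hand.png", CV_LOAD_IMAGE_GRAYSCALE );
    if ( gray.empty() )
        return -1;
    skinDetector detector;                                  // project class
    Mat all      = detector.cannySegmentation( gray, 0 );   // every segment
    Mat filtered = detector.cannySegmentation( gray, 50 );  // regions > 50 px
    Mat largest  = detector.cannySegmentation( gray, -1 );  // largest only
    imwrite( "segments.png", filtered );
    return 0;
}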
Esempio n. 30
0
int main(int argc, char** argv)
{
    ofstream f1;
    f1.open("result.txt");
    size_t i,j;
    Point2f cp;
    cv::initModule_nonfree();
    vector<Point2f> MP1,MP2;
    vector<int> trainIdxs, queryIdxs;

    //Read Video File
    VideoCapture cap("video1.avi");
    if( !cap.isOpened() )
    {
        cout << "Could not initialize capturing...\n";
        return 0;
    }



    VideoWriter writer("ms_tracking.avi",CV_FOURCC('D','I','V','3'),
                 10,cvSize(640,480),1);

    cv::SURF mySURF;
    mySURF.extended = 0; // 64-dimensional descriptors
    Ptr<DescriptorMatcher> descriptorMatcher = DescriptorMatcher::create( "FlannBased" );
    int matcherFilterType = getMatcherFilterType( "CrossCheckFilter" );

    Mat frame,img1,img2;
    cap >> frame;
    if( frame.empty() )
        return -1;
    img1 = frame.clone();
    Mat temp, temp1;

    if( img1.empty() )
    {
        cout << "Exiting as the input image is empty" << endl;
        return -1;
    }


    const char* name = "Initiate_ROI";
    box = cvRect(-1,-1,0,0);
    cvNamedWindow( name,1);
    cvSetMouseCallback( name, my_mouse_callback2);

    // Main loop
    while( 1 )
    {
        img1.copyTo(temp);

        if( drawing_poly)
        {

            for ( i=0; i < polyPoints.size(); i++)
                circle(temp, polyPoints[i], 2, Scalar(0,255,0), -1,8);
        }
        cv::imshow(name,temp) ;
        char c = (char)waitKey(10);
        if( c == '\x1b' ) // esc
            break;
        if(poly_drawn)
            break;
    }

    //Read the polygon points from a text file

    FILE *f11;
    polyPoints.clear();
    IpolyPoints.clear();
    f11 = fopen("points.txt","r");
    if( f11 == NULL )
    {
        cout << "Could not open points.txt" << endl;
        return -1;
    }
    Point a;
    // points.txt is expected to hold 37 whitespace-separated x y pairs
    for(int j=0;j<37;j++)
    {
        fscanf(f11,"%d",&(a.x));
        fscanf(f11,"%d",&(a.y));
        polyPoints.push_back(a);
        IpolyPoints.push_back(a);
    }
    fclose(f11);

    // Drawing the polygon (std::vector instead of a variable-length array, which is not standard C++)
    vector<Point> pointArr(polyPoints.size());
    for (i=0; i< polyPoints.size(); i++)
        pointArr[i] = polyPoints[i];
    const Point* pointsArray[1] = { &pointArr[0] };
    int nCurvePts[1] = { (int)polyPoints.size() };
    polylines(temp, pointsArray, nCurvePts, 1, 1, Scalar(0,255,0), 1);

    cout << polyPoints.size() << endl;
    box= boundingRect(polyPoints);

    //boxOrg = Rect(box.x-15, box.y-15, box.width+30, box.height+30);
    boxOuter = Rect(box.x-30, box.y-30, box.width+60, box.height+60);
    //box = boxOrg; // store the initially selected box, as "box" changes during matching
    boxP = box;
    Mat img1ROI, labels1, clusters1, descriptors,roidescriptors, descriptors1,bdescriptors, bmdescriptors;
    vector<int> reprojections; // number of reprojections per keypoint, same size as the (growing) keypoint list
    vector<Point2f> points,points1,points2, Mpoints1,Mpoints2,bpoints,npoints1,npoints2; //bmpoints,tpoints;
    vector<KeyPoint> roikeypoints, bkeypoints,keypoints,keypoints1, keypoints2;


    draw_box(temp, box ); //Show InnerBox  - This is used by the Mean-Shift Tracker
    draw_box(temp,boxOuter); //Show OuterBox - This is used for removing background points
    bpoints.clear();

    // Calculate keypoints and descriptors of the selected polygon in the image ROI
    //==============================================================================================//
    // Shift the polygon points into boxOuter's local coordinate frame
    for(i=0;i<polyPoints.size();i++)
    {
        polyPoints[i].x = polyPoints[i].x - boxOuter.x;
        polyPoints[i].y = polyPoints[i].y - boxOuter.y;
    }

    img1ROI = img1(boxOuter);
    points1.clear();
    mySURF.detect(img1ROI, roikeypoints);
    KeyPoint::convert(roikeypoints, points);
    mySURF.compute(img1ROI, roikeypoints, roidescriptors);

    bdescriptors.release();bkeypoints.clear();
    bcategorizePoints( points, bpoints,polyPoints, roikeypoints, roidescriptors, bkeypoints, bdescriptors);
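    // bcategorizePoints appears to collect the keypoints outside the polygon as background (bkeypoints / bdescriptors)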
    shiftPoints(bpoints,boxOuter);
    for(i=0;i<bpoints.size();i++)
        circle(temp, bpoints[i], 2, Scalar(0,255,0),2);

    vector<KeyPoint> tpkeypoints;
    Mat tpdescriptors;
    categorizePoints( points, points1, polyPoints, roikeypoints, roidescriptors, tpkeypoints, tpdescriptors );

    shiftPoints(points1, boxOuter);
    for(i=0;i<points1.size();i++)
        circle(temp, points1[i], 2, Scalar(0,0,255),2);
    //====================================================================================================//
    points1.clear();
    Mat img2ROI;

  //  tpkeypoints = keypoints1;    tpdescriptors = descriptors1;
    cv::imshow(name,temp) ;
    imwrite("a.jpg",temp);
    cout << "BD_SIZE \t" << bdescriptors.rows << "\t" << "FD_SIZE \t"  << tpdescriptors.rows << endl;


//    Mat newimg = img1ROI.clone();
//     KeyPoint::convert(tpkeypoints, points1);
//    for(size_t i=0;i<points1.size();i++)
//         circle(newimg, points1[i], 2, Scalar(255,0,255),2);

//     imshow( "newimg", newimg );
//    points1.clear();

    waitKey(0);
    cvDestroyWindow( name );


    int FG_mp, FG, BG_mp, BG, FG_BG, msI; // foreground / background matching statistics per frame
    struct timeval t1, t2;

    for(int l=0;;l++)
    {
        gettimeofday(&t1, NULL);
        cv::kmeans(tpdescriptors, NOC, labels1, TermCriteria( CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 50, 1.0 ), 1,
                   KMEANS_RANDOM_CENTERS, clusters1);
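        // clusters1 holds the k-means centres of the foreground descriptors, used by meanShift below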

        cap >> frame;
        img2 = frame.clone() ;
        temp1 =frame.clone() ;

        if( img2.empty() )
        {
            cout << "Could not read the next frame" << endl;
            break;
        }

        int flag=1;
        Mpoints1.clear();
        Mat descriptors2;

        msI=0;

        meanShift(img1, img2, descriptorMatcher, matcherFilterType, tpkeypoints, tpdescriptors, keypoints2, descriptors2,
                  clusters1, cp, flag, MP1, img2ROI, bkeypoints, bdescriptors, temp1, FG_mp, FG, BG_mp, BG, FG_BG, msI);



        //==========scaling=================
        float scale=1;

       // cout <<"MP1size \t" << MP1.size() <<endl;

        if(APPLY_SCALING)
        {
            vector<DMatch> filteredMatches;

            if(descriptors1.rows > 4 && descriptors2.rows > 4)
            {
                crossCheckMatching( descriptorMatcher, descriptors1, descriptors2, filteredMatches, 1 );

                trainIdxs.clear();    queryIdxs.clear();

                for( i = 0; i < filteredMatches.size(); i++ )
                {
                    queryIdxs.push_back(filteredMatches[i].queryIdx);
                    trainIdxs.push_back(filteredMatches[i].trainIdx);
                }

                points1.clear(); points2.clear();
                KeyPoint::convert(keypoints1, points1, queryIdxs);
                KeyPoint::convert(keypoints2, points2, trainIdxs);
                //  cout << "point2size" << points2.size() << endl;

                //homography

                npoints1.clear();npoints2.clear();
                Mpoints1.clear();Mpoints2.clear();
                Mat H12, points1t;
                double ransacReprojThreshold = 10;
                if( ransacReprojThreshold >= 0  && points1.size() > 4)
                    H12 = findHomography( Mat(points1), Mat(points2), CV_RANSAC, ransacReprojThreshold );
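                // H12 maps the previous ROI's points onto the current frame; inliers are collected below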
                vector<char> matchesMask( filteredMatches.size(), 0 );
                if( !H12.empty() )
                {
                    perspectiveTransform(Mat(points1), points1t, H12);

                    // Points whose reprojection error under H12 is small are inliers
                    double maxInlierDist = 5; // pixels

                    for( i = 0; i < points1.size(); i++ )
                    {
                        if( norm(points2[i] - points1t.at<Point2f>((int)i,0)) <= maxInlierDist ) // inlier
                        {
                            matchesMask[i] = 1;
                            npoints2.push_back(points2[i]);
                            npoints1.push_back(points1[i]);
                        }
                    }

                    // Keep only inliers that coincide with the mean-shift matched points (MP1)
                    for( i = 0; i < npoints2.size(); i++ )
                    {
                        for( j = 0; j < MP1.size(); j++ )
                        {
                            double dist = norm(npoints2[i] - MP1[j]);
                            if( dist < 0.1 )
                            {
                                Mpoints2.push_back(npoints2[i]);
                                Mpoints1.push_back(npoints1[i]);
                                break;
                            }
                        }
                    }
                }
                Mat drawImg;
                drawMatches( img1ROI, keypoints1, img2ROI, keypoints2, filteredMatches, drawImg, CV_RGB(0, 255, 0), CV_RGB(0, 0, 255), matchesMask
             #if DRAW_RICH_KEYPOINTS_MODE
                             , DrawMatchesFlags::DRAW_RICH_KEYPOINTS
             #endif
                             );
                imshow( "correspondance", drawImg );
                cout << "npoints1.size \t" << Mpoints1.size() << "\t" << Mpoints2.size() << endl;
                if(Mpoints1.size() > 8)
                    weightScalingAspect(Mpoints1,Mpoints2,&scale);

            }

        }


        // The current frame becomes the reference for the next iteration
        img1 = img2;
        img1ROI = img2ROI;
        boxOrg = box;
        keypoints1 = keypoints2;
        descriptors1 = descriptors2;

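        // Rescale the tracking box about its centre, preserving the original aspect ratio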
        box.x += box.width/2;
        box.y += box.height/2;
        box.height = round(boxOrg.height *scale);
        box.width = round(( float(boxOrg.width)/float(boxOrg.height) ) * box.height);
        box.x -= box.width/2;
        box.y -= box.height/2;

        boundaryCheckRect(box);

        cout <<"SCALE \t" << scale << endl;

        gettimeofday(&t2, NULL);
        double diff = (double)((t2.tv_sec * 1000000 + t2.tv_usec) - (t1.tv_sec * 1000000 + t1.tv_usec));
        diff = diff / 1000; // microseconds -> milliseconds
        cout << "Time taken in milliseconds \t" << diff << endl;
       // cout << tpdescriptors.rows << endl;
        //cout <<"BD \t" << bdescriptors.rows << endl;
        f1 <<  l << "\t" << FG_mp << "\t"   << BG_mp  << "\t"   << FG   << "\t"<< msI << "\n";
        cout << "l \t" << l << "\t" <<" msI \t"<< msI << endl;
        imshow("img2",temp1);
        writer << temp1;
         waitKey(0);




       // boxOrg = eBox;

        char c = (char)waitKey(10);
        if( c == '\x1b' ) // esc
        {
            cout << "Exiting ..." << endl;
            break;
        }

    }
    f1.close();
    trajectory.close();

    return 0;
}
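
weightScalingAspect is not shown in this example; as a hedged illustration, one robust way to turn the matched pairs (Mpoints1, Mpoints2) into a scale factor is the median ratio of pairwise distances. The estimateScale name and the approach below are ours, not necessarily what the project does:

#include <opencv2/core/core.hpp>
#include <algorithm>
#include <vector>
using namespace cv;
using namespace std;

// Assumes p1 and p2 have the same size and are index-aligned matches.
float estimateScale( const vector<Point2f> &p1, const vector<Point2f> &p2 )
{
    // Compare the distance between every pair of points in the previous
    // frame (p1) with the distance between the same pair in the current frame (p2)
    vector<float> ratios;
    for ( size_t i = 0; i < p1.size(); i++ )
        for ( size_t j = i + 1; j < p1.size(); j++ )
        {
            double d1 = norm( p1[i] - p1[j] );
            double d2 = norm( p2[i] - p2[j] );
            if ( d1 > 1e-3 )                    // skip near-coincident pairs
                ratios.push_back( (float)( d2 / d1 ) );
        }
    if ( ratios.empty() )
        return 1.0f;                            // no evidence; keep current scale
    nth_element( ratios.begin(), ratios.begin() + ratios.size() / 2, ratios.end() );
    return ratios[ratios.size() / 2];           // median is outlier-tolerant
}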