void identifyObject( Mat& frame, Mat& object, const string& objectName ) {

    //Detect the keypoints using SURF Detector
    int minHessian = 500;
    SurfFeatureDetector detector( minHessian );
    std::vector<KeyPoint> kp_object;
    detector.detect( object, kp_object );

    //Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    Mat des_object;
    extractor.compute( object, kp_object, des_object );
    FlannBasedMatcher matcher;


    //Get the corners from the object
    std::vector<Point2f> obj_corners(4);
    obj_corners[0] = cvPoint(0,0);
    obj_corners[1] = cvPoint( object.cols, 0 );
    obj_corners[2] = cvPoint( object.cols, object.rows );
    obj_corners[3] = cvPoint( 0, object.rows );


    // Match descriptors to frame
    Mat des_image, img_matches;
    std::vector<KeyPoint> kp_image;
    std::vector<vector<DMatch > > matches;
    std::vector<DMatch > good_matches;
    std::vector<Point2f> obj;
    std::vector<Point2f> scene;
    std::vector<Point2f> scene_corners(4);
    Mat H;
    Mat image;

    cvtColor(frame, image, CV_BGR2GRAY); //VideoCapture frames are BGR, not RGB

    detector.detect( image, kp_image );
    extractor.compute( image, kp_image, des_image );

    matcher.knnMatch(des_object, des_image, matches, 2);

    //Lowe's ratio test: check the neighbour count before indexing matches[i][1],
    //which is what made this loop prone to segfaults
    for(int i = 0; i < (int) matches.size(); i++)
    {
        if(matches[i].size() == 2 && matches[i][0].distance < 0.6*matches[i][1].distance)
        {
            good_matches.push_back(matches[i][0]);
        }
    }

    //Draw only "good" matches
    drawMatches( object, kp_object, image, kp_image, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

    if (good_matches.size() >= 4)
    {
        for( int i = 0; i < good_matches.size(); i++ )
        {
            //Get the keypoints from the good matches
            obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
            scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
        }

        H = findHomography( obj, scene, CV_RANSAC );

        perspectiveTransform( obj_corners, scene_corners, H);

        //Draw lines between the corners (the mapped object in the scene image )
        line( frame, scene_corners[0], scene_corners[1], Scalar(0, 255, 0), 4 );
        line( frame, scene_corners[1], scene_corners[2], Scalar( 0, 255, 0), 4 );
        line( frame, scene_corners[2], scene_corners[3], Scalar( 0, 255, 0), 4 );
        line( frame, scene_corners[3], scene_corners[0], Scalar( 0, 255, 0), 4 );
        //Label the detected object at the centroid of the mapped corners
        //(only when a homography was found, so scene_corners holds valid points)
        Point2f textPoint( (scene_corners[0].x + scene_corners[1].x + scene_corners[2].x + scene_corners[3].x) / 4.0f,
                           (scene_corners[0].y + scene_corners[1].y + scene_corners[2].y + scene_corners[3].y) / 4.0f );
        putText( frame, objectName, textPoint, FONT_HERSHEY_COMPLEX_SMALL, 1.0, cvScalar(0,250,150), 1, CV_AA );
    }

}
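A minimal driver sketch for identifyObject(); the reference-image path, window name, and label are assumptions, not part of the original. Note that identifyObject() re-detects and re-describes the reference object on every call, so hoisting that work out of the loop would be the natural optimization.

// Hypothetical usage of identifyObject(); "logo.jpg" and the label are assumed.
int main()
{
    Mat object = imread( "logo.jpg", CV_LOAD_IMAGE_GRAYSCALE );
    if( !object.data ) { std::cout << "Error reading object" << std::endl; return -1; }

    VideoCapture cap(0);
    if( !cap.isOpened() ) return -1;

    Mat frame;
    while( waitKey(30) < 0 )
    {
        cap >> frame;                            //grab a frame
        identifyObject( frame, object, "logo" ); //draws the outline and label in place
        imshow( "identifyObject", frame );
    }
    return 0;
}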
Example #2
int main( int argc, char** argv )
{

    VideoCapture cap(0);
    if(!cap.isOpened())  // check camera
    {
        string message = "Camera is Broken";
        cout << message << endl;
        return -1;
    }

    Mat frame_1, frame_2, outpt, outpt_kp;
    std::vector<KeyPoint> keypoints_object_1, keypoints_object_2;

    int minHessian = 2000;
    SurfFeatureDetector detector( minHessian );

    namedWindow("frame",1);

    //take a snapshot from camera, as first image
    for(;;)
    {
        //show every frame with keypoints
        cap >> frame_1; // get a new frame from camera
        detector.detect( frame_1, keypoints_object_1 ); 
        drawKeypoints(frame_1, keypoints_object_1, outpt_kp, Scalar( 0, 255, 255 ), DrawMatchesFlags::DEFAULT );
        
        imshow("frame", outpt_kp);
        if(waitKey(30) >= 0) 
        {
            //save snapshot
            imwrite( "./test_img.jpg", frame_1);
            break;
        }
    }
    //and then load it as reference image
    Mat reference_image;
    reference_image = imread( "./test_img.jpg", 1 );
    detector.detect( reference_image, keypoints_object_1 );
    
    // detect keypoints offset on each frame   
    for(;;)
    {

        //detect keypoints
        cap >> frame_2; // get a new frame from camera
        detector.detect( frame_2, keypoints_object_2 );      

        SurfDescriptorExtractor extractor;
        cv::Mat descriptors1, descriptors2; 

        //compute keypoint descriptors
        extractor.compute(reference_image, keypoints_object_1, descriptors1);
        extractor.compute(frame_2, keypoints_object_2, descriptors2);

        //match keypoints between images
        FlannBasedMatcher matcher;
        vector< DMatch > matches;
        matcher.match(descriptors1, descriptors2, matches);

        double max_dist = 0; double min_dist = 100;

        // Quick calculation of max and min distances between keypoints
        for( int i = 0; i < descriptors1.rows; i++ )
        { 
            double dist = matches[i].distance;
            if( dist < min_dist ) min_dist = dist;
            if( dist > max_dist ) max_dist = dist;
        }

        printf("-- Max dist : %f \n", max_dist );
        printf("-- Min dist : %f \n", min_dist );

        // Draw only "good" matches (i.e. whose distance is less than some_value*min_dist )
        std::vector< DMatch > good_matches;

        for( int i = 0; i < descriptors1.rows; i++ )
        { 
            if( matches[i].distance < 2*min_dist )
            { 
                good_matches.push_back( matches[i]); 
            }
        }

        //show keypoint offsets (uncomment one of the calls below;
        //drawVectors is a custom helper, not part of OpenCV -- see the sketch after this example)
        //drawMatches(reference_image, keypoints_object_1, frame_2, keypoints_object_2, good_matches, outpt, Scalar( 0, 255, 255 ), Scalar( 255, 0, 255 ), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
        drawVectors(reference_image, keypoints_object_1, frame_2, keypoints_object_2, good_matches, outpt, Scalar( 0, 255, 255 ), Scalar( 255, 0, 255 ), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

        imshow("frame", outpt);
        if(waitKey(30) >= 0) 
        {
            imwrite( "./test_img1.jpg", frame_2);
            break;
        }
    }

}
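drawVectors() is not an OpenCV function; it is presumably a project-local helper. A plausible sketch, assuming it draws each match as a displacement line on the current frame rather than a side-by-side match view (the exact behaviour of the original helper is unknown; this only matches the call signature above):

// Hypothetical implementation of drawVectors().
void drawVectors( const Mat& img1, const vector<KeyPoint>& kp1,
                  const Mat& img2, const vector<KeyPoint>& kp2,
                  const vector<DMatch>& matches, Mat& out,
                  const Scalar& lineColor, const Scalar& pointColor,
                  const vector<char>& /*mask*/, int /*flags*/ )
{
    img2.copyTo(out);
    for( size_t i = 0; i < matches.size(); i++ )
    {
        Point2f from = kp1[matches[i].queryIdx].pt; //position in the reference image
        Point2f to   = kp2[matches[i].trainIdx].pt; //position in the current frame
        line( out, from, to, lineColor, 1 );
        circle( out, to, 3, pointColor, 1 );
    }
}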
Example #3
/**
 * @function main
 * @brief Main function
 */
int main( int argc, char** argv )
{
  if( argc != 3 )
  { readme(); return -1; }

  Mat img_object = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );
  Mat img_scene = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE );

  if( !img_object.data || !img_scene.data )
  { std::cout<< " --(!) Error reading images " << std::endl; return -1; }

  //-- Step 1: Detect the keypoints using SURF Detector
  int minHessian = 400;

  SurfFeatureDetector detector( minHessian );

  std::vector<KeyPoint> keypoints_object, keypoints_scene;

  detector.detect( img_object, keypoints_object );
  detector.detect( img_scene, keypoints_scene );

  //-- Step 2: Calculate descriptors (feature vectors)
  SurfDescriptorExtractor extractor;

  Mat descriptors_object, descriptors_scene;

  extractor.compute( img_object, keypoints_object, descriptors_object );
  extractor.compute( img_scene, keypoints_scene, descriptors_scene );

  //-- Step 3: Matching descriptor vectors using FLANN matcher
  FlannBasedMatcher matcher;
  std::vector< DMatch > matches;
  matcher.match( descriptors_object, descriptors_scene, matches );

  double max_dist = 0; double min_dist = 100;

  //-- Quick calculation of max and min distances between keypoints
  for( int i = 0; i < descriptors_object.rows; i++ )
  { double dist = matches[i].distance;
    if( dist < min_dist ) min_dist = dist;
    if( dist > max_dist ) max_dist = dist;
  }

  printf("-- Max dist : %f \n", max_dist );
  printf("-- Min dist : %f \n", min_dist );

  //-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
  std::vector< DMatch > good_matches;

  for( int i = 0; i < descriptors_object.rows; i++ )
  { if( matches[i].distance < 3*min_dist )
    { good_matches.push_back( matches[i]); }
  }

  Mat img_matches;
  drawMatches( img_object, keypoints_object, img_scene, keypoints_scene,
               good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
               vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );


  //-- Localize the object from img_1 in img_2
  std::vector<Point2f> obj;
  std::vector<Point2f> scene;

  for( size_t i = 0; i < good_matches.size(); i++ )
  {
    //-- Get the keypoints from the good matches
    obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
    scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
  }

  Mat H = findHomography( obj, scene, CV_RANSAC );

  //-- Get the corners from the image_1 ( the object to be "detected" )
  std::vector<Point2f> obj_corners(4);
  obj_corners[0] = cvPoint(0,0); obj_corners[1] = cvPoint( img_object.cols, 0 );
  obj_corners[2] = cvPoint( img_object.cols, img_object.rows ); obj_corners[3] = cvPoint( 0, img_object.rows );
  std::vector<Point2f> scene_corners(4);

  perspectiveTransform( obj_corners, scene_corners, H);


  //-- Draw lines between the corners (the mapped object in the scene - image_2 )
  Point2f offset( (float)img_object.cols, 0);
  line( img_matches, scene_corners[0] + offset, scene_corners[1] + offset, Scalar(0, 255, 0), 4 );
  line( img_matches, scene_corners[1] + offset, scene_corners[2] + offset, Scalar( 0, 255, 0), 4 );
  line( img_matches, scene_corners[2] + offset, scene_corners[3] + offset, Scalar( 0, 255, 0), 4 );
  line( img_matches, scene_corners[3] + offset, scene_corners[0] + offset, Scalar( 0, 255, 0), 4 );

  //-- Show detected matches
  imshow( "Good Matches & Object detection", img_matches );

  waitKey(0);

  return 0;
}
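The readme() helper referenced above is not shown; a minimal sketch consistent with the argument check:

// Hypothetical readme() helper; prints the expected command line.
void readme()
{
    std::cout << " Usage: ./SURF_Homography <img_object> <img_scene>" << std::endl;
}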
int main()
{
    Mat object = imread( "photo.jpg", CV_LOAD_IMAGE_GRAYSCALE );

    if( !object.data )
    {
        std::cout<< "Error reading object " << std::endl;
        return -1;
    }

    //Detect the keypoints using SURF Detector
    int minHessian = 500;
    SurfFeatureDetector detector( minHessian );
    std::vector<KeyPoint> kp_object;
    detector.detect( object, kp_object );

    //Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    Mat des_object;

    extractor.compute( object, kp_object, des_object );

    FlannBasedMatcher matcher;

    VideoCapture cap(0);

    namedWindow("Good Matches");

    std::vector<Point2f> obj_corners(4);

    //Get the corners from the object
    obj_corners[0] = cvPoint(0,0);
    obj_corners[1] = cvPoint( object.cols, 0 );
    obj_corners[2] = cvPoint( object.cols, object.rows );
    obj_corners[3] = cvPoint( 0, object.rows );

    char key = 'a';
    int framecount = 0;
    while (key != 27)
    {
        Mat frame;
        cap >> frame;

        if (framecount < 5)
        {
            framecount++;
            continue;
        }

        Mat des_image, img_matches;
        std::vector<KeyPoint> kp_image;
        std::vector<vector<DMatch > > matches;
        std::vector<DMatch > good_matches;
        std::vector<Point2f> obj;
        std::vector<Point2f> scene;
        std::vector<Point2f> scene_corners(4);
        Mat H;
        Mat image;

        cvtColor(frame, image, CV_BGR2GRAY); //VideoCapture frames are BGR, not RGB

        detector.detect( image, kp_image );
        extractor.compute( image, kp_image, des_image );

        matcher.knnMatch(des_object, des_image, matches, 2);

        //Lowe's ratio test: check the neighbour count before indexing matches[i][1],
        //which is what made this loop prone to segfaults
        for(int i = 0; i < (int) matches.size(); i++)
        {
            if(matches[i].size() == 2 && matches[i][0].distance < 0.6*matches[i][1].distance)
            {
                good_matches.push_back(matches[i][0]);
            }
        }

        //Draw only "good" matches
        drawMatches( object, kp_object, image, kp_image, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

        if (good_matches.size() >= 4)
        {
            for( int i = 0; i < good_matches.size(); i++ )
            {
                //Get the keypoints from the good matches
                obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
                scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
            }

            H = findHomography( obj, scene, CV_RANSAC );

            perspectiveTransform( obj_corners, scene_corners, H);

            //Draw lines between the corners (the mapped object in the scene image )
            line( img_matches, scene_corners[0] + Point2f( object.cols, 0), scene_corners[1] + Point2f( object.cols, 0), Scalar(0, 255, 0), 4 );
            line( img_matches, scene_corners[1] + Point2f( object.cols, 0), scene_corners[2] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
            line( img_matches, scene_corners[2] + Point2f( object.cols, 0), scene_corners[3] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
            line( img_matches, scene_corners[3] + Point2f( object.cols, 0), scene_corners[0] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
        }

        //Show detected matches
        imshow( "Good Matches", img_matches );

        key = waitKey(1);
    }
    return 0;
}
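These snippets target the pre-3.0 API, where SurfFeatureDetector and SurfDescriptorExtractor live in the nonfree module. In OpenCV 3+, SURF moved to the opencv_contrib xfeatures2d module; a sketch of the equivalent setup (the input file name is hypothetical):

// OpenCV 3+ equivalent (requires opencv_contrib); a sketch, not original code.
#include <opencv2/opencv.hpp>
#include <opencv2/xfeatures2d.hpp>

int main()
{
    cv::Mat img = cv::imread( "object.png", cv::IMREAD_GRAYSCALE ); //hypothetical input
    cv::Ptr<cv::xfeatures2d::SURF> surf = cv::xfeatures2d::SURF::create( 500 ); //minHessian
    std::vector<cv::KeyPoint> keypoints;
    cv::Mat descriptors;
    surf->detectAndCompute( img, cv::noArray(), keypoints, descriptors ); //detect + describe in one call
    return 0;
}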
/** @function main */
int main( int argc, char** argv )
{
 if( argc != 3 )
 { readme(); return -1; }
 
    // Load the images
    Mat image1 = imread( argv[2] );
    Mat image2 = imread( argv[1] );
    Mat gray_image1;
    Mat gray_image2;

    // Convert to grayscale (imread returns BGR, so use CV_BGR2GRAY)
    cvtColor( image1, gray_image1, CV_BGR2GRAY );
    cvtColor( image2, gray_image2, CV_BGR2GRAY );

    imshow("first image", image2);
    imshow("second image", image1);

    if( !gray_image1.data || !gray_image2.data )
    { std::cout << " --(!) Error reading images " << std::endl; return -1; }

    //-- Step 1: Detect the keypoints using SURF Detector
    int minHessian = 400;

    SurfFeatureDetector detector( minHessian );

    std::vector< KeyPoint > keypoints_object, keypoints_scene;

    detector.detect( gray_image1, keypoints_object );
    detector.detect( gray_image2, keypoints_scene );

    //-- Step 2: Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;

    Mat descriptors_object, descriptors_scene;

    extractor.compute( gray_image1, keypoints_object, descriptors_object );
    extractor.compute( gray_image2, keypoints_scene, descriptors_scene );

    //-- Step 3: Matching descriptor vectors using FLANN matcher
    FlannBasedMatcher matcher;
    std::vector< DMatch > matches;
    matcher.match( descriptors_object, descriptors_scene, matches );

    double max_dist = 0; double min_dist = 100;

    //-- Quick calculation of max and min distances between keypoints
    for( int i = 0; i < descriptors_object.rows; i++ )
    {
        double dist = matches[i].distance;
        if( dist < min_dist ) min_dist = dist;
        if( dist > max_dist ) max_dist = dist;
    }

    printf("-- Max dist : %f \n", max_dist );
    printf("-- Min dist : %f \n", min_dist );

    //-- Use only "good" matches (i.e. whose distance is less than 3*min_dist )
    std::vector< DMatch > good_matches;

    for( int i = 0; i < descriptors_object.rows; i++ )
    {
        if( matches[i].distance < 3*min_dist )
        { good_matches.push_back( matches[i] ); }
    }

    std::vector< Point2f > obj;
    std::vector< Point2f > scene;

    for( size_t i = 0; i < good_matches.size(); i++ )
    {
        //-- Get the keypoints from the good matches
        obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
        scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
    }

    // Find the homography matrix
    Mat H = findHomography( obj, scene, CV_RANSAC );
    // Use the homography to warp image1 onto image2's plane
    cv::Mat result;
    warpPerspective( image1, result, H, cv::Size(image1.cols + image2.cols, image1.rows) );
    cv::Mat half( result, cv::Rect(0, 0, image2.cols, image2.rows) );
    image2.copyTo(half);
    imshow( "Result", result );

    waitKey(0);
    return 0;
}
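The stitched result typically carries a black band on the right where the warped image did not reach. A small helper sketch (an addition, assuming a dark background) that crops to the non-black region:

// Crop the stitch to its non-black bounding box.
Mat trimBlackBorder( const Mat& stitched )
{
    Mat gray, mask;
    cvtColor( stitched, gray, CV_BGR2GRAY );
    threshold( gray, mask, 1, 255, THRESH_BINARY ); //anything brighter than 1 counts as content
    std::vector<cv::Point> nonzero;
    findNonZero( mask, nonzero );                   //available since OpenCV 2.4.4
    return stitched( boundingRect(nonzero) ).clone();
}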
int main(int argc, char *argv[])
{
	// Timer start.
	clock_t tmStart = clock();

	// Argument variable.
	// ==================
	bool bUseColorFeature = false;
	int iColorFeature = 0;

	bool bUseShapeFeature = false;
	int iShapeFeature = 0;
	// ==================

	// Checking correctness of all arguments.
	// ======================================
	if(argc == 1)
	{
		showHowToUse(argv[0]);
	}

	if(argc > 3)
	{
		cout << "You gave exceeded number of arguments.\n";
		exit(1);
	}

	for(int i=1;i<argc;i++)
	{
		string strTemp = argv[i];

		if(strTemp.at(0) != '/')
			showHowToUse(argv[0]);

		if(strTemp.at(1) == 'c' && strTemp.at(2) == '=')
		{
			bUseColorFeature = true;
			iColorFeature = atoi(strTemp.substr(3).c_str());
		}

		if(strTemp.at(1) == 's' && strTemp.at(2) == '=')
		{
			bUseShapeFeature = true;
			iShapeFeature = atoi(strTemp.substr(3).c_str());
		}
	}
	// ======================================
	
	string strDirFlowerDB = "flowerPicDB\\"; // The folder of flower images.
	string strDirDescriptionDB = "descriptionDB\\"; // The folder of features output.
	string strFNameFlowerDB = strDirFlowerDB + "files.txt"; // List of flowers' name.
	string strFNameFlower; // Indexer.
	string strFNameDesc; // Indexer.
	string strFNameDescTemp;
	ifstream inFile;
	int count = 0; // Number of image files.
	string::size_type idx; // A position of '.'.

	SurfFeatureDetector surf(2500.);
	SurfDescriptorExtractor surfDesc;
	HistogramHSV hsvObj;

	// Find number of photos.
	// ======================
	inFile.open(strFNameFlowerDB.c_str());
	if(!inFile.is_open()) cout << "Can't open file " << strFNameFlowerDB << endl;
	while(inFile >> strFNameFlower)
	{
		count++;
	}
	inFile.close();
	inFile.clear(); // clear() must be called before the stream is read a second time,
					// otherwise the next read loop finishes immediately.

	cout << "Number of flower photo = " << count << endl << endl;

	// Extract features of all photos in the DB.
	// =========================================
	// Array of image in DB.
	Mat *imgFlowerDB = new Mat[count];
	// Array of shape feature.
	vecKey *keypointDB;
	Mat *descriptorDB;
	if(bUseShapeFeature)
	{
		keypointDB = new vecKey[count];
		descriptorDB = new Mat[count];
	}
	// Array of colour feature.
	MatND *hueHistogram;
	MatND *saturationHistogram;
	MatND *valueHistogram;
	if(bUseColorFeature)
	{
		hueHistogram = new MatND[count];
		saturationHistogram = new MatND[count];
		valueHistogram = new MatND[count];
	}
	// File pointer of output file.
	FileStorage outDescFileSurf,outDescFileH,outDescFileS,outDescFileV;
	count = 0; // Reset count to 0.

	inFile.open(strFNameFlowerDB.c_str());
	if(!inFile.is_open()) cout << "Can't open file " << strFNameFlowerDB << endl;
	while(inFile >> strFNameFlower)
	{
		cout << "Read  " << strFNameFlower << endl;
		// Read all flower photos to array.
		// ================================
		imgFlowerDB[count] = imread(strDirFlowerDB + strFNameFlower);
		// Extract shape feature.
		// ======================
		if(bUseShapeFeature)
		{
			surf.detect(imgFlowerDB[count],keypointDB[count]);
			surfDesc.compute(imgFlowerDB[count],keypointDB[count],descriptorDB[count]);
		}
		// Extract colour feature.
		// =======================
		if(bUseColorFeature)
		{
			cv::Mat imgTempROI = getCenterOfImage(imgFlowerDB[count],iColorFeature);
			hueHistogram[count] = hsvObj.getHueHistogram(imgTempROI);
			saturationHistogram[count] = hsvObj.getSaturationHistogram(imgTempROI);
			valueHistogram[count] = hsvObj.getValueHistogram(imgTempROI);
		}

		// Write all description.
		// ======================
		strFNameDesc = strFNameFlower;
		/* position = strFNameDesc.find("jpg",0);
		strFNameDesc.replace(position,3,"yml"); */
		idx = strFNameDesc.find('.');
		strFNameDesc = strFNameDesc.substr(0,idx);

		// Write shape feature.
		// --------------------
		if(bUseShapeFeature)
		{
			strFNameDescTemp = strFNameDesc + "Surf" + ".yml";
			cout << "Write " << strFNameDescTemp << endl;
			outDescFileSurf.open(strDirDescriptionDB + strFNameDescTemp,FileStorage::WRITE);
			if(!outDescFileSurf.isOpened()) cout << "Can't open file " << strFNameDescTemp
				<< endl;
			outDescFileSurf << "descriptionSurfOfPic" << descriptorDB[count];
			cout << "Number of vector = " << descriptorDB[count].rows << endl;
			outDescFileSurf.release();
		}

		// Write colour feature.
		// ---------------------
		if(bUseColorFeature)
		{
			strFNameDescTemp = strFNameDesc + "H" + ".yml";
			cout << "Write " << strFNameDescTemp << endl;
			outDescFileH.open(strDirDescriptionDB + strFNameDescTemp,FileStorage::WRITE);
			if(!outDescFileH.isOpened()) cout << "Can't open file " << strFNameDescTemp
				<< endl;
			outDescFileH << "descriptionHOfPic" << hueHistogram[count];
			outDescFileH.release();

			strFNameDescTemp = strFNameDesc + "S" + ".yml";
			cout << "Write " << strFNameDescTemp << endl;
			outDescFileS.open(strDirDescriptionDB + strFNameDescTemp,FileStorage::WRITE);
			if(!outDescFileS.isOpened()) cout << "Can't open file " << strFNameDescTemp
				<< endl;
			outDescFileS << "descriptionSOfPic" << saturationHistogram[count];
			outDescFileS.release();

			strFNameDescTemp = strFNameDesc + "V" + ".yml";
			cout << "Write " << strFNameDescTemp << endl;
			outDescFileV.open(strDirDescriptionDB + strFNameDescTemp,FileStorage::WRITE);
			if(!outDescFileV.isOpened()) cout << "Can't open file " << strFNameDescTemp
				<< endl;
			outDescFileV << "descriptionVOfPic" << valueHistogram[count];
			outDescFileV.release();
		}

		count++;
	}
	inFile.close();

	// Timer stop.
	clock_t tmStop = clock();
	cout << endl << "Time for extractDesc = " << float(tmStop - tmStart)/CLOCKS_PER_SEC
		<< " sec" << endl;
	/*cout << "Press enter to exit.";
	getchar();*/

	return 0;
}
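The YAML descriptors written above can be loaded back with FileStorage in read mode; a brief sketch (the file name is hypothetical, the key matches the one written above):

// Read one SURF descriptor matrix back from its YAML file.
FileStorage inDescFile( "descriptionDB\\roseSurf.yml", FileStorage::READ ); //hypothetical file name
Mat descriptors;
inDescFile["descriptionSurfOfPic"] >> descriptors;
inDescFile.release();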
Example #7
int main(int argc, char *argv[])
{

	if( argc != 3 )
	{
		printf("Usage: %s <image1> <image2>\n", argv[0]);
		return 0;
	}

	Mat img1 = imread(argv[1], 1);
	Mat img2 = imread(argv[2], 1);

	if(img1.empty() || img2.empty())
	{
		printf("Can't read one of the images\n");
		return -1;
	}

	//MATCHING PHASE// //MATCHING PHASE// //MATCHING PHASE//

	// detecting keypoints
	SurfFeatureDetector detector(400);
	vector<KeyPoint> keypoints1, keypoints2;
	detector.detect(img1, keypoints1);
	detector.detect(img2, keypoints2);

	// computing descriptors
	SurfDescriptorExtractor extractor;
	Mat descriptors1, descriptors2;
	extractor.compute(img1, keypoints1, descriptors1);
	extractor.compute(img2, keypoints2, descriptors2);

	// matching descriptors
	FlannBasedMatcher matcher;
	vector<DMatch> matches;
	matcher.match(descriptors1, descriptors2, matches);

	float min = 10000;

	for(int i=0; i<matches.size(); i++)
	{
		if(matches[i].distance < min)
		{
			min = matches[i].distance;
		}
	}
	vector<DMatch> good_matches;
	for(int i=0; i<matches.size(); i++)
	{
		if(matches[i].distance <= 2*min)
		{
			good_matches.push_back(matches[i]);
		}
	}
	
	// drawing the results
	namedWindow("matches", 0);
	resizeWindow("matches",1280,360);
	Mat img_matches;
	drawMatches(img1, keypoints1, img2, keypoints2, good_matches, img_matches);
	imshow("matches", img_matches);
	imwrite("matches.jpg", img_matches);
	
	waitKey(0);

	//END OF MATCHING PHASE// //END OF MATCHING PHASE// //END OF MATCHING PHASE//
	
	//HOMOGRAPHY CALCULATION// //HOMOGRAPHY CALCULATION// //HOMOGRAPHY CALCULATION//
	vector<Point2f> pts_img1,pts_img2;

	for(int i=0; i < matches.size(); i++)
	{
		pts_img1.push_back(keypoints1[matches[i].queryIdx].pt);
		pts_img2.push_back(keypoints2[matches[i].trainIdx].pt);
	}

	Mat homography = findHomography(pts_img1,pts_img2,CV_RANSAC,3);

	cout << "H  = " << endl << " " << homography  << endl << endl;

	waitKey(0);
	printf("homography\n\t.rows : %d\n\t.cols : %d",homography.rows,homography.cols);

	Mat img1_transformed;
	warpPerspective(img1,img1_transformed,homography,img1.size(),INTER_LINEAR,0,0);

	double alpha = 0.5;
	double beta = 1.0 - alpha;
	
	Mat shifted;
	Mat compared;
	addWeighted(img1,alpha,img1_transformed,beta,0.0,shifted);
	addWeighted(img2,alpha,img1_transformed,beta,0.0,compared);


	namedWindow("transformed",0);
	resizeWindow("transformed",960,540);
	imshow("transformed",img1_transformed);
	namedWindow("shifted",0);
	resizeWindow("shifted",960,540);
	imshow("shifted",shifted);
	namedWindow("compared",0);
	resizeWindow("compared",960,540);
	imshow("compared",compared);

	imwrite("blend_h.jpg",compared);
	waitKey(0);

	//AFFINE TRANSFORM CALCULATION// //AFFINE TRANSFORM CALCULATION// //AFFINE TRANSFORM CALCULATION//

	//find 3 best matches and store matches-indices in an array
	DMatch tri_matches[3];

#if AFFINE_CHOICE

	float tri_match_dist[3];

	tri_match_dist[0] = 1000000;
	tri_match_dist[1] = 1000000;
	tri_match_dist[2] = 1000000;
	
	for(int i=0;i<matches.size();i++)
	{
		if(matches[i].distance < tri_match_dist[2])
		{
			if(matches[i].distance < tri_match_dist[1])
			{
				tri_match_dist[2] = tri_match_dist[1];
				tri_matches[2] = tri_matches[1];

				if(matches[i].distance < tri_match_dist[0])
				{
					tri_match_dist[1] = tri_match_dist[0];
					tri_matches[1] = tri_matches[0];
					tri_match_dist[0] = matches[i].distance;
					tri_matches[0] = matches[i];
				} else
				{
					tri_match_dist[1] = matches[i].distance;
					tri_matches[1] = matches[i];
				}
			} else
			{
				tri_match_dist[2] = matches[i].distance;
				tri_matches[2] = matches[i];
			}
		}
	}
#elif !AFFINE_CHOICE
	//determine min error out of all matches
	min = matches[0].distance;
	float max = matches[0].distance;
	for(int i=1; i<matches.size(); i++)
	{
		if(matches[i].distance < min)
		{
			min = matches[i].distance;
		}
		if(matches[i].distance > max)
		{
			max = matches[i].distance;
		}
	}

	printf("\nmin = %f\n",min);

	int i=0;
	srand(time(NULL));
	while(i < 3)
	{
		//randomly choose a match by index of the match vector
		int idx = rand() % (int)matches.size();

		if(matches[idx].distance < 2*min) //if match error is w/in acceptable limits, store it in the array of matches to calc the transform from
		{
			tri_matches[i] = matches[idx];
			i++;
		}
	}
#endif

	waitKey(0);

	//END OF TRANSFORM CALCULATION// //END OF TRANSFORM CALCULATION// //END OF TRANSFORM CALCULATION//
	
	//WARP-PHASE// //WARP-PHASE// //WARP-PHASE// //WARP-PHASE//

	//store the best 3 matches from array to a vector to pass to drawMatches
	vector<DMatch> tri_vector;
	for(int i=0;i<3;i++)
	{
		tri_vector.push_back(tri_matches[i]);
	}

	//draw chosen matches between the two images
	Mat tri_force;
	drawMatches(img1, keypoints1, img2, keypoints2, tri_vector, tri_force);
	namedWindow("chosen matches",0);
	resizeWindow("chosen matches",1280,360);
	imshow("chosen matches", tri_force);
	waitKey(0);

	Point2f tri1[3],tri2[3];

	//store x,y coordinates of triangle formed by best match points in 2 different arrays
	tri1[0] = keypoints1[tri_matches[0].queryIdx].pt;
	tri1[1] = keypoints1[tri_matches[1].queryIdx].pt;
	tri1[2] = keypoints1[tri_matches[2].queryIdx].pt;

	tri2[0] = keypoints2[tri_matches[0].trainIdx].pt;
	tri2[1] = keypoints2[tri_matches[1].trainIdx].pt;
	tri2[2] = keypoints2[tri_matches[2].trainIdx].pt;

	//calculate affine transformation of triangle1 to triangle2
	
	//GETTING THE AFFINE TRANSFORM BETWEEN THE IMAGES
	
	Mat affine_transform = getAffineTransform(tri1,tri2);

	cout << "\naffine_transform  = " << endl << " " << affine_transform << endl << endl;

	//apply the transform to img1
	warpAffine(img1,img1_transformed,affine_transform,img1.size());

	//display the warped image
	namedWindow("affine_transform",0);
	resizeWindow("affine_transform",640,360);
	imshow("affine_transform",img1_transformed);

	//blend the warped image with original and also with target image
	Mat affine_shifted;
	addWeighted(img1,alpha,img1_transformed,beta,0.0,affine_shifted);
	Mat affine_compared;
	addWeighted(img2,alpha,img1_transformed,beta,0.0,affine_compared);

	//display the warped image blended with the original
	namedWindow("affine_shifted",0);
	resizeWindow("affine_shifted",960,540);
	imshow("affine_shifted",affine_shifted);

	//display the warped image blended with the target image
	namedWindow("affine_compared",0);
	resizeWindow("affine_compared",960,540);
	imshow("affine_compared",affine_compared);

	imwrite("blend_a.jpg",affine_compared);

	waitKey(0);

	//WARP-PHASE COMPLETED// //WARP-PHASE COMPLETED// //WARP-PHASE COMPLETED//
	return 0;
	//TERMINATING PROGRAM//
}
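Basing the affine transform on only three matches is fragile; a single poor match skews the whole warp. OpenCV 2.4's estimateRigidTransform can instead fit a least-squares 2x3 affine over all matched points; a sketch of that alternative (an addition, not in the original):

// Alternative to getAffineTransform over 3 points: fit an affine to all matches.
Mat affine_all = estimateRigidTransform( pts_img1, pts_img2, true /*fullAffine*/ );
if( !affine_all.empty() )
    warpAffine( img1, img1_transformed, affine_all, img1.size() );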
int main(int argc, char** argv)
{
	if (argc != 3){
		readme(); return -1;
	}

	Mat img_object = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
	Mat img_scene = imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE);

	// -- getting sample(Because of canny)
	sample_obj = cvLoadImage(argv[1], 1);
	sample_scene = cvLoadImage(argv[2], 1);

	if (!img_object.data || !img_scene.data){
		std::cout << " --(!) Error reading images " << std::endl; return -1;
	}

	//-- Step 1: Detect the keypoints using SURF Detector****************************************************
	int minHessian = 20; //Hessian threshold (a common baseline is 200; set very low here)

	SurfFeatureDetector detector(minHessian);

	std::vector<KeyPoint> keypoints_object, keypoints_scene;	// keypoints(cv::KeyPoint)

	detector.detect(img_object, keypoints_object);	// 1
	detector.detect(img_scene, keypoints_scene);	// 2

	//-- Step 2: Calculate descriptors (feature vectors)
	SurfDescriptorExtractor extractor;

	Mat descriptors_object, descriptors_scene;

	extractor.compute(img_object, keypoints_object, descriptors_object);
	extractor.compute(img_scene, keypoints_scene, descriptors_scene);

	//-- Step 3: Matching descriptor vectors using FLANN matcher	// matching keypoints
	FlannBasedMatcher matcher;
	std::vector< DMatch > matches;
	matcher.match(descriptors_object, descriptors_scene, matches);

	double max_dist = 0; double min_dist = 100;

	//-- Quick calculation of max and min distances between keypoints
	for (int i = 0; i < descriptors_object.rows; i++){
		double dist = matches[i].distance;
		if (dist < min_dist) min_dist = dist;
		if (dist > max_dist) max_dist = dist;
	}

	printf("-- Max dist : %f \n", max_dist);
	printf("-- Min dist : %f \n", min_dist);

	//-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )		// good matches (the real keypoints)*************************
	std::vector< DMatch > good_matches;

	for (int i = 0; i < descriptors_object.rows; i++){
		if (matches[i].distance < 3 * min_dist){
			good_matches.push_back(matches[i]);
		}
	}
	
	Mat img_matches;
	drawMatches(img_object, keypoints_object, img_scene, keypoints_scene,
		good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
		vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

	//-- Localize the object
	std::vector<Point2f> obj;			// the keypoint's coordinate <x, y value>
	std::vector<Point2f> scene;

	for (int i = 0; i < good_matches.size(); i++){
		//-- Get the keypoints from the good matches
		obj.push_back(keypoints_object[good_matches[i].queryIdx].pt);
		scene.push_back(keypoints_scene[good_matches[i].trainIdx].pt);
	}

	Mat H = findHomography(obj, scene, CV_RANSAC);

	//-- Get the corners from the image_1 ( the object to be "detected" )
	obj_corners[0] = cvPoint(0, 0);
	obj_corners[1] = cvPoint(img_object.cols, 0);
	obj_corners[2] = cvPoint(img_object.cols, img_object.rows);
	obj_corners[3] = cvPoint(0, img_object.rows);
	
	perspectiveTransform(obj_corners, scene_corners, H);

	//-- Draw lines between the corners (the mapped object in the scene - image_2 )
	line(img_matches, scene_corners[0] + Point2f(img_object.cols, 0), scene_corners[1] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);
	line(img_matches, scene_corners[1] + Point2f(img_object.cols, 0), scene_corners[2] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);
	line(img_matches, scene_corners[2] + Point2f(img_object.cols, 0), scene_corners[3] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);
	line(img_matches, scene_corners[3] + Point2f(img_object.cols, 0), scene_corners[0] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);

	// -- param process (Point2f to Point3f)
	DataProcess2TO3(obj, scene);

	//-- Show detected matches
	imshow("Sample(Good Matches & Object detection)", img_matches);
	
	// -- param analysis
	std::ofstream fs("data.txt");

	std::cout << " 0 : " << scene_corners[0] << " 1 : " << scene_corners[1] << " 2 : " << scene_corners[2] << " 3 : " << scene_corners[3] << std::endl;
	std::cout << " 0 : " << obj_corners[0] << " 1 : " << obj_corners[1] << " 2 : " << obj_corners[2] << " 3 : " << obj_corners[3] << std::endl;

	for (int i = 0; i < good_matches.size(); i++){
		obj_3[i].x = Rounding(obj_3[i].x, 1);
		obj_3[i].y = Rounding(obj_3[i].y, 1);
		scene_3[i].x = Rounding(scene_3[i].x, 1);
		scene_3[i].y = Rounding(scene_3[i].y, 1);

		fs << "obj [" << i << "] : " << obj_3[i];
		fs << " scene [" << i << "] : " << scene_3[i] << std::endl;
	}

	std::ofstream axi3("3dcoor.txt");
	// -- Finding Edge
	//Edge_Map();

	Things_3D();

	for (int i = 0; i < obj_3.size(); i++){
		obj_3[i].z /= 10;
		obj_3[i].z = Rounding(obj_3[i].z, 1);
		axi3 << "obj" << i << obj_3[i] << std::endl;
	}


	// --OpenGL GLUT_DEPTH | GLUT_DOUBLE | 
	glutInitDisplayMode(GLUT_DEPTH | GLUT_RGBA);
	glutInitWindowPosition(100, 100);
	glutInitWindowSize(1300, 1030);
	glutCreateWindow("glsample");
	glutSpecialFunc(SpecialKey);
	glutDisplayFunc(renderscene);
	glutMainLoop();
		
	// -- closing
	cvDestroyWindow("Sample(Good Matches & Object detection)");
	cvDestroyWindow("canny");

	return 0;
}
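Rounding(), DataProcess2TO3(), Things_3D(), Edge_Map() and the obj_3/scene_3/obj_corners/scene_corners containers are project globals that this listing does not show. A guess at Rounding(), assuming it rounds to the given number of decimal places:

// Hypothetical Rounding() helper: round v to 'digits' decimal places.
float Rounding( float v, int digits )
{
    float scale = powf( 10.0f, (float)digits );
    return floorf( v * scale + 0.5f ) / scale;
}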
int main(int argc, char** argv)
{
    const char* algorithm_opt = "--algorithm=";
    const char* maxdisp_opt = "--max-disparity=";
    const char* blocksize_opt = "--blocksize=";
    const char* nodisplay_opt = "--no-display=";
    const char* scale_opt = "--scale=";

    if(argc < 3)
    {
        print_help();
        return 0;
    }
    const char* img1_filename = 0;
    const char* img2_filename = 0;
    const char* intrinsic_filename = 0;
    const char* extrinsic_filename = 0;
    const char* disparity_filename = 0;
    const char* point_cloud_filename = 0;

    enum { STEREO_BM=0, STEREO_SGBM=1, STEREO_HH=2, STEREO_VAR=3 };
    int alg = STEREO_SGBM;
    int SADWindowSize = 0, numberOfDisparities = 0;
    bool no_display = false;
    float scale = 1.f;

    StereoBM bm;
    StereoSGBM sgbm;
    StereoVar var;

    for( int i = 1; i < argc; i++ )
    {
        if( argv[i][0] != '-' )
        {
            if( !img1_filename )
                img1_filename = argv[i];
            else
                img2_filename = argv[i];
        }
        else if( strncmp(argv[i], algorithm_opt, strlen(algorithm_opt)) == 0 )
        {
            char* _alg = argv[i] + strlen(algorithm_opt);
            alg = strcmp(_alg, "bm") == 0 ? STEREO_BM :
                  strcmp(_alg, "sgbm") == 0 ? STEREO_SGBM :
                  strcmp(_alg, "hh") == 0 ? STEREO_HH :
                  strcmp(_alg, "var") == 0 ? STEREO_VAR : -1;
            if( alg < 0 )
            {
                printf("Command-line parameter error: Unknown stereo algorithm\n\n");
                print_help();
                return -1;
            }
        }
        else if( strncmp(argv[i], maxdisp_opt, strlen(maxdisp_opt)) == 0 )
        {
            if( sscanf( argv[i] + strlen(maxdisp_opt), "%d", &numberOfDisparities ) != 1 ||
                numberOfDisparities < 1 || numberOfDisparities % 16 != 0 )
            {
                printf("Command-line parameter error: The max disparity (--maxdisparity=<...>) must be a positive integer divisible by 16\n");
                print_help();
                return -1;
            }
        }
        else if( strncmp(argv[i], blocksize_opt, strlen(blocksize_opt)) == 0 )
        {
            if( sscanf( argv[i] + strlen(blocksize_opt), "%d", &SADWindowSize ) != 1 ||
                SADWindowSize < 1 || SADWindowSize % 2 != 1 )
            {
                printf("Command-line parameter error: The block size (--blocksize=<...>) must be a positive odd number\n");
                return -1;
            }
        }
        else if( strncmp(argv[i], scale_opt, strlen(scale_opt)) == 0 )
        {
            if( sscanf( argv[i] + strlen(scale_opt), "%f", &scale ) != 1 || scale < 0 )
            {
                printf("Command-line parameter error: The scale factor (--scale=<...>) must be a positive floating-point number\n");
                return -1;
            }
        }
        else if( strcmp(argv[i], nodisplay_opt) == 0 )
            no_display = true;
        else if( strcmp(argv[i], "-i" ) == 0 )
            intrinsic_filename = argv[++i];
        else if( strcmp(argv[i], "-e" ) == 0 )
            extrinsic_filename = argv[++i];
        else if( strcmp(argv[i], "-o" ) == 0 )
            disparity_filename = argv[++i];
        else if( strcmp(argv[i], "-p" ) == 0 )
            point_cloud_filename = argv[++i];
        else
        {
            printf("Command-line parameter error: unknown option %s\n", argv[i]);
            return -1;
        }
    }

    if( !img1_filename || !img2_filename )
    {
        printf("Command-line parameter error: both left and right images must be specified\n");
        return -1;
    }

    if( (intrinsic_filename != 0) ^ (extrinsic_filename != 0) )
    {
        printf("Command-line parameter error: either both intrinsic and extrinsic parameters must be specified, or none of them (when the stereo pair is already rectified)\n");
        return -1;
    }

    if( extrinsic_filename == 0 && point_cloud_filename )
    {
        printf("Command-line parameter error: extrinsic and intrinsic parameters must be specified to compute the point cloud\n");
        return -1;
    }

    int color_mode = alg == STEREO_BM ? 0 : -1;
    Mat img1 = imread(img1_filename, color_mode);
    Mat img2 = imread(img2_filename, color_mode);

///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
	Mat leftImage=img1.clone();
	cvtColor(leftImage,leftImage,CV_BGR2GRAY); //the bare constant 6 is CV_BGR2GRAY
	//imshow("good old greyey", img_object);
	//waitKey(0);
	Mat rightImage=img2.clone();
	cvtColor(rightImage,rightImage,CV_BGR2GRAY);
	//Mat img_object = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );
	//Mat img_scene = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE );
	
    if( scale != 1.f )
    {
        int method = scale < 1 ? INTER_AREA : INTER_CUBIC;
        Mat temp1, temp2;
        resize(img1, temp1, Size(), scale, scale, method);
        img1 = temp1;
        resize(img2, temp2, Size(), scale, scale, method);
        img2 = temp2;
    }

    Size img_size = img1.size();

    Rect roi1, roi2;
    Mat Q;

    if( intrinsic_filename )
    {
        // reading intrinsic parameters
        FileStorage fs(intrinsic_filename, CV_STORAGE_READ);
        if(!fs.isOpened())
        {
            printf("Failed to open file %s\n", intrinsic_filename);
            return -1;
        }

        Mat M1, D1, M2, D2;
        fs["M1"] >> M1;
        fs["D1"] >> D1;
        fs["M2"] >> M2;
        fs["D2"] >> D2;

        M1 *= scale;
        M2 *= scale;

        fs.open(extrinsic_filename, CV_STORAGE_READ);
        if(!fs.isOpened())
        {
            printf("Failed to open file %s\n", extrinsic_filename);
            return -1;
        }

        Mat R, T, R1, P1, R2, P2;
        fs["R"] >> R;
        fs["T"] >> T;
		
		printf("in here baby and living the dream");
		
        stereoRectify( M1, D1, M2, D2, img_size, R, T, R1, R2, P1, P2, Q, CALIB_ZERO_DISPARITY, -1, img_size, &roi1, &roi2 );

        Mat map11, map12, map21, map22;
        initUndistortRectifyMap(M1, D1, R1, P1, img_size, CV_16SC2, map11, map12);
        initUndistortRectifyMap(M2, D2, R2, P2, img_size, CV_16SC2, map21, map22);

        Mat img1r, img2r;
        remap(img1, img1r, map11, map12, INTER_LINEAR);
        remap(img2, img2r, map21, map22, INTER_LINEAR);

        img1 = img1r;
        img2 = img2r;
    }

    numberOfDisparities = numberOfDisparities > 0 ? numberOfDisparities : ((img_size.width/8) + 15) & -16;

    bm.state->roi1 = roi1;
    bm.state->roi2 = roi2;
    bm.state->preFilterCap = 9;
    bm.state->SADWindowSize = 9;
    bm.state->minDisparity = 0;
    bm.state->numberOfDisparities = 32;
    bm.state->textureThreshold = 0;
    bm.state->uniquenessRatio = 0;
    bm.state->speckleWindowSize = 0;
    bm.state->speckleRange = 0;
    bm.state->disp12MaxDiff = 0;

    sgbm.preFilterCap = 63;
    sgbm.SADWindowSize = SADWindowSize > 0 ? SADWindowSize : 3;

    int cn = img1.channels();

    sgbm.P1 = 8*cn*sgbm.SADWindowSize*sgbm.SADWindowSize;
    sgbm.P2 = 32*cn*sgbm.SADWindowSize*sgbm.SADWindowSize;
    sgbm.minDisparity = 0;
    sgbm.numberOfDisparities = numberOfDisparities;
    sgbm.uniquenessRatio = 10;
    sgbm.speckleWindowSize = bm.state->speckleWindowSize;
    sgbm.speckleRange = bm.state->speckleRange;
    sgbm.disp12MaxDiff = 1;
    sgbm.fullDP = alg == STEREO_HH;

   /* var.levels = 3; // ignored with USE_AUTO_PARAMS
    var.pyrScale = 0.5; // ignored with USE_AUTO_PARAMS
    var.nIt = 100;
    var.minDisp = -numberOfDisparities;
    var.maxDisp = 50;
    var.poly_n = 7;
    var.poly_sigma = 50.5;
    var.fi = 100.0f;
    var.lambda = 0.01f;
    var.penalization = var.PENALIZATION_PERONA_MALIK; // ignored with USE_AUTO_PARAMS
    var.cycle = var.CYCLE_V; // ignored with USE_AUTO_PARAMS
    var.flags = var.USE_AUTO_PARAMS | var.USE_EQUALIZE_HIST | var.USE_INITIAL_DISPARITY | var.USE_MEDIAN_FILTERING ;*/
    
    var.levels = 3; // ignored with USE_AUTO_PARAMS
    var.pyrScale = 0.5; // ignored with USE_AUTO_PARAMS
    var.nIt = 75;
    var.minDisp = -numberOfDisparities;
    var.maxDisp = 15;
    var.poly_n = 7;
    var.poly_sigma = 50.0;
    var.fi = 20.0f;
    var.lambda = 0.04f;
    var.penalization = var.PENALIZATION_TICHONOV; // ignored with USE_AUTO_PARAMS
    var.cycle = var.CYCLE_V; // ignored with USE_AUTO_PARAMS
    var.flags = var.USE_SMART_ID | var.USE_AUTO_PARAMS | var.USE_EQUALIZE_HIST  | var.USE_INITIAL_DISPARITY | var.USE_MEDIAN_FILTERING ;

    Mat disp, disp8;
    //Mat img1p, img2p, dispp;
    //copyMakeBorder(img1, img1p, 0, 0, numberOfDisparities, 0, IPL_BORDER_REPLICATE);
    //copyMakeBorder(img2, img2p, 0, 0, numberOfDisparities, 0, IPL_BORDER_REPLICATE);

    int64 t = getTickCount();
    if( alg == STEREO_BM )
        bm(leftImage, rightImage, disp);
    else if( alg == STEREO_VAR ) {
        var(img1, img2, disp);
    }
    else if( alg == STEREO_SGBM || alg == STEREO_HH )
        sgbm(img1, img2, disp);
    t = getTickCount() - t;
    printf("Time elapsed: %fms\n", t*1000/getTickFrequency());
    imshow("disp1",disp);
    waitKey(0);

    //disp = dispp.colRange(numberOfDisparities, img1p.cols);
    if( alg != STEREO_VAR )
        disp.convertTo(disp8, CV_8U, 255/(numberOfDisparities*16.));
    else
        disp.convertTo(disp8, CV_8U);
    if( !no_display )
    {
        namedWindow("left", 1);
        imshow("left", img1);
        namedWindow("right", 1);
        imshow("right", img2);
        namedWindow("disparity", 0);
        imshow("disparity", disp8);
        imwrite("disparity.jpg",disp8);
        printf("press any key to continue...");
        fflush(stdout);
        waitKey();
        printf("\n");
    }

    if(disparity_filename)
        imwrite(disparity_filename, disp8);

    if(point_cloud_filename)
    {
        printf("storing the point cloud...");
        fflush(stdout);
        Mat xyz;
        reprojectImageTo3D(disp, xyz, Q, true);
        saveXYZ(point_cloud_filename, xyz);
        printf("\n");
    }
    ///////////////////////////////////////////////////////FANCY STUFF////////////////////////////////////
   //-- Step 1: Detect the keypoints using SURF Detector
  printf("starting fancy fancy stuff");
  int minHessian = 400;

  SurfFeatureDetector detector( minHessian );

  std::vector<KeyPoint> keypoints_1, keypoints_2;

  detector.detect( leftImage, keypoints_1 );
  detector.detect( rightImage, keypoints_2 );

  //-- Step 2: Calculate descriptors (feature vectors)
  SurfDescriptorExtractor extractor;

  Mat descriptors_1, descriptors_2;

  extractor.compute( leftImage, keypoints_1, descriptors_1 );
  extractor.compute( rightImage, keypoints_2, descriptors_2 );

  //-- Step 3: Matching descriptor vectors using FLANN matcher
  FlannBasedMatcher matcher;
  std::vector< DMatch > matches;
  matcher.match( descriptors_1, descriptors_2, matches );

  double max_dist = 0; double min_dist = 100;

  //-- Quick calculation of max and min distances between keypoints
  for( int i = 0; i < descriptors_1.rows; i++ )
  { double dist = matches[i].distance;
    if( dist < min_dist ) min_dist = dist;
    if( dist > max_dist ) max_dist = dist;
  }

  printf("-- Max dist : %f \n", max_dist );
  printf("-- Min dist : %f \n", min_dist );

  //-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist )
  //-- PS.- radiusMatch can also be used here.
  std::vector< DMatch > good_matches;

  for( int i = 0; i < descriptors_1.rows; i++ )
  { if( matches[i].distance <= 2*min_dist )
    { good_matches.push_back( matches[i]); }
  }

  //-- Draw only "good" matches
  Mat img_matches;
  drawMatches( leftImage, keypoints_1, rightImage, keypoints_2,
               good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
               vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

  //-- Show detected matches
  printf("about to show matches");
  imshow( "Good Matches", img_matches );

  for( int i = 0; i < good_matches.size(); i++ )
  { printf( "-- Good Match [%d] Keypoint 1: %d  -- Keypoint 2: %d  \n", i, good_matches[i].queryIdx, good_matches[i].trainIdx ); }

  waitKey(0);
  Mat fundaMat;
  
  //convert into point2f
  std::vector<int> pointIndexesLeft;
    std::vector<int> pointIndexesRight;
    for (std::vector<cv::DMatch>::const_iterator it= good_matches.begin(); it!= good_matches.end(); ++it) {

         // Get the indexes of the selected matched keypoints
         pointIndexesLeft.push_back(it->queryIdx);
         pointIndexesRight.push_back(it->trainIdx);
    }

    // Convert keypoints into Point2f
    std::vector<cv::Point2f> selPointsLeft, selPointsRight;
    cv::KeyPoint::convert(keypoints_1,selPointsLeft,pointIndexesLeft);
    cv::KeyPoint::convert(keypoints_2,selPointsRight,pointIndexesRight);
  
  cv::Mat fundemental= cv::findFundamentalMat(
            cv::Mat(selPointsLeft), // points in first image
            cv::Mat(selPointsRight), // points in second image
            CV_FM_RANSAC);       // RANSAC method

  printf("funda mat funda finished\n");
  waitKey(0);
  
  Mat H1, H2; //stereoRectifyUncalibrated fills these with 3x3 rectification homographies
  stereoRectifyUncalibrated(selPointsRight, selPointsLeft, fundemental, leftImage.size(), H1, H2);

  Mat rectified1(rightImage.size(), rightImage.type());
  cv::warpPerspective(rightImage, rectified1, H1, rightImage.size());
  cv::imwrite("rectified1.jpg", rectified1);
  
  cv::Mat rectified2(leftImage.size(), leftImage.type());
  cv::warpPerspective(leftImage, rectified2, H2, leftImage.size());
  cv::imwrite("rectified2.jpg", rectified2);

printf("rectify this \n");
  waitKey(0);

  
  
  return 0;
  }
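saveXYZ() (used for the point-cloud option above) is not shown here; a sketch consistent with OpenCV's stereo_match.cpp sample, which writes one x y z triple per line and skips points with implausible depth:

// Plausible saveXYZ(): dump reprojected 3D points as plain text.
static void saveXYZ( const char* filename, const Mat& mat )
{
    const double max_z = 1.0e4;
    FILE* fp = fopen( filename, "wt" );
    for( int y = 0; y < mat.rows; y++ )
    {
        for( int x = 0; x < mat.cols; x++ )
        {
            Vec3f point = mat.at<Vec3f>(y, x);
            if( fabs(point[2] - max_z) < FLT_EPSILON || fabs(point[2]) > max_z ) continue;
            fprintf( fp, "%f %f %f\n", point[0], point[1], point[2] );
        }
    }
    fclose( fp );
}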
bool findObjectSURF( cv::Mat objectMat, cv::Mat sceneMat, int hessianValue )
{
    bool objectFound = false;
    float nndrRatio = 0.7f;
    //vector of keypoints
    vector< cv::KeyPoint > keypointsO;
    vector< cv::KeyPoint > keypointsS;

    Mat descriptors_object, descriptors_scene;

    //-- Step 1: Extract keypoints
    SurfFeatureDetector surf(hessianValue);
    surf.detect(sceneMat,keypointsS);
    if(keypointsS.size() < 7) return false; //Not enough keypoints, object not found
    surf.detect(objectMat,keypointsO);
    if(keypointsO.size() < 7) return false; //Not enough keypoints, object not found

    //-- Step 2: Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    extractor.compute( sceneMat, keypointsS, descriptors_scene );
    extractor.compute( objectMat, keypointsO, descriptors_object );

    //-- Step 3: Matching descriptor vectors using FLANN matcher
    cv::FlannBasedMatcher matcher;
    std::vector<std::vector<cv::DMatch> > matches;
    matcher.knnMatch( descriptors_object, descriptors_scene, matches, 2 );
    vector< cv::DMatch > good_matches;
    good_matches.reserve(matches.size());

    for (size_t i = 0; i < matches.size(); ++i)
    {
        if (matches[i].size() < 2)
            continue;

        const cv::DMatch &m1 = matches[i][0];
        const cv::DMatch &m2 = matches[i][1];

        if(m1.distance <= nndrRatio * m2.distance)
            good_matches.push_back(m1);
    }



    if( (good_matches.size() >=7))
    {
        std::cout << "OBJECT FOUND!" << std::endl;

        std::vector< cv::Point2f > obj;
        std::vector< cv::Point2f > scene;

        for( unsigned int i = 0; i < good_matches.size(); i++ )
        {
            //-- Get the keypoints from the good matches
            obj.push_back( keypointsO[ good_matches[i].queryIdx ].pt );
            scene.push_back( keypointsS[ good_matches[i].trainIdx ].pt );
        }

        Mat H = findHomography( obj, scene, CV_RANSAC );

        //-- Get the corners from the image_1 ( the object to be "detected" )
        std::vector< Point2f > obj_corners(4);
        obj_corners[0] = cvPoint(0,0); obj_corners[1] = cvPoint( objectMat.cols, 0 );
        obj_corners[2] = cvPoint( objectMat.cols, objectMat.rows ); obj_corners[3] = cvPoint( 0, objectMat.rows );
        std::vector< Point2f > scene_corners(4);

        perspectiveTransform( obj_corners, scene_corners, H);


        //-- Draw lines between the corners (the mapped object in the scene);
        //scene_corners are in scene coordinates, so draw on sceneMat
        const Scalar color( 0, 255, 0 ); //'color' was undefined in the original; green is an assumed choice
        line( sceneMat, scene_corners[0], scene_corners[1], color, 2 ); //TOP line
        line( sceneMat, scene_corners[1], scene_corners[2], color, 2 );
        line( sceneMat, scene_corners[2], scene_corners[3], color, 2 );
        line( sceneMat, scene_corners[3], scene_corners[0], color, 2 );
        objectFound=true;
    } else {
        std::cout << "OBJECT NOT FOUND!" << std::endl;
    }

    std::cout << "Matches found: " << matches.size() << std::endl;
    std::cout << "Good matches found: " << good_matches.size() << std::endl;

    return objectFound;
}
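A minimal call sketch for findObjectSURF(); the file names and hessian value are assumptions:

// Hypothetical usage of findObjectSURF().
Mat objectImg = imread( "object.png" );
Mat sceneImg  = imread( "scene.png" );
if( findObjectSURF( objectImg, sceneImg, 400 ) )
    imshow( "object outline", sceneImg ); //the outline is drawn into sceneMat's shared buffer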
Example #11
int match(Mat img_1)
{
	  int i=0,j=0;
	
	  Mat img_2 = imread( "/home/ankur/Desktop/data/new.jpg", CV_LOAD_IMAGE_GRAYSCALE );

	  if( !img_1.data || !img_2.data )
	  { std::cout<< " --(!) Error reading images " << std::endl; return -1; }

	  //-- Step 1: Detect the keypoints using SURF Detector
	  int minHessian = 400;
	  float good=0,total=0;

	  SurfFeatureDetector detector( minHessian );

	  std::vector<KeyPoint> keypoints_1, keypoints_2;

	  detector.detect( img_1, keypoints_1 );
	  detector.detect( img_2, keypoints_2 );

	  //-- Step 2: Calculate descriptors (feature vectors)
	  SurfDescriptorExtractor extractor;

	  Mat descriptors_1, descriptors_2;

	  extractor.compute( img_1, keypoints_1, descriptors_1 );
	  extractor.compute( img_2, keypoints_2, descriptors_2 );

	  //-- Step 3: Matching descriptor vectors using FLANN matcher
	  FlannBasedMatcher matcher;
	  std::vector< DMatch > matches;
	  matcher.match( descriptors_1, descriptors_2, matches );

	  double max_dist = 0; double min_dist = 100;

	  //-- Quick calculation of max and min distances between keypoints
	  for( int i = 0; i < descriptors_1.rows; i++ )
	  { double dist = matches[i].distance;
	    if( dist < min_dist ) min_dist = dist;
	    if( dist > max_dist ) max_dist = dist;
	  }

	 // printf("-- Max dist : %f \n", max_dist );
	//  printf("-- Min dist : %f \n", min_dist );

	  //-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist )
	  //-- PS.- radiusMatch can also be used here.
	  std::vector< DMatch > good_matches;

	  for( int i = 0; i < descriptors_1.rows; i++ )
	  {
		  if( matches[i].distance <= 2*min_dist )
	      {
		     good_matches.push_back( matches[i]);
	      }
	  }

	  //-- Draw only "good" matches
	  Mat img_matches;
	  drawMatches( img_1, keypoints_1, img_2, keypoints_2,
	               good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
	               vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

	  //-- Show detected matches
	  float x,y;
	  //imshow( "Good Matches", img_matches );
	  for( int i = 0; i < (int)good_matches.size(); i++ )
	  {

		  x= keypoints_2[good_matches[i].trainIdx].pt.x; //trainIdx indexes keypoints_2 (queryIdx would index keypoints_1)
		  y= keypoints_2[good_matches[i].trainIdx].pt.y;
		  total++;
		  if(y<eye_y1 && y>eye_y2)
			  if(x<eye_x1 && x>eye_x2)
				  good++;
		//  cout<<"("<<x<<","<<y<<")\n";
		  //printf( "-- Good Match [%d] Keypoint 1: %d  -- Keypoint 2: %d  \n", i, good_matches[i].queryIdx, good_matches[i].trainIdx );
	  }
	  data[k++]=(good/total)*100;
	  //cout<<(good/total)*100<<"% matching\n";
	  //cout<<"("<<eye_x1<<","<<eye_y1<<")\n"<<"("<<eye_x2<<","<<eye_y2<<")\n";
	  return 0;
}
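match() depends on globals defined elsewhere in the project; a sketch of plausible declarations (the names come from the code above, the values and sizes are hypothetical):

// Assumed global context for match(); an eye detector elsewhere would set the
// region bounds, and data[] accumulates the per-image match percentage.
float eye_x1, eye_y1, eye_x2, eye_y2; //eye region: eye_x2 < x < eye_x1, eye_y2 < y < eye_y1
float data[100];                      //percentage of good matches inside the eye region
int k = 0;                            //write index into data[]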
bool TrackerForProject::filterRANSAC(cv::Mat newFrame_, vector<Point2f> &corners, vector<Point2f> &nextCorners)
{
	int ransacReprojThreshold = 3;

	cv::Mat prev_(prevFrame_(position_));
	cv::Mat new_(newFrame_);

	// detecting keypoints
    SurfFeatureDetector detector;

	detector.detect(prev_, keypoints1);

    vector<KeyPoint> keypoints2;
    detector.detect(new_, keypoints2);

    // computing descriptors
    SurfDescriptorExtractor extractor;
    Mat descriptors1;
    extractor.compute(prev_, keypoints1, descriptors1);
    Mat descriptors2;
    extractor.compute(newFrame_, keypoints2, descriptors2);

    // matching descriptors
    BFMatcher matcher;
    vector<DMatch> matches;
    matcher.match(descriptors1, descriptors2, matches);
	
	std::cout << matches.size() << std::endl;

	vector<Point2f> points1, points2;

    // fill the arrays with the points
    for (int i = 0; i < matches.size(); i++)
    {
		points1.push_back(keypoints1[matches[i].queryIdx].pt);
    }
    for (int i = 0; i < matches.size(); i++)
    {
        points2.push_back(keypoints2[matches[i].trainIdx].pt);
    }

    Mat H = findHomography(Mat(points1), Mat(points2), CV_RANSAC, ransacReprojThreshold);

    Mat points1Projected;
    perspectiveTransform(Mat(points1), points1Projected, H);

	vector<KeyPoint> keypoints3;

	for(int i = 0; i < matches.size(); i++)
	{
		Point2f p1 = points1Projected.at<Point2f>(i); //points1 was filled in match order, so row i pairs with matches[i]
        Point2f p2 = keypoints2.at(matches[i].trainIdx).pt;
		if(((p2.x - p1.x) * (p2.x - p1.x) +
			(p2.y - p1.y) * (p2.y - p1.y) <= ransacReprojThreshold * ransacReprojThreshold)&& ((p2.x > position_.x - 10) 
			&& (p2.x < position_.x + position_.width + 10) && (p2.y > position_.y - 10) &&(p2.y < position_.y + position_.height + 10)) )
		{
			corners.push_back(keypoints1.at(matches[i].queryIdx).pt);
			nextCorners.push_back(keypoints2.at(matches[i].trainIdx).pt);

			keypoints3.push_back(keypoints2.at(matches[i].trainIdx));
		}		
	}

	for(int i = 0; i < corners.size(); i++)
	{
		corners[i].x += position_.x;
		corners[i].y += position_.y;
	}

	keypoints1 = keypoints3;

	for(int i = 0; i < keypoints1.size(); i++)
	{
		keypoints1[i].pt.x -= position_.x;
		keypoints1[i].pt.y -= position_.y;
	}

    if (keypoints1.empty())
    {
        return false;
    }

    return true;
}
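The distance part of the hand-rolled check above duplicates what findHomography already computes internally; the RANSAC inlier set is available through its optional mask argument (the window-proximity test would still be applied separately). A sketch of that shortcut (an addition, not the original code):

// Alternative: let findHomography report RANSAC inliers via its mask output.
std::vector<unsigned char> inlierMask;
Mat Hm = findHomography( Mat(points1), Mat(points2), CV_RANSAC,
                         ransacReprojThreshold, inlierMask );
std::vector<DMatch> inliers;
for( size_t i = 0; i < matches.size(); i++ )
    if( inlierMask[i] )
        inliers.push_back( matches[i] ); //matches[i] survived RANSAC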
/**
 * @function main
 * @brief Main function
 */
int main( int argc, char** argv )
{
  if( argc != 3 )
  { readme(); return -1; }

  Mat img_1 = imread( argv[1], IMREAD_GRAYSCALE );
  Mat img_2 = imread( argv[2], IMREAD_GRAYSCALE );

  if( !img_1.data || !img_2.data )
  { std::cout<< " --(!) Error reading images " << std::endl; return -1; }

  //-- Step 1: Detect the keypoints using SURF Detector
  int minHessian = 400;

  SurfFeatureDetector detector( minHessian );

  std::vector<KeyPoint> keypoints_1, keypoints_2;

  detector.detect( img_1, keypoints_1 );
  detector.detect( img_2, keypoints_2 );

  //-- Step 2: Calculate descriptors (feature vectors)
  SurfDescriptorExtractor extractor;

  Mat descriptors_1, descriptors_2;

  extractor.compute( img_1, keypoints_1, descriptors_1 );
  extractor.compute( img_2, keypoints_2, descriptors_2 );

  //-- Step 3: Matching descriptor vectors using FLANN matcher
  FlannBasedMatcher matcher;
  std::vector< DMatch > matches;
  matcher.match( descriptors_1, descriptors_2, matches );

  double max_dist = 0; double min_dist = 100;

  //-- Quick calculation of max and min distances between keypoints
  for( int i = 0; i < descriptors_1.rows; i++ )
  { double dist = matches[i].distance;
    if( dist < min_dist ) min_dist = dist;
    if( dist > max_dist ) max_dist = dist;
  }

  printf("-- Max dist : %f \n", max_dist );
  printf("-- Min dist : %f \n", min_dist );

  //-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist )
  //-- PS.- radiusMatch can also be used here.
  std::vector< DMatch > good_matches;

  for( int i = 0; i < descriptors_1.rows; i++ )
  { if( matches[i].distance <= 2*min_dist )
    { good_matches.push_back( matches[i]); }
  }

  //-- Draw only "good" matches
  Mat img_matches;
  drawMatches( img_1, keypoints_1, img_2, keypoints_2,
               good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
               vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

  //-- Show detected matches
  imshow( "Good Matches", img_matches );

  for( int i = 0; i < (int)good_matches.size(); i++ )
  { printf( "-- Good Match [%d] Keypoint 1: %d  -- Keypoint 2: %d  \n", i, good_matches[i].queryIdx, good_matches[i].trainIdx ); }

  waitKey(0);

  return 0;
}
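
// An alternative to the 2*min_dist heuristic above is Lowe's ratio test over
// the two nearest neighbours, which several later examples in this collection
// apply via knnMatch. A minimal sketch under the same OpenCV 2.4-era API; the
// 0.7 ratio is a conventional default, not a value taken from this code.
#include <opencv2/features2d/features2d.hpp>
#include <vector>

static std::vector<cv::DMatch> ratioTestMatch(const cv::Mat& desc1,
                                              const cv::Mat& desc2,
                                              float ratio = 0.7f)
{
    cv::BFMatcher matcher(cv::NORM_L2);
    std::vector<std::vector<cv::DMatch> > knn;
    matcher.knnMatch(desc1, desc2, knn, 2);   // two nearest neighbours per query
    std::vector<cv::DMatch> good;
    for (size_t i = 0; i < knn.size(); i++)
        if (knn[i].size() == 2 && knn[i][0].distance < ratio * knn[i][1].distance)
            good.push_back(knn[i][0]);        // keep only unambiguous matches
    return good;
}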
Exemple #14
0
 void *cv_threadfunc (void *ptr)
 {
     cvNamedWindow( FREENECTOPENCV_WINDOW_D, CV_WINDOW_AUTOSIZE );
     cvNamedWindow( FREENECTOPENCV_WINDOW_N, CV_WINDOW_AUTOSIZE );
     depthimg = cvCreateImage(cvSize(FREENECTOPENCV_DEPTH_WIDTH, FREENECTOPENCV_DEPTH_HEIGHT), IPL_DEPTH_8U, FREENECTOPENCV_DEPTH_DEPTH);
     rgbimg = cvCreateImage(cvSize(FREENECTOPENCV_RGB_WIDTH, FREENECTOPENCV_RGB_HEIGHT), IPL_DEPTH_8U, FREENECTOPENCV_RGB_DEPTH);
     tempimg = cvCreateImage(cvSize(FREENECTOPENCV_RGB_WIDTH, FREENECTOPENCV_RGB_HEIGHT), IPL_DEPTH_8U, FREENECTOPENCV_RGB_DEPTH);

    int index=0;
    // use image polling
    while (1) {

            //lock mutex for depth image
            pthread_mutex_lock( &mutex_depth );
            // show image to window
            cvCvtColor(depthimg,tempimg,CV_GRAY2BGR);
            cvCvtColor(tempimg,tempimg,CV_HSV2BGR);
            cvShowImage(FREENECTOPENCV_WINDOW_D,tempimg);
            //unlock mutex for depth image
            pthread_mutex_unlock( &mutex_depth );

            //lock mutex for rgb image
            pthread_mutex_lock( &mutex_rgb );
            // show image to window
            cvCvtColor(rgbimg,tempimg,CV_BGR2RGB);

            //-- Step 0: Initialization
            img=tempimg;
            //-- Step 1: Detect the keypoints using SURF Detector
            //-- Step 2: Calculate descriptors (feature vectors)
            int minHessian = 1000;

            SurfFeatureDetector detector( minHessian );
            SurfDescriptorExtractor extractor;
            if(first_time)
            {
                img_old=tempimg;
                detector.detect( img_old, keypoints_old );
                extractor.compute( img_old, keypoints_old, descriptors_old );
                first_time=false;
            }
            detector.detect( img, keypoints );
            extractor.compute( img, keypoints, descriptors );
            //printf("--keypoints: %d, %d \n", keypoints_old.size(), keypoints.size());

            //-- Step 3: Matching descriptor vectors using FLANN matcher
            FlannBasedMatcher matcher;
            std::vector< DMatch > matches;
            matcher.match( descriptors_old, descriptors, matches );
            //printf("--Matches: %d\n", matches.size());

            //-- PS.- radiusMatch can also be used here.
            //-- Draw only matches
            Mat img_matches;
            drawMatches( img_old, keypoints_old, img, keypoints,
                        matches, img_matches, Scalar::all(-1), Scalar::all(-1),
                        vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

            imshow(FREENECTOPENCV_WINDOW_N, img_matches);

            //Copy Data
            img_old=img.clone();
            keypoints_old=keypoints;
            descriptors_old=descriptors;

            //unlock mutex
            pthread_mutex_unlock( &mutex_rgb );

            // wait for quit key
            if( cvWaitKey( 15 )==27 ) break;

            index++;
    }
    pthread_exit(NULL);
}
int main(int argc, char* argv[])
{
	Mat image_next;
	VideoCapture cap(0);
	if(!cap.isOpened()) return -1;

	//cap.set(CV_CAP_PROP_FRAME_HEIGHT, 240);	
	//cap.set(CV_CAP_PROP_FRAME_WIDTH, 320);	

	clicks = 0;
	cap >> image;
	cout << "rows: " << image.rows << " cols: " << image.cols << endl;
	namedWindow( "Click Points", CV_WINDOW_AUTOSIZE);
	imshow("Click Points", image);	

	setMouseCallback("Click Points", onMouse2, 0);

	int points_clicked = 0;
	int check = 1;

	

	//imshow("Click Points", image);
	cout << "Camera Calibration" << endl;
	Size pattern(7,7);

    
	//calibrate camera
	/*
	vector<vector<Point3f> > object_points;
	
	vector<Point3f> obj;
	for (int x = 0; x < pattern.height; x++)
	{
		for (int y = 0; y < pattern.width; y++)
		{
			obj.push_back(Point3f(x,y,0));
		}
	}
	int num_images = 0;
	vector<vector<Point2f> > image_points;
	vector<Point2f> corners;
	double t = (double)getTickCount();
	double t2;
	while (check)
	{
		cap >> image;
		char key = waitKey(30);
		switch(key) {
		case 27:
			cout << "ESC was pressed" << endl;
			if (num_images > 0) check = 0;
			break;
		case 'p':
			if (findChessboardCorners(image, pattern, corners))
			{
				drawChessboardCorners(image, pattern, corners, true);
				image_points.push_back(corners);
				object_points.push_back(obj);
				num_images++;
			}
			else
				drawChessboardCorners(image, pattern, corners, false);
			//check = 0;
			imshow("Click Points", image);
		    waitKey(1000);	
			
		default:
			imshow("Click Points", image);
		}
	    
	}
    
    Mat cameraMatrix = Mat::eye(3, 3, CV_64F);
	Mat distCoeffs = Mat::zeros(8, 1, CV_64F);
	vector<Mat> rvecs, tvecs;
	cout << cameraMatrix << endl;
	cout << distCoeffs << endl;
	double rpe = calibrateCamera(object_points, image_points, image.size(), cameraMatrix, distCoeffs,
								 rvecs, tvecs);

	cout << "Camera Matrix:" << endl << cameraMatrix << endl;
	cout << "distCoeffs:" << endl << distCoeffs << endl;
	cout << "reprojection error: " << rpe << endl;
    */
	Mat cameraMatrix = Mat::eye(3, 3, CV_64F);
	Mat distCoeffs = Mat::zeros(8, 1, CV_64F);
	cameraMatrix.at<double>(0,0) = 805.681948728115;
	cameraMatrix.at<double>(0,2) = 322.6647811002137;
	cameraMatrix.at<double>(1,1) = 805.3642730837905;
	cameraMatrix.at<double>(1,2) = 231.1740947838633;
	double rpe = 0.273059;

	check = 1;
	cout << "Click 4 points of your plane" << endl;
    //click 4 points
    while (check)
	{
		cap >> image;
		char key = waitKey(30);

		switch(key) {
		case 27:
			cout << "ESC was pressed" << endl;
			check = 0;
            break;
		case 'p':
			if (points_clicked)
				break;	
			clicks = 0;	
			setMouseCallback("Click Points", onMouse, 0);
			while (clicks < 4)
			{
				imshow("Click Points", image);
				waitKey(1);
			}
			setMouseCallback("Click Points", onMouse2, 0);
			points_clicked = 1;
			check = 0;
		default:
			imshow("Click Points", image);
		}
	}


	cout << "ITS HOMOGRAPHY TIME!" << endl;
	//H_w^0 homography
	
	Mat H_wi;
	vector<Point2f> unit_square(4);
	unit_square[0] = Point(0,0);
	unit_square[1] = Point(0,1);
	unit_square[2] = Point(1,1);
	unit_square[3] = Point(1,0);

	//find homography between unit_square and im0_corners
	Mat H = findHomography(unit_square, im0_corners);
	//cout << "Homography: " << H << endl;
	Mat K_inv, KM;
	invert(cameraMatrix, K_inv);
	KM = K_inv*H;
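	// the ratio s of the squared norms of the first two columns of K_inv*H
	// is computed below and used as the world-plane aspect ratio
	// (the unit square maps to a 1 x s rectangle)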

	double h2 = pow(KM.at<double>(0,1),2) + pow(KM.at<double>(1,1),2) +	pow(KM.at<double>(2,1),2);
	double h1 = pow(KM.at<double>(0,0),2) + pow(KM.at<double>(1,0),2) +	pow(KM.at<double>(2,0),2);
	double s = h2 / h1;

	cout << "s = " << s << endl;
	
    Mat scale = Mat::eye(3, 3, CV_64F);
	scale.at<double>(1,1) = 1 / s;

	cout << "detect features points" << endl;
	H_wi = H*scale;

	
	//test H_w0
    Mat mp0 = (Mat_<double>(3,1) << 0, 0, 1);
	Mat mp1 = (Mat_<double>(3,1) << 0, s, 1);
	Mat mp2 = (Mat_<double>(3,1) << 1, s, 1);
	Mat mp3 = (Mat_<double>(3,1) << 1, 0, 1);

	Point p0 = transform_corner(H_wi, mp0);
	Point p1 = transform_corner(H_wi, mp1);
	Point p2 = transform_corner(H_wi, mp2);
	Point p3 = transform_corner(H_wi, mp3);


	cout << "Testing H_w0..." << endl;
	circle(image, p0, 4, Scalar(255,255,255), -1); 
	circle(image, p1, 4, Scalar(255,255,0), -1); 
	circle(image, p2, 4, Scalar(0,0,0), -1); 
	circle(image, p3, 4, Scalar(255,0,255), -1);
	imshow("Click Points", image);
	cout << "H_w0*p:" << endl
		 << "(x,y)" << endl 
		 << "(" << p0.x << "," << p0.y << ")" << endl
		 << "(" << p1.x << "," << p1.y << ")" << endl
		 << "(" << p2.x << "," << p2.y << ")" << endl
		 << "(" << p3.x << "," << p3.y << ")" << endl;
	check = 1;

	
	//GoodFeaturesToTrackDetector detector(500, 0.01, 1, 3, true, 0.04);
	SurfFeatureDetector detector(400);
    vector<KeyPoint> keypoints_0, keypoints_next;
	detector.detect(image, keypoints_0);

	//BriefDescriptorExtractor extractor;
	//FREAK* extractor = new FREAK();
	SurfDescriptorExtractor extractor;

	Mat descriptors_0, descriptors_next;
	extractor.compute(image, keypoints_0, descriptors_0);
	//extractor.compute(image, keypoints_0, descriptors_0);

	//FlannBasedMatcher matcher;
	BFMatcher matcher( NORM_L2, true);
	//BFMatcher matcher(NORM_HAMMING, true);
	char key = 0;
	Mat H_ii1, H_wi1;
	Point p0_1, p1_1, p2_1, p3_1;
	while (check)
	{
		key = waitKey(1);
		switch(key) {
		case 'q':
			check = 0;
			break;
		case 'f':
			cap >> image_next;
			H_ii1 = find_next_homography(image, image_next, keypoints_0, descriptors_0,
							detector, extractor, matcher, keypoints_next, descriptors_next);
			H_wi1 = H_ii1 * H_wi;
			p0_1 = transform_corner(H_wi1, mp0);
			p1_1 = transform_corner(H_wi1, mp1);
			p2_1 = transform_corner(H_wi1, mp2);
			p3_1 = transform_corner(H_wi1, mp3);
			
			drawPlane(image_next, p0_1, p1_1, p2_1, p3_1);
			imshow("H_ii1", image_next);
			keypoints_0 = keypoints_next;
			descriptors_0 = descriptors_next;
			image = image_next; 
			H_wi = H_wi1;
		default:
			break;
		}
			cap >> image_next;
			H_ii1 = find_next_homography(image, image_next, keypoints_0, descriptors_0,
							detector, extractor, matcher, keypoints_next, descriptors_next);
			H_wi1 = H_ii1 * H_wi;
			p0_1 = transform_corner(H_wi1, mp0);
			p1_1 = transform_corner(H_wi1, mp1);
			p2_1 = transform_corner(H_wi1, mp2);
			p3_1 = transform_corner(H_wi1, mp3);
			
			drawPlane(image_next, p0_1, p1_1, p2_1, p3_1);
			imshow("H_ii1", image_next);
			keypoints_0 = keypoints_next;
			descriptors_0 = descriptors_next;
			image = image_next; 
			H_wi = H_wi1;
		 
	}
	waitKey(0);
	return 0;

}
Exemple #16
0
//
// Following an example from
// http://ramsrigoutham.com/2012/11/22/panorama-image-stitching-in-opencv/
//
void calcHomographyFeature(const Mat& image1, const Mat& image2)
{
    static const char* difffeat = "Difference feature registered";

    Mat gray_image1;
    Mat gray_image2;
    // Convert to Grayscale
    if(image1.channels() != 1)
        cvtColor(image1, gray_image1, CV_RGB2GRAY);
    else
        image1.copyTo(gray_image1);
    if(image2.channels() != 1)
        cvtColor(image2, gray_image2, CV_RGB2GRAY);
    else
        image2.copyTo(gray_image2);

    //-- Step 1: Detect the keypoints using SURF Detector
    int minHessian = 400;

    SurfFeatureDetector detector(minHessian);

    std::vector<KeyPoint> keypoints_object, keypoints_scene;

    detector.detect(gray_image1, keypoints_object);
    detector.detect(gray_image2, keypoints_scene);

    //-- Step 2: Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;

    Mat descriptors_object, descriptors_scene;

    extractor.compute(gray_image1, keypoints_object, descriptors_object);
    extractor.compute(gray_image2, keypoints_scene, descriptors_scene);

    //-- Step 3: Matching descriptor vectors using FLANN matcher
    FlannBasedMatcher matcher;
    std::vector< DMatch > matches;
    matcher.match( descriptors_object, descriptors_scene, matches );

    double max_dist = 0; double min_dist = 100;

    //-- Quick calculation of max and min distances between keypoints
    for(int i = 0; i < descriptors_object.rows; i++)
    {
        double dist = matches[i].distance;
        if( dist < min_dist ) min_dist = dist;
        if( dist > max_dist ) max_dist = dist;
    }

    //-- Use only "good" matches (i.e. whose distance is less than 3*min_dist )
    std::vector<DMatch> good_matches;

    for(int i = 0; i < descriptors_object.rows; i++) {
        if(matches[i].distance < 3*min_dist) {
            good_matches.push_back( matches[i]);
        }
    }
    std::vector< Point2f > obj;
    std::vector< Point2f > scene;

    for(size_t i = 0; i < good_matches.size(); i++)
    {
        //-- Get the keypoints from the good matches
        obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
        scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
    }

    // Find the Homography Matrix
    Mat H = findHomography( obj, scene, CV_RANSAC );
    // Use the Homography Matrix to warp the images
    Mat result;
    Mat Hinv = H.inv();
    warpPerspective(image2, result, Hinv, image1.size());

    cout << "--- Feature method\n" << H << endl;
    
    Mat imf1, resf;
    image1.convertTo(imf1, CV_64FC3);
    result.convertTo(resf, CV_64FC3);
    showDifference(imf1, resf, difffeat);
}
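
// showDifference is defined elsewhere in the original source. A plausible
// sketch of such a helper (purely an assumption about its behaviour, not the
// author's implementation): visualize the per-pixel difference of the two
// CV_64FC3 images passed above, scaled into a displayable 0..255 range.
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>

static void showDifferenceSketch(const cv::Mat& a, const cv::Mat& b,
                                 const char* windowName)
{
    cv::Mat diff, diff32, gray, display;
    cv::absdiff(a, b, diff);                  // |a - b| per channel
    diff.convertTo(diff32, CV_32FC3);         // cvtColor does not accept CV_64F input
    cv::cvtColor(diff32, gray, CV_BGR2GRAY);  // collapse to one channel
    cv::normalize(gray, display, 0, 255, cv::NORM_MINMAX, CV_8U);
    cv::imshow(windowName, display);
}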
int main( int argc, char** argv )
{
  if( argc != 3 )
  { readme(); return -1; }

  Mat img_1 = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );
  Mat img_2 = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE );

  if( !img_1.data || !img_2.data )
  { printf(" --(!) Error reading images \n"); return -1; }

  //-- Step 1: Detect the keypoints using SURF Detector
  int minHessian = 400;

  SurfFeatureDetector detector( minHessian );

  std::vector<KeyPoint> keypoints_1, keypoints_2;

  detector.detect( img_1, keypoints_1 );
  detector.detect( img_2, keypoints_2 );

  //-- Step 2: Calculate descriptors (feature vectors)
  SurfDescriptorExtractor extractor;

  Mat descriptors_1, descriptors_2;

  extractor.compute( img_1, keypoints_1, descriptors_1 );
  extractor.compute( img_2, keypoints_2, descriptors_2 );

  //-- Step 3: Matching descriptor vectors using FLANN matcher
  FlannBasedMatcher matcher;
  std::vector< DMatch > matches;
  matcher.match( descriptors_1, descriptors_2, matches );

  //-- Draw matches
  Mat img_matches;
  drawMatches( img_1, keypoints_1, img_2, keypoints_2,
               matches, img_matches, Scalar::all(-1), Scalar::all(-1),
               vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

  std::vector< Point2f > obj;
  std::vector< Point2f > scene;

  for( int i = 0; i < (int)matches.size(); i++ )
  {
    //-- Get the keypoints from the matches
    obj.push_back( keypoints_1[ matches[i].queryIdx ].pt );
    scene.push_back( keypoints_2[ matches[i].trainIdx ].pt );
  }
 
  //-- Show detected matches
  imshow( " Matches", img_matches );

  for( int i = 0; i < (int)matches.size(); i++ )
  { printf( "--  Match [%d] Keypoint 1: %d  -- Keypoint 2: %d  \n", i, matches[i].queryIdx, matches[i].trainIdx ); }
  
  // Mat H = findHomography( obj, scene, CV_RANSAC );
  Mat H = ransac(obj, scene);
  // Use the Homography Matrix to warp the images
  cv::Mat result;
  warpPerspective(img_1, result, H, cv::Size(img_1.cols+img_2.cols, img_1.rows));
  cv::Mat half(result, cv::Rect(0, 0, img_2.cols, img_2.rows));
  img_2.copyTo(half);
  imshow( "Result", result );
  imwrite("result.png", result);
  waitKey(0);

  return 0;
}
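
// ransac() above is a user-defined routine whose body is not shown here. One
// plausible reading (an assumption, not the author's code) is a thin wrapper
// around OpenCV's RANSAC homography estimation with an explicit reprojection
// threshold:
#include <opencv2/calib3d/calib3d.hpp>
#include <vector>

static cv::Mat ransacSketch(const std::vector<cv::Point2f>& obj,
                            const std::vector<cv::Point2f>& scene,
                            double reprojThreshold = 3.0)
{
    // findHomography runs RANSAC internally when CV_RANSAC is requested
    return cv::findHomography(obj, scene, CV_RANSAC, reprojThreshold);
}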
Exemple #18
0
int main()
{
    int imgNum=300;

    vector<Mat>  imgVec;
    imgVec.resize(imgNum);

    vector<string> nameVec;
    nameVec.resize(imgNum);

    vector<vector<KeyPoint> > keyPointsVec;
    keyPointsVec.resize(imgNum);

    vector<Mat> descriptorsVec;
    descriptorsVec.resize(imgNum);

    for(int i=0; i<imgNum; i++)
    {
        char fileName[1024] = {0};

        sprintf(fileName, "/home/lili/workspace/SLAM/vocabTree/Lip6IndoorDataSet/Images/lip6kennedy_bigdoubleloop_%06d.ppm", i);

        nameVec[i]=string(fileName);

        imgVec[i]=imread(nameVec[i], CV_LOAD_IMAGE_GRAYSCALE);
    }

    //-- Step 1: Detect the keypoints using SURF Detector
    int minHessian = 400;

    SurfFeatureDetector detector(minHessian);

    SurfDescriptorExtractor extractor;

    vector<unsigned int> labels;
    for(int i=0; i<imgNum; i++)
    {
        detector.detect(imgVec[i], keyPointsVec[i]);

        extractor.compute(imgVec[i], keyPointsVec[i], descriptorsVec[i]);
        for(int j = 0; j<descriptorsVec[i].rows; j++)
        {
            labels.push_back(i);
        }
    }

    Mat all_descriptors;

    for(int i = 0; i<descriptorsVec.size(); i++)
    {
        all_descriptors.push_back(descriptorsVec[i]);
    }

    assert(labels.size() == all_descriptors.rows);
    cout<<"all_descriptors.rows "<<all_descriptors.rows<<endl;
    cout<<"hahha1 "<<endl;
    Vocabulary vocab(imgNum);

    vocab.indexedDescriptors_ = all_descriptors;


    vector<KeyPoint> newKeypoints;
    Mat newDescriptors;

    ///add new image to the randomized kd tree
    {
        string newImageName="/home/lili/workspace/SLAM/vocabTree/Lip6IndoorDataSet/Images/lip6kennedy_bigdoubleloop_000350.ppm";
        Mat newImg=imread(newImageName, CV_LOAD_IMAGE_GRAYSCALE);
        detector.detect(newImg, newKeypoints);
        extractor.compute(newImg, newKeypoints, newDescriptors);
        cout<<"newDescriptors.rows: "<<newDescriptors.rows<<endl;
    }

    vocab.notIndexedDescriptors_ = newDescriptors;

    ///clustering
    int clustersNum;
    Mat clusters(15000,64,CV_32F);
    //Mat float_all_descriptors;


    clustersNum=vocab.clustering(all_descriptors, clusters);
    cout<<"clustersNum  "<<clustersNum<<endl;

    ///flann build tree
    clock_t begin1 = clock();
    vocab.update();
    clock_t end1 = clock();
    double buildTree_time = double(end1 - begin1) / CLOCKS_PER_SEC;
    cout.precision(5);
    cout<<"buildTree time "<<buildTree_time<<endl;


    cout<<"hahha2 "<<endl;
    vector<KeyPoint> queryKeypoints;
    Mat queryDescriptors;

    ///QueryImage
    {
        string queryImageName="/home/lili/workspace/SLAM/vocabTree/Lip6IndoorDataSet/Images/lip6kennedy_bigdoubleloop_000381.ppm";
        Mat queryImg=imread(queryImageName, CV_LOAD_IMAGE_GRAYSCALE);
        detector.detect(queryImg, queryKeypoints);
        extractor.compute(queryImg, queryKeypoints, queryDescriptors);
        cout<<"queryDescriptors.rows: "<<queryDescriptors.rows<<endl;
    }



    Mat indices;
    Mat results;
    Mat dists;
    int k=1;
    vector<int> imageLabelsVec;

    multimap<int, int> imageScore;
    vector<RankedScore> rankedScore;
    vocab.search_image(queryDescriptors, indices, dists, k, imgNum, labels, imageLabelsVec, imageScore, rankedScore);

    vector<double> likelihood;
    vocab.computeLikelihood(imgNum, rankedScore, likelihood);

    return 0;
}
Exemple #19
0
//Takes Mat object and finds its keypoints, then compares against the keypoints in segmentedCapture
//If there are 4 or more matching keypoints, then it reports a match
bool match(Mat object, IplImage* segmentedCapture, int i)
{
	printf("Size check of segmented capture: height: %d, width: %d\n", segmentedCapture->height, segmentedCapture->width);
	printf("attempting to read object now\n");

	bool matchFound = false;
	if( !object.data )
	{
		std::cout<< "Error reading object " << std::endl;
		return false; // match() returns bool, so report failure as false
	}
	int minHessian = 500;

	SurfFeatureDetector detector(minHessian);
	//Detect the keypoints using SURF Detector

	std::vector<KeyPoint> kp_object;
	detector.detect( object, kp_object );

	//Calculate descriptors (feature vectors)
	Mat des_object;
	SurfDescriptorExtractor extractor;

	extractor.compute( object, kp_object, des_object );
	printf("Number of descriptors found for initial object: %d\n", (int)kp_object.size());

	FlannBasedMatcher matcher;

	char windowName[20];
	sprintf(windowName, "Match %d", i);
	destroyWindow(windowName);
	namedWindow(windowName);

	std::vector<Point2f> obj_corners(4);
	obj_corners[0] = cvPoint(0,0);
	obj_corners[1] = cvPoint( object.cols, 0 );
	obj_corners[2] = cvPoint( object.cols, object.rows );
	obj_corners[3] = cvPoint( 0, object.rows );

	Mat des_image, img_matches;
	std::vector<KeyPoint> kp_image;
	std::vector<vector<DMatch > > matches;
	std::vector<DMatch > good_matches;
	std::vector<Point2f> obj;
	std::vector<Point2f> scene;
	std::vector<Point2f> scene_corners(4);
	Mat H;
	Mat image;


	cvResetImageROI(segmentedCapture);
	printf("creating image to store it in");
	//	IplImage *image2 = cvCreateImage(cvSize(segmentedCapture->width, segmentedCapture->height), IPL_DEPTH_8U,1);
	printf("about to convert to gray\n");
	//	cvCvtColor(segmentedCapture, image2, CV_BGR2GRAY);
	//
	//	printf("converted to gray\n");
	Mat matCon(segmentedCapture);
	image = segmentedCapture;
	//	printf("before detection\n");
	detector.detect( image, kp_image );
	//	printf("after detection, number of descriptors for detected object: %d\n", kp_image.size());
	extractor.compute( image, kp_image, des_image );
	//	printf("after computation  of extraction\n");

	if(des_image.empty()){
		printf("key points from capture frame are empty\n");
	} else {

		matcher.knnMatch(des_object, des_image, matches, 2);
		//		matcher.match(des_object, des_image, matches);
		printf("after knnmatch: matches.size() is %d\n", matches.size());
		for(int j = 0; j < min(des_image.rows-1,(int) matches.size()); j++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
		{
			if((matches[j][0].distance < 0.5*(matches[j][1].distance)) && ((int) matches[j].size()<=2 && (int) matches[j].size()>0))
			{
				good_matches.push_back(matches[j][0]);
				//			printf("Outer loop is on: %d, Number of matches is: %d\n", i, (int)good_matches.size());
			}
		}

		//Draw only "good" matches
		drawMatches( object, kp_object, image, kp_image, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

		if (good_matches.size() >= 4)
		{
			matchFound = true;
			printf("Found %d matched points for detectedObject %d", good_matches.size(), i );
			for( int i = 0; i < good_matches.size(); i++ )
			{
				//Get the keypoints from the good matches
				obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
				scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
			}

			H = findHomography( obj, scene, CV_RANSAC );

			perspectiveTransform( obj_corners, scene_corners, H);

			//Draw lines between the corners (the mapped object in the scene image )
			line( img_matches, scene_corners[0] + Point2f( object.cols, 0), scene_corners[1] + Point2f( object.cols, 0), Scalar(0, 255, 0), 4 );
			line( img_matches, scene_corners[1] + Point2f( object.cols, 0), scene_corners[2] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
			line( img_matches, scene_corners[2] + Point2f( object.cols, 0), scene_corners[3] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
			line( img_matches, scene_corners[3] + Point2f( object.cols, 0), scene_corners[0] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
		}
		imshow( windowName, img_matches );

	}
	return matchFound;
}
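
// match() above relies on the OpenCV 2.x rule that an IplImage* converts to
// cv::Mat as a non-owning header over the same pixel buffer. A minimal sketch
// making that explicit; wrapIplImage is an illustrative name, not original code.
#include <opencv2/core/core.hpp>

static cv::Mat wrapIplImage(IplImage* ipl, bool copyData)
{
    // copyData=false shares ipl's buffer; copyData=true makes an owned deep copy
    return cv::Mat(ipl, copyData);
}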
Exemple #20
0
int main(int argc, char * argv[])
{
	if(argc < 2)
	{
		std::cout << "Use: tracker <target_image>" << std::endl;
        return -1;
	}
	
    Mat mTarget = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );

    if( !mTarget.data )
    {
        std::cout<< "Error reading target image." << std::endl;
        return -1;
    }

    //Detect the keypoints using SURF Detector
    int minHessian = 500;

    SurfFeatureDetector detector( minHessian );
    std::vector<KeyPoint> kpTarget;

    detector.detect( mTarget, kpTarget );

    //Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    Mat des_object;

    extractor.compute( mTarget, kpTarget, des_object );

    FlannBasedMatcher matcher;

    //VideoCapture cap("http://192.168.1.200/videostream.cgi?user=admin&pwd=31415LAS&resolution=32&dummy=.mjpg");
	VideoCapture cap("http://nidq.no-ip.org/videostream.cgi?user=admin&pwd=31415LAS&resolution=32&dummy=.mjpg");

    namedWindow("Capture");

    std::vector<Point2f> tgt_corners(4);

    //Get the corners from the object
    tgt_corners[0] = cvPoint(0,0);
    tgt_corners[1] = cvPoint( mTarget.cols, 0 );
    tgt_corners[2] = cvPoint( mTarget.cols, mTarget.rows );
    tgt_corners[3] = cvPoint( 0, mTarget.rows );

    char key = 'a';
    int framecount = 0;
    while (key != 27)
    {
        Mat frame;
        cap >> frame;

        if (framecount < 5)
        {
            framecount++;
            continue;
        }

        Mat des_image, img_matches;
        std::vector<KeyPoint> kpImage;
        std::vector<vector<DMatch > > matches;
        std::vector<DMatch > good_matches;
        std::vector<Point2f> obj;
        std::vector<Point2f> scene;
        std::vector<Point2f> scene_corners(4);
        Mat H;
        Mat image;

        cvtColor(frame, image, CV_RGB2GRAY);

        detector.detect( image, kpImage );
        extractor.compute( image, kpImage, des_image );

        matcher.knnMatch(des_object, des_image, matches, 2);

        for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
        {
            if((matches[i][0].distance < 0.6*(matches[i][1].distance)) && ((int) matches[i].size()<=2 && (int) matches[i].size()>0))
            {
                good_matches.push_back(matches[i][0]);
            }
        }

        //Draw only "good" matches
        drawMatches( mTarget, kpTarget, image, kpImage, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

        if (good_matches.size() >= 4)
        {
            for( int i = 0; i < good_matches.size(); i++ )
            {
                //Get the keypoints from the good matches
                obj.push_back( kpTarget[ good_matches[i].queryIdx ].pt );
                scene.push_back( kpImage[ good_matches[i].trainIdx ].pt );
            }

            H = findHomography( obj, scene, CV_RANSAC );

            perspectiveTransform( tgt_corners, scene_corners, H);

            //Draw lines between the corners (the mapped object in the scene image )
            line( img_matches, scene_corners[0] + Point2f( mTarget.cols, 0), scene_corners[1] + Point2f( mTarget.cols, 0), Scalar(0, 255, 0), 4 );
            line( img_matches, scene_corners[1] + Point2f( mTarget.cols, 0), scene_corners[2] + Point2f( mTarget.cols, 0), Scalar( 0, 255, 0), 4 );
            line( img_matches, scene_corners[2] + Point2f( mTarget.cols, 0), scene_corners[3] + Point2f( mTarget.cols, 0), Scalar( 0, 255, 0), 4 );
            line( img_matches, scene_corners[3] + Point2f( mTarget.cols, 0), scene_corners[0] + Point2f( mTarget.cols, 0), Scalar( 0, 255, 0), 4 );
        }

        //Show detected matches
        imshow( "Capture", img_matches );

        key = waitKey(1);
    }
    return 0;
}
Mat OpenCVImageProcessor::drawDetectedObject(Mat srcImage,Mat sceneImage){
    Mat img_object = srcImage;
    Mat img_scene =sceneImage;
    
    //-- Step 1: Detect the keypoints using SURF Detector
    int minHessian = 400;
    
    SurfFeatureDetector detector( minHessian );
    
    std::vector<KeyPoint> keypoints_object, keypoints_scene;
    
    detector.detect( img_object, keypoints_object );
    detector.detect( img_scene, keypoints_scene );

    //-- Step 2: Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    
    Mat descriptors_object, descriptors_scene;
    
    extractor.compute( img_object, keypoints_object, descriptors_object );
    extractor.compute( img_scene, keypoints_scene, descriptors_scene );

    //-- Step 3: Matching descriptor vectors using FLANN matcher
    FlannBasedMatcher matcher;
    std::vector< DMatch > matches;
    matcher.match( descriptors_object, descriptors_scene, matches );
    
    double max_dist = 0; double min_dist = 100;
    
    //-- Quick calculation of max and min distances between keypoints
    for( int i = 0; i < descriptors_object.rows; i++ )
    { double dist = matches[i].distance;
        if( dist < min_dist ) min_dist = dist;
        if( dist > max_dist ) max_dist = dist;
    }
    
    printf("-- Max dist : %f \n", max_dist );
    printf("-- Min dist : %f \n", min_dist );
    
    //-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
    std::vector< DMatch > good_matches;
    
    for( int i = 0; i < descriptors_object.rows; i++ )
    { if( matches[i].distance < 3*min_dist )
    { good_matches.push_back( matches[i]); }
    }
    
    Mat img_matches;
    drawMatches( img_object, keypoints_object, img_scene, keypoints_scene,
                good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
                std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
    
    //-- Localize the object
    std::vector<Point2f> obj;
    std::vector<Point2f> scene;
    
    for( int i = 0; i < good_matches.size(); i++ )
    {
        //-- Get the keypoints from the good matches
        obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
        scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
    }
    
    Mat H = findHomography( obj, scene, CV_RANSAC );
    
    //-- Get the corners from the image_1 ( the object to be "detected" )
    std::vector<Point2f> obj_corners(4);
    obj_corners[0] = cvPoint(0,0); obj_corners[1] = cvPoint( img_object.cols, 0 );
    obj_corners[2] = cvPoint( img_object.cols, img_object.rows ); obj_corners[3] = cvPoint( 0, img_object.rows );
    std::vector<Point2f> scene_corners(4);
    
    perspectiveTransform( obj_corners, scene_corners, H);
    //-- Draw lines between the corners (the mapped object in the scene - image_2 )
    line( img_matches, scene_corners[0] + Point2f( img_object.cols, 0), scene_corners[1] + Point2f( img_object.cols, 0), Scalar(0, 255, 0), 4 );
    line( img_matches, scene_corners[1] + Point2f( img_object.cols, 0), scene_corners[2] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
    line( img_matches, scene_corners[2] + Point2f( img_object.cols, 0), scene_corners[3] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
    line( img_matches, scene_corners[3] + Point2f( img_object.cols, 0), scene_corners[0] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
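
    // the match visualization above is drawn into img_matches only;
    // sceneImage is returned unmodified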


    return sceneImage;
}
int main()
{

  Mat img_object = imread("boat1.jpg");
  VideoCapture stream1(0);
  if(!stream1.isOpened())
  {
    cout << "Cannot Open Camera";
  }
  while(true)
  {
  Mat frame;
  stream1.read(img_scene);
  //-- Step 1: Detect the keypoints using SURF Detector
  int minHessian = 400;

  SurfFeatureDetector detector( minHessian );

  std::vector<KeyPoint> keypoints_object, keypoints_scene;

  detector.detect( img_object, keypoints_object );
  detector.detect( img_scene, keypoints_scene );

  //-- Step 2: Calculate descriptors (feature vectors)
  SurfDescriptorExtractor extractor;

  Mat descriptors_object, descriptors_scene;

  extractor.compute( img_object, keypoints_object, descriptors_object );
  extractor.compute( img_scene, keypoints_scene, descriptors_scene );

  //-- Step 3: Matching descriptor vectors using FLANN matcher
  FlannBasedMatcher matcher;
  std::vector< DMatch > matches;
  matcher.match( descriptors_object, descriptors_scene, matches );

  double max_dist = 0; double min_dist = 100;

  //-- Quick calculation of max and min distances between keypoints
  for( int i = 0; i < descriptors_object.rows; i++ )
  { double dist = matches[i].distance;
    if( dist < min_dist ) min_dist = dist;
    if( dist > max_dist ) max_dist = dist;
  }

  printf("-- Max dist : %f \n", max_dist );
  printf("-- Min dist : %f \n", min_dist );

  //-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
  std::vector< DMatch > good_matches;

  for( int i = 0; i < descriptors_object.rows; i++ )
  { if( matches[i].distance < 3*min_dist )
    { good_matches.push_back( matches[i]); }
  }

  Mat img_matches;
  drawMatches( img_object, keypoints_object, img_scene, keypoints_scene,
               good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
               vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );


  //-- Localize the object from img_1 in img_2
  std::vector<Point2f> obj;
  std::vector<Point2f> scene;

  for( size_t i = 0; i < good_matches.size(); i++ )
  {
    //-- Get the keypoints from the good matches
    obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
    scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
  }

  Mat H = findHomography( obj, scene, RANSAC );

  //-- Get the corners from the image_1 ( the object to be "detected" )
  std::vector<Point2f> obj_corners(4);
  obj_corners[0] = Point(0,0); obj_corners[1] = Point( img_object.cols, 0 );
  obj_corners[2] = Point( img_object.cols, img_object.rows ); obj_corners[3] = Point( 0, img_object.rows );
  std::vector<Point2f> scene_corners(4);

  perspectiveTransform( obj_corners, scene_corners, H);


  //-- Draw lines between the corners (the mapped object in the scene - image_2 )
  Point2f offset( (float)img_object.cols, 0);
  line( img_matches, scene_corners[0] + offset, scene_corners[1] + offset, Scalar(0, 255, 0), 4 );
  line( img_matches, scene_corners[1] + offset, scene_corners[2] + offset, Scalar( 0, 255, 0), 4 );
  line( img_matches, scene_corners[2] + offset, scene_corners[3] + offset, Scalar( 0, 255, 0), 4 );
  line( img_matches, scene_corners[3] + offset, scene_corners[0] + offset, Scalar( 0, 255, 0), 4 );

  //-- Show detected matches
 imshow( "Good Matches & Object detection", img_matches );
  /*if((scene_corners) == true)
  {
    cout << "good";
  }*/
  char key = cvWaitKey(30);
        if (key == 27) // ESC
            break;
      }

  return 0;
}
Exemple #23
0
void callback(leap_t *leap)
{
  //FeatureDetector *feat = FeatureDetector::create("SURF");
  //FeatureDetector *detector = FeatureDetector::create("FAST");
  FastFeatureDetector detector(6);
  //OrbFeatureDetector detector(5);
  //SurfFeatureDetector detector(20);
  SurfDescriptorExtractor ext;
  FlannBasedMatcher matcher;
  //BFMatcher matcher;

  Mat left(leap->left);
  Mat right(leap->right);
  
  vector <DMatch> matches;
  vector <DMatch> good_matches;

  vector <KeyPoint> keypoints_left; 
  vector <KeyPoint> keypoints_right; 

  Mat descriptors_left, descriptors_right;

  map -= Scalar(20,20,20);

  //FAST(left, keypoints_left, 8, true);
  //FAST(right, keypoints_right, 8, true);

  detector.detect(left, keypoints_left);
  detector.detect(right, keypoints_right);

  if(keypoints_left.size() == 0)
    return;
  if(keypoints_right.size() == 0)
    return;

  ext.compute(left, keypoints_left, descriptors_left);
  ext.compute(right, keypoints_right, descriptors_right);

  matcher.match(descriptors_left, descriptors_right, matches);

  if(matches.size() == 0)
    return;

  //drawKeypoints(left, keypoints_left, display_left);
  //drawKeypoints(right, keypoints_right, display_right);

  double max_dist = 0; double min_dist = 1000;

  //-- Quick calculation of max and min distances between keypoints
  for( int i = 0; i < descriptors_left.rows; i++ )
  { double dist = matches[i].distance;
    if( dist < min_dist ) min_dist = dist;
    if( dist > max_dist ) max_dist = dist;
  }
 
  for( int i = 0; i < (int)matches.size(); i++ )
  {
    int kp1 = matches[i].queryIdx;
    int kp2 = matches[i].trainIdx;
    float dx,dy;

    KeyPoint p1,p2;

    //printf( "Good Match [%d] Keypoint 1: %d  -- Keypoint 2: %d  \n", i, good_matches[i].queryIdx, good_matches[i].trainIdx );

    p1 = keypoints_left[kp1];
    p2 = keypoints_right[kp2];
    //printf("%f,%f -> %f,%f\n", p1.pt.x, p1.pt.y, p2.pt.x, p2.pt.y);

    dx = p1.pt.x - p2.pt.x;
    dy = p1.pt.y - p2.pt.y;

    if(fabs(dy) <= 10 && fabs(dx) > 20)
    {
      float distance = (fabs(dx)/200.0)*100.0;
      //printf("%f\n", fabs(dx));
      good_matches.push_back(matches[i]);


      //printf("%f\n", distance);

      // Draw on the map
      circle(map, Point(500-p1.pt.x,1000-distance*10), 3, Scalar(0,255,0));

    }
   
    //if( matches[i].distance <= max(2*min_dist, 0.001) )
      //good_matches.push_back( matches[i]);
  }

  if(good_matches.size() > 0)
    drawMatches(left, keypoints_left, right, keypoints_right, good_matches, display_match);
 

}
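
// The distance value above is an ad-hoc linear scaling of horizontal
// disparity. For a calibrated, rectified stereo pair the usual model is
// Z = f * B / d. A sketch with illustrative constants; the focal length and
// baseline below are assumptions, not the device's actual calibration.
static float disparityToDepth(float disparityPx,
                              float focalPx = 200.0f,
                              float baselineMm = 40.0f)
{
    if (disparityPx <= 0.0f)
        return -1.0f;                          // zero disparity: point at infinity
    return focalPx * baselineMm / disparityPx; // depth in the baseline's units
}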
bool detectLogo(Mat person, Mat desObject, Mat object, vector<KeyPoint> kpObject, vector<Point2f> objCorners)
{
    // scale up the image
    resize(person, person, Size(), 4, 4, CV_INTER_CUBIC);
    
    // sharpen the image with unsharp masking
    Mat image;
    GaussianBlur(person, image, cv::Size(0, 0), 3);
    addWeighted(person, 1.75, image, -0.75, 0, image);

    // detect key points in the input frame
    vector<KeyPoint> kpFrame;
    detector.detect(person, kpFrame);
    
    // extract feature descriptors for the detected key points
    Mat desFrame;
    extractor.compute(person, kpFrame, desFrame);
    if(desFrame.empty() or desObject.empty())
        return false;
    
    // match the key points with object
    FlannBasedMatcher matcher;
    vector< vector <DMatch> > matches;
    matcher.knnMatch(desObject, desFrame, matches, 2);
    
    // compute the good matches among the matched key points (Lowe's ratio test);
    // guard against fewer than two neighbours, which knnMatch can return
    vector<DMatch> goodMatches;
    for(int i=0; i<(int)matches.size(); i++)
    {
        if(matches[i].size() >= 2 && matches[i][0].distance < 0.6 * matches[i][1].distance)
        {
            goodMatches.push_back(matches[i][0]);
        }
    }
    
    if(goodMatches.size() >= 8)
    {
        vector<Point2f> obj;
        vector<Point2f> scene;
        
        for( int i = 0; i < goodMatches.size(); i++ )
        {
            // get the keypoints from the good matches
            obj.push_back( kpObject[ goodMatches[i].queryIdx ].pt );
            scene.push_back( kpFrame[ goodMatches[i].trainIdx ].pt );
        }
        
        Mat H;
        H = findHomography(obj, scene);
        
        vector<Point2f> sceneCorners(4);
        perspectiveTransform( objCorners, sceneCorners, H);
        
        // draw lines between the corners (the mapped object in the scene image )
        line(person, sceneCorners[0], sceneCorners[1], Scalar(255, 255, 255), 4);
        line(person, sceneCorners[1], sceneCorners[2], Scalar(255, 255, 255), 4);
        line(person, sceneCorners[2], sceneCorners[3], Scalar(255, 255, 255), 4);
        line(person, sceneCorners[3], sceneCorners[0], Scalar(255, 255, 255), 4);
        
        imshow("Person", person);
        cout << "[MESSAGE] LOGO DETECTED" << endl;
        return true;
    }
    return false;
}
void stitchLeftRight(Mat& leftImage, Mat& rightImage, Mat& rightImageWarped, Mat& panorama)
{
    // Detect the keypoints using SURF Detector
    int minHessian = 400;
    SurfFeatureDetector detector(minHessian);
    std::vector<KeyPoint> keypoints_leftImage, keypoints_rightImage;
    detector.detect( leftImage, keypoints_leftImage );
    detector.detect( rightImage, keypoints_rightImage );

    // Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    Mat descriptors_leftImage, descriptors_rightImage;
    extractor.compute( leftImage, keypoints_leftImage, descriptors_leftImage );
    extractor.compute( rightImage, keypoints_rightImage, descriptors_rightImage );

    // Match descriptor vectors using FLANN matcher
    // FLANN matching serves as initialization to the RANSAC feature matching (future step)
    // FLANN finds the nearest neighbors of keypoints in left image present in the right image
    FlannBasedMatcher matcher;
    std::vector< DMatch > matches;
    matcher.match( descriptors_leftImage, descriptors_rightImage, matches );

    double max_dist = 0, min_dist = 100;

    // Find max and min distances between keypoints
    for (int i = 0; i < descriptors_leftImage.rows; i++ )
    {
        double dist = matches[i].distance;
        if( dist < min_dist ) min_dist = dist;
        if( dist > max_dist ) max_dist = dist;
    }

    // Use only "good" matches (i.e. whose distance is less than 3*min_dist ) to
    // construct Homography (Projective Transformation)
    std::vector< DMatch > good_matches;
    for (int i = 0; i < descriptors_leftImage.rows; i++)
    {
        if (matches[i].distance < 3*min_dist)
        {
            good_matches.push_back(matches[i]);
        }
    }

    // Isolate the matched keypoints in each image
    std::vector<Point2f> leftImage_matchedKPs;
    std::vector<Point2f> rightImage_matchedKPs;

    for (size_t i = 0; i < good_matches.size(); i++ )
    {
        leftImage_matchedKPs.push_back( keypoints_leftImage[ good_matches[i].queryIdx ].pt );
        rightImage_matchedKPs.push_back( keypoints_rightImage[ good_matches[i].trainIdx ].pt );
    }

    // Find the Homography relating rightImage and leftImage
    Mat H = findHomography( Mat(rightImage_matchedKPs), Mat(leftImage_matchedKPs), CV_RANSAC );
    // Warp rightImage to leftImage's space using the Homography just constructed
//    Mat rightImageWarped;  // warped image has twice the width to account for overlap
    warpPerspective(rightImage, rightImageWarped, H, Size(rightImage.cols*2, rightImage.rows), INTER_CUBIC);

    panorama = rightImageWarped.clone();
    // Overwrite leftImage on left end of final panorma image
    Mat roi(panorama, Rect(0, 0, leftImage.cols, leftImage.rows));
    leftImage.copyTo(roi);
}
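
// A minimal usage sketch for stitchLeftRight above; the file names are
// placeholders, not paths from the original project.
#include <opencv2/highgui/highgui.hpp>

static int stitchDemo()
{
    cv::Mat left = cv::imread("left.jpg");
    cv::Mat right = cv::imread("right.jpg");
    if (left.empty() || right.empty())
        return -1;                            // nothing to stitch
    cv::Mat warped, pano;
    stitchLeftRight(left, right, warped, pano);
    cv::imwrite("panorama.jpg", pano);
    return 0;
}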
int main()
{
    // read the logo image
    Mat object = imread("logo.png");
    if(!object.data)
    {
        cout << "[ERROR] CANNOT READ IMAGE" << endl;
        return false;
    }
    
    // detect key points in the image using SURF
    vector<KeyPoint> kpObject;
    
    detector.detect(object, kpObject);
    
    // compute feature descriptors
    Mat desObject;
    
    extractor.compute( object, kpObject, desObject );
    
    // get the corners of the object
    vector<Point2f> objCorners(4);
    
    objCorners[0] = cvPoint(0, 0);
    objCorners[1] = cvPoint(object.cols, 0);
    objCorners[2] = cvPoint(object.cols, object.rows);
    objCorners[3] = cvPoint(0, object.rows);
    
    // capture video from webcam
    VideoCapture input(0);
    
    if(!input.isOpened())
	{
		cout << "[ERROR] CANNOT OPEN WEBCAM" << endl;
		return -1;
	}
    
	cout << "[MESSAGE]: CAPTURING VIDEO FROM WEBCAM" << endl;
    
    int key = 0;
    Mat inputFrame;
    Mat pathImage(360, 640, CV_8UC3, Scalar(0,0,0));
    Point newPoint;
    Point oldPoint;
    
    while(key != 27)
    {
		// capture each frame of the video
		input >> inputFrame;
		if(inputFrame.empty())
			break;
        
        // resize the image for faster processing
        resize(inputFrame, inputFrame, Size(), 0.5, 0.5, INTER_LINEAR);

        // run the HOG detector with default parameters
        HOGDescriptor hog;
        hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());
        
        vector<Rect> found, found_filtered;
        hog.detectMultiScale(inputFrame, found, 0, Size(8,8), Size(32,32), 1.05, 2);
        
        // keep only detections that are not nested inside another detection
        size_t i, j;
        for (i=0; i<found.size(); i++)
        {
            Rect r = found[i];
            for (j=0; j<found.size(); j++)
                if (j!=i && (r & found[j])==r)
                    break;
            if (j==found.size())
                found_filtered.push_back(r);
        }
        
        // draw a bounding box for the detected people
        for (i=0; i<found_filtered.size(); i++)
        {
            Rect r = found_filtered[i];
            
            // the HOG detector returns slightly larger rectangles than the real objects
            // so we slightly shrink the rectangles to get a nicer output
            r.x += cvRound(r.width*0.2);
            r.width = cvRound(r.width*0.55);
            r.y += cvRound(r.height*0.06);
            r.height = cvRound(r.height*0.75);
            
            if(r.x>=0 and r.y>=0 and r.x+r.width<=inputFrame.cols and r.y+r.height<=inputFrame.rows)
            {
                Mat tmp(inputFrame, r);
                // capture the detected person and check for presence of logo
                if(r.height > 0.5*inputFrame.rows)
                {
                    // detect the logo
                    if(detectLogo(tmp, desObject, object, kpObject, objCorners))
                    {
                        // draw a bounding box for the detected people
                        rectangle(inputFrame, r.tl(), r.br(), Scalar(0, 255, 0), 2);
                        
                        // get the new position
                        newPoint.x = r.x+r.width/2;
                        newPoint.y = r.y+r.height/2;
                        
                        // draw the path of the movement of the person
                        line(pathImage, newPoint, oldPoint, Scalar(0,255,0), 2);
                        
                        // save the old position
                        oldPoint.x = newPoint.x;
                        oldPoint.y = newPoint.y;
                    }
                }
            }
        }
        
        // display the detected image
        imshow("People Detection", inputFrame);
        
        // display the path of the person detected
        imshow("Path Traced", pathImage);
        key = waitKey(1);
    }
    return 0;
}
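
// main() above constructs HOGDescriptor and reloads the default people SVM on
// every frame, which redoes fixed setup work inside the capture loop. A sketch
// of hoisting that setup out of the loop (behaviour otherwise unchanged;
// detectPeopleLoopSketch is an illustrative name):
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <vector>

static void detectPeopleLoopSketch(cv::VideoCapture& input)
{
    cv::HOGDescriptor hog;                    // built once, reused per frame
    hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());
    cv::Mat frame;
    std::vector<cv::Rect> found;
    while (input.read(frame))
    {
        hog.detectMultiScale(frame, found, 0, cv::Size(8,8),
                             cv::Size(32,32), 1.05, 2);
        // ... filter nested rectangles and draw, as in main() above ...
    }
}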
void sift_demo( Mat dst,Mat dst2 ){
 
  SurfFeatureDetector detector (1500); 

  std::vector<KeyPoint> keypoints_1,keypoints_2;  
  detector.detect(dst, keypoints_1);
  detector.detect(dst2, keypoints_2);
  //drawKeypoints(dst, keypoints_1, dst);
  
  SurfDescriptorExtractor extractor;
  Mat descriptors_1, descriptors_2;
  extractor.compute( dst, keypoints_1, descriptors_1 );
  extractor.compute( dst2, keypoints_2, descriptors_2 );
  
  //-- Step 3: Matching descriptor vectors with a brute force matcher
  BFMatcher matcher(NORM_L2);
  std::vector< DMatch > matches;
  matcher.match( descriptors_1, descriptors_2, matches );
  pcl::PointXYZRGBA point;
    
  double max_dist = 0; double min_dist = 100;

    //filter out failed matches
  for( int i = 0; i < descriptors_1.rows; i++ )
    { double dist = matches[i].distance;
    if( dist < min_dist ) min_dist = dist;
    if( dist > max_dist ) max_dist = dist;
  }

  std::vector< DMatch > good_matches;
  for( int i = 0; i < descriptors_1.rows; i++ ){
    if( matches[i].distance < 3*min_dist ){    
    good_matches.push_back( matches[i]);    
    }
  }
  nuage2.clear();
  //nuage2=nuage;
  for(int i=0;i<dst.cols;i++){
    for (int j=0;j<dst.rows;j++){
      //for(int i=0;i<matches.size;i++){

      point.x=i;
      point.y=j;

      point.r=nuage.at(i,j).r;
      point.g=nuage.at(i,j).g; //dst.at<cv::Vec3b>(i,j)[1];
      point.b=nuage.at(i,j).b; //dst.at<cv::Vec3b>(i,j)[2];

      //if (depth.at<float>(i,j)==depth.at<float>(i,j)){
      if (nuage.at(i,j).z==nuage.at(i,j).z){ // self-comparison is false for NaN depths
        //std::cout<<depth.at<float>(i,j)<<endl;
        point.z=nuage.at(i,j).z*100; //depth.at<float>(j,i)*100;
      }
      nuage2.push_back(point);
    }
  }
 
  //-- Draw matches
  Mat img_matches;
  drawMatches( dst, keypoints_1, dst2, keypoints_2, good_matches, img_matches );  
  imshow("Matches", img_matches );
   
}
Exemple #28
0
HomographyInfo PaperUtil::alignCams(Mat logitech, Mat Kinect){
    double area = 0;
    while (true) {
        //-- Step 1: Detect the keypoints using SURF Detector
        int minHessian = 400;
        
        SurfFeatureDetector detector( minHessian );
        
        vector<KeyPoint> keypoints_object, keypoints_scene;
        
        detector.detect( logitech, keypoints_object );
        detector.detect( Kinect, keypoints_scene );
        
        //-- Step 2: Calculate descriptors (feature vectors)
        SurfDescriptorExtractor extractor;
        
        Mat descriptors_object, descriptors_scene;
        
        extractor.compute( logitech, keypoints_object, descriptors_object );
        extractor.compute( Kinect, keypoints_scene, descriptors_scene );
        
        //-- Step 3: Matching descriptor vectors using FLANN matcher
        FlannBasedMatcher matcher;
        vector< DMatch > matches;
        matcher.match( descriptors_object, descriptors_scene, matches );
        
        double max_dist = 0; double min_dist = 100;
        
        //-- Quick calculation of max and min distances between keypoints
        for( int i = 0; i < descriptors_object.rows; i++ )
        { double dist = matches[i].distance;
            if( dist < min_dist ) min_dist = dist;
            if( dist > max_dist ) max_dist = dist;
        }
        
        //-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
        vector< DMatch > good_matches;
        
        for( int i = 0; i < descriptors_object.rows; i++ )
        { if( matches[i].distance < 3*min_dist )
        { good_matches.push_back( matches[i]); }
        }
        
        Mat img_matches;
        drawMatches( logitech, keypoints_object, Kinect, keypoints_scene,
                    good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
                    vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
        
        
        //-- Localize the object from img_1 in img_2
        vector<Point2f> obj;
        vector<Point2f> scene;
        
        for( size_t i = 0; i < good_matches.size(); i++ )
        {
            //-- Get the keypoints from the good matches
            obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
            scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
        }
        
        Mat H = findHomography( obj, scene, CV_RANSAC );
        
        //-- Get the corners from the image_1 ( the object to be "detected" )
        vector<Point2f> obj_corners(4);
        obj_corners[0] = Point(0,0); obj_corners[1] = Point( logitech.cols, 0 );
        obj_corners[2] = Point( logitech.cols, logitech.rows ); obj_corners[3] = Point( 0, logitech.rows );
        vector<Point2f> scene_corners(4);
        
        perspectiveTransform( obj_corners, scene_corners, H);
        
        area = fabs(contourArea(scene_corners));
        // if angles OK and it's large enough, we return it (area should be roughly 75k)
        if (checkAnglesInVector(scene_corners) && area > 50000) {
            //-- Draw lines between the corners (the mapped object in the scene - image_2 )
            Point2f offset( (float)logitech.cols, 0);
            line( img_matches, scene_corners[0] + offset, scene_corners[1] + offset, Scalar(0, 255, 0), 4 );
            line( img_matches, scene_corners[1] + offset, scene_corners[2] + offset, Scalar( 0, 255, 0), 4 );
            line( img_matches, scene_corners[2] + offset, scene_corners[3] + offset, Scalar( 0, 255, 0), 4 );
            line( img_matches, scene_corners[3] + offset, scene_corners[0] + offset, Scalar( 0, 255, 0), 4 );
            
            //-- Show detected matches
            imshow( "Good Matches & Object detection", img_matches );
            HomographyInfo result;
            result.homography = H;
            result.roi = scene_corners;
            return result;
        } else {
            cout << "Couldn't get a good homography, trying again" << endl;
        }
    }
}
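
// checkAnglesInVector is defined elsewhere in the original source. A plausible
// sketch of such a test (an assumption about its intent, not the author's
// code): require each corner angle of the projected quadrilateral to stay
// reasonably close to 90 degrees, rejecting degenerate homographies.
#include <opencv2/core/core.hpp>
#include <algorithm>
#include <cmath>
#include <vector>

static bool checkAnglesSketch(const std::vector<cv::Point2f>& quad,
                              double tolDeg = 25.0)
{
    if (quad.size() != 4) return false;
    for (int i = 0; i < 4; i++)
    {
        cv::Point2f a = quad[(i + 3) % 4] - quad[i];  // edge into corner i
        cv::Point2f b = quad[(i + 1) % 4] - quad[i];  // edge out of corner i
        double na = std::sqrt((double)(a.x * a.x + a.y * a.y));
        double nb = std::sqrt((double)(b.x * b.x + b.y * b.y));
        double c = (a.x * b.x + a.y * b.y) / (na * nb + 1e-9);
        double ang = std::acos(std::max(-1.0, std::min(1.0, c))) * 180.0 / CV_PI;
        if (std::fabs(ang - 90.0) > tolDeg) return false;
    }
    return true;
}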
Exemple #29
0
int main()
{
	Mat object = imread( "/home/pi/opencv/darkL2.jpg", CV_LOAD_IMAGE_GRAYSCALE );
    //Mat inputImg = imread( "/home/pi/opencv/block.jpg", CV_LOAD_IMAGE_GRAYSCALE );
	//Mat object;
	//resize(inputImg,object,Size(0,0),0.03,0.03);

    if( !object.data )
    {
        cout<< "Error reading object " << endl;
        return -1;
    }
	printf("read image\n");
    //Detect the keypoints using SURF Detector
    int minHessian = 300;

    SurfFeatureDetector detector( minHessian );
    vector<KeyPoint> kp_object;

    detector.detect( object, kp_object );
	//printf("detect keypoints\n");

    //Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    Mat des_object;

    extractor.compute( object, kp_object, des_object );

    FlannBasedMatcher matcher;

    //VideoCapture cap(0);
	//VideoCapture cap("/home/pi/opencv/mouse2.mp4" );
	Mat cap = imread( "/home/pi/opencv/photo.jpg", CV_LOAD_IMAGE_COLOR );

	//printf("%f \n",cap.get(CV_CAP_PROP_FPS));

	namedWindow("Good Matches");

    vector<Point2f> obj_corners(4);
	
	//printf("calculate descriptors\n");	
    //Get the corners from the object
    obj_corners[0] = cvPoint(0,0);
    obj_corners[1] = cvPoint( object.cols, 0 );
    obj_corners[2] = cvPoint( object.cols, object.rows );
    obj_corners[3] = cvPoint( 0, object.rows );
	//printf("get corners\n");
    char key = 'a';
    int framecount = 0;

	int angle = 0;
	Mat frame;
	int numCycle = 0;

    while (key != 27)
    {
	/*
	Mat inputVid;	
	Mat frame;
        cap.read(inputVid);
	resize(inputVid, frame, Size(0,0), 0.3,0.3);
	
	
	angle += 10;
	printf("while loop\n");
	int cols = frame.cols;
	int rows = frame.rows;
	Point2f abc(cols/2,rows/2);
	Mat M = getRotationMatrix2D(abc,angle,1);
        warpAffine(cap,frame,M,Size(cols,rows));
	*/
	frame = cap;
        if (framecount < 5 )
        {
            framecount++;
            continue;
        }
	

        Mat des_image, img_matches;
        vector<KeyPoint> kp_image;
        vector<vector<DMatch > > matches;
        vector<DMatch > good_matches;
        vector<Point2f> obj;
        vector<Point2f> scene;
        vector<Point2f> scene_corners(4);
        Mat H;
        Mat image;
	//printf("before color call\n");
        cvtColor(frame, image, CV_BGR2GRAY);

        detector.detect( image, kp_image );
        extractor.compute( image, kp_image, des_image );

        matcher.knnMatch(des_object, des_image, matches, 2);
	//printf("before segfault loop\n");
        for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
        {
            if((matches[i][0].distance < 0.6*(matches[i][1].distance)) && ((int) matches[i].size()<=2 && (int) matches[i].size()>0))
            {
                good_matches.push_back(matches[i][0]);
            }
        }
	//printf("segfault sensitive loop\n");
        //Draw only "good" matches
        drawMatches( object, kp_object, image, kp_image, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), 						DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

	printf("Number of matches: %d\n", good_matches.size());
        if (good_matches.size() >= 10)
        {
		printf("good matches >= 10\n");
            for( int i = 0; i < good_matches.size(); i++ )
            {
                //Get the keypoints from the good matches
                obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
                scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
            }

            H = findHomography( obj, scene, CV_RANSAC );

            perspectiveTransform( obj_corners, scene_corners, H);

            //Draw lines between the corners (the mapped object in the scene image )
            line( img_matches, scene_corners[0] + Point2f( object.cols, 0), scene_corners[1] + Point2f( object.cols, 0), Scalar(0, 255, 0), 4 );
            line( img_matches, scene_corners[1] + Point2f( object.cols, 0), scene_corners[2] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
            line( img_matches, scene_corners[2] + Point2f( object.cols, 0), scene_corners[3] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
            line( img_matches, scene_corners[3] + Point2f( object.cols, 0), scene_corners[0] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
        }
	//printf("draw good matches\n");
        //Show detected matches
        imshow( "Good Matches", img_matches );
	if ( numCycle < 20 )
	{
		stringstream name;
		name << numCycle;
		string filename = string("Match_")+name.str()+string(".jpg");
		imwrite( filename, img_matches );
		numCycle++;
	}

        key = waitKey(33);
    }
    return 0;
}
Exemple #30
0
/** @function execute */
int execute(Mat img_scene)
{

  if( !img_object.data || !img_scene.data )
  { std::cout<< " --(!) Error reading images " << std::endl; return -1; }

  //-- Step 1: Detect the keypoints using SURF Detector
  int minHessian = 400;

  SurfFeatureDetector detector( minHessian );

  std::vector<KeyPoint> keypoints_object, keypoints_scene;

  detector.detect( img_object, keypoints_object );
  detector.detect( img_scene, keypoints_scene );

  //-- Step 2: Calculate descriptors (feature vectors)
  SurfDescriptorExtractor extractor;

  Mat descriptors_object, descriptors_scene;

  extractor.compute( img_object, keypoints_object, descriptors_object );
  extractor.compute( img_scene, keypoints_scene, descriptors_scene );
 
  if (descriptors_scene.rows == 0 || descriptors_scene.cols == 0) {
    return 0;
  }
  //-- Step 3: Matching descriptor vectors using FLANN matcher
  FlannBasedMatcher matcher;
  std::vector< DMatch > matches;
  matcher.match( descriptors_object, descriptors_scene, matches );

  double max_dist = 0; double min_dist = 100;

  //-- Quick calculation of max and min distances between keypoints
  for( int i = 0; i < descriptors_object.rows; i++ )
  { double dist = matches[i].distance;
    if( dist < min_dist ) min_dist = dist;
    if( dist > max_dist ) max_dist = dist;
  }

  //-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
  std::vector< DMatch > good_matches;

  for( int i = 0; i < descriptors_object.rows; i++ )
  { if( matches[i].distance < 3*min_dist )
     { good_matches.push_back( matches[i]); }
  }

  Mat img_matches;

  //-- Localize the object
  std::vector<Point2f> obj;
  std::vector<Point2f> scene;

  for( int i = 0; i < good_matches.size(); i++ )
  {
    //-- Get the keypoints from the good matches
    obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
    scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
  }

  Mat H = findHomography( obj, scene, CV_RANSAC );

  //-- Get the corners from the image_1 ( the object to be "detected" )
  std::vector<Point2f> obj_corners(4);
  obj_corners[0] = cvPoint(0,0); obj_corners[1] = cvPoint( img_object.cols, 0 );
  obj_corners[2] = cvPoint( img_object.cols, img_object.rows ); obj_corners[3] = cvPoint( 0, img_object.rows );
  std::vector<Point2f> scene_corners(4);

  perspectiveTransform( obj_corners, scene_corners, H);
  double area0 = contourArea(scene_corners) / img_scene.rows / img_scene.cols;
  std::cout << area0 << std::endl;


  // double ratio =  fabs(H.at<double>(0, 1)) + fabs(H.at<double>(1, 0));
  // std::cout << H.at<double>(0, 1) << "\t" << H.at<double>(1, 0) << "\t" << ratio << std::endl;

  double ratio = area0;
  if (ratio > max_metric) {
    max_metric = ratio;
  } 
  if (ratio != 0.0 && ratio < min_metric) {
    min_metric = ratio;
  }
  return 0;
}
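
// execute() above scores a single frame against the global img_object and
// tracks the min/max of the normalized detected area. A minimal driver sketch
// (an assumption about intended use; it presumes img_object and the metric
// globals are initialized elsewhere, as in the original source):
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>

static int runExecuteDemo()
{
    cv::VideoCapture cap(0);
    if (!cap.isOpened()) return -1;
    cv::Mat frame, gray;
    while (cap.read(frame))
    {
        cv::cvtColor(frame, gray, CV_BGR2GRAY);  // SURF operates on grayscale
        execute(gray);
        if (cv::waitKey(1) == 27) break;         // ESC stops the demo
    }
    return 0;
}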