Example #1
1
int getFeatures(Mat &object, Mat &frame, Mat &homography, vector<KeyPoint> &keypoints_object,
    vector<KeyPoint> &keypoints_scene, vector<DMatch> &good_matches) {

    Ptr<SIFT> detector = SIFT::create();

    // Detect features and compute descriptors
    Mat descriptors_object, descriptors_scene;
    detector->detectAndCompute(object, noArray(), keypoints_object, descriptors_object);
    detector->detectAndCompute(frame, noArray(), keypoints_scene, descriptors_scene);

    // Match descriptors using FLANN
    FlannBasedMatcher matcher;
    vector<DMatch> matches;
    matcher.match(descriptors_object, descriptors_scene, matches);

    // Check if too few matches are found
    if (matches.size() <= 4) {
        cout << "Error: too few matches were found" << endl;
        return -1;
    }

    // Find minimum and maximum distances between descriptors
    double max_dist = 0;
    double min_dist = 100;
    for (int i = 0; i < descriptors_object.rows; i++) {
        double dist = matches[i].distance;
        if (dist < min_dist) min_dist = dist;
        if (dist > max_dist) max_dist = dist;
    }

    // If there are enough matches overall, keep only a higher-quality subset
    const size_t minMatches = 8;
    if (matches.size() > minMatches) {
        for (int i = 0; i < descriptors_object.rows; i++) {
            if (matches[i].distance < 3*min_dist) {
                good_matches.push_back(matches[i]);
            }
        }
    }
    // If the filter left too few good matches, fall back to using all matches
    if (good_matches.size() <= minMatches) {
        good_matches = matches;
    }

    vector<Point2f> obj, scene;
    for (int i = 0; i < good_matches.size(); i++) {
        // Determine keypoints from the matches
        obj.push_back(keypoints_object[ good_matches[i].queryIdx ].pt);
        scene.push_back(keypoints_scene[ good_matches[i].trainIdx ].pt);
    }

    // Transform pixel coordinates between images
    homography = findHomography(obj, scene, RANSAC);
    return (int)good_matches.size();
}
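A note on the filter above: the 3*min_dist heuristic degenerates when min_dist is near zero. A common, more robust alternative (not part of the original example) is Lowe's ratio test over the two nearest neighbours; a minimal sketch, assuming the same descriptors_object and descriptors_scene as above:

// Sketch: ratio-test filtering with knnMatch instead of the min_dist heuristic
FlannBasedMatcher matcher;
vector<vector<DMatch>> knn_matches;
matcher.knnMatch(descriptors_object, descriptors_scene, knn_matches, 2);

const float ratio_thresh = 0.75f;  // typical value from Lowe's paper; tune per dataset
vector<DMatch> good_matches;
for (size_t i = 0; i < knn_matches.size(); i++) {
    // keep a match only when the best neighbour is clearly better than the second best
    if (knn_matches[i].size() == 2 &&
        knn_matches[i][0].distance < ratio_thresh * knn_matches[i][1].distance) {
        good_matches.push_back(knn_matches[i][0]);
    }
}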
Example #2
0
vector<DMatch> GraphicEnd::match( Mat desp1, Mat desp2 )
{
    cout<<"GraphicEnd::match two desp"<<endl;
    FlannBasedMatcher matcher;
    vector<DMatch> matches;

    if (desp1.empty() || desp2.empty())
    {
        return matches;
    }
    double max_dist = 0, min_dist = 100;
    matcher.match( desp1, desp2, matches);

    for (int i=0; i<desp1.rows; i++)
    {
        double dist = matches[ i ].distance;
        if (dist < min_dist)
            min_dist = dist;
        if (dist > max_dist)
            max_dist = dist;
    }

    //return matches;

    vector<DMatch> good_matches;
    for (size_t i=0; i<matches.size(); i++)
    {
        if (matches[ i ].distance <= max(4*min_dist, _match_min_dist))
        {
            good_matches.push_back(matches[ i ]);
        }
    }
    return good_matches;
}
Example #3
0
int match(vector<DMatch> &match, frame &f1, frame &f2)
{
  vector<DMatch> matches;
  FlannBasedMatcher matcher;
  matcher.match(f1.desp, f2.desp, matches);
  
  static reader pd("../config/config.ini");
  
  double min_distance = 9999;
  double match_threshold = atof(pd.get("match_threshold").c_str());
  
  for (int i = 0; i < matches.size(); ++i)
  {
    if (matches[i].distance < min_distance)
    {
      min_distance = matches[i].distance;
    }
  }
  
  for (int i = 0; i < matches.size(); ++i)
  {
    if (matches[i].distance < (min_distance * match_threshold))
    {
      match.push_back(matches[i]);
    }
  }
  
  return (int)match.size();
}
Example #4
0
int main()
{
	//Give the names of the images to be registered
	const char* imRef_name = "834-r1.png";
	const char* imNxt_name = "835-r1.png";
	int hessianThresh = 100, ransacThresh = 3;
	Mat mask, H12;
	// Read images
	Mat img1 = imread(imRef_name, CV_LOAD_IMAGE_GRAYSCALE);
	Mat img2 = imread(imNxt_name, CV_LOAD_IMAGE_GRAYSCALE);
	Mat img2Out;	// Registered image2 wrt image1

	// Check to see if images exist
	if(img1.empty() || img2.empty())
	{
		printf("Can’t read one of the images\n");
		exit(0);
	}

	// detecting keypoints
	printf("Finding keypoints ... ");
	SURF ImgSurf(hessianThresh);
	vector<KeyPoint> keypoints1, keypoints2;
	ImgSurf(img1, mask, keypoints1);
	ImgSurf(img2, mask, keypoints2);
	// computing descriptors
	SurfDescriptorExtractor extractor;
	Mat descriptors1, descriptors2;
	extractor(img1, mask, keypoints1, descriptors1, true);
	extractor(img2, mask, keypoints2, descriptors2, true);

	// Match the points
	printf("\nMatching keypoints ... ");
	FlannBasedMatcher matcher;
	std::vector< DMatch > matches;
	matcher.match( descriptors1, descriptors2, matches );

	// Extract indices of matched points
    vector<int> queryIdxs( matches.size() ), trainIdxs( matches.size() );
    for( size_t i = 0; i < matches.size(); i++ )
    {
        queryIdxs[i] = matches[i].queryIdx;
        trainIdxs[i] = matches[i].trainIdx;
    }

	// Extract matched points from indices
    vector<Point2f> points1; KeyPoint::convert(keypoints1, points1, queryIdxs);
    vector<Point2f> points2; KeyPoint::convert(keypoints2, points2, trainIdxs);

	// Use RANSAC to find the homography
	printf("\nComputing homography ... ");
    H12 = findHomography( Mat(points2), Mat(points1), CV_RANSAC, ransacThresh );
	
	// Warp the second image according to the homography
	warpPerspective(img2, img2Out, H12, cvSize(img2.cols, img2.rows), INTER_LINEAR);

	// Write result to file
	imwrite("im2reg.png",img2Out);
	printf("\nDone!!!.... ");
}
void detectSiftMatchWithOpenCV(const char* img1_path, const char* img2_path, MatrixXf &match) {
  Mat img1 = imread(img1_path);   
  Mat img2 = imread(img2_path);   

  SiftFeatureDetector detector;
  SiftDescriptorExtractor extractor;
  vector<KeyPoint> key1;
  vector<KeyPoint> key2;
  Mat desc1, desc2;
  detector.detect(img1, key1);
  detector.detect(img2, key2);
  extractor.compute(img1, key1, desc1);
  extractor.compute(img2, key2, desc2);

  FlannBasedMatcher matcher;
  vector<DMatch> matches;
  matcher.match(desc1, desc2, matches);

  match.resize(matches.size(), 6);
  cout << "match count: " << matches.size() << endl;
  for (int i = 0; i < matches.size(); i++) {
    match(i, 0) = key1[matches[i].queryIdx].pt.x;
    match(i, 1) = key1[matches[i].queryIdx].pt.y;
    match(i, 2) = 1;
    match(i, 3) = key2[matches[i].trainIdx].pt.x;
    match(i, 4) = key2[matches[i].trainIdx].pt.y;
    match(i, 5) = 1;
  }
  
}
Example #6
0
bool RelicScn::Match_an_Obj(RelicObj obj)
{
	string message;

	FlannBasedMatcher matcher;
	vector<DMatch> matches;

	matcher.match(obj.descriptors, this->descriptors, matches);
	vector<DMatch> good_matches = Get_Good_Matches(matches);

	//-- Localize the object
	std::vector<Point2f> obj_points;
	std::vector<Point2f> scn_points;
	for (size_t i = 0; i < good_matches.size(); i++)
	{
		//-- Get the keypoints from the good matches
		obj_points.push_back(obj.keypoints[good_matches[i].queryIdx].pt);
		scn_points.push_back(this->keypoints[good_matches[i].trainIdx].pt);
	}
	Mat H = cv::findHomography(obj_points, scn_points, RANSAC);
	if (H.empty())  // homography estimation can fail with too few or degenerate matches
	{
		BOOST_LOG_TRIVIAL(info) << "Target object not detected (homography estimation failed)!";
		return false;
	}

	std::vector<Point2f> obj_corners(4);

	obj_corners[0] = Point2f(0, 0);
	obj_corners[1] = Point2f(obj.img_width-1, 0);
	obj_corners[2] = Point2f(obj.img_width-1, obj.img_height-1);
	obj_corners[3] = Point2f(0, obj.img_height-1);

	std::vector<Point2f> possible_obj_corners(4);
	perspectiveTransform(obj_corners, possible_obj_corners, H);
	BOOST_LOG_TRIVIAL(info) << "原始目标物体大小(像素): " << contourArea(obj_corners);
	BOOST_LOG_TRIVIAL(info) << "检测到的物体大小(像素): " << contourArea(possible_obj_corners);
	this->corners = possible_obj_corners;
	double possible_target_area = contourArea(possible_obj_corners);
	double whole_scene_area = this->img_gray.rows*this->img_gray.cols;
	BOOST_LOG_TRIVIAL(info) << "环境图像大小(像素): " << whole_scene_area;
	double ratio = possible_target_area / whole_scene_area;
	BOOST_LOG_TRIVIAL(info) << "检测到的目标占全图比例: " << ratio;
	if (ratio>0.03 && ratio<1)
	{
		for (size_t i = 0; i < possible_obj_corners.size(); i++)
		{
			if (possible_obj_corners[i].x < 0 || possible_obj_corners[i].y < 0)
			{
				BOOST_LOG_TRIVIAL(info) << "未能检测到目标物体!";
				return false;
			}
		}
		BOOST_LOG_TRIVIAL(info) << "成功检测到目标物体!";
		return true;
	} 
	else
	{
		BOOST_LOG_TRIVIAL(info) << "未能检测到目标物体!";
		return false;
	}
}
Example #7
0
int compare(Mat img_1, Mat img_2) {
  //-- Step 1: Detect the keypoints using SURF Detector
  int minHessian = 400;

  SurfFeatureDetector detector( minHessian );

  std::vector<KeyPoint> keypoints_1, keypoints_2;

  detector.detect( img_1, keypoints_1 );
  detector.detect( img_2, keypoints_2 );

  //-- Step 2: Calculate descriptors (feature vectors)
  SurfDescriptorExtractor extractor;

  Mat descriptors_1, descriptors_2;

  extractor.compute( img_1, keypoints_1, descriptors_1 );
  extractor.compute( img_2, keypoints_2, descriptors_2 );

  //-- Step 3: Matching descriptor vectors using FLANN matcher
  FlannBasedMatcher matcher;
  std::vector< DMatch > matches;
  matcher.match( descriptors_1, descriptors_2, matches );

  double max_dist = 0; double min_dist = 100;

  //-- Quick calculation of max and min distances between keypoints
  for( int i = 0; i < descriptors_1.rows; i++ ) { 
    double dist = matches[i].distance;
    if( dist < min_dist ) min_dist = dist;
    if( dist > max_dist ) max_dist = dist;
  }

  //-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist,
  //-- or a small arbitrary value ( 0.02 ) in the event that min_dist is very
  //-- small)
  //-- PS.- radiusMatch can also be used here.
  std::vector< DMatch > good_matches;

  for( int i = 0; i < descriptors_1.rows; i++ ) { 
    if( matches[i].distance <= max(2 * min_dist, 0.02) ) { 
      good_matches.push_back( matches[i]); 
    }
  }

  // Mat img_matches;
  // drawMatches( img_1, keypoints_1, img_2, keypoints_2,
  //              good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
  //              vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

  // //-- Show detected matches
  // imshow( "Good Matches", img_matches );

  // waitKey(0);  // nothing is displayed above, so there is no window to wait on

  return (int)good_matches.size();
}
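The comment block above mentions radiusMatch as an alternative; a minimal sketch of that variant (not in the original code), reusing descriptors_1 and descriptors_2, with a hypothetical radius that would need tuning for the descriptor type:

// Sketch: radiusMatch returns, per query descriptor, every train descriptor within maxDistance
FlannBasedMatcher matcher;
std::vector<std::vector<DMatch>> radius_matches;
const float maxDistance = 0.25f;  // hypothetical radius; depends on descriptor scale
matcher.radiusMatch(descriptors_1, descriptors_2, radius_matches, maxDistance);

std::vector<DMatch> good_matches;
for (size_t i = 0; i < radius_matches.size(); i++) {
    // per-query results come back sorted by increasing distance, so [0] is the closest
    if (!radius_matches[i].empty()) {
        good_matches.push_back(radius_matches[i][0]);
    }
}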
int PlayedCard::computeSurfGoodMatches(vector<KeyPoint> keypoints_1, vector<KeyPoint> keypoints_2, Mat descriptors_1, Mat descriptors_2) {

	//-- Step 3: Matching descriptor vectors using FLANN matcher
	FlannBasedMatcher matcher;
	vector< DMatch > matches;
	matcher.match(descriptors_1, descriptors_2, matches);

	filterMatchesByAbsoluteValue(matches, 0.125);
	filterMatchesRANSAC(matches, keypoints_1, keypoints_2);

	return (int)matches.size();
}
Example #9
0
int main(int argc, char* argv[])
{
	VideoCapture camera;
	camera.open(0);
	// set properties after opening; before open() they are silently ignored by most backends
	camera.set(CV_CAP_PROP_FRAME_WIDTH, WIDTH);
	camera.set(CV_CAP_PROP_FRAME_HEIGHT, HEIGHT);
	
	//checkOpenCL();
	Ptr<FeatureDetector> detector = FeatureDetector::create("STAR");
	//Ptr<DescriptorExtractor> extractor = DescriptorExtractor::create("FREAK");
	BriefDescriptorExtractor extractor;
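	// Note: FlannBasedMatcher's default KD-tree index expects CV_32F descriptors;
	// BRIEF produces binary CV_8U descriptors, so either convert them with
	// convertTo(..., CV_32F) or construct the matcher with flann::LshIndexParams.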
	FlannBasedMatcher matcher;
	std::vector< DMatch > matches;
	Mat descriptor[2];

	int k = 0;
	camera >> image;
	detector->detect(image, keypoint[1]);
	extractor.compute(image, keypoint[1], descriptor[1]);
	for (bool loop = true; loop; )
	{
		switch (waitKey(10))
		{
		case 'q':
			loop = false;
			break;
		}
		camera >> image;
		if (image.empty())
			break;

		// detect features
		detector->detect(image, keypoint[k % 2]);
		extractor.compute(image, keypoint[k % 2], descriptor[k % 2]);
		try {
			matcher.match(descriptor[0], descriptor[1], matches);
		}
		catch (const Exception& ex)
		{
			printf("%s", ex.msg.c_str());
		}
		printf("%zu\n", keypoint[k % 2].size());
		for (int i = 0; i < keypoint[k % 2].size(); i++)
		{
			Point2f pt = keypoint[k % 2][i].pt;
			circle(image, Point(pt.x, pt.y), 3, Scalar(0, 0, 255));
		}
		k++;
		imshow("image", image);
	}
	return 0;
}
Example #10
0
/**
 * @brief featuredetector::testFeatures
 * FLANN based matching.
 * Algorithm is taken from
 * http://docs.opencv.org/doc/tutorials/features2d/feature_flann_matcher/feature_flann_matcher.html
 * @return Matched points image.
 */
Mat featuredetector::testFeatures(){
    cv::Mat im1, im2;

    //Grayscale the images.
    if(_image1.channels() == 3)
        cv::cvtColor(_image1,im1, CV_BGR2GRAY);
    else _image1.copyTo(im1);
    if(_image2.channels() == 3)
        cv::cvtColor(_image2,im2, CV_BGR2GRAY);
    else _image2.copyTo(im2);


    int minH = 100; // (should be around ~100)
    Ptr<xfeatures2d::SURF> detector =  xfeatures2d::SURF::create(minH);
    detector->setHessianThreshold(minH);

    std::vector<KeyPoint> keypoints1, keypoints2;
    Mat descriptors1, descriptors2;
    detector->detectAndCompute( im1, Mat(), keypoints1, descriptors1 );
    detector->detectAndCompute( im2, Mat(), keypoints2, descriptors2 );

    FlannBasedMatcher matcher;

    std::vector<DMatch> matches;
    matcher.match( descriptors1, descriptors2, matches );
    double maxDist = 0; double minDist = 80;



    for( int i = 0; i < descriptors1.rows; i++ )
    { double dist = matches[i].distance;
        if( dist < minDist ) minDist = dist;
        if( dist > maxDist ) maxDist = dist;
    }
    std::vector< DMatch > goodMatches;

    for( int i = 0; i < descriptors1.rows; i++ )
    { if( matches[i].distance <= max(2*minDist, 0.02) )
        { goodMatches.push_back( matches[i]); }
    }



    Mat matchMat;
    drawMatches( im1, keypoints1, im2, keypoints2,
                 goodMatches, matchMat, Scalar::all(-1), Scalar::all(-1),
                 vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

    imshow( "Matches", matchMat );
    return matchMat;
}
Example #11
0
/*
 * @function main
 * @brief Main function
 */
int flann( int argc, char** argv )
{
  if( argc != 3 )
  { readme(); return -1; }
  Mat img_1 = imread( argv[1], IMREAD_GRAYSCALE );
  Mat img_2 = imread( argv[2], IMREAD_GRAYSCALE );
  if( !img_1.data || !img_2.data )
  { std::cout<< " --(!) Error reading images " << std::endl; return -1; }
  //-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
  int minHessian = 400;
  Ptr<SURF> detector = SURF::create();
  detector->setHessianThreshold(minHessian);
  std::vector<KeyPoint> keypoints_1, keypoints_2;
  Mat descriptors_1, descriptors_2;
  detector->detectAndCompute( img_1, Mat(), keypoints_1, descriptors_1 );
  detector->detectAndCompute( img_2, Mat(), keypoints_2, descriptors_2 );
  //-- Step 2: Matching descriptor vectors using FLANN matcher
  FlannBasedMatcher matcher;
  std::vector< DMatch > matches;
  matcher.match( descriptors_1, descriptors_2, matches );
  double max_dist = 0; double min_dist = 100;
  //-- Quick calculation of max and min distances between keypoints
  for( int i = 0; i < descriptors_1.rows; i++ )
  { double dist = matches[i].distance;
    if( dist < min_dist ) min_dist = dist;
    if( dist > max_dist ) max_dist = dist;
  }
  printf("-- Max dist : %f \n", max_dist );
  printf("-- Min dist : %f \n", min_dist );
  //-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist,
  //-- or a small arbitrary value ( 0.02 ) in the event that min_dist is very
  //-- small)
  //-- PS.- radiusMatch can also be used here.
  std::vector< DMatch > good_matches;
  for( int i = 0; i < descriptors_1.rows; i++ )
  { if( matches[i].distance <= max(2*min_dist, 0.02) )
    { good_matches.push_back( matches[i]); }
  }
  //-- Draw only "good" matches
  Mat img_matches;
  drawMatches( img_1, keypoints_1, img_2, keypoints_2,
               good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
               vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
  //-- Show detected matches
  imshow( "Good Matches", img_matches );
  for( int i = 0; i < (int)good_matches.size(); i++ )
  { printf( "-- Good Match [%d] Keypoint 1: %d  -- Keypoint 2: %d  \n", i, good_matches[i].queryIdx, good_matches[i].trainIdx ); }
  waitKey(0);
  return 0;
}
Example #12
0
vector<DMatch> GraphicEnd::match( vector<PLANE>& p1, vector<PLANE>& p2 )
{
    cout<<"GraphicEnd::match two planes"<<endl;
    FlannBasedMatcher matcher;
    vector<DMatch> matches;
    cv::Mat des1(p1.size(), 4, CV_32F), des2(p2.size(), 4, CV_32F);
    for (size_t i=0; i<p1.size(); i++)
    {
        pcl::ModelCoefficients c = p1[i].coff;
        float m[1][4] = { c.values[0], c.values[1], c.values[2], c.values[3] };
        Mat mat = Mat(1,4, CV_32F, m);
        mat.row(0).copyTo( des1.row(i) );
    }

    for (size_t i=0; i<p2.size(); i++)
    {
        pcl::ModelCoefficients c = p2[i].coff;
        float m[1][4] = { c.values[0], c.values[1], c.values[2], c.values[3] };
        Mat mat = Mat(1,4, CV_32F, m);
        mat.row(0).copyTo( des2.row(i) );
    }

    matcher.match( des1, des2, matches);

    return matches;  // NOTE: this early return makes the filtering below unreachable as written

    double max_dist = 0, min_dist = 100;

    for (int i=0; i<des1.rows; i++)
    {
        double dist = matches[ i ].distance;
        if (dist < min_dist)
            min_dist = dist;
        if (dist > max_dist)
            max_dist = dist;
    }

    vector<DMatch> good_matches;
    for (size_t i=0; i<matches.size(); i++)
    {
        if (matches[ i ].distance <= 3*min_dist)
        {
            good_matches.push_back(matches[ i ]);
        }
    }
    return good_matches;
}
Example #13
0
// Performs the matching between points
void computeMatching(Mat& img1, Mat& img2,vector<KeyPoint>& keypoints1,vector<KeyPoint>& keypoints2, vector<DMatch>& matches ){
        // computing descriptors
        #if defined(_SURF_)
        SurfDescriptorExtractor extractor;
        #elif defined(_SIFT_)
        SiftDescriptorExtractor extractor;
        #endif
        Mat descriptors1, descriptors2;
        extractor.compute(img1, keypoints1, descriptors1);
        extractor.compute(img2, keypoints2, descriptors2);

        FlannBasedMatcher matcher;
        matcher.match(descriptors1,descriptors2,matches);

        double max_dist = 0; double min_dist = 100;

        //-- Quick calculation of max and min distances between keypoints
        for( int i = 0; i < descriptors1.rows; i++ ){
           double dist = matches[i].distance;
           if( dist < min_dist ) min_dist = dist;
           if( dist > max_dist ) max_dist = dist;
         }

         printf("-- Max dist : %f \n", max_dist );
         printf("-- Min dist : %f \n", min_dist );

         //-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist )
         //-- PS.- radiusMatch can also be used here.
         std::vector< DMatch > good_matches;

         for( int i = 0; i < descriptors1.rows; i++ ){
             if( matches[i].distance < 2*min_dist ){
                 good_matches.push_back( matches[i]);
             }
         }

         //-- Draw only "good" matches
         Mat img_matches;
         drawMatches( img1, keypoints1, img2, keypoints2,
                        good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
                        vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

         //-- Show detected matches
         imshow( "Good Matches", img_matches );
}
Example #14
0
void findTopFiveFLANNMatches(Mat hqDesc, vector<Mat>* keyframeDesc, vector<vector< DMatch >>* matchVec, vector<int>* matchIndices){
	FlannBasedMatcher matcher;
	int index = 0;

	//Calculate matches between high quality image and 
	for (vector<Mat>::iterator it = keyframeDesc->begin(); it != keyframeDesc->end(); ++it){
		vector< DMatch > matches;

		//calculate initial matches
		Mat kfDesc = *it;
		matcher.match(hqDesc, kfDesc, matches);

		//determine good matches
		double max_dist = 0; double min_dist = 100;

		//-- Quick calculation of max and min distances between keypoints
		for (int i = 0; i < hqDesc.rows; i++)
		{
			double dist = matches[i].distance;
			if (dist < min_dist) min_dist = dist;
			if (dist > max_dist) max_dist = dist;
		}

		std::vector< DMatch > good_matches;
		for (int i = 0; i < hqDesc.rows; i++)
		{
			if (matches[i].distance <= max(2 * min_dist, 0.02))
			{
				good_matches.push_back(matches[i]);
			}
		}


		matchVec->push_back(good_matches);
		index++;
	}
	//pickTopFive
	pickTopFive(matchVec, matchIndices);
	index = 0;
}
Example #15
0
int main(int argc, char** argv)
{
	std::vector<KeyPoint> keypoints_1;
	vector<vector<KeyPoint>>keypoints = vector<vector<KeyPoint>>();
	//////////////////////////////////////////////////////////////////////////
	Ptr<FeatureDetector> detector = xfeatures2d::SIFT::create();
	Ptr<DescriptorExtractor> extractor = xfeatures2d::SIFT::create();
	Ptr<DescriptorMatcher> matcher = makePtr<FlannBasedMatcher>(); // avoids leaking a raw new FlannBasedMatcher
	//////////////////////////////////////////////////////////////////////////
	Mat image, descriptor, homography;
	Mat posters[7], descriptors[7];
	vector<DMatch> matches = vector<DMatch>();

	//detects and extracts the local features
	openImage("poster_test.jpg", image);

	detector->detect(image, keypoints_1);
	extractor->compute(image, keypoints_1, descriptor);

	for (int i = 0;i < 7;i++)
	{
		vector<KeyPoint> keypoint;
		openImage("poster" + std::to_string(i + 1) + ".jpg", posters[i]);
		detector->detect(posters[i], keypoint);
		extractor->compute(posters[i], keypoint, descriptors[i]);
		keypoints.push_back(keypoint);
	}

	matcher->match(descriptor, descriptors[5], matches);

	filterMatchesByAbsoluteValue(matches, 90);

	homography = filterMatchesRANSAC(matches, keypoints_1, keypoints[5]);

	showResult(image, keypoints_1, posters[5], keypoints[5], matches, homography);

	return 0;
}
Example #16
0
 std::vector< DMatch > matcher (Mat descripteur1,Mat descripteur2){
    // returns only the indices of the "good" points

    // retrieve the regions the images have in common
    FlannBasedMatcher matcher;
    std::vector<DMatch> matches;

    if(descripteur1.type()!=CV_32F) {
        descripteur1.convertTo(descripteur1, CV_32F); // type issue: the matcher takes float descriptors, not binary ones
    }

    if(descripteur2.type()!=CV_32F) {
        descripteur2.convertTo(descripteur2, CV_32F);
    }

    matcher.match( descripteur1, descripteur2, matches );

    double dmax = 0; double dmin= 1000;

    // compute the max and min distances between the matches
    for( int i = 0; i < matches.size(); i++ )
    {
        double d = matches[i].distance;
        if( d<dmin )
            dmin=d;
        if( d>dmax)
            dmax=d;
    }

    // we only want to keep the "good common regions" -> "good regions" still needs defining!!!
    std::vector<DMatch> bonMatches;
    for (int i=0;i<matches.size();i++)
    {
        if (matches[i].distance<=2*dmin) // 2*minimum distance is chosen arbitrarily
            bonMatches.push_back(matches[i]);
    }
     return bonMatches;

}
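Converting binary descriptors to CV_32F, as the function above does, only makes them acceptable to FLANN's default KD-tree index; it does not give a true Hamming-space search. A minimal sketch (not from the original code) of a FlannBasedMatcher configured with an LSH index, which handles binary CV_8U descriptors directly; the descriptor variables are placeholders:

// Sketch: FLANN with an LSH index for binary descriptors (e.g. ORB/BRIEF)
FlannBasedMatcher lshMatcher(makePtr<cv::flann::LshIndexParams>(12, 20, 2));
std::vector<DMatch> matches;
lshMatcher.match(binaryDescriptors1, binaryDescriptors2, matches);  // hypothetical inputs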
//static void matchDescriptors( const Mat& queryDescriptors, const vector<Mat>& trainDescriptors,
                       //vector<DMatch>& matches, FlannBasedMatcher& descriptorMatcher )
static void matchDescriptors( const Mat& queryDescriptors, const vector<Mat>& trainDescriptors,
                       vector<DMatch>& matches, FlannBasedMatcher& descriptorMatcher, const vector<Mat>& trainImages, const vector<string>& trainImagesNames )

{
    cout << "< Set train descriptors collection in the matcher and match query descriptors to them..." << endl;

    descriptorMatcher.add( trainDescriptors );
    descriptorMatcher.train();

    descriptorMatcher.match( queryDescriptors, matches );

    CV_Assert( queryDescriptors.rows == (int)matches.size() || matches.empty() );

    cout << "Number of matches: " << matches.size() << endl;
    cout << ">" << endl;

    for( int i = 0; i < trainDescriptors.size(); i++){

        std::vector< std::vector< DMatch> > matches2;

        std::vector< DMatch > good_matches;

        descriptorMatcher.knnMatch( queryDescriptors, trainDescriptors[i], matches2, 2);
        CV_Assert( queryDescriptors.rows == (int)matches2.size() || matches2.empty() );

        for (int j = 0; j < matches2.size(); ++j){
            const float ratio = 0.8; // As in Lowe's paper; can be tuned
            if (matches2[j][0].distance < ratio * matches2[j][1].distance){
                good_matches.push_back(matches2[j][0]);
            }

        }

        cout << "currentMatchSize : " << good_matches.size() << endl;

    }

    
}
Example #18
0
std::vector<DMatch> Match::vecMatches(Mat * img1, Mat * img2,
		Mat &descriptors_object, vector<KeyPoint> &keypoints_object,
		vector<KeyPoint> &keypoints_scene)
{
	Mat img_object = *img1;
	Mat img_scene = *img2;

	/*
	  if( !img_object.data || !img_scene.data )
	  { std::cout<< " --(!) Error reading images " << std::endl; return -1; }
	 */

	//-- Step 1: Detect the keypoints using SURF Detector
	int minHessian = 400;

	SurfFeatureDetector detector(minHessian);

	//std::vector<KeyPoint> keypoints_object, keypoints_scene;

	detector.detect(img_object, keypoints_object); //TODO: use the third argument here? from previous match?
	detector.detect(img_scene, keypoints_scene);

	//-- Step 2: Calculate descriptors (feature vectors)
	SurfDescriptorExtractor extractor;

	Mat /*descriptors_object,*/ descriptors_scene;

	extractor.compute(img_object, keypoints_object, descriptors_object);
	extractor.compute(img_scene, keypoints_scene, descriptors_scene);

	//-- Step 3: Matching descriptor vectors using FLANN matcher
	FlannBasedMatcher matcher;
	std::vector<DMatch> matches;
	matcher.match(descriptors_object, descriptors_scene, matches);

	return matches;
}
void trainKNN(){

    cout << "(1) Loading Training Set ...\n";

    //read dictionary

    FileStorage fsDict("dictionary.yml", FileStorage::READ);
    fsDict["vocabulary"] >> dictionary;
    fsDict.release();

    //read training data
    Mat trainingData;
    FileStorage fs("training_set.yml", FileStorage::READ);
    fs["training_set"] >> trainingData;
    fs.release();

    Mat test_features;



    cout << "(2) Loading Training Classes\n";

    vector<int> trainingClasses = readClass("training_classes.txt");
    vector<float> trng_class_array(trainingClasses.begin(), trainingClasses.end()); //copy contents of vector to float storage (avoids a non-standard VLA)
    Mat trainingClasses_mat = Mat(1, (int)trainingClasses.size(), CV_32FC1, trng_class_array.data());

    vector<int> testingClasses = readClass("testing_classes.txt");
    vector<float> testingClasses_array(testingClasses.begin(), testingClasses.end()); //copy contents of vector to float storage (avoids a non-standard VLA)
    Mat testingClasses_mat = Mat(1, (int)testingClasses.size(), CV_32FC1, testingClasses_array.data());
    testingClasses_mat = testingClasses_mat.t();

    Mat predicted(testingClasses_mat.rows, 1, CV_32F);

    int K = 1;

    cout << "(3) Initializing knn classifier \n";
    cv::KNearest knn(trainingData, trainingClasses_mat.t(), cv::Mat(), false, K);

///==============================================================================================

        vector<string> filenames = readFile("test_files.txt", "../CS296/data/testing_images/");

for(int i=0; i<filenames.size();i++){
    string file = filenames.at(i); // index with the loop variable, not a fixed element
    Mat img1 = imread(file.c_str(), CV_LOAD_IMAGE_GRAYSCALE );

    Mat img1_equalized;


    cout << "\nProcessing image: " << file << " -  " << img1.rows << "x" << img1.cols << " iteration - " << i << endl;
    img1_equalized = img1.clone();

    equalizeHist( img1, img1_equalized); // histogram equalization
    cout << "\n\n(1) Image histogram equalized ... ";

    Mat img1_sift = img1_equalized.clone();
    Mat sift_descriptors, sift_dest;
    vector<KeyPoint> keypoints;

    //SiftDescriptorExtractor detector_sift;
    //detector_sift.detect(img1_sift, keypoints);
    //detector_sift.compute(img1_sift, keypoints,sift_descriptors);

    SIFT sift(50,3,0.004);
    sift(img1_equalized, noArray(), keypoints, sift_descriptors, false); // second argument is the detection mask, not an output image

    drawKeypoints(img1_sift, keypoints, sift_dest, Scalar::all(-1), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);

    cout << "\n\n(2) SIFT Descriptor for image computed:\n";
    cout << "Keypoints detected: " << keypoints.size() << endl;
    cout << "Feature Vector: " << sift_descriptors.size();

    //lbp<unsigned char>(img1_equalized,  dest_lbp, radius_lbp, neighbor_lbp); /// Local Binary Pattern

    Mat dest_lbp;
    orig_lbp<unsigned char>(img1_equalized, dest_lbp);
    cout << "\n\n(3) LBP Computed Image: " << dest_lbp.size()  <<endl;

    Mat dest_lbp_out = dest_lbp.clone();
    drawKeypoints(dest_lbp_out, keypoints, dest_lbp_out, Scalar::all(-1), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);

    //obtain patch values per keypoint (8x8)

    Mat patch;
    Mat lbp_descriptors;
    int patchSize = 8;

    Size size = Size(patchSize , patchSize); //square patchSize x patchSize region (8x8 patch)

    for(int i=0; i<keypoints.size();i++){

        Point2f center(keypoints.at(i).pt.x,keypoints.at(i).pt.y);
        getRectSubPix(dest_lbp,size,center,patch);
        patch = patch.reshape(0,1); //flatten matrix to 1-D Vector

        lbp_descriptors.push_back(patch);
    }

    cout << "LBP Descriptors computed ... " << endl;
    cout << "LBP Vector: [" << lbp_descriptors.rows << " x" << lbp_descriptors.cols << "]" << endl;

    lbp_descriptors.convertTo(lbp_descriptors,CV_32FC1);

    Mat sift_lbp_features;

    hconcat(sift_descriptors, lbp_descriptors, sift_lbp_features);


//==============================================================================

    //create a nearest neighbor matcher
    FlannBasedMatcher matcher;
    std::vector< DMatch > matches;

    matcher.match( sift_lbp_features, dictionary, matches );

    cout << "Number of matches: " << matches.size() << endl;

    vector<float> bins(dictionary.rows, 0.0f); // std::vector instead of a non-standard VLA

    for (int i =0; i<matches.size(); i++){

       bins[matches.at(i).trainIdx] += 1; //update number of bins

    }


    Mat norm_bins(1,dictionary.rows,CV_32F,bins.data());
    normalize( norm_bins, norm_bins, 0, 1, NORM_MINMAX, -1, Mat() );

    predicted.at<float>(i,0) = knn.find_nearest(norm_bins, K);

    cout << "\n\n Predicted class: " << predicted.at<float>(i,0) << " - " << testingClasses_mat.at<float>(i,0) <<  endl;
}
//=======================================================================================


//        plot_binary(testData, prediction, "Predictions Backpropagation");
//store the vocabulary
FileStorage fs2("predicted.yml", FileStorage::WRITE);
fs2 << "predicted" << predicted;
fs2.release();
    /* Mat outImage (img1.rows, img1.cols * 2, CV_8UC1);

    Mat c1(outImage, Rect(0, 0, img1.cols, img1.rows));
    Mat c2(outImage, Rect(img1.cols, 0, img1.cols, img1.rows));




    img1.copyTo(c1);
    img1_equalized.copyTo(c2);


    imshow("SIFT", sift_dest);
    imshow("LBP", dest_lbp_out);
    imshow("Input Image", outImage);*/

}
/**
 * @function main
 */
int main(int, char **argv)
{



    image1 = imread(argv[1], 1);
    image2 = imread(argv[2], 1);
    rows = image1.rows;
    cols = image1.cols;


    namedWindow("image1", WINDOW_AUTOSIZE);
    imshow("image1", image1);
    namedWindow("image2", WINDOW_AUTOSIZE);
    imshow("image2", image2);

    Mat image1_gray;
    Mat image2_gray;


    /// Converts an image from one color space to another.
    cvtColor(image1, image1_gray, COLOR_BGR2GRAY);
    cvtColor(image2, image2_gray, COLOR_BGR2GRAY);

    /// Detector parameters
    int blockSize = 2;
    int apertureSize = 3;
    double k = 0.04;

    /// Detecting corners
    /*
       void ocl::cornerHarris(const oclMat& src, oclMat& dst, int blockSize, int ksize, double k, int bordertype=cv::BORDER_DEFAULT)

       src – Source image. Only CV_8UC1 and CV_32FC1 images are supported now.
       dst – Destination image containing cornerness values. It has the same size as src and CV_32FC1 type.
       blockSize – Neighborhood size
       ksize – Aperture parameter for the Sobel operator
       k – Harris detector free parameter
       bordertype – Pixel extrapolation method. Only BORDER_REFLECT101, BORDER_REFLECT, BORDER_CONSTANT and BORDER_REPLICATE are supported now.
     */

    Mat image1dst;
    Mat image2dst;

    image1dst = Mat::zeros(image1.size(), CV_32FC1);
    image2dst = Mat::zeros(image2.size(), CV_32FC1);


    cornerHarris(image1_gray, image1dst, blockSize, apertureSize, k, BORDER_DEFAULT);
    cornerHarris(image2_gray, image2dst, blockSize, apertureSize, k, BORDER_DEFAULT);

    int threshHarris = 100;

    /// Normalizing
    /*
       void normalize(InputArray src, OutputArray dst, double alpha=1, double beta=0, int norm_type=NORM_L2, int dtype=-1, InputArray mask=noArray() )
       src – input array.
       dst – output array of the same size as src .
       alpha – norm value to normalize to or the lower range boundary in case of the range normalization.
       beta – upper range boundary in case of the range normalization; it is not used for the norm normalization.
       normType – normalization type (see the details below).
       dtype – when negative, the output array has the same type as src; otherwise, it has the same number of channels as src and the depth =CV_MAT_DEPTH(dtype).
       mask – optional operation mask.
     */

    Mat image1dst_norm;
    Mat image2dst_norm;

    normalize(image1dst, image1dst_norm, 0, 255, NORM_MINMAX, CV_32FC1, Mat());
    normalize(image2dst, image2dst_norm, 0, 255, NORM_MINMAX, CV_32FC1, Mat());

    /*
       On each element of the input array, the function convertScaleAbs performs three operations sequentially: scaling, taking an absolute value, conversion to an unsigned 8-bit type:

     */

    Mat image1dst_norm_scaled;
    Mat image2dst_norm_scaled;

    convertScaleAbs(image1dst_norm, image1dst_norm_scaled);
    convertScaleAbs(image2dst_norm, image2dst_norm_scaled);

    KeyPoint kp;

    for (int j = 0; j < image1dst_norm.rows; j++) {
	for (int i = 0; i < image1dst_norm.cols; i++) {
	    if ((int) image1dst_norm.at < float >(j, i) > threshHarris) {

		kp.pt.x = (float) i;	// column index is x
		kp.pt.y = (float) j;	// row index is y
		// necessary, not sure why (original author's note)
		kp.size = 100.0;
		keypoints1.push_back(kp);

	    }
	}
    }

    for (int j = 0; j < image2dst_norm.rows; j++) {
	for (int i = 0; i < image2dst_norm.cols; i++) {
	    if ((int) image2dst_norm.at < float >(j, i) > threshHarris) {


		kp.pt.x = (float) i;	// column index is x
		kp.pt.y = (float) j;	// row index is y
		// necessary, not sure why (original author's note)
		kp.size = 100.0;
		keypoints2.push_back(kp);

	    }
	}
    }


    BriefDescriptorExtractor briefDesc(64);

    Mat descriptors1, descriptors2;
    briefDesc.compute(image1, keypoints1, descriptors1);
    briefDesc.compute(image2, keypoints2, descriptors2);

    //Ptr<DescriptorMatcher> matcher =  new FlannBasedMatcher(DescriptorMatcher::create("Flann"));
    FlannBasedMatcher matcher;

    Mat descriptorAuxKp1;
    Mat descriptorAuxKp2;


    vector<int> associateIdx;

    for (int i = 0; i < descriptors1.rows; i++) {
	// copy row i of the descriptor matrix, i.e. the descriptor values for keypoints1[i]
	descriptors1.row(i).copyTo(descriptorAuxKp1);

	// collect only those image-2 descriptors whose keypoints we want to compare
	// against the image-1 keypoint currently being processed
	descriptorAuxKp2.create(0, 0, CV_8UC1);

	// associateIdx maps the indices returned by the matcher back to keypoint indices
	associateIdx.clear();


	for (int j = 0; j < descriptors2.rows; j++) {

	    float p1x = keypoints1[i].pt.x;
	    float p1y = keypoints1[i].pt.y;
	    float p2x = keypoints2[j].pt.x;
	    float p2y = keypoints2[j].pt.y;

	    float distance = sqrt(pow((p1x - p2x), 2) + pow((p1y - p2y), 2));

	    // among the image-2 descriptors, keep only those whose keypoints lie within a fixed distance of the current (i-th) image-1 keypoint
	    if (distance < 10) {

		descriptorAuxKp2.push_back(descriptors2.row(j));
		associateIdx.push_back(j);

	    }


	}
	// match this single image-1 keypoint against all retained image-2 keypoints;
	// clear the matcher first so descriptors added in earlier iterations do not accumulate
	// (note: FLANN's default index expects CV_32F descriptors, so binary BRIEF data
	// may need convertTo(..., CV_32F) or an LSH index)
	matcher.clear();
	matcher.add(descriptorAuxKp1);
	matcher.train();

	matcher.match(descriptorAuxKp2, matches);

	// restore proper index values on the matches
	for (int idxMatch = 0; idxMatch < matches.size(); idxMatch++) {
	    // the query side was the retained image-2 subset: map back to the original keypoint index
	    matches[idxMatch].trainIdx = associateIdx[matches[idxMatch].queryIdx];
	    // every match in this round was against image-1 keypoint i
	    matches[idxMatch].queryIdx = i;
	}

	// append the matches found for this keypoint to the global list
	matchesWithDist.insert(matchesWithDist.end(), matches.begin(), matches.end());


    }



// partially sort matchesWithDist by descriptor distance (not by euclidean distance)
    if (matchesWithDist.size() > 24) // nth_element needs at least 25 elements here
        nth_element(matchesWithDist.begin(), matchesWithDist.begin() + 24, matchesWithDist.end());
    // initial position
    // position of the sorted element
    // end position

    Mat imageMatches;
    Mat matchesMask;
    drawMatches(image1, keypoints1,	// 1st image and its keypoints
		image2, keypoints2,	// 2nd image and its keypoints
		matchesWithDist,	// the matches
		imageMatches,	// the image produced
		Scalar::all(-1),	// color of the lines
		Scalar(255, 255, 255)	//color of the keypoints
	);


    namedWindow(matches_window, CV_WINDOW_AUTOSIZE);
    imshow(matches_window, imageMatches);
    imwrite("resultat.png", imageMatches);



    /// Create a window and a trackbar
    namedWindow(transparency_window, WINDOW_AUTOSIZE);
    createTrackbar("Threshold: ", transparency_window, &thresh, max_thresh, interface);








    interface(0, 0);

    waitKey(0);
    return (0);
}
int main(int argc, char** argv)
{
  if(argc != 3)
  {
    return -1;
  }
  
  Mat img_object = imread(argv[1],1);
  Mat img_scene = imread(argv[2],1);
  //String ImageName = (argv[3]);
  
  if(!img_object.data || !img_scene.data)
  {
    cout << "--(!)Error leyendo imagenes " << endl;
    return -1;
  }
  
  int minHessian = 400;
  SurfFeatureDetector detector(minHessian);
  
  vector<KeyPoint>keypoints_object, keypoints_scene;
  detector.detect( img_object, keypoints_object );
  detector.detect( img_scene, keypoints_scene );
  
  //-- Step 2: Calculate descriptors (feature vectors)
  
  Mat descriptors_object, descriptors_scene;
  
  SurfDescriptorExtractor extractor;
  extractor.compute( img_object, keypoints_object, descriptors_object );
  extractor.compute( img_scene, keypoints_scene, descriptors_scene );
  
  //-- Step 3: Matching descriptor vectors using FLANN matcher
  FlannBasedMatcher matcher;
  
  vector< DMatch > matches;
  matcher.match( descriptors_object, descriptors_scene, matches );
  
  //////////////////////////////////////////////////////////////////////////////
  
  double max_dist = 0; double min_dist = 100;

  //-- Quick calculation of max and min distances between keypoints
  for( int i = 0; i < descriptors_object.rows; i++ )
  { 
    double dist = matches[i].distance;
    if( dist < min_dist ) min_dist = dist;
    if( dist > max_dist ) max_dist = dist;
  }
  
  printf("-- Max dist : %f \n", max_dist );
  printf("-- Min dist : %f \n", min_dist );
  
  cout << "Initializing Good Matches" << endl;
  //-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
  vector< DMatch > good_matches;
  
  for( int i = 0; i < descriptors_object.rows; i++ )
  { 
    if( matches[i].distance < 3*min_dist )
     { 
       good_matches.push_back( matches[i]); 
     }
  }
  
  Mat img_matches;
  namedWindow("Good Matches & Object detection", 0);
  drawMatches( img_object, keypoints_object, img_scene, keypoints_scene,
               good_matches, img_matches, Scalar::all(-1), Scalar(0,0,255),
               vector<char>() );
               
  //////////////////////////////////////////////////////////////////////////////
  cout << "Finding Object " << endl;
  //-- Localize the object
  vector<Point2f> obj;
  vector<Point2f> scene;
  
  for( int i = 0; i < good_matches.size(); i++ )
  {
    //-- Get the keypoints from the good matches
    obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
    scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
  }
  
  Mat H = findHomography( obj, scene, CV_LMEDS );
  
  //-- Get the corners from the image_1 ( the object to be "detected" )
  vector<Point2f> obj_corners(4);
  obj_corners[0] = cvPoint(0,0); 
  obj_corners[1] = cvPoint( img_object.cols, 0);
  obj_corners[2] = cvPoint( img_object.cols, img_object.rows); 
  obj_corners[3] = cvPoint( 0, img_object.rows);
  vector<Point2f> scene_corners(4);
  
  perspectiveTransform( obj_corners, scene_corners, H);
  
  //-- Draw lines between the corners (the mapped object in the scene - image_2 )
  line( img_matches, scene_corners[0] + Point2f( img_object.cols, 0), 
                     scene_corners[1] + Point2f( img_object.cols, 0), Scalar(0, 255, 0), 4 );
  line( img_matches, scene_corners[1] + Point2f( img_object.cols, 0), 
                     scene_corners[2] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
  line( img_matches, scene_corners[2] + Point2f( img_object.cols, 0), 
                     scene_corners[3] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
  line( img_matches, scene_corners[3] + Point2f( img_object.cols, 0), 
                     scene_corners[0] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
  
  //-- Show detected matches
  imwrite("../data/Example.jpg",img_matches );
  imshow( "Good Matches & Object detection", img_matches );
  cout << "Write image " << endl;
  
  waitKey(0);
  return 0;
}
Example #22
0
int main(int argc, char** argv) {
  gval_debug_init();

  // get options
  int showhelp = 0;
  unsigned int n_cluster = 0;

  int opt;
  while ((opt = getopt(argc, argv, "k:h")) != -1) {
    switch (opt) {
      case 'h':
        showhelp = 1;
        break;
      case 'k':
        n_cluster = atoi(optarg);
        break;
      default:
        showhelp = 1;
        break;
    }
  }

  if (showhelp || n_cluster <= 0 || argc - optind < 2) {
    fprintf(stderr, "Usage: %s -k n_cluster input_dir output\n", argv[0]);
    return EXIT_SUCCESS;
  }

  path p(argv[optind]);
  if (!exists(p) || !is_directory(p)) {
    fprintf(stderr, "%s is not a directory\n", argv[optind]);
    return EXIT_FAILURE;
  }

  BOWKMeansTrainer bow(n_cluster);

  directory_iterator dir_end;
  for (directory_iterator i(p); i != dir_end; i++) {
    if (i->path().extension() == DESCRIPTORS_EXT) {
      FILE* in = fopen(i->path().c_str(), "r");
      assert(in);
      int counter = 0;
      int nempty = 0;
      Mat* desc = (Mat*) gval_read_cvmat(in);
      while (desc != NULL) {
        counter++;
        if (!desc->empty()) {
          nempty++;
          bow.add(desc->clone());
        }
        gval_free_cvmat(desc);
        desc = (Mat*) gval_read_cvmat(in);
      }
      fclose(in);
      fprintf(stderr, "Read from file %s (%d/%d)\n",
          i->path().c_str(), nempty, counter);
    }
  }

  fprintf(stderr, "Clustering (%d descriptors, %d clusters)...",
      bow.descripotorsCount(), n_cluster);
  Mat voc = bow.cluster();
  fprintf(stderr, " Done\n");

  fprintf(stderr, "Counting document frequency...");
  int* dfcounter = (int*) calloc(n_cluster, sizeof(int));
  vector<Mat> all = bow.getDescriptors();
  FlannBasedMatcher matcher;
  matcher.add(vector<Mat>(1, voc));
  for (vector<Mat>::const_iterator it = all.begin();
      it != all.end(); it++) {
    vector<int> ct(n_cluster, 0);
    vector<DMatch> matches;
    matcher.match(*it, matches);
    for (vector<DMatch>::const_iterator jt = matches.begin();
        jt != matches.end(); jt++) {
      assert(jt->trainIdx >= 0 && jt->trainIdx < (int)n_cluster);
      ct[jt->trainIdx] = 1;
    }

    for (int j = 0; j < n_cluster; j++) {
      dfcounter[j] += ct[j];
    }
  }
  int dtotal = all.size();
  fprintf(stderr, " Done\n");

  FILE* out = fopen(argv[optind + 1], "w");
  assert(out);
  gval_write_cvmat(&voc, out);
  fwrite(dfcounter, sizeof(int), n_cluster, out);
  fwrite(&dtotal, sizeof(int), 1, out);

  // debug
  fprintf(stderr, "total:%d\n", dtotal);
  for (int j = 0; j < n_cluster; j++) {
    fprintf(stderr, "%d:%d\n", j, dfcounter[j]);
  }

  fclose(out);
  fprintf(stderr, "Written to %s\n", argv[optind + 1]);

  free(dfcounter);

  return EXIT_SUCCESS;
}
/**
 * @function main
 * @brief Main function
 */
int main( int argc, char** argv )
{
 // if( argc != 3 )
 // { readme(); return -1; }

  Mat img_1 = imread( "/home/mac/Documents/PROJECT/SIFT/a.JPG", CV_LOAD_IMAGE_GRAYSCALE );
  Mat img_2 = imread( "/home/mac/Documents/PROJECT/SIFT/r.JPG", CV_LOAD_IMAGE_GRAYSCALE );

  if( !img_1.data || !img_2.data )
  { std::cout<< " --(!) Error reading images " << std::endl; return -1; }

  //-- Step 1: Detect the keypoints using SIFT Detector (SURF variant commented out below)
  int minHessian = 400;

  //SurfFeatureDetector detector( minHessian );
  cv::SiftFeatureDetector detector;
  std::vector<KeyPoint> keypoints_1, keypoints_2;

  detector.detect( img_1, keypoints_1 );
  detector.detect( img_2, keypoints_2 );

  //-- Step 2: Calculate descriptors (feature vectors)
  SurfDescriptorExtractor extractor;

  Mat descriptors_1, descriptors_2;

  extractor.compute( img_1, keypoints_1, descriptors_1 );
  extractor.compute( img_2, keypoints_2, descriptors_2 );

  //-- Step 3: Matching descriptor vectors using FLANN matcher
  FlannBasedMatcher matcher;
  std::vector< DMatch > matches;
  matcher.match( descriptors_1, descriptors_2, matches );

  double max_dist = 0; double min_dist = 100;

  //-- Quick calculation of max and min distances between keypoints
  for( int i = 0; i < descriptors_1.rows; i++ )
  { double dist = matches[i].distance;
    if( dist < min_dist ) min_dist = dist;
    if( dist > max_dist ) max_dist = dist;
  }

  printf("-- Max dist : %f \n", max_dist );
  printf("-- Min dist : %f \n", min_dist );

  //-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist,
  //-- or a small arbitrary value ( 0.02 ) in the event that min_dist is very
  //-- small)
  //-- PS.- radiusMatch can also be used here.
  std::vector< DMatch > good_matches;

  for( int i = 0; i < descriptors_1.rows; i++ )
  { if( matches[i].distance <= max(2*min_dist, 0.02) )
    { good_matches.push_back( matches[i]); }
  }

  //-- Draw only "good" matches
  Mat img_matches;
  drawMatches( img_1, keypoints_1, img_2, keypoints_2,
               good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
               vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

  //-- Show detected matches
  imshow( "Good Matches", img_matches );

  for( int i = 0; i < (int)good_matches.size(); i++ )
  { printf( "-- Good Match [%d] Keypoint 1: %d  -- Keypoint 2: %d  \n", i, good_matches[i].queryIdx, good_matches[i].trainIdx ); }

  waitKey(0);

  return 0;
}
Example #24
0
void imageCallback(const sensor_msgs::ImageConstPtr& msg)
	{	

	
	if (count1 ==0)
		{

		cv_bridge::CvImagePtr cv_ptr;
		try
		    {
		      cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
		    }
		    catch (cv_bridge::Exception& e)
		    {
		      ROS_ERROR("cv_bridge exception: %s", e.what());
		      return;
		    }
			img_1 = cv_ptr->image;	
			imageToDraw = img_1;
                        count1++;
			img_tot=img_1; 
		}


	else
		{
			
		cv_bridge::CvImagePtr cv_ptr;
		try
		    {
		      cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
		    }
		    catch (cv_bridge::Exception& e)
		    {
		      ROS_ERROR("cv_bridge exception: %s", e.what());
		      return;
		    }
   
			img_2 = cv_ptr->image;

   
			  if( !img_1.data || !img_2.data )
			  { std::cout<< " --(!) Error reading images " << std::endl; return; }

			// canny detection

			//dst.create(img_1.size(), img_1.type() );
			
    			namedWindow( window_name, CV_WINDOW_AUTOSIZE );

 			 /// Create a Trackbar for user to enter threshold
  			createTrackbar( "Min Threshold:", window_name, &lowThreshold, max_lowThreshold, CannyThreshold1 );

 			/// Show the image
 			CannyThreshold1(0, 0);
			CannyThreshold2(0, 0); 
			
			//img_1 = detected_edges_1;
			//img_2 = detected_edges_2;


			  //-- Step 1: Detect the keypoints using SURF Detector
			  int minHessian = 400;

			  SurfFeatureDetector detector( minHessian );

			  std::vector<KeyPoint> keypoints_1, keypoints_2;

			  detector.detect( img_1, keypoints_1 );
			  detector.detect( img_2, keypoints_2 );

			  //-- Step 2: Calculate descriptors (feature vectors)
			  SurfDescriptorExtractor extractor;

			  Mat descriptors_1, descriptors_2;

			  extractor.compute( img_1, keypoints_1, descriptors_1 );
			  extractor.compute( img_2, keypoints_2, descriptors_2 );

			  //-- Step 3: Matching descriptor vectors using FLANN matcher
			  FlannBasedMatcher matcher;
			  std::vector< DMatch > matches;
			  matcher.match( descriptors_1, descriptors_2, matches );

			  double max_dist = 0; double min_dist = 100;

			  //-- Quick calculation of max and min distances between keypoints
			  for( int i = 0; i < descriptors_1.rows; i++ )
			  { double dist = matches[i].distance;
			    if( dist < min_dist ) min_dist = dist;
			    if( dist > max_dist ) max_dist = dist;
			  }

			  printf("-- Max dist : %f \n", max_dist );
			  printf("-- Min dist : %f \n", min_dist );

			  //-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist,
			  //-- or a small arbitrary value ( 0.02 ) in the event that min_dist is very
			  //-- small)
			  //-- PS.- radiusMatch can also be used here.
			  std::vector< DMatch > good_matches;
			  std::vector<KeyPoint> good_keypoints_1, good_keypoints_2;

			  for( int i = 0; i < descriptors_1.rows; i++ )
			  { if( matches[i].distance <= 3*min_dist )
			    { good_matches.push_back( matches[i]);
			      good_keypoints_1.push_back(keypoints_1[matches[i].queryIdx]);
			      good_keypoints_2.push_back(keypoints_2[matches[i].trainIdx]);
			     }
			  }

			  //-- Draw only "good" matches
			  Mat img_matches;
			  drawMatches( img_1, keypoints_1, img_2, keypoints_2,
				       good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
				       vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

			  //-- Show detected matches
			  imshow( "Good Matches", img_matches );
			
			//float deltax, deltay;
                         std::vector<float> deltax(good_matches.size()), deltay(good_matches.size()); // std::vector instead of non-standard VLAs
			 float sum_deltax=0, sum_deltay=0;

			  for( int i = 0; i < (int)good_matches.size(); i++ )
			  { 
			   
			   // printf( "-- Good Match [%d] Keypoint 1: %d  -- Keypoint 2: %d  \n", i, good_matches[i].queryIdx, good_matches[i].trainIdx );
			   // cout << "-- Good Match Keypoint 1:" << keypoints_1[good_matches[i].queryIdx].pt.x << endl;
   			   // cout << "-- Good Match Keypoint 2:" << keypoints_2[good_matches[i].trainIdx].pt.x << endl;
				
				deltax[i] = keypoints_2[good_matches[i].trainIdx].pt.x - keypoints_1[good_matches[i].queryIdx].pt.x;
				deltay[i] = keypoints_2[good_matches[i].trainIdx].pt.y - keypoints_1[good_matches[i].queryIdx].pt.y;

			  sum_deltax += deltax[i];
			  sum_deltay += deltay[i];
		

			  }

			float av_deltax = sum_deltax/(int)good_matches.size();
			float av_deltay = sum_deltay/(int)good_matches.size();

			float av_deltax2 = 0, av_deltay2 =0;

			cout << "before: av_deltax " << av_deltax << " av_deltay " << av_deltay << endl;
			
			int count2=0;

			for( int i = 0; i < (int)good_matches.size(); i++ )
			  {
			  if ((fabs(deltax[i]-av_deltax) < 50 ) && (fabs(deltay[i]-av_deltay)<50))
			  	{
				av_deltax2 += deltax[i];
				av_deltay2 += deltay[i];
				count2++;	      			
				}
			  }
			
			if (count2>0)
			{av_deltax2 = -av_deltax2/count2;
			 av_deltay2 = -av_deltay2/count2;
			cout << "after: av_deltax " << av_deltax << " av_deltay " << av_deltay << endl;
			}
			
			else
			{cout << "ATTENZIONE:keeping the old value " << endl;
			av_deltax2=0;
			av_deltay2=0;}
			
			 Pt_new.x=Pt_old.x+av_deltax2;
			 Pt_new.y=Pt_old.y+av_deltay2;

			cout << "Pt_new.x " << Pt_new.x << " Pt_old.x " << Pt_old.x << endl;
			
			line(imageToDraw, Pt_new, Pt_old, Scalar(255, 255, 255), 2);
			
			imshow( "Trajectory", imageToDraw );

			//Stitcher stitcher = Stitcher::createDefault(); 
	
			//Mat rImg;

			//vector< Mat > vImg; 

			//vImg.push_back(img_1);  
 			//vImg.push_back(img_2);  

			//Stitcher::Status status = stitcher.stitch(vImg, rImg);
			//if (Stitcher::OK == status)   
 			//imshow("Stitching Result",rImg);  
  			//else  
  			//printf("Stitching fail.");  

			 std::vector< Point2f > obj;
 			std::vector< Point2f > scene;

			for( int i = 0; i < good_matches.size(); i++ )
 			{
			 //-- Get the keypoints from the good matches
			 obj.push_back( keypoints_1[ good_matches[i].queryIdx ].pt );
			 scene.push_back( keypoints_2[ good_matches[i].trainIdx ].pt );
			 }
			 
			// Find the Homography Matrix
			 Mat H = findHomography( obj, scene, CV_RANSAC );
			 // Use the Homography Matrix to warp the images
			 cv::Mat result;
			 warpPerspective(img_1,result,H,cv::Size(img_1.cols+img_tot.cols,img_1.rows));
			 cv::Mat half(result,cv::Rect(0,0,img_tot.cols,img_tot.rows));
			 img_tot.copyTo(half);
			 imshow( "Result", result );


			


			Pt_old = Pt_new;
			img_1 = img_2;

		        waitKey(11);
		}
	}	
//SURF Extraction, with Flann Matcher
void surfAlgo (int, void*)
{
	vector<KeyPoint> keyPoints_orig, keyPoints_trans;
	Mat descriptor_orig, descriptor_trans;
	double max_dist=0;double min_dist=100;
	std::vector < DMatch > good_matches;
	
	FlannBasedMatcher matcher;
	std::vector<DMatch>matches;

	SurfFeatureDetector detector(1500);
	SurfDescriptorExtractor extractor;

	cvtColor (src_2,src_trans,CV_BGR2GRAY);
	cvtColor (src_1,src_orig,CV_BGR2GRAY);

	detector.detect(src_orig, keyPoints_orig);
	detector.detect(src_trans, keyPoints_trans);

	extractor.compute(src_orig, keyPoints_orig, descriptor_orig);
	extractor.compute(src_trans, keyPoints_trans, descriptor_trans);
	
	matcher.match(descriptor_orig, descriptor_trans, matches);
	
	for (int count=0;count<descriptor_orig.rows;count++)
	{
		double dist=matches[count].distance;
		if( dist < min_dist ) min_dist=dist;
		if (dist > max_dist) max_dist = dist;
	}

	
	for(int count = 0; count < descriptor_orig.rows; count++)
	{
		if( matches[count].distance < 3*min_dist )
			{
				good_matches.push_back( matches[count]);
			}
	}

	//drawKeypoints(src_trans, keyPoints_trans, src_trans, Scalar(0,0,255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
	//drawKeypoints(src_orig, keyPoints_orig, src_orig, Scalar(255,0,0), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);	
	
	//drawMatches ( src_orig, keyPoints_orig, src_trans, keyPoints_trans, matches, src_trans);

	drawMatches( src_orig, keyPoints_orig, src_trans, keyPoints_trans,good_matches,src_new,Scalar(0,0,255),Scalar(0,0,255),vector<char>(),DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

	std::vector<Point2f> obj;
	std::vector<Point2f> scene;

	for (int count=0; count<good_matches.size();count++)
	{
		// iterate over good_matches; queryIdx indexes keyPoints_orig (the query set),
		// trainIdx indexes keyPoints_trans (the train set)
		obj.push_back(keyPoints_trans[ good_matches[count].trainIdx].pt);
		scene.push_back(keyPoints_orig[ good_matches[count].queryIdx].pt);
	}

/*	
 	Code work in progress

	Mat H = findHomography( obj, scene, CV_RANSAC );
	std::vector<Point2f> object_corners(4);
  	std::vector<Point2f> scene_corners(4);


	object_corners[0] = cvPoint (0,0);
       	object_corners[1] = cvPoint (src_trans.cols,0);
	object_corners[2]= cvPoint (src_trans.cols,src_trans.rows);
	object_corners[3] = cvPoint (0,src_trans.rows);

	perspectiveTransform( object_corners, scene_corners, H);

	line (src_new, scene_corners[0]+Point2f(src_trans.cols,0), scene_corners[1]+Point2f(src_trans.cols,0),Scalar(0,255,0),4);
	line (src_new, scene_corners[1]+Point2f(src_trans.cols,0), scene_corners[2]+Point2f(src_trans.cols,0),Scalar(0,255,0),4);
	line (src_new, scene_corners[2]+Point2f(src_trans.cols,0), scene_corners[3]+Point2f(src_trans.cols,0),Scalar(0,255,0),4);
	line (src_new, scene_corners[3]+Point2f(src_trans.cols,0), scene_corners[0]+Point2f(src_trans.cols,0),Scalar(0,255,0),4);
*/
	imwrite("/Users/brainwave/Desktop/testrun1.jpg",src_new);
	resize(src_new,src_new,Size(),0.3,0.3,CV_INTER_LINEAR);

	imshow(window_name, src_new);

}	
Example #26
0
int main(int argc, char **argv) {
    if (argc != 3) {
        printUsage();
        return -1;
    }

    cv::Rect finalRect;
    vector<cv::Rect> boundingRects;
    vector<Point2f> tlPoints;
    vector<Point2f> brPoints;

    vidFileName = string(argv[1]);
    featFileName = string(argv[2]);

    // Get BOINC resolved file paths.
#ifdef _BOINC_APP_
    string resolved_vid_path;
    string resolved_feat_path;
    int retval = boinc_resolve_filename_s(vidFileName.c_str(), resolved_vid_path);
    if (retval) {
        cerr << "Error, could not open file: '" << vidFileName << "'" << endl;
        cerr << "Resolved to: '" << resolved_vid_path << "'" << endl;
        return -1; // nonzero exit on error
    }
    vidFileName = resolved_vid_path;

    retval = boinc_resolve_filename_s(featFileName.c_str(), resolved_feat_path);
    if (retval) {
        cerr << "Error, could not open file: '" << featFileName << "'" << endl;
        cerr << "Resolved to : '" << resolved_feat_path << "'" << endl;
        return -1; // nonzero exit on error
    }
    featFileName = resolved_feat_path;
#endif

    CvCapture *capture = cvCaptureFromFile(vidFileName.c_str());

    Mat descriptors_file;
    FileStorage infile(featFileName, FileStorage::READ);
    if (infile.isOpened()) {
        read(infile["Descriptors"], descriptors_file);
        infile.release();
    } else {
        cout << "Feature file " << featFileName << " does not exist." << endl;
        exit(-1);
    }
    cout << "Descriptors: " << descriptors_file.size() << endl;

    int framePos = cvGetCaptureProperty(capture, CV_CAP_PROP_POS_FRAMES);
    int total = cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_COUNT);

    double fps = cvGetCaptureProperty(capture, CV_CAP_PROP_FPS);
    int framesInThreeMin = (int)(fps * 180);

#ifdef _BOINC_APP_  
    boinc_init();
#endif

    cerr << "Video File Name: " << vidFileName << endl;
    cerr << "Feature File Name: " << featFileName << endl; 
    cerr << "Frames Per Second: " << fps << endl;
    cerr << "Frame Count: " << total << endl;
    cerr << "Number of Frames in Three Minutes: " << framesInThreeMin << endl;
//    cerr << "<slice_probabilities>" << endl;

    checkpoint_filename = "checkpoint.txt";

    if(read_checkpoint()) {
        if(checkpointVidFileName.compare(vidFileName)!=0 || checkpointFeatFileName.compare(featFileName)!=0) {
            cerr << "Checkpointed video or feature filename was not the same as given video or feature filename... Restarting" << endl;
        } else {
            cerr << "Continuing from checkpoint..." << endl;
        }
    } else {
        cerr << "Unsuccessful checkpoint read" << endl << "Starting from beginning of video" << endl;
    }

    skipNFrames(capture, percentages.size() * framesInThreeMin);
    framePos = cvGetCaptureProperty(capture, CV_CAP_PROP_POS_FRAMES);
    cerr << "Starting at Frame: " << framePos << endl;

    long start_time = time(NULL);

    while ((double)framePos/total < 1.0) {

        if (framePos % 10 == 0) {
            cout << "FPS: " << framePos/((double)time(NULL) - (double)start_time) << endl;
        }

        //cout << framePos/total << endl;
        IplImage *rawFrame = cvQueryFrame(capture);
        if (rawFrame == NULL) break; // end of stream or decode failure
        Mat frame(cvarrToMat(rawFrame));
        framePos = cvGetCaptureProperty(capture, CV_CAP_PROP_POS_FRAMES);

        SurfFeatureDetector detector(minHessian);

        vector<KeyPoint> keypoints_frame;

        detector.detect(frame, keypoints_frame);

        SurfDescriptorExtractor extractor;

        Mat descriptors_frame;

        extractor.compute(frame, keypoints_frame, descriptors_frame);

        cout << "keypoints detected: " << keypoints_frame.size() << endl;
        for (int i = 0; i < keypoints_frame.size(); i++) {
            cout << "\t" << keypoints_frame[i].pt.x << ", " << keypoints_frame[i].pt.y << " -- " << keypoints_frame[i].angle << " : " << keypoints_frame[i].size << " -- " << keypoints_frame[i].response << endl;
            cout << "\t\t(" << descriptors_frame.rows << ", " << descriptors_frame.cols << ") ";
            for (int j = 0; j < descriptors_frame.cols; j++) {
                cout << " " << descriptors_frame.at<float>(i,j);
            }
            cout << endl;
        }
        cout << endl;


        // Find Matches
        FlannBasedMatcher matcher;
        vector<DMatch> matches;
        matcher.match(descriptors_frame, descriptors_file, matches);

        double max_dist = 0;
        double min_dist = 100;
        double avg_dist = 0;

        for (int i=0; i<matches.size(); i++) {
            double dist = matches[i].distance;
            if(dist < min_dist) min_dist = dist;
            if(dist > max_dist) max_dist = dist;
        }

        //cout << "Max dist: " << max_dist << endl;
        cout << "Min dist: " << min_dist << endl;

        vector<DMatch> good_matches;

        for (int i=0; i<matches.size(); i++) {
            if (matches[i].distance <= 0.18 && matches[i].distance <= 2.0*min_dist) {
                good_matches.push_back(matches[i]);
                avg_dist += matches[i].distance;
            }
        }
        if (good_matches.size() > 0) {
        	avg_dist = avg_dist/good_matches.size();
        	cout << "Avg dist: " << avg_dist << endl;
        }

		// Localize object.
		vector<Point2f> matching_points;
		vector<KeyPoint> keypoints_matches;

		for (int i=0; i<good_matches.size(); i++) {
			keypoints_matches.push_back(keypoints_frame[good_matches[i].queryIdx]);
			matching_points.push_back(keypoints_frame[good_matches[i].queryIdx].pt);
		}

		// Code to draw the points.
		Mat frame_points;
#ifdef GUI
        drawKeypoints(frame, keypoints_matches, frame_points, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
#endif

		//Get bounding rectangle.
		if (matching_points.size() == 0) {
			Point2f tlFrame(0, 0);
			Point2f brFrame(frame.cols, frame.rows);
			tlPoints.push_back(tlFrame);
			brPoints.push_back(brFrame);
		} else {
            cv::Rect boundRect = boundingRect(matching_points);
			
			//Calculate mean of the matching points (reduce the Nx1 point matrix to a single row).
			Mat mean;
			reduce(matching_points, mean, 0, CV_REDUCE_AVG);
			double xMean = mean.at<Point2f>(0, 0).x;
			double yMean = mean.at<Point2f>(0, 0).y;
			
			//Calculate standard deviation.
			vector<int> xVals;
			vector<int> yVals;
			for (int i=0; i<matching_points.size(); i++) {
				xVals.push_back(matching_points[i].x);
				yVals.push_back(matching_points[i].y);
			}
			double xStdDev = standardDeviation(xVals, xMean);
			double yStdDev = standardDeviation(yVals, yMean);
			
			// Top-left has the smaller y in image coordinates.
			Point2f tlStdPoint(xMean - xStdDev/2, yMean - yStdDev/2);
			Point2f brStdPoint(xMean + xStdDev/2, yMean + yStdDev/2);
			
            cv::Rect stdDevRect(tlStdPoint, brStdPoint);

#ifdef GUI
			color = Scalar(0, 0, 255); // Blue, Green, Red
			rectangle(frame_points, boundRect.tl(), boundRect.br(), color, 2, 8, 0);
			color = Scalar(0, 255, 255); // Blue, Green, Red
			rectangle(frame_points, stdDevRect.tl(), stdDevRect.br(), color, 2, 8, 0);
#endif
			boundingRects.push_back(boundRect);
			tlPoints.push_back(boundRect.tl());
			brPoints.push_back(boundRect.br());
		}

		if (tlPoints.size() != 0) {
			
			// Calculate mean rectangle (reduce each Nx1 point matrix to a single row).
			Mat tlMean;
			Mat brMean;
			reduce(tlPoints, tlMean, 0, CV_REDUCE_AVG);
			reduce(brPoints, brMean, 0, CV_REDUCE_AVG);
			Point2f tlPoint(tlMean.at<Point2f>(0, 0));
			Point2f brPoint(brMean.at<Point2f>(0, 0));
			
            cv::Rect averageRect(tlPoint, brPoint);
		
			// Calculate median rectangle.
			vector<int> tlxVals;
			vector<int> tlyVals;
			vector<int> brxVals;
			vector<int> bryVals;
			for (int i=0; i<tlPoints.size(); i++) {
				tlxVals.push_back(tlPoints[i].x);
				tlyVals.push_back(tlPoints[i].y);
				brxVals.push_back(brPoints[i].x);
				bryVals.push_back(brPoints[i].y);
			}
			int tlxMedian;
			int tlyMedian;
			int brxMedian;
			int bryMedian;
			tlxMedian = quickMedian(tlxVals);
			tlyMedian = quickMedian(tlyVals);
			brxMedian = quickMedian(brxVals);
			bryMedian = quickMedian(bryVals);
			//cout << "lt Median: " << tlxMedian << "," << tlyMedian << endl;
			//cout << "br Median: " << brxMedian << "," << bryMedian << endl;
			Point2i tlMedianPoint(tlxMedian, tlyMedian);
			Point2i brMedianPoint(brxMedian, bryMedian);

            cv::Rect medianRect(tlMedianPoint, brMedianPoint);			

#ifdef GUI
			color = Scalar(255, 0, 0); // Blue, Green, Red
			rectangle(frame_points, averageRect.tl(), averageRect.br(), color, 2, 8, 0);
			color = Scalar(0, 255, 0);
			rectangle(frame_points, medianRect.tl(), medianRect.br(), color, 2, 8, 0);
#endif

			finalRect = averageRect;
		}

		// Check for the frames-in-three-minutes mark.
		framesInThreeMin = 20; // NOTE: overrides the fps-based value computed above
		if (framePos != 0 && framePos % framesInThreeMin == 0) {
			double probability;
			if (tlPoints.empty() && brPoints.empty()) {
				probability = 0.0;
				
			} else {
				double frameDiameter = sqrt(pow((double)frame.cols, 2) + pow((double)frame.rows, 2));
				double roiDiameter = sqrt(pow((double)finalRect.width, 2) + pow((double)finalRect.height, 2));
				probability = 1-(roiDiameter/(frameDiameter*0.6));
			}
			if (probability < 0) probability = 0.0;
			percentages.push_back(probability);
#ifndef _BOINC_APP_
			cout << "Min Dist: " << min_dist << endl;
            cout << probability << endl;
#endif
			boundingRects.clear();
			tlPoints.clear();
			brPoints.clear();
		}

		// Update percent completion and look for checkpointing request.
#ifdef _BOINC_APP_
		boinc_fraction_done((double)framePos/total);

		if(boinc_time_to_checkpoint()) {
			cerr << "checkpointing" << endl;
			write_checkpoint();
			boinc_checkpoint_completed();
		}
#endif

#ifdef GUI
		imshow("SURF", frame_points);
		if(cvWaitKey(15)==27) break;
#endif
	}

	cerr << "<slice_probabilities>" << endl;
	for (int i=0; i<percentages.size(); i++) cerr << percentages[i] << endl;
	cerr << "</slice_probabilities>" << endl;

#ifdef GUI
    cvDestroyWindow("SURF");
#endif

    cvReleaseCapture(&capture);

#ifdef _BOINC_APP_
    boinc_finish(0);
#endif
    return 0;
}
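The main above relies on two helpers, standardDeviation and quickMedian, that are not part of this listing. The sketches below are hypothetical reconstructions inferred from the call sites, not the original implementations.

// Hypothetical: population standard deviation of vals around a precomputed mean.
double standardDeviation(const vector<int> &vals, double mean) {
    if (vals.empty()) return 0.0;
    double sumSq = 0.0;
    for (size_t i = 0; i < vals.size(); i++) {
        double d = vals[i] - mean;
        sumSq += d * d;
    }
    return sqrt(sumSq / vals.size());
}

// Hypothetical: median via std::nth_element; takes a copy since it reorders elements.
int quickMedian(vector<int> vals) {
    if (vals.empty()) return 0;
    size_t mid = vals.size() / 2;
    std::nth_element(vals.begin(), vals.begin() + mid, vals.end());
    return vals[mid];
}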
/** @function main */
int main()
{
  // Apply SURF method to match images by camera in motion

  Mat img_object = imread("/home/hailong/Pictures/house_contour.png");//first input image location
  Mat img_scene = imread( "/home/hailong/Pictures/house_simulation_contour.png" );//second input image location

  if( !img_object.data || !img_scene.data )
  { std::cout<< " --(!) Error reading images " << std::endl; return -1; }

  //Detect the keypoints using SURF Detector
  int minhessian = 400;

  SurfFeatureDetector detector( minhessian );

  std::vector<KeyPoint> keypoints_object, keypoints_scene;

  detector.detect( img_object, keypoints_object );
  detector.detect( img_scene, keypoints_scene );

  //Calculate descriptors (feature vectors)
  SurfDescriptorExtractor extractor;

  Mat descriptors_object, descriptors_scene;

  extractor.compute( img_object, keypoints_object, descriptors_object );
  extractor.compute( img_scene, keypoints_scene, descriptors_scene );

  //Matching descriptor vectors using FLANN matcher
  FlannBasedMatcher matcher;
  std::vector< DMatch > matches;
  matcher.match( descriptors_object, descriptors_scene, matches );

  double max_dist = 0; double min_dist = 100;

  //Quick calculation of max and min distances between keypoints
  for( int i = 0; i < descriptors_object.rows; i++ )
  { double dist = matches[i].distance;
    if( dist < min_dist ) min_dist = dist;
    if( dist > max_dist ) max_dist = dist;
  }

  printf("-- Max dist : %f \n", max_dist );
  printf("-- Min dist : %f \n", min_dist );

  // Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
  std::vector< DMatch > good_matches;

  for( int i = 0; i < descriptors_object.rows; i++ )
  { if( matches[i].distance < 3*min_dist )
     { good_matches.push_back( matches[i]); }
  }

  Mat img_matches;
  drawMatches( img_object, keypoints_object, img_scene, keypoints_scene,
               good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
               vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

  //Localize the object
  std::vector<Point2f> obj;
  std::vector<Point2f> scene;

  for( int i = 0; i < good_matches.size(); i++ )
  {
    //Get the keypoints from the good matches
    obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
    scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
  }

  Mat H = findHomography( obj, scene, CV_RANSAC );

  //Get the corners from the image_1 ( the object to be "detected" )
  std::vector<Point2f> obj_corners(4);
  obj_corners[0] = cvPoint(0,0); obj_corners[1] = cvPoint( img_object.cols, 0 );
  obj_corners[2] = cvPoint( img_object.cols, img_object.rows ); obj_corners[3] = cvPoint( 0, img_object.rows );
  std::vector<Point2f> scene_corners(4);

  perspectiveTransform( obj_corners, scene_corners, H);

  //Draw lines between the corners (the mapped object in the scene - image_2 )
  line( img_matches, scene_corners[0] + Point2f( img_object.cols, 0), scene_corners[1] + Point2f( img_object.cols, 0), Scalar(0, 255, 0), 4 );
  line( img_matches, scene_corners[1] + Point2f( img_object.cols, 0), scene_corners[2] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
  line( img_matches, scene_corners[2] + Point2f( img_object.cols, 0), scene_corners[3] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
  line( img_matches, scene_corners[3] + Point2f( img_object.cols, 0), scene_corners[0] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );

  //Show detected matches
  imshow( "SURF Match Results", img_matches );

  waitKey(0);
  return 0;
}
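Several examples in this listing keep matches whose distance is below 3*min_dist. A more selective alternative is Lowe's ratio test over the two nearest neighbours; the sketch below shows the idea and is not part of the code above.

// Sketch: Lowe's ratio test. Keep a match only when the best candidate is
// clearly better than the second-best one.
std::vector<DMatch> ratioTestMatch(const Mat &descriptors_object,
                                   const Mat &descriptors_scene,
                                   float ratio = 0.75f)
{
  FlannBasedMatcher matcher;
  std::vector< std::vector<DMatch> > knn_matches;
  matcher.knnMatch(descriptors_object, descriptors_scene, knn_matches, 2);

  std::vector<DMatch> good_matches;
  for (size_t i = 0; i < knn_matches.size(); i++) {
    if (knn_matches[i].size() == 2 &&
        knn_matches[i][0].distance < ratio * knn_matches[i][1].distance) {
      good_matches.push_back(knn_matches[i][0]);
    }
  }
  return good_matches;
}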
//
// Following an example from
// http://ramsrigoutham.com/2012/11/22/panorama-image-stitching-in-opencv/
//
void calcHomographyFeature(const Mat& image1, const Mat& image2)
{
    static const char* difffeat = "Difference feature registered";

    Mat gray_image1;
    Mat gray_image2;
    // Convert to Grayscale
    if(image1.channels() != 1)
        cvtColor(image1, gray_image1, CV_RGB2GRAY);
    else
        image1.copyTo(gray_image1);
    if(image2.channels() != 1)
        cvtColor(image2, gray_image2, CV_RGB2GRAY);
    else
        image2.copyTo(gray_image2);

    //-- Step 1: Detect the keypoints using SURF Detector
    int minHessian = 400;

    SurfFeatureDetector detector(minHessian);

    std::vector<KeyPoint> keypoints_object, keypoints_scene;

    detector.detect(gray_image1, keypoints_object);
    detector.detect(gray_image2, keypoints_scene);

    //-- Step 2: Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;

    Mat descriptors_object, descriptors_scene;

    extractor.compute(gray_image1, keypoints_object, descriptors_object);
    extractor.compute(gray_image2, keypoints_scene, descriptors_scene);

    //-- Step 3: Matching descriptor vectors using FLANN matcher
    FlannBasedMatcher matcher;
    std::vector<DMatch> matches;
    matcher.match(descriptors_object, descriptors_scene, matches);

    double max_dist = 0; double min_dist = 100;

    //-- Quick calculation of max and min distances between keypoints
    for(int i = 0; i < descriptors_object.rows; i++)
    {
        double dist = matches[i].distance;
        if( dist < min_dist ) min_dist = dist;
        if( dist > max_dist ) max_dist = dist;
    }

    //-- Use only "good" matches (i.e. whose distance is less than 3*min_dist)
    std::vector<DMatch> good_matches;

    for(int i = 0; i < descriptors_object.rows; i++) {
        if(matches[i].distance < 3*min_dist) {
            good_matches.push_back( matches[i]);
        }
    }
    std::vector< Point2f > obj;
    std::vector< Point2f > scene;

    for(size_t i = 0; i < good_matches.size(); i++)
    {
        //-- Get the keypoints from the good matches
        obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
        scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
    }

    // Find the Homography Matrix
    Mat H = findHomography( obj, scene, CV_RANSAC );
    // Use the Homography Matrix to warp the images
    Mat result;
    Mat Hinv = H.inv();
    warpPerspective(image2, result, Hinv, image1.size());

    cout << "--- Feature method\n" << H << endl;
    
    Mat imf1, resf;
    image1.convertTo(imf1, CV_64FC3);
    result.convertTo(resf, CV_64FC3);
    showDifference(imf1, resf, difffeat);
}
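showDifference is not defined in this listing. A plausible minimal sketch, assuming it displays the per-pixel difference of the two CV_64FC3 images in a window with the given title:

// Hypothetical sketch of the showDifference helper used above.
void showDifference(const Mat &image1, const Mat &image2, const char *title)
{
    Mat diff;
    absdiff(image1, image2, diff);   // element-wise |image1 - image2|
    Mat diff8u;
    diff.convertTo(diff8u, CV_8UC3); // clamp and convert for display
    imshow(title, diff8u);
}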
Example #29
0
bool RelicDetect::Match(RelicDetect obj,RelicDetect scn)
{
	FlannBasedMatcher matcher;
	std::vector< DMatch > matches;
	
	matcher.match(obj.descriptors, scn.descriptors, matches);
	double max_dist = 0; double min_dist = 100;
	//-- Quick calculation of max and min distances between keypoints
	for (int i = 0; i < obj.descriptors.rows; i++)
	{
		double dist = matches[i].distance;
		if (dist < min_dist) min_dist = dist;
		if (dist > max_dist) max_dist = dist;
	}
	printf("-- Max dist : %f \n", max_dist);
	printf("-- Min dist : %f \n", min_dist);
	//-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
	std::vector< DMatch > good_matches;
	for (int i = 0; i < obj.descriptors.rows; i++)
	{
		if (matches[i].distance <= 3 * min_dist)
		{
			good_matches.push_back(matches[i]);
		}
	}
	max_dist = 0; min_dist = 100; double total_dist = 0;
	for (int i = 0; i < good_matches.size(); i++)
	{
		double dist = good_matches[i].distance;
		total_dist += dist;
		if (dist < min_dist) min_dist = dist;
		if (dist > max_dist) max_dist = dist;
	}
	printf("-- good matches Max dist : %f \n", max_dist);
	printf("-- good matches Min dist : %f \n", min_dist);
	printf("-- good matches total dist : %f \n", total_dist);
	cout << "-- good matches size " << good_matches.size() << endl;
	cout << "-- dist per match " << total_dist / (double)good_matches.size() << endl;
	Mat img_matches;
	drawMatches(obj.img_color, obj.keypoints, scn.img_color, scn.keypoints,
		good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
		std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
	//imshow("matches", img_matches);
	//-- Localize the object
	std::vector<Point2f> obj_points;
	std::vector<Point2f> scn_points;
	for (size_t i = 0; i < good_matches.size(); i++)
	{
		//-- Get the keypoints from the good matches
		obj_points.push_back(obj.keypoints[good_matches[i].queryIdx].pt);
		scn_points.push_back(scn.keypoints[good_matches[i].trainIdx].pt);
	}
	Mat H = cv::findHomography(obj_points, scn_points, RANSAC);
	cout << "H:" << endl;
	for (int i = 0;i < H.rows;i++)
	{
		for (int j = 0;j < H.cols;j++)
		{
			cout << H.at<double>(i, j) << " ";
		}
		cout << endl;
	}
	//-- Get the corners from the image_1 ( the object to be "detected" )
	std::vector<Point2f> obj_corners(4);
	obj_corners[0] = cvPoint(0, 0);
	obj_corners[1] = cvPoint(obj.img_color.cols, 0);
	obj_corners[2] = cvPoint(obj.img_color.cols, obj.img_color.rows);
	obj_corners[3] = cvPoint(0, obj.img_color.rows);
	std::vector<Point2f> scene_corners(4);
	perspectiveTransform(obj_corners, scene_corners, H);
	cout << "object area" << contourArea(obj_corners) << endl;
	cout << "scene detected area" << contourArea(scene_corners) << endl;
	auto scene_area = contourArea(scene_corners);
	//-- Draw lines between the corners (the mapped object in the scene - image_2 )
	line(img_matches, scene_corners[0] + Point2f(obj.img_color.cols, 0), scene_corners[1] + Point2f(obj.img_color.cols, 0), Scalar(0, 255, 0), 4);
	line(img_matches, scene_corners[1] + Point2f(obj.img_color.cols, 0), scene_corners[2] + Point2f(obj.img_color.cols, 0), Scalar(0, 255, 0), 4);
	line(img_matches, scene_corners[2] + Point2f(obj.img_color.cols, 0), scene_corners[3] + Point2f(obj.img_color.cols, 0), Scalar(0, 255, 0), 4);
	line(img_matches, scene_corners[3] + Point2f(obj.img_color.cols, 0), scene_corners[0] + Point2f(obj.img_color.cols, 0), Scalar(0, 255, 0), 4);
	//-- Show detected matches
	imshow("Good Matches & Object detection", img_matches);
	waitKey(0);
	// Accept the match only if the projected object covers a non-trivial area
	return scene_area > 1000;
}
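The scene_area > 1000 test above accepts any large quadrilateral, including self-intersecting shapes produced by a degenerate homography. A convexity check is a cheap extra guard; the helper below is an illustrative addition, not part of the original class.

// Sketch: a valid perspective projection of a rectangle stays a convex
// quadrilateral, so reject non-convex or tiny detections.
bool isPlausibleDetection(const std::vector<Point2f> &scene_corners, double minArea)
{
	return isContourConvex(scene_corners) && contourArea(scene_corners) > minArea;
}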
void buildTrainingDescriptors(){

    Mat training_features;

    FileStorage fs("dictionary.yml", FileStorage::READ);
    fs["vocabulary"] >> dictionary;
    fs.release();

    vector<string> filenames = readFile("files.txt", "../CS296/data/training_images/");

for(int i=0; i<filenames.size(); i++){
    string file = filenames.at(i);
    Mat img1 = imread(file.c_str(), CV_LOAD_IMAGE_GRAYSCALE);
    if (img1.empty()) { cout << "Could not read image: " << file << endl; continue; }

    cout << "\nProcessing image: " << file << " - " << img1.rows << "x" << img1.cols << " iteration - " << i << endl;

    Mat img1_equalized = img1.clone();
    equalizeHist(img1, img1_equalized); // histogram equalization
    cout << "\n\n(1) Image histogram equalized ... ";

    Mat img1_sift = img1_equalized.clone();
    Mat sift_descriptors, sift_dest;
    vector<KeyPoint> keypoints;

    //SiftDescriptorExtractor detector_sift;
    //detector_sift.detect(img1_sift, keypoints);
    //detector_sift.compute(img1_sift, keypoints, sift_descriptors);

    SIFT sift(2000, 3, 0.004);
    sift(img1_equalized, Mat(), keypoints, sift_descriptors, false); // empty Mat() = no mask

    drawKeypoints(img1_sift, keypoints, sift_dest, Scalar::all(-1), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);

    cout << "\n\n(2) SIFT Descriptor for image computed:\n";
    cout << "Keypoints detected: " << keypoints.size() << endl;
    cout << "Feature Vector: " << sift_descriptors.size();

    //lbp<unsigned char>(img1_equalized,  dest_lbp, radius_lbp, neighbor_lbp); /// Local Binary Pattern

    Mat dest_lbp;
    orig_lbp<unsigned char>(img1_equalized, dest_lbp);
    cout << "\n\n(3) LBP Computed Image: " << dest_lbp.size() << endl;

    Mat dest_lbp_out = dest_lbp.clone();
    drawKeypoints(dest_lbp_out, keypoints, dest_lbp_out, Scalar::all(-1), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);

    //obtain patch values per keypoint (8x8)

    Mat patch;
    Mat lbp_descriptors;
    int patchSize = 8;

    Size size = Size(patchSize , patchSize); //square patchSize x patchSize region (8x8 patch)

    for(int i=0; i<keypoints.size();i++){

        Point2f center(keypoints.at(i).pt.x,keypoints.at(i).pt.y);
        getRectSubPix(dest_lbp,size,center,patch);
        patch = patch.reshape(0,1); //flatten matrix to 1-D Vector

        lbp_descriptors.push_back(patch);
    }

    cout << "LBP Descriptors computed ... " << endl;
    cout << "LBP Vector: [" << lbp_descriptors.rows << " x" << lbp_descriptors.cols << "]" << endl;

    lbp_descriptors.convertTo(lbp_descriptors,CV_32FC1);

    Mat sift_lbp_features;
    hconcat(sift_descriptors, lbp_descriptors, sift_lbp_features);

    cout << "\nTotal Image feature vector: [ image - " << sift_lbp_features.rows << " key points x " << sift_lbp_features.cols << " features]" << endl;

    // total_features.push_back(sift_lbp_features);
    // cout << "Total features: " << total_features.rows << endl << endl;

    cout << "===================================================================\n\n";

    // Build the BOW descriptor: match each image feature against the
    // dictionary with a nearest-neighbour (FLANN) matcher.
    FlannBasedMatcher matcher;
    std::vector< DMatch > matches;
    matcher.match( sift_lbp_features, dictionary, matches );

    cout << "Number of matches: " << matches.size() << endl;

    // Standard C++ has no variable-length arrays; use a vector for the histogram bins.
    vector<float> bins(dictionary.rows, 0.0f);

    for (int i = 0; i < matches.size(); i++) {
        bins[matches.at(i).trainIdx] += 1; // vote for the matched vocabulary word
    }

    Mat norm_bins(1, dictionary.rows, CV_32F, &bins[0]);
    normalize( norm_bins, norm_bins, 0, 1, NORM_MINMAX, -1, Mat() );

    training_features.push_back(norm_bins);

}

// Store the per-image feature histograms as the training set.
FileStorage fs2("training_set.yml", FileStorage::WRITE);
fs2 << "training_set" << training_features;
fs2.release();
}
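orig_lbp is declared elsewhere in this project; the sketch below is a hypothetical reconstruction of the classic 3x3 Local Binary Pattern operator that the orig_lbp<unsigned char>(src, dst) call site suggests.

// Hypothetical: basic 3x3 LBP. Each pixel becomes an 8-bit code of
// comparisons between its eight neighbours and the centre value.
template <typename T>
void orig_lbp(const Mat &src, Mat &dst)
{
    dst = Mat::zeros(src.rows, src.cols, CV_8UC1); // border pixels stay 0
    for (int r = 1; r < src.rows - 1; r++) {
        for (int c = 1; c < src.cols - 1; c++) {
            T center = src.at<T>(r, c);
            unsigned char code = 0;
            code |= (src.at<T>(r-1, c-1) >= center) << 7;
            code |= (src.at<T>(r-1, c  ) >= center) << 6;
            code |= (src.at<T>(r-1, c+1) >= center) << 5;
            code |= (src.at<T>(r,   c+1) >= center) << 4;
            code |= (src.at<T>(r+1, c+1) >= center) << 3;
            code |= (src.at<T>(r+1, c  ) >= center) << 2;
            code |= (src.at<T>(r+1, c-1) >= center) << 1;
            code |= (src.at<T>(r,   c-1) >= center) << 0;
            dst.at<unsigned char>(r, c) = code;
        }
    }
}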