Code Example #1
File: RobustMatcher.cpp  Project: 12rohanb/opencv
void RobustMatcher::robustMatch( const cv::Mat& frame, std::vector<cv::DMatch>& good_matches,
              std::vector<cv::KeyPoint>& keypoints_frame, const cv::Mat& descriptors_model )
{

  // 1a. Detection of the ORB features
  this->computeKeyPoints(frame, keypoints_frame);

  // 1b. Extraction of the ORB descriptors
  cv::Mat descriptors_frame;
  this->computeDescriptors(frame, keypoints_frame, descriptors_frame);

  // 2. Match the two image descriptors
  std::vector<std::vector<cv::DMatch> > matches12, matches21;

  // 2a. From image 1 to image 2
  matcher_->knnMatch(descriptors_frame, descriptors_model, matches12, 2); // return 2 nearest neighbours

  // 2b. From image 2 to image 1
  matcher_->knnMatch(descriptors_model, descriptors_frame, matches21, 2); // return 2 nearest neighbours

  // 3. Remove matches for which the NN ratio is greater than the threshold
  // clean image 1 -> image 2 matches
  ratioTest(matches12);
  // clean image 2 -> image 1 matches
  ratioTest(matches21);

  // 4. Remove non-symmetrical matches
  symmetryTest(matches12, matches21, good_matches);

}
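
Every example on this page leans on ratioTest and symmetryTest helpers that the listing itself never shows. The following is a minimal sketch of what these helpers typically look like in the Laganière-style RobustMatcher pattern; the bodies and the 0.8 default ratio threshold are assumptions, not code taken from the projects above.

#include <vector>
#include <opencv2/features2d/features2d.hpp>

// Sketch: clear every knn entry whose best/second-best distance ratio
// exceeds the threshold; return how many entries were removed.
int ratioTest(std::vector<std::vector<cv::DMatch> >& matches,
              float ratio = 0.8f) // default threshold is an assumption
{
  int removed = 0;
  for (size_t i = 0; i < matches.size(); ++i)
  {
    // entries with fewer than two neighbours cannot be tested
    if (matches[i].size() < 2 ||
        matches[i][0].distance / matches[i][1].distance > ratio)
    {
      matches[i].clear();
      ++removed;
    }
  }
  return removed;
}

// Sketch: keep only matches confirmed in both directions, i.e. the best
// 1->2 match and the corresponding best 2->1 match point at each other.
void symmetryTest(const std::vector<std::vector<cv::DMatch> >& matches1,
                  const std::vector<std::vector<cv::DMatch> >& matches2,
                  std::vector<cv::DMatch>& symMatches)
{
  symMatches.clear();
  for (size_t i = 0; i < matches1.size(); ++i)
  {
    if (matches1[i].empty()) continue; // cleared by the ratio test
    for (size_t j = 0; j < matches2.size(); ++j)
    {
      if (matches2[j].empty()) continue;
      if (matches1[i][0].queryIdx == matches2[j][0].trainIdx &&
          matches2[j][0].queryIdx == matches1[i][0].trainIdx)
      {
        symMatches.push_back(matches1[i][0]);
        break;
      }
    }
  }
}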
Code Example #2
void SymmetryNNDRMatcher::match(Mat &descriptors_object, Mat &descriptors_scene, std::vector<DMatch> &good_matches)
{
  cv::BruteForceMatcher<cv::L2<float> > matcher;	

  // Match all keypoints from the object to the scene
  std::vector<std::vector<cv::DMatch> > matches1;
  matcher.knnMatch(descriptors_object, descriptors_scene,
      matches1, // Vector of matches
      2); // return 2 nearest neighbours

  // Match all keypoints from the scene with the object
  std::vector<std::vector<cv::DMatch> > matches2;
  matcher.knnMatch(descriptors_scene, descriptors_object,
      matches2, // Vector of matches
      2); // return 2 nearest neighbours


  // Check NNDR for both match vectors
  ratioTest(matches1);
  ratioTest(matches2);

  // Remove non-symmetrical matches
  // std::vector<cv::DMatch> symMatches;
  symmetryTest(matches1,matches2,good_matches);

}
Code Example #3
// Main method:
cv::Mat RobustMatcher::match(cv::Mat &image1,
                             cv::Mat &image2,
                             std::vector<cv::DMatch> &matches,
                             std::vector<cv::KeyPoint> &keypoints1,
                             std::vector<cv::KeyPoint> &keypoints2){

    // 1a. Detection of the SURF features
    detector->detect(image1,keypoints1);
    detector->detect(image2,keypoints2);
    // 1b. Extraction of the SURF descriptors
    cv::Mat descriptors1, descriptors2;
    extractor->compute(image1,keypoints1,descriptors1);
    extractor->compute(image2,keypoints2,descriptors2);
    // 2. Match the two image descriptors
    // Construction of the matcher
    //cv::BruteForceMatcher< cv::L2<float> > matcher;
    cv::FlannBasedMatcher matcher;

    // from image 1 to image 2
    // based on k nearest neighbours (with k=2)
    std::vector< std::vector<cv::DMatch> > matches1;
    matcher.knnMatch(descriptors1,descriptors2,
                     matches1, // vector of matches (up to 2 per entry)
                     2);       // return 2 nearest neighbours

    // from image 2 to image 1
    // based on k nearest neighbours (with k=2)
    std::vector< std::vector<cv::DMatch> > matches2;
    matcher.knnMatch(descriptors2,descriptors1,
                     matches2, // vector of matches (up to 2 per entry)
                     2);       // return 2 nearest neighbours

    // 3. Remove matches for which the NN ratio is greater than the threshold
    // clean image 1 -> image 2 matches
    int removed= ratioTest(matches1);
    // clean image 2 -> image 1 matches
    removed= ratioTest(matches2);
    (void)removed; // the removal counts are not used further here

    // 4. Remove non-symmetrical matches
    std::vector<cv::DMatch> symMatches;
    symmetryTest(matches1,matches2,symMatches);

    // 5. Validate matches using RANSAC
    cv::Mat fundamental= ransacTest(symMatches,
                                    keypoints1, keypoints2, matches);

    // return the found fundamental matrix
    return fundamental;
}
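
Examples #3, #7 and #11 also call a ransacTest helper that the listing never shows. A minimal sketch under the same assumptions (the distance/confidence defaults and the 8-point minimum are guesses; cv::findFundamentalMat with a RANSAC inlier mask is the standard way to implement it):

#include <vector>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/calib3d/calib3d.hpp>

// Sketch: estimate the fundamental matrix with RANSAC and keep only
// the inlier matches; parameter defaults are assumptions.
cv::Mat ransacTest(const std::vector<cv::DMatch>& matches,
                   const std::vector<cv::KeyPoint>& keypoints1,
                   const std::vector<cv::KeyPoint>& keypoints2,
                   std::vector<cv::DMatch>& outMatches,
                   double distance = 3.0, double confidence = 0.99)
{
  // convert the surviving matches into two aligned point lists
  std::vector<cv::Point2f> points1, points2;
  for (size_t i = 0; i < matches.size(); ++i)
  {
    points1.push_back(keypoints1[matches[i].queryIdx].pt);
    points2.push_back(keypoints2[matches[i].trainIdx].pt);
  }
  outMatches.clear();
  if (points1.size() < 8) return cv::Mat(); // too few points for RANSAC

  // RANSAC marks each correspondence as inlier (1) or outlier (0)
  std::vector<uchar> inliers(points1.size(), 0);
  cv::Mat fundamental = cv::findFundamentalMat(points1, points2,
                                               cv::FM_RANSAC, distance,
                                               confidence, inliers);
  for (size_t i = 0; i < inliers.size(); ++i)
    if (inliers[i]) outMatches.push_back(matches[i]);

  return fundamental;
}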
Code Example #4
File: RobustMatcher.cpp  Project: 12rohanb/opencv
void RobustMatcher::fastRobustMatch( const cv::Mat& frame, std::vector<cv::DMatch>& good_matches,
                                 std::vector<cv::KeyPoint>& keypoints_frame,
                                 const cv::Mat& descriptors_model )
{
  good_matches.clear();

  // 1a. Detection of the ORB features
  this->computeKeyPoints(frame, keypoints_frame);

  // 1b. Extraction of the ORB descriptors
  cv::Mat descriptors_frame;
  this->computeDescriptors(frame, keypoints_frame, descriptors_frame);

  // 2. Match the two image descriptors
  std::vector<std::vector<cv::DMatch> > matches;
  matcher_->knnMatch(descriptors_frame, descriptors_model, matches, 2);

  // 3. Remove matches for which the NN ratio is greater than the threshold
  ratioTest(matches);

  // 4. Fill good matches container
  for ( std::vector<std::vector<cv::DMatch> >::iterator
         matchIterator= matches.begin(); matchIterator!= matches.end(); ++matchIterator)
  {
    if (!matchIterator->empty()) good_matches.push_back((*matchIterator)[0]);
  }

}
Code Example #5
File: robust_matcher.cpp  Project: caomw/sfm_demo-1
// Match features and compute essential mat
bool RobustMatcher::robustMatchEssentialMat(const cv::Mat &frame1, const cv::Mat &frame2, const cv::Mat &K,
  KeyPointVec &kpts1_inliers, KeyPointVec &kpts2_inliers,
  DMatchVec &inliers_matches, cv::Mat &essentialMat)
{
  cv::Mat desc1, desc2, inliers_mask;
  KeyPointVec kpts1, kpts1_good, kpts2, kpts2_good;
  DMatchVec good_matches;
  DMatchVec2 nn_matches;

  // compute keypoints and descriptors, then match
  nnMatch(frame1, kpts1, desc1, frame2, kpts2, desc2, nn_matches);

  // perform ratio test
  ratioTest(nn_matches, good_matches, kpts1, kpts1_good, kpts2, kpts2_good);

  // cv::findEssentialMat needs at least
  // 5 point correspondences (5-point algorithm)
  if(kpts2_good.size() >= 5)
  {
    computeEssentialMat(kpts1_good, kpts2_good, K, essentialMat, inliers_mask);
  }
  else
  {
    return false;
  }

  // extract inliers
  extractInliers(inliers_mask, kpts1_good, kpts2_good, kpts1_inliers, kpts2_inliers, inliers_matches);


  return true;
}
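
computeEssentialMat in example #5 is project-specific and not shown. A plausible sketch on top of cv::findEssentialMat (OpenCV 3.x API; the KeyPointVec typedef, the RANSAC parameters, and the assumption that the two keypoint lists are already aligned pairwise are inferred from the call site, not taken from the project):

#include <vector>
#include <opencv2/features2d.hpp>
#include <opencv2/calib3d.hpp>

typedef std::vector<cv::KeyPoint> KeyPointVec; // typedef assumed from the example

// Sketch: estimate the essential matrix from aligned keypoint pairs.
void computeEssentialMat(const KeyPointVec& kpts1, const KeyPointVec& kpts2,
                         const cv::Mat& K, cv::Mat& essentialMat,
                         cv::Mat& inliers_mask)
{
  // kpts1[i] is assumed to correspond to kpts2[i]
  std::vector<cv::Point2f> pts1, pts2;
  for (size_t i = 0; i < kpts1.size(); ++i)
  {
    pts1.push_back(kpts1[i].pt);
    pts2.push_back(kpts2[i].pt);
  }
  // the RANSAC prob/threshold values are assumptions
  essentialMat = cv::findEssentialMat(pts1, pts2, K, cv::RANSAC,
                                      0.999, 1.0, inliers_mask);
}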
Code Example #6
/* Calculation of the homography for feature-based alignment
 *
 **/
void AlignmentMatrixCalc::featureBasedHomography()
{
    std::vector<cv::DMatch> matchesPrevToCurrent;
    std::vector<cv::DMatch> matchesCurrentToPrev;
    std::vector<std::vector<cv::DMatch> > kmatchesPrevToCurrent;
    std::vector<std::vector<cv::DMatch> > kmatchesCurrentToPrev;
    std::vector<cv::DMatch> matchesPassed;

    // Matching Section begin
    if( matchType == normalMatch )
    {
        matcher->match( descriptorsPrev, descriptorsCurrent, matchesPrevToCurrent );
        matcher->match( descriptorsCurrent, descriptorsPrev, matchesCurrentToPrev );
        // Symmetry Test start
        symmetryTest(matchesPrevToCurrent, matchesCurrentToPrev, matchesPassed);

    }
    else if( matchType == knnMatch)
    {
 //       qDebug()<<"Match : "<<keypointsCurrent.size()<<"  "<<keypointsPrev.size()<<"\n";
        matcher->knnMatch(descriptorsPrev, descriptorsCurrent, kmatchesPrevToCurrent,2);
 //       qDebug()<<"Ratio Test 1 :"<<kmatchesPrevToCurrent.size()<<"\n";
        ratioTest(kmatchesPrevToCurrent);
 //       qDebug()<<"Ratio Test 1 End :"<<kmatchesPrevToCurrent.size()<<"\n";
        matcher->knnMatch(descriptorsCurrent,descriptorsPrev, kmatchesCurrentToPrev, 2);
 //       qDebug()<<"Ratio Test 2 :"<<kmatchesCurrentToPrev.size()<<"\n";
        ratioTest(kmatchesCurrentToPrev);
 //       qDebug()<<"Ratio Test 2 End :"<<kmatchesCurrentToPrev.size()<<"\n";
        // Symmetry test (knn overload; see the sketch after this example)
        symmetryTest(kmatchesPrevToCurrent,kmatchesCurrentToPrev,matchesPassed);
 //       qDebug()<<"Sym Test  :"<<matchesPassed.size()<<"\n";

    }
    else if( matchType == radiusMatch)
    {
        // radiusMatch is undocumented; it runs, but it never returns
        // matches back, whatever maxRadius is used, so it is disabled:
        // matcher->radiusMatch(descriptorsPrev, descriptorsCurrent, kmatchesPrevToCurrent, maxRadius );
        // convertRMatches(kmatchesCurrentToPrev, matchesPassed);
        exc.showException("radiusMatch is not working. Don't use it!" );

    }


    // Matching Section end
    isHomographyCalc = false;

    pointsPrev.clear();
    pointsCurrent.clear();

    // Conversion of the matched keypoints to points
    for (int p = 0; p < (int)matchesPassed.size(); ++p)
    {
        pointsPrev.push_back(keypointsPrev[matchesPassed[p].queryIdx].pt);
        pointsCurrent.push_back(keypointsCurrent[matchesPassed[p].trainIdx].pt);
    }

    // if enough matched points exist
    if(pointsPrev.size() >= 4 && pointsCurrent.size() >= 4)
    {
        // Sub-pixel accuracy

        cv::cornerSubPix(prevFrame, pointsPrev, cv::Size(5,5), cv::Size(-1,-1),
                         cv::TermCriteria(cv::TermCriteria::MAX_ITER+cv::TermCriteria::EPS,30,0.1));

        cv::cornerSubPix(currentFrame, pointsCurrent, cv::Size(5,5), cv::Size(-1,-1),
                         cv::TermCriteria(cv::TermCriteria::MAX_ITER+cv::TermCriteria::EPS,30,0.1));

        homography = cv::findHomography(pointsPrev, pointsCurrent, homographyCalcMethod,
                                        ransacReprojThreshold);
        /*
         cv::findHomography can return an empty matrix in some cases.
         This seems to happen only when the cv::RANSAC flag is passed,
         so check the computed homography before using it.
         */
        if(!homography.empty())
        {
            if(isHomographyValid())
            {
                isHomographyCalc = true;
            }
        }

    }

    if(isHomographyCalc == false)
    {
        // if no valid homography was calculated, fall back to the second stage
        stage=secondPass;

    }

}
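
The normalMatch branch in example #6 passes plain std::vector<cv::DMatch> vectors to symmetryTest, so the project must also carry a non-knn overload. A minimal sketch of that overload, assumed from the call site and following the same cross-check idea as the knn version:

#include <vector>
#include <opencv2/features2d/features2d.hpp>

// Sketch: a match survives only if the 1->2 and 2->1 results
// point back at each other.
void symmetryTest(const std::vector<cv::DMatch>& matches12,
                  const std::vector<cv::DMatch>& matches21,
                  std::vector<cv::DMatch>& symMatches)
{
  symMatches.clear();
  for (size_t i = 0; i < matches12.size(); ++i)
  {
    for (size_t j = 0; j < matches21.size(); ++j)
    {
      if (matches12[i].queryIdx == matches21[j].trainIdx &&
          matches21[j].queryIdx == matches12[i].trainIdx)
      {
        symMatches.push_back(matches12[i]);
        break;
      }
    }
  }
}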
Code Example #7
// Match feature points using the ratio test, symmetry test and RANSAC
// returns true if the object was found in one of the reference images
bool feature_matcher::match(cv::Mat& image1, cv::Mat& image2, // input images
	                  std::vector<cv::DMatch>& matches, // output matches and keypoints
	                  std::vector<cv::KeyPoint>& keypoints1, std::vector<cv::KeyPoint>& keypoints2) {

	ros::Time startTime = ros::Time::now();
	cv::Mat matchesMat;
	std::stringstream onScreenText;
	// cleaning
	matches.clear();
	keypoints1.clear();
	highestComp = 0.;
/*
	cv::createTrackbar("ConfidenceLevel [%] : ", "FEATURE REF", &Trackbar1, 100, NULL);
	cv::createTrackbar("MinDistanceToEpipolar: ", "FEATURE REF", &Trackbar2, 10, NULL);
	cv::createTrackbar("Ratio [%]: ", "FEATURE REF", &Trackbar3, 100, NULL);
	cv::createTrackbar("Min. Matches: ", "FEATURE REF", &Trackbar4, 200, NULL);
*/
	// 1a. Detection of the SURF/SIFT features
	detector->detect(image1,keypoints1);

	std::cout << keypoints1.size() << " Keypoints extracted from Label" << std::endl;
	info_lastMatch.imageKeys = keypoints1.size();

	// 1b. Extraction of the SURF/SIFT descriptors
	cv::Mat descriptors1, descriptors2;
	extractor->compute(image1,keypoints1,descriptors1);
	//extractor->compute(image2,keypoints2,descriptors2);

	//std::cout << "descriptor matrix size: " << descriptors1.rows << " by " << descriptors1.cols << std::endl;

	cv::Mat tmp;
	this->allReferences.copyTo(tmp);

	onScreenText.str(std::string());
	onScreenText << "Keypoints on Scene-Label: " << keypoints1.size();
	cv::putText(tmp, onScreenText.str(), cv::Point(10, this->allReferences.rows - 50), CV_FONT_HERSHEY_SIMPLEX, 0.50, cv::Scalar(200, 200, 0), 1, CV_AA);


	std::cout << "Matching with references (ratio test) : " << std::endl;
	printf("Ref1\tRef2\tRef3\tRef4\tRef5\tRef6\tRef7\tRef8");
	std::cout << std::endl;

	if (descriptors1.rows == 0 || descriptors1.cols == 0) {
		printf("0\t0\t0\t0\t0\t0\t0\t0\n");
		printf("No Keypoints identified from camera. ??Looking at a plain scene??\n");
		return false;
	}


	int featureMatches[2][numOfRefPics];
	// zero-initialize: the entries are read below even when a ratio test is skipped
	for (int j = 0; j < numOfRefPics; ++j)
		featureMatches[0][j] = featureMatches[1][j] = 0;
	int positiveRef;
	float highestMatch;
	std::vector<std::vector<cv::DMatch> > bestMatches;
	cv::Mat bestDescriptors;
	std::vector<cv::KeyPoint> bestKeypoints;
	std::stringstream resultText;
	positiveRef=-1;
	highestMatch=-1;
	resultText.str(std::string());

	for (uint i = 0; (i < (this->referencePics).size()); i++) {

		// take the right Pic/KP/Desc of the Array and store it in XXX2 <- only for easier handling
		keypoints2 = this->refKeypoints[i];
		descriptors2 = this->refDescriptors[i];
		image2= this->referencePics[i];

		// 2. Match the two image descriptors

		// Construction of the matcher

		//cv::Ptr<cv::DescriptorMatcher> matcher = new cv::DescriptorMatcher::create("BruteForceMatcher");
		//OPEN CV 2.3 style:
		cv::BruteForceMatcher<cv::L2<float> > matcher;

		// from image 1 to image 2
		// based on k nearest neighbours (with k=2)
		std::vector<std::vector<cv::DMatch> > matches1;
		matches1.clear();
		matcher.knnMatch(descriptors1, descriptors2, matches1, // vector of matches (up to 2 per entry)
				2); // return 2 nearest neighbours

		// from image 2 to image 1
		// based on k nearest neighbours (with k=2)
		std::vector<std::vector<cv::DMatch> > matches2;
		matches2.clear();
		matcher.knnMatch(descriptors2, descriptors1, matches2, // vector of matches (up to 2 per entry)
				2); // return 2 nearest neighbours

	                //std::cout << "Number of matched points 1->2: " << matches1.size() << std::endl;
	                //std::cout << "Number of matched points 2->1: " << matches2.size() << std::endl;
	                //cv::waitKey(0);

		int stableMatches= 0; // running count, updated after each filter stage

		// 3. Remove matches for which the NN ratio is greater than the threshold
		if (rTest12) {
			// clean image 1 -> image 2 matches
			int removed = ratioTest(matches1);
			printf("%i", (int)(matches1.size() - removed));
			resultText << (int)(matches1.size() - removed);
			onScreenText.str(std::string());
			onScreenText << "Matches (1->2): " << (matches1.size() - removed);
			cv::putText(tmp, onScreenText.str(), cv::Point(((this->allReferences.cols / this->referencePics.size()) * i + 10),15), CV_FONT_HERSHEY_SIMPLEX, 0.50, cv::Scalar(200, 200, 0), 1, CV_AA);
			stableMatches = matches1.size() - removed;
			featureMatches[0][i]= matches1.size() - removed;


			if (rTest21) {
				// clean image 2 -> image 1 matches
				removed = ratioTest(matches2);
				printf("/%i\t", (int)(matches2.size() - removed));
				resultText << "/" << (int)(matches2.size() - removed) << "  ";
				onScreenText.str(std::string());
				onScreenText << "Matches (1<-2): " << (matches2.size() - removed);
				cv::putText(tmp, onScreenText.str(), cv::Point(((this->allReferences.cols / this->referencePics.size()) * i + 10),35), CV_FONT_HERSHEY_SIMPLEX, 0.50, cv::Scalar(200, 200, 0), 1, CV_AA);
				stableMatches = matches2.size() - removed;
				featureMatches[1][i]= matches2.size() - removed;

				// 4. Remove non-symmetrical matches
				if (symTest) {
					std::vector<cv::DMatch> symMatches;
					symmetryTest(matches1, matches2, symMatches);
					std::cout << "Number of matched points (symmetry test): "
							<< symMatches.size() << std::endl;
					stableMatches = symMatches.size();
					featureMatches[1][i]= symMatches.size();

					// 5. Validate matches using RANSAC
					if (rsacTest) {
						cv::Mat fundamental = ransacTest(symMatches, keypoints1,
								keypoints2, matches);
						stableMatches = matches.size();
						featureMatches[1][i]= symMatches.size();
					}
				}
			}
		}
		// If the number of matches on both sides exceeds the minimum
		if ((featureMatches[0][i] > minimumMatches) && (featureMatches[1][i] > minimumMatches)) {

			if (((featureMatches[1][i]) + (featureMatches[0][i])) > highestMatch) {
				highestMatch= ((featureMatches[1][i]) + (featureMatches[0][i]));
				positiveRef= i;
				bestKeypoints=keypoints2;
				bestMatches= matches2;
				info_lastMatch.positiveMatches1 = featureMatches[0][i];
				info_lastMatch.positiveMatches2 = featureMatches[1][i];
				//cv::waitKey(0);
			}
		}
	}
	std::cout << std::endl;

	// Now look for the highest positive match!
	// TODO: check the refPics to the left and right of refPic[highestMatch] for matches -> they should also have matches!!
	highestComp= highestMatch;

	if (positiveRef > -1) {
		std::cout << "The Object was found. Reference picture number " << positiveRef+1 << " gave the best results." << std::endl;

		if (this->showWindows) {
			cv::drawMatches(image1, keypoints1, // 1st image and its keypoints
					this->referencePics[positiveRef], bestKeypoints, // 2nd image and its keypoints
					bestMatches, // the matches
					matchesMat, // the image produced
					cv::Scalar(0, 255, 0)); // color of the lines
			onScreenText.str(std::string());
			onScreenText << "Ref1   Ref2   Ref3   Ref4   Ref5   Ref6   Ref7   Ref8";
			cv::putText(matchesMat, onScreenText.str(), cv::Point(image1.cols/2 - 200, image1.rows/2 +30), CV_FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(10, 10, 255), 1, CV_AA);
			cv::putText(matchesMat, resultText.str(), cv::Point(image1.cols/2 - 200, image1.rows/2 + 50), CV_FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(10, 10, 255), 1, CV_AA);
			cv::namedWindow("Features", CV_WINDOW_AUTOSIZE);
			cv::imshow("Features", matchesMat);
		}
		info_lastMatch.processingTime = (ros::Time::now() - startTime).toSec() * 1000.0; // ms
		return true;
	}
	// "else"
	if (this->showWindows && (keypoints1.size() > 0)) {
		image1.copyTo(matchesMat);
		onScreenText.str(std::string());
		onScreenText << "This is not the wanted object!";
		cv::putText(matchesMat, onScreenText.str(), cv::Point(image1.cols/2 - 200, image1.rows/2), CV_FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(10, 10, 200), 2, CV_AA);
		onScreenText.str(std::string());
		onScreenText << "Ref1  Ref2  Ref3  Ref4  Ref5  Ref6  Ref7  Ref8";
		cv::putText(matchesMat, onScreenText.str(), cv::Point(image1.cols/2 - 200, image1.rows/2 +30), CV_FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(10, 10, 200), 1, CV_AA);
		cv::putText(matchesMat, resultText.str(), cv::Point(image1.cols/2 - 200, image1.rows/2 + 50), CV_FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(10, 10, 200), 1, CV_AA);
		cv::namedWindow("Features", CV_WINDOW_AUTOSIZE);
		cv::imshow("Features", matchesMat);
	}
	info_lastMatch.processingTime = (ros::Time::now() - startTime).toSec() * 1000.0; // ms
	return false;
}
Code Example #8
bool performEstimation
(
    const FeatureAlgorithm& alg,
    const ImageTransformation& transformation,
    const cv::Mat& sourceImage,
    std::vector<FrameMatchingStatistics>& stat
)
{
  Keypoints   sourceKp;
  Descriptors sourceDesc;

  cv::Mat gray;
  if (sourceImage.channels() == 3)
      cv::cvtColor(sourceImage, gray, CV_BGR2GRAY);
  else if (sourceImage.channels() == 4)
      cv::cvtColor(sourceImage, gray, CV_BGRA2GRAY);
  else if(sourceImage.channels() == 1)
      gray = sourceImage;

  if (!alg.extractFeatures(gray, sourceKp, sourceDesc))
    return false;

  std::vector<float> x = transformation.getX();
  stat.resize(x.size());

  const int count = x.size();

  cv::Mat     transformedImage;
  Keypoints   resKpReal;
  Descriptors resDesc;
  Matches     matches;

  // To convert ticks to milliseconds
  const double toMsMul = 1000. / cv::getTickFrequency();

#pragma omp parallel for private(transformedImage, resKpReal, resDesc, matches)
  for (int i = 0; i < count; i++)
  {
    float       arg = x[i];
    FrameMatchingStatistics& s = stat[i];

    transformation.transform(arg, gray, transformedImage);

    int64 start = cv::getTickCount();

    alg.extractFeatures(transformedImage, resKpReal, resDesc);

    // Initialize required fields
    s.isValid        = resKpReal.size() > 0;
    s.argumentValue  = arg;

    if (!s.isValid)
        continue;

    if (alg.knMatchSupported)
    {
      std::vector<Matches> knMatches;
      alg.matchFeatures(sourceDesc, resDesc, 2, knMatches);
      ratioTest(knMatches, 0.75, matches);

      // Compute percent of false matches that were rejected by ratio test
      // guard against an empty match set to avoid division by zero
      s.ratioTestFalseLevel = knMatches.empty() ? 0.0f :
          (float)(knMatches.size() - matches.size()) / (float)knMatches.size();
    }
    else
    {
      alg.matchFeatures(sourceDesc, resDesc, matches);
    }

    int64 end = cv::getTickCount();

    Matches correctMatches;
    cv::Mat homography;
    bool homographyFound = ImageTransformation::findHomography(sourceKp, resKpReal, matches, correctMatches, homography);

    // Some simple stats:
    s.isValid        = homographyFound;
    s.totalKeypoints = resKpReal.size();
    s.consumedTimeMs = (end - start) * toMsMul;

    // Compute overall percent of matched keypoints
    s.percentOfMatches      = (float) matches.size() / (float)(std::min(sourceKp.size(), resKpReal.size()));
    s.correctMatchesPercent = matches.empty() ? 0.0f : (float) correctMatches.size() / (float)matches.size();

    // Compute matching statistics
    computeMatchesDistanceStatistics(correctMatches, s.meanDistance, s.stdDevDistance);
  }

  return true;
}
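
Example #8 calls ratioTest with a different signature: an explicit threshold plus an output vector of surviving matches. A minimal sketch of that variant, assumed from the call site (Matches is taken to be the project's typedef for std::vector<cv::DMatch>):

#include <vector>
#include <opencv2/features2d/features2d.hpp>

typedef std::vector<cv::DMatch> Matches; // typedef assumed from the call site

// Sketch: keep the best match of each knn entry that passes the ratio test.
void ratioTest(const std::vector<Matches>& knMatches, float maxRatio,
               Matches& goodMatches)
{
  goodMatches.clear();
  for (size_t i = 0; i < knMatches.size(); ++i)
  {
    if (knMatches[i].size() < 2) continue; // cannot apply the test
    if (knMatches[i][0].distance / knMatches[i][1].distance <= maxRatio)
      goodMatches.push_back(knMatches[i][0]);
  }
}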
Code Example #9
/*****************************************************************************
 // The knn matching with k = 2.
 // This code performs the matching and the refinement.
 // @param query_image: the input image
 // @param matches_out: a pointer that stores the output matches. It is
 //                     necessary for pose estimation.
 */
int knn_match(cv::Mat& query_image,  std::vector< cv::DMatch> * matches_out)
{
    // variables that keep the query keypoints and query descriptors
    std::vector<cv::KeyPoint>           keypointsQuery;
    cv::Mat                             descriptorQuery;
    
    // Temporary variables for the matching results
    std::vector< std::vector< cv::DMatch> > matches1;
    std::vector< std::vector< cv::DMatch> > matches2;
    std::vector< std::vector< cv::DMatch> > matches_opt1;
    
    
    //////////////////////////////////////////////////////////////////////
    // 1. Detect the keypoints
    // This line detects keypoints in the query image
    _detector->detect(query_image, keypointsQuery);
    
    // If keypoints were found, descriptors are extracted.
    if(keypointsQuery.size() > 0)
    {
        // extract descriptors
        _extractor->compute( query_image, keypointsQuery, descriptorQuery);
        
    }
    
    //////////////////////////////////////////////////////////////////////////////
    // 2. Here we match the descriptors with the database descriptors.
    // with k-nearest neighbors with k=2
    _matcher.knnMatch(descriptorQuery , matches1, 2);
    
#ifdef DEBUG_OUT
    std::cout << "Found " << matches1.size() << " matching feature descriptors out of " << _matcher.getTrainDescriptors().size() << " database descriptors."  << std::endl;
#endif
    
    
    //////////////////////////////////////////////////////////////////////////////
    // 3. Filter the matches.
    // Accept only matches (knn with k=2) whose two neighbours belong to one image.
    // The database tree within _matcher contains descriptors of all input images,
    // so we expect both nearest neighbours to belong to the same image;
    // otherwise we remove the result.
    // Along with this, we count which reference image has the highest number of
    // matches and, at this point, consider that image the searched one.
    
    // initialize the hit counters with 0
    std::vector<int> hits(_num_ref_images, 0);
    
    // The loop runs through all matches and compares the image index
    // imgIdx. The two indices must be equal; otherwise the neighbours
    // belong to two different reference images.
    for (size_t i=0; i<matches1.size(); i++)
    {
        // need two neighbours to apply the comparison
        if (matches1[i].size() < 2) continue;
        // The comparison.
        if(matches1[i].at(0).imgIdx == matches1[i].at(1).imgIdx)
        {
            // we keep it
            matches_opt1.push_back(matches1[i]);
            // and count a hit
            hits[matches1[i].at(0).imgIdx]++;
        }
    }
    
#ifdef DEBUG_OUT
    std::cout << "Optimized " << matches_opt1.size() << " feature descriptors." << std::endl;
#endif
    
    // Now we search for the highest number of hits in our hit array.
    // The variable max_idx keeps the image id;
    // the variable max_value holds the number of hits.
    int max_idx = -1;
    int max_value = 0;
    for (int i=0; i<_num_ref_images; i++)
    {
#ifdef DEBUG_OUT
        std::cout << "for " << i << " : " << hits[i] << std::endl;
#endif
        if(hits[i]  > max_value)
        {
            max_value = hits[i];
            max_idx = i;
        }
    }
    
    
    
    // bail out early if no reference image received any hits
    if (max_idx < 0) return -1;

    ///////////////////////////////////////////////////////
    // 4. The cross-match
    // This time we test the database against the query descriptors.
    // The variable max_idx stores the reference image id, so we test only
    // the descriptors that belong to max_idx against the query descriptors.
    _matcher.knnMatch(_descriptorsRefDB[max_idx], descriptorQuery, matches2, 2);
    
    
    ///////////////////////////////////////////////////////
    // 5. Refinement; ratio test
    // The ratio test only accepts matches that are unambiguous:
    // the best hit must be clearly closer to the query descriptor than the second hit.
    int removed = ratioTest(matches_opt1);
#ifdef DEBUG_OUT
    std::cout << "Removed " << removed << " matched."  << std::endl;
#endif
    
    removed = ratioTest(matches2);
#ifdef DEBUG_OUT
    std::cout << "Removed " << removed << " matched."  << std::endl;
#endif
    
    ///////////////////////////////////////////////////////
    // 6. Refinement; symmetry test
    // We only accept matches which appear in both knn-matches.
    // It should not matter whether we test the database against the query
    // descriptors or the query descriptors against the database.
    // If we do not find the same solution in both directions, we toss the match.
    std::vector<cv::DMatch> symMatches;
    symmetryTest(  matches_opt1, matches2, symMatches);
#ifdef DEBUG_OUT
    std::cout << "Kept " << symMatches.size() << " matches after symetry test test."  << std::endl;
#endif
    
    ///////////////////////////////////////////////////////
    // 7. Refinement; epipolar constraint
    // We perform an epipolar test using the RANSAC method.
    if(symMatches.size() > 25)
    {
        matches_out->clear();
        ransacTest( symMatches,  _keypointsRefDB[max_idx], keypointsQuery, *matches_out);
        
        
    }
    
#ifdef DEBUG_OUT
    std::cout << "Kept " << matches_out->size() << " matches after RANSAC test."  << std::endl;
#endif
    
    ///////////////////////////////////////////////////////
    // 8.  Draw this image on screen.
    cv::Mat out;
    cv::drawMatches(feature_map_database[max_idx]._ref_image , _keypointsRefDB[max_idx], query_image, keypointsQuery, *matches_out, out, cv::Scalar(255,255,255), cv::Scalar(0,0,255));
    
    std::string num_matches_str;
    std::stringstream conv;
    conv << matches_out->size();
    conv >> num_matches_str;
    
    std::string text;
    text.append( num_matches_str);
    text.append("( " + _num_ref_features_in_db_str + " total)");
    text.append(" matches were found in reference image ");
    text.append( feature_map_database[max_idx]._ref_image_str);
    
    putText(out, text, cvPoint(20,20),
            cv::FONT_HERSHEY_COMPLEX_SMALL, 1.0, cvScalar(0,255,255), 1, CV_AA);
    
    cv::imshow("result", out);
    if (run_video) cv::waitKey(1);
    else cv::waitKey();
    
    
    
    // Delete the images
    query_image.release();
    out.release();
    
    
    
    return max_idx;
    
}
Code Example #10
/*****************************************************************************
 // This applies a brute-force match without a trained data structure.
 // It also calculates the two nearest neighbors.
 // @param query_image: the input image
 // @param matches_out: a pointer that stores the output matches. It is
 //                     necessary for pose estimation.
 */
int brute_force_match(cv::Mat& query_image,  std::vector< cv::DMatch> * matches_out)
{
    
    // variables that keep the query keypoints and query descriptors
    std::vector<cv::KeyPoint>           keypointsQuery;
    cv::Mat                             descriptorQuery;
    
    // Temporary variables for the matching results
    std::vector< std::vector< cv::DMatch> > matches1;
    std::vector< std::vector< cv::DMatch> > matches2;
    std::vector< std::vector< cv::DMatch> > matches_opt1;
    
    
    //////////////////////////////////////////////////////////////////////
    // 1. Detect the keypoints
    // This line detects keypoints in the query image
    _detector->detect(query_image, keypointsQuery);
    
    
    
    // If keypoints were found, descriptors are extracted.
    if(keypointsQuery.size() > 0)
    {
        // extract descriptors
        _extractor->compute( query_image, keypointsQuery, descriptorQuery);
        
    }
    
#ifdef DEBUG_OUT
    std::cout << "Found " << descriptorQuery.size() << " feature descriptors in the image."  << std::endl;
#endif
    
    
    //////////////////////////////////////////////////////////////////////////////
    // 2. Here we match the descriptors with all descriptors in the database
    // with k-nearest neighbors with k=2
    
    int max_removed = INT_MAX;
    int max_id = -1;
    
    for(int i=0; i<_descriptorsRefDB.size(); i++)
    {
        std::vector< std::vector< cv::DMatch> > matches_temp1;
        
        // Here we match all query descriptors against all db descriptors and
        // try to find matching descriptors
        _brute_force_matcher.knnMatch( descriptorQuery, _descriptorsRefDB[i],  matches_temp1, 2);
        
        
        ///////////////////////////////////////////////////////
        // 3. Refinement; ratio test
        // The ratio test only accepts matches that are unambiguous:
        // the best hit must be clearly closer to the query descriptor than the second hit.
        int removed = ratioTest(matches_temp1);
        
        
        
        // We keep the reference with the highest number of surviving hits,
        // i.e. the match vector with the fewest removed features.
        if(removed < max_removed)
        {
            max_removed = removed;
            max_id = i;
            matches1.clear();
            matches1 = matches_temp1;
        }
    }
    
#ifdef DEBUG_OUT
    std::cout << "Feature map number " << max_id << " has the highest hit with "<< matches1.size() -  max_removed << " descriptors." << std::endl;
#endif
    
    
    std::vector< std::vector< cv::DMatch> > matches_temp2;
    
    // Here we match all descriptors of the best reference image against the
    // query descriptors and try to find matching descriptors
    _brute_force_matcher.knnMatch(_descriptorsRefDB[max_id],  descriptorQuery,  matches_temp2, 2);
    
    // The ratio test only accepts matches that are unambiguous:
    // the best hit must be clearly closer to the query descriptor than the second hit.
    int removed = ratioTest(matches_temp2);
    
    
    
    
    ///////////////////////////////////////////////////////
    // 6. Refinement; symmetry test
    // We only accept matches which appear in both knn-matches.
    // It should not matter whether we test the database against the query
    // descriptors or the query descriptors against the database.
    // If we do not find the same solution in both directions, we toss the match.
    std::vector<cv::DMatch> symMatches;
    symmetryTest(  matches1, matches_temp2, symMatches);
#ifdef DEBUG_OUT
    std::cout << "Kept " << symMatches.size() << " matches after symetry test test."  << std::endl;
#endif
    
    ///////////////////////////////////////////////////////
    // 7. Refinement; epipolar constraint
    // We perform an epipolar test using the RANSAC method.
    if(symMatches.size() > 25)
    {
        matches_out->clear();
        ransacTest( symMatches,  _keypointsRefDB[max_id], keypointsQuery, *matches_out);
        
        
    }
    
#ifdef DEBUG_OUT
    std::cout << "Kept " << matches_out->size() << " matches after RANSAC test."  << std::endl;
#endif
    
    ///////////////////////////////////////////////////////
    // 8.  Draw this image on screen.
    cv::Mat out;
    cv::drawMatches(feature_map_database[max_id]._ref_image , _keypointsRefDB[max_id], query_image, keypointsQuery, *matches_out, out, cv::Scalar(255,255,255), cv::Scalar(0,0,255));
    
    std::string num_matches_str;
    std::stringstream conv;
    conv << matches_out->size();
    conv >> num_matches_str;
    
    std::string text;
    text.append( num_matches_str);
    text.append("( " + _num_ref_features_in_db_str + " total)");
    text.append(" matches were found in reference image ");
    text.append( feature_map_database[max_id]._ref_image_str);
    
    putText(out, text, cvPoint(20,20),
            cv::FONT_HERSHEY_COMPLEX_SMALL, 1.0, cvScalar(0,255,255), 1, CV_AA);
    
    cv::imshow("result", out);
    if (run_video) cv::waitKey(1);
    else cv::waitKey();
    
    
    
    // Delete the images
    query_image.release();
    out.release();
    
    
    return max_id;
    
}
Code Example #11
// Match feature points using symmetry test and RANSAC
// returns the fundamental matrix
cv::Mat RobustMatcher::match(cv::Mat &image1,
		cv::Mat& image2, // input image
		// output matches and keypoints
		std::vector<cv::DMatch> &matches, std::vector<cv::KeyPoint> &keypoints1,
		std::vector<cv::KeyPoint> &keypoints2) {

	// 1a. Detection of the SURF features
	helper->detectKeypoints(image1, keypoints1);
	helper->detectKeypoints(image2, keypoints2);

	// 1b. Extraction of the SURF descriptors
	cv::Mat descriptors1, descriptors2;
	extractor->compute(image1, keypoints1, descriptors1);
	extractor->compute(image2, keypoints2, descriptors2);

	//cout << "RobustMatcher: Keypoints 1: " << keypoints1.size() << endl;
	//cout << "RobustMatcher: Keypoints 2: " << keypoints2.size() << endl;

	// 2. Match the two image descriptors
	// Construction of the matcher
	cv::BruteForceMatcher<cv::L2<float> > matcher;

	// from image 1 to image 2
	// based on k nearest neighbours (with k=2)
	std::vector<std::vector<cv::DMatch> > matches1;
	matcher.knnMatch(descriptors1, descriptors2, matches1, // vector of matches (up to 2 per entry)
			2); // returns 2 nearest neighbours

	// from image 2 to image 1
	// based on k nearest neighbours (with k=2)
	std::vector<std::vector<cv::DMatch> > matches2;
	matcher.knnMatch(descriptors2, descriptors1, matches2, // vector of matches (up to 2 per entry)
			2); // return 2 nearest neighbours

	//cout << "Matchtes in Bild 2: " << matches1.size() << endl;
	// 3. Remove matches for which the NN ratio is greater than the threshold
	// clean image 1 -> image 2 matches
	int removed = ratioTest(matches1);

	//cout << "Ratio; Entfernte Matches Bild 2: " << removed << endl;

	//cout << "Matchtes in Bild 1: " << matches2.size() << endl;
	// clean image 2 -> image 1 matches
	removed = ratioTest(matches2);

	//cout << "Ratio; Entfernte Matches Bild 1: " << removed << endl;

	// 4. Remove non-symmetrical matches
	std::vector<cv::DMatch> symMatches;
	symmetryTest(matches1, matches2, symMatches);

	//cout << "Symmetrische Matches: " << symMatches.size() << endl;


	matches = symMatches;
	// 5. Validate matches using RANSAC
	cv::Mat fundamental = ransacTest(symMatches, keypoints1, keypoints2,
			matches);

	// return the found fundamental matrix
	return fundamental;
}
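
For context, here is a short driver showing how the pipeline these examples share fits together end to end, written against the OpenCV 2.4-era API the snippets use. Everything in it (the file names, ORB as the detector, the Hamming norm) is an illustrative assumption, and it relies on the ratioTest/symmetryTest/ransacTest sketches given earlier on this page being in scope.

#include <iostream>
#include <vector>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>

int main()
{
  // hypothetical input files
  cv::Mat image1 = cv::imread("frame1.png", CV_LOAD_IMAGE_GRAYSCALE);
  cv::Mat image2 = cv::imread("frame2.png", CV_LOAD_IMAGE_GRAYSCALE);
  if (image1.empty() || image2.empty()) return 1;

  // 1. detect keypoints and extract descriptors (ORB as one choice)
  cv::ORB orb;
  std::vector<cv::KeyPoint> keypoints1, keypoints2;
  cv::Mat descriptors1, descriptors2;
  orb(image1, cv::Mat(), keypoints1, descriptors1);
  orb(image2, cv::Mat(), keypoints2, descriptors2);

  // 2. knn-match in both directions (Hamming norm for binary ORB descriptors)
  cv::BFMatcher matcher(cv::NORM_HAMMING);
  std::vector<std::vector<cv::DMatch> > matches12, matches21;
  matcher.knnMatch(descriptors1, descriptors2, matches12, 2);
  matcher.knnMatch(descriptors2, descriptors1, matches21, 2);

  // 3.-5. ratio test, symmetry test, RANSAC (helpers sketched above)
  ratioTest(matches12);
  ratioTest(matches21);
  std::vector<cv::DMatch> symMatches, goodMatches;
  symmetryTest(matches12, matches21, symMatches);
  ransacTest(symMatches, keypoints1, keypoints2, goodMatches);

  std::cout << goodMatches.size() << " robust matches" << std::endl;
  return 0;
}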