Example #1
0
// Matches binary descriptors between two images, shows all raw matches,
// then keeps (and shows) only "good" matches whose distance is below
// 4x the minimum observed distance.
//
// rgb1/rgb2:       input images (visualization only)
// keyPts1/keyPts2: keypoints corresponding to descriptors1/descriptors2
// descriptors1/2:  binary descriptors (Hamming-comparable, e.g. ORB/BRIEF)
// matches:         output - filtered matches are appended to this vector
void descriptorsMatch(const Mat& rgb1,const Mat& rgb2,const vector<KeyPoint>& keyPts1,const vector<KeyPoint> keyPts2,const Mat& descriptors1,const Mat& descriptors2,vector<DMatch>& matches)
{
    // Brute-force Hamming matcher: the right metric for binary descriptors.
    vector<DMatch> des_match;
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
    matcher->match(descriptors1,descriptors2,des_match);

    Mat out;
    drawMatches(rgb1,keyPts1,rgb2,keyPts2,des_match,out);
    imshow("matches",out);
    waitKey();

    // FIX: dis_max was uninitialized in the original and was read in the
    // first loop iteration (undefined behavior). Initialize it to 0.
    float dis_min = 1000, dis_max = 0;
    for(size_t i = 0; i<des_match.size();i++)
    {
        float dis = des_match[i].distance;
        if(dis_min>dis) dis_min = dis;
        if(dis_max<dis) dis_max = dis;
    }

    // Keep only matches well below 4x the best (smallest) distance.
    for(size_t i = 0;i<des_match.size();i++)
    {
        if(des_match[i].distance < 4*dis_min) matches.push_back(des_match[i]);
    }
    drawMatches(rgb1,keyPts1,rgb2,keyPts2,matches,out,Scalar(0,255,0));
    imshow("matches",out);
    waitKey();
}
void FeatureDetector::processSurfFeatureDetection_CPU( cv::Mat * _image )
{
	// Detect SURF keypoints in the incoming frame and in the previously
	// stored frame, brute-force match their descriptors (L2 norm), draw
	// the match lines into *_image, and keep the raw incoming frame as
	// the new "previous" frame for the next call.
	const int minHessian = 400;
	std::cout << "Started." << std::endl;

	cv::SurfFeatureDetector detector( minHessian );
	std::vector< cv::KeyPoint > keypointsCurrent, keypointsPrevious;
	detector.detect( *_image, keypointsCurrent );
	detector.detect( previousMat, keypointsPrevious );

	//-- Step 2: compute descriptors for both frames.
	cv::SurfDescriptorExtractor extractor;
	std::cout << "2" << std::endl;
	cv::Mat descCurrent, descPrevious;
	extractor.compute( *_image, keypointsCurrent, descCurrent );
	extractor.compute( previousMat, keypointsPrevious, descPrevious );
	std::cout << "3" << std::endl;

	//-- Step 3: brute-force L2 matching current -> previous.
	cv::BFMatcher matcher( cv::NORM_L2 );
	std::vector< cv::DMatch > matchList;
	std::cout << "4" << std::endl;
	matcher.match( descCurrent, descPrevious, matchList );
	std::cout << "5" << std::endl;

	//-- Visualize, then rotate the frame buffers: the raw input becomes
	//-- previousMat, and the visualization is handed back via _image.
	cv::Mat visualization;
	drawMatches( *_image, keypointsCurrent, previousMat, keypointsPrevious, matchList, visualization );
	_image->copyTo( previousMat );
	visualization.copyTo( *_image );
}
//-----------------------------------------------------
void ofxSURFTracker::draw() {

    // Corner crosshairs: two short yellow ticks at each of the four
    // corners of the (width x height) tracking frame.
    ofNoFill();
    ofSetColor(255, 255, 0);
    ofSetLineWidth(1);
    const int tick = 10; // tick length in pixels
    const float cornerX[4] = { 0.0f, (float)width, (float)width, 0.0f };
    const float cornerY[4] = { 0.0f, 0.0f, (float)height, (float)height };
    const int dirX[4] = { 1, -1, -1, 1 }; // horizontal tick direction per corner
    const int dirY[4] = { 1, 1, -1, -1 }; // vertical tick direction per corner
    for (int c = 0; c < 4; c++) {
        ofDrawLine(cornerX[c], cornerY[c], cornerX[c] + dirX[c] * tick, cornerY[c]);
        ofDrawLine(cornerX[c], cornerY[c], cornerX[c], cornerY[c] + dirY[c] * tick);
    }

    // Optional overlays, each gated by its own debug flag.
    if (bDrawImage) {
        ofSetColor(255);
        trackImg.draw(0, 0);
    }
    if (bDrawResponses) {
        drawResponses();
    }
    if (bDrawMatches) {
        drawMatches();
    }
    if (bDrawFeatures) {
        drawFeatures();
    }
    if (bDrawHomography) {
        drawHomoGraphy();
    }
}
Example #4
0
// Filters 'matches' down to the "good" ones (distance < 2*min_dist as
// computed by maxMinDistances) and displays them in a blocking window.
void FeatureMatcher::drawGoodMatches( Mat img_1, Mat img_2, Mat descriptors_1,
        vector<KeyPoint> keypoints_1, vector<KeyPoint> keypoints_2, vector<DMatch> matches )
{
	//-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist )
	//-- PS.- radiusMatch can also be used here.
	vector<DMatch> good_matches;

	double max_dist = 0, min_dist = 100;
	maxMinDistances( matches, &max_dist, &min_dist );

	// FIX: iterate over the matches themselves. The original loop was
	// bounded by descriptors_1.rows while indexing matches[i], which reads
	// past the end of 'matches' whenever the matcher produced fewer
	// matches than descriptor rows (e.g. empty train set).
	for ( size_t i = 0; i < matches.size(); i++ ) {
		if ( matches[i].distance < 2 * min_dist ) {
			good_matches.push_back( matches[i] );
		}
	}

	//-- Draw only "good" matches, hiding keypoints without a partner.
	Mat img_matches;
	drawMatches( img_1, keypoints_1, img_2, keypoints_2, good_matches,
			img_matches, Scalar::all( -1 ), Scalar::all( -1 ), vector<char> (),
			DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

	//-- Show detected matches and block until a key is pressed.
	imshow( "Good Matches", img_matches );
	waitKey( 0 );
}
Example #5
0
// Draws matches of keypoints from two images onto a newly allocated
// output image. Caller owns the TMat returned through *outImg.
void ICLASS_API DrawMatches(
	TMat* img1, TCVectorKeyPoint* keypoints1,
	TMat* img2, TCVectorKeyPoint* keypoints2,
	TCVectorDMatch* matches1to2, TMat** outImg)
{
	// Convert the C-style keypoint vectors into cv::KeyPoint vectors.
	vector<KeyPoint> cvKeys1, cvKeys2;
	cvKeys1.reserve(keypoints1->size());
	for (size_t idx = 0; idx < keypoints1->size(); ++idx)
	{
		const TKeyPoint& kp = *keypoints1->at(idx);
		cvKeys1.push_back(KeyPoint(kp.x, kp.y, kp.size, kp.angle, kp.response, kp.octave, kp.class_id));
	}
	cvKeys2.reserve(keypoints2->size());
	for (size_t idx = 0; idx < keypoints2->size(); ++idx)
	{
		const TKeyPoint& kp = *keypoints2->at(idx);
		cvKeys2.push_back(KeyPoint(kp.x, kp.y, kp.size, kp.angle, kp.response, kp.octave, kp.class_id));
	}

	// Convert the match list likewise.
	vector<DMatch> cvMatches;
	cvMatches.reserve(matches1to2->size());
	for (size_t idx = 0; idx < matches1to2->size(); ++idx)
	{
		const TDMatch& dm = *matches1to2->at(idx);
		cvMatches.push_back(DMatch(dm.queryIdx, dm.trainIdx, dm.imgIdx, dm.distance));
	}

	// Render and hand the result back wrapped in a fresh TMat.
	Mat rendered;
	drawMatches(*img1->Mat(), cvKeys1, *img2->Mat(), cvKeys2, cvMatches, rendered);

	*outImg = new TMat(rendered);
}
// Matches the features of 'inputImage' against the stored reference
// descriptors (m_descriptors) and returns the number of "good" matches.
// Also renders the good matches into the member m_img_matches.
// Returns 0 when the detector is not active (m_state false).
tInt cRoadSigns::match(Mat inputImage)
{
	if (m_state)
		{
		Mat descriptors;
		std::vector<KeyPoint> keypoints;	
		
		m_detector.detect( inputImage, keypoints );
	
		//remove keypoints which are near to border
		tInt borderSize = inputImage.cols/4;	 
		keypoints.erase( remove_if(keypoints.begin(), keypoints.end(),
								   RoiPredicatePic((tFloat32)borderSize, (tFloat32)borderSize,
												(tFloat32)(inputImage.cols - borderSize),
												(tFloat32)(inputImage.rows - borderSize))),
						 keypoints.end() );
	
		m_extractor.compute( inputImage, keypoints, descriptors );	

		std::vector< DMatch > matches;

		//doing the matching
		m_matcher.match(descriptors,m_descriptors, matches );
		
		// Distance threshold per shape class; constants appear hand-tuned
		// per shape identifier (1, 2, everything else).
		tFloat64 min_dist = 0;
		if (m_shapeIdentifier == 1)
			min_dist = 0.1;
		else if (m_shapeIdentifier == 2)
			min_dist = 0.2;
		else
			min_dist = 5;
		//-- Quick calculation of max and min distances between keypoints
			
		//for( tInt i = 0; i < descriptors.rows; i++ )
		//	{ tFloat64 dist = matches[i].distance;
		//		if( dist < min_dist ) min_dist = dist;
		//		if( dist > max_dist ) max_dist = dist;
		//	}
		
		//creating vector with good matches; lesser distance is better than higher distance
		// NOTE(review): indexes matches[i] for i < descriptors.rows — this
		// assumes the matcher returned one match per descriptor row; verify
		// m_descriptors is never empty, otherwise this reads out of bounds.
		std::vector< DMatch > good_matches;
		for( tInt i = 0; i < descriptors.rows; i++ )
			{ if( matches[i].distance <= max(2*min_dist, 0.02) )
			{ good_matches.push_back( matches[i]); }
			}
		
	
		//drawKeypoints( inputImage, keypoints, inputImage, Scalar::all(-1), DrawMatchesFlags::DEFAULT );

		// Render the good matches against the stored reference image into
		// the member m_img_matches (side effect consumed elsewhere).
		drawMatches( inputImage, keypoints,m_image, m_keypoints, 
						good_matches, m_img_matches, Scalar::all(-1), Scalar::all(-1),
						vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

		return static_cast<tInt> (good_matches.size());
		}
	else 
		return 0;
}
Example #7
0
// Builds a side-by-side debug visualization of the two stored images with
// their keypoints overlaid and the filtered matches drawn between them.
// NOTE(review): drawKeypoints is called with source == destination, so
// this permanently draws the keypoints into the image1/image2 members on
// every call — confirm that is intended.
Mat
StitchedMap::get_debug()
{
  Mat out;
  drawKeypoints(image1, kpv1, image1, Scalar(255,0,0)); // Scalar(255,0,0) = blue in BGR
  drawKeypoints(image2, kpv2, image2, Scalar(255,0,0));
  drawMatches(image1,fil1, image2,fil2, matches,out,Scalar::all(-1),Scalar::all(-1));
  return out;
}
// Worker-thread entry point: builds a FLANN LSH index over the descriptors
// of the pre-selected cropImg, then loops forever grabbing camera frames,
// extracting features, ratio-testing 2-NN matches against the index and
// emitting a match visualization. Never returns on its own.
void FeatureMatchThread::run()
{
	Mat resultImg;
	Mat grayImg;

	cvtColor(cropImg,grayImg,CV_BGR2GRAY);
	featureDetector.detect(grayImg,keyPoints);
	featureExtractor.compute(grayImg,keyPoints,descriptors);
	// LSH index: 12 tables, key size 20, multi-probe level 2, Hamming
	// distance — so binary descriptors (e.g. ORB/BRIEF) are assumed here.
	flannIndex.build(descriptors,flann::LshIndexParams(12,20,2),cvflann::FLANN_DIST_HAMMING);

	while(true)
	{
		Mat captureImage_gray;

		vector<KeyPoint> captureKeyPoints;
		Mat captureDescription;
		vector<DMatch> goodMatches;

		cap >> captureImage;
		
		// Skip dropped/empty frames.
		if(captureImage.empty())
			continue;

		cvtColor(captureImage,captureImage_gray,CV_BGR2GRAY);
		featureDetector.detect(captureImage_gray,captureKeyPoints);
		featureExtractor.compute(captureImage_gray,captureKeyPoints,captureDescription);

		// 2-NN search: row i receives the indices/distances of the two best
		// neighbours for capture descriptor i.
		Mat matchIndex(captureDescription.rows,2,CV_32SC1);
		Mat matchDistance(captureDescription.rows,2,CV_32FC1);

		flannIndex.knnSearch(captureDescription,matchIndex,matchDistance,2,flann::SearchParams());
		
		// Lowe-style ratio test: accept a match only when the best neighbour
		// is clearly better (< 0.6x) than the second best.
		for(int i=0;i<matchDistance.rows;i++)
		{
			if(matchDistance.at<float>(i,0) < 0.6 * matchDistance.at<float>(i,1))
			{
				DMatch dmatches(i,matchIndex.at<int>(i,0),matchDistance.at<float>(i,0));
				goodMatches.push_back(dmatches);
			}
		}

		drawMatches(captureImage,captureKeyPoints,cropImg,keyPoints,goodMatches,resultImg);
		// NOTE(review): emits a pointer to resultImg, which is overwritten on
		// the next iteration — the receiving slot must copy the Mat promptly.
		emit NewFeatureMatch(&resultImg);

		imshow(WindowName,captureImage);
		// NOTE(review): re-registers the mouse callback every frame; doing
		// this once before the loop would suffice.
		cv::setMouseCallback(WindowName,mouse_callback);

		captureKeyPoints.clear();
		goodMatches.clear();
		waitKey(30);
	}

	return;
}
Example #9
0
// Renders the supplied matches between this->image and m2.image and pops
// them up in a window. Match lines are red; unmatched keypoints hidden.
void Assignment2::displayGoodMatches(Assignment2 &m2, const vector< DMatch> &good_matches)
{
	Mat visualization;
	drawMatches( this->image, this->keypoints, m2.image, m2.keypoints, good_matches, visualization, Scalar(0,0,255), 
		Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

	//-- Show detected matches (caller is responsible for waitKey pumping)
	//cout << good_matches.size() << endl;
	imshow( "Good Matches", visualization );
}
Example #10
0
// Visualizes the good matches between two images, shows them in a window
// and writes the visualization to "MatchesImage.jpg".
void Panorama::showMatches(Mat img1, Mat img2, vector<KeyPoint> keypoints1, vector<KeyPoint> keypoints2, vector<DMatch> good_matches){
    
    cout << "Drawing the results..." << endl;
	namedWindow("matches", 1);
	Mat img_matches;
	drawMatches(img1, keypoints1, img2, keypoints2, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
                vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
	imshow("matches", img_matches);
    
    // FIX: save through cv::imwrite. The original allocated a 200-byte
    // filename buffer with new[] and never freed it (memory leak), and went
    // through the deprecated C API (IplImage / cvSaveImage).
    imwrite("MatchesImage.jpg", img_matches);
}
// Renders the stored good matches between the two cached images into the
// member Image_Matches, logs each match's keypoint indices, and returns
// the rendered image.
cv::Mat biosmabe::MabeFeatureComparison::getImageMatches(){
	// FIX: removed the unused local 'img_matches' — drawMatches writes into
	// the member Image_Matches, so the local was dead and misleading.
	drawMatches(Image_Matches1, keypoints_1, Image_Matches2, keypoints_2,
		good_matches, Image_Matches, cv::Scalar::all(-1), cv::Scalar::all(-1),
		vector<char>(), cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

	// Log query/train keypoint indices for every good match.
	for (int i = 0; i < (int)good_matches.size(); i++)
	{
		printf("-- Good Match [%d] Keypoint 1: %d  -- Keypoint 2: %d  \n", i, good_matches[i].queryIdx, good_matches[i].trainIdx);
	}

	return Image_Matches;
}
Example #12
0
void drawPoints(const string &aImageFilename0, const string &aPointsFilename0, const string &aImageFilename1, const string &aPointsFilename1, const string &aMatchFilename)
{
	Im2D_U_INT1 red0, green0, blue0;
	getGrayImages(aImageFilename0, red0, green0, blue0);
	drawDetectedPoints(aPointsFilename0, blue0);

	Im2D_U_INT1 red1, green1, blue1;
	getGrayImages(aImageFilename1, red1, green1, blue1);
	drawDetectedPoints(aPointsFilename1, blue1);

	drawMatches(aMatchFilename, green0, green1);

	string dstImageFilename0 = drawn_matches_filename(aMatchFilename, aImageFilename0);
	save_tiff(dstImageFilename0, red0, green0, blue0);

	string dstImageFilename1 = drawn_matches_filename(aMatchFilename, aImageFilename1);
	save_tiff(dstImageFilename1, red1, green1, blue1);
}
Example #13
0
// Detects keypoints in both images, matches their descriptors and returns
// the homography aligning img_1 to img_2 (via validateHomography).
cv::Mat AntiShake::antiShake(Mat &img_1, Mat &img_2, int matches_type, int featurePoints, int corePx,
		double absoluteRelation) {

	Mat workImage1, workImage2;
	reduceDifferences(img_1, img_2, workImage1, workImage2, 7, 7); // STEPS 1 to 4 here

	// STEP 5: KeyPoint Detection. FIX: cv::Ptr replaces raw new/delete so
	// the detector is released even if a later call throws (the original
	// leaked on any exception between new and delete).
	cv::Ptr<cv::FeatureDetector> detector(new cv::FastFeatureDetector(corePx, true));
	std::vector<KeyPoint> keypoints_1, keypoints_2;
	detector->detect(workImage1, keypoints_1);
	detector->detect(workImage2, keypoints_2);
	cout << "==== STEP 5 complete: keypoints detected, (keypoints1.size(), keypoints2.size()) = ("
			<< keypoints_1.size() << ", " << keypoints_2.size() << ")" << endl;

	// STEP 6: Calculate descriptors (BRIEF feature vectors), RAII-managed.
	cv::Ptr<cv::DescriptorExtractor> extractor(new cv::BriefDescriptorExtractor());
	Mat descriptors_1, descriptors_2;
	extractor->compute(workImage1, keypoints_1, descriptors_1);
	extractor->compute(workImage2, keypoints_2, descriptors_2);
	cout << "==== STEP 6 complete: extract descriptors" << endl;

	// STEP 7: Get Matches (delegated; fills good_matches, pts1, pts2).
	vector<DMatch> good_matches;
	std::vector<Point2f> pts1, pts2;

	this->getBestMatches(matches_type, featurePoints, good_matches, pts1, pts2, descriptors_1,
			descriptors_2, keypoints_1, keypoints_2, workImage1.rows, workImage1.cols, absoluteRelation);
	Mat img_matches;
	drawMatches(workImage1, keypoints_1, workImage2, keypoints_2, good_matches, img_matches,
			Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
	if (shouldPrint)
		displayWindow(img_matches, "MATCHES");
	cout << "==== STEP 7 complete: finished matching descriptors: " << featurePoints << endl;

	// STEP 8: Find Homography from the matched point sets.
	vector<uchar> inliers(pts1.size(), 0);
	Mat homography = validateHomography(pts1, pts2, inliers, true);
	cout << "==== STEP 8 complete: finished calculating right homographY." << endl;

	return homography;
}
    // Debug overlay: draws the current query keypoints and the reference
    // image keypoints in red; when the tracked object is found, also draws
    // the matches (white) and the detected bounding quad (yellow).
    // Style state is saved/restored around all drawing.
    void FeaturesTracker::draw(){
               
        ofPushStyle();
        
        ofSetColor(ofColor::red);
        drawQueryPoints();
        drawImgKeyPoints();
        

        
        if(isFound()){
            ofSetColor(ofColor::white);
            drawMatches();
            ofSetColor(ofColor::yellow);
            drawQuad();
            
        }
        
        ofPopStyle();
    }
Example #15
0
  // Loads two images, detects and matches ORB features between them and
  // displays the inputs plus the match visualization (blocking).
  void matchORB(string image1, string image2)
  {
    Mat img1 = imread(image1,IMREAD_GRAYSCALE);
    Mat img2 = imread(image2,IMREAD_GRAYSCALE);

    // FIX: check for failed loads BEFORE displaying. The original called
    // imshow on the (possibly empty) Mats first, which throws/aborts
    // inside OpenCV when an image file is missing.
    if(img1.empty() || img2.empty())
    {
        printf("Can't load all the images!");
        return;
    }   

    namedWindow("ima1", WINDOW_AUTOSIZE);
    namedWindow("ima2", WINDOW_AUTOSIZE);
    imshow("ima1", img1);
    imshow("ima2", img2);

  // ORB acts as both detector and extractor here.
    ORB detector_extractor;
    //Ptr<FeatureDetector> detector = FeatureDetector::create("SURF");
    vector<KeyPoint> keypoints1, keypoints2;
    detector_extractor.detect(img1, keypoints1);
    detector_extractor.detect(img2, keypoints2);

  // computing descriptors
    //Ptr<DescriptorExtractor> extractor = DescriptorExtractor::create("SURF");
    Mat descriptors1, descriptors2;
    detector_extractor.compute(img1, keypoints1, descriptors1);
    detector_extractor.compute(img2, keypoints2, descriptors2);

  //Initialise BruteForceMatcher: For each descriptor in the first set, this matcher finds the closest descriptor in the second set by trying each on (=brute)
  // NOTE(review): NORM_L2 on ORB's binary descriptors works but NORM_HAMMING
  // is the recommended metric — confirm before changing behavior.
    BFMatcher matcher(NORM_L2);
    vector< DMatch > matches;
    matcher.match(descriptors1, descriptors2, matches);

  // drawing the results
    namedWindow("matches", 1);
    Mat img_matches;
    drawMatches(img1, keypoints1, img2, keypoints2, matches, img_matches);
    imshow("matches", img_matches);

    waitKey(0);
  }
// Visualizes the filtered keypoint correspondences between the last two
// frames: loads both RGB images from disk as grayscale, draws the matches
// (green lines, blue unmatched points, honoring matchesMask) and blocks
// until a key is pressed.
void CameraPoseOptimization::drawCorrespondenceBetweenTwoFrames(
	vector<cv::KeyPoint>& keypointsInCurrFrame,
	vector<DMatch>& filteredMatches,
	vector<char>& matchesMask)
{
	cv::Mat img1, img2, drawImg;
	namedWindow("output", CV_WINDOW_NORMAL);
	// The previous frame is the second-to-last entry in m_frameInfos;
	// its color timestamp indexes the image filename.
	int currImgIdx = (int)m_frameInfos.size() - 2;
	string strImageFilename = m_strDataPath + "rgb/" + m_strColorStamp[currImgIdx] + ".png";
	img1 = cv::imread(strImageFilename, CV_LOAD_IMAGE_GRAYSCALE);
	currImgIdx++;
	strImageFilename = m_strDataPath + "rgb/" + m_strColorStamp[currImgIdx] + ".png";
	img2 = cv::imread(strImageFilename, CV_LOAD_IMAGE_GRAYSCALE);
	
	// Green match lines, blue single points; matchesMask selects which
	// matches are actually rendered.
	drawMatches(img1, m_keypointsInLastFrame, img2, keypointsInCurrFrame, filteredMatches,drawImg, Scalar(0, 255, 0), Scalar(255, 0, 0), matchesMask);
	
	// This is to draw the correspondence between two figures without using Perspective-Transformation-Check process
	//	drawMatches(img1, keypointsInLastFrame, img2, keypointsInCurrFrame, filteredMatches, drawImg);
	
	imshow("output", drawImg);
	waitKey(0);
}
// Computes correspondences between descrip1 and descrip2 according to the
// requested criterion and returns an image with the matches drawn.
//   criterio == "BFCrossCheck" -> brute-force Hamming with cross-check
//   criterio == "Flann"        -> FLANN matcher with an LSH index
// Any other value leaves 'coincidencias' empty (only cleared).
Mat hallaCorresp(Mat im1,Mat im2,vector<KeyPoint> kp1,vector<KeyPoint> kp2,Mat descrip1,Mat descrip2,string criterio,vector<DMatch> &coincidencias){

    Mat emparejados;

    coincidencias.clear();
    if(criterio.compare("BFCrossCheck")==0){
        // FIX: the original declared an uninitialized 'bool crossCheck' and
        // assigned it inside the constructor call — confusing and fragile.
        // Pass the flag directly instead.
        BFMatcher m(NORM_HAMMING, /*crossCheck=*/true);
        m.match(descrip1, descrip2,coincidencias);

    }else if(criterio.compare("Flann")==0){
 //       Ptr<DescriptorMatcher> flann=DescriptorMatcher::create("FlannBased");
 //       flann->match(descrip1, descrip2,coincidencias);

        // LSH index (table_number=15, key_size=15, multi_probe_level=0) —
        // suited to binary descriptors.
        cv::FlannBasedMatcher flann(new cv::flann::LshIndexParams(15,15,0));
        flann.match(descrip1, descrip2,coincidencias);
    } 
    drawMatches(im1,kp1,im2,kp2,coincidencias,emparejados);

    return emparejados;
}
Example #18
0
/*****************************************************************************
    *  @brief    : matchFeature
    *  @author   : Zhangle
    *  @date     : 2014/10/29 10:35
    *  @version  : ver 1.0
    *  @inparam  : 
    *  @outparam :  
*****************************************************************************/
// Matches the precomputed left/right image descriptors, estimates a global
// homography from the matches, extracts the matched feature points into
// the left/right member vectors, and renders the matches into matchMat.
void ImageProcess::matchFeature()
{
	/* clear previous results */
	if (!leftFeaturePoint.empty())
	{
		leftFeaturePoint.clear();
	}
	if (!rightFeaturePoint.empty())
	{
		rightFeaturePoint.clear();
	}

	Mat warpImage = leftImageMat.clone();// clone working copies so the source members stay untouched
	Mat referImage = rightImageMat.clone();
	/* matching (k-nearest-neighbour, k = 2) */
	stitcher.knnMatch(leftImageDesc,rightImageDesc,knn_matches,matches,2);
	// estimate the global homography (globalHomo) from the matches
	float homographyReprojectionThreshold = 1.0;
	bool homographyFound = stitcher.refineMatchesWithHomography(leftImageKeyPoints,rightImageKeyPoints,homographyReprojectionThreshold,matches,globalHomo); 
	/* extract the matched feature points of the left and right images */
	stitcher.getPointsfromMatches(leftImageKeyPoints,rightImageKeyPoints,matches,leftFeaturePoint,rightFeaturePoint);
	// NOTE(review): homographyFound is computed but never checked — matches
	// are drawn even when homography refinement failed; confirm intended.
	vector<DMatch> inliers = matches;
	drawMatches(warpImage,leftImageKeyPoints,referImage,rightImageKeyPoints,inliers,matchMat);
}
Example #19
0
// Matches the object's descriptors against the scene's, filters to "good"
// matches (<= 3*min_dist), localizes the object in the scene through a
// homography, draws the detection and returns true when the projected
// object area in the scene exceeds 1000 px^2.
// NOTE(review): obj and scn are passed BY VALUE — each call copies both
// RelicDetect instances (images, keypoints, descriptors); verify that the
// copies are intended.
bool RelicDetect::Match(RelicDetect obj,RelicDetect scn)
{
	FlannBasedMatcher matcher;
	std::vector< DMatch > matches;
	
	matcher.match(obj.descriptors, scn.descriptors, matches);
	double max_dist = 0; double min_dist = 100;
	//-- Quick calculation of max and min distances between keypoints
	// NOTE(review): indexes matches[i] with i < obj.descriptors.rows — this
	// assumes one match per query descriptor row; an empty scene descriptor
	// set would make this read out of bounds.
	for (int i = 0; i < obj.descriptors.rows; i++)
	{
		double dist = matches[i].distance;
		if (dist < min_dist) min_dist = dist;
		if (dist > max_dist) max_dist = dist;
	}
	printf("-- Max dist : %f \n", max_dist);
	printf("-- Min dist : %f \n", min_dist);
	//-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
	std::vector< DMatch > good_matches;
	for (int i = 0; i < obj.descriptors.rows; i++)
	{
		if (matches[i].distance <= 3 * min_dist)
		{
			good_matches.push_back(matches[i]);
		}
	}
	// Recompute distance statistics over the filtered set for logging.
	max_dist = 0;min_dist = 100;double total_min_dist = 0;
	for (int i = 0; i < good_matches.size(); i++)
	{
		double dist = good_matches[i].distance;
		total_min_dist += dist;
		if (dist < min_dist) min_dist = dist;
		if (dist > max_dist) max_dist = dist;

	}
	printf("-- good matches Max dist : %f \n", max_dist);
	printf("-- good matches Min dist : %f \n", min_dist);
	printf("-- good matches total Min dist : %f \n", total_min_dist);
	cout << "-- good matches size " << good_matches.size() << endl;
	cout << "-- dist per match" << total_min_dist / (double)good_matches.size() << endl;
	Mat img_matches;
	drawMatches(obj.img_color, obj.keypoints, scn.img_color, scn.keypoints,
		good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
		std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
	//imshow("matches", img_matches);
	//-- Localize the object
	std::vector<Point2f> obj_points;
	std::vector<Point2f> scn_points;
	for (size_t i = 0; i < good_matches.size(); i++)
	{
		//-- Get the keypoints from the good matches
		obj_points.push_back(obj.keypoints[good_matches[i].queryIdx].pt);
		scn_points.push_back(scn.keypoints[good_matches[i].trainIdx].pt);
	}
	// NOTE(review): findHomography needs >= 4 point pairs and may return an
	// empty Mat; the H.at<double> loop and perspectiveTransform below would
	// then fail — consider guarding.
	Mat H = cv::findHomography(obj_points, scn_points, RANSAC);
	cout << "H:" << endl;
	for (int i = 0;i < H.rows;i++)
	{
		for (int j = 0;j < H.cols;j++)
		{
			cout << H.at<double>(i, j) << " ";
		}
		cout << endl;
	}
	//-- Get the corners from the image_1 ( the object to be "detected" )
	std::vector<Point2f> obj_corners(4);
	obj_corners[0] = cvPoint(0, 0);
	obj_corners[1] = cvPoint(obj.img_color.cols, 0);
	obj_corners[2] = cvPoint(obj.img_color.cols, obj.img_color.rows);
	obj_corners[3] = cvPoint(0, obj.img_color.rows);
	std::vector<Point2f> scene_corners(4);
	// Project the object's corners into scene coordinates.
	perspectiveTransform(obj_corners, scene_corners, H);
	cout << "object area" << contourArea(obj_corners) << endl;
	cout << "scene detected area" << contourArea(scene_corners) << endl;
	auto scene_area = contourArea(scene_corners);
	//-- Draw lines between the corners (the mapped object in the scene - image_2 )
	// The x-offset by obj.img_color.cols shifts into the scene half of the
	// side-by-side img_matches canvas.
	line(img_matches, scene_corners[0] + Point2f(obj.img_color.cols, 0), scene_corners[1] + Point2f(obj.img_color.cols, 0), Scalar(0, 255, 0), 4);
	line(img_matches, scene_corners[1] + Point2f(obj.img_color.cols, 0), scene_corners[2] + Point2f(obj.img_color.cols, 0), Scalar(0, 255, 0), 4);
	line(img_matches, scene_corners[2] + Point2f(obj.img_color.cols, 0), scene_corners[3] + Point2f(obj.img_color.cols, 0), Scalar(0, 255, 0), 4);
	line(img_matches, scene_corners[3] + Point2f(obj.img_color.cols, 0), scene_corners[0] + Point2f(obj.img_color.cols, 0), Scalar(0, 255, 0), 4);
	//-- Show detected matches
	imshow("Good Matches & Object detection", img_matches);
	waitKey(0);
	// Detection criterion: projected object occupies > 1000 px^2 of scene.
	if (scene_area>1000)
	{
		return true;
	} 
	else
	{
		return false;
	}
}
/*
 * @function surf_feature_detect_bruteforce_RANSAC_Homography
 *           SURF feature extraction and matching; outlier removal via a
 *           RANSAC fundamental-matrix estimate, then object localization
 *           via a homography.
 * @param SourceImg    object (query) image
 * @param SceneImg     scene (train) image
 * @param imageMatches output visualization of the inlier matches
 * @param string       window title used for display
 * @return null
 * @method SURF feature detector / SURF descriptor
 * @method findFundamentalMat  RANSAC outlier rejection
 * @method findHomography      perspective-transform estimation
 */
void surf_feature_detect_bruteforce_RANSAC_Homography(Mat SourceImg, Mat SceneImg, Mat imageMatches, char* string)
{
	vector<KeyPoint> keyPoints1, keyPoints2;
	SurfFeatureDetector detector(400);
	detector.detect(SourceImg, keyPoints1); // keypoints of the object image
	detector.detect(SceneImg, keyPoints2); // keypoints of the scene image

	SurfDescriptorExtractor surfDesc;
	Mat SourceImgDescriptor, SceneImgDescriptor;
	surfDesc.compute(SourceImg, keyPoints1, SourceImgDescriptor); // SURF descriptors, object image
	surfDesc.compute(SceneImg, keyPoints2, SceneImgDescriptor); // SURF descriptors, scene image

	// Brute-force matching, then keep only the 30 best matches.
	BruteForceMatcher<L2<float>>matcher;
	vector<DMatch> matches;
	matcher.match(SourceImgDescriptor, SceneImgDescriptor, matches);
	// FIX: the original unconditionally ran nth_element(begin()+29) and
	// erase(begin()+30), which is undefined behavior whenever fewer than
	// 30 matches were found.
	if (matches.size() > 30)
	{
		std::nth_element(matches.begin(), matches.begin() + 29, matches.end());
		matches.erase(matches.begin() + 30, matches.end());
	}

	// FM_RANSAC needs at least 8 point pairs; bail out on degenerate input.
	int ptCount = (int)matches.size();
	if (ptCount < 8)
		return;

	// Collect matched point coordinates into two Nx2 float matrices.
	Mat p1(ptCount, 2, CV_32F);
	Mat p2(ptCount, 2, CV_32F);
	Point2f pt;
	for(int i = 0; i < ptCount; i++)
	{
		pt = keyPoints1[matches[i].queryIdx].pt;
		p1.at<float>(i, 0) = pt.x;
		p1.at<float>(i, 1) = pt.y;

		pt = keyPoints2[matches[i].trainIdx].pt;
		p2.at<float>(i, 0) = pt.x;
		p2.at<float>(i, 1) = pt.y;
	}

	// RANSAC fundamental-matrix estimation marks outliers with status 0.
	Mat m_Fundamental;
	vector<uchar> m_RANSACStatus;
	m_Fundamental = findFundamentalMat(p1, p2, m_RANSACStatus, FM_RANSAC);
	int OutlinerCount = 0;
	for(int i = 0; i < ptCount; i++)
	{
		if(m_RANSACStatus[i] == 0)
		{
			OutlinerCount++;
		}
	}

	// Gather the inliers and build a 1:1 match list for drawMatches.
	vector<Point2f> m_LeftInlier;
	vector<Point2f> m_RightInlier;
	vector<DMatch> m_InlierMatches;

	int InlinerCount = ptCount - OutlinerCount;
	// findHomography below needs at least 4 point pairs.
	if (InlinerCount < 4)
		return;
	m_InlierMatches.resize(InlinerCount);
	m_LeftInlier.resize(InlinerCount);
	m_RightInlier.resize(InlinerCount);
	InlinerCount = 0;
	for (int i=0; i<ptCount; i++)
	{
		if (m_RANSACStatus[i] != 0)
		{
			m_LeftInlier[InlinerCount].x = p1.at<float>(i, 0);
			m_LeftInlier[InlinerCount].y = p1.at<float>(i, 1);
			m_RightInlier[InlinerCount].x = p2.at<float>(i, 0);
			m_RightInlier[InlinerCount].y = p2.at<float>(i, 1);
			m_InlierMatches[InlinerCount].queryIdx = InlinerCount;
			m_InlierMatches[InlinerCount].trainIdx = InlinerCount;
			InlinerCount++;
		}
	}

	// Convert the inlier points to the KeyPoint form drawMatches expects.
	vector<KeyPoint> key1(InlinerCount);
	vector<KeyPoint> key2(InlinerCount);
	KeyPoint::convert(m_LeftInlier, key1);
	KeyPoint::convert(m_RightInlier, key2);

	// Visualize the inlier matches surviving the fundamental-matrix filter.
	drawMatches(SourceImg, key1, SceneImg, key2, m_InlierMatches, imageMatches);
	//drawKeypoints(SourceImg, key1, SceneImg, Scalar(255, 0, 0), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
	
	vector<Point2f> obj;
	vector<Point2f> scene;
	for(unsigned int i = 0; i < m_InlierMatches.size(); i++)
	{
		obj.push_back(key1[m_InlierMatches[i].queryIdx].pt); // object-image points
		scene.push_back(key2[m_InlierMatches[i].trainIdx].pt); // scene-image points
	}
	// Estimate the perspective transform mapping the object into the scene.
	Mat H = findHomography(obj, scene, CV_RANSAC);
	vector<Point2f> obj_corners(4);
	obj_corners[0] = cvPoint(0, 0);
	obj_corners[1] = cvPoint(SourceImg.cols, 0);
	obj_corners[2] = cvPoint(SourceImg.cols, SourceImg.rows);
	obj_corners[3] = cvPoint(0, SourceImg.rows);
	vector<Point2f> scene_corners(4);
	// Project the object corners into scene coordinates via H.
	perspectiveTransform(obj_corners, scene_corners, H);

	// Outline the detected object; the x-offset by SourceImg.cols shifts
	// into the scene half of the side-by-side imageMatches canvas.
	line(imageMatches, scene_corners[0] + Point2f(SourceImg.cols, 0), scene_corners[1] + Point2f(SourceImg.cols, 0), Scalar(0, 0, 255), 4);
	line(imageMatches, scene_corners[1] + Point2f(SourceImg.cols, 0), scene_corners[2] + Point2f(SourceImg.cols, 0), Scalar(0, 0, 255), 4);
	line(imageMatches, scene_corners[2] + Point2f(SourceImg.cols, 0), scene_corners[3] + Point2f(SourceImg.cols, 0), Scalar(0, 0, 255), 4);
	line(imageMatches, scene_corners[3] + Point2f(SourceImg.cols, 0), scene_corners[0] + Point2f(SourceImg.cols, 0), Scalar(0, 0, 255), 4);
	imshow(string, imageMatches);

	imwrite("feature_detect.jpg", imageMatches);
}
Example #21
0
int main(int argc, char ** argv) 
{

	//std::cout<<sizeof(cv::Point2i)<<" "<<sizeof(CvPoint)<<std::endl;

	// process command line args
	if(argc != 6 && argc != 7 && argc != 1){
		help(argv);
		return 1;
	}

	// names of the two image files
	std::string fname1;
	std::string fname2;
	cv::Mat imgRGB1;
	cv::Mat imgRGB2;
	cv::Mat imgRGB3;
	bool do_rot=false;
	// standard file extensions
	std::vector<std::string> fextensions;
	fextensions.push_back(".bmp");
	fextensions.push_back(".jpeg");
	fextensions.push_back(".jpg");
	fextensions.push_back(".jpe");
	fextensions.push_back(".jp2");
	fextensions.push_back(".png");
	fextensions.push_back(".pgm");
	fextensions.push_back(".ppm");
	fextensions.push_back(".sr");
	fextensions.push_back(".ras");
	fextensions.push_back(".tiff");
	fextensions.push_back(".tif");

	// if no arguments are passed: 
	if(argc==1){
		int i=0;
		int fextensions_size=fextensions.size();
		while(imgRGB1.empty()||imgRGB2.empty()){
			fname1 = "../../images/img1"+fextensions[i];
			fname2 = "../../images/img2"+fextensions[i];
			imgRGB1 = cv::imread(fname1);
			imgRGB2 = cv::imread(fname2);
			i++;
			if(i>=fextensions_size) break;
		}
		if (imgRGB2.empty()||imgRGB2.empty())
		{
			std::cout<<"image(s) "<<fname1<<", "<<fname2<<" not found." << std::endl;
			return 2;
		}
	}
	else{
		if(strncmp("rot-", argv[1], 4)==0){
			do_rot=true;
			int i=0;
			int fextensions_size=fextensions.size();
			while(imgRGB1.empty()){
				fname1 = std::string(argv[1]+4)+"/img1"+fextensions[i];
				imgRGB1 = cv::imread(fname1);
				i++;
				if(i>=fextensions_size) break;
			}
			if (imgRGB2.empty())
			{
				std::cout<<"image not found." << std::endl;
				return 2;
			}
		}
		else{
			int i=0;
			int fextensions_size=fextensions.size();
			while(imgRGB1.empty()||imgRGB2.empty()){
				fname1 = std::string(argv[1])+"/img1"+fextensions[i];
				fname2 = std::string(argv[1])+"/img"+std::string(argv[2])+fextensions[i];
				imgRGB1 = cv::imread(fname1);
				imgRGB2 = cv::imread(fname2);
				i++;
				if(i>=fextensions_size) break;
			}
			if (imgRGB2.empty()||imgRGB2.empty())
			{
				std::cout<<"image(s)"<<fname1<<", "<<fname2<<" not found." << std::endl;
				return 2;
			}
		}
		//unsigned int N=atoi(argv[3]);
		if (imgRGB1.empty())
		{
			fname1 = std::string(argv[1]+4)+"/img1.pgm";
			imgRGB1 = cv::imread(fname1);
			if (imgRGB1.empty()){
				std::cout<<"image not found at " << fname1 << std::endl;
				return 2;
			}
		}
	}

	// convert to grayscale
	cv::Mat imgGray1;
	cv::cvtColor(imgRGB1, imgGray1, CV_BGR2GRAY);
	cv::Mat imgGray2;
	if(!do_rot){
		cv::cvtColor(imgRGB2, imgGray2, CV_BGR2GRAY);
	}

	// run FAST in first image
	std::vector<cv::KeyPoint> keypoints, keypoints2;
	int threshold;

	// create the detector:
	cv::Ptr<cv::FeatureDetector> detector;
	if(argc==1){
		detector = new cv::BriskFeatureDetector(60,4);
	}
	else{
		if(strncmp("FAST", argv[3], 4 )==0){
			threshold = atoi(argv[3]+4);
			if(threshold==0)
				threshold = 30;
			detector = new cv::FastFeatureDetector(threshold,true);
		}
		else if(strncmp("AGAST", argv[3], 5 )==0){
			threshold = atoi(argv[3]+5);
			if(threshold==0)
				threshold = 30;
			detector = new cv::BriskFeatureDetector(threshold,0);
		}
		else if(strncmp("BRISK", argv[3], 5 )==0){
			threshold = atoi(argv[3]+5);
			if(threshold==0)
				threshold = 30;
			detector = new cv::BriskFeatureDetector(threshold,4);
		}
		else if(strncmp("SURF", argv[3], 4 )==0){
			threshold = atoi(argv[3]+4);
			if(threshold==0)
				threshold = 400;
			detector = new cv::SurfFeatureDetector(threshold);
		}
		else if(strncmp("SIFT", argv[3], 4 )==0){
			float thresh = 0.04 / 3 / 2.0;
			float edgeThreshold=atof(argv[3]+4);
			if(edgeThreshold==0)
				thresh = 10.0;
			detector = new cv::SiftFeatureDetector(thresh,edgeThreshold);
		}
		else{
			detector = cv::FeatureDetector::create( argv[3] );
		}
		if (detector.empty()){
			std::cout << "Detector " << argv[3] << " not recognized. Check spelling!" << std::endl;
			return 3;
		}
	}

	int repeat_cnt = atoi(argv[6]);
	{
		clock_t start = clock();
		for( int i=0; i<repeat_cnt; i++ ){
			keypoints.clear();
			keypoints2.clear();
			detector->detect(imgGray1,keypoints);
			detector->detect(imgGray2,keypoints2);	
		}
		clock_t end = clock();
		double time_total = (double)(end - start) / repeat_cnt / (double)CLOCKS_PER_SEC;
		printf("Time consumed in keypoint detection: %lfs\n", time_total );
	}

	// now the extractor:
	bool hamming=true;
	bool is_black_white = false;
	cv::Ptr<cv::DescriptorExtractor> descriptorExtractor;
	// now the extractor:
	if(argc==1){
		descriptorExtractor = new cv::BriskDescriptorExtractor();
	}
	else{
		if(std::string(argv[4])=="SBRISK"){
			descriptorExtractor = new cv::ZKBriskDescriptorExtractor();
			is_black_white      = true;
		}
		else if(std::string(argv[4])=="BRISK"){
			descriptorExtractor = new cv::BriskDescriptorExtractor();
		}
		else if(std::string(argv[4])=="U-BRISK"){
			descriptorExtractor = new cv::BriskDescriptorExtractor(false);
		}
		else if(std::string(argv[4])=="SU-BRISK"){
			descriptorExtractor = new cv::BriskDescriptorExtractor(false,false);
		}
		else if(std::string(argv[4])=="S-BRISK"){
			descriptorExtractor = new cv::BriskDescriptorExtractor(true,false);
		}
		else if(std::string(argv[4])=="BRIEF"){
			descriptorExtractor = new cv::BriefDescriptorExtractor(64);
		}
		else if(std::string(argv[4])=="CALONDER"){
			descriptorExtractor = new cv::CalonderDescriptorExtractor<float>("current.rtc");
			hamming=false;
		}
		else if(std::string(argv[4])=="SURF"){
			descriptorExtractor = new cv::SurfDescriptorExtractor();
			hamming=false;
		}
		else if(std::string(argv[4])=="SIFT"){
			descriptorExtractor = new cv::SiftDescriptorExtractor();
			hamming=false;
		}
		else{
			descriptorExtractor = cv::DescriptorExtractor::create( argv[4] );
		}
		if (descriptorExtractor.empty()){
			hamming=false;
			std::cout << "Descriptor " << argv[4] << " not recognized. Check spelling!" << std::endl;
			return 4;
		}
	}

	// get the descriptors
	cv::Mat descriptors, descriptors2;
	std::vector<cv::DMatch> indices;

	std::vector<cv::KeyPoint> black_keypoints, black_keypoints2;
	cv::Mat black_descriptors, black_descriptors2;
	std::vector<cv::DMatch> black_indices;

	std::vector<cv::KeyPoint> white_keypoints, white_keypoints2;
	cv::Mat white_descriptors, white_descriptors2;
	std::vector<cv::DMatch> white_indices;
	
	clock_t start = clock();
	for( int i=0; i<repeat_cnt; i++ ){
		// first image
		descriptorExtractor->compute(imgGray2,keypoints2,descriptors2);
		// and the second one
		descriptorExtractor->compute(imgGray1,keypoints,descriptors);
	}
	clock_t end = clock();
	double time_total = (double)(end-start) / repeat_cnt / (double)CLOCKS_PER_SEC;
	printf("Time consumed in feature extraction£º%lfs\n", time_total );

	// matching
	std::vector<std::vector<cv::DMatch> > matches;
	std::vector<std::vector<cv::DMatch> > white_matches, black_matches;
	cv::Ptr<cv::DescriptorMatcher> descriptorMatcher;
	if(hamming)
		descriptorMatcher = new cv::BFMatcher(cv::NORM_HAMMING);//new cv::BruteForceMatcher<cv::HammingSse>();
	else
		descriptorMatcher = new cv::BruteForceMatcher<cv::L2<float> >();

	float hamming_thresh = atoi(argv[5]);
	if( is_black_white ){
		unsigned char * psrc   = NULL;
		unsigned char * pwhite = NULL;
		unsigned char * pblack = NULL;
		int nwhite=0,nblack=0;

		// the first key set ------------------------------------------------------------------------------------
		nwhite=nblack=0;
		for( std::vector<cv::KeyPoint>::iterator iter = keypoints.begin(); iter != keypoints.end(); iter ++ ){
			if( iter->class_id > 0 ){
				nwhite ++;
			}
			else{
				nblack ++;
			}
		}
		white_descriptors = cv::Mat::zeros(nwhite,64, CV_8U);
		black_descriptors = cv::Mat::zeros(nblack,64, CV_8U);
		psrc   = descriptors.data;
		pwhite = white_descriptors.data;
		pblack = black_descriptors.data;
		for( std::vector<cv::KeyPoint>::iterator iter = keypoints.begin(); iter != keypoints.end(); iter ++ ){
			if( iter->class_id > 0 ){
				white_keypoints.push_back(*iter);
				memcpy( pwhite, psrc, 64 );
				pwhite += 64;
			}
			else{
				black_keypoints.push_back(*iter);
				memcpy( pblack, psrc, 64 );
				pblack += 64;
			}
			psrc += 64;
		}

		// the second key set ------------------------------------------------------------------------------------
		nwhite=nblack=0;
		for( std::vector<cv::KeyPoint>::iterator iter = keypoints2.begin(); iter != keypoints2.end(); iter ++ ){
			if( iter->class_id > 0 ){
				nwhite ++;
			}
			else{
				nblack ++;
			}
		}
		white_descriptors2 = cv::Mat::zeros(nwhite,64, CV_8U);
		black_descriptors2 = cv::Mat::zeros(nblack,64, CV_8U);
		psrc   = descriptors2.data;
		pwhite = white_descriptors2.data;
		pblack = black_descriptors2.data;
		for( std::vector<cv::KeyPoint>::iterator iter = keypoints2.begin(); iter != keypoints2.end(); iter ++ ){
			if( iter->class_id > 0 ){
				white_keypoints2.push_back(*iter);
				memcpy( pwhite, psrc, 64 );
				pwhite += 64;
			}
			else{
				black_keypoints2.push_back(*iter);
				memcpy( pblack, psrc, 64 );
				pblack += 64;
			}
			psrc += 64;
		}
		start = clock();
		for( int i=0; i<repeat_cnt; i++ ){
			white_matches.clear();
			black_matches.clear();
			if(hamming){
				descriptorMatcher->radiusMatch(white_descriptors2,white_descriptors,white_matches,hamming_thresh);
				descriptorMatcher->radiusMatch(black_descriptors2,black_descriptors,black_matches,hamming_thresh);
				//descriptorMatcher->knnMatch(white_descriptors2,white_descriptors,white_matches,hamming_thresh);
				//descriptorMatcher->knnMatch(black_descriptors2,black_descriptors,black_matches,hamming_thresh);
			}
			else{
				descriptorMatcher->radiusMatch(white_descriptors2,white_descriptors,white_matches,0.21);
				descriptorMatcher->radiusMatch(black_descriptors2,black_descriptors,black_matches,0.21);
			}
		}
		end = clock();
		time_total = (double)(end-start) / repeat_cnt / (double)CLOCKS_PER_SEC;
		printf("Time consumed in keypoint matching£º%lfs\n", time_total );
	}
	else{
		start = clock();
		for( int i=0; i<repeat_cnt; i++ ){
			matches.clear();
			if(hamming){
				descriptorMatcher->radiusMatch(descriptors2,descriptors,matches,hamming_thresh);
				//descriptorMatcher->knnMatch(descriptors2,descriptors,matches,hamming_thresh);
			}
			else
				descriptorMatcher->radiusMatch(descriptors2,descriptors,matches,0.21);
		}
		end = clock();
		time_total = (double)(end-start) / repeat_cnt / (double)CLOCKS_PER_SEC;
		printf("Time consumed in keypoint matching£º%lfs\n", time_total );
	}
	fgetc(stdin);

	// drawing-----------------------------------------------------------------------------------------
	cv::Mat outimg;
	if( is_black_white ){
		// save the white keypoints----------------------------------------
		{
		std::string desc1 = std::string(std::string(argv[1])+"/img1_white.txt");
		std::string desc2 = std::string(std::string(argv[1])+"/img2_white.txt");
		std::ofstream descf1(desc1.c_str());
		if(!descf1.good()){
			std::cout<<"Descriptor file not found at " << desc1 <<std::endl;
			return 3;
		}
		std::ofstream descf2(desc2.c_str());
		if(!descf2.good()){
			std::cout<<"Descriptor file not found at " << desc2 <<std::endl;
			return 3;
		}
		unsigned char * pwhite2 = white_descriptors2.data;
		unsigned char * pwhite  = white_descriptors.data;
		for( std::vector<cv::KeyPoint>::iterator iter = white_keypoints.begin(); iter != white_keypoints.end(); iter ++ ){
			descf1 << iter->pt.x << " " << iter->pt.y << " " << iter->size << " " << iter->class_id;
			for( int i=0; i<64; i++ )
				descf1 << " " << (unsigned)pwhite[i];
			descf1 << std::endl;
			pwhite += 64;
		}
		for( std::vector<cv::KeyPoint>::iterator iter = white_keypoints2.begin(); iter != white_keypoints2.end(); iter ++ ){
			descf2 << iter->pt.x << " " << iter->pt.y << " " << iter->size << " " << iter->class_id;
			for( int i=0; i<64; i++ )
				descf2 << " " << (unsigned)pwhite2[i];
			descf2 << std::endl;
			pwhite2 += 64;
		}
		// clean up
		descf1.close();
		descf2.close();
		}

		// save the black keypoints----------------------------------------
		{
		std::string desc1 = std::string(std::string(argv[1])+"/img1_black.txt");
		std::string desc2 = std::string(std::string(argv[1])+"/img2_black.txt");
		std::ofstream descf1(desc1.c_str());
		if(!descf1.good()){
			std::cout<<"Descriptor file not found at " << desc1 <<std::endl;
			return 3;
		}
		std::ofstream descf2(desc2.c_str());
		if(!descf2.good()){
			std::cout<<"Descriptor file not found at " << desc2 <<std::endl;
			return 3;
		}
		unsigned char * pblack2 = black_descriptors2.data;
		unsigned char * pblack  = black_descriptors.data;
		int cnt = 0;
		for( std::vector<cv::KeyPoint>::iterator iter = black_keypoints.begin(); iter != black_keypoints.end(); iter ++ ){
			descf1 << iter->pt.x << " " << iter->pt.y << " " << iter->size << " " << iter->class_id;
			for( int i=0; i<64; i++ )
				descf1 << " " << (unsigned)pblack[i]; 
			descf1 << std::endl;
			pblack += 64;
			cnt ++;
		}
		for( std::vector<cv::KeyPoint>::iterator iter = black_keypoints2.begin(); iter != black_keypoints2.end(); iter ++ ){
			descf2 << iter->pt.x << " " << iter->pt.y << " " << iter->size << " " << iter->class_id;
			for( int i=0; i<64; i++ )
				descf2 << " " << (unsigned)pblack2[i];
			descf2 << std::endl;
			pblack2 += 64;
		}
		// clean up
		descf1.close();
		descf2.close();
		}

		{
		// save the white matches-------------------------------------------------------------------------------
		std::string desc1 = std::string(std::string(argv[1])+"/img1_img2_white.txt");
		std::ofstream descf1(desc1.c_str());
		if(!descf1.good()){
			std::cout<<"Cannot open file: " << desc1 <<std::endl;
			return 3;
		}
		for( std::vector<std::vector<cv::DMatch> >::iterator iter = white_matches.begin(); iter != white_matches.end(); iter ++ ){
			for( std::vector<cv::DMatch>::iterator iter2 = iter->begin(); iter2 != iter->end(); iter2 ++ ){
				descf1 << iter2->queryIdx << " " << iter2->trainIdx << " " << iter2->distance << std::endl;
			}
		}
		// clean up
		descf1.close();
		}

		{
		// save the black matches-------------------------------------------------------------------------------
		std::string desc1 = std::string(std::string(argv[1])+"/img1_img2_black.txt");
		std::ofstream descf1(desc1.c_str());
		if(!descf1.good()){
			std::cout<<"Cannot open file: " << desc1 <<std::endl;
			return 3;
		}
		for( std::vector<std::vector<cv::DMatch> >::iterator iter = black_matches.begin(); iter != black_matches.end(); iter ++ ){
			for( std::vector<cv::DMatch>::iterator iter2 = iter->begin(); iter2 != iter->end(); iter2 ++ ){
				descf1 << iter2->queryIdx << " " << iter2->trainIdx << " " << iter2->distance << std::endl;
			}
		}
		// clean up
		descf1.close();
		}
		drawMatches(imgRGB2, white_keypoints2, imgRGB1, white_keypoints,white_matches,outimg,
				 cv::Scalar(0,255,0), cv::Scalar(0,0,255),
				std::vector<std::vector<char> >(), cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS );
		cv::namedWindow("White Matches");
		cv::imshow("White Matches", outimg);

		drawMatches(imgRGB2, black_keypoints2, imgRGB1, black_keypoints,black_matches,outimg,
				cv::Scalar(0,255,0), cv::Scalar(0,0,255),
				std::vector<std::vector<char> >(), cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS );
		cv::namedWindow("Black Matches");
		cv::imshow("Black Matches", outimg);
	}
	else{
		// save the keypoints----------------------------------------
		{
		std::string desc1 = std::string(std::string(argv[1])+"/img1.txt");
		std::string desc2 = std::string(std::string(argv[1])+"/img2.txt");
		std::ofstream descf1(desc1.c_str());
		if(!descf1.good()){
			std::cout<<"Descriptor file not found at " << desc1 <<std::endl;
			return 3;
		}
		std::ofstream descf2(desc2.c_str());
		if(!descf2.good()){
			std::cout<<"Descriptor file not found at " << desc2 <<std::endl;
			return 3;
		}
		unsigned char * pdat2 = descriptors2.data;
		unsigned char * pdat  = descriptors.data;
		int cnt = 0;
		for( std::vector<cv::KeyPoint>::iterator iter = keypoints.begin(); iter != keypoints.end(); iter ++ ){
			descf1 << iter->pt.x << " " << iter->pt.y << " " << iter->size << " " << iter->class_id;
			for( int i=0; i<64; i++ )
				descf1 << " " << (unsigned)pdat[i];
			pdat += 64;
			descf1 << std::endl;
			cnt ++;
		}
		for( std::vector<cv::KeyPoint>::iterator iter = keypoints2.begin(); iter != keypoints2.end(); iter ++ ){
			descf2 << iter->pt.x << " " << iter->pt.y << " " << iter->size << " " << iter->class_id;
			for( int i=0; i<64; i++ )
				descf2 << " " << (unsigned)pdat2[i];
			pdat2 += 64;
			descf2 << std::endl;
		}
		// clean up
		descf1.close();
		descf2.close();
		}
		{
		// save the matches-------------------------------------------------------------------------------
		std::string desc1 = std::string(std::string(argv[1])+"/img1_img2.txt");
		std::ofstream descf1(desc1.c_str());
		if(!descf1.good()){
			std::cout<<"Cannot open file: " << desc1 <<std::endl;
			return 3;
		}
		for( std::vector<std::vector<cv::DMatch> >::iterator iter = matches.begin(); iter != matches.end(); iter ++ ){
			for( std::vector<cv::DMatch>::iterator iter2 = iter->begin(); iter2 != iter->end(); iter2 ++ ){
				descf1 << iter2->queryIdx << " " << iter2->trainIdx << " " << iter2->distance << std::endl;
			}
		}
		// clean up
		descf1.close();
		}
		drawMatches(imgRGB2, keypoints2, imgRGB1, keypoints,matches,outimg,
				 cv::Scalar(0,255,0), cv::Scalar(0,0,255),
				std::vector<std::vector<char> >(), cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS );
		cv::namedWindow("Matches");
		cv::imshow("Matches", outimg);
	}
	cv::waitKey();

	return 0;
}
// Example #22
// 0
// Planar object detection.
// Filters the precomputed matches by descriptor distance, draws the good
// matches into outImage, estimates the object->scene homography, and outlines
// the detected object in the scene half of outImage (shifted right by
// objectImage.cols, because drawMatches places the two images side by side).
//
// Parameters:
//   objectImage, sceneImage           - object template and scene images.
//   outImage                          - output visualization (matches + green box).
//   objectDescriptor, sceneDescriptor - descriptor matrices (kept for interface
//                                       compatibility; matches are authoritative).
//   matches                           - precomputed matches (queryIdx indexes
//                                       objectKeypoints, trainIdx indexes sceneKeypoints).
//   objectKeypoints, sceneKeypoints   - keypoints the matches refer to.
void Feature::objectDetect( Mat& objectImage, Mat& sceneImage , Mat&outImage,
							Mat& objectDescriptor,Mat& sceneDescriptor, vector<DMatch>& matches,
							vector<KeyPoint>& objectKeypoints, vector<KeyPoint>& sceneKeypoints)
{
	double max_dist = 0; double min_dist = 100;

	// Min/max match distances. Iterate over matches.size() rather than
	// objectDescriptor.rows: the two counts can differ (e.g. an empty train
	// set), and indexing matches[] by the descriptor row count would read
	// out of bounds.
	for( size_t i = 0; i < matches.size(); i++ )
	{
		double dist = matches[i].distance;
		if( dist < min_dist )
			min_dist = dist;
		if( dist > max_dist )
			max_dist = dist;
	}

	// Keep only the strong matches (distance below 2*min_dist). Guard against
	// min_dist == 0 (perfect matches), which would make the threshold reject
	// every match.
	std::vector< DMatch > good_matches;
	double acceptedDist = 2*min_dist;
	if( acceptedDist <= 0 )
		acceptedDist = 0.02;

	for( size_t i = 0; i < matches.size(); i++ )
	{
		if( matches[i].distance < acceptedDist )
		{
			good_matches.push_back( matches[i]);
		}
	}

	// Draw the filtered matches.
	drawMatches( objectImage, objectKeypoints, sceneImage, sceneKeypoints,
				 good_matches, outImage, Scalar::all(-1), Scalar::all(-1),
				 vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

	// Collect the point correspondences of the good matches.
	std::vector<Point2f> object;	// points in the object (template) image
	std::vector<Point2f> scene;		// points in the scene image
	for( size_t i = 0; i < good_matches.size(); i++ )
	{
		object.push_back( objectKeypoints[ good_matches[i].queryIdx ].pt );
		scene.push_back( sceneKeypoints[ good_matches[i].trainIdx ].pt );
	}

	// findHomography requires at least 4 correspondences; with fewer we can
	// only show the match visualization, so stop here instead of crashing.
	if( object.size() < 4 )
		return;

	// Perspective mapping from the object image to the scene image.
	Mat H = findHomography( object, scene, CV_RANSAC );
	if( H.empty() )	// RANSAC may fail to find a model
		return;

	// Corners of the object image.
	std::vector<Point2f> object_corners(4);

	object_corners[0] = Point2f( 0, 0 );
	object_corners[1] = Point2f( (float)objectImage.cols, 0 );
	object_corners[2] = Point2f( (float)objectImage.cols, (float)objectImage.rows );
	object_corners[3] = Point2f( 0, (float)objectImage.rows );

	std::vector<Point2f> scene_corners(4);

	perspectiveTransform( object_corners, scene_corners, H);	// project corners into the scene

	// Draw the bounding quadrilateral on the scene half of the output image.
	line( outImage, scene_corners[0] + Point2f( objectImage.cols, 0), scene_corners[1] + Point2f( objectImage.cols, 0), Scalar(0, 255, 0), 4 );
	line( outImage, scene_corners[1] + Point2f( objectImage.cols, 0), scene_corners[2] + Point2f( objectImage.cols, 0), Scalar( 0, 255, 0), 4 );
	line( outImage, scene_corners[2] + Point2f( objectImage.cols, 0), scene_corners[3] + Point2f( objectImage.cols, 0), Scalar( 0, 255, 0), 4 );
	line( outImage, scene_corners[3] + Point2f( objectImage.cols, 0), scene_corners[0] + Point2f( objectImage.cols, 0), Scalar( 0, 255, 0), 4 );

}
// Visualizes patch (tile) correspondences between the current query image and
// a database (DB) image, according to the `mode` member:
//   mode 0       - draw matched tile pairs side by side via drawMatches().
//   mode 1 and 2 - experimental: rebuild the query image out of matched DB
//                  patches (mode 1: best DB image only; mode 2: all DB images).
//   mode 3 and 4 - paint corresponding tiles in both images with the same
//                  random colour; mode 4 additionally validates each match
//                  geometrically (using `angle` and `scalefactor`) and records
//                  good/wrong match counts.
// NOTE(review): `mode`, `angle`, `scalefactor`, `patches_cols`, `patches_rows`,
// the in_*/out_* data ports and the wrongmatches/goodmatches/similarity
// containers are class members declared outside this chunk — their semantics
// are inferred from usage here; confirm against the class declaration.
void DisplayCorrespondence::onNewImage() {
	int wrongmatch_counter=0;
	int goodmatch_counter=0;

	// Read every input port. image_params packs the tiling geometry of both
	// the query image ("patches") and the DB image ("bd") plus bookkeeping.
	Mat image = in_img.read();
	std::string path = in_path.read();
	std::vector<int> image_params = in_image_params.read();
	std::vector<std::vector<int> > MatchedSourceForTile =in_MatchedSourceForTile.read();
	std::vector<int> PositionOfPatchesInImages =in_PositionOfPatchesInImages.read();
	std::vector<std::vector<int> > MatchedPatchInMatcher =in_MatchedPatchInMatcher.read();
	std::vector<std::vector<double> > DistanceMap = in_DistanceMap.read();
	std::vector<std::string> all_file_paths = in_all_file_paths.read();
	int files_number = all_file_paths.size();
	std::vector<double> QueryMatchMap = in_match_map.read();
	double match_quality = in_match_quality.read();

	// Query-image tiling: grid dimensions and patch edge length (pixels).
	patches_cols = image_params[0];
	patches_rows = image_params[1];
	int patchsize = image_params[2];

	// DB-image tiling.
	int bd_cols = image_params[3];
	int bd_rows = image_params[4];
	int bd_patch_size = image_params[5];

	// Per-tile candidate queue length and index of the best matching DB image.
	int queue_size = image_params[6];
	int BestMatchingImage = image_params[7];

	int image_cols = image_params[8];
	int image_rows = image_params[9];
	
	// Distance threshold, transmitted as an integer scaled by 10000.
	double threshold = (double)image_params[10]/10000;

	// BDMatchMap[db_patch_index] -> list of query-patch indices matched to it.
	// All patch indices are linear (row-major, like a progressive TV scan).
	std::vector<std::vector<int> > BDMatchMap;
	BDMatchMap.resize(bd_cols * bd_rows);
	//for (int q = 0; q < bd_cols * bd_rows; q++) {
	//	BDMatchMap[q] = -1;
	//}

	int query_col;
	int query_row;
	int fn;

	std::cout << "qs:" << queue_size << "  thre:"<<threshold<<" p:"<<image_params[10]<<std::endl;
	/*=======================================================================*/
	/*************************************************************************/
	/*		create corresspondence map for DB image							 */
	/*************************************************************************/

	if (mode == 3) {
		for (int k = 0; k < patches_rows * patches_cols; k++) {
			int flag = 0;
			int zzz;
			//check if there is match of current patch to the best image
			for (zzz = 0; zzz < queue_size; zzz++) {
				// NOTE(review): the distance-threshold rejection below is
				// disabled (the break is commented out), so out-of-threshold
				// candidates are NOT skipped in mode 3 — confirm intent.
				if (DistanceMap[k][zzz]>threshold || DistanceMap[k][zzz]<0){
					//std::cout<<"k:"<<k<<" zzz:"<<zzz<<" dmap"<<DistanceMap[k][zzz]<<"  th:"<<threshold<<std::endl;
					//break;
				}
				if (MatchedSourceForTile[k][zzz] == BestMatchingImage) {
					flag = 1;
					break;
				}
			}
			if (flag) {
				//where in the image is the patch (all patches are numbered linearly in 1D. Like in progressive scan in TV from left to right and to next line)
				// zzz still holds the queue position found above (loop broke).
				fn = PositionOfPatchesInImages[MatchedPatchInMatcher[k][zzz]];
				BDMatchMap[fn].push_back(k);
			}
		}
	}
	
	// Mode 4: same correspondence-map construction as mode 3, but each match
	// is additionally validated geometrically: the DB-patch center, scaled by
	// `scalefactor` and rotated by `angle` around the image center, must land
	// within roughly one (scaled) patch diagonal of the query-patch center.
	if (mode==4){
	//!!!!!!!!!!!!!!!!!!!!!!!
	Mat matchedim = cv::imread(path, -1);
	//!!!!!!!!!!!!!!!!!!
		double nx, ny, mx, my;
		
		double angle_rad=M_PI*angle/180;
		
		//scalefactor=1.0;
		int basex=bd_patch_size*bd_cols;
		int basey=bd_patch_size*bd_rows;
		//int imgx=patchsize*patches_cols;
		//int imgy=patchsize*patches_rows;
		int imgx=image_cols;
		int imgy=image_rows;
				
		//circle(matchedim, Point(basex/2.0,basey/2.0), 20, Scalar(0,255,0),-1);
		//circle(image, Point(imgx/2.0,imgy/2.0), 20, Scalar(0,255,0),-1);
		//float px=-100, py=-100;
		
		//float px2=px*scalefactor;
		//float py2=px*scalefactor;
		
		//float qx=px2*cos(angle_rad)-py2*sin(angle_rad);
		//float qy=px2*sin(angle_rad)+py2*cos(angle_rad);
		
		
		
		//std::cout<<qx<<" "<<qy<<std::endl;
		
		//circle(matchedim, Point(basex/2.0+px,basey/2.0+py), 10, Scalar(255,0,0),-1);
		//circle(image, Point(basex/2.0+qx,basey/2.0+qy), 10, Scalar(255,0,0),-1);
				
		for (int k = 0; k < patches_rows * patches_cols; k++) {
					
					
			int flag = 0;
			int zzz;
			//check if there is match of current patch to the best image
			for (zzz = 0; zzz < queue_size; zzz++) {
				// Unlike mode 3, here the distance check IS active: stop
				// scanning the queue at the first out-of-threshold entry.
				if (DistanceMap[k][zzz]>threshold || DistanceMap[k][zzz]<0.0){
					//std::cout<<"k:"<<k<<" zzz:"<<zzz<<" dmap"<<DistanceMap[k][zzz]<<"  th:"<<threshold<<std::endl;
					break;
				}
				if (MatchedSourceForTile[k][zzz] == BestMatchingImage) {
					flag = 1;
					break;
				}
			}
			if (flag) {
			
				fn = PositionOfPatchesInImages[MatchedPatchInMatcher[k][zzz]];
			
				
		
				//float p1x=base_keypoints[matches[i].queryIdx].pt.x-basex/2;
				//float p1y=base_keypoints[matches[i].queryIdx].pt.y-basey/2;
				//float p2x=keypoints[matches[i].trainIdx].pt.x-imgx/2;
				//float p2y=keypoints[matches[i].trainIdx].pt.y-imgy/2;
				
				//cout<<"loop i2: "<<i<<std::endl;
				//float bigx=p1x*(cos(angle_rad)-sin(angle_rad));
				//float bigy=p1y*(sin(angle_rad)+cos(angle_rad));
				
				//float bigx=p1x*cos(angle_rad)-p1y*sin(angle_rad);
				//float bigy=p1x*sin(angle_rad)+p1y*cos(angle_rad);
				
				//cout<<"loop i3: "<<i<<std::endl;
				
				//if (sqrt((bigx-p2x)*(bigx-p2x)+(bigy-p2y)*(bigy-p2y))<=threshold){
					
			
				// Center of the matched DB patch.
				nx = bd_patch_size * (fn % bd_cols) + bd_patch_size * 0.5;
				ny = bd_patch_size * (fn / bd_cols) + bd_patch_size * 0.5;
				
				//nx*=scalefactor;
				//ny*=scalefactor;
				
				//which tile is matched
				query_col = k % patches_cols;
				query_row = k / patches_cols;

				//location of the center
				mx = patchsize * (query_col) + 0.5 * patchsize;
				my = patchsize * (query_row) + 0.5 * patchsize;
				
				// Express both centers relative to their image centers.
				float p1x=nx-basex/2.0;
				float p1y=ny-basey/2.0;
				float p2x=mx-imgx/2.0;
				float p2y=my-imgy/2.0;
				
				p1x*=scalefactor;
				p1y*=scalefactor;
				
				//circle(image, Point(mx,my), 10, Scalar(0,0,255), -1);
				//circle(matchedim, Point(nx,ny), 10, Scalar(0,0,255),-1);
				
				
				
				
				// Rotate the (scaled) DB-patch center by the known angle.
				float bigx=p1x*cos(angle_rad)-p1y*sin(angle_rad);
				float bigy=p1x*sin(angle_rad)+p1y*cos(angle_rad);	//<<" basex"<<basex<<" basey"<<basey<<" imgx"<<imgx<<" imgy"<<imgy
				//std::cout<<"scale:"<<scalefactor<<std::endl;
				
				// Accept the match if the transformed center lands within
				// half the scaled patch diagonal (+3 px slack) of the query center.
				if (sqrt((bigx-p2x)*(bigx-p2x)+(bigy-p2y)*(bigy-p2y))<=(sqrt(2)*bd_patch_size*scalefactor/2+3)){
					//where in the image is the patch (all patches are numbered linearly in 1D. Like in progressive scan in TV from left to right and to next line)
					
					//std::cout<<"p1x:"<<p1x<<" p1y:"<<p1y<<" p2x:"<<p2x<<" p2y:"<<p2y<<" bx:"<<bigx<<" by:"<<bigy<<" scale:"<<scalefactor<<std::endl;
				//std::cout<<sqrt((bigx-p2x)*(bigx-p2x)+(bigy-p2y)*(bigy-p2y))<<std::endl;
					
					BDMatchMap[fn].push_back(k);
					goodmatch_counter++;
				}
				else{
					wrongmatch_counter++;
				}
			}
	
		}
		// Accumulate per-frame statistics into the member containers.
		wrongmatches.push_back(wrongmatch_counter);
		goodmatches.push_back(goodmatch_counter);
		similarity.push_back(match_quality);
	}
	/*=======================================================================*/
	/*************************************************************************/
	/*				simple correspondence drawing							 */
	/*************************************************************************/
	
	if (mode == 0) {
		float nx, ny, mx, my;
		Mat outimg;
		//int query_col;
		//int query_row;


		// Tile centers become keypoints so drawMatches can render the links.
		std::vector<cv::KeyPoint> im1kp;
		std::vector<cv::KeyPoint> im2kp;
		std::vector<cv::DMatch> immatches;

		int tempc = 0;

		int cant = 0, cant2 = 0;
		//read query image
		Mat matchedim = cv::imread(path, -1);
		double color;

		for (int k = 0; k < patches_rows * patches_cols; k++) {
			int flag = 0;
			int zzz;
			//check if there is match of current patch to the best image
			for (zzz = 0; zzz < queue_size; zzz++) {
				if (MatchedSourceForTile[k][zzz] == BestMatchingImage) {
					flag = 1;
					//cant++;
					break;
				}
			}
			if (flag) {

				//where in the image is the patch (all patches are numbered linearly in 1D. Like in progressive scan in TV from left to right and to next line)
				fn = PositionOfPatchesInImages[MatchedPatchInMatcher[k][zzz]];
				//BDMatchMap[fn] = k;

				//error?
				if (fn < 0) {
					return;
				}

				//location of the center tile in the image#include <time.h>
				nx = bd_patch_size * (fn % bd_cols) + bd_patch_size * 0.5;
				ny = bd_patch_size * (fn / bd_cols) + bd_patch_size * 0.5;

				//which tile is matched
				query_col = k % patches_cols;
				query_row = k / patches_cols;

				//location of the center
				mx = patchsize * (query_col) + 0.5 * patchsize;
				my = patchsize * (query_row) + 0.5 * patchsize;

				//Is it not a bad match?
				// NOTE(review): mode 0 uses a hard-coded 0.3 cutoff here,
				// not the `threshold` input used by modes 3/4.
				if (DistanceMap[k][0] < 0 || DistanceMap[k][0] > 0.3) {
					cant2++;
					continue;
				}

				//choose colour
				//color = (1 - 10 * DistanceMap[k][0] * DistanceMap[k][0]) * 255.0;
				int cb = 0, cg = 0, cr = 255;
				int qLineW = 2, bdLineW = 2;
				//cb=rand() % 255;
				//cg=rand() % 255;
				//cr=rand() % 255;

				//draw patches in query img
				rectangle(image, Point(mx - 0.5 * patchsize, my - 0.5
						* patchsize), Point(mx + 0.5 * patchsize, my + 0.5
						* patchsize), Scalar(cb, cg, cr), qLineW);

				//draw patches in BD img
				rectangle(matchedim, Point(nx - 0.5 * bd_patch_size, ny - 0.5
						* bd_patch_size), Point(nx + 0.5 * bd_patch_size, ny
						+ 0.5 * bd_patch_size), Scalar(cb, cg, cr), bdLineW);

				//each tile center as a KP
				im1kp.push_back(KeyPoint(Point2f(nx, ny), 5.0));
				im2kp.push_back(KeyPoint(Point2f(mx, my), 5.0));

				std::ostringstream q;
				q.precision(2);
				q << QueryMatchMap[k];

				//putText(image, q.str(), cvPoint(mx-0.25*patchsize,my-0.125*patchsize), FONT_HERSHEY_SIMPLEX, 0.5, cvScalar(0,0,0), 1.5, CV_AA);

				//and now set correspondence
				immatches.push_back(DMatch(tempc, tempc, 0.0));
				tempc++;
			}
		}
		drawMatches(image, im2kp, matchedim, im1kp, immatches, outimg,
				Scalar::all(-1));
		out_image.write(outimg);
	}

	/*=======================================================================*/
	/*************************************************************************/
	/*				corresponding tiles marked with the same color			 */
	/*************************************************************************/
	if (mode == 3 || mode==4) {
		std::cout << "mode:" << mode << " s" << BDMatchMap.size() << std::endl;
		//!!!!!!!!!!!!!!!!!!!
		//!!!!!!!!!!!!!!!!
		//int counter=0;
		Mat matchedim = cv::imread(path, -1);
		float nx, ny, mx, my;
		Mat outimg;
		std::vector<cv::KeyPoint> im1kp;
		std::vector<cv::KeyPoint> im2kp;
		std::vector<cv::DMatch> immatches;

		int k;
		// One random colour per DB patch; every query tile matched to that
		// patch is filled (line width -1) with the same colour in both images.
		for (int m = 0; m < BDMatchMap.size(); m++) {
			int cb = 0, cg = 0, cr = 255;
			int qLineW = -1, bdLineW = -1;
			cb = rand() % 255;
			cg = rand() % 255;
			cr = rand() % 255;
			if (BDMatchMap[m].size() > 0) {
				for (int n = 0; n < BDMatchMap[m].size(); n++) {
				
				
					k = BDMatchMap[m][n];
					fn = n;

					query_col = k % patches_cols;
					query_row = k / patches_cols;

					//std::cout<<query_col<<" "<<query_row<<" "<<k<<std::endl;

					//location of the center
					mx = patchsize * (query_col) + 0.5 * patchsize;
					my = patchsize * (query_row) + 0.5 * patchsize;

					//Is it not a bad match?
					if (DistanceMap[k][0] < 0.0 || DistanceMap[k][0] > threshold) {
						continue;
					}
					//counter++;
					//draw patches in query img
					rectangle(image, Point(mx - 0.5 * patchsize, my - 0.5
							* patchsize), Point(mx + 0.5 * patchsize, my + 0.5
							* patchsize), Scalar(cb, cg, cr), qLineW);
							
					// Annotate the tile with its match quality value.
					std::ostringstream q;
					q.precision(2);
					q << QueryMatchMap[k];
					putText(image, q.str(), cvPoint(mx-0.25*patchsize,my-0.125*patchsize), FONT_HERSHEY_SIMPLEX, 0.3, cvScalar(0,0,0), 1.5, CV_AA);
							
				}

				nx = bd_patch_size * (m % bd_cols) + bd_patch_size * 0.5;
				ny = bd_patch_size * (m / bd_cols) + bd_patch_size * 0.5;

				//draw patches in BD img
				rectangle(matchedim, Point(nx - 0.5 * bd_patch_size, ny - 0.5
						* bd_patch_size), Point(nx + 0.5 * bd_patch_size, ny
						+ 0.5 * bd_patch_size), Scalar(cb, cg, cr), bdLineW);
			}
		}
		// im1kp/im2kp/immatches stay empty here; drawMatches is used only to
		// place the two annotated images side by side.
		drawMatches(image, im2kp, matchedim, im1kp, immatches, outimg,
				Scalar::all(-1));

		out_image.write(outimg);
		//goodmatches_q.push_back(counter);
	}

	/*=======================================================================*/
	/*************************************************************************/
	/*			experimental mode to reconstruct image from matched patches	 */
	/*						  possible only when patches have the same size  */
	/*************************************************************************/
	//
	if ((mode == 1 && patchsize == bd_patch_size) || mode == 2) {

		int nx, ny, mx, my;
		//int fn;
		//int query_col;
		//int query_row;

		int ax, bx, ay, by;
		int fx;
		int r, g, b;
		// Output canvas the size of the query image, initially black.
		Mat outimage(image_rows, image_cols, CV_8UC3,
				Scalar(0.0, 0.0, 0.0, 1.0));

		int counter = 0;
		int zzz = 0, flag = 0;

		//scan through all the files
		for (int l = 0; l < files_number; l++) {

			Mat matchedim = cv::imread(all_file_paths[l], -1);
			unsigned char *input = (unsigned char*) (matchedim.data);

			for (int k = 0; k < patches_rows * patches_cols; k++) {
				//matches only to the best image
				if (mode == 1) {
					flag = 0;
					//find if an image has some corresponging match on the list
					for (zzz = 0; zzz < queue_size; zzz++) {
						if (MatchedSourceForTile[k][zzz] == BestMatchingImage) {
							flag = 1;
							break;
						}
					}
				}
				//matches to all img in the DB
				// NOTE(review): in mode 2 `zzz` keeps its value from the
				// previous iteration (it is only reset by the mode-1 scan) —
				// confirm this is the intended queue slot for mode 2.
				if (flag || mode == 2) {

					query_col = k % patches_cols;
					query_row = k / patches_cols;
					mx = patchsize * (query_col);
					my = patchsize * (query_row);

					fn
							= PositionOfPatchesInImages[MatchedPatchInMatcher[k][zzz]];

					nx = bd_patch_size * (fn % bd_cols);
					ny = bd_patch_size * (fn / bd_cols);

					//if the tile is matched to the current image l
					if (MatchedSourceForTile[k][zzz] == l) {

						//then copy image pixel-by-pixel
						for (int ax = 0; ax < patchsize; ax++) {
							for (int ay = 0; ay < patchsize; ay++) {
								//outimage[mx+ax][mx+ay]=matchedim[nx+ax][nx+ay];
								//if ((ax+nx)>512||(ay+ny))
								// Read BGR from the DB patch ...
								b = matchedim.data[matchedim.step[0]
										* (ay + ny) + matchedim.step[1] * (ax
										+ nx)];
								g = matchedim.data[matchedim.step[0]
										* (ay + ny) + matchedim.step[1] * (ax
										+ nx) + 1];
								r = matchedim.data[matchedim.step[0]
										* (ay + ny) + matchedim.step[1] * (ax
										+ nx) + 2];

								// ... and write it to the query-tile position.
								outimage.data[outimage.step[0] * (my + ay)
										+ outimage.step[1] * (mx + ax) + 0] = b;
								outimage.data[outimage.step[0] * (my + ay)
										+ outimage.step[1] * (mx + ax) + 1] = g;
								outimage.data[outimage.step[0] * (my + ay)
										+ outimage.step[1] * (mx + ax) + 2] = r;

							}
						}
					}
				}
			}
		}

		std::cout << "done" << std::endl;
		out_image.write(outimage);
	}
	/*************************************************************************/

	std::cout << "raise" << std::endl;
	// Signal downstream components that a new visualization is available.
	matched->raise();
}
// Example #24
// 0
void CvFlann::onNewImage()
{
	CLOG(LTRACE) << "CvFlann::onNewImage\n";
	try {
		// Read input features.
		Types::Features features_1 = in_features0.read();
		Types::Features features_2 = in_features1.read();
		// Read input descriptors.
		cv::Mat img_1 = in_img0.read();
		cv::Mat img_2 = in_img1.read();
		// Read input images.
		cv::Mat descriptors_1 = in_descriptors0.read();
		cv::Mat descriptors_2 = in_descriptors1.read();

		// Matching descriptor vectors using FLANN matcher.
		FlannBasedMatcher matcher;
		std::vector< DMatch > matches;
		matcher.match( descriptors_1, descriptors_2, matches );

		if (distance_recalc) {
			double max_dist = 0;
			double min_dist = 100;
			//-- Quick calculation of max and min distances between keypoints.
			for( int i = 0; i < descriptors_1.rows; i++ )
			{
				double dist = matches[i].distance;
				if( dist < min_dist ) min_dist = dist;
				if( dist > max_dist ) max_dist = dist;
			}
			dist = 2*min_dist;
			CLOG(LINFO) << " Max dist : " << (double)max_dist;
			CLOG(LINFO) << " Min dist : " << (double)min_dist;
			CLOG(LINFO) << " Dist : " << (double)dist << std::endl;
		}

		//Draw only "good" matches (i.e. whose distance is less than 2*min_dist ).
		//PS.- radiusMatch can also be used here.
		std::vector< DMatch > good_matches;
		for( int i = 0; i < descriptors_1.rows; i++ )
		{
			if( matches[i].distance < dist )
				good_matches.push_back( matches[i]);
		}

		//-- Draw only "good" matches
		Mat img_matches;
		drawMatches( img_1, features_1.features, img_2, features_2.features,
				   good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
				   vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

		// Print stats.
		if (print_stats) {
			for( int i = 0; i < good_matches.size(); i++ )
			{
				CLOG(LINFO) << " Good Match [" << i <<"] Keypoint 1: " << good_matches[i].queryIdx << "  -- Keypoint 2: " << good_matches[i].trainIdx;
			}
			CLOG(LINFO) << std::endl;
		}


		// Write the result to the output.
		out_img.write(img_matches);
	} catch (...) {
		CLOG(LERROR) << "CvFlann::onNewImage failed\n";
	}
}
// Example #25
// 0
// Locates a QR code in a video stream: finds the three square "finder
// patterns" (contours with >= 5 levels of nested children), derives three
// extra midpoints, then SIFT-matches a hard-coded crop of the frame
// against a reference QR image and warps the reference onto it via a
// homography.  Returns the patternFound flag.
int main()
{
	cv::Mat imCalibColor;	// current colour frame from the capture
	cv::Mat imCalibGray;	// grayscale copy used for edge detection
	cv::vector<cv::vector<cv::Point> > contours;
	cv::vector<cv::Vec4i> hierarchy;	// contour nesting info from findContours
	cv::vector<cv::Point2f> pointQR;	// finder-pattern centres + derived midpoints
	cv::Mat imCalibNext;
	cv::Mat imQR;
	cv::vector<cv::Mat> tabQR;	// reference QR images loaded from disk
	/*cv::vector<cv::Point2f> corners1;
	cv::vector<cv::Point2f> corners2;
	cv::vector<cv::Point2f> corners3;
	cv::vector<cv::Point2f> corners4;
	cv::vector<cv::Point2f> corners5;*/

	// goodFeaturesToTrack-style parameters; declared but unused in the
	// current code path (the corner-tracking branch is commented out).
	double qualityLevel = 0.01;
	double minDistance = 10;
	int blockSize = 3;
	bool useHarrisDetector = false;
	double k = 0.04;
	int maxCorners = 600;

	int A = 0, B= 0, C= 0;	// contour indices of the three finder patterns
	char key;
	int mark;	// number of finder patterns found in the current frame
	bool patternFound = false;
	
	cv::VideoCapture vcap("../rsc/capture2.avi");

	// Pre-load the four reference QR images, converted to grayscale.
	for (int i = 1; i < 5; i++)
	{
		std::ostringstream oss;
		oss << "../rsc/QrCodes/QR" << i << ".jpg";
		imQR = cv::imread(oss.str());
		cv::cvtColor(imQR, imQR, CV_BGR2GRAY);
		std::cout<< "Bouh!!!!!!" << std::endl;
		tabQR.push_back(imQR);
	}

	do
	{
		// Spin until the capture starts delivering non-empty frames.
		while(imCalibColor.empty())
		{
			vcap >> imCalibColor;
		}
		vcap >> imCalibColor;

		// Edge map on the grayscale frame, then full contour hierarchy.
		cv::Mat edges(imCalibColor.size(),CV_MAKETYPE(imCalibColor.depth(), 1));
		cv::cvtColor(imCalibColor, imCalibGray, CV_BGR2GRAY);
		Canny(imCalibGray, edges, 100 , 200, 3);

		cv::findContours( edges, contours, hierarchy, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE);
		
		cv::imshow("pointInteret", imCalibColor);

		mark = 0;

		// Centre of mass of each contour (m10/m00, m01/m00).
		cv::vector<cv::Moments> mu(contours.size());
  		cv::vector<cv::Point2f> mc(contours.size());

		for( int i = 0; i < contours.size(); i++ )
		{	
			mu[i] = moments( contours[i], false ); 
			mc[i] = cv::Point2f( mu[i].m10/mu[i].m00 , mu[i].m01/mu[i].m00 );
		}

		// A QR finder pattern appears as a contour with a deep chain of
		// nested children; walk hierarchy[..][2] (first child) to count depth.
		for( int i = 0; i < contours.size(); i++ )
		{
			int k=i;	// NOTE(review): shadows the outer double k (Harris parameter)
			int c=0;

			while(hierarchy[k][2] != -1)
			{
				k = hierarchy[k][2] ;
				c = c+1;
			}
			// NOTE(review): dead code — the loop above only exits when
			// hierarchy[k][2] == -1, so this condition can never hold.
			if(hierarchy[k][2] != -1)
			c = c+1;

			if (c >= 5)
			{	
				if (mark == 0)		A = i;
				else if  (mark == 1)	B = i;		// i.e., A is already found, assign current contour to B
				else if  (mark == 2)	C = i;		// i.e., A and B are already found, assign current contour to C
				mark = mark + 1 ;
			}
		} 

		// NOTE(review): testing the indices against 0 misfires when a finder
		// pattern legitimately is contour 0; checking mark >= 3 would be
		// more robust — confirm before changing.
		if (A !=0 && B !=0 && C!=0)
		{

			// Crop a fixed region (coordinates hard-coded; the commented
			// pointQR[0] hints they were meant to come from detection),
			// binarize it for SIFT matching below.
			cv::Mat imagecropped = imCalibColor;
			cv::Rect ROI(280/*pointQR[0].x*/, 260/*pointQR[0].y*/, 253, 218);
			cv::Mat croppedRef(imagecropped, ROI);
			cv::cvtColor(croppedRef, imagecropped, CV_BGR2GRAY);
			cv::threshold(imagecropped, imagecropped, 180, 255, CV_THRESH_BINARY);

			// Mark the three finder-pattern centres on the display frame.
			pointQR.push_back(mc[A]);
			cv::circle(imCalibColor, cv::Point(pointQR[0].x, pointQR[0].y), 3, cv::Scalar(0, 0, 255), 1, 8, 0);
			pointQR.push_back(mc[B]);
			cv::circle(imCalibColor, cv::Point(pointQR[1].x, pointQR[1].y), 3, cv::Scalar(0, 0, 255), 1, 8, 0);
			pointQR.push_back(mc[C]);
			cv::circle(imCalibColor, cv::Point(pointQR[2].x, pointQR[2].y), 3, cv::Scalar(0, 0, 255), 1, 8, 0);

			// Midpoints of each pair of finder-pattern centres.
			cv::Point2f D(0.0f,0.0f);
			cv::Point2f E(0.0f,0.0f);
			cv::Point2f F(0.0f,0.0f);

			D.x = (mc[A].x + mc[B].x)/2;
			E.x = (mc[B].x + mc[C].x)/2;
			F.x = (mc[C].x + mc[A].x)/2;

			D.y = (mc[A].y + mc[B].y)/2;
			E.y = (mc[B].y + mc[C].y)/2;
			F.y = (mc[C].y + mc[A].y)/2;

			pointQR.push_back(D);
			cv::circle(imCalibColor, cv::Point(pointQR[3].x, pointQR[3].y), 3, cv::Scalar(0, 0, 255), 1, 8, 0);
			pointQR.push_back(E);
			cv::circle(imCalibColor, cv::Point(pointQR[4].x, pointQR[4].y), 3, cv::Scalar(0, 0, 255), 1, 8, 0);
			pointQR.push_back(F);
			cv::circle(imCalibColor, cv::Point(pointQR[5].x, pointQR[5].y), 3, cv::Scalar(0, 0, 255), 1, 8, 0);

			patternFound = true;
			std::cout << "patternfound" << std::endl;
			
			// SIFT keypoints/descriptors on reference QR #4 and on the crop.
			cv::SiftFeatureDetector detector;
			cv::vector<cv::KeyPoint> keypoints1, keypoints2;
			detector.detect(tabQR[3], keypoints1);
			detector.detect(imagecropped, keypoints2);

			cv::Ptr<cv::DescriptorExtractor> descriptor = cv::DescriptorExtractor::create("SIFT");
			cv::Mat descriptors1, descriptors2;
			descriptor->compute(tabQR[3], keypoints1, descriptors1 );
			descriptor->compute(imagecropped, keypoints2, descriptors2 );

			// FLANN matching, then the usual min/max distance scan.
			// NOTE(review): min_dist starts at 100 — if every distance
			// exceeds 100 the filter below keeps everything.
			cv::FlannBasedMatcher matcher; 
			std::vector< cv::DMatch > matches; 
			matcher.match( descriptors1, descriptors2, matches ); 
			double max_dist = 0; double min_dist = 100;

			for( int i = 0; i < descriptors1.rows; i++ ) 
			{ 
				double dist = matches[i].distance; 
				if( dist < min_dist ) min_dist = dist; 
				if( dist > max_dist ) max_dist = dist; 
			}

			// Keep matches within 2*min_dist of the best one.
			std::vector< cv::DMatch > good_matches;
			for( int i = 0; i < descriptors1.rows; i++ ) 
				if( matches[i].distance <= 2*min_dist ) 
					good_matches.push_back( matches[i]); 
			cv::Mat imgout; 
			drawMatches(tabQR[3], keypoints1, imagecropped, keypoints2, good_matches, imgout); 

			// NOTE(review): findHomography needs >= 4 point pairs; there is
			// no guard here, so too few good matches will throw.
			std::vector<cv::Point2f> pt_img1; 
			std::vector<cv::Point2f> pt_img2; 
			for( int i = 0; i < (int)good_matches.size(); i++ ) 
			{ 
				pt_img1.push_back(keypoints1[ good_matches[i].queryIdx ].pt ); 
				pt_img2.push_back(keypoints2[ good_matches[i].trainIdx ].pt ); 
			}
			cv::Mat H = findHomography( pt_img1, pt_img2, CV_RANSAC );

			// Warp the reference QR into the crop's frame and paste the
			// crop over the left half of the result for visual comparison.
			cv::Mat result; 
			warpPerspective(tabQR[3],result,H,cv::Size(tabQR[3].cols+imagecropped.cols,tabQR[3].rows)); 
			cv::Mat half(result,cv::Rect(0,0,imagecropped.cols,imagecropped.rows)); 
			imagecropped.copyTo(half); 
			imshow( "Result", result );

			break;
		}

		key = (char)cv::waitKey(67);	// ~15 fps pacing; ESC (27) aborts
	}while(patternFound != true && key != 27);

	if(patternFound)
		imCalibNext = imCalibColor;
	
	return patternFound;

}
Example #26
0
/* perform 2D SURF feature matching and export the result as PCL correspondences */
void match (Mat img_1, Mat img_2, vector<KeyPoint> keypoints_1,
    vector<KeyPoint> keypoints_2, vector<DMatch> &good_matches,
    pcl::CorrespondencesPtr &correspondences)
{
  // Describe the supplied keypoints with SURF descriptors.
  SurfDescriptorExtractor extractor;
  Mat descriptors_1, descriptors_2;

  extractor.compute (img_1, keypoints_1, descriptors_1);
  extractor.compute (img_2, keypoints_2, descriptors_2);

  // Brute-force matching under the L2 norm (a FLANN matcher would be a
  // drop-in alternative here).
  BFMatcher matcher (NORM_L2);
  std::vector<DMatch> matches;

  matcher.match (descriptors_1, descriptors_2, matches);

  // One pass over the matches to find the distance extremes.
  double max_dist = 0;
  double min_dist = 100;

  for (int i = 0; i < descriptors_1.rows; i++)
  {
    const double d = matches[i].distance;

    if (d < min_dist)
      min_dist = d;
    if (d > max_dist)
      max_dist = d;
  }

  // Retain only matches closer than 3 * min_dist.
  // need to change the factor "3" to adapt to different cases
  for (int i = 0; i < descriptors_1.rows; i++)
  {
    if (matches[i].distance < 3 * min_dist)
      good_matches.push_back (matches[i]);
  }

  // Mirror the surviving matches into the PCL correspondence list.
  correspondences->resize (good_matches.size ());

  for (unsigned idx = 0; idx < good_matches.size (); idx++)
  {
    const DMatch &gm = good_matches[idx];
    (*correspondences)[idx].index_query = gm.queryIdx;
    (*correspondences)[idx].index_match = gm.trainIdx;

    if (0)  // for debugging
    {
      cout << gm.queryIdx << " " << gm.trainIdx
          << " " << gm.distance << endl;
      cout << good_matches.size () << endl;
    }
  }

  // change the constant value of SHOW_MATCHING to 1 if you want to visulize the matching result
  if (SHOW_MATCHING)
  {
    Mat img_matches;
    drawMatches (img_1, keypoints_1, img_2, keypoints_2, good_matches,
        img_matches, Scalar::all (-1), Scalar::all (-1), vector<char> (),
        DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

    //-- Show detected matches
    imshow ("Good Matches", img_matches);
    waitKey (0);
  }
}
Example #27
0
/** @function main */
int matchKeypoints( int argc, char** argv )
{
//  if( argc != 3 )
//  { readme(); return -1; }
  cv::initModule_nonfree();

  Mat img_1 = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );
  Mat img_2 = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE );
  Codebook codebook;
  //codebook.readInCSV(string(argv[3]));

  if( !img_1.data || !img_2.data )
  { std::cout<< " --(!) Error reading images " << std::endl; return -1; }

  //-- Step 1: Detect the keypoints using SURF Detector
  int minHessian = 15000;

  //SurfFeatureDetector detector( minHessian);
  SURF* detector = new SURF(minHessian,1,4,true,true);

  std::vector<KeyPoint> keypoints_1, keypoints_2;

  assert(img_1.size[0]>0 && img_1.size[1]>0 && img_2.size[0]>0 && img_2.size[1]>0);
  
  (*detector)( img_1, Mat(), keypoints_1 );
  (*detector)( img_2, Mat(), keypoints_2 );
  
  Mat img_keypoints_1; Mat img_keypoints_2;
//  drawKeypoints( img_1, keypoints_1, img_keypoints_1, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
//  drawKeypoints( img_2, keypoints_2, img_keypoints_2, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
  cvtColor(img_1,img_keypoints_1,CV_GRAY2RGB);
  for (KeyPoint k :keypoints_1)
  {
//      circle(img_keypoints_1,k.pt,k.size,Scalar(rand()%256,rand()%256,rand()%256));
//      cout<<k.size<<endl;
      Rect rec(k.pt.x-(k.size/2),k.pt.y-(k.size/2),k.size,k.size);
      rectangle(img_keypoints_1,rec,Scalar(rand()%256,rand()%256,rand()%256));
  }
  
  cvtColor(img_2,img_keypoints_2,CV_GRAY2RGB);
  for (KeyPoint k :keypoints_2)
  {
//      circle(img_keypoints_2,k.pt,k.size,Scalar(rand()%256,rand()%256,rand()%256));
      Rect rec(k.pt.x-(k.size/2),k.pt.y-(k.size/2),k.size,k.size);
      rectangle(img_keypoints_2,rec,Scalar(rand()%256,rand()%256,rand()%256));
  }
  
  
    //-- Show detected (drawn) keypoints
    imshow("Keypoints 1", img_keypoints_1 );
    imshow("Keypoints 2", img_keypoints_2 );
    waitKey(0);

  //-- Step 2: Calculate descriptors (feature vectors)
    //SurfDescriptorExtractor extractor;
  
    Mat descriptors_1, descriptors_2;
  
    detector->compute( img_1, keypoints_1, descriptors_1 );
    detector->compute( img_2, keypoints_2, descriptors_2 );
  
    //-- Step 3: Matching descriptor vectors using FLANN matcher
    FlannBasedMatcher matcher;
    std::vector< std::vector< DMatch > > matches;
    matcher.knnMatch( descriptors_1, descriptors_2, matches, 10 );
  
    double max_dist = 0; double min_dist = 100;
  
    //-- Quick calculation of max and min distances between keypoints
    for( int i = 0; i < matches.size(); i++ )
    {
        for (int j=0; j < matches[i].size(); j++)
        {
            double dist = matches[i][j].distance;
            if( dist < min_dist ) min_dist = dist;
            if( dist > max_dist ) max_dist = dist;
        }
    }
  
    printf("-- Max dist : %f \n", max_dist );
    printf("-- Min dist : %f \n", min_dist );
  
    //-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist,
    //-- or a small arbitary value ( 0.02 ) in the event that min_dist is very
    //-- small)
    //-- PS.- radiusMatch can also be used here.
    std::vector< DMatch > good_matches;
  
    for( int i = 0; i < matches.size(); i++ )
    {
        for (int j=0; j < matches[i].size(); j++)
            //if( matches[i][j].distance <= max(2*min_dist, 0.02) )
            if( matches[i][j].distance <= max((max_dist-min_dist)/4.0 + min_dist, 0.02) )
            { good_matches.push_back( matches[i][j]); }
            else
                printf("discard(%d,%d)\n",i,j);
    }
  
    //-- Draw only "good" matches
    Mat img_matches;
    drawMatches( img_1, keypoints_1, img_2, keypoints_2,
                 good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
                 vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
  
    //-- Show detected matches
    imshow( ".... Matches", img_matches );
  
    for( int i = 0; i < (int)good_matches.size(); i++ )
    { printf( "-- .... Match [%d] Keypoint 1: %d  -- Keypoint 2: %d  \n", i, good_matches[i].queryIdx, good_matches[i].trainIdx ); }
  
    waitKey(0);
    
//    vector<Point2f> corners;
//      double qualityLevel = 0.01;
//      double minDistance = 10;
//      int blockSize = 3;
//      bool useHarrisDetector = false;
//      double k = 0.04;
//      int maxCorners = 23;
//      int maxTrackbar = 100;
//    goodFeaturesToTrack( src_gray,
//                   corners,
//                   maxCorners,
//                   qualityLevel,
//                   minDistance,
//                   Mat(),
//                   blockSize,
//                   useHarrisDetector,
//                   k );
    
  return 0;
  }
Example #28
0
// Event handler: recognizes each loaded textured model in the incoming
// scene image.  For every model it matches descriptors, filters matches by
// distance, estimates a homography, projects the model corners into the
// scene and accepts the hypothesis only if the projected quadrilateral's
// corners appear in a consistent angular order around its centre.
// Accepted hypotheses are stored and drawn onto the output image.
void TORecognize::onNewImage()
{
	CLOG(LTRACE) << "onNewImage";
	try {


		// Change keypoint detector and descriptor extractor types (if required).
		setKeypointDetector();

		setDescriptorExtractor();

		// Re-load the model - extract features from model.
		loadModels();

		std::vector<KeyPoint> scene_keypoints;
		cv::Mat scene_descriptors;
		std::vector< DMatch > matches;

		// Clear vectors! ;)
		recognized_names.clear();
		recognized_centers.clear();
		recognized_corners.clear();
		recognized_scores.clear();


		// Load image containing the scene.
		cv::Mat scene_img = in_img.read();



		// Extract features from scene.
		extractFeatures(scene_img, scene_keypoints, scene_descriptors);
		CLOG(LINFO) << "Scene features: " << scene_keypoints.size();

		// Check model.
		for (unsigned int m=0; m < models_imgs.size(); m++) {
			CLOG(LDEBUG) << "Trying to recognize model (" << m <<"): " << models_names[m];

			// A model without keypoints cannot be matched - abort the whole pass.
			if ((models_keypoints[m]).size() == 0) {
				CLOG(LWARNING) << "Model not valid. Please load model that contain texture";
				return;
			}//: if

			CLOG(LDEBUG) << "Model features: " << models_keypoints[m].size();

			// Change matcher type (if required).
			setDescriptorMatcher();

			// Find matches.
			matcher->match( models_descriptors[m], scene_descriptors, matches );

			CLOG(LDEBUG) << "Matches found: " << matches.size();

			// Emit the unfiltered matches only for the model selected via property.
			if (m == prop_returned_model_number) {
				// Draw all found matches.
				Mat img_matches1;
				drawMatches( models_imgs[m], models_keypoints[m], scene_img, scene_keypoints,
					     matches, img_matches1, Scalar::all(-1), Scalar::all(-1),
					     vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
				out_img_all_correspondences.write(img_matches1);
			}//: if


			// Filtering.
			// NOTE(review): min_dist starts at 100 — if every distance exceeds
			// 100 the 3*min_dist filter below keeps all matches.
			double max_dist = 0;
			double min_dist = 100;

			//-- Quick calculation of max and min distances between keypoints
			for( int i = 0; i < matches.size(); i++ ) {
				double dist = matches[i].distance;
				if( dist < min_dist ) min_dist = dist;
				if( dist > max_dist ) max_dist = dist;
			}//: for

			CLOG(LDEBUG) << "Max dist : " << max_dist;
			CLOG(LDEBUG) << "Min dist : " << min_dist;

			//-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
			std::vector< DMatch > good_matches;

			for( int i = 0; i < matches.size(); i++ ) {
				if( matches[i].distance < 3*min_dist )
					good_matches.push_back( matches[i]);
			}//: for

			CLOG(LDEBUG) << "Good matches: " << good_matches.size();

			// Localize the object
			std::vector<Point2f> obj;
			std::vector<Point2f> scene;

			// Get the keypoints from the good matches.
			for( int i = 0; i < good_matches.size(); i++ ) {
			  obj.push_back( models_keypoints [m] [ good_matches[i].queryIdx ].pt );
			  scene.push_back( scene_keypoints [ good_matches[i].trainIdx ].pt );
			}//: for

			// Find homography between corresponding points.
			// NOTE(review): findHomography requires >= 4 point pairs; with fewer
			// good matches this throws and is swallowed by the outer catch(...).
			Mat H = findHomography( obj, scene, CV_RANSAC );

			// Get the corners from the detected "object hypothesis".
			std::vector<Point2f> obj_corners(4);
			obj_corners[0] = cv::Point2f(0,0);
			obj_corners[1] = cv::Point2f( models_imgs[m].cols, 0 );
			obj_corners[2] = cv::Point2f( models_imgs[m].cols, models_imgs[m].rows );
			obj_corners[3] = cv::Point2f( 0, models_imgs[m].rows );
			std::vector<Point2f> hypobj_corners(4);

			// Transform corners with found homography.
			perspectiveTransform( obj_corners, hypobj_corners, H);

			// Verification: check resulting shape of object hypothesis.
			// Compute "center of mass".
			cv::Point2f center = (hypobj_corners[0] + hypobj_corners[1] + hypobj_corners[2] + hypobj_corners[3])*.25;
			std::vector<double> angles(4);
			cv::Point2f tmp ;
			// Compute angles of each corner around the hypothesis centre.
			for (int i=0; i<4; i++) {
				tmp = (hypobj_corners[i] - center);
				angles[i] = atan2(tmp.y,tmp.x);
				CLOG(LDEBUG)<< tmp << " angle["<<i<<"] = "<< angles[i];
			}//: if


			// Find smallest element.
			int imin = -1;
			double amin = 1000;
			for (int i=0; i<4; i++)
				if (amin > angles[i]) {
					amin = angles[i];
					imin = i;
				}//: if

			// Rotate the angle table so it starts at the smallest angle; a
			// non-degenerate quadrilateral then yields ascending angles.
			for (int i=0; i<imin; i++) {
				angles.push_back (angles[0]);
				angles.erase(angles.begin());
			}//: for

			for (int i=0; i<4; i++) {
				CLOG(LDEBUG)<< "reordered angle["<<i<<"] = "<< angles[i];
			}//: if

			cv::Scalar colour;
			// Score = fraction of model keypoints that survived filtering.
			double score = (double)good_matches.size()/models_keypoints [m].size();
			// Check dependency between corners.
			if ((angles[0] < angles[1]) && (angles[1] < angles[2]) && (angles[2] < angles[3])) {
				// Order is ok.
				colour = Scalar(0, 255, 0);
				CLOG(LINFO)<< "Model ("<<m<<"): keypoints "<< models_keypoints [m].size()<<" corrs = "<< good_matches.size() <<" score "<< score << " VALID";
				// Store the model in a list in proper order.
				storeObjectHypothesis(models_names[m], center, hypobj_corners, score);

			} else {
				// Hypothesis not valid.
				colour = Scalar(0, 0, 255);
				CLOG(LINFO)<< "Model ("<<m<<"): keypoints "<< models_keypoints [m].size()<<" corrs = "<< good_matches.size() <<" score "<< score << " REJECTED";
			}//: else


			// Emit the good matches (with hypothesis outline) for the selected model.
			if (m == prop_returned_model_number) {
				Mat img_matches2;
				// Draw good matches.
				drawMatches( models_imgs[m], models_keypoints[m], scene_img, scene_keypoints,
					     good_matches, img_matches2, Scalar::all(-1), Scalar::all(-1),
					     vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
				// Draw the object as lines, with center and top left corner indicated.
				// The Point2f(cols, 0) offset shifts into the scene half of the side-by-side image.
				line( img_matches2, hypobj_corners[0] + Point2f( models_imgs[m].cols, 0), hypobj_corners[1] + Point2f( models_imgs[m].cols, 0), colour, 4 );
				line( img_matches2, hypobj_corners[1] + Point2f( models_imgs[m].cols, 0), hypobj_corners[2] + Point2f( models_imgs[m].cols, 0), colour, 4 );
				line( img_matches2, hypobj_corners[2] + Point2f( models_imgs[m].cols, 0), hypobj_corners[3] + Point2f( models_imgs[m].cols, 0), colour, 4 );
				line( img_matches2, hypobj_corners[3] + Point2f( models_imgs[m].cols, 0), hypobj_corners[0] + Point2f( models_imgs[m].cols, 0), colour, 4 );
				circle( img_matches2, center + Point2f( models_imgs[m].cols, 0), 2, colour, 4);
				circle( img_matches2, hypobj_corners[0] + Point2f( models_imgs[m].cols, 0), 2, Scalar(255, 0, 0), 4);
				out_img_good_correspondences.write(img_matches2);

			}//: if
		}//: for

		// Draw every accepted hypothesis onto a copy of the scene.
		Mat img_object = scene_img.clone();
		if (recognized_names.size() == 0) {
			CLOG(LWARNING)<< "None of the models was not properly recognized in the image";
		} else {

			for (int h=0; h<recognized_names.size(); h++) {
				// Draw the final object - as lines, with center and top left corner indicated.
				line( img_object, recognized_corners[h][0], recognized_corners[h][1], Scalar(0, 255, 0), 4 );
				line( img_object, recognized_corners[h][1], recognized_corners[h][2], Scalar(0, 255, 0), 4 );
				line( img_object, recognized_corners[h][2], recognized_corners[h][3], Scalar(0, 255, 0), 4 );
				line( img_object, recognized_corners[h][3], recognized_corners[h][0], Scalar(0, 255, 0), 4 );
				circle( img_object, recognized_centers[h], 2, Scalar(0, 255, 0), 4);
				circle( img_object, recognized_corners[h][0], 2, Scalar(255, 0, 0), 4);
				CLOG(LNOTICE)<< "Hypothesis (): model: "<< recognized_names[h]<< " score: "<< recognized_scores[h];
			}//: for
		}//: else
		// Write image to port.
		out_img_object.write(img_object);

	} catch (...) {
		CLOG(LERROR) << "onNewImage failed";
	}//: catch
}
// One pass of custom-pattern detection: detects keypoints in `image`,
// ratio-test-filters kNN matches against the stored pattern descriptors,
// estimates a RANSAC homography, prunes outliers, projects the pattern
// corners into the scene and sanity-checks the resulting quadrilateral
// (convexity, area, all matched points inside).  Returns true when a
// plausible set of matches survives; outputs matched 2D features, their
// 3D pattern points, H and the projected corners.
bool CustomPattern::findPatternPass(const Mat& image, vector<Point2f>& matched_features, vector<Point3f>& pattern_points,
                                    Mat& H, vector<Point2f>& scene_corners, const double pratio, const double proj_error,
                                    const bool refine_position, const Mat& mask, OutputArray output)
{
    if (!initialized) {return false; }
    matched_features.clear();
    pattern_points.clear();

    vector<vector<DMatch> > matches;
    vector<KeyPoint> f_keypoints;
    Mat f_descriptor;

    detector->detect(image, f_keypoints, mask);
    if (refine_position) refineKeypointsPos(image, f_keypoints);

    descriptorExtractor->compute(image, f_keypoints, f_descriptor);
    descriptorMatcher->knnMatch(f_descriptor, descriptor, matches, 2); // k = 2;
    vector<DMatch> good_matches;
    vector<Point2f> obj_points;

    // Lowe-style ratio test: keep a match only when the best neighbour is
    // sufficiently closer than the second best.
    for(int i = 0; i < f_descriptor.rows; ++i)
    {
        if(matches[i][0].distance < pratio * matches[i][1].distance)
        {
            const DMatch& dm = matches[i][0];
            good_matches.push_back(dm);
            // "keypoints1[matches[i].queryIdx] has a corresponding point in keypoints2[matches[i].trainIdx]"
            matched_features.push_back(f_keypoints[dm.queryIdx].pt);
            pattern_points.push_back(points3d[dm.trainIdx]);
            obj_points.push_back(keypoints[dm.trainIdx].pt);
        }
    }

    if (good_matches.size() < MIN_POINTS_FOR_H) return false;

    Mat h_mask;
    H = findHomography(obj_points, matched_features, RANSAC, proj_error, h_mask);
    if (H.empty())
    {
        // cout << "findHomography() returned empty Mat." << endl;
        return false;
    }

    // Drop RANSAC outliers from the three parallel vectors.
    // NOTE(review): elements are removed while iterating by index, and
    // h_mask is still indexed by the pre-removal position — correctness
    // depends on deleteStdVecElem's semantics (not visible here); verify
    // that indices stay aligned after a removal.
    for(unsigned int i = 0; i < good_matches.size(); ++i)
    {
        if(!h_mask.data[i])
        {
            deleteStdVecElem(good_matches, i);
            deleteStdVecElem(matched_features, i);
            deleteStdVecElem(pattern_points, i);
        }
    }

    if (good_matches.empty()) return false;

    // Reject the pass if check_matches removed anything further.
    uint numb_elem = good_matches.size();
    check_matches(matched_features, obj_points, good_matches, pattern_points, H);
    if (good_matches.empty() || numb_elem < good_matches.size()) return false;

    // Get the corners from the image
    scene_corners = vector<Point2f>(4);
    perspectiveTransform(obj_corners, scene_corners, H);

    // Check correctnes of H
    // Is it a convex hull?
    bool cConvex = isContourConvex(scene_corners);
    if (!cConvex) return false;

    // Is the hull too large or small?
    double scene_area = contourArea(scene_corners);
    if (scene_area < MIN_CONTOUR_AREA_PX) return false;
    double ratio = scene_area/img_roi.size().area();
    if ((ratio < MIN_CONTOUR_AREA_RATIO) ||
        (ratio > MAX_CONTOUR_AREA_RATIO)) return false;

    // Is any of the projected points outside the hull?
    // NOTE(review): same remove-while-iterating pattern as above.
    for(unsigned int i = 0; i < good_matches.size(); ++i)
    {
        if(pointPolygonTest(scene_corners, f_keypoints[good_matches[i].queryIdx].pt, false) < 0)
        {
            deleteStdVecElem(good_matches, i);
            deleteStdVecElem(matched_features, i);
            deleteStdVecElem(pattern_points, i);
        }
    }

    // Optional visualization: matches plus the projected pattern outline.
    if (output.needed())
    {
        Mat out;
        drawMatches(image, f_keypoints, img_roi, keypoints, good_matches, out);
        // Draw lines between the corners (the mapped object in the scene - image_2 )
        line(out, scene_corners[0], scene_corners[1], Scalar(0, 255, 0), 2);
        line(out, scene_corners[1], scene_corners[2], Scalar(0, 255, 0), 2);
        line(out, scene_corners[2], scene_corners[3], Scalar(0, 255, 0), 2);
        line(out, scene_corners[3], scene_corners[0], Scalar(0, 255, 0), 2);
        out.copyTo(output);
    }

    return (!good_matches.empty()); // return true if there are enough good matches
}
Example #30
0
/**
 * @function SURF_main
 * @brief Detects img_object inside img_scene: SURF keypoints and
 * descriptors, FLANN matching, distance-based filtering, homography
 * estimation and drawing of the detected object's outline over the
 * side-by-side match visualization.
 * @param img_scene  image to search in
 * @param img_object image of the object to detect
 * @return 0 on success, -1 when an input image is invalid or too few
 * good matches exist to estimate a homography.
 */
int SURF_main(Mat img_scene, Mat img_object)
{
  if( !img_object.data || !img_scene.data )
  { std::cout<< " --(!) Error reading images " << std::endl; return -1; }
  printf("Coming to SURF");
  //-- Step 1: Detect the keypoints using SURF Detector
  int minHessian = 400;

  SurfFeatureDetector detector( minHessian );

  std::vector<KeyPoint> keypoints_object, keypoints_scene;

  detector.detect( img_object, keypoints_object );
  detector.detect( img_scene, keypoints_scene );

  //-- Step 2: Calculate descriptors (feature vectors)
  SurfDescriptorExtractor extractor;

  Mat descriptors_object, descriptors_scene;

  extractor.compute( img_object, keypoints_object, descriptors_object );
  extractor.compute( img_scene, keypoints_scene, descriptors_scene );

  //-- Step 3: Matching descriptor vectors using FLANN matcher
  FlannBasedMatcher matcher;
  std::vector< DMatch > matches;
  matcher.match( descriptors_object, descriptors_scene, matches );

  double max_dist = 0; double min_dist = 100;

  //-- Quick calculation of max and min distances between keypoints
  for( int i = 0; i < descriptors_object.rows; i++ )
  { double dist = matches[i].distance;
    if( dist < min_dist ) min_dist = dist;
    if( dist > max_dist ) max_dist = dist;
  }

  printf("-- Max dist : %f \n", max_dist );
  printf("-- Min dist : %f \n", min_dist );

  //-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
  std::vector< DMatch > good_matches;

  for( int i = 0; i < descriptors_object.rows; i++ )
  { if( matches[i].distance < 3*min_dist )
    { good_matches.push_back( matches[i]); }
  }

  Mat img_matches;
  drawMatches( img_object, keypoints_object, img_scene, keypoints_scene,
               good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
               vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

  //-- findHomography requires at least 4 point pairs; bail out gracefully
  //-- instead of letting it assert/throw on degenerate input (fix).
  if( good_matches.size() < 4 )
  {
    std::cout << " --(!) Not enough good matches to estimate a homography " << std::endl;
    imshow( "Good Matches & Object detection", img_matches );
    waitKey(1);
    return -1;
  }

  //-- Localize the object from img_1 in img_2
  std::vector<Point2f> obj;
  std::vector<Point2f> scene;

  for( size_t i = 0; i < good_matches.size(); i++ )
  {
    //-- Get the keypoints from the good matches
    obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
    scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
  }

  Mat H = findHomography( obj, scene, CV_RANSAC );

  //-- Get the corners from the image_1 ( the object to be "detected" )
  std::vector<Point2f> obj_corners(4);
  obj_corners[0] = cvPoint(0,0); obj_corners[1] = cvPoint( img_object.cols, 0 );
  obj_corners[2] = cvPoint( img_object.cols, img_object.rows ); obj_corners[3] = cvPoint( 0, img_object.rows );
  std::vector<Point2f> scene_corners(4);

  perspectiveTransform( obj_corners, scene_corners, H);

  //-- Draw lines between the corners (the mapped object in the scene - image_2 )
  //-- offset by img_object.cols to land in the scene half of img_matches.
  line( img_matches, scene_corners[0] + Point2f( img_object.cols, 0), scene_corners[1] + Point2f( img_object.cols, 0), Scalar(0, 255, 0), 4 );
  line( img_matches, scene_corners[1] + Point2f( img_object.cols, 0), scene_corners[2] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
  line( img_matches, scene_corners[2] + Point2f( img_object.cols, 0), scene_corners[3] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
  line( img_matches, scene_corners[3] + Point2f( img_object.cols, 0), scene_corners[0] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );

  //-- Show detected matches
  imshow( "Good Matches & Object detection", img_matches );

  waitKey(1);

  return 0;
}