Example #1
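This example calls a sort_pred comparator that is not shown; a minimal sketch, assuming it simply orders matches by ascending descriptor distance:

// Hypothetical comparator assumed by the sort() call below:
// orders DMatch entries by ascending descriptor distance.
bool sort_pred(const DMatch& left, const DMatch& right)
{
	return left.distance < right.distance;
}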
int main(int argc, char* argv[])
{
	cv::Mat cvmGray1 = imread("rgb0.bmp", CV_LOAD_IMAGE_GRAYSCALE);
	cv::Mat cvmGray2 = imread("rgb1.bmp", CV_LOAD_IMAGE_GRAYSCALE);

	SURF surf(100, 4, 2, false, true);   // SURF detector used for the keypoints
	// FREAK extractor used to compute the descriptors
	FREAK freak;

	vector<KeyPoint> vKeypoints1; 
	vector<KeyPoint> vKeypoints2; 

	Mat cvmDescriptor1;
	Mat cvmDescriptor2;

	vector<DMatch> vMatches;
	BruteForceMatcher<HammingLUT> matcher;

	surf(cvmGray1, cv::Mat(), vKeypoints1);
	freak.compute( cvmGray1, vKeypoints1, cvmDescriptor1 );

	surf(cvmGray2, cv::Mat(), vKeypoints2);
	double t = (double)getTickCount();

	freak.compute( cvmGray2, vKeypoints2, cvmDescriptor2 );
	t = ((double)getTickCount() - t)/getTickFrequency();

	matcher.match(cvmDescriptor1, cvmDescriptor2, vMatches);

	std::cout << "whole time [s]: " << t << std::endl;
    sort (vMatches.begin(), vMatches.end(), sort_pred);
    vector<DMatch> closest;

    int nSize = (int)vMatches.size();
    cout << "matched point pairs: " << nSize << endl;
	for( int i = 0; i < std::min(nSize, 100); i++ ) {  // guard against fewer than 100 matches
        closest.push_back( vMatches[i] );
        cout << vMatches[i].distance << " ";
    }
    // drawing the results
    Mat cvmImgMatches;

	cout << "FOUND " << vKeypoints1.size() << " keypoints on first image" << endl;
	cout << "FOUND " << vKeypoints2.size() << " keypoints on second image" << endl;

    cv::drawMatches( cvmGray1, vKeypoints1, cvmGray2, vKeypoints2, closest, cvmImgMatches);
    
    namedWindow("matches", 0);
    imshow("matches", cvmImgMatches);
    waitKey(0);

    return 0;
}
Example #2
int main( int argc, char** argv )
{
  if( argc != 3 )
  { return -1; }

  Mat img1 = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );
  Mat img2 = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE );

  if( !img1.data || !img2.data )
  { std::cout << " --(!) Error reading images " << std::endl; return -1; }

// detecting keypoints
FastFeatureDetector detector(15);
vector<KeyPoint> keypoints1;
detector.detect(img1, keypoints1);

// detect keypoints in the second image with the same detector
vector<KeyPoint> keypoints2;
detector.detect(img2, keypoints2);

// computing descriptors
SurfDescriptorExtractor extractor;
Mat descriptors1;
extractor.compute(img1, keypoints1, descriptors1);

// compute descriptors for the second image with the same extractor
Mat descriptors2;
extractor.compute(img2, keypoints2, descriptors2);


// matching descriptors
BruteForceMatcher<L2<float> > matcher;
vector<DMatch> matches;
matcher.match(descriptors1, descriptors2, matches);


// drawing the results
namedWindow("matches", 1);
Mat img_matches;
drawMatches(img1, keypoints1, img2, keypoints2, matches, img_matches);
imshow("matches", img_matches);
waitKey(0);

return 0;
}
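The next example calls a ShowHelpText helper that is not included here; a minimal sketch, assuming it only prints a banner:

// Hypothetical helper assumed by the example below: prints a short banner.
static void ShowHelpText()
{
	printf("This sample detects SURF keypoints in 1.jpg and 2.jpg,\n"
	       "matches their descriptors with BruteForceMatcher, and\n"
	       "draws the matched pairs.\n");
}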
//-----------------------------------[ main() function ]----------------------------------------
//		Description: entry point of the console application; execution starts here
//-----------------------------------------------------------------------------------------------
int main(  )
{
	// [0] set the console text color
	system("color 1F"); 

	// [0] show the welcome/help text
	ShowHelpText( );

	// [1] load the source images
	Mat srcImage1 = imread("1.jpg",1);
	Mat srcImage2 = imread("2.jpg",1);
	if( !srcImage1.data || !srcImage2.data )
	{ printf("Error reading the images: make sure the files passed to imread exist in the working directory!\n"); return -1; }

	// [2] detect keypoints with the SURF operator
	int minHessian = 700;                          // Hessian threshold used by SURF
	SurfFeatureDetector detector( minHessian );    // SURF feature detector object
	std::vector<KeyPoint> keyPoints1, keyPoints2;  // vectors holding the keypoints

	// [3] call detect() and store the SURF keypoints in the vectors
	detector.detect( srcImage1, keyPoints1 );
	detector.detect( srcImage2, keyPoints2 );

	// [4] compute the descriptors (feature vectors)
	SurfDescriptorExtractor extractor;
	Mat descriptors1, descriptors2;
	extractor.compute( srcImage1, keyPoints1, descriptors1 );
	extractor.compute( srcImage2, keyPoints2, descriptors2 );

	// [5] match with BruteForce
	// instantiate a matcher
	BruteForceMatcher< L2<float> > matcher;
	std::vector< DMatch > matches;
	// match the descriptors of the two images
	matcher.match( descriptors1, descriptors2, matches );

	// [6] draw the keypoints matched between the two images
	Mat imgMatches;
	drawMatches( srcImage1, keyPoints1, srcImage2, keyPoints2, matches, imgMatches );

	// [7] show the result
	imshow("Matches", imgMatches );

	waitKey(0);
	return 0;
}
Example #4
/**
 * @function main
 * @brief Main function
 */
int main( int argc, char** argv )
{
  if( argc != 3 )
  { return -1; }

  Mat img_1 = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );
  Mat img_2 = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE );

  if( !img_1.data || !img_2.data )
  { return -1; }

  //-- Step 1: Detect the keypoints using SURF Detector
  int minHessian = 400;

  SurfFeatureDetector detector( minHessian );

  std::vector<KeyPoint> keypoints_1, keypoints_2;

  detector.detect( img_1, keypoints_1 );
  detector.detect( img_2, keypoints_2 );

  //-- Step 2: Calculate descriptors (feature vectors)
  SurfDescriptorExtractor extractor;

  Mat descriptors_1, descriptors_2;

  extractor.compute( img_1, keypoints_1, descriptors_1 );
  extractor.compute( img_2, keypoints_2, descriptors_2 );

  //-- Step 3: Matching descriptor vectors with a brute force matcher
  BruteForceMatcher< L2<float> > matcher;
  std::vector< DMatch > matches;
  matcher.match( descriptors_1, descriptors_2, matches );

  //-- Draw matches
  Mat img_matches;
  drawMatches( img_1, keypoints_1, img_2, keypoints_2, matches, img_matches );

  //-- Show detected matches
  imshow("Matches", img_matches );

  waitKey(0);

  return 0;
}
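testCalonderClassifier below relies on a warpPerspectiveRand helper that is not shown; a minimal sketch, assuming it fills H12 with a random perspective warp and applies it (modeled on the OpenCV sample helper of the same name):

// Hypothetical helper assumed by testCalonderClassifier: builds a random
// homography H and warps src into dst with it (a sketch, not the original).
static void warpPerspectiveRand( const Mat& src, Mat& dst, Mat& H, RNG& rng )
{
    H.create(3, 3, CV_32FC1);
    H.at<float>(0,0) = rng.uniform( 0.8f, 1.2f);
    H.at<float>(0,1) = rng.uniform(-0.1f, 0.1f);
    H.at<float>(0,2) = rng.uniform(-0.1f, 0.1f) * src.cols;
    H.at<float>(1,0) = rng.uniform(-0.1f, 0.1f);
    H.at<float>(1,1) = rng.uniform( 0.8f, 1.2f);
    H.at<float>(1,2) = rng.uniform(-0.1f, 0.1f) * src.rows;
    H.at<float>(2,0) = rng.uniform(-1e-4f, 1e-4f);
    H.at<float>(2,1) = rng.uniform(-1e-4f, 1e-4f);
    H.at<float>(2,2) = rng.uniform( 0.8f, 1.2f);
    warpPerspective( src, dst, H, src.size() );
}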
/*
 * Test the Calonder classifier by matching keypoints on a given image:
 *      classifierFilename - name of the file from which the classifier is read,
 *      imgFilename - test image filename.
 *
 * To compute keypoint descriptors you may use the RTreeClassifier class directly
 * (as for training), but it is more convenient to use CalonderDescriptorExtractor,
 * which is a wrapper around RTreeClassifier.
 */
void testCalonderClassifier( const string& classifierFilename, const string& imgFilename )
{
    Mat img1 = imread( imgFilename, CV_LOAD_IMAGE_GRAYSCALE ), img2, H12;
    if( img1.empty() )
    {
        cout << "Test image can not be read." << endl;
        exit(-1);
    }
    warpPerspectiveRand( img1, img2, H12, theRNG() );

    // Extract keypoints from the test images
    SurfFeatureDetector detector;
    vector<KeyPoint> keypoints1; detector.detect( img1, keypoints1 );
    vector<KeyPoint> keypoints2; detector.detect( img2, keypoints2 );

    // Compute descriptors
    CalonderDescriptorExtractor<float> de( classifierFilename );
    Mat descriptors1;  de.compute( img1, keypoints1, descriptors1 );
    Mat descriptors2;  de.compute( img2, keypoints2, descriptors2 );

    // Match descriptors
    BruteForceMatcher<L1<float> > matcher;
    vector<DMatch> matches;
    matcher.match( descriptors1, descriptors2, matches );

    // Prepare inlier mask
    vector<char> matchesMask( matches.size(), 0 );
    vector<Point2f> points1; KeyPoint::convert( keypoints1, points1 );
    vector<Point2f> points2; KeyPoint::convert( keypoints2, points2 );
    Mat points1t; perspectiveTransform(Mat(points1), points1t, H12);
    for( size_t mi = 0; mi < matches.size(); mi++ )
    {
        if( norm(points2[matches[mi].trainIdx] - points1t.at<Point2f>((int)mi,0)) < 4 ) // inlier
            matchesMask[mi] = 1;
    }

    // Draw
    Mat drawImg;
    drawMatches( img1, keypoints1, img2, keypoints2, matches, drawImg, CV_RGB(0, 255, 0), CV_RGB(0, 0, 255), matchesMask );
    string winName = "Matches";
    namedWindow( winName, WINDOW_AUTOSIZE );
    imshow( winName, drawImg );
    waitKey();
}
int main(int argc, char** argv)
{
	if(argc != 3)
	{
		help();
		return -1;
	}

	Mat img1 = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
	Mat img2 = imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE);
	if(img1.empty() || img2.empty())
	{
		printf("Can't read one of the images\n");
		return -1;
	}

	// detecting keypoints
	SurfFeatureDetector detector(400);
	vector<KeyPoint> keypoints1, keypoints2;
	detector.detect(img1, keypoints1);
	detector.detect(img2, keypoints2);

	// computing descriptors
	SurfDescriptorExtractor extractor;
	Mat descriptors1, descriptors2;
	extractor.compute(img1, keypoints1, descriptors1);
	extractor.compute(img2, keypoints2, descriptors2);

	// matching descriptors
	BruteForceMatcher<L2<float> > matcher;
	vector<DMatch> matches;
	matcher.match(descriptors1, descriptors2, matches);

	// drawing the results
	namedWindow("matches", 1);
	Mat img_matches;
	drawMatches(img1, keypoints1, img2, keypoints2, matches, img_matches);
	imshow("matches", img_matches);
	waitKey(0);

	return 0;
}
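MatchingCountTest below depends on a gcFilterMatches function that is not shown; a plausible sketch, assuming it keeps only matches confirmed in both matching directions (a cross-check filter):

// Hypothetical cross-check filter assumed by MatchingCountTest: keep a
// forward match only if the opposite-direction match points back to it.
// (The keypoint arguments are unused in this sketch.)
void gcFilterMatches(const vector<DMatch>& matches,
                     const vector<DMatch>& matches_opp,
                     vector<DMatch>& matches_best,
                     const vector<KeyPoint>& /*referenceKeyPoints*/,
                     const vector<KeyPoint>& /*transformedKeyPoints*/)
{
	matches_best.clear();
	for (size_t i = 0; i < matches.size(); ++i) {
		const DMatch& fwd = matches[i];
		// matches_opp has one entry per transformed descriptor, in order,
		// so matches_opp[fwd.trainIdx] is the reverse match of fwd's target
		if (fwd.trainIdx >= 0 && fwd.trainIdx < (int)matches_opp.size()
		    && matches_opp[fwd.trainIdx].trainIdx == fwd.queryIdx)
			matches_best.push_back(fwd);
	}
}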
void MatchingCountTest(vector<KeyPoint> &referenceKeyPoints, Mat &referenceDescriptors, 
	vector<KeyPoint> &transformedKeyPoints ,Mat &transformedDescriptors, struct ImageData &imageData, 
	vector<struct Data>& result, int width, int height)
{
	BruteForceMatcher<Hamming> matcher; 
	std::vector<DMatch> matches, matches_opp, matches_best;
	int w = width*imageData.scale;
	int h = height*imageData.scale;
	if(transformedDescriptors.rows>0){
		matcher.match(referenceDescriptors, transformedDescriptors, matches);
		matcher.match(transformedDescriptors, referenceDescriptors, matches_opp);
		gcFilterMatches(matches, matches_opp, matches_best, referenceKeyPoints, transformedKeyPoints);			

		// diagonal of the scaled image; its midpoint serves as the rotation center
		float rotatedWidth = (float)sqrt((double)w * w + (double)h * h);

		CvPoint center;
		center.x = center.y = (int)(rotatedWidth / 2.0f);
		if(matches_best.size()>0)
			countCorrectMatch(referenceKeyPoints,referenceDescriptors,transformedKeyPoints,imageData,result,width,height,center,transformedDescriptors, matches_best);
	}
}
Example #8
// Panorama stitching
Mat panorama(Mat src1, Mat src2, int width, int height)
{
	// Detect SIFT keypoints and compute their descriptors
	Mat gray1, gray2, des1, des2;
	SiftFeatureDetector detector(2000);
	SiftDescriptorExtractor extractor;
	vector<KeyPoint> kps1, kps2;
	cvtColor(src1, gray1, CV_BGR2GRAY);
	cvtColor(src2, gray2, CV_BGR2GRAY);
	detector.detect(gray1, kps1);
	detector.detect(gray2, kps2);
	extractor.compute(gray1, kps1, des1);
	extractor.compute(gray2, kps2, des2);

	// Match the keypoints
	vector<DMatch> matches;
	BruteForceMatcher< L2<float> > matcher;
	matcher.match(des1, des2, matches);
	vector<Vec2f> pts1(matches.size());
	vector<Vec2f> pts2(matches.size());

	// Compute the homography from the matched points
	for (size_t i = 0; i < matches.size(); ++i){
		pts1[i][0] = kps1[matches[i].queryIdx].pt.x;
		pts1[i][1] = kps1[matches[i].queryIdx].pt.y;
		pts2[i][0] = kps2[matches[i].trainIdx].pt.x;
		pts2[i][1] = kps2[matches[i].trainIdx].pt.y;
	}
	Mat H = findHomography(pts1, pts2, CV_RANSAC);

	// Stitch the panorama using the homography matrix H
	Mat dst;
	warpPerspective(src1, dst, H, Size(width, height));
	// Overlay src2 on the warped image (assumes src2 spans src1's extent)
	for (int y = 0; y < src1.rows; y++){
		for (int x = 0; x < src1.cols; x++){
			dst.at<Vec3b>(y, x) = src2.at<Vec3b>(y, x);
		}
	}
	return dst;
}
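A typical call, with hypothetical file names, warps src1 into src2's coordinate frame and overlays src2 on the result:

// Hypothetical usage of panorama(): the canvas is sized to hold both images.
// Mat left = imread("left.jpg"), right = imread("right.jpg");
// Mat pano = panorama(left, right, left.cols + right.cols, right.rows);
// imwrite("pano.jpg", pano);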
pair<vector<Point2f>, vector<Point2f> > computeMatching(Mat &img1, Mat &img2, vector<KeyPoint> &keypoints1, vector<KeyPoint> &keypoints2)
{
    SiftDescriptorExtractor extractor;
    Mat descriptors1, descriptors2;
    extractor.compute(img1, keypoints1, descriptors1);
    extractor.compute(img2, keypoints2, descriptors2);
    BruteForceMatcher<L2<float> > matcher;
    vector<DMatch> matches1_2, matches2_1;
    matcher.match(descriptors1, descriptors2, matches1_2);
    matcher.match(descriptors2, descriptors1, matches2_1);
    pair<vector<Point2f>, vector<Point2f> > matches;
    vector<DMatch> dmatchFiltrado;   // cross-checked, deduplicated matches
    double maxDistance = 90;         // reject matches with larger descriptor distance
    for (uint i=0; i < matches1_2.size(); i++) {
        if (matches1_2[i].distance > maxDistance) {
            continue;
        }
        pair<Point2f, Point2f> match1_2 = pair<Point2f, Point2f>(keypoints1[matches1_2[i].queryIdx].pt, keypoints2[matches1_2[i].trainIdx].pt);
        for (uint j=0; j < matches2_1.size(); j++) {
            if (matches2_1[j].distance > maxDistance) {
                continue;
            }
            pair<Point2f, Point2f> match2_1 = pair<Point2f, Point2f>(keypoints1[matches2_1[j].trainIdx].pt, keypoints2[matches2_1[j].queryIdx].pt);
            if (match1_2.first == match2_1.first && match1_2.second == match2_1.second) {
                if (dmatchFiltrado.empty() || (matches.first.back() != match1_2.first || matches.second.back() != match1_2.second)) {
                    dmatchFiltrado.push_back(matches1_2[i]);
                    matches.first.push_back(match1_2.first);
                    matches.second.push_back(match1_2.second);
                }
            }
        }
    }

    Mat img3;
    drawMatches(img1, keypoints1, img2, keypoints2, dmatchFiltrado, img3);
    imshow("Correspondencias", img3);
    waitKey();
    destroyWindow("Correspondencias");
    return matches;
}
Example #10
vector< DMatch > Panorama::matchingGoodPoint(Mat descriptors1, Mat descriptors2){
    
    cout << "Matching descriptors..." << endl;
	BruteForceMatcher<L2<float> > matcher;
    vector<DMatch> matches;
    matcher.match(descriptors1, descriptors2, matches);
    
    cout << "Total Matches: " << descriptors1.rows << endl;

    double max_dist = 0; double min_dist = 100;
    
    cout << "Eliminate Bad Matches..." << endl;
    // Quick calculation of max and min distances between keypoints
    for( int i = 0; i < descriptors1.rows; i++ )
    { double dist = matches[i].distance;
        if( dist < min_dist ) min_dist = dist;
        if( dist > max_dist ) max_dist = dist;
    }
    
//    printf("-- Max dist : %f \n", max_dist );
//    printf("-- Min dist : %f \n", min_dist );
    
    // Draw only "good" matches (i.e. whose distance is less than 2*min_dist )
    vector< DMatch > good_matches;
    int count = 0;
    for( int i = 0; i < descriptors1.rows; i++ )
    { 
        if( matches[i].distance < 2*min_dist )
        {
            good_matches.push_back( matches[i]); 
            count++;
        }
    }
    
    cout << "Good Matches: " << count << endl;

    return good_matches;
}
int main( int argc, char** argv )
{
	if (argc <= 1) {
		cout << "USAGE: " << argv[0] << " <image1> [image2]" << endl;
		return 0;
	}
	
	Mat img_1 = imread( argv[1] );
		
	//-- Step 1: Detect the keypoints using SURF Detector
	int minHessian = 400;
	
	SurfFeatureDetector detector( minHessian );
	std::vector<KeyPoint> keypoints_1;
	detector.detect( img_1, keypoints_1 );

	Mat img_keypoints;

	if (argc == 2) {
		
		drawKeypoints( img_1, keypoints_1, img_keypoints, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
		
	} else if (argc == 3) {
		Mat img_2 = imread(argv[2]);
		
		vector<KeyPoint> keypoints_2;
		detector.detect( img_2, keypoints_2 );

		SurfDescriptorExtractor extractor(48, 12, true);
		Mat descriptors_1,descriptors_2;
		extractor.compute(img_1, keypoints_1, descriptors_1);
		extractor.compute(img_2, keypoints_2, descriptors_2);
		
		BruteForceMatcher<L2<float> > matcher;
		vector<DMatch> matches;
		matcher.match(descriptors_1, descriptors_2, matches);
		
		vector<Point2f> pts1,pts2;
		for (unsigned int i=0; i<matches.size(); i++) {
			pts1.push_back(keypoints_1[matches[i].queryIdx].pt);
			pts2.push_back(keypoints_2[matches[i].trainIdx].pt);
		}
		vector<uchar> status;
		Mat F = findFundamentalMat(pts1, pts2, FM_RANSAC, 0.5, 0.99, status);

		cout << "F " << F << endl;

		vector<KeyPoint> kpts1,kpts2;
		vector<DMatch> Fmatches;
		for (unsigned int i=0; i<pts1.size(); i++) {
			if (status[i]) {
				cout << "Fmatch " << i << endl;
				// inlier: query and train share the same index in the rebuilt lists
				Fmatches.push_back(DMatch((int)kpts1.size(), (int)kpts1.size(), 1.f));
				kpts1.push_back(KeyPoint(pts1[i], 1));
				kpts2.push_back(KeyPoint(pts2[i], 1));
			}
		}
		
		drawMatches(img_1, kpts1, img_2, kpts2, Fmatches, img_keypoints, Scalar::all(-1), Scalar::all(-1));
	}

	imwrite(std::string(basename(argv[1])) + "_keypoints.jpg", img_keypoints);
	
	return 0;
}
void CRenderCenterDlg::OnBnClickedTarget()
{
	// TODO: add control notification handler code here
	if(!imgtarget.data)
	{
		MessageBox(TEXT("no target image loaded!"),TEXT("error"),MB_OK);
		_Global_Obj_Ptr->OnBnClickedStop();
		return;
	}
	SiftFeatureDetector siftdtc;
	vector<KeyPoint>kp1,kp2;
	siftdtc.detect(imgtarget,kp1);
	siftdtc.detect(imgfusion,kp2);

	SiftDescriptorExtractor extractor;
	Mat descriptor1,descriptor2;
	extractor.compute(imgtarget,kp1,descriptor1);
	extractor.compute(imgfusion,kp2,descriptor2);

	BruteForceMatcher<L2<float>> matcher;
	vector<DMatch> matches;
	matcher.match(descriptor1,descriptor2,matches);

	int i,j;
	int pointcount=(int)matches.size();
	Mat point1(pointcount,2,CV_32F);
	Mat point2(pointcount,2,CV_32F);
	Point2f point;
	for(i=0;i<pointcount;i++)
	{
		point=kp1[matches[i].queryIdx].pt;
		point1.at<float>(i,0)=point.x;
		point1.at<float>(i,1)=point.y;

		point=kp2[matches[i].trainIdx].pt;
		point2.at<float>(i,0)=point.x;
		point2.at<float>(i,1)=point.y;
	}
	Mat m_fundamental;
	vector<uchar> m_ransacstatus;
	m_fundamental=findFundamentalMat(point1,point2,m_ransacstatus,FM_RANSAC);

	// findFundamentalMat returns a 3x3 CV_64F matrix, so read it as double
	float hhh[9];
	for(i=0;i<9;i++)
		hhh[i]=0;

	for(i=0;i<3;i++)
	{
		for(j=0;j<3;j++)
		{
			hhh[i*3+j]=(float)m_fundamental.ptr<double>(i)[j];
		}
	}
	int outliercount=0;
	for(i=0;i<pointcount;i++)
	{
		if(m_ransacstatus[i]==0)
		{
			outliercount++;
		}
	}

	vector<Point2f> m_leftinlier;
	vector<Point2f> m_rightinlier;
	vector<DMatch> m_inliermatches;
	int inliercount=pointcount-outliercount;
	m_inliermatches.resize(inliercount);
	m_leftinlier.resize(inliercount);
	m_rightinlier.resize(inliercount);
	inliercount=0;
	for(i=0;i<pointcount;i++)
	{
		if(m_ransacstatus[i]!=0)
		{
			m_leftinlier[inliercount].x=point1.at<float>(i,0);
			m_leftinlier[inliercount].y=point1.at<float>(i,1);
			m_rightinlier[inliercount].x=point2.at<float>(i,0);
			m_rightinlier[inliercount].y=point2.at<float>(i,1);
			m_inliermatches[inliercount].queryIdx=inliercount;
			m_inliermatches[inliercount].trainIdx=inliercount;
			inliercount++;
		}
	}
	vector<KeyPoint> key1(inliercount);
	vector<KeyPoint> key2(inliercount);
	KeyPoint::convert(m_leftinlier,key1);
	KeyPoint::convert(m_rightinlier,key2);

	Mat H=findHomography(m_leftinlier,m_rightinlier,CV_RANSAC);
	std::vector<Point2f> obj_corners(4);
    obj_corners[0]=cv::Point(0,0);obj_corners[1]=cv::Point(imgtarget.cols,0);
    obj_corners[2]=cv::Point(imgtarget.cols,imgtarget.rows);obj_corners[3]=cv::Point(0,imgtarget.rows);
	std::vector<Point2f>scene_corners(4);
	perspectiveTransform(obj_corners,scene_corners,H);
	rectangle(imgfusion,scene_corners[0],scene_corners[2],Scalar(0,0,255,0),1,8,0);


	CWnd *pWnd=GetDlgItem(IDC_IMGFUSION);
	CDC *pDC=pWnd->GetDC();
	HDC hDC=pDC->GetSafeHdc();
	IplImage img=imgfusion;
	CvvImage cimg;
	cimg.CopyOf(&img);
	CRect rect;
	GetDlgItem(IDC_IMGFUSION)->GetClientRect(&rect);
	cimg.DrawToHDC(hDC,&rect);
}
Example #13
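Marker::match below uses members of the Marker class that are not shown; the assumed interface, sketched as comments from the usage in the method:

// Hypothetical Marker members assumed by the method below (inferred, not the
// original declaration):
//   Mat image;                  // the marker template image
//   vector<KeyPoint> keypoints; // keypoints detected on the marker
//   Mat descriptors;            // descriptors of those keypoints
//   int cardinal;               // which corner the marker encodes (0=NW..3=SW)
//   string name;                // used to build debug output filenames
//   Point2f point;              // output: detected extreme point
//   void calculate();           // fills keypoints/descriptors for image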
void Marker::match(vector<KeyPoint> inputKeypoints,Mat inputDescriptors,Mat inputImage){
    BruteForceMatcher<L2<float> > matcher;
    vector<DMatch> matches;
    //Calculate the descriptors of the marker
    calculate();
    //Match the marker with the input image
    matcher.match(descriptors,inputDescriptors,matches);
    //Extract pairs of points
    vector<int> pairMarkerKPs(matches.size()), pairInputKPs(matches.size());
    for( size_t i = 0; i < matches.size(); i++ ){
        pairMarkerKPs[i] = matches[i].queryIdx;
        pairInputKPs[i] = matches[i].trainIdx;
    }
    //Converts the keypoints to Point2f vectors
    vector<Point2f> markerPoints;
    vector<Point2f> inputPoints;
    KeyPoint::convert(keypoints,markerPoints,pairMarkerKPs);
    KeyPoint::convert(inputKeypoints,inputPoints,pairInputKPs);
    //Matched pairs of 2D points. Those pairs will be used to calculate homography
    Mat marker2Dfeatures;
    Mat input2Dfeatures;
    Mat(markerPoints).copyTo(marker2Dfeatures);
    Mat(inputPoints).copyTo(input2Dfeatures);
    //Calculates the homography
    vector<uchar> outlierMask;
    Mat H;
    H = findHomography(marker2Dfeatures,input2Dfeatures,outlierMask,RANSAC,3);
    //Create the mask to calc the extreme points
    int width=image.size().width;
    int height=image.size().height;
    Mat mask = Mat(height,width,CV_8UC1,0.0);
    //Draw a white point in the mask to detect the marker in the input image
    switch(cardinal){
        case 0:         //NW
            mask.at<uchar>(height-1,0)=255;
        break;
        case 1:         //NE
            mask.at<uchar>(height-1,width-1)=255;
        break;
        case 2:         //SE
            mask.at<uchar>(0,width-1)=255;
        break;
        case 3:         //SW
            mask.at<uchar>(0,0)=255;
        break;
    }
    //Detect the mask in the inputImage image using the homography
    Mat maskedImage;
    warpPerspective(mask,maskedImage,H,inputImage.size(),INTER_LINEAR,BORDER_CONSTANT);
    //FIXIT: Get the pixel with maximum value
    //Find the point in the maskedImage
    Point2f extremePoint;
    for(int i=0;i<maskedImage.rows;i++){
        for(int j=0;j<maskedImage.cols;j++){
            int pixel= maskedImage.at<uchar>(i,j);
            if(pixel>0){
                extremePoint.x=j;
                extremePoint.y=i;
                i=maskedImage.rows;
                j=maskedImage.cols;
            }
        }
    }
    //Returns the point
    point=extremePoint; 
    //DEBUG CODE
    bool debug=false;
    if(debug){
        //Shows the match between keypoints marker and keypoints input image
        Mat outimg;
        drawMatches(image,keypoints,inputImage,inputKeypoints,matches,outimg, Scalar::all(-1), Scalar::all(-1),reinterpret_cast<const vector<char>&> (outlierMask));
        imwrite("data/output/"+name+"_keypoints.jpg",outimg);
        
        //Draws a cross in the extreme point
        Mat cross=inputImage;    
        line(cross,Point(point.x,0),Point(point.x,cross.rows),CV_RGB(200,200,200));
        line(cross,Point(0,point.y),Point(cross.cols,point.y),CV_RGB(200,200,200));
        imwrite("data/output/"+name+"_corners.jpg",cross);

        //Save a text file with the matches between the marker and the input image
        ofstream km;
        string nameMatches="data/output/"+name+"_matches.txt";
        km.open(nameMatches.c_str());
        km<<
                "markerId(Qrx,Qry)\t"
                "inputId(Scanx,Scany)\t"
                "distance\t"
                "imgIdx"<<endl;
        for(size_t i=0;i<matches.size();i++){
            km<<
                matches[i].queryIdx<<"("<<keypoints[matches[i].queryIdx].pt.x<<","<<keypoints[matches[i].queryIdx].pt.y<<")\t"<<
                matches[i].trainIdx<<"("<<inputKeypoints[matches[i].trainIdx].pt.x<<","<<inputKeypoints[matches[i].trainIdx].pt.y<<")\t"<<
                matches[i].distance<<"\t"<<
                matches[i].imgIdx<<endl;
        }
        km.close();
    }
}
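get_corresp_points below reads several names from its enclosing scope and calls helpers that are not shown; a minimal sketch of the assumed context (all hypothetical, inferred from the usage):

// Hypothetical context assumed by get_corresp_points: the Match record it
// returns, the point-distance helper it calls, and the detector parameters
// and mask it reads from the enclosing scope.
struct Match {
	Point2f first_pt, second_pt;
	double distance;
	Match(Point2f a, Point2f b, double d) : first_pt(a), second_pt(b), distance(d) {}
};
static double distanceEuclidian(const Point2f& a, const Point2f& b) {
	return norm(a - b);   // L2 distance between the two points
}
Mat mask;   // detection mask, assumed set by the caller to cover both images
int blockSize = 3;
double qualitylevel = 0.01, minDistance = 10.0, k = 0.04;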
  vector<Match> get_corresp_points(Mat *left_image, 
  				 Mat *right_image, 
  				 int cornercount = 12) {
  	Mat firstImg, secondImg;
  	vector<Point2f> resf;
  	vector<Match> res_matches;

  	vector<uchar> status;
  	vector<float> err;

  	cvtColor(*left_image, firstImg, CV_RGB2GRAY); 
  	cvtColor(*right_image, secondImg, CV_RGB2GRAY);

  	int width, height;
  	width = min(firstImg.cols,secondImg.cols);
  	height = min(firstImg.rows,secondImg.rows);
  	int thresh = 240;

  	firstImg = firstImg(Rect(0, 0, width, height)); 
  	secondImg = secondImg(Rect(0, 0, width, height));
  	mask = mask(Rect(0, 0, width, height));

  	blockSize = min(height/2, width/2) - 2;
  	if (blockSize < 1 ) return res_matches;

  	GoodFeaturesToTrackDetector detector(cornercount, qualitylevel,
  	                                     minDistance, blockSize, false, k);
  	vector<KeyPoint> keypoints_1, keypoints_2;
  	detector.detect(firstImg, keypoints_1, mask);
  	detector.detect(secondImg, keypoints_2, mask);

  	SurfDescriptorExtractor extractor; 
  	Mat descriptors1, descriptors2;

  	extractor.compute(firstImg, keypoints_1, descriptors1);
  	extractor.compute(secondImg, keypoints_2, descriptors2);

  	BruteForceMatcher<L2<float>> matcher;
  	vector<DMatch> matches; 
  	matcher.match(descriptors1, descriptors2, matches);
  	double distance;
  	bool contains1 = false, contains2 = false;

  	for (size_t m = 0; m < matches.size(); m++) {
  		distance = distanceEuclidian(keypoints_1[matches[m].queryIdx].pt,
  		                             keypoints_2[matches[m].trainIdx].pt);
  		if (distance < 50) {
  			// skip pairs whose endpoints already belong to an accepted match
  			for (size_t l = 0; l < res_matches.size(); l++) {
  				if (keypoints_1[matches[m].queryIdx].pt == res_matches[l].first_pt) {
  					contains1 = true;
  					break;
  				}
  				if (keypoints_2[matches[m].trainIdx].pt == res_matches[l].second_pt) {
  					contains2 = true;
  					break;
  				}
  			}
  			if (!contains1 && !contains2) {
  				res_matches.push_back(Match(keypoints_1[matches[m].queryIdx].pt,
  				                            keypoints_2[matches[m].trainIdx].pt,
  				                            distance));
  			}
  			contains1 = false;
  			contains2 = false;
  		}
  	}
  	return res_matches;
  }
Example #15
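This snippet uses a number of globals defined elsewhere in the original file; a minimal sketch of the declarations it appears to assume (names taken from the code below, all hypothetical):

// Hypothetical globals assumed by the snippet: images, SIFT detector,
// keypoint and match containers, plus the trackbar state used by the
// interface() callback.
Mat image1, image2;
int rows, cols;
SiftFeatureDetector sift;
vector<KeyPoint> keypoints1, keypoints2;
vector<DMatch> matches, matchesWithDist;
const char* transparency_window = "Transparency";
int thresh = 100;
int max_thresh = 255;
void interface( int, void* );   // trackbar callback, defined elsewhere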
/**
 * @function main
 */
int main( int, char** argv )
{
  image1 = imread( argv[1], 1 );
  image2 = imread( argv[2], 1 );
  rows=image1.rows;
  cols=image1.cols;

  
  namedWindow( "image1", WINDOW_AUTOSIZE );
  imshow( "image1",image1 );
  namedWindow( "image2", WINDOW_AUTOSIZE );
  imshow( "image2",image2 );

  

  sift.detect(image1,keypoints1);
  sift.detect(image2,keypoints2);

  // Create the FREAK extractor through the factory interface (DescriptorExtractor
  // is abstract, so it must be held through a Ptr); compute() then fills the
  // binary descriptors for each image.
  Ptr<DescriptorExtractor> FreakDesc = DescriptorExtractor::create("FREAK");

  Mat descriptors1, descriptors2;
  FreakDesc->compute(image1, keypoints1, descriptors1);
  FreakDesc->compute(image2, keypoints2, descriptors2);

  
  
  // Construction of the matcher (Hamming distance for binary descriptors)
  //BruteForceMatcher< HammingLUT > matcher;
  BruteForceMatcher<Hamming> matcher;

  Mat descriptorAuxKp1;
  Mat descriptorAuxKp2;


  vector<int> associateIdx;

  for(int i=0;i<descriptors1.rows;i++){
    // copy row i of the descriptor matrix: the descriptor values for keypoints1[i]
    descriptors1.row(i).copyTo(descriptorAuxKp1);

    // collect only the image-2 descriptors whose keypoints we want to compare
    // against the image-1 keypoint currently being processed
    descriptorAuxKp2.create(0,0,CV_8UC1);

    // associateIdx maps the indices returned by match() back to keypoint indices
    associateIdx.erase(associateIdx.begin(),associateIdx.end());

    for(int j=0;j<descriptors2.rows;j++){

      float p1x=keypoints1[i].pt.x;
      float p1y=keypoints1[i].pt.y;
      float p2x=keypoints2[j].pt.x;
      float p2y=keypoints2[j].pt.y;

      float distance=sqrt(pow((p1x-p2x),2)+pow((p1y-p2y),2));

      // among the descriptors2 rows, keep only those whose keypoint lies within
      // a fixed distance of the current keypoint (the i-th one here)
      if(distance<4){
        descriptorAuxKp2.push_back(descriptors2.row(j));
        associateIdx.push_back(j);
      }
    }
    // match this single image-1 keypoint against the kept image-2 keypoints
    matcher.match(descriptorAuxKp1,descriptorAuxKp2, matches);

    // rewrite the match attributes so they index the original keypoint lists
    for(int idxMatch=0;idxMatch<(int)matches.size();idxMatch++){
      // we compared keypoint i ...
      matches[idxMatch].queryIdx=i;
      // ... with keypoint j of image 2
      matches[idxMatch].trainIdx=associateIdx[matches[idxMatch].trainIdx];
    }

    // append the matches found for this point to the accumulated list
    matchesWithDist.insert(matchesWithDist.end(),matches.begin(),matches.end());
  }



// partition matchesWithDist by descriptor-space distance (DMatch::operator<
// compares distance), not by Euclidean point distance
nth_element(matchesWithDist.begin(),    // initial position
            matchesWithDist.begin()+24, // position of the sorted element
            matchesWithDist.end());     // end position (assumes > 24 matches)

Mat imageMatches;
Mat matchesMask;
drawMatches(
  image1,keypoints1, // 1st image and its keypoints
  image2,keypoints2, // 2nd image and its keypoints
  matchesWithDist,            // the matches
  imageMatches,      // the image produced
  Scalar::all(-1),   // color of the lines
  Scalar(255,255,255) //color of the keypoints
  );


  namedWindow( "Matches BRIEF", CV_WINDOW_AUTOSIZE );
  imshow( "Matches BRIEF", imageMatches );
  imwrite("resultat.png", imageMatches);
  

  
  /// Create a window and a trackbar
  namedWindow(transparency_window, WINDOW_AUTOSIZE );
  createTrackbar( "Threshold: ", transparency_window, &thresh, max_thresh, interface );
  //imshow(transparency_window,image1 );







  interface( 0, 0 );

  waitKey(0);
  return(0);
}
Example #16
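This FREAK demo reads its images relative to a kResPath constant that is not shown; a hypothetical definition:

// Hypothetical resource-path constant assumed by the demo below; point it at
// the directory holding the FREAK sample data.
static const std::string kResPath = "./";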
int main( int argc, char** argv ) {
    // check http://opencv.itseez.com/doc/tutorials/features2d/table_of_content_features2d/table_of_content_features2d.html
    // for OpenCV general detection/matching framework details

    // Load images
    Mat imgA = imread(kResPath + "images/graf/img1.ppm", CV_LOAD_IMAGE_GRAYSCALE );
    if( !imgA.data ) {
        std::cout<< " --(!) Error reading images " << std::endl;
        return -1;
    }

    Mat imgB = imread(kResPath + "images/graf/img3.ppm", CV_LOAD_IMAGE_GRAYSCALE );
    if( !imgB.data ) {
        std::cout << " --(!) Error reading images " << std::endl;
        return -1;
    }

    std::vector<KeyPoint> keypointsA, keypointsB;
    Mat descriptorsA, descriptorsB;

    std::vector< DMatch>   matches;

    // DETECTION
    // Any openCV detector such as
    SurfFeatureDetector detector(2000,4);

    // DESCRIPTOR
    // Our proposed FREAK descriptor
    // (rotation invariance, scale invariance, pattern radius corresponding to SMALLEST_KP_SIZE, number of octaves, file containing list of selected pairs)
    // FreakDescriptorExtractor extractor(true, true, 22, 4, kResPath + "selected_pairs.bin");
    FreakDescriptorExtractor extractor(true, true, 22, 4, "");

    // MATCHER
    // The standard Hamming distance can be used such as
    // BruteForceMatcher<Hamming> matcher;
    // or the proposed cascade of hamming distance
#ifdef USE_SSE
    BruteForceMatcher< HammingSeg<30,4> > matcher;
#else
    BruteForceMatcher<Hamming> matcher;
#endif

    // detect
    double t = (double)getTickCount();
    detector.detect( imgA, keypointsA );
    detector.detect( imgB, keypointsB );
    t = ((double)getTickCount() - t)/getTickFrequency();
    std::cout << "detection time [s]: " << t/1.0 << std::endl;

    // extract
    t = (double)getTickCount();
    extractor.compute( imgA, keypointsA, descriptorsA );
    extractor.compute( imgB, keypointsB, descriptorsB );
    t = ((double)getTickCount() - t)/getTickFrequency();
    std::cout << "extraction time [s]: " << t << std::endl;

    // match
    t = (double)getTickCount();
    matcher.match(descriptorsA, descriptorsB, matches);
    t = ((double)getTickCount() - t)/getTickFrequency();
    std::cout << "matching time [s]: " << t << std::endl;

    // Draw matches
    Mat imgMatch;
    drawMatches(imgA, keypointsA, imgB, keypointsB, matches, imgMatch);

    namedWindow("matches", CV_WINDOW_KEEPRATIO);
    imshow("matches", imgMatch);
    waitKey(0);

    /////////////////////////////////////////////////
    //
    //PAIRS SELECTION
    //FREAK ships with a set of pairs learned off-line. Researchers can run the
    //training process to learn their own set of pairs. For more details read
    //section 4.2 in:
    //A. Alahi, R. Ortiz, and P. Vandergheynst. FREAK: Fast Retina Keypoint. In IEEE Conference on Computer Vision and Pattern Recognition, 2012.

    //We noticed that for keypoint matching applications, image content has little
    //effect on the selected pairs unless it is very specific; what does matter is
    //the detector type (blobs, corners, ...) and the options used (scale/rotation
    //invariance, ...). Reduce corrThresh if not enough pairs are selected
    //(43 points --> 903 possible pairs).
    // Un-comment the following lines if you want to run the training process to learn the best pairs:
    /*
    std::vector<string> filenames;
    filenames.push_back(kResPath + "images/train/1.jpg");
    filenames.push_back(kResPath + "images/train/2.jpg");

    std::vector<Mat> images(filenames.size());
    std::vector< std::vector<KeyPoint> > keypoints(filenames.size());

    for( size_t i = 0; i < filenames.size(); ++i ) {
        images[i] = imread( filenames[i].c_str(), CV_LOAD_IMAGE_GRAYSCALE );
        if( !images[i].data ) {
            std::cout<< " --(!) Error reading images " << std::endl;
            return -1;
        }
        detector.detect( images[i], keypoints[i] );
    }
    extractor.selectPairs(images, keypoints, kResPath + "selected_pairs2", 0.7);
    */
    return 0;
}
Example #17
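FeatureDetect() below relies on RGBDFramePair members that are not shown; the assumed interface, sketched as comments from the usage in the method:

// Hypothetical RGBDFramePair members assumed below (inferred, not the original):
//   mainFrame, refFrame       - frames exposing getDepthMap() and getPoints()
//   mainFPoints, refFPoints   - matched 3D points accumulated per frame
//   mainFPIndex, refFPIndex   - indices of those points within each frame
//   FPSize, FPUsed            - number of feature pairs and per-pair usage flags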
void RGBDFramePair::FeatureDetect()
{
  Mat img_1(mainFrame->getDepthMap().getRGBImage());
  Mat img_2(refFrame->getDepthMap().getRGBImage());


  vector<KeyPoint> keyPoints_1, keyPoints_2;  
  Mat descriptors_1, descriptors_2;  

  /*ORB orb;  
  orb(img_1, Mat(), keyPoints_1, descriptors_1);  
  orb(img_2, Mat(), keyPoints_2, descriptors_2);
  BruteForceMatcher<HammingLUT> matcher;  */
  
  SIFT sift;
  sift(img_1, Mat(), keyPoints_1, descriptors_1);
  sift(img_2, Mat(), keyPoints_2, descriptors_2);
  BruteForceMatcher<L2<float> >  matcher;

  /*SURF surf;
  surf(img_1, Mat(), keyPoints_1);
  surf(img_2, Mat(), keyPoints_2);
  SurfDescriptorExtractor extrator;
  extrator.compute(img_1, keyPoints_1, descriptors_1);
  extrator.compute(img_2, keyPoints_2, descriptors_2);
  BruteForceMatcher<L2<float> >  matcher;*/


  vector<DMatch> matches;  
  matcher.match(descriptors_1, descriptors_2, matches);  

  double max_dist = 0; double min_dist = 100;  
  //-- Quick calculation of max and min distances between keypoints  
  for( int i = 0; i < descriptors_1.rows; i++ )  
  {   
    double dist = matches[i].distance;  
    if( dist < min_dist ) min_dist = dist;  
    if( dist > max_dist ) max_dist = dist;  
  }  

  std::vector< DMatch > good_matches;  
  for( size_t i = 0; i < matches.size(); i++ )  
  {   
    if( matches[i].distance < 0.5*max_dist )  
    {   
      good_matches.push_back( matches[i]);   
    }  
  }  

  Mat img_matches;  
  drawMatches(img_1, keyPoints_1, img_2, keyPoints_2,  
    good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),  
    vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);  
  imshow( "Match", img_matches);  

  for (size_t i=0;i<good_matches.size();i++)
  {
    Point2f fp1=keyPoints_1[good_matches[i].queryIdx].pt;
    Point2f fp2=keyPoints_2[good_matches[i].trainIdx].pt;
    if (mainFrame->getDepthMap().getDepth(fp1.x,fp1.y)!=mainFrame->getDepthMap().initialDepth
      &&refFrame->getDepthMap().getDepth(fp2.x,fp2.y)!=refFrame->getDepthMap().initialDepth)
    {
      int index1=mainFrame->getDepthMap().getIndex(fp1.x,fp1.y);
      int index2=refFrame->getDepthMap().getIndex(fp2.x,fp2.y);
      mainFPoints.push_back(mainFrame->getPoints()[index1]);
      refFPoints.push_back(refFrame->getPoints()[index2]);
      mainFPIndex.push_back(index1);
      refFPIndex.push_back(index2);
      //mainFPoints.push_back(mainFrame->getDepthMap().get3dPoints(fp1.x,fp1.y));
      //refFPoints.push_back(refFrame->getDepthMap().get3dPoints(fp2.x,fp2.y));
    }

  }
  FPSize=mainFPoints.size();
  FPUsed.resize(FPSize,0);
}
int main(int argc,char** argv){

Mat image1,image2;

const char* source_window = "Source image";

 // check the argument count before touching argv[2]
 if( argc != 3 )
   {
     printf( "usage: %s image1 image2\n", argv[0] );
     return 1;
   }

 /// Load images
 image1 = imread( argv[1], 1 );
 image2 = imread( argv[2], 1 );

  if( !image1.data || !image2.data )
    {
      printf( "No image data \n" );
      return 1;
    }


    int cols=image1.cols;
    int rows=image1.rows;
  //  cout<<"\nmatrix size:" << image1.size();
  //  cout<<"\nmatrix type: \n" << image1.type();
  //  cout<<"\nflags" << image1.flags;
  //  cout<<"\ndims" << image1.dims;
    cout<<"\nrows" << image1.rows;
    cout<<"\ncols" << image1.cols;
  //  Point pt = Point(1,2);

  //  cout<<"\nnumber of channels: " << image1.channels();

  //  cout<< "\npoint 1 1 " << (int)image1.at<cv::Vec3b>(0,1)[1];

    /*
    for(int i=0;i<cols;i++){
	for(int j=0;j<rows;j++){
		image1.at<cv::Vec3b>(i,j)[0]=0;
		image1.at<cv::Vec3b>(i,j)[1]=0;
		image1.at<cv::Vec3b>(i,j)[2]=0;
	}
    }
    */

    cout<< "\nwhat is going on here?";

 // cout<<"\nimage1" <<  image1; 

 /// vector of keypoints 
  vector<KeyPoint> keypoints1,keypoints2;



///Construct the SURF feature detector object
  SiftFeatureDetector sift;

  sift.detect(image1,keypoints1);
  sift.detect(image2,keypoints2);

  namedWindow( "Image 1", CV_WINDOW_AUTOSIZE );
  imshow( "Image 1", image1 );
  namedWindow( "Image 2", CV_WINDOW_AUTOSIZE );
  imshow( "Image 2", image2 );
  //print the coordinates of the keypoints
	/*for(int i=0;i<keypoints1.size();i++){
        cout<<"\n\nkeypoint number " << i <<"\n";
	cout<<"\nkeypoints1" <<  keypoints1[i].pt; 
  	cout<<"\nkeypoints1x " <<  keypoints1[i].pt.x; 
	cout<<"\nkeypoints1y " <<  keypoints1[i].pt.y; 
	}*/


  /*Mat imcopy;
  image1.copyTo(imcopy);
  for(int i=0;i<keypoints1.size();i++){
     imcopy.at<cv::Vec3b>(keypoints1[i].pt.y,keypoints1[i].pt.x)[0]=0;
     imcopy.at<cv::Vec3b>(keypoints1[i].pt.y,keypoints1[i].pt.x)[1]=0;
     imcopy.at<cv::Vec3b>(keypoints1[i].pt.y,keypoints1[i].pt.x)[2]=255;
  }
  namedWindow( "Image copy", CV_WINDOW_AUTOSIZE );
  imshow( "Image copy",  imcopy );
  */

 
  cout << "\ntaille du vecteur de keypoints: " << keypoints1.size(); 

  
  SiftDescriptorExtractor siftDesc;
  
  Mat descriptors1,descriptors2;
  siftDesc.compute(image1,keypoints1,descriptors1);
  siftDesc.compute(image2,keypoints2,descriptors2);
  
   // Construction of the matcher
BruteForceMatcher<L2<float> > matcher;

// Match the two image descriptors
vector<DMatch> matches;
matcher.match(descriptors1,descriptors2, matches);

nth_element(matches.begin(),    // initial position
          matches.begin()+24, // position of the sorted element
          matches.end());     // end position
      // note: nth_element only partitions; nothing is removed, the 25 best
      // matches simply come first
	//display the element attributes
	//cout<< "\nmatches  " <<  matches;

	//print the matches

	for(int i=0;i<(int)matches.size();i++){
//print the attributes
/*		cout<< "\n\npoint num " <<  i;		
		cout<< "\nimgIdx  " <<  matches[i].imgIdx ;	
		cout<< "\nqueryIdx   " <<  matches[i].queryIdx;
		cout<< "\ntrainIdx   " <<  matches[i].trainIdx;
		cout<< "\ndistance   " <<  matches[i].distance;
*/
/*
		while(matches[i].distance >100  && i<matches.size()){
			cout << "\ni= " << i;
			matches.erase(matches.begin()+i, matches.begin()+i+1);
		}
*/
	}
        

for(int i=0;i<(int)matches.size();i++){
		cout<< "\nLinking the point with coordinates x1= " << keypoints1[matches[i].queryIdx].pt.x;
		cout<< "\ny1= " << keypoints1[matches[i].queryIdx].pt.y;

		cout<< "\nto the point with coordinates x2= " << keypoints2[matches[i].trainIdx].pt.x;
 		cout<< "\ny2= " << keypoints2[matches[i].trainIdx].pt.y;
}

      cout << '\n' << "number of correspondences: " << matches.size() << '\n';  
	
      
      //matches.erase(matches.begin(), matches.end());
      //keypoints1.erase(keypoints1.begin(), keypoints1.end());
      //keypoints2.erase(keypoints2.begin(), keypoints2.end());
      //matches.erase(matches.begin(), matches.begin()+1600);



Mat imageMatches;
Mat matchesMask;
drawMatches(
  image1,keypoints1, // 1st image and its keypoints
  image2,keypoints2, // 2nd image and its keypoints
  matches,            // the matches
  imageMatches,      // the image produced
  Scalar::all(-1),   // color of the lines
  Scalar(255,255,255) //color of the keypoints
  );
  namedWindow( "Matches SIFT", CV_WINDOW_AUTOSIZE );
  imshow( "Matches SIFT", imageMatches );
  imwrite("resultat.png", imageMatches);

  /*
  drawKeypoints(src,keypoints1,dst,cv::Scalar(255,255,255));
  cout << '\n' << keypoints1.size() << '\n';
  imshow( "Image 1", dst );
  
  imwrite("resultat.png", dst);
  */


  waitKey(0);

  return 0;
}
Example #19
// main entry point
int main(int argc, char *argv[])
{

    // load images
    Mat tmp = cv::imread( "set1/pic1.jpg", CV_LOAD_IMAGE_COLOR );
    Mat in  = cv::imread( "set1/pic2.jpg", CV_LOAD_IMAGE_COLOR );
    
    
    // SIFT feature detector and feature extractor
    SiftFeatureDetector detector( 0.02, 5.0 );
    SiftDescriptorExtractor extractor( 2.0 );

    // Feature detection
    
    vector<KeyPoint> keypoints1, keypoints2;
    detector.detect( tmp, keypoints1 );
    detector.detect( in, keypoints2 );
    
    // Feature display
    Mat feat1,feat2;
    drawKeypoints(tmp,keypoints1,feat1,Scalar(255, 255, 255),DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
    drawKeypoints(in,keypoints2,feat2,Scalar(255, 255, 255),DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
    imwrite( "feat1.png", feat1 );
    imwrite( "feat2.png", feat2 );
    
    // Feature descriptor computation
    Mat descriptor1,descriptor2;
    extractor.compute( tmp, keypoints1, descriptor1 );
    extractor.compute( in, keypoints2, descriptor2 );
    
    // corresponded points
    vector<DMatch> matches;
    
    // L2 distance based matching. Brute Force Matching
    BruteForceMatcher<L2<float>> matcher;
    
    // display of corresponding points
    matcher.match( descriptor1, descriptor2, matches );
    
    
    double max_dist = 0; double min_dist = 100;
    
    //-- Quick calculation of max and min distances between keypoints
    for( int i = 0; i < descriptor1.rows; i++ )
    {
        double dist = matches[i].distance;
        cout << dist << endl;
        if( dist < min_dist ) min_dist = dist;
        if( dist > max_dist ) max_dist = dist;
    }
    
    //-- Keep only the "good" matches, i.e. those below the distance threshold
    std::vector< DMatch > good_matches;
    
    for( int i = 0; i < descriptor1.rows; i++ ){
        if( matches[i].distance <= max(2.25*min_dist, 0.01))
        { good_matches.push_back( matches[i]); }
    }
    
    //-- Draw only "good" matches
    Mat img_matches;
    drawMatches( tmp, keypoints1, in, keypoints2,
                good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
                vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
    
    // matching result
    
    // output file
    imwrite( "result.png", img_matches);
    
    // display the result
    namedWindow("SIFT", CV_WINDOW_AUTOSIZE );
    imshow("SIFT", img_matches);
    waitKey(0); //press any key to quit
    
    return 0;
}
Example #20
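main() below calls a help() function that is not shown; a minimal sketch, assuming it only prints usage:

// Hypothetical usage printer assumed by main() below.
static void help( char** argv )
{
    std::cout << "Usage: " << argv[0] << " image1 image2" << std::endl;
}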
int main( int argc, char** argv ) {
    // check http://docs.opencv.org/doc/tutorials/features2d/table_of_content_features2d/table_of_content_features2d.html
    // for OpenCV general detection/matching framework details

    if( argc != 3 ) {
        help(argv);
        return -1;
    }

    // Load images
    Mat imgA = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE );
    if( !imgA.data ) {
        std::cout<< " --(!) Error reading image " << argv[1] << std::endl;
        return -1;
    }

    Mat imgB = imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE );
    if( !imgB.data ) {
        std::cout << " --(!) Error reading image " << argv[2] << std::endl;
        return -1;
    }

    std::vector<KeyPoint> keypointsA, keypointsB;
    Mat descriptorsA, descriptorsB;
    std::vector<DMatch> matches;

    // DETECTION
    // Any openCV detector such as
    SurfFeatureDetector detector(1000,4);

    // DESCRIPTOR
    // Our proposed FREAK descriptor
    // (rotation invariance, scale invariance, pattern radius corresponding to SMALLEST_KP_SIZE,
    // number of octaves, optional vector containing the selected pairs)
    // FREAK extractor(true, true, 22, 4, std::vector<int>());
    FREAK extractor;

    // MATCHER
    // The standard Hamming distance can be used such as
    // BruteForceMatcher<Hamming> matcher;
    // or the proposed cascade of hamming distance using SSSE3
    BruteForceMatcher<Hamming> matcher;

    // detect
    double t = (double)getTickCount();
    detector.detect( imgA, keypointsA );
    detector.detect( imgB, keypointsB );
    t = ((double)getTickCount() - t)/getTickFrequency();
    std::cout << "detection time [s]: " << t/1.0 << std::endl;

    std::cout << " nb key points : " << keypointsA.size() << "," <<keypointsB.size() << std::endl;
    // extract
    t = (double)getTickCount();
    extractor.compute( imgA, keypointsA, descriptorsA );
    extractor.compute( imgB, keypointsB, descriptorsB );
    t = ((double)getTickCount() - t)/getTickFrequency();
    std::cout << "extraction time [s]: " << t << std::endl;

    // match
    t = (double)getTickCount();
    matcher.match(descriptorsA, descriptorsB, matches);
    t = ((double)getTickCount() - t)/getTickFrequency();
    std::cout << "matching time [s]: " << t << std::endl;

    // Draw matches
    Mat imgMatch;
    drawMatches(imgA, keypointsA, imgB, keypointsB, matches, imgMatch);

    namedWindow("matches", CV_WINDOW_KEEPRATIO);
    imshow("matches", imgMatch);
    waitKey(0);

    return 0;
}
Example #21
int main()
{
	// Load the input images
	Mat image1 = imread("E:/桌面资料/编程/openCV/opencv-2-cookbook-src-master/images/church01.jpg", 0);
	Mat image2 = imread("E:/桌面资料/编程/openCV/opencv-2-cookbook-src-master/images/church03.jpg", 0);
	if (!image1.data || !image2.data)
		return 0;

	// Display the input images
	namedWindow("Right Image");
	imshow("Right Image", image1);
	namedWindow("Left Image");
	imshow("Left Image", image2);

	// Keypoint containers
	vector<KeyPoint> keypoints1;
	vector<KeyPoint> keypoints2;

	// Construct the SURF feature detector
	SurfFeatureDetector surf(2500);

	// Detect the SURF features
	surf.detect(image1, keypoints1);
	surf.detect(image2, keypoints2);

	cout << "Number of SURF points (1): " << keypoints1.size() << endl;
	cout << "Number of SURF points (2): " << keypoints2.size() << endl;

	// Draw the keypoints
	Mat imageKP;
	drawKeypoints(image1, keypoints1, imageKP, Scalar(255, 255, 255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
	namedWindow("Right SURF Features");
	imshow("Right SURF Features", imageKP);

	drawKeypoints(image2, keypoints2, imageKP, Scalar(255, 255, 255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
	namedWindow("Left SURF Features");
	imshow("Left SURF Features", imageKP);

	// Construct the SURF descriptor extractor
	SurfDescriptorExtractor surfDesc;

	// Extract the SURF descriptors
	Mat descriptors1, descriptors2;
	surfDesc.compute(image1, keypoints1, descriptors1);
	surfDesc.compute(image2, keypoints2, descriptors2);

	cout << "descriptor matrix size: " << descriptors1.rows << " by " << descriptors1.cols << endl;

	// Construct the matcher
	BruteForceMatcher<L2<float>> matcher;

	// Match the descriptors of the two images
	vector<DMatch> matches;
	matcher.match(descriptors1, descriptors2, matches);

	cout << "Number of matched points: " << matches.size() << endl;

	// Select a small set of matches
	vector<DMatch> selMatches;
	/*
	keypoints1.push_back(KeyPoint(342.,615.,2));
	keypoints2.push_back(KeyPoint(410.,600.,2));
	selMatches.push_back(DMatch(keypoints1.size()-1,keypoints2.size()-1,0)); // street light bulb
	selMatches.push_back(matches[6]);  // right tower
	selMatches.push_back(matches[60]);  // left bottom window
	selMatches.push_back(matches[139]);
	selMatches.push_back(matches[141]);  // middle window
	selMatches.push_back(matches[213]);
	selMatches.push_back(matches[273]);

	int kk=0;
	while (kk<matches.size()) 
	{
	cout<<kk<<endl;
	selMatches.push_back(matches[kk++]);
	selMatches.pop_back();
	waitKey();
	}
	*/

	cout << matches.size() << endl;

	/* between church01 and church03 */
	selMatches.push_back(matches[14]);
	selMatches.push_back(matches[16]);
	selMatches.push_back(matches[141]);
	selMatches.push_back(matches[146]);
	selMatches.push_back(matches[235]);
	selMatches.push_back(matches[238]);
	selMatches.push_back(matches[273]); // needs >= 274 matches, else "vector subscript out of range"

	// Draw the selected matches
	Mat imageMatches;
	drawMatches(image1, keypoints1,  // first image and its keypoints
		image2, keypoints2,  // second image and its keypoints
		//selMatches,		// the selected matches
		matches,			// all matches
		imageMatches,		// the image produced
		Scalar(255, 255, 255)); // line color

	namedWindow("Matches");
	imshow("Matches", imageMatches);

	// Convert one vector of KeyPoint into two vectors of Point2f
	vector<int> pointIndexes1;
	vector<int> pointIndexes2;
	for (vector<DMatch>::const_iterator it = selMatches.begin(); it != selMatches.end(); ++it) 
	{
		// Get the indexes of the selected matched points
		pointIndexes1.push_back(it->queryIdx);
		pointIndexes2.push_back(it->trainIdx);
	}
	}

	// Convert the keypoints to Point2f
	vector<Point2f> selPoints1, selPoints2;
	KeyPoint::convert(keypoints1, selPoints1, pointIndexes1);
	KeyPoint::convert(keypoints2, selPoints2, pointIndexes2);

	// check by drawing the points 
	vector<Point2f>::const_iterator it = selPoints1.begin();
	while (it != selPoints1.end()) 
	{
		// Draw a circle at each corner location
		circle(image1, *it, 3, Scalar(255, 255, 255), 2);
		++it;
	}

	it = selPoints2.begin();
	while (it != selPoints2.end()) 
	{
		// Draw a circle at each corner location
		circle(image2, *it, 3, Scalar(255, 255, 255), 2);
		++it;
	}

	// Compute the F matrix from the 7 selected matches
	Mat fundamental = findFundamentalMat(
		Mat(selPoints1), // points in image 1
		Mat(selPoints2), // points in image 2
		CV_FM_7POINT);   // use the 7-point method

	cout << "F-Matrix size= " << fundamental.rows << "," << fundamental.cols << endl;

	// Draw the corresponding epipolar lines in the right image
	vector<Vec3f> lines1;
	computeCorrespondEpilines(
		Mat(selPoints1), // image points
		1,               // in image 1 (can also be 2)
		fundamental,     // the F matrix
		lines1);         // the resulting epipolar lines

	// For all epipolar lines
	for (vector<Vec3f>::const_iterator it = lines1.begin(); it != lines1.end(); ++it) 
	{
		// Draw the line between the first and last columns
		line(image2, Point(0, -(*it)[2] / (*it)[1]),
			Point(image2.cols, -((*it)[2] + (*it)[0] * image2.cols) / (*it)[1]),
			Scalar(255, 255, 255));
	}

	// Draw the corresponding epipolar lines in the left image
	vector<Vec3f> lines2;
	computeCorrespondEpilines(Mat(selPoints2), 2, fundamental, lines2);

	for (vector<Vec3f>::const_iterator it = lines2.begin(); it != lines2.end(); ++it) 
	{
		// Draw the line between the first and last columns
		line(image1, Point(0, -(*it)[2] / (*it)[1]),
			Point(image1.cols, -((*it)[2] + (*it)[0] * image1.cols) / (*it)[1]),
			Scalar(255, 255, 255));
	}

	// Display the images with the drawn points and epipolar lines
	namedWindow("Right Image Epilines");
	imshow("Right Image Epilines", image1);
	namedWindow("Left Image Epilines");
	imshow("Left Image Epilines", image2);

	waitKey();
	return 0;
}