Example #1
vector<KeyPoint> Pyramids::extractKeypoints(const Mat &m)
{
	vector<KeyPoint> keypoints;
	SiftFeatureDetector dec;
	dec.detect(m, keypoints);
	return keypoints;
}
void detectSiftMatchWithOpenCV(const char* img1_path, const char* img2_path, MatrixXf &match) {
  Mat img1 = imread(img1_path);   
  Mat img2 = imread(img2_path);   

  SiftFeatureDetector detector;
  SiftDescriptorExtractor extractor;
  vector<KeyPoint> key1;
  vector<KeyPoint> key2;
  Mat desc1, desc2;
  detector.detect(img1, key1);
  detector.detect(img2, key2);
  extractor.compute(img1, key1, desc1);
  extractor.compute(img2, key2, desc2);

  FlannBasedMatcher matcher;
  vector<DMatch> matches;
  matcher.match(desc1, desc2, matches);

  match.resize(matches.size(), 6);
  cout << "match count: " << matches.size() << endl;
  for (int i = 0; i < matches.size(); i++) {
    match(i, 0) = key1[matches[i].queryIdx].pt.x;
    match(i, 1) = key1[matches[i].queryIdx].pt.y;
    match(i, 2) = 1;
    match(i, 3) = key2[matches[i].trainIdx].pt.x;
    match(i, 4) = key2[matches[i].trainIdx].pt.y;
    match(i, 5) = 1;
  }
  
}
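Each row of the output matrix packs one correspondence as two homogeneous 2D points, (x1, y1, 1, x2, y2, 1). A minimal sketch of consuming it (the image paths here are placeholders):

MatrixXf match;
detectSiftMatchWithOpenCV("img1.jpg", "img2.jpg", match);
// first correspondence: point in image 1 and its match in image 2
float x1 = match(0, 0), y1 = match(0, 1);
float x2 = match(0, 3), y2 = match(0, 4);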
void SIFTfeatureCalculate(Mat &img, vector<KeyPoint> &keypoints, Mat &descriptors){
    SiftFeatureDetector detector;
    SiftDescriptorExtractor extractor;
    
    detector.detect( img, keypoints );
    extractor.compute( img, keypoints, descriptors );
}
Example #4
Mat  tSIFT(String path)
{
	Mat img = imread(path, CV_LOAD_IMAGE_GRAYSCALE);
	// keypoint descriptors
	Mat des;
	if (!img.data){
		std::cout << "Can't open" << std::endl;
		system("Pause");
		exit(0);
	}

	SiftFeatureDetector detector;
	std::vector<KeyPoint> tSIFTkp;
	detector.detect(img, tSIFTkp);
	Mat img1;
	drawKeypoints(img, tSIFTkp, img1, Scalar::all(-1), 4);
	//FeaturesExtract
	SiftDescriptorExtractor extractor;
	// extract the feature vectors (descriptors)
	extractor.compute(img,tSIFTkp,des);

	showImg(img1);

	return des;
}
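showImg is defined elsewhere in that project; a minimal stand-in consistent with how it is called above (the window title is chosen arbitrarily here):

void showImg(const Mat &img)
{
	namedWindow("keypoints", CV_WINDOW_AUTOSIZE);
	imshow("keypoints", img);
	waitKey(0);
}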
Example #5
int run_demo()
{
	//cv::initModule_nonfree();
	//cout <<"initModule_nonfree() called" << endl;

	// Input and output image path.
	const char * imgInFile = "/sdcard/nonfree/img1.jpg";
	const char * imgOutFile = "/sdcard/nonfree/img1_result.jpg";

	Mat image;
	image = imread(imgInFile, CV_LOAD_IMAGE_COLOR);
	if(! image.data )
	{
		LOGI("Could not open or find the image!\n");
		return -1;
	}

	vector<KeyPoint> keypoints;
	Mat descriptors;

	// Create a SIFT keypoint detector.
	SiftFeatureDetector detector;
	detector.detect(image, keypoints);
	LOGI("Detected %d keypoints\n", (int) keypoints.size());

	// Compute feature description.
	detector.compute(image,keypoints, descriptors);
	LOGI("Compute feature.\n");

	// Store description to "descriptors.des".
	FileStorage fs;
	fs.open("descriptors.des", FileStorage::WRITE);
	LOGI("Opened file to store the features.\n");
	fs << "descriptors" << descriptors;
	LOGI("Finished writing file.\n");
	fs.release();
	LOGI("Released file.\n");

	// Show keypoints in the output image.
	Mat outputImg;
	Scalar keypointColor = Scalar(255, 0, 0);
	drawKeypoints(image, keypoints, outputImg, keypointColor, DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
	LOGI("Drew keypoints in output image file.\n");

#ifdef WIN32
	namedWindow("Output image", CV_WINDOW_AUTOSIZE );
	imshow("Output image", outputImg);
	waitKey(0);
#endif
	
	LOGI("Generate the output image.\n");
	imwrite(imgOutFile, outputImg);

	LOGI("Done.\n");
	return 0;
}
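The descriptors saved above can be loaded back through the same FileStorage API; a minimal read-back sketch using the file and node names from the code above (the loadDescriptors wrapper is illustrative):

Mat loadDescriptors()
{
	// read the "descriptors" node back from the file written by run_demo()
	FileStorage fs("descriptors.des", FileStorage::READ);
	Mat descriptors;
	fs["descriptors"] >> descriptors;
	fs.release();
	return descriptors;
}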
Example #6
int sift_feature()
{
    Mat img_1=imread("./samples/box.png",CV_LOAD_IMAGE_GRAYSCALE);// CV_LOAD_IMAGE_GRAYSCALE is defined as 0, i.e. load as a grayscale image
    Mat img_2=imread("./samples/box_in_scene.png",CV_LOAD_IMAGE_GRAYSCALE);// note the direction of the path slashes, which is the opposite of MATLAB

    if(!img_1.data || !img_2.data)// if either image failed to load
    {
        cout<<"opencv error"<<endl;
        return -1;
    }
    cout<<"images loaded"<<endl;

    // Step 1: detect keypoints with the SIFT detector

    SiftFeatureDetector detector;// default constructor parameters
    vector<KeyPoint> keypoints_1,keypoints_2;// two vectors of keypoints to hold the detected features

    detector.detect(img_1,keypoints_1);// store the features detected in img_1 in keypoints_1
    detector.detect(img_2,keypoints_2);// likewise for img_2

    // draw the keypoints on the images
    Mat img_keypoints_1,img_keypoints_2;

    drawKeypoints(img_1,keypoints_1,img_keypoints_1,Scalar::all(-1),DrawMatchesFlags::DEFAULT);// render the keypoints into memory
    drawKeypoints(img_2,keypoints_2,img_keypoints_2,Scalar::all(-1),DrawMatchesFlags::DEFAULT);

    imshow("sift_keypoints_1",img_keypoints_1);// display the keypoints
    imshow("sift_keypoints_2",img_keypoints_2);

    // compute the descriptors
    SiftDescriptorExtractor extractor;// descriptor extractor object

    Mat descriptors_1,descriptors_2;// matrices holding the descriptors

    extractor.compute(img_1,keypoints_1,descriptors_1);// compute the descriptors
    extractor.compute(img_2,keypoints_2,descriptors_2);

    // match the descriptors with brute force
    BruteForceMatcher<L2<float> > matcher;// brute-force matcher object
    vector<DMatch> matches;
    matcher.match(descriptors_1,descriptors_2,matches);

    // draw the match lines
    Mat img_matches;
    drawMatches(img_1,keypoints_1,img_2,keypoints_2,matches,img_matches);// render the match result into img_matches

    // display the matches
    imshow("sift_Matches",img_matches);// window titled sift_Matches
    waitKey(0);
    return 0;
}
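The brute-force matcher above keeps the single nearest neighbour for every descriptor, ambiguous ones included. A common refinement (not part of this example) is Lowe's ratio test via knnMatch; a minimal sketch with the same OpenCV 2.x types:

vector<DMatch> ratioTestMatch(const Mat &descriptors_1, const Mat &descriptors_2)
{
    // keep a match only when its distance is clearly smaller than the second-best one
    BruteForceMatcher<L2<float> > matcher;
    vector<vector<DMatch> > knn_matches;
    matcher.knnMatch(descriptors_1, descriptors_2, knn_matches, 2);

    vector<DMatch> good_matches;
    for (size_t i = 0; i < knn_matches.size(); i++) {
        if (knn_matches[i].size() == 2 &&
            knn_matches[i][0].distance < 0.75f * knn_matches[i][1].distance)
            good_matches.push_back(knn_matches[i][0]);
    }
    return good_matches;
}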
Example #7
int main()
{
	// read the images from file
	Mat img_1 = imread("class.png");
	Mat img_2 = imread("class2.png");
	// if loading either image failed
	if (img_1.empty() || img_2.empty())
	{
		cout << "load image error" << endl;
		return -1;
	}
	// display the images
	imshow("src image 1", img_1);
	imshow("src image 2", img_2);
	// Step 1: detect keypoints with the SIFT detector
	SiftFeatureDetector detector;// default constructor parameters
	std::vector<KeyPoint> keypoints_1, keypoints_2;// two vectors of keypoints to hold the detected features

	detector.detect(img_1, keypoints_1);// store the features detected in img_1 in keypoints_1
	detector.detect(img_2, keypoints_2);// likewise for img_2

	// draw the keypoints on the images
	Mat img_keypoints_1, img_keypoints_2;

	drawKeypoints(img_1, keypoints_1, img_keypoints_1, Scalar::all(-1), DrawMatchesFlags::DEFAULT);// render the keypoints into memory
	drawKeypoints(img_2, keypoints_2, img_keypoints_2, Scalar::all(-1), DrawMatchesFlags::DEFAULT);

	imshow("sift_keypoints_1", img_keypoints_1);// display the keypoints
	imshow("sift_keypoints_2", img_keypoints_2);

	// compute the descriptors
	SiftDescriptorExtractor extractor;// descriptor extractor object
	Mat descriptors_1, descriptors_2;// matrices holding the descriptors

	extractor.compute(img_1, keypoints_1, descriptors_1);// compute the descriptors
	extractor.compute(img_2, keypoints_2, descriptors_2);

	// match the descriptors with brute force
	BruteForceMatcher<L2<float> > matcher;// brute-force matcher object
	vector<DMatch> matches;
	matcher.match(descriptors_1, descriptors_2, matches);

	// draw the match lines
	Mat img_matches;
	drawMatches(img_1, keypoints_1, img_2, keypoints_2, matches, img_matches);// render the match result into img_matches

	// display the matches
	imshow("sift_Matches", img_matches);// window titled sift_Matches
	waitKey(0);
}
Example #8
Mat computeSifts(const string& fileName)
{
    const Mat input = cv::imread(fileName.c_str(), 0); //Load as grayscale
    if(input.empty())
        cout<<"ERROR: Image "<<fileName<<" was not read"<<endl;
    Mat descriptors;
    SiftFeatureDetector detector;
    vector<cv::KeyPoint> keypoints;
    detector.detect(input, keypoints);
    SiftDescriptorExtractor extractor;
    extractor.compute(input, keypoints, descriptors);
    // cout<<descriptors<<endl;
    return descriptors;
}
Example #9
Mat siftExtract(string imgName) {

    Mat img = imread(imgName, CV_LOAD_IMAGE_GRAYSCALE); 
    // resize(img, img, Size(), 0.625, 0.625);

    // feature detection
    // SiftFeatureDetector detector(0.05, 5.0);
    SiftFeatureDetector detector;
    vector<KeyPoint> keypoints;
    detector.detect(img, keypoints);

    // // feature extraction
    SiftDescriptorExtractor extractor(3.0);
    Mat descr;
    extractor.compute(img, keypoints, descr);
    return descr;
}
Example #10
int main( int argc, char** argv ) {
    // check http://docs.opencv.org/doc/tutorials/features2d/table_of_content_features2d/table_of_content_features2d.html
    // for OpenCV general detection/matching framework details

    if( argc != 3 ) {
        help(argv);
        return -1;
    }

    // Load images
    Mat imgA = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE );
    if( !imgA.data ) {
        std::cout<< " --(!) Error reading image " << argv[1] << std::endl;
        return -1;
    }

    std::vector<KeyPoint> keypoints1, keypoints2;
    
    // DETECTION
    // Any OpenCV detector can be used here, such as:
    SurfFeatureDetector detectorSurf(2000,4);
	SiftFeatureDetector detectorSift;
	//OrbFeatureDetector detector(400);
	//FastFeatureDetector detector(10);
    
    // detect
    double t = (double)getTickCount();
    detectorSift.detect( imgA, keypoints1);
    t = ((double)getTickCount() - t)/getTickFrequency();
    std::cout << "SIFT detection time [s]: " << t/1.0 << std::endl;
	//-- Draw keypoints
	Mat imgKeypoint1;
    cv::drawKeypoints(imgA, keypoints1, imgKeypoint1, Scalar::all(-1), DrawMatchesFlags::DRAW_RICH_KEYPOINTS );
    imshow("SIFT keypoint", imgKeypoint1);

    // detect
    t = (double)getTickCount();
    detectorSurf.detect( imgA, keypoints2);
    t = ((double)getTickCount() - t)/getTickFrequency();
    std::cout << "SURF detection time [s]: " << t/1.0 << std::endl;
	//-- Draw keypoints
	Mat imgKeypoint2;
    cv::drawKeypoints(imgA, keypoints2, imgKeypoint2, Scalar::all(-1), DrawMatchesFlags::DRAW_RICH_KEYPOINTS );
    imshow("SURF keypoint", imgKeypoint2);
	waitKey(0);
}
Example #11
/* 
*	Function : doSift
*	Description : Find sift points on the image
*	
*	path : path of the image
*	container : container for sift keypoints and their descriptor
*/
void doSift(const string &path, struct SFeatures &container)
{
	Mat img, des;
	vector<KeyPoint> keypoints;

	img = imread(path.c_str(), CV_LOAD_IMAGE_GRAYSCALE);

	SiftFeatureDetector detector;

   	detector.detect(img, keypoints);

   	SiftDescriptorExtractor extractor;

    extractor.compute(img, keypoints, des);

    container.des = des;
    container.keys = keypoints;
}
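struct SFeatures is declared elsewhere in that project; a minimal definition consistent with the two fields used here would be:

struct SFeatures
{
	Mat des;               // SIFT descriptors, one row per keypoint
	vector<KeyPoint> keys; // the corresponding keypoints
};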
void ASiftDetector::detectAndCompute(const Mat& img, std::vector< KeyPoint >& keypoints, Mat& descriptors)
{
    keypoints.clear();
    descriptors = Mat(0, 128, CV_32F);
    for(int tl = 1; tl < 6; tl++)
    {
        double t = pow(2, 0.5*tl);
        for(double phi = 0; phi < 180; phi += 72.0/t) // double phi so the 72/t step is not truncated
        {
            std::vector<KeyPoint> kps;
            Mat desc;

            Mat timg, mask, Ai;
            img.copyTo(timg);

            affineSkew(t, phi, timg, mask, Ai);

#if 0
            Mat img_disp;
            bitwise_and(mask, timg, img_disp);
            namedWindow( "Skew", WINDOW_AUTOSIZE );// Create a window for display.
            imshow( "Skew", img_disp );
            waitKey(0);
#endif

            SiftFeatureDetector detector;
            detector.detect(timg, kps, mask);

            SiftDescriptorExtractor extractor;
            extractor.compute(timg, kps, desc);

            for(unsigned int i = 0; i < kps.size(); i++)
            {
                Point3f kpt(kps[i].pt.x, kps[i].pt.y, 1);
                Mat kpt_t = Ai*Mat(kpt);
                kps[i].pt.x = kpt_t.at<float>(0,0);
                kps[i].pt.y = kpt_t.at<float>(1,0);
            }
            keypoints.insert(keypoints.end(), kps.begin(), kps.end());
            descriptors.push_back(desc);
        }
    }
}
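Assuming ASiftDetector is a small class exposing this method (affineSkew being its private warping helper), it can be used as a drop-in replacement for the separate detect/compute calls seen elsewhere on this page; a minimal sketch (the image path is a placeholder):

ASiftDetector asift;
std::vector<KeyPoint> keypoints;
Mat descriptors;
Mat img = imread("img1.jpg", CV_LOAD_IMAGE_GRAYSCALE);
asift.detectAndCompute(img, keypoints, descriptors);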
Example #13
int test(Mat img2)
{
	// Obtains the result for a particular image: 0 for ten, 1 for twenty, 2 for fifty,
	// 3 for hundred, 4 for five-hundred and 5 for thousand.
	// Returns <0 when unsure (there are some other conditions for -1/-2, but treat all <0 as unsure).
    Mat img;
    cvtColor(img2,img,CV_BGR2GRAY);

    Mat mask=grabcut_seg(img2);

    if(img.cols>640)
    {
        resize(img,img,Size(640,img.rows*(640.0/img.cols)));
    }

    vector<KeyPoint> keypoints;
    detector.detect(img,keypoints);

    remove_keypoints(keypoints,mask);
    //cout<<keypoints.size()<<" keypoints after masking"<<endl;

    if(keypoints.size()<20)
    {
        return -1;
    }

    Mat descriptors;
    vector<vector<int> > pointIdxsOfClusters;
    bowDE.compute(img,keypoints,descriptors,&pointIdxsOfClusters);

    int dot_result=sort_by_dot_product(descriptors);

    re_rank_geo(keypoints,pointIdxsOfClusters);
    //cout<<"After spatial re-ranking"<<endl;

    int res=get_top_geoscore();
    int result=labels.at<uchar>(res,0);
    for(int i=0;i<10;i++)
    {
        res=get_top_geoscore();
        result=labels.at<uchar>(res,0);
    }


    int response=max_decision();
    for(int i=0;i<6;i++)
    {
            decision[i]=0;
    }

    return response;

}
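test() relies on file-scope state (detector, bowDE, labels, decision) and on project helpers (grabcut_seg, remove_keypoints, sort_by_dot_product, re_rank_geo, get_top_geoscore, max_decision) that are not shown. A minimal sketch of the OpenCV objects it uses, assuming a vocabulary has already been trained (the initBow wrapper is illustrative):

SiftFeatureDetector detector;
Ptr<DescriptorExtractor> extractor(new SiftDescriptorExtractor);
Ptr<DescriptorMatcher> matcher(new FlannBasedMatcher);
BOWImgDescriptorExtractor bowDE(extractor, matcher);
Mat labels;      // one class label (CV_8U) per indexed training image
int decision[6]; // per-denomination vote accumulator

void initBow(const Mat &vocabulary)
{
	// the vocabulary must be set before bowDE.compute() is called in test()
	bowDE.setVocabulary(vocabulary);
}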
Example #14
std::vector<KeyPoint> extract_sift_keypoints(Mat image)
{
	cout << "Extracting features..." << endl ;

	image = threshold_and_convert(image);

	SiftFeatureDetector detector;
    vector<KeyPoint> keypoints;
    detector.detect(image, keypoints);

    cout<<"keys:" << keypoints.size()<< endl ;

    if(DEBUG)
    {
    	// Add results to image and save.
    	cv::Mat output;
    	cv::drawKeypoints(image, keypoints, output);
    	imshow("sift_keys", output);
    	moveWindow("sift_keys" , 500 , 100);
	}
    return keypoints;
}
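threshold_and_convert is not shown in this example; a plausible stand-in, assuming the intent is a binarized grayscale input (the body here is a guess, not the original):

Mat threshold_and_convert(Mat image)
{
	// hypothetical helper: grayscale conversion followed by Otsu binarization
	Mat gray, binary;
	if (image.channels() == 3)
		cvtColor(image, gray, CV_BGR2GRAY);
	else
		gray = image;
	threshold(gray, binary, 0, 255, THRESH_BINARY | THRESH_OTSU);
	return binary;
}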
/**
 * @function main
 */
int main( int argc, char** argv )
{
	ros::init(argc, argv, "object_detector");
	ros::NodeHandle nh;
	
	///subscribe to camera image topic
	image_transport::ImageTransport it(nh);
	image_transport::Subscriber sub = it.subscribe((string)IMAGE_TOPIC, 1, imageCallback);
	
    ///read calibration data
    ifstream file (CALIBRATION_FILE);
    if (!file.is_open()){
        printf("ERROR: Unable to open calibration file\n");
        return 2;
    }
    H=readCalibration(file);


    
	//feature calculation of objct image
	img_object = imread( (string)DATA_FOLDER+(string)IMAGE_NAME, CV_LOAD_IMAGE_GRAYSCALE );
	//-- Step 1: Detect the keypoints using the SIFT detector
	SiftFeatureDetector detector;
	detector.detect( img_object, keypoints_object );
	//-- Step 2: Calculate descriptors (feature vectors)
	SiftDescriptorExtractor extractor;
	extractor.compute( img_object, keypoints_object, descriptors_object );
    
	
    //run service
	ros::ServiceServer service = nh.advertiseService("vision/get_plate_position", get_plate_position);
	ros::ServiceServer service1 = nh.advertiseService("vision/displayFrame",displayFrame);
	ROS_INFO("ready to detect the plate");
        
    ros::spin();
	return 0;
}
Example #16
/*
	The processFeatures function processes images and derives the features of each image, which in our case is a character.
	These features/descriptors of the characters are then added to a BOWKMeansTrainer, which is a bag of features.
	This function takes as input a vector of images, which allows us to process all images in one place.
	We pass our BOWKMeansTrainer (bag of features) by reference since this is a variable we want available in main scope.
	A pointer to a DescriptorExtractor and a SiftFeatureDetector are also passed from main, since we define them there already.
	The function processes each image by using the SiftFeatureDetector to derive keypoints of the image/character.
	Using the DescriptorExtractor pointer we compute features for our character based on the character's keypoints.
	Then, we add the computed features to our bowTrainer (bag of features).
	Although the function is void, we are accessing bowTrainer from main scope, so the results are stored there.
	Overall, the main purpose of this is to allow us to build a vocabulary of features;
	on its own, however, it does not tell us which character corresponds to which features.
*/
void processFeatures(vector<Mat> images, BOWKMeansTrainer &bowTrainer, Ptr<DescriptorExtractor> extractor, SiftFeatureDetector detector) {
    for (int j=0; j<images.size(); j++) {
        Mat image, src;

        resize(images.at(j), src, Size(0,0), 10,10);

        copyMakeBorder(src, image, 10,10,10,10,BORDER_CONSTANT, Scalar(255));


        vector<KeyPoint> keypoints;
        detector.detect(image, keypoints);
        Mat features;
        extractor->compute(image, keypoints, features);
        bowTrainer.add(features);

    }
}
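As the comment above notes, the descriptors accumulate inside bowTrainer; a sketch of how the surrounding code presumably builds the vocabulary from it (the cluster count and the trainingImages variable are assumptions, not taken from the original):

BOWKMeansTrainer bowTrainer(100); // 100-word vocabulary, chosen arbitrarily
Ptr<DescriptorExtractor> extractor(new SiftDescriptorExtractor);
SiftFeatureDetector detector;

processFeatures(trainingImages, bowTrainer, extractor, detector);
Mat vocabulary = bowTrainer.cluster(); // k-means over every added descriptor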
Example #17
/*
	The dataTraining function stores both character features and the float value of the ASCII character they represent; for example, 'A' is stored as 65.
	It takes a vector of images, a reference to a Mat of labels holding the float values of the ASCII characters, a reference to a Mat of trainingData, a SiftFeatureDetector, a reference to a BOWImgDescriptorExtractor, and the character we are training for.
	We compute the keypoints of a character and their features (bowDescriptor) and store these features in a Mat called trainingData. We also store the corresponding label for the features as the float value of the ASCII character we are seeing.
	The function accesses and modifies the two Mat arrays holding the features and the matching labels.

*/
void dataTraining (vector<Mat> images, Mat &labels, Mat &trainingData, SiftFeatureDetector detector, BOWImgDescriptorExtractor &bowDE, char character) {
    vector<KeyPoint> keypoints;
    Mat bowDescriptor;
    cout<<images.size()<<" images"<<endl;
    bool verify = false;
    for (int j=0; j<images.size(); j++) {
        Mat image = images.at(j);
        detector.detect(image, keypoints);
        bowDE.compute(image, keypoints, bowDescriptor);
        cout<<keypoints.size()<<" "<<bowDescriptor.size()<<endl;
        if (bowDescriptor.size()!=Size(0,0)) {
            verify=true;
            trainingData.push_back(bowDescriptor);
            labels.push_back((float) character);
        }
    }

    cout<<labels.rows<<" "<<trainingData.rows<<endl<<endl;

}
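dataTraining expects a BOWImgDescriptorExtractor that already holds the vocabulary; a sketch of the wiring, continuing from the previous sketch (imagesOfA is a placeholder name):

Ptr<DescriptorMatcher> matcher(new FlannBasedMatcher);
BOWImgDescriptorExtractor bowDE(extractor, matcher);
bowDE.setVocabulary(vocabulary); // from bowTrainer.cluster() above

Mat labels, trainingData;
dataTraining(imagesOfA, labels, trainingData, detector, bowDE, 'A');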
void CRenderCenterDlg::OnBnClickedTarget()
{
	// TODO: add control notification handler code here
	if(!imgtarget.data)
	{
		MessageBox(TEXT("no target image loaded!"),TEXT("error"),MB_OK);
		_Global_Obj_Ptr->OnBnClickedStop();
		return;
	}
	SiftFeatureDetector siftdtc;
	vector<KeyPoint>kp1,kp2;
	siftdtc.detect(imgtarget,kp1);
	siftdtc.detect(imgfusion,kp2);

	SiftDescriptorExtractor extractor;
	Mat descriptor1,descriptor2;
	extractor.compute(imgtarget,kp1,descriptor1);
	extractor.compute(imgfusion,kp2,descriptor2);

	BruteForceMatcher<L2<float> > matcher;
	vector<DMatch> matches;
	matcher.match(descriptor1,descriptor2,matches);

	int i,j;
	int pointcount=(int)matches.size();
	Mat point1(pointcount,2,CV_32F);
	Mat point2(pointcount,2,CV_32F);
	Point2f point;
	for(i=0;i<pointcount;i++)
	{
		point=kp1[matches[i].queryIdx].pt;
		point1.at<float>(i,0)=point.x;
		point1.at<float>(i,1)=point.y;

		point=kp2[matches[i].trainIdx].pt;
		point2.at<float>(i,0)=point.x;
		point2.at<float>(i,1)=point.y;
	}
	Mat m_fundamental;
	vector<uchar> m_ransacstatus;
	m_fundamental=findFundamentalMat(point1,point2,m_ransacstatus,FM_RANSAC);

	float hhh[9];
	for(i=0;i<9;i++)
		hhh[i]=0;

	for(i=0;i<3;i++)
	{
		for(j=0;j<3;j++)
		{
			hhh[i*3+j]=(float)m_fundamental.at<double>(i,j); // findFundamentalMat returns a CV_64F matrix
		}
	}
	int outlinercount=0;
	for(i=0;i<pointcount;i++)
	{
		if(m_ransacstatus[i]==0)
		{
			outlinercount++;
		}
	}

	vector<Point2f> m_leftinliner;
	vector<Point2f> m_rightinliner;
	vector<DMatch> m_inlinermatches;
	int inlinercount=pointcount-outlinercount;
	m_inlinermatches.resize(inlinercount);
	m_leftinliner.resize(inlinercount);
	m_rightinliner.resize(inlinercount);
	inlinercount=0;
	for(i=0;i<pointcount;i++)
	{
		if(m_ransacstatus[i]!=0)
		{
			m_leftinliner[inlinercount].x=point1.at<float>(i,0);
			m_leftinliner[inlinercount].y=point1.at<float>(i,1);
			m_rightinliner[inlinercount].x=point2.at<float>(i,0);
			m_rightinliner[inlinercount].y=point2.at<float>(i,1);
			m_inlinermatches[inlinercount].queryIdx=inlinercount;
			m_inlinermatches[inlinercount].trainIdx=inlinercount;
			inlinercount++;
		}
	}
	vector<KeyPoint> key1(inlinercount);
	vector<KeyPoint> key2(inlinercount);
	KeyPoint::convert(m_leftinliner,key1);
	KeyPoint::convert(m_rightinliner,key2);

	Mat H=findHomography(m_leftinliner,m_rightinliner,CV_RANSAC);
	std::vector<Point2f> obj_corners(4);
    obj_corners[0]=cv::Point(0,0);obj_corners[1]=cv::Point(imgtarget.cols,0);
    obj_corners[2]=cv::Point(imgtarget.cols,imgtarget.rows);obj_corners[3]=cv::Point(0,imgtarget.rows);
	std::vector<Point2f>scene_corners(4);
	perspectiveTransform(obj_corners,scene_corners,H);
	rectangle(imgfusion,scene_corners[0],scene_corners[2],Scalar(0,0,255,0),1,8,0);


	CWnd *pWnd=GetDlgItem(IDC_IMGFUSION);
	CDC *pDC=pWnd->GetDC();
	HDC hDC=pDC->GetSafeHdc();
	IplImage img=imgfusion;
	CvvImage cimg;
	cimg.CopyOf(&img);
	CRect rect;
	GetDlgItem(IDC_IMGFUSION)->GetClientRect(&rect);
	cimg.DrawToHDC(hDC,&rect);
}
Example #19
void call_my_code() {
    
//    Mat img1_rgb = imread("/Users/sunyuyin/Desktop/img_stiching/img2_online.jpg");
//    Mat img2_rgb = imread("/Users/sunyuyin/Desktop/img_stiching/img1_online.jpg");
    
    
    Mat img1_rgb = imread("/Users/sunyuyin/Desktop/img_stiching/img1.JPG");
    Mat img2_rgb = imread("/Users/sunyuyin/Desktop/img_stiching/img2.JPG");

    
    if (img1_rgb.empty() || img2_rgb.empty()) {
        exit(-1);
    }
    
    Mat img1, img2;
    
    // imread loads BGR data, so convert with CV_BGR2GRAY
    cvtColor(img1_rgb, img1, CV_BGR2GRAY);
    cvtColor(img2_rgb, img2, CV_BGR2GRAY);
    
    
    SiftFeatureDetector detector;
    vector<KeyPoint> keypoints_img1, keypoints_img2;
    detector.detect(img1, keypoints_img1);
    detector.detect(img2, keypoints_img2);
    
    SiftDescriptorExtractor extractor;
    
    Mat descriptors_img1, descriptors_img2;
    
    extractor.compute(img1, keypoints_img1, descriptors_img1);
    extractor.compute(img2, keypoints_img2, descriptors_img2);
    
    FlannBasedMatcher matcher;
    vector<DMatch> matches;
    matcher.match(descriptors_img1, descriptors_img2, matches);
    
    double max_dist = 0.0, min_dist = numeric_limits<double>::max();
    
    for (int i = 0; i < descriptors_img1.rows; ++i) {
        double dist = matches[i].distance;
        if (dist < min_dist) min_dist = dist;
        if (dist > max_dist) max_dist = dist;
    }
    
    cout << "-- Max dist: " << max_dist << endl;
    cout << "-- Min dist: " << min_dist << endl;
    
    
    vector<DMatch> good_matches;
    for (int i = 0; i < descriptors_img1.rows; ++i) {
        if (matches[i].distance < 3 * min_dist) {
            good_matches.push_back(matches[i]);
        }
    }
    
    vector<Point2f> img1_matches;
    vector<Point2f> img2_matches;
    
    for (int i = 0; i < good_matches.size(); ++i) {
        img1_matches.push_back(keypoints_img1[good_matches[i].queryIdx].pt);
        img2_matches.push_back(keypoints_img2[good_matches[i].trainIdx].pt);
    }
    
    Mat H = findHomography(img1_matches, img2_matches, CV_RANSAC);
    
    Mat result;
    
    cout << H << endl;
    
    
    warpPerspective(img1_rgb, result, H, Size(img1_rgb.cols+img2_rgb.cols,img1_rgb.rows));
    
    /*
     namedWindow("img1");
     imshow("img1", img1);
     namedWindow("result");
     imshow("result", result);
     */
    
    
    
     Mat half(result, Rect(0, 0, img2_rgb.cols, img2_rgb.rows));
     img2_rgb.copyTo(half);
     imshow("Result", result);
    
    /*
    
    Mat img_matches;
    drawMatches(img1, keypoints_img1, img2, keypoints_img2, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
                vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
    
    namedWindow("img_matches");
    imshow("img_matches", img_matches);
    
    */
    /*
     
     Mat img1_output, img2_output;
     
     drawKeypoints(img1, keypoints_img1, img1_output);
     namedWindow("Image 1 keypoints", WINDOW_AUTOSIZE);
     
     imshow("Image 1 keypoints", img1_output);
     
     
     drawKeypoints(img2, keypoints_img2, img2_output);
     namedWindow("Image 2 keypoints");
     
     imshow("Image 2 keypoints", img2_output);
     */

}
/*void normalize(Mat* srcMat, Mat* dstMat) {

	//cout<<"START#";
	int nRows = srcMat->rows;
	int nCols = srcMat->cols;

	vector<float> maxValues(nCols), minValues(nCols);
	srcMat->row(0).copyTo(maxValues);
	srcMat->row(0).copyTo(minValues);
	for(int col = 0; col < nCols; col++) {
		float max = maxValues[col];
		float min = minValues[col];
		float temp;
		for(int row = 0; row < nRows; row++) {
			temp = dstMat->at<float>(row,col);
			if(temp > max)
				max = temp;
			if(temp < min)
				min = temp;
		}
		maxValues[col] = max;
		minValues[col] = min;
	}

	for(int row = 0; row < nRows; row++) {
		for(int col = 0; col < nCols; col++) {
			dstMat->at<float>(row,col) = float(srcMat->at<float>(row,col) - minValues[col])/float(maxValues[col] - minValues[col]);
		}
	}
	for(int col = 0; col < nCols; col++) {
		cout<<maxValues[col]<<" ";
	}
	cout<<"\n";
	for(int col = 0; col < nCols; col++) {
			cout<<minValues[col]<<" ";
		}
	cout<<"===================================\n";
	//cout<<"END#";
}*/
int main()
{

	// parameters to set
	int num_classes = 10;
	int PCA_FLAG = 0;					// set this to use PCA
	int dimensionToReduceTo = 45;		// PCA good value 45,15

	// output:
	// 		SIFT_PCA_NaiveBayes_feature_train.txt
	// 		SIFT_PCA_NaiveBayes_label_train.txt
	// 		SIFT_PCA_NaiveBayes_feature_test.txt
	//      SIFT_PCA_NaiveBayes_label_test.txt
	// 		sift_pca_bagOfwords_results.txt
	if(PCA_FLAG==0)
		dimensionToReduceTo = 128;

	// Read parameters
	string raw_data_location;
	int num_training_samples_class;
	int num_testing_samples_class;

	vector<string> feature_train_image_names;
	vector<string> feature_test_image_names;

	ifstream fin;
	fin.open("dataDescription.txt");
	if(fin){
		string temp;
		getline(fin,raw_data_location);
		getline(fin,temp);
		num_training_samples_class = atoi(temp.c_str());
		getline(fin,temp);
		num_testing_samples_class = atoi(temp.c_str());
		fin.close();
	}
	else
	{
		cout<<"Unable to open dataDescription.txt\n";
		exit(1);
	}

	// Make a list of all valid class names
	string class_name_array[num_classes];
	fin.open("feature_class_names.txt");
	string temp;
	if(fin){

		vector<string> validClassNames;
		while(getline(fin,temp)){
			temp = temp.substr(0, temp.find("\t"));
			validClassNames.push_back(temp);
		}
		fin.close();

		if( num_classes > validClassNames.size() ){
			cout<<"\nWe do not have enough classes with the required number of samples. \nPlease reduce the "
					"number of training and/or test samples you want to use";
		}
		else {
			for(int i = 0; i < num_classes; i++) {
				class_name_array[i] = validClassNames[i];
			}
			fin.open("feature_train_image_names.txt");
			if(fin){
				string temp;
				while(getline(fin,temp)){
					feature_train_image_names.push_back(temp);
				}
				fin.close();

			}
			else
			{
				cout<<"Unable to open feature_train_image_names.txt \nPlease run randomDataSubSampler.cpp first\n";
				exit(1);
			}
			fin.open("feature_test_image_names.txt");
			if(fin){
				string temp;
				while(getline(fin,temp)){
					feature_test_image_names.push_back(temp);
				}
				fin.close();

			}
			else
			{
				cout<<"Unable to open feature_test_image_names.txt \nPlease run randomDataSubSampler.cpp first\n";
				exit(1);
			}
		}
	}
	else
	{
		cout<<"Unable to open feature_class_names.txt. \nPlease run randomDataSubSampler.cpp first\n";
		exit(1);
	}

	// declare space to store SIFT features in 128X(total number of keypoints)
	//vector< vector<double> > sift_feature_matrix;
	Mat sift_feature_matrix;
	// store the number of keypoints in each image
	Mat_<int> num_keypoints_matrix(num_classes,num_training_samples_class);

	// iterate over each class one by one
	int cur_class = 0;
	int cum_image_num = 0;
	int labels_train[num_classes*num_training_samples_class];
	for(cur_class = 0; cur_class < num_classes; cur_class++) {

		string cur_class_raw_data_location = raw_data_location + class_name_array[cur_class] + "/";

		for(int cur_image_num = 0; cur_image_num < num_training_samples_class; cum_image_num++, cur_image_num++) {

			string cur_image_location = cur_class_raw_data_location + feature_train_image_names[cum_image_num];
		//	cout<<cur_image_location<<"\t";
			Mat cur_image = imread(cur_image_location,0);
		/*	imshow("curIMage",cur_image);
			waitKey(0);*/
			SiftFeatureDetector detector;
			vector<cv::KeyPoint> image_keypoints;
			detector.detect(cur_image, image_keypoints);
		/*	int a = image_keypoints.size();
			cout<<a<<"\t";*/
			Mat tempImg;
		/*	drawKeypoints(cur_image, image_keypoints, tempImg);
			imshow("img",tempImg);
			waitKey(0);*/

			num_keypoints_matrix[cur_class][cur_image_num] = image_keypoints.size();
//cout<<num_keypoints_matrix[cur_class][cur_image_num]<<"\n";
			// Calculate descriptors: for each of the key points,
			// obtain the features describing the vicinity of the
			// key point. This will be a 128-dimensional vector
			// at each key point.

			SiftDescriptorExtractor extractor;
			Mat kepoint_descriptors;
			extractor.compute( cur_image, image_keypoints, kepoint_descriptors );
			sift_feature_matrix.push_back(kepoint_descriptors);

			labels_train[cum_image_num] = cur_class;
		}
	}

	// PCA to reduce dimensionality from 128 features to dimensionToReduceTo
	Mat_<float> pcaSIFT_feature_matrix;
	PCA pca(sift_feature_matrix, Mat(), CV_PCA_DATA_AS_ROW, dimensionToReduceTo);
	if(PCA_FLAG==1){
		int reducedDimension = dimensionToReduceTo;
		Size size_sift_feature_matrix = sift_feature_matrix.size();
		Mat_<float> projected(size_sift_feature_matrix.height,reducedDimension);
		pca.project(sift_feature_matrix,projected);
		projected.convertTo(pcaSIFT_feature_matrix,CV_32F);
	}
	else
	{
		pcaSIFT_feature_matrix = sift_feature_matrix;
	}

	// number of key points in each class
	vector<int> training_totalKeyPoints_class(num_classes,0);
	for(cur_class = 0; cur_class < num_classes; cur_class++) {
		for(int imgNo = 0; imgNo < num_training_samples_class; imgNo++) {
			training_totalKeyPoints_class[cur_class] = training_totalKeyPoints_class[cur_class] + num_keypoints_matrix.at<int>(cur_class,imgNo);
		}
	}


	// ===============================================================
	// Read Test Images
	// ===============================================================

	Mat_<int> testing_num_keypoints_matrix(num_classes,num_testing_samples_class);
	Mat testing_sift_feature_matrix;
	int cum_image_index = 0;
	int testLabels[num_classes*num_testing_samples_class];
	for(cur_class = 0; cur_class < num_classes; cur_class++) {

		string cur_class_raw_data_location = raw_data_location + "/" + class_name_array[cur_class] + "/";

		//read image of the testing data of the current_class one at a time

		for(int cur_image_num = 0; cur_image_num < num_testing_samples_class; cum_image_index++,cur_image_num++) {
			string cur_image_location = cur_class_raw_data_location + feature_test_image_names[cum_image_index];

			Mat cur_image = imread(cur_image_location,0);

			SiftFeatureDetector detector;
			vector<cv::KeyPoint> image_keypoints;
			detector.detect(cur_image, image_keypoints);

			testing_num_keypoints_matrix[cur_class][cur_image_num] = image_keypoints.size();

			// Calculate descriptors: for each of the key points,
			// obtain the features describing the vicinity of the
			// key point. This will be a 128-dimensional vector
			// at each key point.

			SiftDescriptorExtractor extractor;
			Mat kepoint_descriptors;
			extractor.compute( cur_image, image_keypoints, kepoint_descriptors );
			testing_sift_feature_matrix.push_back(kepoint_descriptors);
			testLabels[cum_image_index]=cur_class;
		}
	}

	// Project the test image SIFT feature to the PCA reduced
	// dimension plane
	Mat_<float> testing_pcaSIFT_feature_matrix;
	if(PCA_FLAG==1){
		Size size_testing_sift_feature_matrix = testing_sift_feature_matrix.size();
		Mat_<float> testing_projected(size_testing_sift_feature_matrix.height,dimensionToReduceTo);
		pca.project(testing_sift_feature_matrix,testing_projected);
		testing_projected.convertTo(testing_pcaSIFT_feature_matrix,CV_32F);
	}
	else{
		testing_pcaSIFT_feature_matrix = testing_sift_feature_matrix;
	}



	Size train_dimension = pcaSIFT_feature_matrix.size();
	Size test_dimension  = testing_pcaSIFT_feature_matrix.size();
	ofstream fout;
	// Write to file
	fout.open("SIFT_PCA_NaiveBayes_feature_test.txt");

	for(int i = 0; i < test_dimension.height;i++){
		for(int j = 0; j < test_dimension.width; j++){
			fout<<testing_pcaSIFT_feature_matrix.at<float>(i, j)<<" ";
		}
		fout<<"\n";
	}
	fout.clear();
	fout.close();


	fout.open("SIFT_PCA_NaiveBayes_label_test.txt");
	fout.clear();
	for(int i = 0; i < num_testing_samples_class*num_classes;i++){
		fout<<testLabels[i]<<"\n";
	}
	fout.close();

	fout.open("SIFT_PCA_NaiveBayes_feature_train.txt");
	fout.clear();
	for(int i = 0; i < train_dimension.height;i++){
		for(int j = 0; j < train_dimension.width; j++){
			fout<<pcaSIFT_feature_matrix.at<float>(i, j)<<" ";
		}
		fout<<"\n";
	}
	fout.close();

	fout.open("SIFT_PCA_NaiveBayes_label_train.txt");
	fout.clear();
	for(int i = 0; i < num_training_samples_class*num_classes; i++) {
		fout<<labels_train[i]<<"\n";
	}
	fout.close();


	fout.open("SIFT_PCA_NaiveBayes_train_keypoints_matrix.txt");
	fout.clear();

	for(int i = 0; i < num_classes;i++){
		for(int j = 0; j < num_training_samples_class; j++){
			fout<<num_keypoints_matrix.at<int>(i, j)<<" ";
		}
		fout<<"\n";
	}

	fout.close();


	fout.open("SIFT_PCA_NaiveBayes_test_keypoints_matrix.txt");
	fout.clear();

	for(int i = 0; i < num_classes;i++){
		for(int j = 0; j < num_testing_samples_class; j++){
			fout<<testing_num_keypoints_matrix.at<int>(i, j)<<" ";
		}
		fout<<"\n";
	}

	fout.close();

	/*cout<<"---1\n";

	// Construct KD Trees
	vector< KDTree > kdTrees(num_classes);
	int dimension = pcaSIFT_feature_matrix.size().width;
	int min_keypoint_class_index = 0;
	for(int curClass = 0; curClass < num_classes; curClass++) {
		cout<<"\t--->"<<curClass;
		int numKeyPointsCurClass = training_totalKeyPoints_class[curClass];
		Mat curClassDesriptors = pcaSIFT_feature_matrix(Rect(0,min_keypoint_class_index,dimension,min_keypoint_class_index+numKeyPointsCurClass));
		min_keypoint_class_index = min_keypoint_class_index + numKeyPointsCurClass;
		cout<<"|";
		KDTree kdCur(curClassDesriptors);
		kdTrees.push_back(kdCur);
		cout<<">\n";
	}


	cout<<"---2\n";


	Mat distMat;//(num_testing_samples_class*num_classes,num_classes);

	int min_keypoint_img_index = 0;

	int cumImage_index = 0;
	for(int curClass = 0; curClass < num_classes; curClass++) {

	//	int numKeyPointsCurClass = training_totalKeyPoints_class[curClass];
	//	Mat curClassDesriptors = pcaSIFT_feature_matrix(Rect(0,min_keypoint_class_index,dimension,min_keypoint_class_index+numKeyPointsCurClass));
	//	min_keypoint_class_index = min_keypoint_class_index + numKeyPointsCurClass;
		KDTree kd = kdTrees[curClass];

		for(int curImage = 0; curImage < num_testing_samples_class; cumImage_index++,curImage++) {

			int numKeyPointsCurTestImg = num_keypoints_matrix[curClass][curImage];

			Mat curTestImgDesriptorsMatrix = testing_pcaSIFT_feature_matrix(Rect(0, min_keypoint_img_index, dimension,min_keypoint_img_index+numKeyPointsCurTestImg));
			min_keypoint_img_index = min_keypoint_img_index + numKeyPointsCurTestImg;

			vector<int> NN_indices(curTestImgDesriptorsMatrix.size().height);
			kd.findNearest(curTestImgDesriptorsMatrix,1,32,NN_indices,noArray(),noArray());

			double dist_sum = 0;


			for(int i = 0; i < NN_indices.size(); i++){
				vector<float> ref(curTestImgDesriptorsMatrix.row(i));
				vector<float> NN_point(kd.getPoint(NN_indices[i]),kd.getPoint(NN_indices[i])+dimension);
				dist_sum = dist_sum + norm(ref,NN_point,NORM_L2);
			}
			distMat.at<float>(cumImage_index,curClass) = dist_sum;

		}

	}
	cout<<"---3\n";
	vector<int> predictedLabels(num_testing_samples_class*num_classes);
	for(int i = 0 ; i < num_testing_samples_class*num_classes; i++) {
		float min = distMat.at<float>(i,0);

		for(int j = 0; j < num_classes; j++){

			if(distMat.at<float>(i,j) < min){
				min = distMat.at<float>(i,j);
				predictedLabels[i] = j;
			}
		}
	}

	for(int i = 0 ; i < num_testing_samples_class*num_classes; i++) {
	cout<<predictedLabels[i]<<" ";
	}*/

	/*
	cout<<"---2\n";
	Mat distMat;
	int dimension = pcaSIFT_feature_matrix.size().width;
	int min_keypoint_class_index = 0;
	int min_keypoint_img_index = 0;
	int cumImage_index = 0;
	for(int curClass = 0; curClass < num_classes; curClass++) {
		cout<<"\t---"<<curClass<<"\n";
		int numKeyPointsCurClass = training_totalKeyPoints_class[curClass];
		Mat curClassDesriptors = pcaSIFT_feature_matrix(Rect(0,min_keypoint_class_index,dimension,min_keypoint_class_index+numKeyPointsCurClass));
		min_keypoint_class_index = min_keypoint_class_index + numKeyPointsCurClass;

		for(int curImage = 0; curImage < num_testing_samples_class; cumImage_index++,curImage++) {
			cout<<"\t\t---"<<curImage;
			int numKeyPointsCurTestImg = num_keypoints_matrix[curClass][curImage];

			Mat curTestImgDesriptorsMatrix = testing_pcaSIFT_feature_matrix(Rect(0, min_keypoint_img_index, dimension,min_keypoint_img_index+numKeyPointsCurTestImg));
			min_keypoint_img_index = min_keypoint_img_index + numKeyPointsCurTestImg;
			cout<<"D";
			FlannBasedMatcher flann_matcher;
			std::vector< DMatch > flann_matches;
			flann_matcher.match( curTestImgDesriptorsMatrix, curClassDesriptors, flann_matches );
			cout<<"F";
			double dist_sum = 0;

			for(int i = 0; i < numKeyPointsCurTestImg; i++){
				vector<float> ref(curTestImgDesriptorsMatrix.row(i));
				vector<float> NN_point(curClassDesriptors.row(flann_matches[i].trainIdx));
				dist_sum = dist_sum + norm(ref,NN_point,NORM_L2);
			}
			distMat.at<float>(cumImage_index,curClass) = dist_sum;
			cout<<">\n";
		}

	}

	cout<<"---3\n";
	vector<int> predictedLabels(num_testing_samples_class*num_classes);
	for(int i = 0 ; i < num_testing_samples_class*num_classes; i++) {
		float min = distMat.at<float>(i,0);

		for(int j = 0; j < num_classes; j++){

			if(distMat.at<float>(i,j) < min){
				min = distMat.at<float>(i,j);
				predictedLabels[i] = j;
			}
		}
	}

	 */




	/*
	// k means clustering
	// labels: vector storing the labels assigned to each vector
	//         (the pcaSIFT feature of a keypoint). Therefore labels
	//         is of size = total number of keypoints = size_sift_feature_matrix.height

	vector<int> labels;//(size_sift_feature_matrix.height);
	int attempts = 5;
	Mat centers;
	TermCriteria criteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20000, 0.0001);

	kmeans(pcaSIFT_feature_matrix, num_clusters, labels,criteria, attempts, KMEANS_RANDOM_CENTERS,centers );

	// Object Feature Vector
	// computing histograms of each image
	// the keypoint_matrix stores the number of keypoints of each image
	// each image has a different number of keypoints
	// using this matrix, we will compute the histogram for each image
	// Also, note that the pcaSIFT_matrix stores the pcaSift_features in
	// following order:
	// pcaSift_feature of keypoint 1 of image 1 of class 1
	// pcaSift_feature of keypoint 2 of image 1 of class 1
	// .
	// .
	// pcaSift_feature of keypoint 1 of image 2 of class 1
	// pcaSift_feature of keypoint 2 of image 2 of class 1
	// .
	// .
	// pcaSift_feature of keypoint 1 of image 1 of class 2
	// .
	// .
	// .
	// pcaSift_feature of last keypoint of last image of last class

	Mat histogram_images = Mat(num_training_samples_class*num_classes, num_clusters, CV_32F, float(0.0));

	vector<int> labels_train(num_training_samples_class*num_classes);
	int cImg = 0;

	int min_keypoint_index = 0;
	int cumImage_index = 0;
	for(int curClass = 0; curClass < num_classes; curClass++) {
		for(int curImage = 0; curImage < num_training_samples_class; curImage++) {

			int numKeypoints = num_keypoints_matrix[curClass][curImage];

			for(unsigned int i = 0; i < numKeypoints; i++) {

				int id = labels[min_keypoint_index+i];
				histogram_images.at<float>(cumImage_index,id) += 1.0;
			}

			min_keypoint_index = min_keypoint_index + numKeypoints;
			labels_train[cumImage_index] = curClass;
			cumImage_index++;
		}

	}

	ofstream fout;
		fout.open("histogram_images.txt");

		for(int i = 0; i < num_training_samples_class*num_classes;i++){
			for(int j = 0; j < num_clusters; j++){
				fout<<histogram_images.at<double>(i, j)<<" ";
			}
			fout<<"\n";
		}
		fout.clear();
		fout.close();
	// Normalize the histogram matrix
	Mat normalized_histogram_images;
	normalize(histogram_images, normalized_histogram_images);
	histogram_images = normalized_histogram_images;


	// ===============================================================
	// Read Test Images
	// ===============================================================

	Mat_<int> testing_num_keypoints_matrix(num_classes,num_testing_samples_class);
	Mat testing_sift_feature_matrix;
	int cum_image_index = 0;
	for(cur_class = 0; cur_class < num_classes; cur_class++) {

		string cur_class_raw_data_location = raw_data_location + "/" + class_name_array[cur_class] + "/";

		//read image of the testing data of the current_class one at a time

		for(int cur_image_num = 0; cur_image_num < num_testing_samples_class; cum_image_index++,cur_image_num++) {
			string cur_image_location = cur_class_raw_data_location + feature_test_image_names[cum_image_index];

			Mat cur_image = imread(cur_image_location,0);

			SiftFeatureDetector detector;
			vector<cv::KeyPoint> image_keypoints;
			detector.detect(cur_image, image_keypoints);

			testing_num_keypoints_matrix[cur_class][cur_image_num] = image_keypoints.size();

			// Calculate descriptors: For each of the key points
			// obtain the features describing the vicinity of the
			// the key points. This will be a 128 dimensional vector
			// at each key point

			SiftDescriptorExtractor extractor;
			Mat kepoint_descriptors;
			extractor.compute( cur_image, image_keypoints, kepoint_descriptors );
			testing_sift_feature_matrix.push_back(kepoint_descriptors);

		}
	}

	// Project the test image SIFT feature to the PCA reduced
	// dimension plane
	Size size_testing_sift_feature_matrix = testing_sift_feature_matrix.size();
	Mat_<float> testing_projected(size_testing_sift_feature_matrix.height,reducedDimension);
	pca.project(testing_sift_feature_matrix,testing_projected);

	Mat_<float> testing_pcaSIFT_feature_matrix;
	testing_projected.convertTo(testing_pcaSIFT_feature_matrix,CV_32F);


	Mat testing_histogram_images = Mat(num_testing_samples_class*num_classes, num_clusters, CV_32F, float(0.0));
	vector<int> labels_test(num_testing_samples_class*num_classes);
	cImg = 0;
	min_keypoint_index = 0;
	cumImage_index = 0;
	for(int curClass = 0; curClass < num_classes; curClass++) {
		for(int curImage = 0; curImage < num_testing_samples_class; curImage++) {

			int numKeypoints = testing_num_keypoints_matrix[curClass][curImage];

			Mat tempDescriptor=testing_pcaSIFT_feature_matrix(cv::Rect(0,min_keypoint_index,reducedDimension,numKeypoints));

			FlannBasedMatcher flann_matcher;
			std::vector< DMatch > flann_matches;
			flann_matcher.match( tempDescriptor, centers, flann_matches );

			for(unsigned int i = 0; i < flann_matches.size(); i++) {
				int id = flann_matches[i].trainIdx;
				testing_histogram_images.at<float>(cumImage_index,id) += 1.0;
			}

			min_keypoint_index = min_keypoint_index + numKeypoints;
			labels_test[cumImage_index] = curClass;
			cumImage_index++;
		}
	}

	// NORMALIZE HISTOGRAMS
	Mat normalized_testing_histogram_images;
	normalize(testing_histogram_images,normalized_testing_histogram_images);
	testing_histogram_images = normalized_testing_histogram_images;

	cout<<"\n\n===========BOW=======================\n\n";

	FlannBasedMatcher flann_matcher;
	vector< vector < DMatch > > flann_matches;

	Mat_<float> testHist = testing_histogram_images;
	Mat_<float> trainHist = histogram_images;

	flann_matcher.knnMatch( testHist, trainHist, flann_matches, k_nearest_neighbor );

	int predTestLabels[num_testing_samples_class*num_classes];
	for(int imgNo = 0; imgNo < num_testing_samples_class*num_classes; imgNo++) {
		vector < DMatch > temp = flann_matches[imgNo];

		float votes[num_clusters]={0.0};
		const int N = sizeof(votes) / sizeof(float);
		for(int neigh = 0; neigh < temp.size(); neigh++ ) {
			int id = temp[neigh].trainIdx;
			int ind = id;
			id = ind/num_training_samples_class;
			if(ind%num_training_samples_class == 0)
				id = id - 1;

			float dist = temp[neigh].distance;
			votes[id] = votes[id] + (1.0/dist);

		}
		predTestLabels[imgNo] = distance(votes, max_element(votes, votes + N));
	}


	// compute error
	vector<float> error(num_classes,0.0);
	float totalError=0.0;
	for(int i = 0; i < num_testing_samples_class*num_classes; i++) {

		if(predTestLabels[i] != labels_test[i])
		{
			error[labels_test[i]] = error[labels_test[i]] + 1.0;
			totalError = totalError + 1.0;
		}
	}


//	ofstream fout;
	// Write to file
	fout.open("feature_test.txt");

	for(int i = 0; i < num_testing_samples_class*num_classes;i++){
		for(int j = 0; j < num_clusters; j++){
			fout<<testing_histogram_images.at<float>(i, j)<<" ";
		}
		fout<<"\n";
	}
	fout.clear();
	fout.close();


	fout.open("label_test.txt");
	fout.clear();
	for(int i = 0; i < num_testing_samples_class*num_classes;i++){
			fout<<labels_test[i]<<"\n";
	}
	fout.close();

	fout.open("feature_train.txt");
	fout.clear();
	for(int i = 0; i < num_training_samples_class*num_classes;i++){
		for(int j = 0; j < num_clusters; j++){
			fout<<histogram_images.at<float>(i, j)<<" ";
		}
		fout<<"\n";
	}
	fout.close();

	fout.open("label_train.txt");
	fout.clear();
	for(int i = 0; i < num_training_samples_class*num_classes; i++) {
		fout<<labels_train[i]<<"\n";
	}
	fout.close();

	fout.open("predictedLabels.txt");
	fout.clear();
	for(int i = 0; i < num_testing_samples_class*num_classes; i++) {
		fout<<predTestLabels[i]<<"\n";
	}
	fout.clear();
	fout<<"\nClass Wise Number of Miss-classifications ("<<num_testing_samples_class<<" test samples "
			"in each class)\n";
	for(int i = 0; i < num_classes; i++) {
		fout<<class_name_array[i]<<"\t: "<<error[i]<<"\n";
	}
	fout<<"Total Error(%)\n"<<totalError*100/(num_testing_samples_class*num_classes);
	fout.close();
	 */

}
Example #21
void Detect( Mat& img_scene ){
	LOGI("starting object detection");
	detector1.detect( img_scene, keypoints_scene );
	LOGI("Keypoints detected");

	extractor.compute( img_scene, keypoints_scene, descriptors_scene );
	LOGI("Descriptors extracted");

	FlannBasedMatcher matcher;
	std::vector< DMatch > matches;
	matcher.match( descriptors_source, descriptors_scene, matches );
	LOGI("Matching done");

	//-- Quick calculation of max and min distances between keypoints
	double min_dist = 1000, max_dist = 0;	// max_dist must be initialised before the comparisons below
	for( int i = 0; i < descriptors_source.rows; i++ )
	{
		double dist = matches[i].distance;
		if( dist < min_dist ) min_dist = dist;
		if( dist > max_dist ) max_dist = dist;
	}

	//-- Keep only "good" matches (i.e. whose distance is less than 4*min_dist)
	std::vector< DMatch > good_matches;

	for( int i = 0; i < descriptors_source.rows; i++ )
	{
		if( matches[i].distance <= 4*min_dist )
		{ good_matches.push_back( matches[i]); }
	}

	// GEOM FILTER: discard the distance-based selection above and keep only
	// the matches that survive RANSAC estimation of the fundamental matrix
	good_matches.clear();
	vector<uchar> inliers;
	vector<Point2f> pts1, pts2;
	for (int i = 0; i < matches.size(); i++) {
		pts1.push_back(keypoints_source[matches[i].queryIdx].pt);
		pts2.push_back(keypoints_scene[matches[i].trainIdx].pt);
	}
	Mat F = findFundamentalMat(Mat(pts1), Mat(pts2),
			FM_RANSAC, 3, 0.99, inliers);
	for (int i = 0; i < inliers.size(); i++) {
		if ( (int)inliers[i] ) {
			good_matches.push_back(matches[i]);
		}
	}

	//-- Localize the object
	std::vector<Point2f> obj;
	std::vector<Point2f> scene;

	for( int i = 0; i < good_matches.size(); i++ )
	{
		//-- Get the keypoints from the good matches
		obj.push_back( keypoints_source[ good_matches[i].queryIdx ].pt );
		scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
	}

	LOGI("Point Correspondence done");

	Mat img_matches;
	Mat img_object = imread("/sdcard/charminarAR/obj.jpg");
	drawMatches( img_object, keypoints_source, img_scene, keypoints_scene,
			good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
			vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
	imwrite("/sdcard/charminarAR/matches2.jpg", img_matches);
	LOGI("saved matches");

	prev_scene_points = source_points;
	HomographicTransformation(  obj, scene );
	points[1] = scene;
}
Example #22
/**
 * @function main
 */
int main( int, char** argv )
{
  


  image1 = imread( argv[1], 1 );
  image2 = imread( argv[2], 1 );
  rows=image1.rows;
  cols=image1.cols;

  
  namedWindow( "image1", WINDOW_AUTOSIZE );
  imshow( "image1",image1 );
  namedWindow( "image2", WINDOW_AUTOSIZE );
  imshow( "image2",image2 );

  

  sift.detect(image1,keypoints1);
  sift.detect(image2,keypoints2);

  // FREAK descriptor extractor, created through the cv::DescriptorExtractor factory
  Ptr<DescriptorExtractor> FreakDesc = DescriptorExtractor::create("FREAK");

  Mat descriptors1,descriptors2;
  FreakDesc->compute(image1,keypoints1,descriptors1);
  FreakDesc->compute(image2,keypoints2,descriptors2);

  // Construction of the matcher (Hamming distance for binary FREAK descriptors)
  //BruteForceMatcher< HammingLUT > matcher;
  BruteForceMatcher<Hamming> matcher;

  Mat descriptorAuxKp1;
  Mat descriptorAuxKp2;


  vector<int> associateIdx;

  for(int i=0;i<descriptors1.rows;i++){
   // copy row i of the descriptor matrix, i.e. the descriptor values for keypoints1[i]
   descriptors1.row(i).copyTo(descriptorAuxKp1);

   // collect only the descriptors of the image-2 keypoints that we want to
   // compare against the image-1 keypoint currently being processed
   descriptorAuxKp2.create(0,0,CV_8UC1);

   // associateIdx maps the indices returned by match() back to the original keypoint indices
   associateIdx.erase(associateIdx.begin(),associateIdx.end());

    for(int j=0;j<descriptors2.rows;j++){

      float p1x=keypoints1[i].pt.x;
      float p1y=keypoints1[i].pt.y;
      float p2x=keypoints2[j].pt.x;
      float p2y=keypoints2[j].pt.y;

      float distance=sqrt(pow((p1x-p2x),2)+pow((p1y-p2y),2));

      // among the rows of descriptors2, keep only those whose associated keypoints
      // lie within a fixed distance of the current (i-th) keypoint
      if(distance<4){
        descriptorAuxKp2.push_back(descriptors2.row(j));
        associateIdx.push_back(j);
      }

    }
    // match this single image-1 keypoint against all the retained image-2 keypoints
    matcher.match(descriptorAuxKp1,descriptorAuxKp2, matches);

    // restore the correct values in the match attributes
    for(int idxMatch=0;idxMatch<matches.size();idxMatch++){
      // we compared keypoint i of image 1
      matches[idxMatch].queryIdx=i;
      // with keypoint j of image 2
      matches[idxMatch].trainIdx=associateIdx[matches[idxMatch].trainIdx];
    }

    // append the matches found for this keypoint to those found for the previous ones
    matchesWithDist.insert(matchesWithDist.end(),matches.begin(),matches.end());

  }

// partially sort matchesWithDist by descriptor distance rather than Euclidean distance
  nth_element(matchesWithDist.begin(),    // initial position
              matchesWithDist.begin()+24, // position of the sorted element
              matchesWithDist.end());     // end position

Mat imageMatches;
Mat matchesMask;
drawMatches(
  image1,keypoints1, // 1st image and its keypoints
  image2,keypoints2, // 2nd image and its keypoints
  matchesWithDist,            // the matches
  imageMatches,      // the image produced
  Scalar::all(-1),   // color of the lines
  Scalar(255,255,255) //color of the keypoints
  );


  namedWindow( "Matches FREAK", CV_WINDOW_AUTOSIZE );
  imshow( "Matches FREAK", imageMatches );
  imwrite("resultat.png", imageMatches);
  

  
  /// Create a window and a trackbar
  namedWindow(transparency_window, WINDOW_AUTOSIZE );
  createTrackbar( "Threshold: ", transparency_window, &thresh, max_thresh, interface );
  //imshow(transparency_window,image1 );

  interface( 0, 0 );

  waitKey(0);
  return(0);
}
int main(int argc,char** argv){

Mat image1,image2;

const char* source_window = "Source image";

 /// Check the argument count before touching argv, then load the images
 if( argc != 3 )
   {
     printf( "No image data \n" );
     return 1;
   }

 image1 = imread( argv[1], 1 );
 image2 = imread( argv[2], 1 );

 if( !image1.data || !image2.data )
   {
     printf( "No image data \n" );
     return 1;
   }


    int cols=image1.cols;
    int rows=image1.rows;
    //  cout<<"\nmatrix size:" << image1.size();
    //  cout<<"\nmatrix type: \n" << image1.type();
    //  cout<<"\nflags" << image1.flags;
    //  cout<<"\ndims" << image1.dims;
    cout<<"\nrows" << image1.rows;
    cout<<"\ncols" << image1.cols;
    //  Point pt = Point(1,2);

    //  cout<<"\nnumber of channels: " << image1.channels();

    //  cout<< "\npoint 1 1 " << (int)image1.at<cv::Vec3b>(0,1)[1];

    /*
    for(int i=0;i<cols;i++){
	for(int j=0;j<rows;j++){
		image1.at<cv::Vec3b>(i,j)[0]=0;
		image1.at<cv::Vec3b>(i,j)[1]=0;
		image1.at<cv::Vec3b>(i,j)[2]=0;
	}
    }
    */

    cout<< "\nwhat is going on here?";

 // cout<<"\nimage1" <<  image1;

 /// vector of keypoints 
  vector<KeyPoint> keypoints1,keypoints2;



///Construct the SIFT feature detector object
  SiftFeatureDetector sift;

  sift.detect(image1,keypoints1);
  sift.detect(image2,keypoints2);

  namedWindow( "Image 1", CV_WINDOW_AUTOSIZE );
  imshow( "Image 1", image1 );
  namedWindow( "Image 2", CV_WINDOW_AUTOSIZE );
  imshow( "Image 2", image2 );
  // print the coordinates of the keypoints
	/*for(int i=0;i<keypoints1.size();i++){
        cout<<"\n\nkeypoint number " << i <<"\n";
	cout<<"\nkeypoints1 " <<  keypoints1[i].pt;
  	cout<<"\nkeypoints1x " <<  keypoints1[i].pt.x;
	cout<<"\nkeypoints1y " <<  keypoints1[i].pt.y;

	}*/


  /*Mat imcopy;
  image1.copyTo(imcopy);
  for(int i=0;i<keypoints1.size();i++){
     imcopy.at<cv::Vec3b>(keypoints1[i].pt.y,keypoints1[i].pt.x)[0]=0;
     imcopy.at<cv::Vec3b>(keypoints1[i].pt.y,keypoints1[i].pt.x)[1]=0;
     imcopy.at<cv::Vec3b>(keypoints1[i].pt.y,keypoints1[i].pt.x)[2]=255;
  }
  namedWindow( "Image copy", CV_WINDOW_AUTOSIZE );
  imshow( "Image copy",  imcopy );
  */

 
  cout << "\nsize of the keypoint vector: " << keypoints1.size(); 

  
  SiftDescriptorExtractor siftDesc;
  
  Mat descriptors1,descriptors2;
  siftDesc.compute(image1,keypoints1,descriptors1);
  siftDesc.compute(image2,keypoints2,descriptors2);
  
   // Construction of the matcher
BruteForceMatcher<L2<float> > matcher;

// Match the two image descriptors
vector<DMatch> matches;
matcher.match(descriptors1,descriptors2, matches);

nth_element(matches.begin(),    // initial position
          matches.begin()+24, // position of the sorted element
          matches.end());     // end position
      // (one could now erase all elements after the 25th)
	// display the element attributes
	//cout<< "\nmatches  " <<  matches;

	// print the matches


	for(int i=0;i<matches.size();i++){
//print the attributes
/*		cout<< "\n\npoint num " <<  i;		
		cout<< "\nimgIdx  " <<  matches[i].imgIdx ;	
		cout<< "\nqueryIdx   " <<  matches[i].queryIdx;
		cout<< "\ntrainIdx   " <<  matches[i].trainIdx;
		cout<< "\ndistance   " <<  matches[i].distance;
*/
                
/*
		while(matches[i].distance >100  && i<matches.size()){
			cout << "\ni= " << i;
			matches.erase(matches.begin()+i, matches.begin()+i+1);
		}
           */     
                
	}
        

for(int i=0;i<matches.size();i++){
cout<< "\nConnecting the point with coordinates x1= " << keypoints1[matches[i].queryIdx].pt.x;
		cout<< "\ny1= " << keypoints1[matches[i].queryIdx].pt.y;

		cout<< "\nwith the point with coordinates x2= " << keypoints2[matches[i].trainIdx].pt.x;
 		cout<< "\ny2= " << keypoints2[matches[i].trainIdx].pt.y;

}
	



      cout << '\n' << "number of matches:" << matches.size() << '\n';  
	
      
      //matches.erase(matches.begin(), matches.end());
      //keypoints1.erase(keypoints1.begin(), keypoints1.end());
      //keypoints2.erase(keypoints2.begin(), keypoints2.end());
      //matches.erase(matches.begin(), matches.begin()+1600);



Mat imageMatches;
Mat matchesMask;
drawMatches(
  image1,keypoints1, // 1st image and its keypoints
  image2,keypoints2, // 2nd image and its keypoints
  matches,            // the matches
  imageMatches,      // the image produced
  Scalar::all(-1),   // color of the lines
  Scalar(255,255,255) //color of the keypoints
  );
  namedWindow( "Matches SIFT", CV_WINDOW_AUTOSIZE );
  imshow( "Matches SIFT", imageMatches );
  imwrite("resultat.png", imageMatches);

  /*
  drawKeypoints(src,keypoints1,dst,cv::Scalar(255,255,255));
  cout << '\n' << keypoints1.size() << '\n';
  imshow( "Image 1", dst );
  
  imwrite("resultat.png", dst);
  */


  waitKey(0);
  
  return 0;
}
/**
 * @function detectAndDisplay
 * @brief Returns 0 if the object is found, 1 otherwise.
 */
int detectAndDisplay( Mat img_frame, Mat img_object, vector<KeyPoint> keypoints_object, Mat descriptors_object, vision::platePosition::Response &res, Mat H2)
{
	SiftFeatureDetector detector;       // SIFT takes no Hessian threshold (that is a SURF parameter)
	SiftDescriptorExtractor extractor;
	std::vector<KeyPoint> keypoints_frame;
	Mat descriptors_frame;
	
	//-- Step 1: Detect the keypoints
	detector.detect( img_frame, keypoints_frame );
	
	//-- Step 2: Calculate descriptors (feature vectors)
	extractor.compute( img_frame, keypoints_frame, descriptors_frame );
	
	//-- Step 3: Matching descriptor vectors using FLANN matcher
	FlannBasedMatcher matcher;
	std::vector< DMatch > matches;
	printf("size: descriptor_object rows: %d\t descriptors_frame rows: %d\n",descriptors_object.rows,descriptors_frame.rows);
	if(!descriptors_frame.rows){
		printf("!!null scene descriptor\n");
		return 1;
	}
	matcher.match( descriptors_object, descriptors_frame, matches );
	//printf("matches size: %d\n",matches.size());
	

	//-- Quick calculation of max and min distances between keypoints
	double max_dist = 0; double min_dist = 1000;
	for( int i = 0; i < descriptors_object.rows; i++ )
	{ double dist = matches[i].distance;
		if( dist < min_dist ) min_dist = dist;
		if( dist > max_dist ) max_dist = dist;
		//printf("i:%d\t",i);
	}
	printf("-- Max dist : %f \n", max_dist );
    printf("-- Min dist : %f \n", min_dist );

	
	//-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
	std::vector< DMatch > good_matches;
	for( int i = 0; i < descriptors_object.rows; i++ )
	{ 
		//~ if( matches[i].distance < 3*max(0.02,min_dist) )
		//~ { good_matches.push_back( matches[i]); }
		if( matches[i].distance < 250)
		{ good_matches.push_back( matches[i]); }
	}
	printf("good matches size %d\n",(int)good_matches.size());
	Mat img_matches;
	drawMatches( img_object, keypoints_object, img_frame, keypoints_frame,
			   good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
			   vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
			  
	//-- Localize the object from img_1 in img_2
	std::vector<Point2f> obj;
	std::vector<Point2f> scene;

	for( size_t i = 0; i < good_matches.size(); i++ )
	{
		//-- Get the keypoints from the good matches
		obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
		scene.push_back( keypoints_frame[ good_matches[i].trainIdx ].pt );
	}
	if (good_matches.size()<=9){
	  printf("insufficient good matches\n");
	  imshow( window_name, img_matches );
	  waitKey(0);
	  
      return 1;
	}
	else{
		Mat H = findHomography( obj, scene, CV_RANSAC );
		printf("lala\n");
		
		//-- Get the corners from the image_1 ( the object to be "detected" )
		std::vector<Point2f> obj_corners(4);
		obj_corners[0] = Point(0,0); obj_corners[1] = Point( img_object.cols, 0 );
		obj_corners[2] = Point( img_object.cols, img_object.rows ); obj_corners[3] = Point( 0, img_object.rows );
		std::vector<Point2f> scene_corners(4);
		
		perspectiveTransform( obj_corners, scene_corners, H);
		
		
		//-- Draw lines between the corners (the mapped object in the scene - image_2 )
		Point2f offset( (float)img_object.cols, 0);
		printf("image object size: row %d, col %d\n",img_object.rows,img_object.cols);
        printf("image scene size: row %d, col %d\n",img_frame.rows,img_frame.cols);
		line( img_matches, scene_corners[0] + offset, scene_corners[1] + offset, Scalar(0, 255, 0), 4 );
		line( img_matches, scene_corners[1] + offset, scene_corners[2] + offset, Scalar( 0, 255, 0), 4 );
		line( img_matches, scene_corners[2] + offset, scene_corners[3] + offset, Scalar( 0, 255, 0), 4 );
		line( img_matches, scene_corners[3] + offset, scene_corners[0] + offset, Scalar( 0, 255, 0), 4 );
        
        string point1 = "p1: "+tostr(scene_corners[0].x)+" "+tostr(scene_corners[0].y);
        string point2 = "p2: "+tostr(scene_corners[1].x)+" "+tostr(scene_corners[1].y);
        string point3 = "p3: "+tostr(scene_corners[2].x)+" "+tostr(scene_corners[2].y);
        string point4 = "p4: "+tostr(scene_corners[3].x)+" "+tostr(scene_corners[3].y);
        putText(img_matches, point1, scene_corners[0] + offset, FONT_HERSHEY_SCRIPT_SIMPLEX, 0.5, Scalar(255,0,255), 2);
        putText(img_matches, point2, scene_corners[1] + offset, FONT_HERSHEY_SCRIPT_SIMPLEX, 0.5, Scalar(255,0,255), 2);
        putText(img_matches, point3, scene_corners[2] + offset, FONT_HERSHEY_SCRIPT_SIMPLEX, 0.5, Scalar(255,0,255), 2);
        putText(img_matches, point4, scene_corners[3] + offset, FONT_HERSHEY_SCRIPT_SIMPLEX, 0.5, Scalar(255,0,255), 2);
        
        printf("point:\n%s\n%s\n%s\n%s\n",point1.c_str(),point2.c_str(),point3.c_str(),point4.c_str());
        
		imshow( window_name, img_matches );
        waitKey(0);
        
        
        // Find the plate's real-world position.
        // To recalibrate, update the pr_r* (real) and pr_p* (pixel) reference points.
        // position_reference, real-world coordinates
        Point2f pr_r1(0.852991670452,0.166859215521),pr_r2(0.870399996545, -0.185887708082),pr_r3( 0.588867961436,-0.170358598533),pr_r4(0.597280405865,0.178723491525);
        // position_reference, pixel coordinates (set to the plate)
        Point2f pr_p1(247.478,181.249),pr_p2(420.907,181.566),pr_p3(429.926,313.306),pr_p4(230.793,306.769);
    
        
        std::vector<Point2f> ref_pixel_position;    // known
        std::vector<Point2f> ref_real_position;     // measured by moving the Baxter hand to each point
        //std::vector<Point2f> plate_pixel_position; // = scene_corners (known)
        std::vector<Point2f> plate_real_position;   // to find
        
        ref_real_position.push_back(pr_r1);
        ref_real_position.push_back(pr_r2);
        ref_real_position.push_back(pr_r3);
        ref_real_position.push_back(pr_r4);
        
        ref_pixel_position.push_back(pr_p1);
        ref_pixel_position.push_back(pr_p2);
        ref_pixel_position.push_back(pr_p3);
        ref_pixel_position.push_back(pr_p4);
        
        
        Mat H2 = findHomography( ref_pixel_position, ref_real_position ); // note: shadows the H2 parameter
        
        perspectiveTransform( scene_corners, plate_real_position, H2);
        
        //print out
        string plate_point1 = "p1: "+tostr(plate_real_position[0].x)+" "+tostr(plate_real_position[0].y);
        string plate_point2 = "p2: "+tostr(plate_real_position[1].x)+" "+tostr(plate_real_position[1].y);
        string plate_point3 = "p3: "+tostr(plate_real_position[2].x)+" "+tostr(plate_real_position[2].y);
        string plate_point4 = "p4: "+tostr(plate_real_position[3].x)+" "+tostr(plate_real_position[3].y);
        printf("plate real position:\n%s\n%s\n%s\n%s\n",plate_point1.c_str(),plate_point2.c_str(),plate_point3.c_str(),plate_point4.c_str());
        
        
        //write to service response
        res.p1[0] = plate_real_position[0].x;
        res.p1[1] = plate_real_position[0].y;
        res.p2[0] = plate_real_position[1].x;
        res.p2[1] = plate_real_position[1].y;
        res.p3[0] = plate_real_position[2].x;
        res.p3[1] = plate_real_position[2].y;
        res.p4[0] = plate_real_position[3].x;
        res.p4[1] = plate_real_position[3].y;
        return 0;
    }
}
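
// --- Added note: the calibration step in detectAndDisplay maps image pixels to
// --- robot workspace coordinates with a plane homography fitted from four
// --- hand-measured reference pairs. A minimal standalone sketch of that idea;
// --- the function name and arguments are illustrative, not part of the original.
#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/calib3d/calib3d.hpp>

std::vector<cv::Point2f> pixelsToWorkspace(const std::vector<cv::Point2f> &pixelPts,
                                           const std::vector<cv::Point2f> &refPixel,
                                           const std::vector<cv::Point2f> &refReal)
{
    // Fit a pixel -> real-world homography from 4+ reference correspondences
    // (e.g., measured by moving the robot hand to each reference point).
    cv::Mat H = cv::findHomography(refPixel, refReal);
    std::vector<cv::Point2f> realPts;
    cv::perspectiveTransform(pixelPts, realPts, H);
    return realPts;
}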
int main()
{
	//step1 load image
	Mat img1=imread("alcatraz1.jpg");
	Mat img2=imread("alcatraz2.jpg");
	Mat gimg1=imread("alcatraz1.jpg",CV_LOAD_IMAGE_GRAYSCALE);
	Mat gimg2=imread("alcatraz2.jpg",CV_LOAD_IMAGE_GRAYSCALE);
	//cvtColor(img1,gimg1,CV_BGR2GRAY);
	cout<<"compute keypoint"<<endl;
	//step2 compute keypoint
	SiftFeatureDetector detector;
	vector<KeyPoint> kp1, kp2; 
	detector.detect(gimg1,kp1);
	detector.detect(gimg2,kp2);
	//step3 compute descriptor
	SiftDescriptorExtractor extractor;
	Mat descriptor1,descriptor2;
	extractor.compute(gimg1,kp1,descriptor1);
	extractor.compute(gimg2,kp2,descriptor2);
	cout<<"compute match"<<endl;
	//step4 gimg1 <-->gimg2 match
	BFMatcher matcher(NORM_L2);
	vector<DMatch> matches1,matches2,twoside_matches;
	matcher.match(descriptor1,descriptor2,matches1);
	matcher.match(descriptor2,descriptor1,matches2);
	cout<<"match end"<<endl;
	vector<DMatch>::iterator it1;
	vector<DMatch>::iterator it2;
	for(it1 = matches1.begin();it1 != matches1.end();it1++)
	{	for(it2 =matches2.begin();it2 != matches2.end();it2++)
		{
			if((*it1).queryIdx == (*it2).trainIdx && (*it2).queryIdx == (*it1).trainIdx)
			{
				twoside_matches.push_back(DMatch((*it1).queryIdx,(*it1).trainIdx,(*it1).distance));
				//break;
			}	
		}
	}
	//step5 draw twoside_matches
	Mat imgmatches;
	drawMatches(gimg1,kp1,gimg2,kp2,twoside_matches,imgmatches);
	//load matches keypoint
	int n=twoside_matches.size();
	Mat_<float> matches_kp1(3,n),matches_kp2(3,n);
//	vector<DMatch>::iterator it3;
//	for(it3 = twoside_matches.begin();it3 != twoside_matches.end();it3++)
//	{
//		matches_kp1.puch_back(kp);
//	}
	for(int i=0;i < twoside_matches.size();i++)
	{
		Point2f x1=kp1[twoside_matches[i].queryIdx].pt;
		Point2f x2=kp2[twoside_matches[i].trainIdx].pt; // trainIdx indexes kp2 (queryIdx here was a bug)
		matches_kp1(0,i)=x1.x;
		matches_kp1(1,i)=x1.y;
		matches_kp1(2,i)=1;
		matches_kp2(0,i)=x2.x;
		matches_kp2(1,i)=x2.y;
		matches_kp2(2,i)=1;
	}
	cout<<"save keypoints"<<endl;
	FileStorage fs("points.yml",FileStorage::WRITE);
	fs<<"x1"<<matches_kp1;
	fs<<"x2"<<matches_kp2;
	fs.release();
        cout<<"show match"<<endl;
	namedWindow("img1",WINDOW_NORMAL);
	namedWindow("img2",WINDOW_NORMAL);
	namedWindow("imgmatch",WINDOW_NORMAL);
	imshow("img1",gimg1);
	imshow("img2",gimg2);
	imshow("imgmatch",imgmathces);
	waitKey(0);

	return 0;
}
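// --- Added note: the nested loop above is an O(n^2) mutual-consistency check.
// --- BFMatcher performs the same symmetric test natively via its crossCheck
// --- flag; a sketch, assuming descriptor Mats like descriptor1/descriptor2 above:
void mutualMatchSketch(const cv::Mat &descriptor1, const cv::Mat &descriptor2,
                       std::vector<cv::DMatch> &mutual_matches)
{
    // With crossCheck = true, a pair (i, j) is kept only when j is i's best
    // match and i is j's best match, replicating the nested loop above.
    cv::BFMatcher crossMatcher(cv::NORM_L2, true);
    crossMatcher.match(descriptor1, descriptor2, mutual_matches);
}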
int main(int argc, char** argv)
{
    if( argc < 2 )
    {
        printPrompt( argv[0] );
        return -1;
    }

    initModule_nonfree();

    // Get Input Data
    ifstream file(argv[1]);
    if ( !file.is_open() )
        return false;
    
    string str;
    
        // Image Name
    getline( file, str ); getline( file, str );
    string image_name = str;
        // Cloud Name
    getline( file, str ); getline( file, str );
    string cloud_name = str;
        // width of images to be created.
    getline( file, str ); getline( file, str );
    int w = atoi(str.c_str());
        // height of images to be created
    getline( file, str ); getline( file, str );
    int h = atoi(str.c_str());
        // resolution of voxel grids
    getline( file, str ); getline( file, str );
    float r = atof(str.c_str());
        // f (distance from pinhole)
    getline( file, str ); getline( file, str );
    float f = atof(str.c_str());
        // thetax (initial rotation about X Axis of map)
    getline( file, str ); getline( file, str );
    float thetaX = atof(str.c_str());
        // thetay (initial rotation about Y Axis of map)
    getline( file, str ); getline( file, str );
    float thetaY = atof(str.c_str());
        // number of points to go to
    getline( file, str ); getline( file, str );
    float nop = atoi(str.c_str());
        // Number of divisions
    getline( file, str ); getline( file, str );
    float divs = atoi(str.c_str());
        // Number of images to return
    getline( file, str ); getline( file, str );
    int numtoreturn = atoi(str.c_str());    
        // Should we load or create photos?
    getline( file, str ); getline( file, str );
    string lorc = str;
        // Directory to look for photos
    getline( file, str ); getline( file, str );
    string dir = str;
        // Directory to look for kp and descriptors
    getline( file, str ); getline( file, str );
    string kdir = str;
        // save photos?
    getline( file, str ); getline( file, str );
    string savePhotos = str;
    
    file.close();
    // Done Getting Input Data

    map<vector<float>, Mat> imagemap;
    map<vector<float>, Mat> surfmap;
    map<vector<float>, Mat> siftmap;
    map<vector<float>, Mat> orbmap;
    map<vector<float>, Mat> fastmap;
    imagemap.clear();

    vector<KeyPoint> SurfKeypoints;
    vector<KeyPoint> SiftKeypoints;
    vector<KeyPoint> OrbKeypoints;
    vector<KeyPoint> FastKeypoints;
    Mat SurfDescriptors;
    Mat SiftDescriptors;
    Mat OrbDescriptors;
    Mat FastDescriptors;

    int minHessian = 300;

    SurfFeatureDetector SurfDetector (minHessian);  // Hessian threshold
    SiftFeatureDetector SiftDetector (minHessian);  // for SIFT the first parameter is nfeatures, not a Hessian threshold
    OrbFeatureDetector OrbDetector (minHessian);    // for ORB it is nfeatures
    FastFeatureDetector FastDetector (minHessian);  // for FAST it is the intensity threshold


    SurfDescriptorExtractor SurfExtractor;
    SiftDescriptorExtractor SiftExtractor;
    OrbDescriptorExtractor OrbExtractor;

    if ( !fs::exists( dir ) || lorc == "c" )
    { // Load Point Cloud and render images
        PointCloud<PT>::Ptr cloud (new pcl::PointCloud<PT>);
        io::loadPCDFile<PT>(cloud_name, *cloud);

        Eigen::Affine3f tf = Eigen::Affine3f::Identity();
        tf.rotate (Eigen::AngleAxisf (thetaX, Eigen::Vector3f::UnitX()));
        pcl::transformPointCloud (*cloud, *cloud, tf);
        tf = Eigen::Affine3f::Identity();
        tf.rotate (Eigen::AngleAxisf (thetaY, Eigen::Vector3f::UnitY()));
        pcl::transformPointCloud (*cloud, *cloud, tf);

        // Create images from point cloud
        imagemap = render::createImages(cloud, nop, w, h, r, f);

        if (savePhotos == "y")
        {
            for (map<vector<float>, Mat>::iterator i = imagemap.begin(); i != imagemap.end(); ++i)
            {
                // Create image name and storagename
                string imfn = dir + "/";
                string kpfn = kdir + "/";
                for (int j = 0; j < i->first.size(); j++)
                {
                    imfn += boost::to_string(i->first[j]) + " ";
                    kpfn += boost::to_string(i->first[j]) + " ";
                }
                imfn += ".jpg";
                imwrite(imfn, i->second);

                // Detect keypoints, add to keypoint map. Same with descriptors

                SurfDetector.detect(i->second, SurfKeypoints);
                SiftDetector.detect(i->second, SiftKeypoints);
                OrbDetector.detect(i->second, OrbKeypoints);
                FastDetector.detect(i->second, FastKeypoints);

                SurfExtractor.compute(i->second, SurfKeypoints, SurfDescriptors);
                SiftExtractor.compute(i->second, SiftKeypoints, SiftDescriptors);
                OrbExtractor.compute(i->second, OrbKeypoints, OrbDescriptors);
                // FAST provides no descriptor of its own; SIFT descriptors are
                // computed on the FAST keypoints.
                SiftExtractor.compute(i->second, FastKeypoints, FastDescriptors);

                // Store KP and Descriptors in yaml file.

                kpfn += ".yml";
                FileStorage store(kpfn, cv::FileStorage::WRITE);
                write(store,"SurfKeypoints",SurfKeypoints);
                write(store,"SiftKeypoints",SiftKeypoints);
                write(store,"OrbKeypoints", OrbKeypoints);
                write(store,"FastKeypoints",FastKeypoints);
                write(store,"SurfDescriptors",SurfDescriptors);
                write(store,"SiftDescriptors",SiftDescriptors);
                write(store,"OrbDescriptors", OrbDescriptors);
                write(store,"FastDescriptors",FastDescriptors);
                store.release();

                surfmap[i->first] = SurfDescriptors;
                siftmap[i->first] = SiftDescriptors;
                orbmap[i->first]  = OrbDescriptors;
                fastmap[i->first] = FastDescriptors;
            }
        }
    } 
    else 
    { // load images from the folder dir
        // First look into the folder to get a list of filenames
        vector<fs::path> ret;
        const char * pstr = dir.c_str();
        fs::path p(pstr);
        get_all(pstr, ret);

        for (int i = 0; i < ret.size(); i++)
        {
            // Load Image via filename
            string fn = ret[i].string();
            istringstream iss(fn);
            vector<string> tokens;
            copy(istream_iterator<string>(iss), istream_iterator<string>(), back_inserter<vector<string> >(tokens));

            // Construct ID from filename
            vector<float> ID;
            for (int i = 0; i < 6; i++) // 6 because there are three location floats and three direction floats
                ID.push_back(::atof(tokens[i].c_str()));
            string imfn = dir + "/" + fn;

            // Read image and add to imagemap.
            Mat m = imread(imfn);
            imagemap[ID] = m;

            // Create Filename for loading Keypoints and descriptors
            string kpfn = kdir + "/";
            for (int j = 0; j < ID.size(); j++)
            {
                kpfn += boost::to_string(ID[j]) + " ";
            }

            kpfn = kpfn+ ".yml";
            
            // Create filestorage item to read from and add to map.
            FileStorage store(kpfn, cv::FileStorage::READ);

            FileNode n1 = store["SurfKeypoints"];
            read(n1,SurfKeypoints);
            FileNode n2 = store["SiftKeypoints"];
            read(n2,SiftKeypoints);
            FileNode n3 = store["OrbKeypoints"];
            read(n3,OrbKeypoints);
            FileNode n4 = store["FastKeypoints"];
            read(n4,FastKeypoints);
            FileNode n5 = store["SurfDescriptors"];
            read(n5,SurfDescriptors);
            FileNode n6 = store["SiftDescriptors"];
            read(n6,SiftDescriptors);
            FileNode n7 = store["OrbDescriptors"];
            read(n7,OrbDescriptors);
            FileNode n8 = store["FastDescriptors"];
            read(n8,FastDescriptors);

            store.release();

            surfmap[ID] = SurfDescriptors;
            siftmap[ID] = SiftDescriptors;
            orbmap[ID]  = OrbDescriptors;
            fastmap[ID] = FastDescriptors;
        }
    }

    TickMeter tm;
    tm.reset();
    cout << "<\n  Analyzing Images ..." << endl;

    // We have a bunch of images, now we compute their grayscale and black and white.
    map<vector<float>, Mat> gsmap;
    map<vector<float>, Mat> bwmap;
    for (map<vector<float>, Mat>::iterator i = imagemap.begin(); i != imagemap.end(); ++i)
    {
        vector<float> ID = i->first;
        Mat Image = i->second; // shares data with the map entry, so the blur modifies it in place
        GaussianBlur( Image, Image, Size(5,5), 0, 0, BORDER_DEFAULT );

        gsmap[ID] = averageImage::getPixSumFromImage(Image, divs);
        bwmap[ID] = averageImage::aboveBelow(gsmap[ID]);
    }
    Mat image = imread(image_name);
    Mat gsimage = averageImage::getPixSumFromImage(image, divs);
    Mat bwimage = averageImage::aboveBelow(gsimage);

    // cout << gsimage <<endl;
    imwrite("GS.png", gsimage);
    namedWindow("GSIMAGE (Line 319)");
    imshow("GSIMAGE (Line 319)", gsimage);
    waitKey(0);

    vector<KeyPoint> imgSurfKeypoints;
    vector<KeyPoint> imgSiftKeypoints;
    vector<KeyPoint> imgOrbKeypoints;
    vector<KeyPoint> imgFastKeypoints;
    Mat imgSurfDescriptors;
    Mat imgSiftDescriptors;
    Mat imgOrbDescriptors;
    Mat imgFastDescriptors;

    SurfDetector.detect(image, imgSurfKeypoints);
    SiftDetector.detect(image, imgSiftKeypoints);
    OrbDetector.detect(image, imgOrbKeypoints);
    FastDetector.detect(image, imgFastKeypoints);

    SurfExtractor.compute(image, imgSurfKeypoints, imgSurfDescriptors);
    SiftExtractor.compute(image, imgSiftKeypoints, imgSiftDescriptors);
    OrbExtractor.compute(image, imgOrbKeypoints, imgOrbDescriptors);
    // As above, the FAST keypoints get SIFT descriptors.
    SiftExtractor.compute(image, imgFastKeypoints, imgFastDescriptors);


    tm.start();

    cout << ">\n<\n  Comparing Images ..." << endl;

    // We have their features, now compare them!
    map<vector<float>, float> gssim; // Gray Scale Similarity
    map<vector<float>, float> bwsim; // Above Below Similarity
    map<vector<float>, float> surfsim;
    map<vector<float>, float> siftsim;
    map<vector<float>, float> orbsim;
    map<vector<float>, float> fastsim;

    for (map<vector<float>, Mat>::iterator i = gsmap.begin(); i != gsmap.end(); ++i)
    {
        vector<float> ID = i->first;
        gssim[ID] = similarities::getSimilarity(i->second, gsimage);
        bwsim[ID] = similarities::getSimilarity(bwmap[ID], bwimage); 
        surfsim[ID] = similarities::compareDescriptors(surfmap[ID], imgSurfDescriptors);
        siftsim[ID] = similarities::compareDescriptors(siftmap[ID], imgSiftDescriptors);
        orbsim[ID] = 0;//similarities::compareDescriptors(orbmap[ID], imgOrbDescriptors);
        fastsim[ID] = 0;//similarities::compareDescriptors(fastmap[ID], imgFastDescriptors);
    }

    map<vector<float>, int> top;

    bool gotone = false;
    typedef map<vector<float>, int>::iterator iter;

    // Choose the best ones!
    for (map<vector<float>, Mat>::iterator i = imagemap.begin(); i != imagemap.end(); ++i)
    {
        vector<float> ID = i->first;

        // Weighted similarity score (SURF dominates); truncated to int.
        int sim = /* gssim[ID] + 0.5*bwsim[ID] + */ 5*surfsim[ID] + 0.3*siftsim[ID] + orbsim[ID] + fastsim[ID];

        // cout << surfsim[ID] << "\t";
        // cout << siftsim[ID] << "\t";
        // cout << orbsim[ID] << "\t";
        // cout << fastsim[ID] << endl;

        if (!gotone)
        {
            top[ID] = sim;
            gotone = true;
        }

        iter it = top.begin();
        iter end = top.end();
        int max_value = it->second;
        vector<float> max_ID = it->first;
        for( ; it != end; ++it) 
        {
            int current = it->second;
            if(current > max_value) 
            {
                max_value = it->second;
                max_ID = it->first;
            }
        }
        // cout << "Sim: " << sim << "\tmax_value: " << max_value << endl;
        if (top.size() < numtoreturn)
            top[ID] = sim;
        else
        {
            if (sim < max_value)
            {
                top[ID] = sim;
                top.erase(max_ID);
            }
        }
    }
    tm.stop();
    double s = tm.getTimeSec();


    cout << ">\n<\n  Writing top " << numtoreturn << " images ..." << endl;

    int count = 1;
    namedWindow("Image");
    namedWindow("Match");
    namedWindow("ImageBW");
    namedWindow("MatchBW");
    namedWindow("ImageGS");
    namedWindow("MatchGS");

    imshow("Image", image);
    imshow("ImageBW", bwimage);
    imshow("ImageGS", gsimage);


    vector<KeyPoint> currentPoints;

    for (iter i = top.begin(); i != top.end(); ++i)
    {
        vector<float> ID = i->first;

        cout << "  Score: "<< i->second << "\tGrayScale: " << gssim[ID] << "\tBW: " << bwsim[ID] << "  \tSURF: " << surfsim[ID] << "\tSIFT: " << siftsim[ID] << endl;
        string fn = "Sim_" + boost::to_string(count) + "_" + boost::to_string(i->second) + ".png";
        imwrite(fn, imagemap[ID]);
        count++;

        normalize(bwmap[ID], bwmap[ID], 0, 255, NORM_MINMAX, CV_64F);
        normalize(gsmap[ID], gsmap[ID], 0, 255, NORM_MINMAX, CV_64F);

        imshow("Match", imagemap[ID]);
        imshow("MatchBW", bwmap[ID]);
        imshow("MatchGS", gsmap[ID]);


        waitKey(0);

    }

    cout << ">\nComparisons took " << s << " seconds for " << imagemap.size() << " images (" 
        << (int) imagemap.size()/s << " images per second)." << endl;

return 0;
}
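
// --- Added note: the top-N selection above rescans the candidate map for every
// --- image. An equivalent, simpler formulation collects all scores first and
// --- partially sorts once; a sketch (the type and function names are illustrative):
#include <algorithm>
#include <utility>
#include <vector>

typedef std::pair<float, std::vector<float> > ScoredID; // (score, image ID)

std::vector<ScoredID> keepBestN(std::vector<ScoredID> scores, size_t n)
{
    if (n > scores.size()) n = scores.size();
    // Lower score = better, matching the 'sim < max_value' test above.
    std::partial_sort(scores.begin(), scores.begin() + n, scores.end());
    scores.resize(n);
    return scores;
}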
void FeatureMatching(const Mat& img_1, 
				   const Mat& img_2, 
				   vector<KeyPoint>& keypts1,
				   vector<KeyPoint>& keypts2,
				   vector<KeyPoint>& keypts1_good,
				   vector<KeyPoint>& keypts2_good,
				   	vector<DMatch>* matches,
					int method)
{
	Mat descriptors_1, descriptors_2;

	if(method == 1) // SURF descriptor
	{
		double minHessian = 400;
		SurfFeatureDetector detector( minHessian);

		detector.detect( img_1,keypts1);
		detector.detect( img_2, keypts2);

		//-- Step 2: Calculate descriptors (feature vectors)
		SurfDescriptorExtractor extractor;

		extractor.compute( img_1, keypts1, descriptors_1 );
		extractor.compute( img_2, keypts2, descriptors_2 );
		//-- Draw only "good" matches
		/*Mat img_matches;
		drawMatches( img_1, keypts1, img_2, keypts2,
						good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
						vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );		
			//-- Show detected matches
		imshow( "Feature Matches", img_matches );
		waitKey(0);
		destroyWindow("Feature Matches");*/

	}
	if(method == 2) // ORB descriptor (binary, BRIEF-based)
	{
		Ptr<FeatureDetector> detector = FeatureDetector::create("ORB"); //"BRISK"
		detector->detect(img_1, keypts1);
		detector->detect(img_2, keypts2);

		Ptr<DescriptorExtractor> extractor = DescriptorExtractor::create("ORB");
		extractor->compute(img_1, keypts1, descriptors_1);
		extractor->compute(img_2, keypts2, descriptors_2);
	}
	if(method == 3) // SIFT descriptor
	{
		SiftFeatureDetector detector;
		detector.detect( img_1,keypts1);
		detector.detect( img_2, keypts2);

		//-- Step 2: Calculate descriptors (feature vectors)
		SiftDescriptorExtractor extractor;

		
		extractor.compute( img_1,keypts1, descriptors_1 );
		extractor.compute( img_2, keypts2, descriptors_2 );

	}
	if(method == 4) // KAZE descriptor
	{
		/*KAZEOptions options;
		options.img_width = img_1.cols;
		options.img_height = img_1.rows;
		KAZE evolution1(options);

		evolution1.Create_Nonlinear_Scale_Space(img_1);
		evolution1.Feature_Detection(keypts1);
		evolution1.Feature_Description(keypts1,descriptors_1);

		options.img_width = img_2.cols;
		options.img_height = img_2.rows;
		KAZE evolution2(options);

		evolution2.Create_Nonlinear_Scale_Space(img_2);
		evolution2.Feature_Detection(keypts2);
		evolution2.Feature_Description(keypts2,descriptors_2);*/
	}

	//-- Step 3: Matching descriptor vectors using BF matcher
	BFMatcher matcher(NORM_L2, true); // cross-check on; note ORB (method 2) would call for NORM_HAMMING
	std::vector< DMatch > matches_;
	if (matches == NULL) {
		matches = &matches_;
	}
	matcher.match( descriptors_1, descriptors_2, *matches ); // Match the feature points

	double max_dist = 0; double min_dist = 1000.0;
	//-- Quick calculation of max and min distances between keypoints
	for(unsigned int i = 0; i < matches->size(); i++ )
	{ 
		double dist = (*matches)[i].distance;
		if( dist < min_dist ) min_dist = dist;
		if( dist > max_dist ) max_dist = dist;
	}
	std::vector< DMatch > good_matches;
	vector<KeyPoint> imgpts1_good,imgpts2_good;

	if (min_dist <= 0) {
		min_dist = 10.0;
	}

	double cutoff = 4.0*min_dist;//4.0*min_dist;
	std::set<int> existing_trainIdx;
	for(unsigned int i = 0; i < matches->size(); i++ )
	{ 
		if ((*matches)[i].trainIdx <= 0) {
			(*matches)[i].trainIdx = (*matches)[i].imgIdx;
		}

		if( existing_trainIdx.find((*matches)[i].trainIdx) == existing_trainIdx.end() && 
			(*matches)[i].trainIdx >= 0 && (*matches)[i].trainIdx < (int)(keypts2.size()) &&
			(*matches)[i].distance > 0.0 && (*matches)[i].distance < cutoff ) 
		{
			good_matches.push_back( (*matches)[i]);
			keypts1_good.push_back(keypts1[(*matches)[i].queryIdx]);
			keypts2_good.push_back(keypts2[(*matches)[i].trainIdx]);
			existing_trainIdx.insert((*matches)[i].trainIdx);
		}
	}
}
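// --- Added note: FeatureMatching filters with a 4*min_dist cutoff, which is
// --- sensitive to the single best match. Lowe's ratio test with knnMatch is a
// --- common, more robust alternative; a sketch on descriptor Mats like those above:
void ratioTestSketch(const cv::Mat &descriptors_1, const cv::Mat &descriptors_2,
                     std::vector<cv::DMatch> &good)
{
    cv::BFMatcher matcher(cv::NORM_L2);
    std::vector<std::vector<cv::DMatch> > knn;
    matcher.knnMatch(descriptors_1, descriptors_2, knn, 2); // two nearest neighbours

    for (size_t i = 0; i < knn.size(); ++i) {
        // Keep a match only when the best distance is clearly smaller than
        // the second best; 0.7 is the conventional ratio.
        if (knn[i].size() == 2 && knn[i][0].distance < 0.7f * knn[i][1].distance)
            good.push_back(knn[i][0]);
    }
}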
//--------------------------------------[ main() function ]-----------------------------------------
//          Description: entry point of the console application; execution starts here
//---------------------------------------------------------------------------------------------------
int main()
{
	// [0] Change the console text color
	system("color 5F");

	ShowHelpText();

	// [1] Load the image, show it, and convert it to grayscale
	Mat trainImage = imread("1.jpg"), trainImage_gray;
	imshow("Original image", trainImage);
	cvtColor(trainImage, trainImage_gray, CV_BGR2GRAY);

	// [2] Detect SIFT keypoints and extract descriptors from the training image
	vector<KeyPoint> train_keyPoint;
	Mat trainDescription;
	SiftFeatureDetector featureDetector;
	featureDetector.detect(trainImage_gray, train_keyPoint);
	SiftDescriptorExtractor featureExtractor;
	featureExtractor.compute(trainImage_gray, train_keyPoint, trainDescription);

	// [3] Brute-force matching on the descriptors
	BFMatcher matcher;
	vector<Mat> train_desc_collection(1, trainDescription);
	matcher.add(train_desc_collection);
	matcher.train();

	// [4] Open the video capture
	VideoCapture cap(0);
	unsigned int frameCount = 0; // frame counter

	// [5] Loop until the 'q' key is pressed
	while(char(waitKey(1)) != 'q')
	{
		//<1> Per-frame setup
		double time0 = static_cast<double>(getTickCount( )); // record the start time
		Mat captureImage, captureImage_gray;
		cap >> captureImage; // grab a frame from the camera
		if(captureImage.empty())
			continue;

		//<2> Convert the frame to grayscale
		cvtColor(captureImage, captureImage_gray, CV_BGR2GRAY);

		//<3> Detect SIFT keypoints and extract descriptors from the test frame
		vector<KeyPoint> test_keyPoint;
		Mat testDescriptor;
		featureDetector.detect(captureImage_gray, test_keyPoint);
		featureExtractor.compute(captureImage_gray, test_keyPoint, testDescriptor);

		//<4> Match the test descriptors against the trained set
		vector<vector<DMatch> > matches;
		matcher.knnMatch(testDescriptor, matches, 2);

		//<5> Keep the good matches using Lowe's ratio test
		vector<DMatch> goodMatches;
		for(unsigned int i = 0; i < matches.size(); i++)
		{
			if(matches[i][0].distance < 0.6 * matches[i][1].distance)
				goodMatches.push_back(matches[i][0]);
		}

		//<6> Draw the matches and show them
		Mat dstImage;
		drawMatches(captureImage, test_keyPoint, trainImage, train_keyPoint, goodMatches, dstImage);
		imshow("Matches", dstImage);

		//<7> Print the frame rate
		cout << "\t> Current FPS: " << getTickFrequency() / (getTickCount() - time0) << endl;
	}

	return 0;
}
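// --- Added note: the loop above re-matches against a single training image
// --- each frame with a brute-force matcher. For larger training sets, a FLANN
// --- index can speed this up with the same add/train/knnMatch flow; in real
// --- use, add/train once before the capture loop. A sketch, assuming Mats like
// --- trainDescription/testDescriptor above (SIFT descriptors are CV_32F, as FLANN requires):
void flannVideoMatcherSketch(const cv::Mat &trainDescription, const cv::Mat &testDescriptor,
                             std::vector<std::vector<cv::DMatch> > &knnMatches)
{
    cv::FlannBasedMatcher flann;
    std::vector<cv::Mat> train_desc(1, trainDescription);
    flann.add(train_desc);
    flann.train(); // builds the KD-tree index
    flann.knnMatch(testDescriptor, knnMatches, 2);
}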
Example #29
0
/**
 * @function main
 * @brief Main function
 */
int main( int argc, char** argv )
{
  if( argc != 3 )
  { readme(); return -1; }

  Mat img_1 = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );
  Mat img_2 = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE );
  int s_row = (img_1.rows < img_2.rows) ? img_1.rows : img_2.rows;
  int s_col = (img_1.cols < img_2.cols) ? img_1.cols : img_2.cols;
  //s_row = s_row/2;
  //s_col = s_col/2;
  resize( img_1, img_1, Size(s_col, s_row), 0, 0, INTER_CUBIC ); // Size is (width, height)
  resize( img_2, img_2, Size(s_col, s_row), 0, 0, INTER_CUBIC );

  if( !img_1.data || !img_2.data )
  { std::cout<< " --(!) Error reading images " << std::endl; return -1; }

  //-- Step 1: Detect the keypoints using the SIFT detector
  SiftFeatureDetector detector;

  std::vector<KeyPoint> keypoints_1, keypoints_2;

  detector.detect( img_1, keypoints_1 );
  detector.detect( img_2, keypoints_2 );

  //-- Step 2: Calculate descriptors (feature vectors)
  SiftDescriptorExtractor extractor;

  Mat descriptors_1, descriptors_2;

  extractor.compute( img_1, keypoints_1, descriptors_1 );
  extractor.compute( img_2, keypoints_2, descriptors_2 );

  //-- Step 3: Matching descriptor vectors using FLANN matcher
  FlannBasedMatcher matcher;
  //BFMatcher bf_matcher(NORM_L2, false);
  std::vector< DMatch > matches;
  matcher.match( descriptors_1, descriptors_2, matches );

  double max_dist = 0; double min_dist = 400;

  //-- Quick calculation of max and min distances between keypoints
  for( int i = 0; i < descriptors_1.rows; i++ )
  { double dist = matches[i].distance;
    if( dist < min_dist ) min_dist = dist;
    if( dist > max_dist ) max_dist = dist;
  }

  printf("-- Max dist : %f \n", max_dist );
  printf("-- Min dist : %f \n", min_dist );

  //-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist,
  //-- or a small arbitary value ( 0.02 ) in the event that min_dist is very
  //-- small)
  //-- PS.- radiusMatch can also be used here.
  std::vector< DMatch > good_matches;

  for( int i = 0; i < descriptors_1.rows; i++ )
  { if( matches[i].distance <= max(2*min_dist, 0.02) )
    { good_matches.push_back( matches[i]); }
  }

  //-- Draw only "good" matches
  Mat img_matches;
  drawMatches( img_1, keypoints_1, img_2, keypoints_2,
               good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
               vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

  //-- Show detected matches
  namedWindow("Good Matches", CV_WINDOW_NORMAL); 
  resize(img_matches,img_matches, Size(1280,720), 0, 0, INTER_CUBIC); 
  imshow( "Good Matches", img_matches ); 
  resizeWindow("Good Matches", 1280, 720);

  for( int i = 0; i < (int)good_matches.size(); i++ )
  { printf( "-- %F Good Match [%d] Keypoint 1: %d  -- Keypoint 2: %d  \n", good_matches[i].distance,i, good_matches[i].queryIdx, good_matches[i].trainIdx ); }

  waitKey(0);

  return 0;
}