Example #1
Mat tSIFT(String path)
{
	Mat img = imread(path, CV_LOAD_IMAGE_GRAYSCALE);
	// keypoint descriptor matrix
	Mat des;
	if (!img.data){
		std::cout << "Can't open" << std::endl;
		system("Pause");
		exit(0);
	}

	SiftFeatureDetector detector;
	std::vector<KeyPoint> tSIFTkp;
	detector.detect(img, tSIFTkp);
	Mat img1;
	drawKeypoints(img, tSIFTkp, img1, Scalar::all(-1), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
	// feature extraction
	SiftDescriptorExtractor extractor;
	// compute a descriptor for each keypoint
	extractor.compute(img, tSIFTkp, des);

	showImg(img1);

	return des;
}
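Note: the listings here use the OpenCV 2.x nonfree API (SiftFeatureDetector / SiftDescriptorExtractor). On newer OpenCV the same flow looks roughly like the sketch below, assuming OpenCV 4.4+ where SIFT lives in the core features2d module (on 3.x, use cv::xfeatures2d::SIFT::create instead):

#include <opencv2/imgcodecs.hpp>
#include <opencv2/features2d.hpp>

// Sketch of the same detect+describe flow on OpenCV >= 4.4 (assumed version).
cv::Mat modernSIFT(const std::string &path)
{
	cv::Mat img = cv::imread(path, cv::IMREAD_GRAYSCALE);
	std::vector<cv::KeyPoint> keypoints;
	cv::Mat descriptors;
	cv::Ptr<cv::SIFT> sift = cv::SIFT::create();
	sift->detectAndCompute(img, cv::noArray(), keypoints, descriptors);
	return descriptors;
}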
Example #2
File: pyramids.cpp  Project: yca/VideoAI
Mat Pyramids::computeFeatures(const Mat &m, vector<KeyPoint> &keypoints)
{
	Mat features;
	SiftDescriptorExtractor ex;
	ex.compute(m, keypoints, features);
	return features;
}
Example #3
int SIFTfeatureCalculate(Mat &img, vector<KeyPoint> &keypoints, Mat &descriptors ){
    SiftFeatureDetector detector;
    SiftDescriptorExtractor extractor;
    
    detector.detect( img, keypoints );
    extractor.compute( img, keypoints, descriptors );
    return 0;	// the function is declared int but originally returned nothing
}
Example #4
void detectSiftMatchWithOpenCV(const char* img1_path, const char* img2_path, MatrixXf &match) {
  Mat img1 = imread(img1_path);   
  Mat img2 = imread(img2_path);   

  SiftFeatureDetector detector;
  SiftDescriptorExtractor extractor;
  vector<KeyPoint> key1;
  vector<KeyPoint> key2;
  Mat desc1, desc2;
  detector.detect(img1, key1);
  detector.detect(img2, key2);
  extractor.compute(img1, key1, desc1);
  extractor.compute(img2, key2, desc2);

  FlannBasedMatcher matcher;
  vector<DMatch> matches;
  matcher.match(desc1, desc2, matches);

  match.resize(matches.size(), 6);
  cout << "match count: " << matches.size() << endl;
  for (int i = 0; i < matches.size(); i++) {
    match(i, 0) = key1[matches[i].queryIdx].pt.x;
    match(i, 1) = key1[matches[i].queryIdx].pt.y;
    match(i, 2) = 1;
    match(i, 3) = key2[matches[i].trainIdx].pt.x;
    match(i, 4) = key2[matches[i].trainIdx].pt.y;
    match(i, 5) = 1;
  }
  
}
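matcher.match() above keeps exactly one nearest neighbor per query descriptor, with no quality filtering. A common refinement is Lowe's ratio test via knnMatch; a minimal sketch (the 0.7 threshold is a conventional choice, not part of the original):

// Ratio-test filtering with the same FLANN matcher (sketch).
vector<vector<DMatch> > knn;
matcher.knnMatch(desc1, desc2, knn, 2);  // two nearest neighbors per query
vector<DMatch> good;
for (size_t i = 0; i < knn.size(); i++) {
  if (knn[i].size() == 2 && knn[i][0].distance < 0.7f * knn[i][1].distance)
    good.push_back(knn[i][0]);  // keep only clearly-best matches
}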
Example #5
Mat Panorama::getDescriptors(vector<KeyPoint> kp){

    cout << "Computing descriptors..." << endl;

	SiftDescriptorExtractor extractor;
	Mat descriptors;
	extractor.compute(srcGray, kp, descriptors);
    
    return descriptors;
}
Example #6
// Feature Detection and Description
void det_desc_features(vector <Image>& images, bool flag)
{
	// Detect the keypoints using SIFT Detector
	SiftFeatureDetector detector(nfeatures, nOctaveLayers, contrastThreshold, edgeThreshold, sigma);
	// Calculate descriptors (feature vectors)
	SiftDescriptorExtractor extractor;
	//// Detect the keypoints using SIFT Detector
	//SurfFeatureDetector detector(500);
	//// Calculate descriptors (feature vectors)
	//SurfDescriptorExtractor extractor;

	for (size_t i = 0; i < images.size(); i++)
	{
	/*	Mat mask = Mat::zeros(images[i].getImg_gray().size(), images[i].getImg_gray().type());
		Mat roi1(mask, cv::Rect(images[i].getImg_gray().cols - 60, 0, images[i].getImg_gray().cols - (images[i].getImg_gray().cols - 60), images[i].getImg_gray().rows));
		roi1 = Scalar(255);
		Mat roi2(mask, cv::Rect(0, 0, 60, images[i].getImg_gray().rows));
		roi2 = Scalar(255);*/

		// Feature Detection
		vector <KeyPoint> tmp_keypoints;
		detector.detect(images[i].getImg_gray(), tmp_keypoints);

		cout << "Features detected in image #" << i << " : " << tmp_keypoints.size() << endl;
		// Feature Description
		Mat tmp_descriptors;
		extractor.compute(images[i].getImg_gray(), tmp_keypoints, tmp_descriptors);

		// Store keypoints and descriptors
		images[i].setImageFeatures(tmp_keypoints, tmp_descriptors);

		// Draw keypoints
		Mat tmp_img_keypoints;
		drawKeypoints(images[i].getImg_gray(), tmp_keypoints, tmp_img_keypoints, Scalar::all(-1), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
		images[i].setImg_Keypoint(tmp_img_keypoints);

		// Store img_keypoints
		string str;
		if (flag == 0)
		{
			str = "images/SIFT_Keypoints/Original_Image/";
		}
		else
		{
			str = "images/SIFT_Keypoints/Histogram_Equalazition/";
		}
		str.append("Image_");
		str.append(to_string(images[i].getID()));
		str.append("_Keypoints_detected_");
		str.append(to_string(tmp_keypoints.size()));
		str.append(".jpg");
		imwrite(str, tmp_img_keypoints);
	}
}
Example #7
int sift_feature()
{
    Mat img_1=imread("./samples/box.png",CV_LOAD_IMAGE_GRAYSCALE);//宏定义时CV_LOAD_IMAGE_GRAYSCALE=0,也就是读取灰度图像
    Mat img_2=imread("./samples/box_in_scene.png",CV_LOAD_IMAGE_GRAYSCALE);//一定要记得这里路径的斜线方向,这与Matlab里面是相反的

    if(!img_1.data || !img_2.data)//if either image failed to load
    {
        cout<<"opencv error"<<endl;
        return -1;
    }
    cout<<"open right"<<endl;

    //Step 1: detect keypoints with the SIFT detector

    SiftFeatureDetector detector;//default-constructed detector
    vector<KeyPoint> keypoints_1,keypoints_2;//two vectors of KeyPoint to hold the detected features

    detector.detect(img_1,keypoints_1);//store the features detected in img_1 into keypoints_1
    detector.detect(img_2,keypoints_2);//likewise for img_2

    //draw the keypoints on the images
    Mat img_keypoints_1,img_keypoints_2;

    drawKeypoints(img_1,keypoints_1,img_keypoints_1,Scalar::all(-1),DrawMatchesFlags::DEFAULT);//render the keypoints in memory
    drawKeypoints(img_2,keypoints_2,img_keypoints_2,Scalar::all(-1),DrawMatchesFlags::DEFAULT);

    imshow("sift_keypoints_1",img_keypoints_1);//display the keypoints
    imshow("sift_keypoints_2",img_keypoints_2);

    //Step 2: compute the descriptors
    SiftDescriptorExtractor extractor;//descriptor extractor object

    Mat descriptors_1,descriptors_2;//matrices holding the descriptors

    extractor.compute(img_1,keypoints_1,descriptors_1);//compute a descriptor for each keypoint
    extractor.compute(img_2,keypoints_2,descriptors_2);

    //Step 3: match the descriptors by brute force
    BruteForceMatcher< L2<float> > matcher;//brute-force matcher with L2 distance
    vector<DMatch> matches;
    matcher.match(descriptors_1,descriptors_2,matches);

    //draw the matches
    Mat img_matches;
    drawMatches(img_1,keypoints_1,img_2,keypoints_2,matches,img_matches);//render the matched pairs into img_matches

    //display the matches
    imshow("sift_Matches",img_matches);//window titled sift_Matches
    waitKey(0);
    return 0;
}
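BruteForceMatcher<L2<float> > comes from older OpenCV 2.x headers and was later removed; from OpenCV 2.4 onward BFMatcher plays the same role. A drop-in sketch:

// BFMatcher replacement for the legacy BruteForceMatcher (OpenCV >= 2.4).
BFMatcher matcher(NORM_L2);
vector<DMatch> matches;
matcher.match(descriptors_1, descriptors_2, matches);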
Example #8
Mat compute_descriptors(Mat image, std::vector<KeyPoint> keypoints)
{
	cout << "Extracting sift descriptors..." << endl;

	SiftDescriptorExtractor extractor;

	Mat descriptor;

	image.convertTo(image, CV_8U);	// SIFT expects an 8-bit image

	extractor.compute(image, keypoints , descriptor);

	return descriptor;
}
Example #9
int main()
{
	//load the images from disk
	Mat img_1 = imread("class.png");
	Mat img_2 = imread("class2.png");
	//if either image failed to load
	if (img_1.empty() || img_2.empty())
	{
		cout << "load image error" << endl;
		return -1;
	}
	//show the source images
	imshow("src image 1", img_1);
	imshow("src image 2", img_2);
	//Step 1: detect keypoints with the SIFT detector
	SiftFeatureDetector detector;//default-constructed detector
	std::vector<KeyPoint> keypoints_1, keypoints_2;//two vectors of KeyPoint to hold the detected features

	detector.detect(img_1, keypoints_1);//store the features detected in img_1 into keypoints_1
	detector.detect(img_2, keypoints_2);//likewise for img_2

	//draw the keypoints on the images
	Mat img_keypoints_1, img_keypoints_2;

	drawKeypoints(img_1, keypoints_1, img_keypoints_1, Scalar::all(-1), DrawMatchesFlags::DEFAULT);//render the keypoints in memory
	drawKeypoints(img_2, keypoints_2, img_keypoints_2, Scalar::all(-1), DrawMatchesFlags::DEFAULT);

	imshow("sift_keypoints_1", img_keypoints_1);//display the keypoints
	imshow("sift_keypoints_2", img_keypoints_2);

	//Step 2: compute the descriptors
	SiftDescriptorExtractor extractor;//descriptor extractor object
	Mat descriptors_1, descriptors_2;//matrices holding the descriptors

	extractor.compute(img_1, keypoints_1, descriptors_1);//compute a descriptor for each keypoint
	extractor.compute(img_2, keypoints_2, descriptors_2);

	//Step 3: match the descriptors by brute force
	BruteForceMatcher< L2<float> > matcher;//brute-force matcher with L2 distance
	vector<DMatch> matches;
	matcher.match(descriptors_1, descriptors_2, matches);

	//draw the matches
	Mat img_matches;
	drawMatches(img_1, keypoints_1, img_2, keypoints_2, matches, img_matches);//render the matched pairs into img_matches

	//display the matches
	imshow("sift_Matches", img_matches);//window titled sift_Matches
	waitKey(0);
	return 0;
}
Example #10
Mat computeSifts(const string& fileName)
{
    const Mat input = cv::imread(fileName.c_str(), 0); //Load as grayscale
    if(input.empty())
        cout<<"ERROR: Image "<<fileName<<" was not read"<<endl;
    Mat descriptors;
    SiftFeatureDetector detector;
    vector<cv::KeyPoint> keypoints;
    detector.detect(input, keypoints);
    SiftDescriptorExtractor extractor;
    extractor.compute(input, keypoints, descriptors);
    // cout<<descriptors<<endl;
    return descriptors;
}
Example #11
/* 
*	Function : doSift
*	Description : Find SIFT keypoints in the image
*	
*	path : path of the image
*	container : container for the SIFT keypoints and their descriptors
*/
void doSift(const string &path, struct SFeatures &container)
{
	Mat img, des;
	vector<KeyPoint> keypoints;

	img = imread(path.c_str(), CV_LOAD_IMAGE_GRAYSCALE);

	SiftFeatureDetector detector;

	detector.detect(img, keypoints);

	SiftDescriptorExtractor extractor;

	extractor.compute(img, keypoints, des);

	container.des = des;
	container.keys = keypoints;
}
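struct SFeatures is not defined in this snippet; from how doSift fills it, a minimal compatible definition would be (field names taken from the usage above, everything else assumed):

// Assumed container definition, inferred from container.keys / container.des.
struct SFeatures {
	std::vector<KeyPoint> keys;  // SIFT keypoints
	Mat des;                     // one 128-float descriptor row per keypoint
};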
Example #12
void ASiftDetector::detectAndCompute(const Mat& img, std::vector< KeyPoint >& keypoints, Mat& descriptors)
{
    keypoints.clear();
    descriptors = Mat(0, 128, CV_32F);
    for(int tl = 1; tl < 6; tl++)
    {
        double t = pow(2, 0.5*tl);
        for(int phi = 0; phi < 180; phi += 72.0/t)
        {
            std::vector<KeyPoint> kps;
            Mat desc;

            Mat timg, mask, Ai;
            img.copyTo(timg);

            affineSkew(t, phi, timg, mask, Ai);

#if 0
            Mat img_disp;
            bitwise_and(mask, timg, img_disp);
            namedWindow( "Skew", WINDOW_AUTOSIZE );// Create a window for display.
            imshow( "Skew", img_disp );
            waitKey(0);
#endif

            SiftFeatureDetector detector;
            detector.detect(timg, kps, mask);

            SiftDescriptorExtractor extractor;
            extractor.compute(timg, kps, desc);

            for(unsigned int i = 0; i < kps.size(); i++)
            {
                Point3f kpt(kps[i].pt.x, kps[i].pt.y, 1);
                Mat kpt_t = Ai*Mat(kpt);
                kps[i].pt.x = kpt_t.at<float>(0,0);
                kps[i].pt.y = kpt_t.at<float>(1,0);
            }
            keypoints.insert(keypoints.end(), kps.begin(), kps.end());
            descriptors.push_back(desc);
        }
    }
}
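affineSkew() is not shown here. For reference, the widely circulated OpenCV ASIFT port implements it roughly as below: rotate by phi, simulate the camera tilt by blurring and shrinking along x, and return the inverse affine Ai used above to map keypoints back. This is a sketch of that common version, not necessarily this project's code.

void ASiftDetector::affineSkew(double tilt, double phi, Mat& img, Mat& mask, Mat& Ai)
{
    int h = img.rows, w = img.cols;
    mask = Mat(h, w, CV_8UC1, Scalar(255));
    Mat A = Mat::eye(2, 3, CV_32F);
    if(phi != 0.0)
    {
        phi *= CV_PI / 180.0;
        double s = sin(phi), c = cos(phi);
        A = (Mat_<float>(2, 2) << c, -s, s, c);
        Mat corners = (Mat_<float>(4, 2) << 0, 0, w, 0, w, h, 0, h);
        Mat tcorners = corners * A.t();
        Rect rect = boundingRect(tcorners.reshape(2));
        A = (Mat_<float>(2, 3) << c, -s, -rect.x, s, c, -rect.y);
        warpAffine(img, img, A, Size(rect.width, rect.height), INTER_LINEAR, BORDER_REPLICATE);
    }
    if(tilt != 1.0)
    {
        double s = 0.8 * sqrt(tilt * tilt - 1);
        GaussianBlur(img, img, Size(0, 0), s, 0.01);  // anti-alias before subsampling
        resize(img, img, Size(0, 0), 1.0 / tilt, 1.0, INTER_NEAREST);
        A.row(0) = A.row(0) / tilt;
    }
    if(phi != 0.0 || tilt != 1.0)
        warpAffine(mask, mask, A, img.size(), INTER_NEAREST);
    invertAffineTransform(A, Ai);
}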
pair<vector<Point2f>, vector<Point2f> > computeMatching(Mat &img1, Mat &img2, vector<KeyPoint> &keypoints1, vector<KeyPoint> &keypoints2)
{
    SiftDescriptorExtractor extractor;
    Mat descriptors1, descriptors2;
    extractor.compute(img1, keypoints1, descriptors1);
    extractor.compute(img2, keypoints2, descriptors2);
    BruteForceMatcher<L2<float> > matcher;
    vector<DMatch> matches1_2, matches2_1;
    matcher.match(descriptors1, descriptors2, matches1_2);
    matcher.match(descriptors2, descriptors1, matches2_1);
    pair<vector<Point2f>, vector<Point2f> > matches;
    vector<DMatch> dmatchFiltrado;
    double maxDistance = 90;
    for (uint i=0; i < matches1_2.size(); i++) {
        if (matches1_2[i].distance > maxDistance) {
            continue;
        }
        pair<Point2f, Point2f> match1_2 = pair<Point2f, Point2f>(keypoints1[matches1_2[i].queryIdx].pt, keypoints2[matches1_2[i].trainIdx].pt);
        for (uint j=0; j < matches2_1.size(); j++) {
            if (matches2_1[j].distance > maxDistance) {
                continue;
            }
            pair<Point2f, Point2f> match2_1 = pair<Point2f, Point2f>(keypoints1[matches2_1[j].trainIdx].pt, keypoints2[matches2_1[j].queryIdx].pt);
            if (match1_2.first == match2_1.first && match1_2.second == match2_1.second) {
                if (dmatchFiltrado.empty() || (matches.first.back() != match1_2.first || matches.second.back() != match1_2.second)) {
                    dmatchFiltrado.push_back(matches1_2[i]);
                    matches.first.push_back(match1_2.first);
                    matches.second.push_back(match1_2.second);
                }
            }
        }
    }

    Mat img3;
    drawMatches(img1, keypoints1, img2, keypoints2, dmatchFiltrado, img3);
    imshow("Correspondencias", img3);
    waitKey();
    destroyWindow("Correspondencias");
    return matches;
}
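The nested loop above performs a symmetry (cross-check) test in O(n*m). OpenCV's brute-force matcher can do the same filtering internally; a sketch, assuming OpenCV 2.4+:

// Cross-check in the matcher itself: keep (i,j) only if i->j and j->i agree.
BFMatcher matcher(NORM_L2, true);  // second argument enables cross-checking
vector<DMatch> symMatches;
matcher.match(descriptors1, descriptors2, symMatches);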
Example #14
File: sift.cpp  Project: KeiUe/cpp
// Panorama stitching
Mat panorama(Mat src1, Mat src2, int width, int height)
{
	// Detect SIFT keypoints and compute their descriptors
	Mat gray1, gray2, des1, des2;
	SiftFeatureDetector detector(2000);
	SiftDescriptorExtractor extractor;
	vector<KeyPoint> kps1, kps2;
	cvtColor(src1, gray1, CV_BGR2GRAY);
	cvtColor(src2, gray2, CV_BGR2GRAY);
	detector.detect(gray1, kps1);
	detector.detect(gray2, kps2);
	extractor.compute(gray1, kps1, des1);
	extractor.compute(gray2, kps2, des2);

	// Match the keypoints
	vector<DMatch> matches;
	BruteForceMatcher< L2<float> > matcher;
	matcher.match(des1, des2, matches);
	vector<Vec2f> pts1(matches.size());
	vector<Vec2f> pts2(matches.size());

	// Compute the homography from the matched points
	for (size_t i = 0; i < matches.size(); ++i){
		pts1[i][0] = kps1[matches[i].queryIdx].pt.x;
		pts1[i][1] = kps1[matches[i].queryIdx].pt.y;
		pts2[i][0] = kps2[matches[i].trainIdx].pt.x;
		pts2[i][1] = kps2[matches[i].trainIdx].pt.y;
	}
	Mat H = findHomography(pts1, pts2, CV_RANSAC);

	// Composite the panorama using the homography H
	Mat dst;
	warpPerspective(src1, dst, H, Size(width, height));
	for (int y = 0; y < src1.rows; y++){
		for (int x = 0; x < src1.cols; x++){
			dst.at<Vec3b>(y, x) = src2.at<Vec3b>(y, x);
		}
	}
	return dst;
}
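A hypothetical call site for panorama() (file names and canvas size are placeholders): the output canvas must be wide enough to hold the warped src1, and src2 is then copied over its own frame.

// Hypothetical usage of panorama() above.
Mat left  = imread("left.jpg");
Mat right = imread("right.jpg");
Mat pano  = panorama(left, right, left.cols + right.cols, left.rows);
imwrite("pano.jpg", pano);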
Example #15
int cv_featureDescriptor(CParamArray *pa)
{
    using namespace cv;

    // read image
    string imageFN = svar.GetString("image", "./test.png");
    Mat img = imread(imageFN);

    // extract keypoints & descriptors
    Ptr<FeatureDetector>    detector;
    SiftDescriptorExtractor extractor;

    vector<KeyPoint>        keypoints;
    Mat                     descriptors;

    detector = new SiftFeatureDetector;

    detector->detect(img, keypoints);
    extractor.compute(img, keypoints, descriptors);

    // print keypoints
    for(int i=0; i<keypoints.size(); i++) {
        KeyPoint &p = keypoints[i];

        printf("kp[%6d] x, y = %12f, %12f\n", i, p.pt.x, p.pt.y);
        printf("           size = %12f, angle = %12f\n", p.size, p.angle);
        printf("           response = %12f, octave = %3d, class_id = %4d\n", p.response, p.octave, p.class_id);
    }
    printf("\n");

    // print descriptors
    //      type: CV_MAT_TYPE, CV_32F
    printf("descriptor: \n");
    printf("    cols     = %d\n", descriptors.cols);
    printf("    rows     = %d\n", descriptors.rows);
    printf("    channels = %d\n", descriptors.channels());
    printf("    type     = %d\n", descriptors.type());

    return 0;
}
Example #16
static void make_vocabulary()
{
    if(flag==1)
    {
        return ;
    }
    cout<<" MAKING VOCABULARY...."<<endl;
    for(int i=1; i<=20; i++)
    {
        cout<<" Reading File "<<i<<endl;
        stringstream ss;
        ss << path_People << "person_"<<setfill('0') << setw(3) << i <<".image.png";
        cout<<ss.str()<<endl;
        img=imread(ss.str(),0);
        Mat tempp=imread(ss.str(),1);
        //vector< vector<Point > > superpixel=make_superpixels(tempp);
        //cout<<superpixel.size()<<" Superpixel size "<<endl;
        for(int  k=0; k<1; k++)
        {
            /*   int x1=superpixel[k][0].x;
               int y1=superpixel[k][0].y;
               int x2=superpixel[k][1].x;
               int y2=superpixel[k][1].y;
               Mat newimg=Mat(x2-x1+1,y2-y1+1,0,Scalar(255,255,255));
               for(int l=2; l<superpixel[k].size(); l++)
               {
                  int x=superpixel[k][l].x;
                  int y=superpixel[k][l].y;
                  newimg.at<uchar>(x-x1,y-y1)=img.at<uchar>(x,y);
               }*/
            keypoints.clear();
            detector.detect(img,keypoints);
            detector.compute(img,keypoints,descriptor);
            features_unclustered.push_back(descriptor);
        }
    }
    cout<<"VOCABULARY BUILT...."<<endl;
    cout<<endl;
}
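make_vocabulary only accumulates descriptors in features_unclustered; the usual next step clusters them into a visual vocabulary (compare Example #18). A short sketch with an assumed cluster count of 100:

// Sketch: cluster the accumulated descriptors into a 100-word vocabulary.
TermCriteria tc(CV_TERMCRIT_ITER, 100, 0.001);
BOWKMeansTrainer bowTrainer(100, tc, 1, KMEANS_PP_CENTERS);
Mat dictionary = bowTrainer.cluster(features_unclustered);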
Example #17
/**
 * @function main
 */
int main( int argc, char** argv )
{
	ros::init(argc, argv, "object_detector");
	ros::NodeHandle nh;
	
	///subscribe to camera image topic
	image_transport::ImageTransport it(nh);
	image_transport::Subscriber sub = it.subscribe((string)IMAGE_TOPIC, 1, imageCallback);
	
    ///read calibration data
    ifstream file (CALIBRATION_FILE);
    if (!file.is_open()){
        printf("ERROR: Unable to open calibration file\n");
        return 2;
    }
    H=readCalibration(file);


    
	//feature calculation for the object image
	img_object = imread( (string)DATA_FOLDER+(string)IMAGE_NAME, CV_LOAD_IMAGE_GRAYSCALE );
	//-- Step 1: Detect the keypoints using the SIFT Detector
	SiftFeatureDetector detector;
	detector.detect( img_object, keypoints_object );
	//-- Step 2: Calculate descriptors (feature vectors)
	SiftDescriptorExtractor extractor;
	extractor.compute( img_object, keypoints_object, descriptors_object );
    
	
    //run service
	ros::ServiceServer service = nh.advertiseService("vision/get_plate_position", get_plate_position);
	ros::ServiceServer service1 = nh.advertiseService("vision/displayFrame",displayFrame);
	ROS_INFO("ready to detect the plate");
        
    ros::spin();
	return 0;
}
Example #18
int main(int argc, char* argv[])
{	

	int DICTIONARY_BUILD = 3;

if (DICTIONARY_BUILD == 1){

	//Step 1 - Obtain the set of bags of features.

	//to store the input file names
	char * filename = new char[100];		
	//to store the current input image
	Mat input;	

	//To store the keypoints that will be extracted by SIFT
	vector<KeyPoint> keypoints;
	//To store the SIFT descriptor of current image
	Mat descriptor;
	//To store all the descriptors that are extracted from all the images.
	Mat featuresUnclustered;
	//The SIFT feature extractor and descriptor
	SiftDescriptorExtractor detector;	
	
	//select a subset of the training images to extract feature descriptors and build the vocabulary
	int startid = 1;
	int endid = 39;
	for(int f=startid;f<=endid;f++){		
		//create the file name of an image
		sprintf(filename,".\\Release\\omocha_train\\%i.jpg",f);
		//open the file
		input = imread(filename, CV_LOAD_IMAGE_GRAYSCALE); //Load as grayscale				
		//detect feature points
		detector.detect(input, keypoints);
		//compute the descriptors for each keypoint
		detector.compute(input, keypoints,descriptor);		
		//put the all feature descriptors in a single Mat object 
		featuresUnclustered.push_back(descriptor);		
		//print progress
		printf("processed image %i\n",f);
	}	


	//Construct BOWKMeansTrainer
	//the number of bags
	int dictionarySize=200;
	//define Term Criteria
	TermCriteria tc(CV_TERMCRIT_ITER,100,0.001);
	//retries number
	int retries=1;
	//necessary flags
	int flags=KMEANS_PP_CENTERS;
	//Create the BoW (or BoF) trainer
	BOWKMeansTrainer bowTrainer(dictionarySize,tc,retries,flags);
	//cluster the feature vectors
	Mat dictionary=bowTrainer.cluster(featuresUnclustered);	
	//store the vocabulary
	FileStorage fs(".\\dictionary.yml", FileStorage::WRITE);
	fs << "vocabulary" << dictionary;
	fs.release();
	
}else if(DICTIONARY_BUILD == 2){
	//Step 2 - Obtain the BoF descriptor for given image/video frame. 

    //prepare BOW descriptor extractor from the dictionary    
	Mat dictionary; 
	FileStorage fs(".\\dictionary.yml", FileStorage::READ);
	fs["vocabulary"] >> dictionary;
	fs.release();	
    
	//create a nearest neighbor matcher
	Ptr<DescriptorMatcher> matcher(new FlannBasedMatcher);
	//create SIFT feature point detector
	Ptr<FeatureDetector> detector(new SiftFeatureDetector());
	//create Sift descriptor extractor
	Ptr<DescriptorExtractor> extractor(new SiftDescriptorExtractor);	
	//create BoF (or BoW) descriptor extractor
	BOWImgDescriptorExtractor bowDE(extractor,matcher);
	//Set the dictionary with the vocabulary we created in the first step
	bowDE.setVocabulary(dictionary);

	//To store the image file name
	char * filename = new char[100];

	char * inputfile = new char[100];
	//To store the image tag name - only for save the descriptor in a file
	//char * imageTag = new char[10];

	int startid = 1;
	int endid = 39;
	for(int i = startid; i <= endid; i++)
	{

		sprintf(inputfile,".\\Release\\omocha_train\\%i.jpg", i);
		sprintf(filename, ".\\%i.yml", i);

		//open the file to write the resultant descriptor
		FileStorage fs1(filename, FileStorage::WRITE);	
	
		//read the image
		Mat img=imread(inputfile,CV_LOAD_IMAGE_GRAYSCALE);		
		//To store the keypoints that will be extracted by SIFT
		vector<KeyPoint> keypoints;		
		//Detect SIFT keypoints (or feature points)
		detector->detect(img,keypoints);
		//To store the BoW (or BoF) representation of the image
		Mat bowDescriptor;		
		//extract BoW (or BoF) descriptor from given image
		bowDE.compute(img,keypoints,bowDescriptor);

		//prepare the yml (somewhat similar to xml) file
		//sprintf(imageTag,"img1");			
		//write the new BoF descriptor to the file
		//fs1 << imageTag << bowDescriptor;

		fs1 << "imageData" << bowDescriptor;


		//You may use this descriptor for classifying the image.
			
		//release the file storage
		fs1.release();
	}
}else{
	//remainder of this branch was truncated in the source listing
}
	return 0;
}
Example #19
int main(int argc, char** argv)
{
    if( argc < 2 )
    {
        printPrompt( argv[0] );
        return -1;
    }

    initModule_nonfree();

    // Get Input Data
    ifstream file(argv[1]);
    if ( !file.is_open() )
        return -1;	// was `return false`, which would signal success from main
    
    string str;
    
        // Image Name
    getline( file, str ); getline( file, str );
    string image_name = str;
        // Cloud Name
    getline( file, str ); getline( file, str );
    string cloud_name = str;
        // width of images to be created.
    getline( file, str ); getline( file, str );
    int w = atoi(str.c_str());
        // height of images to be created
    getline( file, str ); getline( file, str );
    int h = atoi(str.c_str());
        // resolution of voxel grids
    getline( file, str ); getline( file, str );
    float r = atof(str.c_str());
        // f (distance from pinhole)
    getline( file, str ); getline( file, str );
    float f = atof(str.c_str());
        // thetax (initial rotation about X Axis of map)
    getline( file, str ); getline( file, str );
    float thetaX = atof(str.c_str());
        // thetay (initial rotation about Y Axis of map)
    getline( file, str ); getline( file, str );
    float thetaY = atof(str.c_str());
        // number of points to go to
    getline( file, str ); getline( file, str );
    float nop = atoi(str.c_str());
        // Number of divisions
    getline( file, str ); getline( file, str );
    float divs = atoi(str.c_str());
        // Number of images to return
    getline( file, str ); getline( file, str );
    int numtoreturn = atoi(str.c_str());    
        // Should we load or create photos?
    getline( file, str ); getline( file, str );
    string lorc =str.c_str();
        // Directory to look for photos
    getline( file, str ); getline( file, str );
    string dir =str.c_str();
        // Directory to look for kp and descriptors
    getline( file, str ); getline( file, str );
    string kdir =str.c_str();
        // save photos?
    getline( file, str ); getline( file, str );
    string savePhotos =str.c_str();
    
    file.close();
    // Done Getting Input Data

    map<vector<float>, Mat> imagemap;
    map<vector<float>, Mat> surfmap;
    map<vector<float>, Mat> siftmap;
    map<vector<float>, Mat> orbmap;
    map<vector<float>, Mat> fastmap;
    imagemap.clear();

    vector<KeyPoint> SurfKeypoints;
    vector<KeyPoint> SiftKeypoints;
    vector<KeyPoint> OrbKeypoints;
    vector<KeyPoint> FastKeypoints;
    Mat SurfDescriptors;
    Mat SiftDescriptors;
    Mat OrbDescriptors;
    Mat FastDescriptors;

    int minHessian = 300;

    SurfFeatureDetector SurfDetector (minHessian);
    SiftFeatureDetector SiftDetector (minHessian);
    OrbFeatureDetector OrbDetector (minHessian);
    FastFeatureDetector FastDetector (minHessian);


    SurfDescriptorExtractor SurfExtractor;
    SiftDescriptorExtractor SiftExtractor;
    OrbDescriptorExtractor OrbExtractor;

    if ( !fs::exists( dir ) || lorc == "c" )
    { // Load Point Cloud and render images
        PointCloud<PT>::Ptr cloud (new pcl::PointCloud<PT>);
        io::loadPCDFile<PT>(cloud_name, *cloud);

        Eigen::Affine3f tf = Eigen::Affine3f::Identity();
        tf.rotate (Eigen::AngleAxisf (thetaX, Eigen::Vector3f::UnitX()));
        pcl::transformPointCloud (*cloud, *cloud, tf);
        tf = Eigen::Affine3f::Identity();
        tf.rotate (Eigen::AngleAxisf (thetaY, Eigen::Vector3f::UnitY()));
        pcl::transformPointCloud (*cloud, *cloud, tf);

        // Create images from point cloud
        imagemap = render::createImages(cloud, nop, w, h, r, f);

        if (savePhotos == "y")
        {
            for (map<vector<float>, Mat>::iterator i = imagemap.begin(); i != imagemap.end(); ++i)
            {
                // Create image name and storagename
                string imfn = dir + "/";
                string kpfn = kdir + "/";
                for (int j = 0; j < i->first.size(); j++)
                {
                    imfn += boost::to_string(i->first[j]) + " ";
                    kpfn += boost::to_string(i->first[j]) + " ";
                }
                imfn += ".jpg";
                imwrite(imfn, i->second);

                // Detect keypoints, add to keypoint map. Same with descriptors

                SurfDetector.detect(i->second, SurfKeypoints);
                SiftDetector.detect(i->second, SiftKeypoints);
                OrbDetector.detect(i->second, OrbKeypoints);
                FastDetector.detect(i->second, FastKeypoints);

                SurfExtractor.compute(i->second, SurfKeypoints, SurfDescriptors);
                SiftExtractor.compute(i->second, SiftKeypoints, SiftDescriptors);
                OrbExtractor.compute(i->second, OrbKeypoints, OrbDescriptors);
                SiftExtractor.compute(i->second, FastKeypoints, FastDescriptors);

                // Store KP and Descriptors in yaml file.

                kpfn += ".yml";
                FileStorage store(kpfn, cv::FileStorage::WRITE);
                write(store,"SurfKeypoints",SurfKeypoints);
                write(store,"SiftKeypoints",SiftKeypoints);
                write(store,"OrbKeypoints", OrbKeypoints);
                write(store,"FastKeypoints",FastKeypoints);
                write(store,"SurfDescriptors",SurfDescriptors);
                write(store,"SiftDescriptors",SiftDescriptors);
                write(store,"OrbDescriptors", OrbDescriptors);
                write(store,"FastDescriptors",FastDescriptors);
                store.release();

                surfmap[i->first] = SurfDescriptors;
                siftmap[i->first] = SiftDescriptors;
                orbmap[i->first]  = OrbDescriptors;
                fastmap[i->first] = FastDescriptors;
            }
        }
    } 
    else 
    { // load images from the folder dir
        // First look into the folder to get a list of filenames
        vector<fs::path> ret;
        const char * pstr = dir.c_str();
        fs::path p(pstr);
        get_all(pstr, ret);

        for (int i = 0; i < ret.size(); i++)
        {
            // Load Image via filename
            string fn = ret[i].string();
            istringstream iss(fn);
            vector<string> tokens;
            copy(istream_iterator<string>(iss), istream_iterator<string>(), back_inserter<vector<string> >(tokens));

            // Construct ID from filename
            vector<float> ID;
            for (int i = 0; i < 6; i++) // 6 because there are three location floats and three direction floats
                ID.push_back(::atof(tokens[i].c_str()));
            string imfn = dir + "/" + fn;

            // Read image and add to imagemap.
            Mat m = imread(imfn);
            imagemap[ID] = m;

            // Create Filename for loading Keypoints and descriptors
            string kpfn = kdir + "/";
            for (int j = 0; j < ID.size(); j++)
            {
                kpfn += boost::to_string(ID[j]) + " ";
            }

            kpfn = kpfn+ ".yml";
            
            // Create filestorage item to read from and add to map.
            FileStorage store(kpfn, cv::FileStorage::READ);

            FileNode n1 = store["SurfKeypoints"];
            read(n1,SurfKeypoints);
            FileNode n2 = store["SiftKeypoints"];
            read(n2,SiftKeypoints);
            FileNode n3 = store["OrbKeypoints"];
            read(n3,OrbKeypoints);
            FileNode n4 = store["FastKeypoints"];
            read(n4,FastKeypoints);
            FileNode n5 = store["SurfDescriptors"];
            read(n5,SurfDescriptors);
            FileNode n6 = store["SiftDescriptors"];
            read(n6,SiftDescriptors);
            FileNode n7 = store["OrbDescriptors"];
            read(n7,OrbDescriptors);
            FileNode n8 = store["FastDescriptors"];
            read(n8,FastDescriptors);

            store.release();

            surfmap[ID] = SurfDescriptors;
            siftmap[ID] = SiftDescriptors;
            orbmap[ID]  = OrbDescriptors;
            fastmap[ID] = FastDescriptors;
        }
    }

    TickMeter tm;
    tm.reset();
    cout << "<\n  Analyzing Images ..." << endl;

    // We have a bunch of images, now we compute their grayscale and black and white.
    map<vector<float>, Mat> gsmap;
    map<vector<float>, Mat> bwmap;
    for (map<vector<float>, Mat>::iterator i = imagemap.begin(); i != imagemap.end(); ++i)
    {
        vector<float> ID = i->first;
        Mat Image = i-> second;
        GaussianBlur( Image, Image, Size(5,5), 0, 0, BORDER_DEFAULT );


        gsmap[ID] = averageImage::getPixSumFromImage(Image, divs);
        bwmap[ID] = averageImage::aboveBelow(gsmap[ID]);
    }
    Mat image = imread(image_name);
    Mat gsimage = averageImage::getPixSumFromImage(image, divs);
    Mat bwimage = averageImage::aboveBelow(gsimage);

    // cout << gsimage <<endl;
    imwrite("GS.png", gsimage);
    namedWindow("GSIMAGE (Line 319)");
    imshow("GSIMAGE (Line 319)", gsimage);
    waitKey(0);

    vector<KeyPoint> imgSurfKeypoints;
    vector<KeyPoint> imgSiftKeypoints;
    vector<KeyPoint> imgOrbKeypoints;
    vector<KeyPoint> imgFastKeypoints;
    Mat imgSurfDescriptors;
    Mat imgSiftDescriptors;
    Mat imgOrbDescriptors;
    Mat imgFastDescriptors;

    SurfDetector.detect(image, imgSurfKeypoints);
    SiftDetector.detect(image, imgSiftKeypoints);
    OrbDetector.detect(image, imgOrbKeypoints);
    FastDetector.detect(image, imgFastKeypoints);

    SurfExtractor.compute(image, imgSurfKeypoints, imgSurfDescriptors);
    SiftExtractor.compute(image, imgSiftKeypoints, imgSiftDescriptors);
    OrbExtractor.compute(image, imgOrbKeypoints, imgOrbDescriptors);
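    // As above, SIFT describes the FAST keypoints (FAST is detect-only).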
    SiftExtractor.compute(image, imgFastKeypoints, imgFastDescriptors);


    tm.start();

    cout << ">\n<\n  Comparing Images ..." << endl;

    // We have their features, now compare them!
    map<vector<float>, float> gssim; // Gray Scale Similarity
    map<vector<float>, float> bwsim; // Above Below Similarity
    map<vector<float>, float> surfsim;
    map<vector<float>, float> siftsim;
    map<vector<float>, float> orbsim;
    map<vector<float>, float> fastsim;

    for (map<vector<float>, Mat>::iterator i = gsmap.begin(); i != gsmap.end(); ++i)
    {
        vector<float> ID = i->first;
        gssim[ID] = similarities::getSimilarity(i->second, gsimage);
        bwsim[ID] = similarities::getSimilarity(bwmap[ID], bwimage); 
        surfsim[ID] = similarities::compareDescriptors(surfmap[ID], imgSurfDescriptors);
        siftsim[ID] = similarities::compareDescriptors(siftmap[ID], imgSiftDescriptors);
        orbsim[ID] = 0;//similarities::compareDescriptors(orbmap[ID], imgOrbDescriptors);
        fastsim[ID] = 0;//similarities::compareDescriptors(fastmap[ID], imgFastDescriptors);
    }

    map<vector<float>, int> top;

    bool gotone = false;
    typedef map<vector<float>, int>::iterator iter;

    // Choose the best ones!
    for (map<vector<float>, Mat>::iterator i = imagemap.begin(); i != imagemap.end(); ++i)
    {
        vector<float> ID = i->first;

        int sim = /* gssim[ID] + 0.5*bwsim[ID] + */ 5*surfsim[ID] + 0.3*siftsim[ID] + orbsim[ID] + fastsim[ID];

        // cout << surfsim[ID] << "\t";
        // cout << siftsim[ID] << "\t";
        // cout << orbsim[ID] << "\t";
        // cout << fastsim[ID] << endl;

        if (!gotone)
        {
            top[ID] = sim;
            gotone = true;
        }

        iter it = top.begin();
        iter end = top.end();
        int max_value = it->second;
        vector<float> max_ID = it->first;
        for( ; it != end; ++it) 
        {
            int current = it->second;
            if(current > max_value) 
            {
                max_value = it->second;
                max_ID = it->first;
            }
        }
        // cout << "Sim: " << sim << "\tmax_value: " << max_value << endl;
        if (top.size() < numtoreturn)
            top[ID] = sim;
        else
        {
            if (sim < max_value)
            {
                top[ID] = sim;
                top.erase(max_ID);
            }
        }
    }
    tm.stop();
        double s = tm.getTimeSec();


    cout << ">\n<\n  Writing top " << numtoreturn << " images ..." << endl;

    int count = 1;
    namedWindow("Image");
    namedWindow("Match");
    namedWindow("ImageBW");
    namedWindow("MatchBW");
    namedWindow("ImageGS");
    namedWindow("MatchGS");

    imshow("Image", image);
    imshow("ImageBW", bwimage);
    imshow("ImageGS", gsimage);


    vector<KeyPoint> currentPoints;

    for (iter i = top.begin(); i != top.end(); ++i)
    {
        vector<float> ID = i->first;

        cout << "  Score: "<< i->second << "\tGrayScale: " << gssim[ID] << "\tBW: " << bwsim[ID] << "  \tSURF: " << surfsim[ID] << "\tSIFT: " << siftsim[ID] << endl;
        string fn = "Sim_" + boost::to_string(count) + "_" + boost::to_string(i->second) + ".png";
        imwrite(fn, imagemap[ID]);
        count++;

        normalize(bwmap[ID], bwmap[ID], 0, 255, NORM_MINMAX, CV_64F);
        normalize(gsmap[ID], gsmap[ID], 0, 255, NORM_MINMAX, CV_64F);

        imshow("Match", imagemap[ID]);
        imshow("MatchBW", bwmap[ID]);
        imshow("MatchGS", gsmap[ID]);


        waitKey(0);

    }

    cout << ">\nComparisons took " << s << " seconds for " << imagemap.size() << " images (" 
        << (int) imagemap.size()/s << " images per second)." << endl;

return 0;
}
Example #20
/************************************************************************************************
*								extractFeaturesSIFT  									        *
************************************************************************************************/
void  CFeatureExtraction::extractFeaturesSIFT(
		const CImage			&img,
		CFeatureList			&feats,
		unsigned int			init_ID,
		unsigned int			nDesiredFeatures,
		const TImageROI			&ROI) const
{
	bool usingROI = false;
	if( ROI.xMin != 0 || ROI.xMax != 0 || ROI.yMin != 0 || ROI.yMax != 0 )
		usingROI = true;	// A ROI has been defined
	
	// ROI cannot be managed properly (yet) with this method, so we extract a subimage

	// use a smart pointer so we just copy the pointer if the image is grayscale, or we'll create a new one if it was RGB:
	CImage img_grayscale(img, FAST_REF_OR_CONVERT_TO_GRAY); // Was: auxImgPtr;
	if( usingROI )
	{
		ASSERT_( ROI.xMin >= 0 && ROI.xMin < ROI.xMax && ROI.xMax < img.getWidth() && ROI.yMin >= 0 && ROI.yMax < img.getHeight() && ROI.yMin < ROI.yMax );
		CImage auximg; 
		img_grayscale.extract_patch( auximg, ROI.xMin, ROI.yMin, ROI.xMax-ROI.xMin+1, ROI.yMax-ROI.yMin+1 ); // Subimage in "auxImg"
		img_grayscale.swap(auximg);
	}

	switch( options.SIFTOptions.implementation )
	{
// --------------------------------------------------------------------------------------
//		Binary in C# -> OPTIONAL: Feature position already computed
// --------------------------------------------------------------------------------------
		
	case CSBinary:
		{
#ifdef MRPT_OS_WINDOWS
		
			char			filImg[2000],filOut[2000],filFeat[2000];
			char			paramImg[2000];

			GetTempPathA(1000,filOut);	os::strcat(filOut,1000,"temp_out.txt");			// OUTPUT FILE
			GetTempPathA(1000,filImg);	os::strcat(filImg,1000,"temp_img.bmp");			// INPUT IMAGE (BMP) FOR BINARY IN (C#)

			bool onlyDesc = feats.size() > 0 ? true : false;

			if( onlyDesc )
			{
				GetTempPathA(1000,filFeat);	os::strcat(filFeat,1000,"temp_feats.txt");		// KEYPOINTS INPUT FILE
				CMatrix		listPoints(feats.size(),2);
				for (size_t i= 0;i<feats.size();i++)
				{
					listPoints(i,0) = feats[i]->x;
					listPoints(i,1) = feats[i]->y;
				}
				listPoints.saveToTextFile( filFeat, MATRIX_FORMAT_FIXED /*Float format*/ );
			} // end if
			// -------------------------------------------
			//		CALL TO "extractSIFT.exe"
			// -------------------------------------------
			img_grayscale.saveToFile( filImg );

			// ------------------------------------
			// Version  with "CreateProcess":
			// ------------------------------------
			os::strcpy(paramImg,1000,"extractSIFT.exe -i"); os::strcat(paramImg,1000,filImg);
			os::strcat(paramImg,1000," -f"); os::strcat(paramImg,1000,filOut);
			os::strcat(paramImg,1000," -l"); os::strcat(paramImg,1000,filFeat);

			// ------------------------------------
			// Launch process
			// ------------------------------------
			bool ret = mrpt::system::launchProcess( paramImg );

			if( !ret )
				THROW_EXCEPTION( "[extractFeaturesSIFT] Could not launch external process... (extractSIFT.exe)" )

			// Process Results
			CFeatureList::iterator	itFeat = feats.begin();
			size_t	nFeats;

			CMatrix		aux;
			aux.loadFromTextFile( filOut );
			std::cout << "[computeSiftFeatures] " << aux.getRowCount() << " features." << std::endl;

			if( onlyDesc )
				nFeats = feats.size();
			else
			{
				nFeats = aux.getRowCount();
				feats.resize( nFeats );
			}

			for( size_t i = 0;
                 itFeat != feats.end();
				 i++, itFeat++)
			{
				(*itFeat)->type			= featSIFT;
				(*itFeat)->x			= usingROI ? aux(i,0) + ROI.xMin : aux(i,0);
				(*itFeat)->y			= usingROI ? aux(i,1) + ROI.yMin : aux(i,1);
				(*itFeat)->orientation	= aux(i,2);
				(*itFeat)->scale		= aux(i,3);
				(*itFeat)->ID			= init_ID + i;

				// The descriptor:
				aux.extractRow(i, (*itFeat)->descriptors.SIFT, 4);
			}
			remove(filImg);
			remove(filOut);
#else
    THROW_EXCEPTION("Unfortunately, this SIFT Implementation only runs in Windows OS, try Hess implementation");
#endif
			break;
		} // end case Binary in C#
	case VedaldiBinary:
		{
		
		// --------------------------------------------------------------------------------------
		//		Binary by Vedaldi: NOT IMPLEMENTED YET. Input in PGM format
		// --------------------------------------------------------------------------------------
#ifdef MRPT_OS_WINDOWS
		THROW_EXCEPTION("Usage of Vedaldi Binary not implemented yet, please, try another one");
#else
	    THROW_EXCEPTION("Unfortunately, this SIFT Implementation only runs in Windows OS, try Hess implementation");
#endif
		break;
		} // end case Binary by Vedaldi
// --------------------------------------------------------------------------------------
//		Binary by David Lowe
// --------------------------------------------------------------------------------------
	case LoweBinary:			// Binary by Lowe
		{
		
#ifdef MRPT_OS_WINDOWS
			char			filImg[2000],filOut[2000];
			char			paramImg[2000];

			feats.clear();

			GetTempPathA(1000,filOut);	os::strcat(filOut,1000,"temp_out.txt");			// OUTPUT FILE
			GetTempPathA(1000,filImg);	os::strcat(filImg,1000,"temp_img.pgm");			// INPUT IMAGE (PGM) FOR ORIGINAL BINARY BY LOWE

			bool valid = img_grayscale.saveToFile( filImg );
			if(!valid)
				THROW_EXCEPTION( "An error occurred when saving input image into a .pgm file");

			// CONVERT TO UNCOMPRESSED RAW PGM (TODO: Solve in a better way this issue)
			os::strcpy( paramImg,1000, format( "cmd /C gmic.exe %s -o %s -quiet", filImg, filImg ).c_str() );

			bool ret = mrpt::system::launchProcess( paramImg );

			if(!ret)
				THROW_EXCEPTION("[extractFeaturesSIFT] Could not launch external process... (gmic.exe)");

			// ------------------------------------
			// Version  with "CreateProcess":
			// ------------------------------------
			os::strcpy(paramImg,1000,"cmd /C siftWin32.exe <"); os::strcat(paramImg,1000,filImg);
			os::strcat(paramImg,1000," >"); os::strcat(paramImg,1000,filOut);

			ret = mrpt::system::launchProcess( paramImg );

			if(!ret)
				THROW_EXCEPTION("[extractFeaturesSIFT] Could not launch external process... (siftWin32.exe)");

			// ------------------------------------
			// Process Results
			// ------------------------------------
			unsigned int dLen, nFeats;
			FILE *f = os::fopen( filOut, "rt");
			if(!f)
				THROW_EXCEPTION( "Error in extract SIFT with Lowe binary, output file not found!" );
			fscanf( f,"%u %u", &nFeats, &dLen);	// Number of feats and length of the descriptor

			for( size_t i = 0; i < nFeats; i++ )
			{
				CFeaturePtr feat	= CFeature::Create();

				feat->type			= featSIFT;			// Type
				feat->ID			= init_ID + i;		// Identifier

				// Position, orientation and scale
				// IMPORTANT NOTE: the Lowe format stores the 'y' coordinate first, then 'x'
				float fx,fy,fo,fs;
				fscanf( f, "%f %f %f %f", &fy, &fx, &fo, &fs );

				feat->x				= usingROI ? fx + ROI.xMin : fx;
				feat->y				= usingROI ? fy + ROI.yMin : fy;
				feat->orientation	= fo;
				feat->scale			= fs;

				// The descriptor
				feat->descriptors.SIFT.resize( dLen );
				unsigned int c;
				for(unsigned int k = 0; k < dLen; k++)
				{
					fscanf( f, "%u", &c );
					feat->descriptors.SIFT[k] = (unsigned char)c;
				}

				feats.push_back( feat );
			} // end for
			os::fclose( f );
			remove(filImg);
			remove(filOut);
#else
    THROW_EXCEPTION("Unfortunately, this SIFT Implementation only runs in Windows OS, try Hess implementation");
#endif
		break;
		} // end case Binary by Lowe
// --------------------------------------------------------------------------------------
//		Hess implementation
// --------------------------------------------------------------------------------------
		case Hess:			// Implementation by Robert Hess
		{

#if !MRPT_HAS_SIFT_HESS
			THROW_EXCEPTION("Method not available since MRPT has been compiled without Hess' SIFT library")
#elif MRPT_HAS_OPENCV	// OK, we have Hess' sift:
			IplImage* init_img;
			IplImage*** gauss_pyr, *** dog_pyr;
			CvMemStorage* storage;
			CvSeq* features;
			int octvs;
			std::cout << "got to hess 1";//gb
			/* check arguments */
			ASSERT_(img_grayscale.getWidth() != 0 && img_grayscale.getHeight() != 0);
			std::cout << "got to hess 2";//gb
			/* build scale space pyramid; smallest dimension of top level is ~4 pixels */
			const IplImage* ipl_im = img_grayscale.getAs<IplImage>();
			std::cout << "got to hess 3"; //gb the program crashes in the next line 
			init_img = create_init_img( ipl_im, SIFT_IMG_DBL, SIFT_SIGMA );
			std::cout << "got to hess 3b";//gb
			octvs = log( (float)(MIN( init_img->width, init_img->height )) ) / log((float)2) - 2;
			std::cout << "got to hess 4";//gb
			gauss_pyr = build_gauss_pyr( init_img, octvs, SIFT_INTVLS, SIFT_SIGMA );
			std::cout << "got to hess 5";//gb
			dog_pyr = build_dog_pyr( gauss_pyr, octvs, SIFT_INTVLS );
			std::cout << "got to hess 6";//gb
			storage = cvCreateMemStorage( 0 );
			std::cout << "got to hess 7"; //gb
			features = scale_space_extrema( dog_pyr, octvs, SIFT_INTVLS, 
				options.SIFTOptions.threshold, // SIFT_CONTR_THR,
				options.SIFTOptions.edgeThreshold, // SIFT_CURV_THR
				storage );
			calc_feature_scales( features, SIFT_SIGMA, SIFT_INTVLS );
			if( SIFT_IMG_DBL )
				adjust_for_img_dbl( features );
			calc_feature_oris( features, gauss_pyr );
			compute_descriptors( features, gauss_pyr, SIFT_DESCR_WIDTH, SIFT_DESCR_HIST_BINS );

			/* sort features by decreasing scale and move from CvSeq to array */
			cvSeqSort( features, (CvCmpFunc)feature_cmp, NULL );

			/* get only the desired features */
			if( nDesiredFeatures > 0 )
			{
				if( nDesiredFeatures < (unsigned int)features->total )
					cvSeqPopMulti( features, NULL, features->total - nDesiredFeatures );
				else
					cout << "[Warning] Detected less features than the requested " << features->total << " vs " << nDesiredFeatures << endl;
			} // end if

			/* convert CvSeq into a FeatureList */
			convertCvSeqInCFeatureList( features, feats, init_ID, ROI );

			// clear Hess-features
			cvClearSeq( features );
			cvReleaseMemStorage( &storage );
			cvReleaseImage( &init_img );
			release_pyr( &gauss_pyr, octvs, SIFT_INTVLS + 3 );
			release_pyr( &dog_pyr, octvs, SIFT_INTVLS + 2 );
#else
			THROW_EXCEPTION("Method not available since MRPT has been compiled without OpenCV")
#endif //MRPT_HAS_OPENCV
			break;
		} // end case Hess
//***********************************************************************************************
// USING OPENCV
//***********************************************************************************************
		case OpenCV:
		{
			

#if MRPT_HAS_OPENCV && MRPT_HAS_OPENCV_NONFREE

	#if MRPT_OPENCV_VERSION_NUM >= 0x211 && MRPT_OPENCV_VERSION_NUM < 0x300 

			SiftFeatureDetector SIFTDetector(
				options.SIFTOptions.threshold, //SIFT::DetectorParams::GET_DEFAULT_THRESHOLD(),
				options.SIFTOptions.edgeThreshold //SIFT::DetectorParams::GET_DEFAULT_EDGE_THRESHOLD() );
				); 

			SiftDescriptorExtractor SIFTDescriptor;

			vector<KeyPoint> cv_feats;									// The OpenCV output feature list


			const IplImage* cGrey = img_grayscale.getAs<IplImage>();

			Mat theImg = cvarrToMat( cGrey );
			SIFTDetector.detect( theImg, cv_feats );

			Mat desc;
			SIFTDescriptor.compute( theImg, cv_feats, desc );

			//fromOpenCVToMRPT( theImg, cv_feats, desc, nDesiredFeatures, outList );
			const size_t	N			= cv_feats.size();
			unsigned int	nMax		= nDesiredFeatures != 0 && N > nDesiredFeatures ? nDesiredFeatures : N;
			const int 		offset		= (int)this->options.patchSize/2 + 1;
			const size_t	size_2		= options.patchSize/2;
			const size_t 	imgH		= img.getHeight();
			const size_t 	imgW		= img.getWidth();
			unsigned int	i			= 0;
			unsigned int	cont		= 0;
			TFeatureID		nextID		= init_ID;
			feats.clear();
			while( cont != nMax && i != N )
			{
				const int xBorderInf = (int)floor( cv_feats[i].pt.x - size_2 );
				const int xBorderSup = (int)floor( cv_feats[i].pt.x + size_2 );
				const int yBorderInf = (int)floor( cv_feats[i].pt.y - size_2 );
				const int yBorderSup = (int)floor( cv_feats[i].pt.y + size_2 );

				if( options.patchSize==0 || ( (xBorderSup < (int)imgW) && (xBorderInf > 0) && (yBorderSup < (int)imgH) && (yBorderInf > 0) ) )
				{
					CFeaturePtr ft		= CFeature::Create();
					ft->type			= featSIFT;
					ft->ID				= nextID++;
					ft->x				= cv_feats[i].pt.x;
					ft->y				= cv_feats[i].pt.y;
					ft->response		= cv_feats[i].response;
					ft->orientation		= cv_feats[i].angle;
					ft->scale			= cv_feats[i].size;
					ft->patchSize		= options.patchSize;														// The size of the feature patch
					ft->descriptors.SIFT.resize( 128 );
					memcpy( &(ft->descriptors.SIFT[0]), &desc.data[128*i], 128*sizeof(ft->descriptors.SIFT[0]) );	// The descriptor

					if( options.patchSize > 0 )
					{
						img.extract_patch(
							ft->patch,
							round( ft->x ) - offset,
							round( ft->y ) - offset,
							options.patchSize,
							options.patchSize );						// Image patch surrounding the feature
					}
					feats.push_back( ft );
					++cont;
				}
				++i;
			}
			feats.resize( cont );
	#endif

	#if MRPT_OPENCV_VERSION_NUM >= 0x300 
			
			using namespace cv;
			vector<KeyPoint> cv_feats;

			cv::Ptr<cv::xfeatures2d::SIFT>sift = cv::xfeatures2d::SIFT::create(nDesiredFeatures,3, options.SIFTOptions.threshold, options.SIFTOptions.edgeThreshold,1.6 ); //gb
			const IplImage* cGrey = img_grayscale.getAs<IplImage>();
			Mat theImg = cvarrToMat(cGrey);
			//SIFTDetector.detect(theImg, cv_feats);
			sift->detect(theImg, cv_feats); //gb
			Mat desc;
			//SIFTDescriptor.compute(theImg, cv_feats, desc);
			sift->compute(theImg, cv_feats, desc);
			
			//fromOpenCVToMRPT( theImg, cv_feats, desc, nDesiredFeatures, outList );
			const size_t	N = cv_feats.size();
			unsigned int	nMax = nDesiredFeatures != 0 && N > nDesiredFeatures ? nDesiredFeatures : N;
			const int 		offset = (int)this->options.patchSize / 2 + 1;
			const size_t	size_2 = options.patchSize / 2;
			const size_t 	imgH = img.getHeight();
			const size_t 	imgW = img.getWidth();
			unsigned int	i = 0;
			unsigned int	cont = 0;
			TFeatureID		nextID = init_ID;
			feats.clear();


			while (cont != nMax && i != N)
			{
				const int xBorderInf = (int)floor(cv_feats[i].pt.x - size_2);
				const int xBorderSup = (int)floor(cv_feats[i].pt.x + size_2);
				const int yBorderInf = (int)floor(cv_feats[i].pt.y - size_2);
				const int yBorderSup = (int)floor(cv_feats[i].pt.y + size_2);

				if (options.patchSize == 0 || ((xBorderSup < (int)imgW) && (xBorderInf > 0) && (yBorderSup < (int)imgH) && (yBorderInf > 0)))
				{
					CFeaturePtr ft = CFeature::Create();
					ft->type = featSIFT;
					ft->ID = nextID++;
					ft->x = cv_feats[i].pt.x;
					ft->y = cv_feats[i].pt.y;
					ft->response = cv_feats[i].response;
					ft->orientation = cv_feats[i].angle;
					ft->scale = cv_feats[i].size;
					ft->patchSize = options.patchSize;														// The size of the feature patch
					ft->descriptors.SIFT.resize(128);
					memcpy(&(ft->descriptors.SIFT[0]), &desc.data[128 * i], 128 * sizeof(ft->descriptors.SIFT[0]));	// The descriptor

					if (options.patchSize > 0)
					{
						img.extract_patch(
							ft->patch,
							round(ft->x) - offset,
							round(ft->y) - offset,
							options.patchSize,
							options.patchSize);						// Image patch surrounding the feature
					}
					feats.push_back(ft);
					++cont;
				}
				++i;
			}
			feats.resize(cont);


	#endif
#else
	THROW_EXCEPTION("This method requires OpenCV >= 2.1.1 with nonfree module")
#endif
			break;
		} // end case OpenCV
		default:{break;} // end default (a stray unreachable `return;` before this label has been removed)
	} // end switch
} // end extractFeaturesSIFT
Example #21
int main()
{
	//step1 load image
	Mat img1=imread("alcatraz1.jpg");
	Mat img2=imread("alcatraz2.jpg");
	Mat gimg1=imread("alcatraz1.jpg",CV_LOAD_IMAGE_GRAYSCALE);
	Mat gimg2=imread("alcatraz2.jpg",CV_LOAD_IMAGE_GRAYSCALE);
	//cvtColor(img1,gimg1,CV_BGR2GRAY);
	cout<<"compute keypoint"<<endl;
	//step2 compute keypoint
	SiftFeatureDetector detector;
	vector<KeyPoint> kp1, kp2; 
	detector.detect(gimg1,kp1);
	detector.detect(gimg2,kp2);
	//step3 compute descriptor
	SiftDescriptorExtractor extractor;
	Mat descriptor1,descriptor2;
	extractor.compute(gimg1,kp1,descriptor1);
	extractor.compute(gimg2,kp2,descriptor2);
	cout<<"compute match"<<endl;
	//step4 gimg1 <-->gimg2 match
	BFMatcher matcher(NORM_L2);
	vector<DMatch> matches1,matches2,twoside_matches;
	matcher.match(descriptor1,descriptor2,matches1);
	matcher.match(descriptor2,descriptor1,matches2);
	cout<<"match end"<<endl;
	vector<DMatch>::iterator it1;
	vector<DMatch>::iterator it2;
	for(it1 = matches1.begin();it1 != matches1.end();it1++)
	{
		for(it2 = matches2.begin();it2 != matches2.end();it2++)
		{
			if((*it1).queryIdx == (*it2).trainIdx && (*it2).queryIdx == (*it1).trainIdx)
			{
				twoside_matches.push_back(DMatch((*it1).queryIdx,(*it1).trainIdx,(*it1).distance));
				//break;
			}	
		}
	}
	//step5 draw twoside_matches
	Mat imgmatches;
	drawMatches(gimg1,kp1,gimg2,kp2,twoside_matches,imgmatches);
	//load matches keypoint
	int n=twoside_matches.size();
	Mat_<float> matches_kp1(3,n),matches_kp2(3,n);
//	vector<DMatch>::iterator it3;
//	for(it3 = twoside_matches.begin();it3 != twoside_matches.end();it3++)
//	{
//		matches_kp1.puch_back(kp);
//	}
	for(int i=0;i < twoside_matches.size();i++)
	{
		Point2f x1=kp1[twoside_matches[i].queryIdx].pt;
		Point2f x2=kp2[twoside_matches[i].trainIdx].pt;	//trainIdx indexes kp2 (the original used queryIdx here, a bug)
		matches_kp1(0,i)=x1.x;
		matches_kp1(1,i)=x1.y;
		matches_kp1(2,i)=1;
		matches_kp2(0,i)=x2.x;
		matches_kp2(1,i)=x2.y;
		matches_kp2(2,i)=1;
	}
	cout<<"save keypoints"<<endl;
	FileStorage fs("points.yml",FileStorage::WRITE);
	fs<<"x1"<<matches_kp1;
	fs<<"x2"<<matches_kp2;
	fs.release();
        cout<<"show match"<<endl;
	namedWindow("img1",WINDOW_NORMAL);
	namedWindow("img2",WINDOW_NORMAL);
	namedWindow("imgmatch",WINDOW_NORMAL);
	imshow("img1",gimg1);
	imshow("img2",gimg2);
	imshow("imgmatch",imgmathces);
	waitKey(0);

	return 0;
}
Example #22
int trainData() {

    std:: string videoName="";

    int n_frames[1000];
    //create dictionary
    int dict_size=100;//***

    Mat features;
    for(int i=1; i<no_videos; i++) {


        stringstream temp;
        temp<<i;
        std::string no=temp.str();
        videoName="C:/Rasika/trainvideos/video_"+no+".avi"; //*** path can be changed

        //initialize capture
        VideoCapture cap;
        cap.open(videoName);
        if(!cap.isOpened())  // check if we succeeded
            return -1;

        double count = cap.get(CV_CAP_PROP_FRAME_COUNT); //get the frame count

        //create window to show image
        //namedWindow("Video",1);
        //cout<<count<<endl;
        int jump=count/N;
        int j=1;

        int u=0;
        if(count<10) {
            jump=1;
        }
        int cnt=jump;
        while(u<10) {

            //Create matrix to store video frame
            Mat image;
            cap.set(CV_CAP_PROP_POS_FRAMES,cnt); //Set index to jump for particular count
            bool success = cap.read(image);
            if (!success) {
                cout << "Cannot read  frame " << endl;
                break;
            }

            ///////////Convert to gray scale/////////////
            Mat gray_image;
            cvtColor( image, gray_image, CV_BGR2GRAY );

            ////////EXTRACT INTEREST POINTS USING SIFT////
            // vector of keypoints
            std::vector<cv::KeyPoint> keypoints;
            // Construct the SIFT feature detector object
            SiftFeatureDetector sif(0.03,10.); // threshold  //***
            //Detect interest points
            sif.detect(gray_image,keypoints);

            ////////IMSHOW THE FRAMES EXTRACTED///////////

            //copy video stream to image
            //cap>>image;
            //print image to screen
            //imshow("Video",image);


            ///////////Save the frames//////////////

            stringstream temp2;
            temp2<<j;
            std::string no2=temp2.str();
            std::string frame_name="frame"+no2+".jpg";
            imwrite(frame_name,image);


            //////////////Draw the keypoints////////////

            /*
            Mat featureImage;
            // Draw the keypoints with scale and orientation information
            drawKeypoints(image, // original image
            keypoints, // vector of keypoints
            featureImage, // the resulting image
            Scalar(255,0,255), // color of the points
            DrawMatchesFlags::DRAW_RICH_KEYPOINTS); //flag
            //std::string name="image"+i;
            imshow(frame_name, featureImage );
            */

            ////////////////////compute the descriptors//////////////////

            SiftDescriptorExtractor siftExtractor;
            Mat siftDesc;
            siftExtractor.compute(gray_image,keypoints,siftDesc);
            features.push_back(siftDesc);//add the descriptors from each frame..to create one for a video

            ////////////////
            //delay 33ms //***
            //waitKey(33);

            cnt+=jump;
            j++;
            u++;
            ///next frame for the same video
        }

        //store number of frames per video
        n_frames[i-1]=j-1;



    }

    TermCriteria term(CV_TERMCRIT_ITER,100,0.001);//***

    //retries number ***
    int retries=1;

    int flags=KMEANS_PP_CENTERS;
    BOWKMeansTrainer bowTrainer(dict_size,term,retries,flags);
    //cluster the feature vectors
    Mat dictionary=bowTrainer.cluster(features);

    //for further process
    full_dictionary.push_back(dictionary);
    ///////////////////////////////////////////////////
    FileStorage fs("full_dictionary.yml", FileStorage::WRITE);
    fs << "vocabulary" << full_dictionary;
    fs.release();
    //Created Vocabulary

    //Calculate histograms for the train videos
    //idf_vector(full_dictionary);

    return 0;
}
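The commented-out idf_vector step would map each video's descriptors onto this vocabulary. With the dictionary saved above, the standard OpenCV 2.x route is BOWImgDescriptorExtractor, as in Example #18; a sketch:

// Sketch: turn one frame's SIFT keypoints into a BoW histogram.
Ptr<DescriptorMatcher> matcher(new FlannBasedMatcher);
Ptr<DescriptorExtractor> extractor(new SiftDescriptorExtractor);
BOWImgDescriptorExtractor bowDE(extractor, matcher);
bowDE.setVocabulary(dictionary);
Mat histogram;
bowDE.compute(gray_image, keypoints, histogram);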
Example #23
void call_my_code() {
    
//    Mat img1_rgb = imread("/Users/sunyuyin/Desktop/img_stiching/img2_online.jpg");
//    Mat img2_rgb = imread("/Users/sunyuyin/Desktop/img_stiching/img1_online.jpg");
    
    
    Mat img1_rgb = imread("/Users/sunyuyin/Desktop/img_stiching/img1.JPG");
    Mat img2_rgb = imread("/Users/sunyuyin/Desktop/img_stiching/img2.JPG");

    
    if (img1_rgb.empty() || img2_rgb.empty()) {
        exit(-1);
    }
    
    Mat img1, img2;
    
    cvtColor(img1_rgb, img1, CV_BGR2GRAY);	//imread loads BGR, so convert with CV_BGR2GRAY (the original used CV_RGB2GRAY)
    cvtColor(img2_rgb, img2, CV_BGR2GRAY);
    
    
    SiftFeatureDetector detector;
    vector<KeyPoint> keypoints_img1, keypoints_img2;
    detector.detect(img1, keypoints_img1);
    detector.detect(img2, keypoints_img2);
    
    SiftDescriptorExtractor extractor;
    
    Mat descriptors_img1, descriptors_img2;
    
    extractor.compute(img1, keypoints_img1, descriptors_img1);
    extractor.compute(img2, keypoints_img2, descriptors_img2);
    
    FlannBasedMatcher matcher;
    vector<DMatch> matches;
    matcher.match(descriptors_img1, descriptors_img2, matches);
    
    double max_dist = 0.0, min_dist = numeric_limits<double>::max();
    
    for (int i = 0; i < descriptors_img1.rows; ++i) {
        double dist = matches[i].distance;
        if (dist < min_dist) min_dist = dist;
        if (dist > max_dist) max_dist = dist;
    }
    
    cout << "-- Max dist: " << max_dist << endl;
    cout << "-- Min dist: " << min_dist << endl;
    
    
    vector<DMatch> good_matches;
    for (int i = 0; i < descriptors_img1.rows; ++i) {
        if (matches[i].distance < 3 * min_dist) {
            good_matches.push_back(matches[i]);
        }
    }
    
    vector<Point2f> img1_matches;
    vector<Point2f> img2_matches;
    
    for (int i = 0; i < good_matches.size(); ++i) {
        img1_matches.push_back(keypoints_img1[good_matches[i].queryIdx].pt);
        img2_matches.push_back(keypoints_img2[good_matches[i].trainIdx].pt);
    }
    
    Mat H = findHomography(img1_matches, img2_matches, CV_RANSAC);
    
    Mat result;
    
    cout << H << endl;
    
    
    warpPerspective(img1_rgb, result, H, Size(img1_rgb.cols+img2_rgb.cols,img1_rgb.rows));
    
    /*
     namedWindow("img1");
     imshow("img1", img1);
     namedWindow("result");
     imshow("result", result);
     */
    
    
    
     Mat half(result, Rect(0, 0, img2_rgb.cols, img2_rgb.rows));
     img2_rgb.copyTo(half);
     imshow("Result", result);
    
    /*
    
    Mat img_matches;
    drawMatches(img1, keypoints_img1, img2, keypoints_img2, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
                vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
    
    namedWindow("img_matches");
    imshow("img_matches", img_matches);
    
    */
    /*
     
     Mat img1_output, img2_output;
     
     drawKeypoints(img1, keypoints_img1, img1_output);
     namedWindow("Image 1 keypoints", WINDOW_AUTOSIZE);
     
     imshow("Image 1 keypoints", img1_output);
     
     
     drawKeypoints(img2, keypoints_img2, img2_output);
     namedWindow("Image 2 keypoints");
     
     imshow("Image 2 keypoints", img2_output);
     */

}
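
The 3*min_dist filter above degenerates when min_dist is 0 (nothing passes) or when all matches are poor (everything passes). A minimal alternative sketch using Lowe's ratio test with knnMatch; the 0.75 ratio is an assumed typical value, not one tuned for these images:

#include <opencv2/features2d/features2d.hpp>
using namespace cv;
using namespace std;

vector<DMatch> ratioTestFilter(const Mat &descriptors_img1, const Mat &descriptors_img2)
{
    FlannBasedMatcher matcher;
    vector<vector<DMatch> > knn_matches;
    matcher.knnMatch(descriptors_img1, descriptors_img2, knn_matches, 2); // 2 nearest neighbors per query

    vector<DMatch> good_matches;
    for (size_t i = 0; i < knn_matches.size(); ++i) {
        // accept a match only if it is clearly better than its runner-up
        if (knn_matches[i].size() == 2 &&
            knn_matches[i][0].distance < 0.75f * knn_matches[i][1].distance)
            good_matches.push_back(knn_matches[i][0]);
    }
    return good_matches;
}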
/**
 * @function detectAndDisplay  returns 0 if the object is found, 1 if not
 */
int detectAndDisplay( Mat img_frame, Mat img_object, vector<KeyPoint> keypoints_object, Mat descriptors_object,vision::platePosition::Response &res, Mat H2)
{
	SiftFeatureDetector detector;
	SiftDescriptorExtractor extractor;
	std::vector<KeyPoint> keypoints_frame;
	Mat descriptors_frame;
	
	//-- Step 1: Detect the keypoints
	detector.detect( img_frame, keypoints_frame );
	
	//-- Step 2: Calculate descriptors (feature vectors)
	extractor.compute( img_frame, keypoints_frame, descriptors_frame );
	
	//-- Step 3: Matching descriptor vectors using FLANN matcher
	FlannBasedMatcher matcher;
	std::vector< DMatch > matches;
	printf("size: descriptor_object rows: %d\t descriptors_frame rows: %d\n",descriptors_object.rows,descriptors_frame.rows);
	if(!descriptors_frame.rows){
		printf("!!null scene descriptor\n");
		return 1;
	}
	matcher.match( descriptors_object, descriptors_frame, matches );
	//printf("matches size: %d\n",matches.size());
	

	//-- Quick calculation of max and min distances between keypoints
	double max_dist = 0; double min_dist = 1000;
	for( int i = 0; i < descriptors_object.rows; i++ )
	{ double dist = matches[i].distance;
		if( dist < min_dist ) min_dist = dist;
		if( dist > max_dist ) max_dist = dist;
		//printf("i:%d\t",i);
	}
	printf("-- Max dist : %f \n", max_dist );
    printf("-- Min dist : %f \n", min_dist );

	
	//-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
	std::vector< DMatch > good_matches;
	for( int i = 0; i < descriptors_object.rows; i++ )
	{ 
		//~ if( matches[i].distance < 3*max(0.02,min_dist) )
		//~ { good_matches.push_back( matches[i]); }
		if( matches[i].distance < 250)
		{ good_matches.push_back( matches[i]); }
	}
	printf("good matches size %d\n",(int)good_matches.size());
	Mat img_matches;
	drawMatches( img_object, keypoints_object, img_frame, keypoints_frame,
			   good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
			   vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
			  
	//-- Localize the object from img_1 in img_2
	std::vector<Point2f> obj;
	std::vector<Point2f> scene;

	for( size_t i = 0; i < good_matches.size(); i++ )
	{
		//-- Get the keypoints from the good matches
		obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
		scene.push_back( keypoints_frame[ good_matches[i].trainIdx ].pt );
	}
	if (good_matches.size()<=9){
	  printf("insufficient good matches\n");
	  imshow( window_name, img_matches );
	  waitKey(0);
	  
      return 1;
	}
	else{
		Mat H = findHomography( obj, scene, CV_RANSAC );
		printf("homography estimated\n");
		
		//-- Get the corners from the image_1 ( the object to be "detected" )
		std::vector<Point2f> obj_corners(4);
		obj_corners[0] = Point(0,0); obj_corners[1] = Point( img_object.cols, 0 );
		obj_corners[2] = Point( img_object.cols, img_object.rows ); obj_corners[3] = Point( 0, img_object.rows );
		std::vector<Point2f> scene_corners(4);
		
		perspectiveTransform( obj_corners, scene_corners, H);
		
		
		//-- Draw lines between the corners (the mapped object in the scene - image_2 )
		Point2f offset( (float)img_object.cols, 0);
		printf("image object size: row %d, col %d\n",img_object.rows,img_object.cols);
        printf("image scene size: row %d, col %d\n",img_frame.rows,img_frame.cols);
		line( img_matches, scene_corners[0] + offset, scene_corners[1] + offset, Scalar(0, 255, 0), 4 );
		line( img_matches, scene_corners[1] + offset, scene_corners[2] + offset, Scalar( 0, 255, 0), 4 );
		line( img_matches, scene_corners[2] + offset, scene_corners[3] + offset, Scalar( 0, 255, 0), 4 );
		line( img_matches, scene_corners[3] + offset, scene_corners[0] + offset, Scalar( 0, 255, 0), 4 );
        
        string point1 = "p1: "+tostr(scene_corners[0].x)+" "+tostr(scene_corners[0].y);
        string point2 = "p2: "+tostr(scene_corners[1].x)+" "+tostr(scene_corners[1].y);
        string point3 = "p3: "+tostr(scene_corners[2].x)+" "+tostr(scene_corners[2].y);
        string point4 = "p4: "+tostr(scene_corners[3].x)+" "+tostr(scene_corners[3].y);
        putText(img_matches,point1,scene_corners[0] + offset, FONT_HERSHEY_SCRIPT_SIMPLEX, 0.5, Scalar(255,0,255),2);
        putText(img_matches,point2,scene_corners[1] + offset, FONT_HERSHEY_SCRIPT_SIMPLEX, 0.5, Scalar(255,0,255),2);
		putText(img_matches,point3,scene_corners[2] + offset, FONT_HERSHEY_SCRIPT_SIMPLEX, 0.5, Scalar(255,0,255),2);
        putText(img_matches,point4,scene_corners[3] + offset, FONT_HERSHEY_SCRIPT_SIMPLEX, 0.5, Scalar(255,0,255),2);
        
        printf("point:\n%s\n%s\n%s\n%s\n",point1.c_str(),point2.c_str(),point3.c_str(),point4.c_str());
        
		imshow( window_name, img_matches );
        waitKey(0);
        
        
        //to find the plate in real position
        //to calibrate change pr_r* and Point2f pr_p*
        //position_reference real
        Point2f pr_r1(0.852991670452,0.166859215521),pr_r2(0.870399996545, -0.185887708082),pr_r3( 0.588867961436,-0.170358598533),pr_r4(0.597280405865,0.178723491525);
        //position_reference pixel//set to plate
        Point2f pr_p1(247.478,181.249),pr_p2(420.907,181.566),pr_p3(429.926,313.306),pr_p4(230.793,306.769);
    
        
        std::vector<Point2f> ref_pixel_position;//known
        std::vector<Point2f> ref_real_position;//to measure by moving the Baxter hand to the point
        //std::vector<Point2f> plate_pixel_position;//=scene corners known
        std::vector<Point2f> plate_real_position;//to find
        
        ref_real_position.push_back(pr_r1);
        ref_real_position.push_back(pr_r2);
        ref_real_position.push_back(pr_r3);
        ref_real_position.push_back(pr_r4);
        
        ref_pixel_position.push_back(pr_p1);
        ref_pixel_position.push_back(pr_p2);
        ref_pixel_position.push_back(pr_p3);
        ref_pixel_position.push_back(pr_p4);
        
        
        H2 = findHomography( ref_pixel_position, ref_real_position );// pixel -> real-world homography (assigns the H2 parameter instead of shadowing it with a local)
        
        perspectiveTransform( scene_corners, plate_real_position, H2);
        
        //print out
        string plate_point1 = "p1: "+tostr(plate_real_position[0].x)+" "+tostr(plate_real_position[0].y);
        string plate_point2 = "p2: "+tostr(plate_real_position[1].x)+" "+tostr(plate_real_position[1].y);
        string plate_point3 = "p3: "+tostr(plate_real_position[2].x)+" "+tostr(plate_real_position[2].y);
        string plate_point4 = "p4: "+tostr(plate_real_position[3].x)+" "+tostr(plate_real_position[3].y);
        printf("plate real position:\n%s\n%s\n%s\n%s\n",plate_point1.c_str(),plate_point2.c_str(),plate_point3.c_str(),plate_point4.c_str());
        
        
        //write to service response
        res.p1[0] = plate_real_position[0].x;
        res.p1[1] = plate_real_position[0].y;
        res.p2[0] = plate_real_position[1].x;
        res.p2[1] = plate_real_position[1].y;
        res.p3[0] = plate_real_position[2].x;
        res.p3[1] = plate_real_position[2].y;
        res.p4[0] = plate_real_position[3].x;
        res.p4[1] = plate_real_position[3].y;
        return 0;
    }
}
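
The calibration block above fits a plane-to-plane homography from four known pixel/real correspondences and then maps the detected corners through it. A minimal standalone sketch of that pixel-to-world step; the helper name is illustrative and the reference points are whatever was measured for the rig:

#include <opencv2/core/core.hpp>
#include <opencv2/calib3d/calib3d.hpp>
using namespace cv;
using namespace std;

Point2f pixelToWorld(const Point2f &pixel,
                     const vector<Point2f> &ref_pixel_position,
                     const vector<Point2f> &ref_real_position)
{
    // plane-to-plane mapping; needs at least 4 non-collinear correspondences
    Mat H = findHomography(ref_pixel_position, ref_real_position);

    vector<Point2f> in(1, pixel), out;
    perspectiveTransform(in, out, H); // applies H including the projective division
    return out[0];
}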
int main(int argc, char* argv[])
{
    char *filename = new char[100];

    vector<string> validFormats;
    validFormats.push_back("png");
    validFormats.push_back("ppm");
    validFormats.push_back("jpg");
    validFormats.push_back("gif");
    validFormats.push_back("bmp");
    validFormats.push_back("tiff");


    Mat input;

    //To store the keypoints that will be extracted by SIFT
    vector<KeyPoint> keypoints;

    //To store the SIFT descriptor of current image
    Mat descriptor;

    //To store all the descriptors that are extracted from all the images.
    Mat featuresUnclustered;

    //The SIFT feature extractor and descriptor
    SiftDescriptorExtractor detector;

    DIR *dir;
    struct dirent *ent;

    if((dir = opendir(argv[1])) != NULL)
    {
        while((ent = readdir(dir)) != NULL)
        {
            if(ent->d_type == DT_REG)
            {
                string fullname(ent->d_name);
                int lastindex = fullname.find_last_of(".");
                string format = fullname.substr(lastindex + 1, fullname.length() - 1);

                if(find(validFormats.begin(), validFormats.end(), format) != validFormats.end())
                {
                    sprintf(filename, "%s/%s",argv[1], ent->d_name);
                    printf("%s\n", filename);
                    input = imread(filename, CV_LOAD_IMAGE_GRAYSCALE);
                    detector.detect(input, keypoints);
                    detector.compute(input, keypoints, descriptor);
                    featuresUnclustered.push_back(descriptor);
                }
            }
        }
        closedir(dir);
    }
    else
    {
        perror("");
        return EXIT_FAILURE;
    }

    int dictionarySize = 200;
    TermCriteria tc(CV_TERMCRIT_ITER, 100, 0.001); // note: with CV_TERMCRIT_ITER alone, the epsilon value is ignored
    int retries = 1;
    int flags = KMEANS_RANDOM_CENTERS;

    BOWKMeansTrainer bowTrainer(dictionarySize,tc,retries,flags);
    //cout << "I'm here too\n";

    Mat dictionary = bowTrainer.cluster(featuresUnclustered);

    sprintf(filename, "%s/dictionary.yml", argv[2]);
    FileStorage fs(filename, FileStorage::WRITE);
    fs << "vocabulary" << dictionary;
    fs.release();

    //create a nearest neighbor matcher
    Ptr<DescriptorMatcher> matcher(new FlannBasedMatcher);

    //create Sift feature point extracter
    Ptr<FeatureDetector> siftdetector(new SiftFeatureDetector());

    //create Sift descriptor extractor
    Ptr<DescriptorExtractor> extractor(new SiftDescriptorExtractor);

    //create BoF (or BoW) descriptor extractor
    BOWImgDescriptorExtractor bowDE(extractor,matcher);

    //Set the dictionary with the vocabulary we created in the first step
    bowDE.setVocabulary(dictionary);

    //To store the image file name
    char *filename2 = new char[100];



    int i = 1;
    if((dir = opendir(argv[1])) != NULL)
    {
        while((ent = readdir(dir)) != NULL)
        {
            if(ent->d_type == DT_REG)
            {
                sprintf(filename, "%s/%s",argv[1], ent->d_name);

                string fullname(ent->d_name);
                int lastindex = fullname.find_last_of(".");
                string format = fullname.substr(lastindex + 1, fullname.length() - 1);

                if(find(validFormats.begin(), validFormats.end(), format) != validFormats.end())
                {
                    string rawname = fullname.substr(0, lastindex);
                    string complete = rawname + "*" + format;
                    // printf("Complete filename: %s\n", complete.c_str());

                    Mat img = imread(filename,CV_LOAD_IMAGE_GRAYSCALE);

                    vector<KeyPoint> keypoints;
                    siftdetector->detect(img,keypoints);

                    Mat bowDescriptor;
                    bowDE.compute(img,keypoints,bowDescriptor);

                    sprintf(filename2, "%s/siftdescriptors/%s.yml", argv[2], complete.c_str());

                    FileStorage fs1(filename2, FileStorage::WRITE);
                    printf("%s\n", filename2);

                    fs1 << rawname.c_str() << bowDescriptor;
                    fs1.release();
                }
            }
        }
        closedir(dir);
    }
    else
    {
        perror("");
        return EXIT_FAILURE;
    }
}
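
Once every image has a stored bowDescriptor like the ones written above, retrieval reduces to comparing fixed-length histograms. A minimal sketch comparing two stored descriptors with an L2 distance; the file paths and keys are placeholders following the naming scheme above:

#include <opencv2/core/core.hpp>
using namespace cv;

double bowDistance(const char *ymlA, const char *keyA,
                   const char *ymlB, const char *keyB)
{
    Mat histA, histB;

    FileStorage fsA(ymlA, FileStorage::READ);
    fsA[keyA] >> histA; // 1 x dictionarySize visual-word histogram
    fsA.release();

    FileStorage fsB(ymlB, FileStorage::READ);
    fsB[keyB] >> histB;
    fsB.release();

    // smaller distance = more similar visual-word distribution
    return norm(histA, histB, NORM_L2);
}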
Example #26
0
int main(int argc,char** argv){

Mat image1,image2;

const char* source_window = "Source image";

 /// Load images
 if( argc != 3 )
   {
     printf( "usage: %s image1 image2 \n", argv[0] );
     return 1;
   }

 image1 = imread( argv[1], 1 );
 image2 = imread( argv[2], 1 );

  if( !image1.data || !image2.data )
    {
      printf( "No image data \n" );
      return 1;
    }


    int cols=image1.cols;
    int rows=image1.rows;
   //   cout<<"\ntaille de la matrice:" <<image1.size();
  //  cout<<"\ntype de la matrice: \n" << image1.type();
  //  cout<<"\nflags" << image1.flags;
  //  cout<<"\ndims" << image1.dims;
    cout<<"\nrows" << image1.rows;
    cout<<"\ncols" << image1.cols;
  //  Point pt = Point(1,2);
    
  //  cout<<"\nnombre de chanels: " << image1.channels();

  //  cout<< "\npoints 1 1 " << (int)image1.at<cv::Vec3b>(0,1)[1];
    
    /*
    for(int i=0;i<cols;i++){
	for(int j=0;j<rows;j++){
		image1.at<cv::Vec3b>(i,j)[0]=0;
		image1.at<cv::Vec3b>(i,j)[1]=0;
		image1.at<cv::Vec3b>(i,j)[2]=0;
	}
    }
    */

    cout<< "\nmais que se passe-t'il?";

 // cout<<"\nimage1" <<  image1; 

 /// vector of keypoints 
  vector<KeyPoint> keypoints1,keypoints2;



///Construct the SIFT feature detector object
  SiftFeatureDetector sift;

  sift.detect(image1,keypoints1);
  sift.detect(image2,keypoints2);

  namedWindow( "Image 1", CV_WINDOW_AUTOSIZE );
  imshow( "Image 1", image1 );
  namedWindow( "Image 2", CV_WINDOW_AUTOSIZE );
  imshow( "Image 2", image2 );
  //print the coordinates of the keypoints
	/*for(int i=0;i<keypoints1.size();i++){
        cout<<"\n\nkeypoints number" << i <<"\n";
	cout<<"\nkeypoints1" <<  keypoints1[i].pt; 
  	cout<<"\nkeypoints1x " <<  keypoints1[i].pt.x; 
	cout<<"\nkeypoints1y " <<  keypoints1[i].pt.y; 
         
	}*/


  /*Mat imcopy;
  image1.copyTo(imcopy);
  for(int i=0;i<keypoints1.size();i++){
     imcopy.at<cv::Vec3b>(keypoints1[i].pt.y,keypoints1[i].pt.x)[0]=0;
     imcopy.at<cv::Vec3b>(keypoints1[i].pt.y,keypoints1[i].pt.x)[1]=0;
     imcopy.at<cv::Vec3b>(keypoints1[i].pt.y,keypoints1[i].pt.x)[2]=255;
  }
  namedWindow( "Image copy", CV_WINDOW_AUTOSIZE );
  imshow( "Image copy",  imcopy );
  */

 
  cout << "\ntaille du vecteur de keypoints: " << keypoints1.size(); 

  
  SiftDescriptorExtractor siftDesc;
  
  Mat descriptors1,descriptors2;
  siftDesc.compute(image1,keypoints1,descriptors1);
  siftDesc.compute(image2,keypoints2,descriptors2);
  
   // Construction of the matcher
BruteForceMatcher<L2<float> > matcher;

// Match the two image descriptors
vector<DMatch> matches;
matcher.match(descriptors1,descriptors2, matches);

// keep only the 25 best matches (smallest distances)
if (matches.size() > 25) {
    nth_element(matches.begin(),    // initial position
                matches.begin()+24, // position of the sorted element
                matches.end());     // end position
    // remove all elements after the 25th
    matches.erase(matches.begin()+25, matches.end());
}
	//display the element attributes
	//cout<< "\nmatches  " <<  matches;
	
	//print the matches


	for(int i=0;i<matches.size();i++){
//print the attributes
/*		cout<< "\n\npoint num " <<  i;		
		cout<< "\nimgIdx  " <<  matches[i].imgIdx ;	
		cout<< "\nqueryIdx   " <<  matches[i].queryIdx;
		cout<< "\ntrainIdx   " <<  matches[i].trainIdx;
		cout<< "\ndistance   " <<  matches[i].distance;
*/
                
/*
		while(matches[i].distance >100  && i<matches.size()){
			cout << "\ni= " << i;
			matches.erase(matches.begin()+i, matches.begin()+i+1);
		}
           */     
                
	}
        

for(int i=0;i<matches.size();i++){
		cout<< "\nLinking the point with coordinates x1= " << keypoints1[matches[i].queryIdx].pt.x;
		cout<< "\ny1= " << keypoints1[matches[i].queryIdx].pt.y;

		cout<< "\nwith the point of coordinates x2= " << keypoints2[matches[i].trainIdx].pt.x;
 		cout<< "\ny2= " << keypoints2[matches[i].trainIdx].pt.y;

}
	



      cout << '\n' << "nombre de correspondances:" << matches.size() << '\n';  
	
      
      //matches.erase(matches.begin(), matches.end());
      //keypoints1.erase(keypoints1.begin(), keypoints1.end());
      //keypoints2.erase(keypoints2.begin(), keypoints2.end());
      //matches.erase(matches.begin(), matches.begin()+1600);



Mat imageMatches;
Mat matchesMask;
drawMatches(
  image1,keypoints1, // 1st image and its keypoints
  image2,keypoints2, // 2nd image and its keypoints
  matches,            // the matches
  imageMatches,      // the image produced
  Scalar::all(-1),   // color of the lines
  Scalar(255,255,255) //color of the keypoints
  );
  namedWindow( "Matches SIFT", CV_WINDOW_AUTOSIZE );
  imshow( "Matches SIFT", imageMatches );
  imwrite("resultat.png", imageMatches);

  /*
  drawKeypoints(src,keypoints1,dst,cv::Scalar(255,255,255));
  cout << '\n' << keypoints1.size() << '\n';
  imshow( "Image 1", dst );
  
  imwrite("resultat.png", dst);
  */


  waitKey(0);
  
  return 0;
}
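
nth_element only partitions the range; when a fully sorted "top K" list is wanted (for example to draw the strongest matches first), partial_sort does both steps at once. A minimal sketch relying on DMatch's operator<, which compares distances:

#include <algorithm>
#include <vector>
#include <opencv2/features2d/features2d.hpp>

void keepBestMatches(std::vector<cv::DMatch> &matches, size_t k)
{
    if (matches.size() <= k) return;       // nothing to trim
    std::partial_sort(matches.begin(),     // sort the first k elements...
                      matches.begin() + k, // ...into ascending distance order
                      matches.end());
    matches.erase(matches.begin() + k, matches.end()); // drop the rest
}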
//--------------------------------------[ main() function ]---------------------------------------
//          Description: entry point of the console application; execution starts here
//-------------------------------------------------------------------------------------------------
int main()
{
	//[0] change the console text color
	system("color 5F"); 

	ShowHelpText();

	//[1] load the image, show it, and convert it to grayscale
	Mat trainImage = imread("1.jpg"), trainImage_gray;
	imshow("Original Image",trainImage);
	cvtColor(trainImage, trainImage_gray, CV_BGR2GRAY);

	//[2] detect SIFT keypoints and extract descriptors from the training image
	vector<KeyPoint> train_keyPoint;
	Mat trainDescription;
	SiftFeatureDetector featureDetector;
	featureDetector.detect(trainImage_gray, train_keyPoint);
	SiftDescriptorExtractor featureExtractor;
	featureExtractor.compute(trainImage_gray, train_keyPoint, trainDescription);

	// [3] brute-force matching based on the descriptors
	BFMatcher matcher;
	vector<Mat> train_desc_collection(1, trainDescription);
	matcher.add(train_desc_collection);
	matcher.train();

	//[4] create the video capture object and the frame counter
	VideoCapture cap(0);
	unsigned int frameCount = 0;//number of frames

	//[5] loop until the 'q' key is pressed
	while(char(waitKey(1)) != 'q')
	{
		//<1> set up parameters
		double time0 = static_cast<double>(getTickCount( ));//record the start time
		Mat captureImage, captureImage_gray;
		cap >> captureImage;//grab a video frame into captureImage
		if(captureImage.empty())
			continue;

		//<2> convert the image to grayscale
		cvtColor(captureImage, captureImage_gray, CV_BGR2GRAY);

		//<3> detect SIFT keypoints and extract descriptors from the test image
		vector<KeyPoint> test_keyPoint;
		Mat testDescriptor;
		featureDetector.detect(captureImage_gray, test_keyPoint);
		featureExtractor.compute(captureImage_gray, test_keyPoint, testDescriptor);

		//<4> match the training and test descriptors
		vector<vector<DMatch> > matches;
		matcher.knnMatch(testDescriptor, matches, 2);

		// <5> keep the good matches using Lowe's ratio test
		vector<DMatch> goodMatches;
		for(unsigned int i = 0; i < matches.size(); i++)
		{
			if(matches[i][0].distance < 0.6 * matches[i][1].distance)
				goodMatches.push_back(matches[i][0]);
		}

		//<6> draw the matches and show the window
		Mat dstImage;
		drawMatches(captureImage, test_keyPoint, trainImage, train_keyPoint, goodMatches, dstImage);
		imshow("Match Window", dstImage);

		//<7> print the frame rate
		cout << "\t> current frame rate: " << getTickFrequency() / (getTickCount() - time0) << endl;
	}

	return 0;
}
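
Lowe's ratio test above needs k=2 neighbors; with a plain 1-NN match, BFMatcher's built-in cross check is an alternative filter that keeps a pair only if each descriptor is the other's nearest neighbor. A minimal sketch (NORM_L2 suits SIFT's float descriptors):

#include <opencv2/features2d/features2d.hpp>
using namespace cv;
using namespace std;

vector<DMatch> crossCheckedMatches(const Mat &trainDescription, const Mat &testDescriptor)
{
    BFMatcher matcher(NORM_L2, true); // second argument enables cross checking
    vector<DMatch> matches;
    matcher.match(testDescriptor, trainDescription, matches); // only mutual best matches survive
    return matches;
}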
void FeatureMatching(const Mat& img_1, 
				   const Mat& img_2, 
				   vector<KeyPoint>& keypts1,
				   vector<KeyPoint>& keypts2,
				   vector<KeyPoint>& keypts1_good,
				   vector<KeyPoint>& keypts2_good,
				   	vector<DMatch>* matches,
					int method)
{
	
	Mat descriptors_1, descriptors_2;
	

	if(method == 1) // SURF descriptor
	{
		double minHessian = 400;
		SurfFeatureDetector detector( minHessian);

		detector.detect( img_1,keypts1);
		detector.detect( img_2, keypts2);

		//-- Step 2: Calculate descriptors (feature vectors)
		 SurfDescriptorExtractor extractor;


		extractor.compute( img_1,keypts1, descriptors_1 );
		extractor.compute( img_2, keypts2, descriptors_2 );


		


		//-- Draw only "good" matches
		/*Mat img_matches;
		drawMatches( img_1, keypts1, img_2, keypts2,
						good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
						vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );		
			//-- Show detected matches
		imshow( "Feature Matches", img_matches );
		waitKey(0);
		destroyWindow("Feature Matches");*/

	}
	if(method == 2) // ORB descriptor
	{
		Ptr<FeatureDetector> detector = FeatureDetector::create("ORB"); //"BRISK"
		detector->detect(img_1,keypts1);
		detector->detect(img_2,keypts2);

		Ptr<DescriptorExtractor> extractor = DescriptorExtractor::create("ORB");
		extractor->create("ORB");
		extractor->compute(img_1,keypts1, descriptors_1);
		extractor->compute(img_2,keypts2, descriptors_2);


	}
	if(method == 3) // SIFT descriptor
	{
		SiftFeatureDetector detector;
		detector.detect( img_1,keypts1);
		detector.detect( img_2, keypts2);

		//-- Step 2: Calculate descriptors (feature vectors)
		SiftDescriptorExtractor extractor;

		
		extractor.compute( img_1,keypts1, descriptors_1 );
		extractor.compute( img_2, keypts2, descriptors_2 );

	}
	if(method == 4) // KAZE descriptor
	{
		/*KAZEOptions options;
		options.img_width = img_1.cols;
		options.img_height = img_1.rows;
		KAZE evolution1(options);

		evolution1.Create_Nonlinear_Scale_Space(img_1);
		evolution1.Feature_Detection(keypts1);
		evolution1.Feature_Description(keypts1,descriptors_1);

		options.img_width = img_2.cols;
		options.img_height = img_2.rows;
		KAZE evolution2(options);

		evolution2.Create_Nonlinear_Scale_Space(img_2);
		evolution2.Feature_Detection(keypts2);
		evolution2.Feature_Description(keypts2,descriptors_2);*/
	}

	//-- Step 3: Matching descriptor vectors using BF matcher
	BFMatcher matcher(NORM_L2,true);
	std::vector< DMatch > matches_;
	if (matches == NULL) {
		matches = &matches_;
	}
	matcher.match( descriptors_1, descriptors_2, *matches ); // Match the feature points

	double max_dist = 0; double min_dist = 1000.0;
	//-- Quick calculation of max and min distances between keypoints
	for(unsigned int i = 0; i < matches->size(); i++ )
	{ 
		double dist = (*matches)[i].distance;
		if( dist < min_dist ) min_dist = dist;
		if( dist > max_dist ) max_dist = dist;
	}
	std::vector< DMatch > good_matches;
	vector<KeyPoint> imgpts1_good,imgpts2_good;

	if (min_dist <= 0) {
		min_dist = 10.0;
	}

	double cutoff = 4.0*min_dist;
	std::set<int> existing_trainIdx;
	for(unsigned int i = 0; i < matches->size(); i++ )
	{ 
		// caution: trainIdx == 0 is a valid keypoint index, and imgIdx is an image index,
		// not a keypoint index, so this rewrite can corrupt matches
		if ((*matches)[i].trainIdx <= 0) {
			(*matches)[i].trainIdx = (*matches)[i].imgIdx;
		}

		if( existing_trainIdx.find((*matches)[i].trainIdx) == existing_trainIdx.end() && 
			(*matches)[i].trainIdx >= 0 && (*matches)[i].trainIdx < (int)(keypts2.size()) &&
			(*matches)[i].distance > 0.0 && (*matches)[i].distance < cutoff ) 
		{
			good_matches.push_back( (*matches)[i]);
			keypts1_good.push_back(keypts1[(*matches)[i].queryIdx]);
			keypts2_good.push_back(keypts2[(*matches)[i].trainIdx]);
			existing_trainIdx.insert((*matches)[i].trainIdx);
		}
	}
}
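
The distance cutoff above is purely photometric; when the scene is close to planar, the RANSAC inlier mask from findHomography gives a geometric check on top of it (the next example does the same with a fundamental matrix). A minimal sketch filtering good matches down to homography inliers:

#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/features2d/features2d.hpp>
using namespace cv;
using namespace std;

vector<DMatch> homographyInliers(const vector<KeyPoint> &keypts1,
                                 const vector<KeyPoint> &keypts2,
                                 const vector<DMatch> &good_matches)
{
    if (good_matches.size() < 4) return good_matches; // findHomography needs >= 4 pairs

    vector<Point2f> pts1, pts2;
    for (size_t i = 0; i < good_matches.size(); ++i) {
        pts1.push_back(keypts1[good_matches[i].queryIdx].pt);
        pts2.push_back(keypts2[good_matches[i].trainIdx].pt);
    }

    vector<uchar> mask;
    findHomography(pts1, pts2, CV_RANSAC, 3.0, mask); // 3-pixel reprojection tolerance

    vector<DMatch> inliers;
    for (size_t i = 0; i < mask.size(); ++i)
        if (mask[i]) inliers.push_back(good_matches[i]);
    return inliers;
}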
Example #29
0
void Detect( Mat& img_scene ){
	LOGI("starting object detection");
	detector1.detect( img_scene, keypoints_scene );
	LOGI("Keypoints detected");

	extractor.compute( img_scene, keypoints_scene, descriptors_scene );
	LOGI("Descriptors extracted");

	FlannBasedMatcher matcher;
	std::vector< DMatch > matches;
	matcher.match( descriptors_source, descriptors_scene, matches );
	LOGI("Matching done");

	//-- Quick calculation of max and min distances between keypoints
	double min_dist=1000, max_dist=0;
	for( int i = 0; i < descriptors_source.rows; i++ )
	{ double dist = matches[i].distance;
	if( dist < min_dist ) min_dist = dist;
	if( dist > max_dist ) max_dist = dist;
	}

	//-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
	std::vector< DMatch > good_matches;

	for( int i = 0; i < descriptors_source.rows; i++ )
	{ if( matches[i].distance <= 4*min_dist )
	{ good_matches.push_back( matches[i]); }
	}

	// GEOM FILTER: discard the distance-based selection and refilter with epipolar geometry

	good_matches.clear();
	vector<uchar> inliers;
	vector<Point2f> pts1, pts2;
	for (int i = 0; i < matches.size(); i++) {
		pts1.push_back(keypoints_source[matches[i].queryIdx].pt);
		pts2.push_back(keypoints_scene[matches[i].trainIdx].pt);
	}
	Mat F = findFundamentalMat(Mat(pts1), Mat(pts2),
			FM_RANSAC, 3, 0.99, inliers);
	for (int i = 0; i < inliers.size(); i++) {
		if ( (int)inliers[i] ) {
			good_matches.push_back(matches[i]);
		}
	}

	//-- Localize the object
	std::vector<Point2f> obj;
	std::vector<Point2f> scene;

	for( int i = 0; i < good_matches.size(); i++ )
	{
		//-- Get the keypoints from the good matches
		obj.push_back( keypoints_source[ good_matches[i].queryIdx ].pt );
		scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
	}

	LOGI("Point Correspondence done");

	Mat img_matches;
	Mat img_object = imread("/sdcard/charminarAR/obj.jpg");
	drawMatches( img_object, keypoints_source, img_scene, keypoints_scene,
			good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
			vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
	imwrite("/sdcard/charminarAR/matches2.jpg", img_matches);
	LOGI("saved matches");

	prev_scene_points = source_points;
	HomographicTransformation(  obj, scene );
	points[1] = scene;
}
/*void normalize(Mat* srcMat, Mat* dstMat) {

	//cout<<"START#";
	int nRows = srcMat->rows;
	int nCols = srcMat->cols;

	vector<float> maxValues(nCols), minValues(nCols);
	srcMat->row(0).copyTo(maxValues);
	srcMat->row(0).copyTo(minValues);
	for(int col = 0; col < nCols; col++) {
		float max = maxValues[col];
		float min = minValues[col];
		float temp;
		for(int row = 0; row < nRows; row++) {
			temp = dstMat->at<float>(row,col);
			if(temp > max)
				max = temp;
			if(temp < min)
				min = temp;
		}
		maxValues[col] = max;
		minValues[col] = min;
	}

	for(int row = 0; row < nRows; row++) {
		for(int col = 0; col < nCols; col++) {
			dstMat->at<float>(row,col) = float(srcMat->at<float>(row,col) - minValues[col])/float(maxValues[col] - minValues[col]);
		}
	}
	for(int col = 0; col < nCols; col++) {
		cout<<maxValues[col]<<" ";
	}
	cout<<"\n";
	for(int col = 0; col < nCols; col++) {
			cout<<minValues[col]<<" ";
		}
	cout<<"===================================\n";
	//cout<<"END#";
}*/
int main()
{

	// parameters to set
	int num_classes = 10;
	int PCA_FLAG = 0;					// set this to use PCA
	int dimensionToReduceTo = 45;		// PCA good value 45,15

	// output:
	// 		SIFT_PCA_NaiveBayes_feature_train.txt
	// 		SIFT_PCA_NaiveBayes_label_train.txt
	// 		SIFT_PCA_NaiveBayes_feature_test.txt
	//      SIFT_PCA_NaiveBayes_label_test.txt
	// 		sift_pca_bagOfwords_results.txt
	if(PCA_FLAG==0)
		dimensionToReduceTo = 128;

	// Read parameters
	string raw_data_location;
	int num_training_samples_class;
	int num_testing_samples_class;

	vector<string> feature_train_image_names;
	vector<string> feature_test_image_names;

	ifstream fin;
	fin.open("dataDescription.txt");
	if(fin){
		string temp;
		getline(fin,raw_data_location);
		getline(fin,temp);
		num_training_samples_class = atoi(temp.c_str());
		getline(fin,temp);
		num_testing_samples_class = atoi(temp.c_str());
		fin.close();
	}
	else
	{
		cout<<"Unable to open dataDesciption.txt\n";
		exit(1);
	}

	// Make a list of all valid class names
	vector<string> class_name_array(num_classes); // vector instead of a variable-length array (non-standard for non-POD types)
	fin.open("feature_class_names.txt");
	string temp;
	if(fin){

		vector<string> validClassNames;
		while(getline(fin,temp)){
			temp = temp.substr(0, temp.find("\t"));
			validClassNames.push_back(temp);
		}
		fin.close();

		if( (size_t)num_classes > validClassNames.size() ){
			cout<<"\nWe do not have enough classes with the required number of samples.\nPlease reduce the "
					"number of training and/or test samples you want to use.\n";
		}
		else {
			for(int i = 0; i < num_classes; i++) {
				class_name_array[i] = validClassNames[i];
			}
			fin.open("feature_train_image_names.txt");
			if(fin){
				string temp;
				while(getline(fin,temp)){
					feature_train_image_names.push_back(temp);
				}
				fin.close();

			}
			else
			{
				cout<<"Unable to open feature_train_image_names.txt \nPlease run randomDataSubSampler.cpp first\n";
				exit(1);
			}
			fin.open("feature_test_image_names.txt");
			if(fin){
				string temp;
				while(getline(fin,temp)){
					feature_test_image_names.push_back(temp);
				}
				fin.close();

			}
			else
			{
				cout<<"Unable to open feature_test_image_names.txt \nPlease run randomDataSubSampler.cpp first\n";
				exit(1);
			}
		}
	}
	else
	{
		cout<<"Unable to open feature_class_names.txt. \nPlease run randomDataSubSampler.cpp first\n";
		exit(1);
	}

	// declare space to store SIFT features in 128X(total number of keypoints)
	//vector< vector<double> > sift_feature_matrix;
	Mat sift_feature_matrix;
	// store the number of keypoints in each image
	Mat_<int> num_keypoints_matrix(num_classes,num_training_samples_class);

	// iterate over each class one by one
	int cur_class = 0;
	int cum_image_num = 0;
	vector<int> labels_train(num_classes*num_training_samples_class); // vector instead of a variable-length array
	for(cur_class = 0; cur_class < num_classes; cur_class++) {

		string cur_class_raw_data_location = raw_data_location + class_name_array[cur_class] + "/";

		for(int cur_image_num = 0; cur_image_num < num_training_samples_class; cum_image_num++, cur_image_num++) {

			string cur_image_location = cur_class_raw_data_location + feature_train_image_names[cum_image_num];
		//	cout<<cur_image_location<<"\t";
			Mat cur_image = imread(cur_image_location,0);
		/*	imshow("curIMage",cur_image);
			waitKey(0);*/
			SiftFeatureDetector detector;
			vector<cv::KeyPoint> image_keypoints;
			detector.detect(cur_image, image_keypoints);
		/*	int a = image_keypoints.size();
			cout<<a<<"\t";*/
			Mat tempImg;
		/*	drawKeypoints(cur_image, image_keypoints, tempImg);
			imshow("img",tempImg);
			waitKey(0);*/

			num_keypoints_matrix[cur_class][cur_image_num] = image_keypoints.size();
//cout<<num_keypoints_matrix[cur_class][cur_image_num]<<"\n";
			// Calculate descriptors: for each of the key points,
			// obtain the features describing the vicinity of
			// the key point. This will be a 128-dimensional vector
			// at each key point.

			SiftDescriptorExtractor extractor;
			Mat keypoint_descriptors;
			extractor.compute( cur_image, image_keypoints, keypoint_descriptors );
			sift_feature_matrix.push_back(keypoint_descriptors);

			labels_train[cum_image_num] = cur_class;
		}
	}

	// PCA to reduce dimensionality from 128 features to dimensionToReduceTo
	// (the basis is computed here regardless of PCA_FLAG, but only used for projection when PCA_FLAG==1)
	Mat_<float> pcaSIFT_feature_matrix;
	PCA pca(sift_feature_matrix, Mat(), CV_PCA_DATA_AS_ROW, dimensionToReduceTo);
	if(PCA_FLAG==1){
		int reducedDimension = dimensionToReduceTo;
		Size size_sift_feature_matrix = sift_feature_matrix.size();
		Mat_<float> projected(size_sift_feature_matrix.height,reducedDimension);
		pca.project(sift_feature_matrix,projected);
		projected.convertTo(pcaSIFT_feature_matrix,CV_32F);
	}
	else
	{
		pcaSIFT_feature_matrix = sift_feature_matrix;
	}

	// number of key points in each class
	vector<int> training_totalKeyPoints_class(num_classes,0);
	for(cur_class = 0; cur_class < num_classes; cur_class++) {
		for(int imgNo = 0; imgNo < num_training_samples_class; imgNo++) {
			training_totalKeyPoints_class[cur_class] += num_keypoints_matrix.at<int>(cur_class,imgNo);
		}
	}


	// ===============================================================
	// Read Test Images
	// ===============================================================

	Mat_<int> testing_num_keypoints_matrix(num_classes,num_testing_samples_class);
	Mat testing_sift_feature_matrix;
	int cum_image_index = 0;
	vector<int> testLabels(num_classes*num_testing_samples_class); // vector instead of a variable-length array
	for(cur_class = 0; cur_class < num_classes; cur_class++) {

		string cur_class_raw_data_location = raw_data_location + "/" + class_name_array[cur_class] + "/";

		//read image of the testing data of the current_class one at a time

		for(int cur_image_num = 0; cur_image_num < num_testing_samples_class; cum_image_index++,cur_image_num++) {
			string cur_image_location = cur_class_raw_data_location + feature_test_image_names[cum_image_index];

			Mat cur_image = imread(cur_image_location,0);

			SiftFeatureDetector detector;
			vector<cv::KeyPoint> image_keypoints;
			detector.detect(cur_image, image_keypoints);

			testing_num_keypoints_matrix[cur_class][cur_image_num] = image_keypoints.size();

			// Calculate descriptors: for each of the key points,
			// obtain the features describing the vicinity of
			// the key point. This will be a 128-dimensional vector
			// at each key point.

			SiftDescriptorExtractor extractor;
			Mat keypoint_descriptors;
			extractor.compute( cur_image, image_keypoints, keypoint_descriptors );
			testing_sift_feature_matrix.push_back(keypoint_descriptors);
			testLabels[cum_image_index]=cur_class;
		}
	}

	// Project the test image SIFT feature to the PCA reduced
	// dimension plane
	Mat_<float> testing_pcaSIFT_feature_matrix;
	if(PCA_FLAG==1){
		Size size_testing_sift_feature_matrix = testing_sift_feature_matrix.size();
		Mat_<float> testing_projected(size_testing_sift_feature_matrix.height,dimensionToReduceTo);
		pca.project(testing_sift_feature_matrix,testing_projected);
		testing_projected.convertTo(testing_pcaSIFT_feature_matrix,CV_32F);
	}
	else{
		testing_pcaSIFT_feature_matrix = testing_sift_feature_matrix;
	}



	Size train_dimension = pcaSIFT_feature_matrix.size();
	Size test_dimension  = testing_pcaSIFT_feature_matrix.size();
	ofstream fout;
	// Write to file
	fout.open("SIFT_PCA_NaiveBayes_feature_test.txt");

	for(int i = 0; i < test_dimension.height;i++){
		for(int j = 0; j < test_dimension.width; j++){
			fout<<testing_pcaSIFT_feature_matrix.at<float>(i, j)<<" ";
		}
		fout<<"\n";
	}
	fout.clear();
	fout.close();


	fout.open("SIFT_PCA_NaiveBayes_label_test.txt");
	fout.clear();
	for(int i = 0; i < num_testing_samples_class*num_classes;i++){
		fout<<testLabels[i]<<"\n";
	}
	fout.close();

	fout.open("SIFT_PCA_NaiveBayes_feature_train.txt");
	fout.clear();
	for(int i = 0; i < train_dimension.height;i++){
		for(int j = 0; j < train_dimension.width; j++){
			fout<<pcaSIFT_feature_matrix.at<float>(i, j)<<" ";
		}
		fout<<"\n";
	}
	fout.close();

	fout.open("SIFT_PCA_NaiveBayes_label_train.txt");
	fout.clear();
	for(int i = 0; i < num_training_samples_class*num_classes; i++) {
		fout<<labels_train[i]<<"\n";
	}
	fout.close();

	fout.open("SIFT_PCA_NaiveBayes_label_train.txt");
	fout.clear();
	for(int i = 0; i < num_training_samples_class*num_classes; i++) {
		fout<<labels_train[i]<<"\n";
	}
	fout.close();



	fout.open("SIFT_PCA_NaiveBayes_train_keypoints_matrix.txt");
	fout.clear();

	for(int i = 0; i < num_classes;i++){
		for(int j = 0; j < num_training_samples_class; j++){
			fout<<num_keypoints_matrix.at<int>(i, j)<<" ";
		}
		fout<<"\n";
	}

	fout.close();


	fout.open("SIFT_PCA_NaiveBayes_test_keypoints_matrix.txt");
	fout.clear();

	for(int i = 0; i < num_classes;i++){
		for(int j = 0; j < num_testing_samples_class; j++){
			fout<<testing_num_keypoints_matrix.at<int>(i, j)<<" ";
		}
		fout<<"\n";
	}

	fout.close();

	/*cout<<"---1\n";

	// Construct KD Trees
	vector< KDTree > kdTrees(num_classes);
	int dimension = pcaSIFT_feature_matrix.size().width;
	int min_keypoint_class_index = 0;
	for(int curClass = 0; curClass < num_classes; curClass++) {
		cout<<"\t--->"<<curClass;
		int numKeyPointsCurClass = training_totalKeyPoints_class[curClass];
		Mat curClassDesriptors = pcaSIFT_feature_matrix(Rect(0,min_keypoint_class_index,dimension,min_keypoint_class_index+numKeyPointsCurClass));
		min_keypoint_class_index = min_keypoint_class_index + numKeyPointsCurClass;
		cout<<"|";
		KDTree kdCur(curClassDesriptors);
		kdTrees.push_back(kdCur);
		cout<<">\n";
	}


	cout<<"---2\n";


	Mat distMat;//(num_testing_samples_class*num_classes,num_classes);

	int min_keypoint_img_index = 0;

	int cumImage_index = 0;
	for(int curClass = 0; curClass < num_classes; curClass++) {

	//	int numKeyPointsCurClass = training_totalKeyPoints_class[curClass];
	//	Mat curClassDesriptors = pcaSIFT_feature_matrix(Rect(0,min_keypoint_class_index,dimension,min_keypoint_class_index+numKeyPointsCurClass));
	//	min_keypoint_class_index = min_keypoint_class_index + numKeyPointsCurClass;
		KDTree kd = kdTrees[curClass];

		for(int curImage = 0; curImage < num_testing_samples_class; cumImage_index++,curImage++) {

			int numKeyPointsCurTestImg = num_keypoints_matrix[curClass][curImage];

			Mat curTestImgDesriptorsMatrix = testing_pcaSIFT_feature_matrix(Rect(0, min_keypoint_img_index, dimension,min_keypoint_img_index+numKeyPointsCurTestImg));
			min_keypoint_img_index = min_keypoint_img_index + numKeyPointsCurTestImg;

			vector<int> NN_indices(curTestImgDesriptorsMatrix.size().height);
			kd.findNearest(curTestImgDesriptorsMatrix,1,32,NN_indices,noArray(),noArray());

			double dist_sum = 0;


			for(int i = 0; i < NN_indices.size(); i++){
				vector<float> ref(curTestImgDesriptorsMatrix.row(i));
				vector<float> NN_point(kd.getPoint(NN_indices[i]),kd.getPoint(NN_indices[i])+dimension);
				dist_sum = dist_sum + norm(ref,NN_point,NORM_L2);
			}
			distMat.at<float>(cumImage_index,curClass) = dist_sum;

		}

	}
	cout<<"---3\n";
	vector<int> predictedLabels(num_testing_samples_class*num_classes);
	for(int i = 0 ; i < num_testing_samples_class*num_classes; i++) {
		float min = distMat.at<float>(i,0);

		for(int j = 0; j < num_classes; j++){

			if(distMat.at<float>(i,j) < min){
				min = distMat.at<float>(i,j);
				predictedLabels[i] = j;
			}
		}
	}

	for(int i = 0 ; i < num_testing_samples_class*num_classes; i++) {
	cout<<predictedLabels[i]<<" ";
	}*/

	/*
	cout<<"---2\n";
	Mat distMat;
	int dimension = pcaSIFT_feature_matrix.size().width;
	int min_keypoint_class_index = 0;
	int min_keypoint_img_index = 0;
	int cumImage_index = 0;
	for(int curClass = 0; curClass < num_classes; curClass++) {
		cout<<"\t---"<<curClass<<"\n";
		int numKeyPointsCurClass = training_totalKeyPoints_class[curClass];
		Mat curClassDesriptors = pcaSIFT_feature_matrix(Rect(0,min_keypoint_class_index,dimension,min_keypoint_class_index+numKeyPointsCurClass));
		min_keypoint_class_index = min_keypoint_class_index + numKeyPointsCurClass;

		for(int curImage = 0; curImage < num_testing_samples_class; cumImage_index++,curImage++) {
			cout<<"\t\t---"<<curImage;
			int numKeyPointsCurTestImg = num_keypoints_matrix[curClass][curImage];

			Mat curTestImgDesriptorsMatrix = testing_pcaSIFT_feature_matrix(Rect(0, min_keypoint_img_index, dimension,min_keypoint_img_index+numKeyPointsCurTestImg));
			min_keypoint_img_index = min_keypoint_img_index + numKeyPointsCurTestImg;
			cout<<"D";
			FlannBasedMatcher flann_matcher;
			std::vector< DMatch > flann_matches;
			flann_matcher.match( curTestImgDesriptorsMatrix, curClassDesriptors, flann_matches );
			cout<<"F";
			double dist_sum = 0;

			for(int i = 0; i < numKeyPointsCurTestImg; i++){
				vector<float> ref(curTestImgDesriptorsMatrix.row(i));
				vector<float> NN_point(curClassDesriptors.row(flann_matches[i].trainIdx));
				dist_sum = dist_sum + norm(ref,NN_point,NORM_L2);
			}
			distMat.at<float>(cumImage_index,curClass) = dist_sum;
			cout<<">\n";
		}

	}

	cout<<"---3\n";
	vector<int> predictedLabels(num_testing_samples_class*num_classes);
	for(int i = 0 ; i < num_testing_samples_class*num_classes; i++) {
		float min = distMat.at<float>(i,0);

		for(int j = 0; j < num_classes; j++){

			if(distMat.at<float>(i,j) < min){
				min = distMat.at<float>(i,j);
				predictedLabels[i] = j;
			}
		}
	}

	 */




	/*
	// k means clustering
	// labels: vector storing the labels assigned to each vector
	//         (the pcaSIFT feature of a keypoint). Therefore labels
	//         is of size = total number of keypoints = size_sift_feature_matrix.height

	vector<int> labels;//(size_sift_feature_matrix.height);
	int attempts = 5;
	Mat centers;
	TermCriteria criteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20000, 0.0001);

	kmeans(pcaSIFT_feature_matrix, num_clusters, labels,criteria, attempts, KMEANS_RANDOM_CENTERS,centers );

	// Object Feature Vector
	// computing histograms of each image
	// the keypoint_matrix stores the number of keypoints of each image
	// each image has a different number of keypoints
	// using this matrix, we will compute the histogram for each image
	// Also, note that the pcaSIFT_matrix stores the pcaSift_features in
	// following order:
	// pcaSift_feature of keypoint 1 of image 1 of class 1
	// pcaSift_feature of keypoint 2 of image 1 of class 1
	// .
	// .
	// pcaSift_feature of keypoint 1 of image 2 of class 1
	// pcaSift_feature of keypoint 2 of image 2 of class 1
	// .
	// .
	// pcaSift_feature of keypoint 1 of image 1 of class 2
	// .
	// .
	// .
	// pcaSift_feature of last keypoint of last image of last class

	Mat histogram_images = Mat(num_training_samples_class*num_classes, num_clusters, CV_32F, float(0.0));

	vector<int> labels_train(num_training_samples_class*num_classes);
	int cImg = 0;

	int min_keypoint_index = 0;
	int cumImage_index = 0;
	for(int curClass = 0; curClass < num_classes; curClass++) {
		for(int curImage = 0; curImage < num_training_samples_class; curImage++) {

			int numKeypoints = num_keypoints_matrix[curClass][curImage];

			for(unsigned int i = 0; i < numKeypoints; i++) {

				int id = labels[min_keypoint_index+i];
				histogram_images.at<float>(cumImage_index,id) += 1.0;
			}

			min_keypoint_index = min_keypoint_index + numKeypoints;
			labels_train[cumImage_index] = curClass;
			cumImage_index++;
		}

	}

	ofstream fout;
		fout.open("histogram_images.txt");

		for(int i = 0; i < num_training_samples_class*num_classes;i++){
			for(int j = 0; j < num_clusters; j++){
				fout<<histogram_images.at<double>(i, j)<<" ";
			}
			fout<<"\n";
		}
		fout.clear();
		fout.close();
	// Normalize the histogram matrix
	Mat normalized_histogram_images;
	normalize(histogram_images, normalized_histogram_images);
	histogram_images = normalized_histogram_images;


	// ===============================================================
	// Read Test Images
	// ===============================================================

	Mat_<int> testing_num_keypoints_matrix(num_classes,num_testing_samples_class);
	Mat testing_sift_feature_matrix;
	int cum_image_index = 0;
	for(cur_class = 0; cur_class < num_classes; cur_class++) {

		string cur_class_raw_data_location = raw_data_location + "/" + class_name_array[cur_class] + "/";

		//read image of the testing data of the current_class one at a time

		for(int cur_image_num = 0; cur_image_num < num_testing_samples_class; cum_image_index++,cur_image_num++) {
			string cur_image_location = cur_class_raw_data_location + feature_test_image_names[cum_image_index];

			Mat cur_image = imread(cur_image_location,0);

			SiftFeatureDetector detector;
			vector<cv::KeyPoint> image_keypoints;
			detector.detect(cur_image, image_keypoints);

			testing_num_keypoints_matrix[cur_class][cur_image_num] = image_keypoints.size();

			// Calculate descriptors: For each of the key points
			// obtain the features describing the vicinity of the
			// the key points. This will be a 128 dimensional vector
			// at each key point

			SiftDescriptorExtractor extractor;
			Mat kepoint_descriptors;
			extractor.compute( cur_image, image_keypoints, kepoint_descriptors );
			testing_sift_feature_matrix.push_back(kepoint_descriptors);

		}
	}

	// Project the test image SIFT feature to the PCA reduced
	// dimension plane
	Size size_testing_sift_feature_matrix = testing_sift_feature_matrix.size();
	Mat_<float> testing_projected(size_testing_sift_feature_matrix.height,reducedDimension);
	pca.project(testing_sift_feature_matrix,testing_projected);

	Mat_<float> testing_pcaSIFT_feature_matrix;
	testing_projected.convertTo(testing_pcaSIFT_feature_matrix,CV_32F);


	Mat testing_histogram_images = Mat(num_testing_samples_class*num_classes, num_clusters, CV_32F, float(0.0));
	vector<int> labels_test(num_testing_samples_class*num_classes);
	cImg = 0;
	min_keypoint_index = 0;
	cumImage_index = 0;
	for(int curClass = 0; curClass < num_classes; curClass++) {
		for(int curImage = 0; curImage < num_testing_samples_class; curImage++) {

			int numKeypoints = testing_num_keypoints_matrix[curClass][curImage];

			Mat tempDescriptor=testing_pcaSIFT_feature_matrix(cv::Rect(0,min_keypoint_index,reducedDimension,numKeypoints));

			FlannBasedMatcher flann_matcher;
			std::vector< DMatch > flann_matches;
			flann_matcher.match( tempDescriptor, centers, flann_matches );

			for(unsigned int i = 0; i < flann_matches.size(); i++) {
				int id = flann_matches[i].trainIdx;
				testing_histogram_images.at<float>(cumImage_index,id) += 1.0;
			}

			min_keypoint_index = min_keypoint_index + numKeypoints;
			labels_test[cumImage_index] = curClass;
			cumImage_index++;
		}
	}

	// NORMALIZE HISTOGRAMS
	Mat normalized_testing_histogram_images;
	normalize(testing_histogram_images,normalized_testing_histogram_images);
	testing_histogram_images = normalized_testing_histogram_images;

	cout<<"\n\n===========BOW=======================\n\n";

	FlannBasedMatcher flann_matcher;
	vector< vector < DMatch > > flann_matches;

	Mat_<float> testHist = testing_histogram_images;
	Mat_<float> trainHist = histogram_images;

	flann_matcher.knnMatch( testHist, trainHist, flann_matches, k_nearest_neighbor );

	int predTestLabels[num_testing_samples_class*num_classes];
	for(int imgNo = 0; imgNo < num_testing_samples_class*num_classes; imgNo++) {
		vector < DMatch > temp = flann_matches[imgNo];

		float votes[num_clusters]={0.0};
		const int N = sizeof(votes) / sizeof(float);
		for(int neigh = 0; neigh < temp.size(); neigh++ ) {
			int id = temp[neigh].trainIdx;
			int ind = id;
			id = ind/num_training_samples_class;
			if(ind%num_training_samples_class == 0)
				id = id - 1;

			float dist = temp[neigh].distance;
			votes[id] = votes[id] + (1.0/dist);

		}
		predTestLabels[imgNo] = distance(votes, max_element(votes, votes + N));
	}


	// compute error
	vector<float> error(num_classes,0.0);
	float totalError=0.0;
	for(int i = 0; i < num_testing_samples_class*num_classes; i++) {

		if(predTestLabels[i] != labels_test[i])
		{
			error[labels_test[i]] = error[labels_test[i]] + 1.0;
			totalError = totalError + 1.0;
		}
	}


//	ofstream fout;
	// Write to file
	fout.open("feature_test.txt");

	for(int i = 0; i < num_testing_samples_class*num_classes;i++){
		for(int j = 0; j < num_clusters; j++){
			fout<<testing_histogram_images.at<float>(i, j)<<" ";
		}
		fout<<"\n";
	}
	fout.clear();
	fout.close();


	fout.open("label_test.txt");
	fout.clear();
	for(int i = 0; i < num_testing_samples_class*num_classes;i++){
			fout<<labels_test[i]<<"\n";
	}
	fout.close();

	fout.open("feature_train.txt");
	fout.clear();
	for(int i = 0; i < num_training_samples_class*num_classes;i++){
		for(int j = 0; j < num_clusters; j++){
			fout<<histogram_images.at<float>(i, j)<<" ";
		}
		fout<<"\n";
	}
	fout.close();

	fout.open("label_train.txt");
	fout.clear();
	for(int i = 0; i < num_training_samples_class*num_classes; i++) {
		fout<<labels_train[i]<<"\n";
	}
	fout.close();

	fout.open("predictedLabels.txt");
	fout.clear();
	for(int i = 0; i < num_testing_samples_class*num_classes; i++) {
		fout<<predTestLabels[i]<<"\n";
	}
	fout.clear();
	fout<<"\nClass Wise Number of Miss-classifications ("<<num_testing_samples_class<<" test samples "
			"in each class)\n";
	for(int i = 0; i < num_classes; i++) {
		fout<<class_name_array[i]<<"\t: "<<error[i]<<"\n";
	}
	fout<<"Total Error(%)\n"<<totalError*100/(num_testing_samples_class*num_classes);
	fout.close();
	 */

}
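
The PCA above is fitted on the training descriptors and reused to project the test descriptors, which is the right order (fitting on test data would leak information). A minimal sketch of the project/back-project round trip on placeholder data, using the same 45-component setting as the code above:

#include <opencv2/core/core.hpp>
#include <iostream>
using namespace cv;

int pcaRoundTripSketch()
{
    // 100 samples x 128 dimensions of random stand-in "descriptors"
    Mat data(100, 128, CV_32F);
    randu(data, Scalar(0), Scalar(1));

    PCA pca(data, Mat(), CV_PCA_DATA_AS_ROW, 45); // keep 45 principal components

    Mat projected;
    pca.project(data, projected);               // 100 x 45
    Mat reconstructed;
    pca.backProject(projected, reconstructed);  // 100 x 128 approximation

    std::cout << "reconstruction error: " << norm(data, reconstructed, NORM_L2) << std::endl;
    return 0;
}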