Example #1
bool findMatch(CvPoint &offset, FlannBasedMatcher matcher, SurfFeatureDetector detector, SurfDescriptorExtractor extractor, Mat des_object[])
{
	bool noMatch = true;
	Mat des_image, img_matches;
	vector<KeyPoint> kp_image;
	vector<vector<DMatch > > matches;
	vector<DMatch > good_matches;
	int iter = 0;
	Mat image = imread("/home/pi/opencv/photo.jpg" , CV_LOAD_IMAGE_GRAYSCALE );
	detector.detect( image, kp_image );
	extractor.compute( image, kp_image, des_image );
	while ( noMatch )
	{
		//printf("before kp and des detection 2\n");
	    	
		
		matcher.knnMatch(des_object[iter], des_image, matches, 2);
		for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++) //check the neighbour count before indexing, otherwise this loop can segfault
		{
		    if(matches[i].size() >= 2 && matches[i][0].distance < 0.6*(matches[i][1].distance))
		    {
			good_matches.push_back(matches[i][0]);
		    }
		}
		
		//printf("Number of matches: %d\n", good_matches.size());
		if (good_matches.size() >= 10)
		{
			CvPoint center = cvPoint(0,0);
			for ( int z = 0 ; z < good_matches.size() ; z++ )
			{
				int index = good_matches.at(z).trainIdx;
				center.x += kp_image.at(index).pt.x;
				center.y += kp_image.at(index).pt.y;
			}
			center.x = center.x/good_matches.size();
			center.y = center.y/good_matches.size();
			int radius = 5;
			circle( image, center, radius, Scalar(0,0,255), 3, 8, 0 );
			namedWindow("test");
			imshow("test", image);
			imwrite("centerPoint.jpg", image);
			waitKey(5000);
			int offsetX = center.x - image.cols/2;
			int offsetY = center.y - image.rows/2;
			offset = cvPoint(offsetX, offsetY);			
			noMatch = false;
		}
		//printf("draw good matches\n");
		//Show detected matches
		if ( iter++ == 3 || !noMatch )
			break;
		
		good_matches.clear();
	}
	return noMatch; // true means no match was found
}
    void tryFindImage_features(Mat input)
    {
    	/* Compare the input image against the set of reference templates and pick the best match */

    	resize(input, input, Size(SIGN_SIZE, SIGN_SIZE), 0, 0);

    	vector<KeyPoint> keyPoints;
    	_detector.detect(input, keyPoints);

    	Mat descriptors;
    	_extractor.compute(input, keyPoints, descriptors);

    	int max_value = 0, max_position = 0; 

    	for(int i=0; i < 5; i++)
    	{
    		vector< vector<DMatch> > matches;

    		_matcher.knnMatch(descriptors, _train_descriptors[i], matches, 50);

    		int good_matches_count = 0;
		   
		    for (size_t j = 0; j < matches.size(); ++j)
		    { 
		        if (matches[j].size() < 2)
		                    continue;
		       
		        const DMatch &m1 = matches[j][0];
		        const DMatch &m2 = matches[j][1];
		            
		        if(m1.distance <= 0.7 * m2.distance)        
		            good_matches_count++;    
		    }

		    if(good_matches_count > max_value)
		    {
		    	max_value = good_matches_count;
		    	max_position = i;
		    }
    	}

    	cout << STATUS_STR << "Detected sign: " << _train_sign_names[max_position] << endl;
    }
static void matchDescriptors( const Mat& queryDescriptors, const vector<Mat>& trainDescriptors,
                       vector<DMatch>& matches, FlannBasedMatcher& descriptorMatcher, const vector<Mat>& trainImages, const vector<string>& trainImagesNames )

{
    cout << "< Set train descriptors collection in the matcher and match query descriptors to them..." << endl;

    descriptorMatcher.add( trainDescriptors );
    descriptorMatcher.train();

    descriptorMatcher.match( queryDescriptors, matches );

    CV_Assert( queryDescriptors.rows == (int)matches.size() || matches.empty() );

    cout << "Number of matches: " << matches.size() << endl;
    cout << ">" << endl;

    for( size_t i = 0; i < trainDescriptors.size(); i++){

        std::vector< std::vector< DMatch> > matches2;

        std::vector< DMatch > good_matches;

        descriptorMatcher.knnMatch( queryDescriptors, trainDescriptors[i], matches2, 2);
        CV_Assert( queryDescriptors.rows == (int)matches2.size() || matches2.empty() );

        for (size_t j = 0; j < matches2.size(); ++j){
            const float ratio = 0.8f; // As in Lowe's paper; can be tuned
            if (matches2[j].size() >= 2 && matches2[j][0].distance < ratio * matches2[j][1].distance){
                good_matches.push_back(matches2[j][0]);
            }
        }

        cout << "currentMatchSize : " << good_matches.size() << endl;

    }

    
}
//--------------------------------------[ main() function ]---------------------------------------
//          Description: entry point of the console application; execution starts here
//-------------------------------------------------------------------------------------------------
int main( ) 
{
	//[0] Change the console text color
	system("color 6F"); 

	void ShowHelpText(); // declaration only; the function is never called here

	//[1] Load the image, display it, and convert it to grayscale
	Mat trainImage = imread("1.jpg"), trainImage_gray;
	imshow("原始图",trainImage);
	cvtColor(trainImage, trainImage_gray, CV_BGR2GRAY);

	//[2] Detect SURF keypoints and extract descriptors from the training image
	vector<KeyPoint> train_keyPoint;
	Mat trainDescriptor;
	SurfFeatureDetector featureDetector(80);
	featureDetector.detect(trainImage_gray, train_keyPoint);
	SurfDescriptorExtractor featureExtractor;
	featureExtractor.compute(trainImage_gray, train_keyPoint, trainDescriptor);

	//[3] Create a FLANN-based descriptor matcher
	FlannBasedMatcher matcher;
	vector<Mat> train_desc_collection(1, trainDescriptor);
	matcher.add(train_desc_collection);
	matcher.train();

	//[4] Open the video capture and set up a frame counter
	VideoCapture cap(0);
	unsigned int frameCount = 0; // frame counter (not used below)

	//[5] Loop until the 'q' key is pressed
	while(char(waitKey(1)) != 'q')
	{
		//<1> Per-frame setup
		int64 time0 = getTickCount();
		Mat testImage, testImage_gray;
		cap >> testImage; // grab a frame into testImage
		if(testImage.empty())
			continue;

		//<2> Convert the frame to grayscale
		cvtColor(testImage, testImage_gray, CV_BGR2GRAY);

		//<3> Detect SURF keypoints and extract descriptors from the test frame
		vector<KeyPoint> test_keyPoint;
		Mat testDescriptor;
		featureDetector.detect(testImage_gray, test_keyPoint);
		featureExtractor.compute(testImage_gray, test_keyPoint, testDescriptor);

		//<4> Match the test descriptors against the trained collection
		vector<vector<DMatch> > matches;
		matcher.knnMatch(testDescriptor, matches, 2);

		//<5> Keep the good matches using Lowe's ratio test
		vector<DMatch> goodMatches;
		for(unsigned int i = 0; i < matches.size(); i++)
		{
			if(matches[i].size() >= 2 && matches[i][0].distance < 0.6 * matches[i][1].distance)
				goodMatches.push_back(matches[i][0]);
		}

		//<6> Draw the matches and show them
		Mat dstImage;
		drawMatches(testImage, test_keyPoint, trainImage, train_keyPoint, goodMatches, dstImage);
		imshow("匹配窗口", dstImage);

		//<7> Print the frame rate
		cout << "Current FPS: " << getTickFrequency() / (getTickCount() - time0) << endl;
	}

	return 0;
}
Example #5
/** @function matchKeypoints */
int matchKeypoints( int argc, char** argv )
{
//  if( argc != 3 )
//  { readme(); return -1; }
  cv::initModule_nonfree();

  Mat img_1 = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );
  Mat img_2 = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE );
  Codebook codebook;
  //codebook.readInCSV(string(argv[3]));

  if( !img_1.data || !img_2.data )
  { std::cout<< " --(!) Error reading images " << std::endl; return -1; }

  //-- Step 1: Detect the keypoints using SURF Detector
  int minHessian = 15000;

  //SurfFeatureDetector detector( minHessian);
  SURF* detector = new SURF(minHessian,1,4,true,true); // hessianThreshold, nOctaves, nOctaveLayers, extended (128-dim), upright

  std::vector<KeyPoint> keypoints_1, keypoints_2;

  assert(img_1.size[0]>0 && img_1.size[1]>0 && img_2.size[0]>0 && img_2.size[1]>0);
  
  (*detector)( img_1, Mat(), keypoints_1 );
  (*detector)( img_2, Mat(), keypoints_2 );
  
  Mat img_keypoints_1; Mat img_keypoints_2;
//  drawKeypoints( img_1, keypoints_1, img_keypoints_1, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
//  drawKeypoints( img_2, keypoints_2, img_keypoints_2, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
  cvtColor(img_1,img_keypoints_1,CV_GRAY2RGB);
  for (KeyPoint k :keypoints_1)
  {
//      circle(img_keypoints_1,k.pt,k.size,Scalar(rand()%256,rand()%256,rand()%256));
//      cout<<k.size<<endl;
      Rect rec(k.pt.x-(k.size/2),k.pt.y-(k.size/2),k.size,k.size);
      rectangle(img_keypoints_1,rec,Scalar(rand()%256,rand()%256,rand()%256));
  }
  
  cvtColor(img_2,img_keypoints_2,CV_GRAY2RGB);
  for (KeyPoint k :keypoints_2)
  {
//      circle(img_keypoints_2,k.pt,k.size,Scalar(rand()%256,rand()%256,rand()%256));
      Rect rec(k.pt.x-(k.size/2),k.pt.y-(k.size/2),k.size,k.size);
      rectangle(img_keypoints_2,rec,Scalar(rand()%256,rand()%256,rand()%256));
  }
  
  
    //-- Show detected (drawn) keypoints
    imshow("Keypoints 1", img_keypoints_1 );
    imshow("Keypoints 2", img_keypoints_2 );
    waitKey(0);

  //-- Step 2: Calculate descriptors (feature vectors)
    //SurfDescriptorExtractor extractor;
  
    Mat descriptors_1, descriptors_2;
  
    detector->compute( img_1, keypoints_1, descriptors_1 );
    detector->compute( img_2, keypoints_2, descriptors_2 );
  
    //-- Step 3: Matching descriptor vectors using FLANN matcher
    FlannBasedMatcher matcher;
    std::vector< std::vector< DMatch > > matches;
    matcher.knnMatch( descriptors_1, descriptors_2, matches, 10 );
  
    double max_dist = 0; double min_dist = 100;
  
    //-- Quick calculation of max and min distances between keypoints
    for( int i = 0; i < matches.size(); i++ )
    {
        for (int j=0; j < matches[i].size(); j++)
        {
            double dist = matches[i][j].distance;
            if( dist < min_dist ) min_dist = dist;
            if( dist > max_dist ) max_dist = dist;
        }
    }
  
    printf("-- Max dist : %f \n", max_dist );
    printf("-- Min dist : %f \n", min_dist );
  
    //-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist,
    //-- or a small arbitary value ( 0.02 ) in the event that min_dist is very
    //-- small)
    //-- PS.- radiusMatch can also be used here.
    std::vector< DMatch > good_matches;
  
    for( int i = 0; i < matches.size(); i++ )
    {
        for (int j=0; j < matches[i].size(); j++)
            //if( matches[i][j].distance <= max(2*min_dist, 0.02) )
            if( matches[i][j].distance <= max((max_dist-min_dist)/4.0 + min_dist, 0.02) )
            { good_matches.push_back( matches[i][j]); }
            else
                printf("discard(%d,%d)\n",i,j);
    }
  
    //-- Draw only "good" matches
    Mat img_matches;
    drawMatches( img_1, keypoints_1, img_2, keypoints_2,
                 good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
                 vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
  
    //-- Show detected matches
    imshow( ".... Matches", img_matches );
  
    for( int i = 0; i < (int)good_matches.size(); i++ )
    { printf( "-- .... Match [%d] Keypoint 1: %d  -- Keypoint 2: %d  \n", i, good_matches[i].queryIdx, good_matches[i].trainIdx ); }
  
    waitKey(0);
    
//    vector<Point2f> corners;
//      double qualityLevel = 0.01;
//      double minDistance = 10;
//      int blockSize = 3;
//      bool useHarrisDetector = false;
//      double k = 0.04;
//      int maxCorners = 23;
//      int maxTrackbar = 100;
//    goodFeaturesToTrack( src_gray,
//                   corners,
//                   maxCorners,
//                   qualityLevel,
//                   minDistance,
//                   Mat(),
//                   blockSize,
//                   useHarrisDetector,
//                   k );
    
  return 0;
  }
Example #6
int main(int argc, char** argv)
{

    sourceORIG = imread( argv[1] );
    sourceORIG2 = imread( argv[2] );
    cvtColor( sourceORIG, sourceGRAY, CV_BGR2GRAY );
    cvtColor( sourceORIG2, sourceGRAY2, CV_BGR2GRAY );

    GaussianBlur( sourceGRAY, sourceGRAY_BLUR, Size(3,3), 3.0 );
    GaussianBlur( sourceGRAY2, sourceGRAY_BLUR2, Size(7,7), 3.0 );

    Canny( sourceGRAY_BLUR, cannyOut, lowThreshold, lowThreshold*ratio, kernel_size );
    cv::dilate(cannyOut, cannyOut, cv::Mat(), cv::Point(-1,-1));

    findContours( cannyOut, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );

    drawing = Mat::zeros( cannyOut.size(), CV_8UC3 );

    vector<Point> approxShape;
    for(size_t i = 0; i < contours.size(); i++) {
        approxPolyDP(contours[i], approxShape, 5, true);
        drawContours(drawing, contours, i, Scalar(255, 255, 255), CV_FILLED);
    }
    bitwise_not ( drawing, drawing );

    vector<vector<Point> > contours_poly( contours.size() );
    for( int i = 0; i < contours.size(); i++ )
    {
        double area = contourArea(contours[i]);
        if(area > maxArea) {
            maxArea = area;
            approxPolyDP( Mat(contours[i]), contours_poly[i], 3, true );
            boundRect = boundingRect( Mat(contours_poly[i]) );
        }
    }

    drawing = drawing(boundRect);
    mytemplate = drawing;

    // http://stackoverflow.com/questions/24539273/how-to-find-out-how-good-a-match-is-in-opencv

    // GaussianBlur( mytemplate, mytemplate, Size(7,7), 3.0 );

    detector.detect( mytemplate, keypointsTMPL );
    detector.detect( sourceGRAY_BLUR2, keypointsSCENE );

    extractor.compute( mytemplate, keypointsTMPL, descriptorsTMPL );
    extractor.compute( sourceGRAY_BLUR2, keypointsSCENE, descriptorsSCENE );

    obj_corners[0] = (cvPoint(0,0));
    obj_corners[1] = (cvPoint(mytemplate.cols,0));
    obj_corners[2] = (cvPoint(mytemplate.cols,mytemplate.rows));
    obj_corners[3] = (cvPoint(0, mytemplate.rows));

    if(descriptorsSCENE.type()!=CV_32F) {
        descriptorsSCENE.convertTo(descriptorsSCENE, CV_32F);
    }
    if(descriptorsTMPL.type()!=CV_32F) {
        descriptorsTMPL.convertTo(descriptorsTMPL, CV_32F);
    }

    // if ( descriptorsTMPL.empty() )
    //    cvError(0,"MatchFinder","1st descriptor empty",__FILE__,__LINE__);
    // if ( descriptorsSCENE.empty() )
    //    cvError(0,"MatchFinder","2nd descriptor empty",__FILE__,__LINE__);

    matcher.knnMatch( descriptorsTMPL, descriptorsSCENE, matches, 2);

    for(int i = 0; i < cv::min(sourceGRAY_BLUR2.rows-1,(int) matches.size()); i++)  {
        if(matches[i].size() >= 2 && matches[i][0].distance < 0.6*(matches[i][1].distance))  {
            good_matches.push_back(matches[i][0]);
            cout << "GOOD MATCHES" << endl;
        }
        cout << "MATCHES: " << matches[i].size() << endl;
    }
    // for( int i = 0; i < descriptorsTMPL.rows; i++ )
    // {
    //   if( matches[i][0].distance < 100 )
    //   {
    //       good_matches.push_back( matches[i][0]);
    //   }
    // }

    drawMatches( sourceGRAY_BLUR2, keypointsSCENE, mytemplate, keypointsTMPL, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
                 vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

    if (good_matches.size() >= 4)  {

        for( int i = 0; i < good_matches.size(); i++ ) {
            //Get the keypoints from the good matches
            obj.push_back( keypointsTMPL[ good_matches[i].queryIdx ].pt );
            scene.push_back( keypointsSCENE[ good_matches[i].trainIdx ].pt );
        }

        H = findHomography( obj, scene, CV_RANSAC );

        perspectiveTransform( obj_corners, scene_corners, H);

        //Draw lines between the corners (the mapped object in the scene image)
        line( img_matches, scene_corners[0], scene_corners[1], cvScalar(0, 255, 0), 4 );
        line( img_matches, scene_corners[1], scene_corners[2], cvScalar(0, 255, 0), 4 );
        line( img_matches, scene_corners[2], scene_corners[3], cvScalar(0, 255, 0), 4 );
        line( img_matches, scene_corners[3], scene_corners[0], cvScalar(0, 255, 0), 4 );
    }

    imshow( "Good Matches & Object detection", img_matches );

    // UIImage *resultimage = [UIImage imageWithCVMat:img_matches];
    // UIImageView *imageview = [[UIImageView alloc] initWithImage:resultimage];
    // imageview.frame = CGRectMake(0, 0, 320, 240);

    // UIImage *resultimage2 = [UIImage imageWithCVMat:mytemplate];
    // UIImageView *imageview2 = [[UIImageView alloc] initWithImage:resultimage2];
    // imageview2.frame = CGRectMake(0, 240, 320, 240);

    // GaussianBlur( sourceGRAY2, sourceGRAY_BLUR2, Size(7,7), 3.0 );
    // imshow( "image", sourceGRAY_BLUR2 );

    // track();

    // imshow("image", sourceGRAY2);

    // imshow(windowName,drawing);
    waitKey(0);

    return 0;
}
Example #7
/** @function main */
int main(int argc, char** argv)
{

	string filepath = "..\\Images\\IMG_20150313_152723.jpg";
	bool vis = TRUE;
	double margin = 0.1;


	if (argc > 3){ // all three arguments are read below, so require them together
		filepath = argv[1];
		string visualise = argv[2];
		if (visualise == "1"){
			vis = TRUE;
		}
		string marg = argv[3];
		margin = ::atof(marg.c_str());
	}
	//string filepath = "..\\Images\\IMG_20150313_152723.jpg";
	////////////////
	Mat sceneMat = imread(filepath, IMREAD_GRAYSCALE);

	//objectMat = imread("..\\Images\\Photos\\IMG_20150311_154958.jpg", IMREAD_GRAYSCALE);
	std::vector<KeyPoint> keypointsO; //keypoints for object
	std::vector<KeyPoint> keypointsS; //keypoints for scene
	//Descriptor matrices
	Mat descriptors_object, descriptors_scene;
	SurfFeatureDetector surf(1500);
	surf.detect(sceneMat, keypointsS);

	SurfDescriptorExtractor extractor;
	extractor.compute(sceneMat, keypointsS, descriptors_scene);


	printf("Keypoints in foto: %i \n", keypointsS.size());

	if (vis){
		cv::Mat output;
		cv::drawKeypoints(sceneMat, keypointsS, output);
		imshow("Keypoints on photo", output);
		waitKey(0);
	}

	int most_amount_of_matches_overall = 0;

	vector<std::string> folder_names = get_all_folder_names_within_folder("..\\Images\\Photos\\");
	//vector<std::string> folder_names = get_all_folder_names_within_folder("C:\\temp\\");

	vector< vector<int> > good_matches_per_folder(folder_names.size() - 2); // the first two entries are "." and "..", so skip them


	for (int i = 2; i < folder_names.size(); i++) {

		std::string fon = folder_names[i];
		printf("-- Folder : %s \n", fon.c_str());

		char folder_path[80];
		strcpy_s(folder_path, "..\\Images\\Photos\\");
		strcat_s(folder_path, fon.c_str());
		strcat_s(folder_path, "\\");
		puts(folder_path);

		vector<std::string> files_names = get_all_files_names_within_folder(folder_path);

		for (int j = 0; j < files_names.size(); j++) {
			std::string fn = files_names[j];
			printf("-- Max dist : %s \n", fn.c_str());


			char object_path[80];
			strcpy_s(object_path, "..\\Images\\Photos\\");
			strcat_s(object_path, fon.c_str());
			strcat_s(object_path, "\\");
			strcat_s(object_path, fn.c_str());
			puts(object_path);


			//	Mat img_object = imread(object_path, IMREAD_GRAYSCALE);
			Mat objectMat = imread(object_path, IMREAD_GRAYSCALE);


			if (!objectMat.data || !sceneMat.data)
			{
				std::cout << " --(!) Error reading images " << std::endl; return -1;
			}



			//detector.detect(img_object, keypoints_object);
			surf.detect(objectMat, keypointsO);
			extractor.compute(objectMat, keypointsO, descriptors_object);



			//Declaring the FLANN-based matcher
			FlannBasedMatcher matcher;
			//BFMatcher for SURF algorithm can be either set to NORM_L1 or NORM_L2.
			//But if you are using binary feature extractors like ORB, instead of NORM_L* you use "hamming"
			//	BFMatcher matcher(NORM_L1);

			vector< vector< DMatch >> matches;
			matcher.knnMatch(descriptors_object, descriptors_scene, matches, 2); // find the 2 nearest neighbors

			vector< DMatch > good_matches;
			good_matches.reserve(matches.size());
			float nndrRatio = 0.7f;

			for (size_t k = 0; k < matches.size(); ++k) // k, not i: avoid shadowing the folder index
			{
				if (matches[k].size() < 2)
					continue;

				const DMatch &m1 = matches[k][0];
				const DMatch &m2 = matches[k][1];

				if (m1.distance <= nndrRatio * m2.distance)
					good_matches.push_back(m1);
			}

			printf("-- Amount of good matches : %d \n", good_matches.size());
			good_matches_per_folder[i - 2].push_back(good_matches.size());

			if (good_matches.size() > most_amount_of_matches_overall){
				most_amount_of_matches_overall = good_matches.size();
			}

			if (vis){
				Mat img_matches;
				drawMatches(objectMat, keypointsO, sceneMat, keypointsS,
					good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
					vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
				imshow("Good Matches & Object detection", img_matches);
				waitKey(0);
			}


		}

	}


	vector<int> amount_of_good_matches_per_folder(folder_names.size() - 2);

	for (int i = 0; i < good_matches_per_folder.size(); ++i)
	{
		vector<int> matches = good_matches_per_folder[i];
		int good_matches = 0;
		for (int j = 0; j < matches.size(); ++j)
		{
			if (matches[j]>0.8*most_amount_of_matches_overall){
				good_matches = good_matches + 1;
			}
		}
		amount_of_good_matches_per_folder[i] = good_matches;
	}

	if (most_amount_of_matches_overall > keypointsS.size()*margin){

		auto biggest = std::max_element(std::begin(amount_of_good_matches_per_folder), std::end(amount_of_good_matches_per_folder));
		std::cout << "Max element is " << *biggest
			<< " at position " << std::distance(std::begin(amount_of_good_matches_per_folder), biggest) << std::endl;

		int best = std::distance(std::begin(amount_of_good_matches_per_folder), biggest);

		std::cout << "Best match is " << folder_names[best + 2];
	}

	else{
		std::cout << "No match found";
	}



	return 0;
}
Example #8
int main()
{
//for serial port
    int fd = open_port();
    fd_set readfs;
    if (fd == -1)
    {
        std::cout << "Could not open /dev/ttyUSB1" << std::endl;
        return -1;
    }

    struct timeval Timeout;
    Timeout.tv_usec = 0;  /* microseconds */
    Timeout.tv_sec  = 0;  /* seconds */

//for stereo
	int w=640, h=480;

	Mat M1, M2, D1, D2, R1, R2, P1, P2, mx1, mx2, my1, my2, Q;
	FileStorage fs;

	fs.open("intrinsics.xml", FileStorage::READ);
	fs["M1"] >> M1;
	fs["D1"] >> D1;
	fs["M2"] >> M2;
	fs["D2"] >> D2;
	fs.release();

	fs.open("extrinsics.xml", FileStorage::READ);
	fs["R1"] >> R1;
	fs["P1"] >> P1;
	fs["R2"] >> R2;
	fs["P2"] >> P2;
	fs["Q"]  >> Q;
	fs.release();

    Q.convertTo(Q, CV_32F);

	initUndistortRectifyMap(M1, D1, R1, P1, Size(w, h), CV_32FC1, mx1, my1);
    initUndistortRectifyMap(M2, D2, R2, P2, Size(w, h), CV_32FC1, mx2, my2);

    int ndisp = 192; //288 96 192 48
	StereoBM bm(StereoBM::BASIC_PRESET);
	bm.state->preFilterCap = 31;
    bm.state->SADWindowSize = 41;
    bm.state->minDisparity = 48;
    bm.state->numberOfDisparities = ndisp;
    bm.state->textureThreshold = 10;
    bm.state->uniquenessRatio = 15;
    bm.state->speckleWindowSize = 100;
    bm.state->speckleRange = 32;
    bm.state->disp12MaxDiff = 1;


//for SURF
	Mat object = imread("photo20.jpg", CV_LOAD_IMAGE_GRAYSCALE);

    if( !object.data )
    {
        std::cout<< "Error reading object " << std::endl;
        return -1;
    }

    std::vector<Point2f> obj_corners(4);
    obj_corners[0] = cvPoint(0, 0);
    obj_corners[1] = cvPoint(object.cols, 0);
    obj_corners[2] = cvPoint(object.cols, object.rows);
    obj_corners[3] = cvPoint(0, object.rows);

    SurfFeatureDetector detector(400);
    SurfDescriptorExtractor extractor;
    FlannBasedMatcher matcher;

    std::vector<KeyPoint> kp_object;
    Mat des_object;
    detector.detect(object, kp_object);
    extractor.compute(object, kp_object, des_object);

//miscellaneous
    char key = 'a'; int framecount = 0, count = 0;
	Camera cl("/dev/video2", w, h, 20);
    Camera cr("/dev/video1", w, h, 20);

	namedWindow("disparity");
	namedWindow("object detection");

	int window = 25, offset = 125; //35 pixels on either side, centre 25 pixels to the right of 240
	bool is_open = false, isforward = false, nointerrupt = false;

	while (key != 27)
	{
	    //flags
	    bool ask = false, left = false, right = false, middle = false, inrange = false, detect = false, stop = false;

	    //serial receiving
	    FD_SET(fd, &readfs);
        select(fd + 1, &readfs, NULL, NULL, &Timeout);
        char buf; int rx = 0; // one-byte receive buffer
        if (FD_ISSET(fd, &readfs))
        {
            rx = read(fd, &buf, 1);
            tcflush(fd, TCIFLUSH);
            ask = true; //isforward = false;
        }

	    char extent = 0; float depth;

	    IplImage *l=cvCreateImage(Size(w, h), 8, 3);
	    IplImage *r=cvCreateImage(Size(w, h), 8, 3);
	    cl.Update(&cr);

	    if (framecount < 5)
	    {
	        framecount++;
	        continue;
	    }

	    cl.toIplImage(l);
	    cr.toIplImage(r);
        Mat lg, rg, lge, rge;
        cvtColor(Mat(l), lg, CV_RGB2GRAY);
        cvtColor(Mat(r), rg, CV_RGB2GRAY);

        equalizeHist(lg, lge);
        equalizeHist(rg, rge);

        line(lg, Point(w/2 - window + offset, 0), Point(w/2 - window + offset, h), Scalar(0, 255, 0), 2);
        line(lg, Point(w/2 + window + offset, 0), Point(w/2 + window + offset, h), Scalar(0, 255, 0), 2);

        line(rg, Point(w/2 - window - offset, 0), Point(w/2 - window - offset, h), Scalar(0, 255, 0), 2);
        line(rg, Point(w/2 + window - offset, 0), Point(w/2 + window - offset, h), Scalar(0, 255, 0), 2);

        Mat des_image, img_matches, H;
        std::vector<KeyPoint> kp_image;
        std::vector<vector<DMatch > > matches;
        std::vector<DMatch > good_matches;
        std::vector<Point2f> obj;
        std::vector<Point2f> scene;
        std::vector<Point2f> scene_corners(4);

        detector.detect(lg, kp_image);
        extractor.compute(lg, kp_image, des_image);
        matcher.knnMatch(des_object, des_image, matches, 2);

        for(int i = 0; i < matches.size(); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
        {
            if(matches[i].size()==2 && (matches[i][0].distance < 0.8*(matches[i][1].distance)) )
            {
                good_matches.push_back(matches[i][0]);
                obj.push_back(kp_object[matches[i][0].queryIdx].pt);
                scene.push_back(kp_image[matches[i][0].trainIdx].pt);
            }
        }

        if (good_matches.size() >= 35)
        {
            H = findHomography(obj, scene, CV_RANSAC);
            perspectiveTransform(obj_corners, scene_corners, H);

            RotatedRect box = minAreaRect(scene_corners);
            Point2f rect_corners[4];
            box.points(rect_corners);
            Rect roi = box.boundingRect();

            if (roi.area() > 3000)
            {
                detect = true; count = 0;
                for (int i = 0; i < 4; i++)
                {
                    line(lg, rect_corners[i], rect_corners[(i+1)%4], Scalar(255, 255, 255), 4);
                }
                line(lg, box.center, box.center, Scalar(255, 255, 255), 8);

                if (box.center.x < w/2 - window + offset)
                {
                    left = true;
                    //write(fd, "a", 1);
                    extent = (w/2 + offset - box.center.x) / 5; //std::cout << int(extent) << std::endl;
                    if (extent < 6) extent = 6;
                    if (isforward == true) stop = true;
                }

                else if (box.center.x > w/2 + window + offset)
                {
                    right = true;
                    //write(fd, "d", 1);
                    extent = (box.center.x - (w/2 + offset)) / 5; //std::cout << int(extent) << std::endl;
                    if (extent < 6) extent = 6;
                    if (isforward == true) stop = true;
                }

                else if (box.center.x > w/2 - window + offset && box.center.x < w/2 + window + offset)
                {
                    middle = true;
                    Mat lr, rr;
                    //imshow("l", lg); imshow("r", rg);

                    remap(lge, lr, mx1, my1, INTER_LINEAR);
                    remap(rge, rr, mx2, my2, INTER_LINEAR);

                    Mat disp, vdisp, disp32;
                    bm(lr, rr, disp);
                    disp.convertTo(vdisp, CV_8U, 255/(ndisp*16.));
                    disp.convertTo(disp32, CV_32F, 1.f/16.f);

                    for (int i = 0; i < 4; i++)
                    {
                        line(vdisp, rect_corners[i], rect_corners[(i+1)%4], Scalar(255, 255, 255), 4);
                    }
                    line(vdisp, box.center, box.center, Scalar(255, 255, 255), 8);

                    imshow("disparity", vdisp);

                    Mat xyz;
                    reprojectImageTo3D(disp32, xyz, Q, true);
                    int ch[] = {2, 0};
                    Mat z(xyz.size(), CV_32FC1);
                    mixChannels(&xyz, 1, &z, 1, ch, 1);

                    float dist = 0; unsigned long int npts = 0;
                    for (int i = 0; i < z.rows; i++)
                    {
                        for (int j = 0; j < z.cols; j++)
                        {
                            if (roi.contains(Point(j, i)) && z.at<float>(i, j) > -500.0 && z.at<float>(i, j) < 500.0)
                            {
                                dist += z.at<float>(i, j);
                                npts++;
                            }
                        }
                    }

                    //std::cout << -dist/npts << std::endl;
                    depth = 1.1561 * ((-dist/npts) - 0.124);
                    std::cout << depth << std::endl;
                    extent = depth > 0 && depth < 45 ? (char(depth) - 6) / 3 : 2;

                    if (depth > 0 && depth < 35) inrange = true;
                    if (depth > 0 && depth < 30) nointerrupt = true;
                }
            }
        }

//decisions and commanding
        if (stop && !nointerrupt) {write(fd, "x", 1); write(fd, &extent, 1); isforward = false; std::cout << "stop" << std::endl;}

        if (ask)
        {
            std::cout << "got" << std::endl;
            if (left)   {write(fd, "a", 1); write(fd, &extent, 1); tcflush(fd, TCIFLUSH);}
            if (right)  {write(fd, "d", 1); write(fd, &extent, 1); tcflush(fd, TCIFLUSH);}
            //if ((middle && !inrange) || (middle && is_open)) {write(fd, "w", 1); write(fd, &extent, 1); tcflush(fd, TCIFLUSH); isforward = true;}
            //if (inrange && !is_open){write(fd, "o", 1); write(fd, &extent, 1); is_open = true; std::cout << "open" << std::endl;}
            if (middle)
            {
                if (!is_open) {write(fd, "o", 1); write(fd, &extent, 1); is_open = true; std::cout << "open" << std::endl;}
                else {write(fd, "w", 1); write(fd, &extent, 1); tcflush(fd, TCIFLUSH); isforward = true;}
            }
            if (!detect)
            {
                count ++;
                if (count > 14)
                {
                    count = 0;
                    if (is_open) {write(fd, "c", 1); write(fd, &extent, 1); is_open = false; nointerrupt = false; std::cout << "close" << std::endl;}
                }
            }
        }

        imshow("object detection", lg);

        key = cv::waitKey(1);
        cvReleaseImage(&l);
        cvReleaseImage(&r);
	}
	return 0;
}
Example #9
JNIEXPORT bool JNICALL Java_com_example_mipatternrecognition_MainActivity_FindObject(
		JNIEnv*, jobject, jlong addrGray, jlong addrRgba, jint TypeDetection) {
	Mat& mGr = *(Mat*) addrGray;
	Mat& mRgb = *(Mat*) addrRgba;
	Mat& objeto = *(Mat*) objeto_long;
	int minHessian = 500;
	SurfFeatureDetector detector_Surf(minHessian);
	SiftFeatureDetector detector_Sift(minHessian);

	FastFeatureDetector detector_Fast(50);

	OrbFeatureDetector detector_Orb(500, 1.2f, 8, 14, 0, 2, 0, 14);

	MserFeatureDetector detector_Mser(5, 60, 14400, 0.25, 0.2, 200, 1.01, 0.003,
			5);

	int maxCorners = 1000;
	double qualityLevel = 0.01;
	double minDistance = 1.;
	int blockSize = 3;
	bool useHarrisDetector = false;
	double k2 = 0.04;
	GoodFeaturesToTrackDetector detector_Gftt(maxCorners, qualityLevel,
			minDistance, blockSize, useHarrisDetector, k2);
	useHarrisDetector = true;
	GoodFeaturesToTrackDetector detector_Harris(maxCorners, qualityLevel,
			minDistance, blockSize, useHarrisDetector, k2);

	int maxSize = 45;
	int responseThreshold = 30;
	int lineThresholdProjected = 10;
	int lineThresholdBinarized = 8;
	int suppressNonmaxSize = 5;
	StarFeatureDetector detector_Star(maxSize, responseThreshold,
			lineThresholdProjected, lineThresholdBinarized, suppressNonmaxSize);

	//http://stackoverflow.com/questions/14808429/classification-of-detectors-extractors-and-matchers

	SurfDescriptorExtractor extractor_Surf;
	SiftDescriptorExtractor extractor_Sift;
	OrbDescriptorExtractor extractor_Orb;
	FREAK extractor_Freak;

	switch (TypeDetection) {
	case SURF_DETECTION:
		detector_Surf.detect(mGr, keyPoints_2);
		extractor_Surf.compute(mGr, keyPoints_2, descriptors_2);
		break;
	case SIFT_DETECTION:
		detector_Sift.detect(mGr, keyPoints_2);
		extractor_Sift.compute(mGr, keyPoints_2, descriptors_2);
		break;
	case FAST_DETECTION:
		detector_Fast.detect(mGr, keyPoints_2);
		extractor_Freak.compute(mGr, keyPoints_2, descriptors_2);
		break;
	case ORB_DETECTION:
		detector_Orb.detect(mGr, keyPoints_2);
		extractor_Orb.compute(mGr, keyPoints_2, descriptors_2);
		break;
	case MSER_DETECTION:
		detector_Mser.detect(mGr, keyPoints_2);
		extractor_Surf.compute(mGr, keyPoints_2, descriptors_2);
		break;
	case GFTT_DETECTION:
		detector_Gftt.detect(mGr, keyPoints_2);
		extractor_Sift.compute(mGr, keyPoints_2, descriptors_2);
		break;
	case HARRIS_DETECTION:
		detector_Harris.detect(mGr, keyPoints_2);
		extractor_Orb.compute(mGr, keyPoints_2, descriptors_2);
		break;
	case STAR_DETECTION:
		detector_Star.detect(mGr, keyPoints_2);
		extractor_Orb.compute(mGr, keyPoints_2, descriptors_2);
		break;
	}

	if (descriptors_2.rows == 0 || descriptors_1.rows == 0
			|| keyPoints_2.size() == 0 || keyPoints_1.size() == 0) {
		return false;
	}

	FlannBasedMatcher matcher;
	vector<vector<DMatch> > matches;
	matcher.knnMatch(descriptors_1, descriptors_2, matches, 2);

	//-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist,
	//-- or a small arbitary value ( 0.02 ) in the event that min_dist is very
	//-- small)
	//-- PS.- radiusMatch can also be used here.
	vector<DMatch> good_matches;

	for (int i = 0; i < min(descriptors_1.rows - 1, (int) matches.size()); i++) //check the neighbour count before indexing, otherwise this loop can segfault
			{
		if (matches[i].size() >= 2
				&& (matches[i][0].distance < 0.6 * (matches[i][1].distance))) {
			good_matches.push_back(matches[i][0]);
		}
	}

	char cVal[50];
	sprintf(cVal, "%i", (int) good_matches.size());
	putText(mRgb, cVal, Point2f(110, 100), FONT_HERSHEY_PLAIN, 2,
			Scalar(0, 255, 0, 255), 2);

	Mat H;
	float reprojectionThreshold = 3;
	bool encontrado = false;
	try {
		encontrado = refineMatchesWithHomography(keyPoints_1, keyPoints_2,
				reprojectionThreshold, good_matches, H);

		if (encontrado) {

			vector<Point2f> obj_corners(4);
			obj_corners[0] = cvPoint(0, 0);
			obj_corners[1] = cvPoint(objeto.cols, 0);
			obj_corners[2] = cvPoint(objeto.cols, objeto.rows);
			obj_corners[3] = cvPoint(0, objeto.rows);
			vector<Point2f> scene_corners(4);

			perspectiveTransform(obj_corners, scene_corners, H);

			line(mRgb, scene_corners[0], scene_corners[1], Scalar(0, 255, 0),
					4);
			line(mRgb, scene_corners[1], scene_corners[2], Scalar(255, 0, 0),
					4);
			line(mRgb, scene_corners[2], scene_corners[3], Scalar(0, 0, 255),
					4);
			line(mRgb, scene_corners[3], scene_corners[0],
					Scalar(255, 255, 255), 4);

			for (unsigned int i = 0; i < scene.size(); i++) {
				const Point2f& kp = scene[i];
				circle(mRgb, Point(kp.x, kp.y), 10, Scalar(255, 0, 0, 255));
			}
		} else {
			char cVal[50];
			sprintf(cVal, "%i", (int) good_matches.size());
			putText(mRgb, cVal, Point2f(100, 100), FONT_HERSHEY_PLAIN, 2,
					Scalar(0, 0, 255, 255), 2);
		}

	} catch (const Exception &e) {
		// findHomography can throw on degenerate input; treat this frame as "not found"
	}
	return encontrado;
}
Example #10
int main()
{
    Mat object = imread( "photo.jpg", CV_LOAD_IMAGE_GRAYSCALE );

    if( !object.data )
    {
        std::cout<< "Error reading object " << std::endl;
        return -1;
    }

    //Detect the keypoints using SURF Detector
    int minHessian = 500;
    SurfFeatureDetector detector( minHessian );
    std::vector<KeyPoint> kp_object;
    detector.detect( object, kp_object );

    //Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    Mat des_object;

    extractor.compute( object, kp_object, des_object );

    FlannBasedMatcher matcher;

    VideoCapture cap(0);

    namedWindow("Good Matches");

    std::vector<Point2f> obj_corners(4);

    //Get the corners from the object
    obj_corners[0] = cvPoint(0,0);
    obj_corners[1] = cvPoint( object.cols, 0 );
    obj_corners[2] = cvPoint( object.cols, object.rows );
    obj_corners[3] = cvPoint( 0, object.rows );

    char key = 'a';
    int framecount = 0;
    while (key != 27)
    {
        Mat frame;
        cap >> frame;

        if (framecount < 5)
        {
            framecount++;
            continue;
        }

        Mat des_image, img_matches;
        std::vector<KeyPoint> kp_image;
        std::vector<vector<DMatch > > matches;
        std::vector<DMatch > good_matches;
        std::vector<Point2f> obj;
        std::vector<Point2f> scene;
        std::vector<Point2f> scene_corners(4);
        Mat H;
        Mat image;

        cvtColor(frame, image, CV_BGR2GRAY); // VideoCapture delivers BGR frames

        detector.detect( image, kp_image );
        extractor.compute( image, kp_image, des_image );

        matcher.knnMatch(des_object, des_image, matches, 2);

        for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++) //check the neighbour count before indexing, otherwise this loop can segfault
        {
            if(matches[i].size() >= 2 && matches[i][0].distance < 0.6*(matches[i][1].distance))
            {
                good_matches.push_back(matches[i][0]);
            }
        }

        //Draw only "good" matches
        drawMatches( object, kp_object, image, kp_image, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

        if (good_matches.size() >= 4)
        {
            for( int i = 0; i < good_matches.size(); i++ )
            {
                //Get the keypoints from the good matches
                obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
                scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
            }

            H = findHomography( obj, scene, CV_RANSAC );

            perspectiveTransform( obj_corners, scene_corners, H);

            //Draw lines between the corners (the mapped object in the scene image )
            line( img_matches, scene_corners[0] + Point2f( object.cols, 0), scene_corners[1] + Point2f( object.cols, 0), Scalar(0, 255, 0), 4 );
            line( img_matches, scene_corners[1] + Point2f( object.cols, 0), scene_corners[2] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
            line( img_matches, scene_corners[2] + Point2f( object.cols, 0), scene_corners[3] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
            line( img_matches, scene_corners[3] + Point2f( object.cols, 0), scene_corners[0] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
        }

        //Show detected matches
        imshow( "Good Matches", img_matches );

        key = waitKey(1);
    }
    return 0;
}
Example #11
//Takes Mat object and finds its keypoints, then compares against the keypoints in segmentedCapture
//If there are 4 or more matching keypoints, then it reports a match
bool match(Mat object, IplImage* segmentedCapture, int i)
{
	printf("Size check of segmented capture: height: %d, width: %d\n", segmentedCapture->height, segmentedCapture->width);
	printf("attempting to read object now\n");

	bool matchFound = false;
	if( !object.data )
	{
		std::cout<< "Error reading object " << std::endl;
		return -1;
	}
	int minHessian = 500;

	SurfFeatureDetector detector(minHessian);
	//Detect the keypoints using SURF Detector

	std::vector<KeyPoint> kp_object;
	detector.detect( object, kp_object );

	//Calculate descriptors (feature vectors)
	Mat des_object;
	SurfDescriptorExtractor extractor;

	extractor.compute( object, kp_object, des_object );
	printf("Number of descriptors found for initial object: %d\n", (int)kp_object.size());

	FlannBasedMatcher matcher;

	char windowName[20]; // stack buffer, nothing to free
	sprintf(windowName, "Match %d", i);
	destroyWindow(windowName);
	namedWindow(windowName);

	std::vector<Point2f> obj_corners(4);
	obj_corners[0] = cvPoint(0,0);
	obj_corners[1] = cvPoint( object.cols, 0 );
	obj_corners[2] = cvPoint( object.cols, object.rows );
	obj_corners[3] = cvPoint( 0, object.rows );

	Mat des_image, img_matches;
	std::vector<KeyPoint> kp_image;
	std::vector<vector<DMatch > > matches;
	std::vector<DMatch > good_matches;
	std::vector<Point2f> obj;
	std::vector<Point2f> scene;
	std::vector<Point2f> scene_corners(4);
	Mat H;
	Mat image;


	cvResetImageROI(segmentedCapture);
	printf("creating image to store it in");
	//	IplImage *image2 = cvCreateImage(cvSize(segmentedCapture->width, segmentedCapture->height), IPL_DEPTH_8U,1);
	printf("about to convert to gray\n");
	//	cvCvtColor(segmentedCapture, image2, CV_BGR2GRAY);
	//
	//	printf("converted to gray\n");
	Mat matCon(segmentedCapture);
	image = segmentedCapture;
	//	printf("before detection\n");
	detector.detect( image, kp_image );
	//	printf("after detection, number of descriptors for detected object: %d\n", kp_image.size());
	extractor.compute( image, kp_image, des_image );
	//	printf("after computation  of extraction\n");

	if(des_image.empty()){
		printf("key points from capture frame are empty\n");
	} else {

		matcher.knnMatch(des_object, des_image, matches, 2);
		//		matcher.match(des_object, des_image, matches);
		printf("after knnmatch: matches.size() is %d\n", matches.size());
		for(int j = 0; j < min(des_image.rows-1,(int) matches.size()); j++) //check the neighbour count before indexing, otherwise this loop can segfault
		{
			if(matches[j].size() >= 2 && matches[j][0].distance < 0.5*(matches[j][1].distance))
			{
				good_matches.push_back(matches[j][0]);
				//			printf("Outer loop is on: %d, Number of matches is: %d\n", i, (int)good_matches.size());
			}
		}

		//Draw only "good" matches
		drawMatches( object, kp_object, image, kp_image, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

		if (good_matches.size() >= 4)
		{
			matchFound = true;
			printf("Found %d matched points for detectedObject %d", good_matches.size(), i );
			for( int i = 0; i < good_matches.size(); i++ )
			{
				//Get the keypoints from the good matches
				obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
				scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
			}

			H = findHomography( obj, scene, CV_RANSAC );

			perspectiveTransform( obj_corners, scene_corners, H);

			//Draw lines between the corners (the mapped object in the scene image )
			line( img_matches, scene_corners[0] + Point2f( object.cols, 0), scene_corners[1] + Point2f( object.cols, 0), Scalar(0, 255, 0), 4 );
			line( img_matches, scene_corners[1] + Point2f( object.cols, 0), scene_corners[2] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
			line( img_matches, scene_corners[2] + Point2f( object.cols, 0), scene_corners[3] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
			line( img_matches, scene_corners[3] + Point2f( object.cols, 0), scene_corners[0] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
		}
		imshow( windowName, img_matches );

	}
	return matchFound;
}
Example #12
bool detectLogo(Mat person, Mat desObject, Mat object, vector<KeyPoint> kpObject, vector<Point2f> objCorners)
{
    // scale up the image
    resize(person, person, Size(), 4, 4, CV_INTER_CUBIC);
    
    // sharpen the image (unsharp mask: subtract a weighted Gaussian blur)
    Mat image;
    GaussianBlur(person, image, cv::Size(0, 0), 3);
    addWeighted(person, 1.75, image, -0.75, 0, image);

    // detect key points in the input frame
    vector<KeyPoint> kpFrame;
    detector.detect(person, kpFrame);
    
    // extract feature descriptors for the detected key points
    Mat desFrame;
    extractor.compute(person, kpFrame, desFrame);
    if(desFrame.empty() or desObject.empty())
        return false;
    
    // match the key points with object
    FlannBasedMatcher matcher;
    vector< vector <DMatch> > matches;
    matcher.knnMatch(desObject, desFrame, matches, 2);
    
    // compute the good matches among the matched key points
    vector<DMatch> goodMatches;
    for(int i = 0; i < (int)matches.size(); i++) // bound by matches.size() and check the neighbour count to avoid out-of-range access
    {
        if(matches[i].size() >= 2 && matches[i][0].distance < 0.6 * matches[i][1].distance)
        {
            goodMatches.push_back(matches[i][0]);
        }
    }
    
    if(goodMatches.size() >= 8)
    {
        vector<Point2f> obj;
        vector<Point2f> scene;
        
        for( int i = 0; i < goodMatches.size(); i++ )
        {
            // get the keypoints from the good matches
            obj.push_back( kpObject[ goodMatches[i].queryIdx ].pt );
            scene.push_back( kpFrame[ goodMatches[i].trainIdx ].pt );
        }
        
        Mat H;
        H = findHomography(obj, scene);
        
        vector<Point2f> sceneCorners(4);
        perspectiveTransform( objCorners, sceneCorners, H);
        
        // draw lines between the corners (the mapped object in the scene image )
        line(person, sceneCorners[0], sceneCorners[1], Scalar(255, 255, 255), 4);
        line(person, sceneCorners[1], sceneCorners[2], Scalar(255, 255, 255), 4);
        line(person, sceneCorners[2], sceneCorners[3], Scalar(255, 255, 255), 4);
        line(person, sceneCorners[3], sceneCorners[0], Scalar(255, 255, 255), 4);
        
        imshow("Person", person);
        cout << "[MESSAGE] LOGO DETECTED" << endl;
        return true;
    }
    return false;
}
Example #13
int main(int argc, char * argv[])
{
	if(argc < 2)
	{
		std::cout << "Use: tracker <target_image>" << std::endl;
        return -1;
	}
	
    Mat mTarget = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );

    if( !mTarget.data )
    {
        std::cout<< "Error reading target image." << std::endl;
        return -1;
    }

    //Detect the keypoints using SURF Detector
    int minHessian = 500;

    SurfFeatureDetector detector( minHessian );
    std::vector<KeyPoint> kpTarget;

    detector.detect( mTarget, kpTarget );

    //Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    Mat des_object;

    extractor.compute( mTarget, kpTarget, des_object );

    FlannBasedMatcher matcher;

    //VideoCapture cap("http://192.168.1.200/videostream.cgi?user=admin&pwd=31415LAS&resolution=32&dummy=.mjpg");
	VideoCapture cap("http://nidq.no-ip.org/videostream.cgi?user=admin&pwd=31415LAS&resolution=32&dummy=.mjpg");

    namedWindow("Capture");

    std::vector<Point2f> tgt_corners(4);

    //Get the corners from the object
    tgt_corners[0] = cvPoint(0,0);
    tgt_corners[1] = cvPoint( mTarget.cols, 0 );
    tgt_corners[2] = cvPoint( mTarget.cols, mTarget.rows );
    tgt_corners[3] = cvPoint( 0, mTarget.rows );

    char key = 'a';
    int framecount = 0;
    while (key != 27)
    {
        Mat frame;
        cap >> frame;

        if (framecount < 5)
        {
            framecount++;
            continue;
        }

        Mat des_image, img_matches;
        std::vector<KeyPoint> kpImage;
        std::vector<vector<DMatch > > matches;
        std::vector<DMatch > good_matches;
        std::vector<Point2f> obj;
        std::vector<Point2f> scene;
        std::vector<Point2f> scene_corners(4);
        Mat H;
        Mat image;

        cvtColor(frame, image, CV_BGR2GRAY); // VideoCapture delivers BGR frames

        detector.detect( image, kpImage );
        extractor.compute( image, kpImage, des_image );

        matcher.knnMatch(des_object, des_image, matches, 2);

        for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++) //check the neighbour count before indexing, otherwise this loop can segfault
        {
            if(matches[i].size() >= 2 && matches[i][0].distance < 0.6*(matches[i][1].distance))
            {
                good_matches.push_back(matches[i][0]);
            }
        }

        //Draw only "good" matches
        drawMatches( mTarget, kpTarget, image, kpImage, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

        if (good_matches.size() >= 4)
        {
            for( int i = 0; i < good_matches.size(); i++ )
            {
                //Get the keypoints from the good matches
                obj.push_back( kpTarget[ good_matches[i].queryIdx ].pt );
                scene.push_back( kpImage[ good_matches[i].trainIdx ].pt );
            }

            H = findHomography( obj, scene, CV_RANSAC );

            perspectiveTransform( tgt_corners, scene_corners, H);

            //Draw lines between the corners (the mapped object in the scene image )
            line( img_matches, scene_corners[0] + Point2f( mTarget.cols, 0), scene_corners[1] + Point2f( mTarget.cols, 0), Scalar(0, 255, 0), 4 );
            line( img_matches, scene_corners[1] + Point2f( mTarget.cols, 0), scene_corners[2] + Point2f( mTarget.cols, 0), Scalar( 0, 255, 0), 4 );
            line( img_matches, scene_corners[2] + Point2f( mTarget.cols, 0), scene_corners[3] + Point2f( mTarget.cols, 0), Scalar( 0, 255, 0), 4 );
            line( img_matches, scene_corners[3] + Point2f( mTarget.cols, 0), scene_corners[0] + Point2f( mTarget.cols, 0), Scalar( 0, 255, 0), 4 );
        }

        //Show detected matches
        imshow( "Capture", img_matches );

        key = waitKey(1);
    }
    return 0;
}
Example #14
    /*
     * @brief Callback (the core of the per-frame video processing)
     */
    void imageCb(const sensor_msgs::ImageConstPtr &msg)
    {
        cv_bridge::CvImagePtr cap;
        Mat in_img;
        Mat grayframe;
        Mat img_matches;
        Mat cap_descriptors;
        Mat H;
        Mat lastimage;

        std::vector<KeyPoint> cap_keypoint;
        std::vector<std::vector<DMatch> > matches;
        std::vector<DMatch> good_matches;
        std::vector<Point2f> obj;
        std::vector<Point2f> scene;
        std::vector<Point2f> scene_corners(4);

        //Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce");
        FlannBasedMatcher matcher;

        try{
            cap = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
            in_img = cap->image;
        }
        catch(cv_bridge::Exception& e){
            ROS_ERROR("cv_brige exception: %s", e.what());
            return;
        }

        if(in_img.empty()) {
            std::cerr<<"No captured frame image"<<std::endl;
        }
        else{
            cvtColor(cap->image, grayframe, CV_BGR2GRAY);
            computeSURF(grayframe, cap_keypoint, cap_descriptors);
            //drawAKAZEKeypoint(&cap->image, cap_keypoint, Scalar(0, 0, 255));
            // corners of the reference object, not of the incoming frame
            obj_corners[0] = cvPoint(0,0);
            obj_corners[1] = cvPoint( object_img.cols, 0 );
            obj_corners[2] = cvPoint( object_img.cols, object_img.rows );
            obj_corners[3] = cvPoint( 0, object_img.rows );

            std::cout<<imagenum<<std::endl;
            matcher.knnMatch(cap_descriptors, descriptors_0, matches, 2);
            for(int i = 0; i < min(descriptors_0.rows-1,(int) matches.size()); i++){
                if(matches[i].size() >= 2 && matches[i][0].distance < 0.6*(matches[i][1].distance)){
                    good_matches.push_back(matches[i][0]);
                }
            }

            drawMatches(grayframe, cap_keypoint, object_img, keypoint_0, good_matches, img_matches);

            if(good_matches.size() >= 4){
                for(int i = 0; i < good_matches.size(); i++){
                    // query = capture frame, train = reference object
                    obj.push_back(keypoint_0[good_matches[i].trainIdx].pt);
                    scene.push_back(cap_keypoint[good_matches[i].queryIdx].pt);
                }

                H = findHomography(obj, scene, CV_RANSAC); // needs at least 4 point pairs
                perspectiveTransform(obj_corners, scene_corners, H);
                line( img_matches, scene_corners[0] + Point2f( object_img.cols, 0), scene_corners[1] + Point2f( object_img.cols, 0), Scalar(0, 255, 0), 4 );
                line( img_matches, scene_corners[1] + Point2f( object_img.cols, 0), scene_corners[2] + Point2f( object_img.cols, 0), Scalar( 0, 255, 0), 4 );
                line( img_matches, scene_corners[2] + Point2f( object_img.cols, 0), scene_corners[3] + Point2f( object_img.cols, 0), Scalar( 0, 255, 0), 4 );
                line( img_matches, scene_corners[3] + Point2f( object_img.cols, 0), scene_corners[0] + Point2f( object_img.cols, 0), Scalar( 0, 255, 0), 4 );
            }



#ifdef MULTI
            // note: this branch assumes the commented-out Ptr<DescriptorMatcher>
            // variant above and a flat vector<DMatch>, so it does not compile as-is
            for(int i = 0; i < IMAGENUM; i++){
                matcher->match(cap_descriptors, trainDescCollection[i], matches);
                drawMatches(cap->image, cap_keypoint, trainImgCollection[i], trainPointCollection[i], matches, img_matches);
            }
#endif

        }

        imshow(OPENCV_WINDOW, img_matches);
        waitKey(3);
        image_pub_.publish(cap->toImageMsg());
    }
void identifyObject( Mat& frame, Mat& object, const string& objectName ) {

    //Detect the keypoints using SURF Detector
    int minHessian = 500;
    SurfFeatureDetector detector( minHessian );
    std::vector<KeyPoint> kp_object;
    detector.detect( object, kp_object );

    //Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    Mat des_object;
    extractor.compute( object, kp_object, des_object );
    FlannBasedMatcher matcher;


    //Get the corners from the object
    std::vector<Point2f> obj_corners(4);
    obj_corners[0] = cvPoint(0,0);
    obj_corners[1] = cvPoint( object.cols, 0 );
    obj_corners[2] = cvPoint( object.cols, object.rows );
    obj_corners[3] = cvPoint( 0, object.rows );


    // Match descriptors to frame
    Mat des_image, img_matches;
    std::vector<KeyPoint> kp_image;
    std::vector<vector<DMatch > > matches;
    std::vector<DMatch > good_matches;
    std::vector<Point2f> obj;
    std::vector<Point2f> scene;
    std::vector<Point2f> scene_corners(4);
    Mat H;
    Mat image;

    cvtColor(frame, image, CV_BGR2GRAY); // assuming a BGR input frame

    detector.detect( image, kp_image );
    extractor.compute( image, kp_image, des_image );

    matcher.knnMatch(des_object, des_image, matches, 2);

    for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++) //check the neighbour count before indexing, otherwise this loop can segfault
    {
        if(matches[i].size() >= 2 && matches[i][0].distance < 0.6*(matches[i][1].distance))
        {
            good_matches.push_back(matches[i][0]);
        }
    }

    //Draw only "good" matches
    drawMatches( object, kp_object, image, kp_image, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

    if (good_matches.size() >= 4)
    {
        for( int i = 0; i < good_matches.size(); i++ )
        {
            //Get the keypoints from the good matches
            obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
            scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
        }

        H = findHomography( obj, scene, CV_RANSAC );

        perspectiveTransform( obj_corners, scene_corners, H);

        //Draw lines between the corners (the mapped object in the scene image )
        line( frame, scene_corners[0], scene_corners[1], Scalar(0, 255, 0), 4 );
        line( frame, scene_corners[1], scene_corners[2], Scalar( 0, 255, 0), 4 );
        line( frame, scene_corners[2], scene_corners[3], Scalar( 0, 255, 0), 4 );
        line( frame, scene_corners[3], scene_corners[0], Scalar( 0, 255, 0), 4 );
        //Label the detected object at the centre of the mapped corners
        //(scene_corners are only meaningful when a homography was found)
        Point2f textPoint = cvPoint( (scene_corners[0].x+scene_corners[1].x+scene_corners[2].x+scene_corners[3].x )/4.0 , (scene_corners[0].y+scene_corners[1].y+scene_corners[2].y+scene_corners[3].y )/4.0 );
        putText( frame, objectName, textPoint, FONT_HERSHEY_COMPLEX_SMALL, 1.0, cvScalar(0,250,150), 1, CV_AA );
    }

}
Example #16
int main( int argc, char** argv ) {
    

    if( argc != 3 )
    { readme(); return -1; }

    int hessianValue = 400; 
    Mat objectMat = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );
    Mat sceneMat = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE );

    if( !objectMat.data || !sceneMat.data )
    { std::cout<< " --(!) Error reading images " << std::endl; return -1; }

    bool objectFound = false;
    float nndrRatio = 0.7f;
    //vector of keypoints   
    vector< cv::KeyPoint > keypointsO;
    vector< cv::KeyPoint > keypointsS;    
    
    Mat descriptors_object, descriptors_scene;
  
   //-- Step 1: Extract keypoints
   //SurfFeatureDetector surf(hessianValue); 
   Ptr<SURF> detector = SURF::create(1000); 
     
   detector->detect(sceneMat,keypointsS);
    if(keypointsS.size() < 7) return -1; //Not enough keypoints, object not found
    detector->detect(objectMat,keypointsO);
    if(keypointsO.size() < 7) return -1; //Not enough keypoints, object not found
 
    //-- Step 2: Calculate descriptors (feature vectors)
  //SurfDescriptorExtractor extractor; // pre-OpenCV-3 API
  Ptr<SURF> extractor = SURF::create(minHessian);
  extractor->compute( sceneMat, keypointsS, descriptors_scene );
  extractor->compute( objectMat, keypointsO, descriptors_object );
 
  //-- Step 3: Matching descriptor vectors using FLANN matcher
  FlannBasedMatcher matcher;  
  cout << "Scene descriptors: " << descriptors_scene.size()
       << ", object keypoints: " << keypointsO.size()
       << ", scene keypoints: " << keypointsS.size() << endl;
  std::vector< vector< DMatch >  > matches;
  matcher.knnMatch( descriptors_object, descriptors_scene, matches, 2 );    
  vector< DMatch > good_matches;
  good_matches.reserve(matches.size());  
     
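  // Lowe's nearest-neighbour distance ratio test: a match is "good" when the
  // best candidate is clearly closer than the runner-up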
  for (size_t i = 0; i < matches.size(); ++i)
  { 
      if (matches[i].size() < 2)
                  continue;
     
      const DMatch &m1 = matches[i][0];
      const DMatch &m2 = matches[i][1];
     
      if(m1.distance <= nndrRatio * m2.distance)        
      good_matches.push_back(m1);     
  }
 
   
   
  if( good_matches.size() >= 7 )
  {
 
    cout << "OBJECT FOUND!" << endl;
 
    std::vector< Point2f > obj;
    std::vector< Point2f > scene;
 
    for( unsigned int i = 0; i < good_matches.size(); i++ )
    {
        //-- Get the keypoints from the good matches
        obj.push_back( keypointsO[ good_matches[i].queryIdx ].pt );
        scene.push_back( keypointsS[ good_matches[i].trainIdx ].pt );
    }
 
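    // Map the object into the scene: RANSAC-fitted homography from the good matches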
    Mat H = findHomography( obj, scene, CV_RANSAC );
 
   
 
    //-- Get the corners from the image_1 ( the object to be "detected" )
    std::vector< Point2f > obj_corners(4);
    obj_corners[0] = cvPoint(0,0); obj_corners[1] = cvPoint( objectMat.cols, 0 );
    obj_corners[2] = cvPoint( objectMat.cols, objectMat.rows ); obj_corners[3] = cvPoint( 0, objectMat.rows );
    std::vector< Point2f > scene_corners(4);
 
    perspectiveTransform( obj_corners, scene_corners, H);
         
 
    //-- Draw lines between the corners (the mapped object in the scene - image_2 )
    //outImg and color were not defined in this snippet; draw on a color copy of the scene
    Mat outImg;
    cvtColor( sceneMat, outImg, CV_GRAY2BGR );
    Scalar color( 0, 255, 0 );
    line( outImg, scene_corners[0], scene_corners[1], color, 2 ); //TOP line
    line( outImg, scene_corners[1], scene_corners[2], color, 2 );
    line( outImg, scene_corners[2], scene_corners[3], color, 2 );
    line( outImg, scene_corners[3], scene_corners[0], color, 2 );
    objectFound=true;
  }
  else {
      cout << "OBJECT NOT FOUND!" << endl;
  }
 
     
    cout << "Matches found: " << matches.size() << endl;  
    cout << "Good matches found: " << good_matches.size() << endl;
     
    return 0;
}
Example #17
0
int main()
{
	Mat object = imread( "/home/pi/opencv/darkL2.jpg", CV_LOAD_IMAGE_GRAYSCALE );
    //Mat inputImg = imread( "/home/pi/opencv/block.jpg", CV_LOAD_IMAGE_GRAYSCALE );
	//Mat object;
	//resize(inputImg,object,Size(0,0),0.03,0.03);

    if( !object.data )
    {
        cout<< "Error reading object " << endl;
        return -1;
    }
	printf("read image\n");
    //Detect the keypoints using SURF Detector
    int minHessian = 300;

    SurfFeatureDetector detector( minHessian );
    vector<KeyPoint> kp_object;

    detector.detect( object, kp_object );
	//printf("detect keypoints\n");

    //Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    Mat des_object;

    extractor.compute( object, kp_object, des_object );

    FlannBasedMatcher matcher;

    //VideoCapture cap(0);
	//VideoCapture cap("/home/pi/opencv/mouse2.mp4" );
	Mat cap = imread( "/home/pi/opencv/photo.jpg", CV_LOAD_IMAGE_COLOR );

	//printf("%f \n",cap.get(CV_CAP_PROP_FPS));

	namedWindow("Good Matches");

    vector<Point2f> obj_corners(4);
	
	//printf("calculate descriptors\n");	
    //Get the corners from the object
    obj_corners[0] = cvPoint(0,0);
    obj_corners[1] = cvPoint( object.cols, 0 );
    obj_corners[2] = cvPoint( object.cols, object.rows );
    obj_corners[3] = cvPoint( 0, object.rows );
	//printf("get corners\n");
    char key = 'a';
    int framecount = 0;

	int angle = 0;
	Mat frame;
	int numCycle = 0;

    while (key != 27)
    {
	/*
	Mat inputVid;	
	Mat frame;
        cap.read(inputVid);
	resize(inputVid, frame, Size(0,0), 0.3,0.3);
	
	
	angle += 10;
	printf("while loop\n");
	int cols = frame.cols;
	int rows = frame.rows;
	Point2f abc(cols/2,rows/2);
	Mat M = getRotationMatrix2D(abc,angle,1);
        warpAffine(cap,frame,M,Size(cols,rows));
	*/
        frame = cap; // use the still image in place of a video frame
        if (framecount < 5 )
        {
            framecount++;
            continue;
        }
	

        Mat des_image, img_matches;
        vector<KeyPoint> kp_image;
        vector<vector<DMatch > > matches;
        vector<DMatch > good_matches;
        vector<Point2f> obj;
        vector<Point2f> scene;
        vector<Point2f> scene_corners(4);
        Mat H;
        Mat image;
	//printf("before color call\n");
        cvtColor(frame, image, CV_BGR2GRAY);

        detector.detect( image, kp_image );
        extractor.compute( image, kp_image, des_image );

        matcher.knnMatch(des_object, des_image, matches, 2);
	//printf("before segfault loop\n");
        for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
        {
            if((matches[i][0].distance < 0.6*(matches[i][1].distance)) && ((int) matches[i].size()<=2 && (int) matches[i].size()>0))
            {
                good_matches.push_back(matches[i][0]);
            }
        }
	//printf("segfault sensitive loop\n");
        //Draw only "good" matches
        drawMatches( object, kp_object, image, kp_image, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

	printf("Number of matches: %d\n", good_matches.size());
        if (good_matches.size() >= 10)

        {
		printf("good matches >= 10\n");
            for( int i = 0; i < good_matches.size(); i++ )
            {
                //Get the keypoints from the good matches
                obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
                scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
            }

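            // Fit the homography with RANSAC so outlier matches do not skew it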
            H = findHomography( obj, scene, CV_RANSAC );

            perspectiveTransform( obj_corners, scene_corners, H);

            //Draw lines between the corners (the mapped object in the scene image )
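            // drawMatches puts the object image to the left of the scene, so the
            // corners are shifted right by object.cols before drawing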
            line( img_matches, scene_corners[0] + Point2f( object.cols, 0), scene_corners[1] + Point2f( object.cols, 0), Scalar(0, 255, 0), 4 );
            line( img_matches, scene_corners[1] + Point2f( object.cols, 0), scene_corners[2] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
            line( img_matches, scene_corners[2] + Point2f( object.cols, 0), scene_corners[3] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
            line( img_matches, scene_corners[3] + Point2f( object.cols, 0), scene_corners[0] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
        }
	//printf("draw good matches\n");
        //Show detected matches
        imshow( "Good Matches", img_matches );
        if ( numCycle < 20 )
        {
            stringstream name;
            name << numCycle;
            string filename = string("Match_") + name.str() + string(".jpg");
            imwrite( filename, img_matches );
            numCycle++;
        }

        key = waitKey(33);
    }
    return 0;
}
Example #18
0
int main(int argc, char * argv[])
{
	if(argc < 2)
	{
		std::cout << "Use: tracker <target_image>" << std::endl;
        return -1;
	}
	std::cout << "Load target: " << std::endl;
    Mat mTarget = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );

    if( !mTarget.data )
    {
        std::cout<< "Error reading target image." << std::endl;
        return -1;
    }
	std::cout << "\t\t\tDone" << std::endl;
    //Detect the keypoints using SURF Detector
    int minHessian = 500;
	
    SurfFeatureDetector detector( minHessian );
    std::vector<KeyPoint> kpTarget;
	
	std::cout << "Analize target image: " << std::endl;
    detector.detect( mTarget, kpTarget );

    //Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    Mat des_object;

    extractor.compute( mTarget, kpTarget, des_object );
	std::cout << "\t\t\tDone" << std::endl;
	std::cout << "Creating macher: " << std::endl;
    FlannBasedMatcher matcher;
    std::cout << "\t\t\tDone" << std::endl;
	std::cout << "Init capture: " << std::endl;
    VideoCapture cap("http://192.168.1.200/videostream.cgi?user=admin&pwd=31415LAS&resolution=32&dummy=.mjpg");
	//VideoCapture cap("http://nidq.no-ip.org/videostream.cgi?user=admin&pwd=31415LAS&resolution=32&dummy=.mjpg");
	std::cout << "\t\t\tDone" << std::endl;
	
    namedWindow("Capture");

    std::vector<Point2f> tgt_corners(4);

    //Get the corners from the object
    tgt_corners[0] = cvPoint(0,0);
    tgt_corners[1] = cvPoint( mTarget.cols, 0 );
    tgt_corners[2] = cvPoint( mTarget.cols, mTarget.rows );
    tgt_corners[3] = cvPoint( 0, mTarget.rows );

    char key = 'a';
    while (key != 27)
    {
        Mat frame;
        
        // Read several frames to flush the stream's buffer and get a fresh image
        for(int i = 0; i < 5; i++) cap >> frame;
        Mat show = frame.clone();
        Mat des_image, img_matches;
        std::vector<KeyPoint> kpImage;
        std::vector<vector<DMatch > > matches;
        std::vector<DMatch > good_matches;
        std::vector<Point2f> obj;
        std::vector<Point2f> scene;
        std::vector<Point2f> scene_corners(4);
        Mat H;
        Mat image;

        cvtColor(frame, image, CV_BGR2GRAY); // VideoCapture frames are BGR, not RGB

        detector.detect( image, kpImage );
        extractor.compute( image, kpImage, des_image );

        matcher.knnMatch(des_object, des_image, matches, 2);

        for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
        {
            if((matches[i][0].distance < 0.6*(matches[i][1].distance)) && ((int) matches[i].size()<=2 && (int) matches[i].size()>0))
            {
                good_matches.push_back(matches[i][0]);
            }
        }

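        // Four correspondences are the minimum needed to estimate a homography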
        if (good_matches.size() >= 4)
        {
            for( int i = 0; i < good_matches.size(); i++ )
            {
                //Get the keypoints from the good matches
                obj.push_back( kpTarget[ good_matches[i].queryIdx ].pt );
                scene.push_back( kpImage[ good_matches[i].trainIdx ].pt );
            }

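            // Homography from target to scene, fitted with RANSAC to reject outliers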
            H = findHomography( obj, scene, CV_RANSAC );

            perspectiveTransform( tgt_corners, scene_corners, H);
            
            // Centre of the projected target: average of the four mapped corners
            Point2f center(0,0);
            for(int i = 0; i < 4; i++)
            {
                center.x += scene_corners[i].x;
                center.y += scene_corners[i].y;
            }
            center.x /= 4; center.y /= 4;

            circle( show, center, 5, Scalar( 255, 255, 0) );

            adjust(center, frame.cols, frame.rows); // adjust() is defined elsewhere in the original program
        }
        // Draw a crosshair through the frame centre
        line( show, Point2f( show.cols/2, 0), Point2f( show.cols/2, show.rows), Scalar(0, 255, 0), 4 );
        line( show, Point2f( 0, show.rows/2), Point2f( show.cols, show.rows/2), Scalar(0, 255, 0), 4 );
            
        //Show detected matches
        imshow( "Capture", show );

        key = waitKey(1);
    }
    return 0;
}