Example #1
void Target::draw(cv::Mat cam_img, std::vector< cv::KeyPoint > cam_keypoints,
                  std::vector< cv::DMatch > good_matches, cv::Mat& img_matches)
{
  cv::drawMatches(image, keypoints, cam_img, cam_keypoints, good_matches, img_matches,
                  cv::Scalar::all(-1), cv::Scalar::all(-1), std::vector< char >(),
                  cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

  // step 6: Localize the object
  std::vector< cv::Point2f > obj;
  std::vector< cv::Point2f > scene;

  for (int i = 0; i < good_matches.size(); i++)
  {
    // Get the keypoints from the good matches
    obj.push_back(keypoints[good_matches[i].queryIdx].pt);
    scene.push_back(cam_keypoints[good_matches[i].trainIdx].pt);
  }

  cv::Mat H = cv::findHomography(obj, scene, CV_RANSAC, 2);

  // step 7: Get the corners from the target
  std::vector< cv::Point2f > scene_corners(4);

  cv::perspectiveTransform(centerAndCorners, scene_corners, H);

  // step 8: Draw lines between the corners (the mapped object in the scene - image_2 )
  cv::line(img_matches, scene_corners[0] + centerAndCorners[1],
           scene_corners[1] + centerAndCorners[1], cv::Scalar(0, 255, 0), 4);
  cv::line(img_matches, scene_corners[1] + centerAndCorners[1],
           scene_corners[2] + centerAndCorners[1], cv::Scalar(0, 255, 0), 4);
  cv::line(img_matches, scene_corners[2] + centerAndCorners[1],
           scene_corners[3] + centerAndCorners[1], cv::Scalar(0, 255, 0), 4);
  cv::line(img_matches, scene_corners[3] + centerAndCorners[1],
           scene_corners[0] + centerAndCorners[1], cv::Scalar(0, 255, 0), 4);

  ROS_DEBUG("corner[0] %f %f", scene_corners[0].x, scene_corners[0].y);
  ROS_DEBUG("corner[1] %f %f", scene_corners[1].x, scene_corners[1].y);
  ROS_DEBUG("corner[2] %f %f", scene_corners[2].x, scene_corners[2].y);
  ROS_DEBUG("corner[3] %f %f", scene_corners[3].x, scene_corners[3].y);

  ROS_DEBUG("end Target::draw");
}
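
Target::draw relies on class members that are not shown in this snippet (image, keypoints, centerAndCorners). A minimal sketch of how those members might be declared, purely as an assumption; the layout of centerAndCorners is a guess inferred from how draw() indexes it.

// Hypothetical sketch -- the real Target class is not shown in this example.
class Target
{
public:
  void draw(cv::Mat cam_img, std::vector< cv::KeyPoint > cam_keypoints,
            std::vector< cv::DMatch > good_matches, cv::Mat& img_matches);

private:
  cv::Mat image;                               // reference image of the target
  std::vector< cv::KeyPoint > keypoints;       // keypoints detected on the reference image
  std::vector< cv::Point2f > centerAndCorners; // center and corners of the reference image;
                                               // element [1] doubles as the drawing offset in draw()
};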
Example #2
bool orbMatch(cv::Mat& inImageScene, cv::Mat& inImageObj, cv::Rect& outBoundingBox, unsigned int inMinMatches=2, float inKnnRatio=0.7)
{
	//vector of keypoints
	std::vector< cv::KeyPoint > keypointsO;
	std::vector< cv::KeyPoint > keypointsS;

	cv::Mat descriptors_object, descriptors_scene;

	cv::Mat outImg;
	inImageScene.copyTo(outImg);

	//-- Step 1: Extract keypoints
	cv::OrbFeatureDetector orb(ORB_NUM_FEATURES);
	orb.detect(inImageScene, keypointsS);
	if (keypointsS.size() < ORB_MIN_MATCHES)
	{
		//cout << "Not enough keypoints S, object not found>" << keypointsS.size() << endl;
		return false;
	}
	orb.detect(inImageObj, keypointsO);
	if (keypointsO.size() < ORB_MIN_MATCHES)
	{
		//cout << "Not enough keypoints O, object not found>" << keypointsO.size() << endl;
		return false;
	}

	//Calculate descriptors (feature vectors)
	cv::OrbDescriptorExtractor extractor;
	extractor.compute(inImageScene, keypointsS, descriptors_scene);
	extractor.compute(inImageObj, keypointsO, descriptors_object);

	//Matching descriptor vectors using a brute-force matcher;
	//ORB descriptors are binary, so Hamming distance is the appropriate norm
	cv::BFMatcher matcher(cv::NORM_HAMMING);
	std::vector< std::vector< cv::DMatch >  > matches;
	matcher.knnMatch(descriptors_object, descriptors_scene, matches, 2);
	std::vector< cv::DMatch > good_matches;
	good_matches.reserve(matches.size());

	for (size_t i = 0; i < matches.size(); ++i)
	{
		//knnMatch with k=2 returns at most two neighbours per descriptor,
		//so requiring three would discard every match
		if (matches[i].size() < 2)
			continue;

		const cv::DMatch &m1 = matches[i][0];
		const cv::DMatch &m2 = matches[i][1];

		if (m1.distance <= inKnnRatio * m2.distance)
			good_matches.push_back(m1);
	}

	if (good_matches.size() >= inMinMatches)
	{
		std::vector< cv::Point2f > obj;
		std::vector< cv::Point2f > scene;

		for (unsigned int i = 0; i < good_matches.size(); i++)
		{
			// Get the keypoints from the good matches
			obj.push_back(keypointsO[good_matches[i].queryIdx].pt);
			scene.push_back(keypointsS[good_matches[i].trainIdx].pt);
		}

		cv::Mat H = findHomography(obj, scene, CV_RANSAC);

		// Get the corners from the image_1 ( the object to be "detected" )
		std::vector< cv::Point2f > obj_corners(4);
		obj_corners[0] = cvPoint(0, 0); obj_corners[1] = cvPoint(inImageObj.cols, 0);
		obj_corners[2] = cvPoint(inImageObj.cols, inImageObj.rows); obj_corners[3] = cvPoint(0, inImageObj.rows);
		std::vector< cv::Point2f > scene_corners(4);

		perspectiveTransform(obj_corners, scene_corners, H);

		// Draw lines between the corners (the mapped object in the scene - image_2 )
		line(outImg, scene_corners[0], scene_corners[1], cv::Scalar(255, 0, 0), 2); //TOP line
		line(outImg, scene_corners[1], scene_corners[2], cv::Scalar(255, 0, 0), 2);
		line(outImg, scene_corners[2], scene_corners[3], cv::Scalar(255, 0, 0), 2);
		line(outImg, scene_corners[3], scene_corners[0], cv::Scalar(255, 0, 0), 2);

		//imshow("Scene", outImg);
		//imshow("Obj", inImageObj);
		//cvWaitKey(5);

		return true;
	}

	return false;
}
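
A minimal usage sketch for orbMatch. ORB_NUM_FEATURES and ORB_MIN_MATCHES are constants defined elsewhere in the original file, so the values below are assumptions, as are the image paths.

#include <iostream>
#include <opencv2/highgui/highgui.hpp>

#define ORB_NUM_FEATURES 500 // assumed value
#define ORB_MIN_MATCHES 8    // assumed value

int main()
{
	cv::Mat scene = cv::imread("scene.png", CV_LOAD_IMAGE_GRAYSCALE);
	cv::Mat object = cv::imread("object.png", CV_LOAD_IMAGE_GRAYSCALE);

	cv::Rect box;
	if (orbMatch(scene, object, box))
		std::cout << "object found at (" << box.x << ", " << box.y << "), "
		          << box.width << "x" << box.height << std::endl;
	return 0;
}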
Example #3
bool RelicDetect::Match(RelicDetect obj, RelicDetect scn)
{
	FlannBasedMatcher matcher;
	std::vector< DMatch > matches;
	
	matcher.match(obj.descriptors, scn.descriptors, matches);
	double max_dist = 0; double min_dist = 100;
	//-- Quick calculation of max and min distances between keypoints
	for (int i = 0; i < obj.descriptors.rows; i++)
	{
		double dist = matches[i].distance;
		if (dist < min_dist) min_dist = dist;
		if (dist > max_dist) max_dist = dist;
	}
	printf("-- Max dist : %f \n", max_dist);
	printf("-- Min dist : %f \n", min_dist);
	//-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
	std::vector< DMatch > good_matches;
	for (int i = 0; i < obj.descriptors.rows; i++)
	{
		if (matches[i].distance <= 3 * min_dist)
		{
			good_matches.push_back(matches[i]);
		}
	}
	max_dist = 0; min_dist = 100;
	double total_min_dist = 0;
	for (size_t i = 0; i < good_matches.size(); i++)
	{
		double dist = good_matches[i].distance;
		total_min_dist += dist;
		if (dist < min_dist) min_dist = dist;
		if (dist > max_dist) max_dist = dist;

	}
	printf("-- good matches Max dist : %f \n", max_dist);
	printf("-- good matches Min dist : %f \n", min_dist);
	printf("-- good matches total Min dist : %f \n", total_min_dist);
	cout << "-- good matches size " << good_matches.size() << endl;
	cout << "-- dist per match" << total_min_dist / (double)good_matches.size() << endl;
	Mat img_matches;
	drawMatches(obj.img_color, obj.keypoints, scn.img_color, scn.keypoints,
		good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
		std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
	//imshow("matches", img_matches);
	//-- Localize the object
	std::vector<Point2f> obj_points;
	std::vector<Point2f> scn_points;
	for (size_t i = 0; i < good_matches.size(); i++)
	{
		//-- Get the keypoints from the good matches
		obj_points.push_back(obj.keypoints[good_matches[i].queryIdx].pt);
		scn_points.push_back(scn.keypoints[good_matches[i].trainIdx].pt);
	}
	Mat H = cv::findHomography(obj_points, scn_points, RANSAC);
	cout << "H:" << endl;
	for (int i = 0; i < H.rows; i++)
	{
		for (int j = 0; j < H.cols; j++)
		{
		{
			cout << H.at<double>(i, j) << " ";
		}
		cout << endl;
	}
	//-- Get the corners from the image_1 ( the object to be "detected" )
	std::vector<Point2f> obj_corners(4);
	obj_corners[0] = cvPoint(0, 0);
	obj_corners[1] = cvPoint(obj.img_color.cols, 0);
	obj_corners[2] = cvPoint(obj.img_color.cols, obj.img_color.rows);
	obj_corners[3] = cvPoint(0, obj.img_color.rows);
	std::vector<Point2f> scene_corners(4);
	perspectiveTransform(obj_corners, scene_corners, H);
	cout << "object area" << contourArea(obj_corners) << endl;
	cout << "scene detected area" << contourArea(scene_corners) << endl;
	auto scene_area = contourArea(scene_corners);
	//-- Draw lines between the corners (the mapped object in the scene - image_2 )
	line(img_matches, scene_corners[0] + Point2f(obj.img_color.cols, 0), scene_corners[1] + Point2f(obj.img_color.cols, 0), Scalar(0, 255, 0), 4);
	line(img_matches, scene_corners[1] + Point2f(obj.img_color.cols, 0), scene_corners[2] + Point2f(obj.img_color.cols, 0), Scalar(0, 255, 0), 4);
	line(img_matches, scene_corners[2] + Point2f(obj.img_color.cols, 0), scene_corners[3] + Point2f(obj.img_color.cols, 0), Scalar(0, 255, 0), 4);
	line(img_matches, scene_corners[3] + Point2f(obj.img_color.cols, 0), scene_corners[0] + Point2f(obj.img_color.cols, 0), Scalar(0, 255, 0), 4);
	//-- Show detected matches
	imshow("Good Matches & Object detection", img_matches);
	waitKey(0);
	// accept the detection only if the projected quadrilateral is plausibly large
	return scene_area > 1000;
}
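
RelicDetect::Match reads descriptors, keypoints, and img_color from both arguments, but that setup is not shown. A plausible sketch of how such an object might be populated, assuming the OpenCV 2.4 nonfree SURF module and a hypothetical constructor:

// Hypothetical sketch -- the real RelicDetect class is not shown in this example.
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/nonfree/features2d.hpp>
using namespace cv;

class RelicDetect
{
public:
	Mat img_color;                   // color image, used by drawMatches above
	std::vector<KeyPoint> keypoints; // SURF keypoints
	Mat descriptors;                 // SURF descriptors

	explicit RelicDetect(const std::string& path)
	{
		img_color = imread(path);
		SurfFeatureDetector detector(400); // assumed Hessian threshold
		detector.detect(img_color, keypoints);
		SurfDescriptorExtractor extractor;
		extractor.compute(img_color, keypoints, descriptors);
	}

	static bool Match(RelicDetect obj, RelicDetect scn); // defined above
};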
/*
 * @function surf_feature_detect_bruteforce_RANSAC_Homography SURF feature extraction and matching,
 *           RANSAC outlier elimination, and object marking
 * @return null
 * @method SURF feature detector
 * @method SURF descriptor
 * @method findFundamentalMat for RANSAC outlier removal
 * @method findHomography to find the perspective transform matrix
 */
void surf_feature_detect_bruteforce_RANSAC_Homography(Mat SourceImg, Mat SceneImg, Mat imageMatches, const char* winName)
{
	vector<KeyPoint> keyPoints1, keyPoints2;
	SurfFeatureDetector detector(400);
	detector.detect(SourceImg, keyPoints1); //detect keypoints in the source image
	detector.detect(SceneImg, keyPoints2); //detect keypoints in the scene image

	SurfDescriptorExtractor surfDesc;
	Mat SourceImgDescriptor, SceneImgDescriptor;
	surfDesc.compute(SourceImg, keyPoints1, SourceImgDescriptor); //SURF descriptors for the source image
	surfDesc.compute(SceneImg, keyPoints2, SceneImgDescriptor); //SURF descriptors for the scene image

	//Match the feature points of the two images, then keep only the 30 best matches
	BruteForceMatcher< L2<float> > matcher;
	vector<DMatch> matches;
	matcher.match(SourceImgDescriptor, SceneImgDescriptor, matches);
	if (matches.size() > 30)
	{
		std::nth_element(matches.begin(), matches.begin() + 29, matches.end());
		matches.erase(matches.begin() + 30, matches.end());
	}

	//Alternative: FLANN-based matching
	//vector<DMatch> matches;
	//DescriptorMatcher *pMatcher = new FlannBasedMatcher;
	//pMatcher->match(SourceImgDescriptor, SceneImgDescriptor, matches);
	//delete pMatcher;

	//keyPoints1: keypoints extracted from image 1
	//keyPoints2: keypoints extracted from image 2
	//matches: the keypoint matches
	int ptCount = (int)matches.size();
	Mat p1(ptCount, 2, CV_32F);
	Mat p2(ptCount, 2, CV_32F);
	Point2f pt;
	for(int i = 0; i < ptCount; i++)
	{
		pt = keyPoints1[matches[i].queryIdx].pt;
		p1.at<float>(i, 0) = pt.x;
		p1.at<float>(i, 1) = pt.y;

		pt = keyPoints2[matches[i].trainIdx].pt;
		p2.at<float>(i, 0) = pt.x;
		p2.at<float>(i, 1) = pt.y;
	}
	Mat m_Fundamental;
	vector<uchar> m_RANSACStatus;
	m_Fundamental = findFundamentalMat(p1, p2, m_RANSACStatus, FM_RANSAC);
	int OutlinerCount = 0;
	for(int i = 0; i < ptCount; i++)
	{
		if(m_RANSACStatus[i] == 0)
		{
			OutlinerCount++;
		}
	}

	// Collect the inliers
	vector<Point2f> m_LeftInlier;
	vector<Point2f> m_RightInlier;
	vector<DMatch> m_InlierMatches;

	// The three vectors above store the inlier points and their match relations
	int InlinerCount = ptCount - OutlinerCount;
	m_InlierMatches.resize(InlinerCount);
	m_LeftInlier.resize(InlinerCount);
	m_RightInlier.resize(InlinerCount);
	InlinerCount = 0;
	for (int i=0; i<ptCount; i++)
	{
		if (m_RANSACStatus[i] != 0)
		{
			m_LeftInlier[InlinerCount].x = p1.at<float>(i, 0);
			m_LeftInlier[InlinerCount].y = p1.at<float>(i, 1);
			m_RightInlier[InlinerCount].x = p2.at<float>(i, 0);
			m_RightInlier[InlinerCount].y = p2.at<float>(i, 1);
			m_InlierMatches[InlinerCount].queryIdx = InlinerCount;
			m_InlierMatches[InlinerCount].trainIdx = InlinerCount;
			InlinerCount++;
		}
	}

	// Convert the inliers into the format drawMatches expects
	vector<KeyPoint> key1(InlinerCount);
	vector<KeyPoint> key2(InlinerCount);
	KeyPoint::convert(m_LeftInlier, key1);
	KeyPoint::convert(m_RightInlier, key2);

	//Show the inlier matches that survive the fundamental-matrix check
	drawMatches(SourceImg, key1, SceneImg, key2, m_InlierMatches, imageMatches);
	//drawKeypoints(SourceImg, key1, SceneImg, Scalar(255, 0, 0), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
	
	vector<Point2f> obj;
	vector<Point2f> scene;
	for(unsigned int i = 0; i < m_InlierMatches.size(); i++)
	{
		obj.push_back(key1[m_InlierMatches[i].queryIdx].pt); //query image, i.e. the target image
		scene.push_back(key2[m_InlierMatches[i].trainIdx].pt); //train image, i.e. the scene image
	}
	//Solve for the transform matrix.
	//Like getPerspectiveTransform, which maps 4 corresponding points between the original
	//and warped images, findHomography estimates the perspective transform directly.
	Mat H = findHomography(obj, scene, CV_RANSAC);
	vector<Point2f> obj_corners(4);
	obj_corners[0] = cvPoint(0, 0);
	obj_corners[1] = cvPoint(SourceImg.cols, 0);
	obj_corners[2] = cvPoint(SourceImg.cols, SourceImg.rows);
	obj_corners[3] = cvPoint(0, SourceImg.rows);
	vector<Point2f> scene_corners(4);
	//Perspective transform: project the corners onto the new view plane
	//using the transform matrix estimated above
	perspectiveTransform(obj_corners, scene_corners, H);

	line(imageMatches, scene_corners[0] + Point2f(SourceImg.cols, 0), scene_corners[1] + Point2f(SourceImg.cols, 0), Scalar(0, 0, 255), 4);
	line(imageMatches, scene_corners[1] + Point2f(SourceImg.cols, 0), scene_corners[2] + Point2f(SourceImg.cols, 0), Scalar(0, 0, 255), 4);
	line(imageMatches, scene_corners[2] + Point2f(SourceImg.cols, 0), scene_corners[3] + Point2f(SourceImg.cols, 0), Scalar(0, 0, 255), 4);
	line(imageMatches, scene_corners[3] + Point2f(SourceImg.cols, 0), scene_corners[0] + Point2f(SourceImg.cols, 0), Scalar(0, 0, 255), 4);
	imshow(winName, imageMatches);

	imwrite("feature_detect.jpg", imageMatches);
}
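
A minimal usage sketch for the function above; the image paths and window name are placeholders.

int main()
{
	Mat sourceImg = imread("object.jpg");
	Mat sceneImg = imread("scene.jpg");
	Mat matchImg;
	surf_feature_detect_bruteforce_RANSAC_Homography(sourceImg, sceneImg, matchImg, "SURF bruteforce RANSAC");
	waitKey(0);
	return 0;
}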
  void imageCb(const sensor_msgs::ImageConstPtr& msg)
  {
    //get image cv::Pointer
    cv_bridge::CvImagePtr cv_ptr;

    //acquire image frame
    try
    {
      cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
    }
    catch (cv_bridge::Exception& e)
    {
      ROS_ERROR("cv_bridge exception: %s", e.what());
      return;
    }

    const std::string filename =  
    "/home/cam/Documents/catkin_ws/src/object_detection/positive_images/wrench.png";

    //read in calibration image
    cv::Mat object = cv::imread(filename, 
      CV_LOAD_IMAGE_GRAYSCALE);

    cv::namedWindow("Good Matches", CV_WINDOW_AUTOSIZE);
    //SURF Detector, and descriptor parameters
    int minHess=2000;
    std::vector<cv::KeyPoint> kpObject, kpImage;
    cv::Mat desObject, desImage;

    //Display keypoints on training image
    cv::Mat interestPointObject=object;

    //SURF Detector, and descriptor parameters, match object initialization
    cv::SurfFeatureDetector detector(minHess);
    detector.detect(object, kpObject);
    cv::SurfDescriptorExtractor extractor;
    extractor.compute(object, kpObject, desObject);
    cv::FlannBasedMatcher matcher;

    //Object corner cv::Points for plotting box
    std::vector<cv::Point2f> obj_corners(4);
    obj_corners[0] = cvPoint(0,0);
    obj_corners[1] = cvPoint( object.cols, 0 );
    obj_corners[2] = cvPoint( object.cols, object.rows );
    obj_corners[3] = cvPoint( 0, object.rows );

    double frameCount = 0;
    float thresholdMatchingNN=0.7;
    unsigned int thresholdGoodMatches=4;
    unsigned int thresholdGoodMatchesV[]={4,5,6,7,8,9,10};

    char escapeKey = 'k';

    for (int j=0; j<7;j++)
    {
      thresholdGoodMatches = thresholdGoodMatchesV[j];
      
      while (escapeKey != 'q')
      {
        frameCount++;
        cv::Mat image;
        cvtColor(cv_ptr->image, image, CV_BGR2GRAY); //the frame was requested as BGR8 above

        cv::Mat des_image, img_matches, H;
        std::vector<cv::KeyPoint> kp_image;
        std::vector<std::vector<cv::DMatch > > matches;
        std::vector<cv::DMatch> good_matches;
        std::vector<cv::Point2f> obj;
        std::vector<cv::Point2f> scene;
        std::vector<cv::Point2f> scene_corners(4);

        detector.detect( image, kp_image );
        extractor.compute( image, kp_image, des_image );
        matcher.knnMatch(desObject, des_image, matches, 2);

        //NOTE: this loop is sensitive to segfaults, hence the guarded upper bound
        for(int i = 0; i < std::min(des_image.rows-1, (int) matches.size()); i++) 
        {
          if((matches[i][0].distance < thresholdMatchingNN*(matches[i][1].distance)) 
            && ((int) matches[i].size()<=2 && (int) matches[i].size()>0))
            {
                good_matches.push_back(matches[i][0]);
            }
        }

        //Draw only "good" matches
        cv::drawMatches(object, kpObject, image, kp_image, good_matches, img_matches, 
          cv::Scalar::all(-1), cv::Scalar::all(-1), std::vector<char>(), 
          cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
        
        if (good_matches.size() >= thresholdGoodMatches)
        {

          //Display that the object is found
          cv::putText(img_matches, "Object Found", cvPoint(10,50), 0, 2, 
            cvScalar(0,0,250), 1, CV_AA);
            for(unsigned int i = 0; i < good_matches.size(); i++ )
            {
              //Get the keypoints from the good matches
              obj.push_back( kpObject[ good_matches[i].queryIdx ].pt );
              scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
            }

            H = findHomography( obj, scene, CV_RANSAC );

            perspectiveTransform( obj_corners, scene_corners, H);

            //Draw lines between the corners (the mapped object in the scene image )
            cv::line( img_matches, scene_corners[0] + cv::Point2f( object.cols, 0), 
              scene_corners[1] + cv::Point2f( object.cols, 0), cv::Scalar(0, 255, 0), 4 );
            cv::line( img_matches, scene_corners[1] + cv::Point2f( object.cols, 0), 
              scene_corners[2] + cv::Point2f( object.cols, 0), cv::Scalar( 0, 255, 0), 4 );
            cv::line( img_matches, scene_corners[2] + cv::Point2f( object.cols, 0), 
              scene_corners[3] + cv::Point2f( object.cols, 0), cv::Scalar( 0, 255, 0), 4 );
            cv::line( img_matches, scene_corners[3] + cv::Point2f( object.cols, 0), 
              scene_corners[0] + cv::Point2f( object.cols, 0), cv::Scalar( 0, 255, 0), 4 );
        }
        else
        {
          putText(img_matches, "", cvPoint(10,50), 0, 3, cvScalar(0,0,250), 1, CV_AA);
        }

        //Show detected matches
        imshow("Good Matches", img_matches);
        
        escapeKey=cvWaitKey(10);

        if(frameCount>10)
        {
          escapeKey='q';
        }


      }

      frameCount=0;
      escapeKey='a';
    }

    // Update GUI Window
    //cv::namedWindow(OPENCV_WINDOW);
    //cv::imshow(OPENCV_WINDOW, cv_ptr->image);
    //cv::waitKey(3);
    
    // Output modified video stream
    image_pub_.publish(cv_ptr->toImageMsg());
  }
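
imageCb is a member of an image_transport subscriber class whose declaration is not shown. A minimal sketch of the usual cv_bridge boilerplate around it; the topic names are assumptions.

class ImageConverter
{
  ros::NodeHandle nh_;
  image_transport::ImageTransport it_;
  image_transport::Subscriber image_sub_;
  image_transport::Publisher image_pub_;

public:
  ImageConverter() : it_(nh_)
  {
    // topic names are placeholders; adjust to the actual camera driver
    image_sub_ = it_.subscribe("/camera/image_raw", 1, &ImageConverter::imageCb, this);
    image_pub_ = it_.advertise("/object_detection/output_video", 1);
  }

  void imageCb(const sensor_msgs::ImageConstPtr& msg); // as above
};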
Example #6
//Planar object detection
void Feature::objectDetect( Mat& objectImage, Mat& sceneImage , Mat&outImage,
							Mat& objectDescriptor,Mat& sceneDescriptor, vector<DMatch>& matches,
							vector<KeyPoint>& objectKeypoints, vector<KeyPoint>& sceneKeypoints)
{
	double max_dist = 0; double min_dist = 100;

	//Max and min distances between the matched keypoints
	for( int i = 0; i < objectDescriptor.rows; i++ )
	{ 
		double dist = matches[i].distance;
		if( dist < min_dist ) 
			min_dist = dist;
		if( dist > max_dist ) 
			max_dist = dist;
	}


	//Keep the stronger matches (a radius threshold would also work)
	std::vector< DMatch > good_matches;
	double acceptedDist = 2*min_dist;

	for( int i = 0; i < objectDescriptor.rows; i++ )
	{
		if( matches[i].distance < acceptedDist )
		{ 
			good_matches.push_back( matches[i]); 
		}
	}
	
	//Draw the matching result
	drawMatches( objectImage, objectKeypoints, sceneImage, sceneKeypoints,
				 good_matches, outImage, Scalar::all(-1), Scalar::all(-1),
				 vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

	//Collect the locations of the good matches
	std::vector<Point2f> object; //points in the object image
	std::vector<Point2f> scene; //points in the scene image
	for( size_t i = 0; i < good_matches.size(); i++ )
	{
		object.push_back( objectKeypoints[ good_matches[i].queryIdx ].pt );
		scene.push_back( sceneKeypoints[ good_matches[i].trainIdx ].pt );
	}

	//Perspective transform relating the object image to the scene image
	Mat H = findHomography( object, scene, CV_RANSAC );

	//Coordinates of the four corners of the object image
	std::vector<Point2f> object_corners(4);

	object_corners[0] = cvPoint(0,0); 
	object_corners[1] = cvPoint( objectImage.cols, 0 );
	object_corners[2] = cvPoint( objectImage.cols, objectImage.rows ); 
	object_corners[3] = cvPoint( 0, objectImage.rows );

	std::vector<Point2f> scene_corners(4);

	perspectiveTransform( object_corners, scene_corners, H); //perspective transform

	//Draw the bounding box on the scene half of the output image
	line( outImage, scene_corners[0] + Point2f( objectImage.cols, 0), scene_corners[1] + Point2f( objectImage.cols, 0), Scalar(0, 255, 0), 4 );
	line( outImage, scene_corners[1] + Point2f( objectImage.cols, 0), scene_corners[2] + Point2f( objectImage.cols, 0), Scalar( 0, 255, 0), 4 );
	line( outImage, scene_corners[2] + Point2f( objectImage.cols, 0), scene_corners[3] + Point2f( objectImage.cols, 0), Scalar( 0, 255, 0), 4 );
	line( outImage, scene_corners[3] + Point2f( objectImage.cols, 0), scene_corners[0] + Point2f( objectImage.cols, 0), Scalar( 0, 255, 0), 4 );

}
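
objectDetect expects the keypoints, descriptors, and raw matches to be computed by the caller. A minimal driver sketch, assuming a SURF pipeline consistent with the other examples; the Feature instance and image paths are hypothetical.

// Hypothetical driver -- not part of the original snippet.
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/nonfree/features2d.hpp>
using namespace cv;
using namespace std;

int main()
{
	Mat objectImage = imread("object.jpg");
	Mat sceneImage = imread("scene.jpg");

	SurfFeatureDetector detector(400); // assumed Hessian threshold
	vector<KeyPoint> objectKeypoints, sceneKeypoints;
	detector.detect(objectImage, objectKeypoints);
	detector.detect(sceneImage, sceneKeypoints);

	SurfDescriptorExtractor extractor;
	Mat objectDescriptor, sceneDescriptor;
	extractor.compute(objectImage, objectKeypoints, objectDescriptor);
	extractor.compute(sceneImage, sceneKeypoints, sceneDescriptor);

	FlannBasedMatcher matcher;
	vector<DMatch> matches;
	matcher.match(objectDescriptor, sceneDescriptor, matches);

	Mat outImage;
	Feature feature;
	feature.objectDetect(objectImage, sceneImage, outImage,
	                     objectDescriptor, sceneDescriptor, matches,
	                     objectKeypoints, sceneKeypoints);
	imshow("detection", outImage);
	waitKey(0);
	return 0;
}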
bool Homography::extract(cv::Mat &H, irr::core::vector2di *corners, irr::core::vector3df *position, irr::core::vector3df *angles, int refine) {
    
    if ( matches.size() > 3 && objectKeyPoints.size() < sceneKeyPoints.size() )
    {
        std::vector<cv::Point2f> objectPoints;
        std::vector<cv::Point2f> scenePoints;
        
        // get the keypoints from the goodmatches
        for( int i = 0; i < matches.size(); i++ )
        {
            objectPoints.push_back( objectKeyPoints[ matches[i].queryIdx ].pt );
            scenePoints.push_back( sceneKeyPoints[ matches[i].trainIdx ].pt );
        }
        
        // find the homography of the keypoints.
        H = cv::findHomography( objectPoints, scenePoints, CV_RANSAC );
        
        std::vector<cv::Point2f> obj_corners(4);
        std::vector<cv::Point2f> scene_corners(4);
        obj_corners[0] = cvPoint( 0,          0 );
        obj_corners[1] = cvPoint( objectSize.width, 0 );
        obj_corners[2] = cvPoint( objectSize.width, objectSize.height );
        obj_corners[3] = cvPoint( 0,          objectSize.height );
        
        // get the 2D points for the homography corners
        perspectiveTransform( obj_corners, scene_corners, H);
        
        if (refine > 0) {
            cv::Mat sceneCopyCopy = sceneCopy.clone();
            cv::warpPerspective(sceneCopy, sceneCopy, H, objectSize, cv::WARP_INVERSE_MAP | cv::INTER_CUBIC);
            cv::Mat H2;
            analyze(sceneCopy);
            if (extract(H2, NULL, NULL, NULL, refine - 1)) {
                H *= H2;
                perspectiveTransform( obj_corners, scene_corners, H);
            }
        }
        
        // give the caller the corners of the 2D plane
        if (corners != NULL)
            for (int i = 0; i < 4; i++) {
                corners[i] = irr::core::vector2di(scene_corners[i].x, scene_corners[i].y);
            }
        
        // init the rotation and translation vectors
        cv::Mat raux(3, 1, CV_64F), taux(3, 1, CV_64F);
        
        // calculating 3D points
        float maxSize = std::max(objectSize.width, objectSize.height);
        float unitW = objectSize.width / maxSize;
        float unitH = objectSize.height / maxSize;
        
        // get the rotation and translation vectors
        std::vector<cv::Point3f> scene_3d_corners(4);
        scene_3d_corners[0] = cv::Point3f(-unitW, -unitH, 0);
        scene_3d_corners[1] = cv::Point3f( unitW, -unitH, 0);
        scene_3d_corners[2] = cv::Point3f( unitW,  unitH, 0);
        scene_3d_corners[3] = cv::Point3f(-unitW,  unitH, 0);
        cv::solvePnP(scene_3d_corners, scene_corners, getCamIntrinsic(), cv::Mat(), raux, taux);
        
        // give the caller the 3D plane position and angle
        if (position != NULL)
            position->set(taux.at<double>(0, 0), -taux.at<double>(1, 0), taux.at<double>(2, 0));
        if (angles != NULL)
            angles->set(-raux.at<double>(0, 0) * irr::core::RADTODEG, raux.at<double>(1, 0) * irr::core::RADTODEG, -raux.at<double>(2, 0) * irr::core::RADTODEG);
        
        return true;
    }
    
    return false;
}
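
Homography::extract also depends on members that are not shown here, including getCamIntrinsic(), which solvePnP expects to return a 3x3 camera matrix. A minimal sketch of a plausible implementation, assuming a pinhole model with a hypothetical focal length and a 640x480 frame:

// Hypothetical sketch -- the real implementation is not shown in this example.
cv::Mat Homography::getCamIntrinsic()
{
    const double f = 700.0;              // assumed focal length in pixels
    const double cx = 320.0, cy = 240.0; // assumed principal point (640x480 frames)
    cv::Mat K = (cv::Mat_<double>(3, 3) <<
                 f, 0, cx,
                 0, f, cy,
                 0, 0, 1);
    return K;
}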
pcl::PointCloud<briskDepth> BDMatch(pcl::PointCloud<briskDepth> a, pcl::PointCloud<briskDepth> b)
{
    std::cout << "The count is: " << count << std::endl;
    pcl::PointCloud<briskDepth> pclMatch;
    try
    {
        cv::Mat descriptorsA;
        cv::Mat descriptorsB;
        for(int i =0; i < a.size(); i++)
        {
            descriptorsA.push_back(a[i].descriptor);
        }

        for(int i =0; i < b.size(); i++)
        {
            descriptorsB.push_back(b[i].descriptor);
        }

        cv::BFMatcher matcher(cv::NORM_HAMMING);
        std::vector< cv::DMatch > matches;

        matcher.match( descriptorsA, descriptorsB, matches );

        double max_dist = 0; double min_dist = 1000;

        StdDeviation sd;
        std::vector<double> temp(descriptorsA.rows); //avoid a variable-length array (non-standard C++)

        for (int i =0; i < descriptorsA.rows;i++)
        {
            double dist = matches[i].distance;
            if(max_dist<dist) max_dist = dist;
            if(min_dist>dist) min_dist = dist;
            //std::cout << dist << "\t";
            temp[i] = dist;
        }

        //std::cout << std::endl;
       // std::cout << " Brisk max dist " << max_dist << std::endl;
       // std::cout << " Brisk mins dist " << min_dist << std::endl;

        sd.SetValues(&temp[0], descriptorsA.rows);

        double mean = sd.CalculateMean();
        double variance = sd.CalculateVariane();
        double samplevariance = sd.CalculateSampleVariane();
        double sampledevi = sd.GetSampleStandardDeviation();
        double devi = sd.GetStandardDeviation();

        std::cout << "Brisk\t" << descriptorsA.rows << "\t"
                << mean << "\t"
                << variance << "\t"
                << samplevariance << "\t"
                << devi << "\t"
                << sampledevi << "\n";

        std::vector< cv::DMatch > good_matches;

        for (int i=0;i<descriptorsA.rows;i++)
        {
            //if( matches[i].distance<10)
            if( matches[i].distance<max_dist/2)
            {
                good_matches.push_back(matches[i]);
                pclMatch.push_back(a[i]);
            }
        }

        cv::Mat img_matches;
        //the second (train) image must be drawn with its own keypoints
        cv::drawMatches( brisk_lastImg, brisk_lastKeypoints, brisk_currentImg, brisk_currentKeypoints,
                           good_matches, img_matches, cv::Scalar::all(-1), cv::Scalar::all(-1),
                           std::vector<char>(), cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

       // cv::imshow("Brisk Matches", img_matches);
        std::vector<cv::Point2f> obj;
        std::vector<cv::Point2f> scene;

        for( int i = 0; i < good_matches.size(); i++ )
        {
          //-- Get the keypoints from the good matches
          obj.push_back( brisk_lastKeypoints[ good_matches[i].queryIdx ].pt );
          scene.push_back( brisk_currentKeypoints[ good_matches[i].trainIdx ].pt );
        }

        cv::Mat H = findHomography( obj, scene, CV_RANSAC );

        //-- Get the corners from the image_1 ( the object to be "detected" )
        std::vector<cv::Point2f> obj_corners(4);
        obj_corners[0] = cvPoint(0,0); obj_corners[1] = cvPoint( brisk_lastImg.cols, 0 );
        obj_corners[2] = cvPoint( brisk_lastImg.cols, brisk_lastImg.rows ); obj_corners[3] = cvPoint( 0, brisk_lastImg.rows );
        std::vector<cv::Point2f> scene_corners(4);

        perspectiveTransform( obj_corners, scene_corners, H);

        //-- Draw lines between the corners (the mapped object in the scene - image_2 )
        line( img_matches, scene_corners[0] + cv::Point2f( brisk_lastImg.cols, 0), scene_corners[1] + cv::Point2f( brisk_lastImg.cols, 0), cv::Scalar(0, 255, 0), 4 );
        line( img_matches, scene_corners[1] + cv::Point2f( brisk_lastImg.cols, 0), scene_corners[2] + cv::Point2f( brisk_lastImg.cols, 0), cv::Scalar( 0, 255, 0), 4 );
        line( img_matches, scene_corners[2] + cv::Point2f( brisk_lastImg.cols, 0), scene_corners[3] + cv::Point2f( brisk_lastImg.cols, 0), cv::Scalar( 0, 255, 0), 4 );
        line( img_matches, scene_corners[3] + cv::Point2f( brisk_lastImg.cols, 0), scene_corners[0] + cv::Point2f( brisk_lastImg.cols, 0), cv::Scalar( 0, 255, 0), 4 );

        //-- Show detected matches
        cv::imshow( "Good brisk Matches & Object detection", img_matches );
        cv::waitKey(50);
       // std::cout << good_matches.size() << " Brisk features matched from, " << a.size() << ", " << b.size() << " sets." << std::endl;
    }
    catch (const std::exception &exc)
    {
        // catch anything thrown within try block that derives from std::exception
        std::cerr << exc.what();
    }
    return pclMatch;
}
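
briskDepth is a custom PCL point type whose definition is not shown; BDMatch reads a per-point descriptor row from it. A plausible minimal definition, stated purely as an assumption:

// Hypothetical sketch -- the real briskDepth type is not shown in this example.
struct briskDepth
{
    float x, y, z;      // 3D position from the depth sensor
    cv::Mat descriptor; // 1xN BRISK descriptor row (CV_8U), pushed into descriptorsA/B above
};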
Example #9
/**
 * @function main
 * @brief Main function
 */
int SURF_main(Mat img_scene, Mat img_object)
{
  if( !img_object.data || !img_scene.data )
  { std::cout<< " --(!) Error reading images " << std::endl; return -1; }
  printf("Coming to SURF");
  //-- Step 1: Detect the keypoints using SURF Detector
  int minHessian = 400;

  SurfFeatureDetector detector( minHessian );

  std::vector<KeyPoint> keypoints_object, keypoints_scene;

  detector.detect( img_object, keypoints_object );
  detector.detect( img_scene, keypoints_scene );

  //-- Step 2: Calculate descriptors (feature vectors)
  SurfDescriptorExtractor extractor;

  Mat descriptors_object, descriptors_scene;

  extractor.compute( img_object, keypoints_object, descriptors_object );
  extractor.compute( img_scene, keypoints_scene, descriptors_scene );

  //-- Step 3: Matching descriptor vectors using FLANN matcher
  FlannBasedMatcher matcher;
  std::vector< DMatch > matches;
  matcher.match( descriptors_object, descriptors_scene, matches );

  double max_dist = 0; double min_dist = 100;

  //-- Quick calculation of max and min distances between keypoints
  for( int i = 0; i < descriptors_object.rows; i++ )
  { double dist = matches[i].distance;
    if( dist < min_dist ) min_dist = dist;
    if( dist > max_dist ) max_dist = dist;
  }

  printf("-- Max dist : %f \n", max_dist );
  printf("-- Min dist : %f \n", min_dist );
  
  //-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
  std::vector< DMatch > good_matches;

  for( int i = 0; i < descriptors_object.rows; i++ )
  { if( matches[i].distance < 3*min_dist )
    { good_matches.push_back( matches[i]); }
  }  

  Mat img_matches;
  drawMatches( img_object, keypoints_object, img_scene, keypoints_scene, 
               good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), 
               vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS ); 


  //-- Localize the object from img_1 in img_2 
  std::vector<Point2f> obj;
  std::vector<Point2f> scene;

  for( int i = 0; i < good_matches.size(); i++ )
  {
    //-- Get the keypoints from the good matches
    obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
    scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt ); 
  }

  Mat H = findHomography( obj, scene, CV_RANSAC );

  //-- Get the corners from the image_1 ( the object to be "detected" )
  std::vector<Point2f> obj_corners(4);
  obj_corners[0] = cvPoint(0,0); obj_corners[1] = cvPoint( img_object.cols, 0 );
  obj_corners[2] = cvPoint( img_object.cols, img_object.rows ); obj_corners[3] = cvPoint( 0, img_object.rows );
  std::vector<Point2f> scene_corners(4);

  perspectiveTransform( obj_corners, scene_corners, H);

   
  //-- Draw lines between the corners (the mapped object in the scene - image_2 )
  line( img_matches, scene_corners[0] + Point2f( img_object.cols, 0), scene_corners[1] + Point2f( img_object.cols, 0), Scalar(0, 255, 0), 4 );
  line( img_matches, scene_corners[1] + Point2f( img_object.cols, 0), scene_corners[2] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
  line( img_matches, scene_corners[2] + Point2f( img_object.cols, 0), scene_corners[3] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
  line( img_matches, scene_corners[3] + Point2f( img_object.cols, 0), scene_corners[0] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );

  //-- Show detected matches
  imshow( "Good Matches & Object detection", img_matches );

  waitKey(1);

  return 0;
}
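
A minimal usage sketch for SURF_main; note the argument order (scene first, then object). The image paths are placeholders.

int main()
{
  Mat img_scene = imread("scene.jpg", CV_LOAD_IMAGE_GRAYSCALE);
  Mat img_object = imread("object.jpg", CV_LOAD_IMAGE_GRAYSCALE);
  return SURF_main(img_scene, img_object);
}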
Example #10
int main(void)
{
/*
    cv::Mat target = cv::imread("../../images/redbull_target.jpg");
    cv::Mat scene = cv::imread("../../images/redbull_scene.jpg");
    cv::Mat t_gray = cv::imread("../../images/redbull_target.jpg", cv::IMREAD_GRAYSCALE);
    cv::Mat s_gray = cv::imread("../../images/redbull_scene.jpg", cv::IMREAD_GRAYSCALE);
*/
    cv::Mat target = cv::imread("../../images/genmai_target.jpg");
    cv::Mat scene = cv::imread("../../images/genmai_scene.jpg");
    cv::Mat t_gray = cv::imread("../../images/genmai_target.jpg", cv::IMREAD_GRAYSCALE);
    cv::Mat s_gray = cv::imread("../../images/genmai_scene.jpg", cv::IMREAD_GRAYSCALE);
    cv::Mat dst;

    // ticks-to-milliseconds conversion factor
    double f = 1000.0 / cv::getTickFrequency();

    int64 time_s; // start time
    double time_detect; // detection time (ms)
    double time_match; // matching time (ms)


    // Keypoint detection and descriptor computation

    cv::Ptr<cv::Feature2D> feature;
    std::stringstream ss;

    feature = cv::xfeatures2d::SIFT::create();
    ss << "SIFT";
    std::cout << "--- Measuring (SIFT) ---" << std::endl;


    //******************************
    // Keypoint detection and description
    //******************************
    std::vector<cv::KeyPoint> kpts1, kpts2;
    cv::Mat desc1, desc2;

    feature->detectAndCompute(t_gray, cv::noArray(), kpts1, desc1);

    time_s = cv::getTickCount(); // timing start
    feature->detectAndCompute(s_gray, cv::noArray(), kpts2, desc2);
    time_detect = (cv::getTickCount() - time_s)*f; // timing stop

    if (desc2.rows == 0){
        std::cout << "WARNING: 特徴点検出できず" << std::endl;
        return 1;
    }

    //*******************
    // Descriptor matching
    //*******************
    auto matchtype = feature->defaultNorm(); // SIFT, SURF: NORM_L2
                                             // BRISK, ORB, KAZE, A-KAZE: NORM_HAMMING
    cv::BFMatcher matcher(matchtype);
    std::vector<std::vector<cv::DMatch >> knn_matches;


    time_s = cv::getTickCount(); // timing start
    // take the top-2 nearest neighbours per descriptor
    matcher.knnMatch(desc1, desc2, knn_matches, 2);
    time_match = (cv::getTickCount() - time_s)*f; // timing stop


    //***************
    // Filter the correspondences
    //***************
    const auto match_par = .6f; // ratio-test threshold
    std::vector<cv::DMatch> good_matches;

    std::vector<cv::Point2f> match_point1;
    std::vector<cv::Point2f> match_point2;

    for (size_t i = 0; i < knn_matches.size(); ++i) {
        auto dist1 = knn_matches[i][0].distance;
        auto dist2 = knn_matches[i][1].distance;

        // keep the good matches (best distance vs. second-best distance)
        if (dist1 <= dist2 * match_par) {
            good_matches.push_back(knn_matches[i][0]);
            match_point1.push_back(kpts1[knn_matches[i][0].queryIdx].pt);
            match_point2.push_back(kpts2[knn_matches[i][0].trainIdx].pt);
        }
    }

    // Homography estimation
    cv::Mat masks;
    cv::Mat H;
    if (match_point1.size() != 0 && match_point2.size() != 0) {
        H = cv::findHomography(match_point1, match_point2, masks, cv::RANSAC, 3.f);
    }

    // Keep only the correspondences RANSAC marked as inliers
    std::vector<cv::DMatch> inlierMatches;
    for (auto i = 0; i < masks.rows; ++i) {
        uchar *inlier = masks.ptr<uchar>(i);
        if (inlier[0] == 1) {
            inlierMatches.push_back(good_matches[i]);
        }
    }
    // draw all good matches (immediately overwritten below)
    cv::drawMatches(target, kpts1, scene, kpts2, good_matches, dst);

    // draw only the inlier correspondences
    cv::drawMatches(target, kpts1, scene, kpts2, inlierMatches, dst);

    if (!H.empty()) {

        // Get the corners of the target image (the object to be "detected")
        std::vector<cv::Point2f> obj_corners(4);
        obj_corners[0] = cv::Point2f(.0f, .0f);
        obj_corners[1] = cv::Point2f(static_cast<float>(target.cols), .0f);
        obj_corners[2] = cv::Point2f(static_cast<float>(target.cols), static_cast<float>(target.rows));
        obj_corners[3] = cv::Point2f(.0f, static_cast<float>(target.rows));

        // Estimate the projection into the scene
        std::vector<cv::Point2f> scene_corners(4);
        cv::perspectiveTransform(obj_corners, scene_corners, H);

        // Connect the corners with lines (the mapped object in the scene image)
        float w = static_cast<float>(target.cols);
        cv::line(dst, scene_corners[0] + cv::Point2f(w, .0f), scene_corners[1] + cv::Point2f(w, .0f), cv::Scalar(0, 255, 0), 4);
        cv::line(dst, scene_corners[1] + cv::Point2f(w, .0f), scene_corners[2] + cv::Point2f(w, .0f), cv::Scalar(0, 255, 0), 4);
        cv::line(dst, scene_corners[2] + cv::Point2f(w, .0f), scene_corners[3] + cv::Point2f(w, .0f), cv::Scalar(0, 255, 0), 4);
        cv::line(dst, scene_corners[3] + cv::Point2f(w, .0f), scene_corners[0] + cv::Point2f(w, .0f), cv::Scalar(0, 255, 0), 4);
    }


    double beta = 1.2;
    cv::putText(dst, ss.str(), cv::Point(10, target.rows + 40), cv::FONT_HERSHEY_SIMPLEX, beta-.1, cv::Scalar(255, 255, 255), 1, CV_AA);
    ss.str("");
    ss << "Detection & Description";
    cv::putText(dst, ss.str(), cv::Point(10, target.rows + 70), cv::FONT_HERSHEY_SIMPLEX, beta - .1, cv::Scalar(0, 255, 255), 1, CV_AA);
    ss.str("");
    ss << "Time: " << time_detect << " [ms]";
    cv::putText(dst, ss.str(), cv::Point(10, target.rows + 95), cv::FONT_HERSHEY_SIMPLEX, beta - .1, cv::Scalar(0, 255, 255), 1, CV_AA);
    ss.str("");
    ss << "Matching";
    cv::putText(dst, ss.str(), cv::Point(10, target.rows + 120), cv::FONT_HERSHEY_SIMPLEX, beta - .1, cv::Scalar(0, 255, 255), 1, CV_AA);
    ss.str("");
    ss << "Time: " << time_match << " [ms]";
    cv::putText(dst, ss.str(), cv::Point(10, target.rows + 145), cv::FONT_HERSHEY_SIMPLEX, beta - .1, cv::Scalar(0, 255, 255), 1, CV_AA);

    ss.str("");
    ss << "--Matches--";
    cv::putText(dst, ss.str(), cv::Point(10, target.rows + 170), cv::FONT_HERSHEY_SIMPLEX, beta - .1, cv::Scalar(255, 255, 0), 1, CV_AA);
    ss.str("");
    ss << "Good Matches: " << good_matches.size();
    cv::putText(dst, ss.str(), cv::Point(10, target.rows + 190), cv::FONT_HERSHEY_SIMPLEX, beta - .1, cv::Scalar(255, 255, 0), 1, CV_AA);

    ss.str("");
    ss << "Inlier: " << inlierMatches.size();
    cv::putText(dst, ss.str(), cv::Point(10, target.rows + 220), cv::FONT_HERSHEY_SIMPLEX, beta - .1, cv::Scalar(255, 255, 0), 1, CV_AA);

    ss.str("");
    auto ratio = .0;
    if (!good_matches.empty())
        ratio = inlierMatches.size() * 1.0 / good_matches.size();
    ss << "Inlier ratio: " << ratio;
    cv::putText(dst, ss.str(), cv::Point(10, target.rows + 240), cv::FONT_HERSHEY_SIMPLEX, beta - .1, cv::Scalar(255, 255, 0), 1, CV_AA);


    ss.str("");
    ss << "Target KeyPoints: " << kpts1.size();
    cv::putText(dst, ss.str(), cv::Point(10, target.rows + 270), cv::FONT_HERSHEY_SIMPLEX, beta - .1, cv::Scalar(255, 0, 255), 1, CV_AA);
    ss.str("");
    ss << "Scene KeyPoints: " << kpts2.size();
    cv::putText(dst, ss.str(), cv::Point(10, target.rows + 290), cv::FONT_HERSHEY_SIMPLEX, beta - .1, cv::Scalar(255, 0, 255), 1, CV_AA);

    std::cout << "検出時間: " << time_detect << " [ms]" << std::endl;
    std::cout << "照合時間: " << time_match << " [ms]" << std::endl;
    std::cout << "Good Matches: " << good_matches.size() << std::endl;
    std::cout << "Inlier: " << inlierMatches.size() << std::endl;
    std::cout << "Inlier ratio: " << ratio << std::endl;
    std::cout << "target Keypoints: " << kpts1.size() << std::endl;
    std::cout << "scene Keypoints: " << kpts2.size() << std::endl;
    std::cout << "target match_points: " << match_point1.size() << std::endl;
    std::cout << "scene match_points: " << match_point2.size() << std::endl;

    cv::imshow("dst",dst);
    cv::waitKey(0);
    return 0;
}
bool findObjectSURF( cv::Mat objectMat, cv::Mat sceneMat, int hessianValue )
{
    bool objectFound = false;
    float nndrRatio = 0.7f;
    //vector of keypoints
    vector< cv::KeyPoint > keypointsO;
    vector< cv::KeyPoint > keypointsS;

    Mat descriptors_object, descriptors_scene;

    //-- Step 1: Extract keypoints
    SurfFeatureDetector surf(hessianValue);
    surf.detect(sceneMat,keypointsS);
    if(keypointsS.size() < 7) return false; //Not enough keypoints, object not found
    surf.detect(objectMat,keypointsO);
    if(keypointsO.size() < 7) return false; //Not enough keypoints, object not found

    //-- Step 2: Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    extractor.compute( sceneMat, keypointsS, descriptors_scene );
    extractor.compute( objectMat, keypointsO, descriptors_object );

    //-- Step 3: Matching descriptor vectors using FLANN matcher
    cv::FlannBasedMatcher matcher;
    std::vector<std::vector<cv::DMatch> > matches;
    matcher.knnMatch( descriptors_object, descriptors_scene, matches, 2 );
    vector< cv::DMatch > good_matches;
    good_matches.reserve(matches.size());

    for (size_t i = 0; i < matches.size(); ++i)
    {
        if (matches[i].size() < 2)
            continue;

        const cv::DMatch &m1 = matches[i][0];
        const cv::DMatch &m2 = matches[i][1];

        if(m1.distance <= nndrRatio * m2.distance)
            good_matches.push_back(m1);
    }



    if( good_matches.size() >= 7 )
    {
        std::cout << "OBJECT FOUND!" << std::endl;

        std::vector< cv::Point2f > obj;
        std::vector< cv::Point2f > scene;

        for( unsigned int i = 0; i < good_matches.size(); i++ )
        {
            //-- Get the keypoints from the good matches
            obj.push_back( keypointsO[ good_matches[i].queryIdx ].pt );
            scene.push_back( keypointsS[ good_matches[i].trainIdx ].pt );
        }

        Mat H = findHomography( obj, scene, CV_RANSAC );

        //-- Get the corners from the image_1 ( the object to be "detected" )
        std::vector< Point2f > obj_corners(4);
        obj_corners[0] = cvPoint(0,0); obj_corners[1] = cvPoint( objectMat.cols, 0 );
        obj_corners[2] = cvPoint( objectMat.cols, objectMat.rows ); obj_corners[3] = cvPoint( 0, objectMat.rows );
        std::vector< Point2f > scene_corners(4);

        perspectiveTransform( obj_corners, scene_corners, H);


        //-- Draw lines between the corners (the mapped object in the scene - image_2 )
        cv::Scalar color(0, 255, 0); //line colour; it was undefined in the original snippet
        line( sceneMat, scene_corners[0], scene_corners[1], color, 2 ); //TOP line
        line( sceneMat, scene_corners[1], scene_corners[2], color, 2 );
        line( sceneMat, scene_corners[2], scene_corners[3], color, 2 );
        line( sceneMat, scene_corners[3], scene_corners[0], color, 2 );
        objectFound = true;
    } else {
        std::cout << "OBJECT NOT FOUND!" << std::endl;
    }

    std::cout << "Matches found: " << matches.size() << std::endl;
    std::cout << "Good matches found: " << good_matches.size() << std::endl;

    return objectFound;
}
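
A minimal usage sketch for findObjectSURF; the image paths and Hessian threshold are placeholders.

int main()
{
    cv::Mat object = cv::imread("object.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    cv::Mat scene = cv::imread("scene.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    return findObjectSURF(object, scene, 400) ? 0 : 1;
}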