Example #1
static void align_2nd_to_1st_img(Mat& img1, Mat& img2) {
    // Calculate descriptors (feature vectors)
    std::vector<KeyPoint> keyPoints1, keyPoints2;
    Mat descriptor1, descriptor2;
    
    OrbFeatureDetector detector(5000);
    detector.detect(img1, keyPoints1);
    detector.detect(img2, keyPoints2);

    OrbDescriptorExtractor extractor;
    extractor.compute(img1, keyPoints1, descriptor1);
    extractor.compute(img2, keyPoints2, descriptor2);
    
    // Match descriptor vectors
    BFMatcher matcher;
    std::vector<vector< DMatch >> matches;
    matcher.knnMatch(descriptor2, descriptor1, matches, 2);
    
    std::vector< DMatch > good_matches;
    for (int i = 0; i < matches.size(); i ++) {
        float rejectRatio = 0.8;
        if (matches[i][0].distance / matches[i][1].distance > rejectRatio)
            continue;
        good_matches.push_back(matches[i][0]);
    }
    
    std::vector<Point2f> good_keyPoints1, good_keyPoints2;
    for (int i = 0; i < good_matches.size(); i ++) {
        good_keyPoints1.push_back(keyPoints1[good_matches[i].trainIdx].pt);
        good_keyPoints2.push_back(keyPoints2[good_matches[i].queryIdx].pt);
    }
    
    Mat H = findHomography( good_keyPoints2, good_keyPoints1, CV_RANSAC );
    warpPerspective(img2, img2, H, img1.size(), INTER_NEAREST);
}
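A minimal usage sketch for align_2nd_to_1st_img above, assuming OpenCV is available as in the example; the include directive, the file names and the output path are illustrative, not part of the original snippet:

#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    // Hypothetical input images of the same scene.
    Mat reference = imread("reference.jpg");
    Mat moving    = imread("moving.jpg");
    if (reference.empty() || moving.empty())
        return 1;

    // After the call, 'moving' has been warped into the coordinate frame of 'reference'.
    align_2nd_to_1st_img(reference, moving);
    imwrite("aligned.jpg", moving);
    return 0;
}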
void CameraPoseOptimization::crossCheckMatching
(BFMatcher& descriptorMatcher, const Mat& descriptors1, const Mat& descriptors2,
vector<DMatch>& filteredMatches12, int knn /* = 1 */)
{
	filteredMatches12.clear();
	vector<vector<DMatch> > matches12, matches21;
	descriptorMatcher.knnMatch(descriptors1, descriptors2, matches12, knn);
	descriptorMatcher.knnMatch(descriptors2, descriptors1, matches21, knn);
	for (size_t m = 0; m < matches12.size(); m++)
	{
		bool findCrossCheck = false;
		for (size_t fk = 0; fk < matches12[m].size(); fk++)
		{
			DMatch forward = matches12[m][fk];
			for (size_t bk = 0; bk < matches21[forward.trainIdx].size(); bk++)
			{
				DMatch backward = matches21[forward.trainIdx][bk];
				if (backward.trainIdx == forward.queryIdx)
				{
					filteredMatches12.push_back(forward);
					findCrossCheck = true;
					break;
				}
			}
			if (findCrossCheck)
				break;
		}
	}
}
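For the common knn = 1 case, the mutual-consistency filtering that crossCheckMatching implements by hand is also available directly from BFMatcher through its crossCheck constructor flag. A minimal sketch, assuming float descriptors and NORM_L2 (binary descriptors would use NORM_HAMMING):

#include <opencv2/opencv.hpp>
#include <vector>

// Keeps only matches that are each other's nearest neighbour in both directions.
std::vector<cv::DMatch> crossCheckedMatch(const cv::Mat& descriptors1, const cv::Mat& descriptors2)
{
    cv::BFMatcher matcher(cv::NORM_L2, /*crossCheck=*/true);
    std::vector<cv::DMatch> matches;
    matcher.match(descriptors1, descriptors2, matches);
    return matches;
}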
Example #3
void findTopFiveBFMatches(Mat hqDesc, vector<Mat>* keyframeDesc, vector<vector< DMatch >>* matchVec, vector<int>* matchIndices){
	BFMatcher matcher;
	int index = 0;

	//Calculate matches between the high-quality image and each keyframe's descriptors
	for (vector<Mat>::iterator it = keyframeDesc->begin(); it != keyframeDesc->end(); ++it){
		vector< DMatch > matches;

		//calculate initial matches
		Mat kfDesc = *it;
		matcher.match(hqDesc, kfDesc, matches);

		matchVec->push_back(matches);
		index++;
	}
	//pickTopFive
	pickTopFive(matchVec, matchIndices);
	index = 0;
}
Example #4
bool recognizer::getmatched(  Mat  mat1, Mat  mat2){
    Mat det1=mat1;Mat det2 = mat2;
    std::vector<KeyPoint> keypoints_object, keypoints_scene;
    detector->detect(det1,keypoints_object);
    detector->detect(det2,keypoints_scene);
    if(keypoints_object.size()==0 || keypoints_scene.size()==0){
        return false;
    }
    Mat descriptors1, descriptors2;
    extractor->compute(det1, keypoints_object, descriptors1);
    extractor->compute(det2, keypoints_scene, descriptors2);
    BFMatcher matcher;
    vector<DMatch> matches;
    matcher.match(descriptors1, descriptors2, matches);
//    Rect r3 = det1&det2;
//    double match = r3.area()/det2.area();
    if(matches.size()<threholdNum)
        return false;
    return true;
}
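Note that a BFMatcher with crossCheck disabled returns one match per query descriptor regardless of quality, so the raw matches.size() compared against threholdNum above is essentially the keypoint count of the first image. Counting ratio-test survivors is usually a more discriminative score; a hedged sketch of that variant (the helper name and the 0.75 ratio are assumptions):

#include <opencv2/opencv.hpp>
#include <vector>

// Count matches that pass Lowe's ratio test instead of counting raw matches.
static int countGoodMatches(const cv::Mat& descriptors1, const cv::Mat& descriptors2,
                            float ratio = 0.75f)
{
    cv::BFMatcher matcher;
    std::vector<std::vector<cv::DMatch> > knnMatches;
    matcher.knnMatch(descriptors1, descriptors2, knnMatches, 2);

    int good = 0;
    for (size_t i = 0; i < knnMatches.size(); i++)
        if (knnMatches[i].size() == 2 &&
            knnMatches[i][0].distance < ratio * knnMatches[i][1].distance)
            ++good;
    return good;
}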
Example #5
int main(int argc, char** argv)
{
    //read images
    Mat img_1c=imread("img3.jpg");
    Mat img_2c=imread("img1.jpg");
    
    Mat img_1, img_2;
    //transform images into gray scale
    cvtColor( img_1c, img_1, CV_BGR2GRAY );
    cvtColor( img_2c, img_2, CV_BGR2GRAY );

    //SIFT sift;  // unused (and not constructible in OpenCV 3.x+); the detector is created below via SIFT::create
    //Ptr<SIFT> ptrsift = SIFT::create(50, 3, .2, 5, 10);  //works for imag1 and 2
    Ptr<SIFT> ptrsift = SIFT::create(15, 5, .1, 5, 10); 
    vector<KeyPoint> key_points_1, key_points_2;
    Mat detector;
    //do sift, find key points
    ptrsift->detect(img_1, key_points_1);
    ptrsift->detect(img_2, key_points_2);
    //sift(img_2, Mat(), key_points_2, detector);

    //PSiftDescriptorExtractor extractor;
    Ptr<SIFT> extractor = SIFT::create(); 
    
    Mat descriptors_1,descriptors_2;
    //compute descriptors
    extractor->compute(img_1,key_points_1,descriptors_1);
    extractor->compute(img_2,key_points_2,descriptors_2);
    cout<<descriptors_1;
    //use brute force method to match vectors
    BFMatcher matcher;
    vector<DMatch>matches;
    matcher.match(descriptors_1,descriptors_2,matches);

    //draw results
    Mat img_matches;
    drawMatches(img_1c,key_points_1,img_2c,key_points_2,matches,img_matches);
    imshow("sift_Matches",img_matches);
    waitKey(0);
    return 0;
}
Example #6
void YawAngleEstimator::train()
{
	printf("YawAngleEstimator:train\n");
	vector<vector<KeyPoint>> kp(AngleNum,vector<KeyPoint>());
	vector<Mat> descriptors(AngleNum,Mat());
	featureExtract(YawTemplate, kp, descriptors, Feature);

	if (useIndex)
	{
		//build an index per template: KD-tree (L2) for SIFT, LSH (Hamming) for binary descriptors
		for (int i = 0; i < AngleNum; i++)
		{
			flann::Index tempIndex;
			if (Feature == USE_SIFT)
			{
				tempIndex.build(descriptors[i], flann::KDTreeIndexParams(4), cvflann::FLANN_DIST_L2);
			}
			else
			{
				tempIndex.build(descriptors[i], flann::LshIndexParams(12, 20, 2), cvflann::FLANN_DIST_HAMMING);
			}
			YawIndex.push_back(tempIndex);
		}
	}
	else
	{
		//build BFMatchers
		for (int i = 0; i < AngleNum; i++)
		{
			//record
			fss<<"\nKeypoints number of template "<< i <<" is "<< descriptors[i].rows << endl;

			BFMatcher tempMatcher;
			vector<Mat> train_des(1, descriptors[i]);
			tempMatcher.add(train_des);
			tempMatcher.train();
			matchers.push_back(tempMatcher);
		}
	}
}
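The BFMatcher instances trained above hold their template descriptors internally (via add()/train()), so at query time only the live frame's descriptors need to be passed in. A hedged sketch of the query side; the function name, the frameDescriptors argument and the ratio-test scoring are assumptions, not taken from the original class:

#include <opencv2/opencv.hpp>
#include <vector>

// Score each trained matcher against the current frame and return the index
// of the best-matching template, or -1 if nothing matched at all.
static int bestTemplate(std::vector<cv::BFMatcher>& matchers, const cv::Mat& frameDescriptors)
{
    int bestIdx = -1, bestScore = -1;
    for (size_t i = 0; i < matchers.size(); i++)
    {
        std::vector<std::vector<cv::DMatch> > knn;
        matchers[i].knnMatch(frameDescriptors, knn, 2);   // matches against the added template descriptors

        int score = 0;
        for (size_t j = 0; j < knn.size(); j++)
            if (knn[j].size() == 2 && knn[j][0].distance < 0.8f * knn[j][1].distance)
                ++score;
        if (score > bestScore) { bestScore = score; bestIdx = (int)i; }
    }
    return bestIdx;
}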
Example #7
PERF_TEST_P(BruteForceMatcherFixture, DISABLED_knnMatch,
            OCL_BFMATCHER_TYPICAL_MAT_SIZES)  // TODO too many outliers
{
    const Size srcSize = GetParam();

    vector<vector<DMatch> > matches(2);
    Mat query(srcSize, CV_32F), train(srcSize, CV_32F);
    randu(query, 0.0f, 1.0f);
    randu(train, 0.0f, 1.0f);

    declare.in(query, train);
    if (srcSize.height == 2000)
        declare.time(8);

    if (RUN_PLAIN_IMPL)
    {
        BFMatcher matcher (NORM_L2);
        TEST_CYCLE() matcher.knnMatch(query, train, matches, 2);

        std::vector<DMatch> & matches0 = matches[0], & matches1 = matches[1];
        SANITY_CHECK_MATCHES(matches0);
        SANITY_CHECK_MATCHES(matches1);
    }
    else if (RUN_OCL_IMPL)
    {
        ocl::BruteForceMatcher_OCL_base oclMatcher(ocl::BruteForceMatcher_OCL_base::L2Dist);
        ocl::oclMat oclQuery(query), oclTrain(train);

        TEST_CYCLE() oclMatcher.knnMatch(oclQuery, oclTrain, matches, 2);

        std::vector<DMatch> & matches0 = matches[0], & matches1 = matches[1];
        SANITY_CHECK_MATCHES(matches0);
        SANITY_CHECK_MATCHES(matches1);
    }
    else
        OCL_PERF_ELSE
}
Example #8
int findMatches(Mat img1, Mat img2, vector<KeyPoint>& keypoints1, vector<KeyPoint>& keypoints2, Mat descriptors1, Mat descriptors2, BFMatcher matcher, vector<Point2f>& finalPoint1, 
	vector<Point2f>& finalPoint2, double passRatio, vector<KeyPoint>& keypointsOut) {
	vector<DMatch> matches;
	matcher.match(descriptors1, descriptors2, matches);
	vector<char> matchesMask(matches.size(), 0);

	// Find max distance
	double maxDistance = 0;
	for (int idx = 0; idx < matches.size(); idx++) {
		if (matches[idx].distance > maxDistance)
			maxDistance = matches[idx].distance;
	}

	// Keep only matches whose distance is within passRatio of the max distance (cut the rest)
	for (int idx = 0; idx < matches.size(); idx++) {
		if (matches[idx].distance <= (maxDistance*passRatio))
			matchesMask[idx] = 1;
	}

#ifdef DEBUG
	namedWindow("Matches", CV_WINDOW_AUTOSIZE);
	Mat img_matches;
	drawMatches(img1, keypoints1, img2, keypoints2, matches, img_matches, Scalar::all(-1), Scalar::all(-1), matchesMask, 2);

	while (1) {
		imshow("Matches", img_matches);
		int keypress = waitKey(30);
		if (keypress == 32) {
			break;
		}
	}
#endif

	// Output final points as well as a new vector of keypoints
	for (int idx = 0; idx < matches.size(); idx++) {
		if (matchesMask[idx]) {
			finalPoint1.push_back(keypoints1[matches[idx].queryIdx].pt);
			finalPoint2.push_back(keypoints2[matches[idx].trainIdx].pt);
			keypointsOut.push_back(keypoints2[matches[idx].trainIdx]);
		}
	}
	return 0;
}
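A minimal calling sketch for findMatches above; the ORB detector/extractor, the NORM_HAMMING matcher, the file names and the 0.3 pass ratio are illustrative assumptions (the original does not say which features it was used with):

#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;

int main()
{
    Mat img1 = imread("left.jpg",  IMREAD_GRAYSCALE);   // hypothetical inputs
    Mat img2 = imread("right.jpg", IMREAD_GRAYSCALE);

    OrbFeatureDetector detector(2000);
    OrbDescriptorExtractor extractor;
    vector<KeyPoint> keypoints1, keypoints2;
    Mat descriptors1, descriptors2;
    detector.detect(img1, keypoints1);
    detector.detect(img2, keypoints2);
    extractor.compute(img1, keypoints1, descriptors1);
    extractor.compute(img2, keypoints2, descriptors2);

    BFMatcher matcher(NORM_HAMMING);   // Hamming distance for binary ORB descriptors
    vector<Point2f> finalPoint1, finalPoint2;
    vector<KeyPoint> keypointsOut;
    findMatches(img1, img2, keypoints1, keypoints2, descriptors1, descriptors2,
                matcher, finalPoint1, finalPoint2, 0.3, keypointsOut);
    return 0;
}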
Example #9
Mat find_next_homography(Mat image, Mat image_next, vector<KeyPoint> keypoints_0, Mat descriptors_0,
						 SurfFeatureDetector detector, SurfDescriptorExtractor extractor, 
						 BFMatcher matcher, vector<KeyPoint>& keypoints_next, Mat& descriptors_next)
{

	//step 1 detect feature points in next image
	vector<KeyPoint> keypoints_1;
	detector.detect(image_next, keypoints_1);

	Mat img_keypoints_surf0, img_keypoints_surf1;
	drawKeypoints(image, keypoints_0, img_keypoints_surf0);
	drawKeypoints(image_next, keypoints_1, img_keypoints_surf1);
	//cout << "# im0 keypoints" << keypoints_0.size() << endl;
    //cout << "# im1 keypoints" << keypoints_1.size() << endl;
	imshow("surf 0", img_keypoints_surf0);
	imshow("surf 1", img_keypoints_surf1);

    //step 2: extract feature descriptors from feature points
	Mat descriptors_1;
	extractor.compute(image_next, keypoints_1, descriptors_1);

	//step 3: feature matching
	//cout << "fd matching" << endl;
	vector<DMatch> matches;
	vector<Point2f> matched_0;
	vector<Point2f> matched_1;

	matcher.match(descriptors_0, descriptors_1, matches);
	Mat img_feature_matches;
	drawMatches(image, keypoints_0, image_next, keypoints_1, matches, img_feature_matches );
	imshow("Matches", img_feature_matches);

	for (int i = 0; i < matches.size(); i++ )
	{
		matched_0.push_back(keypoints_0[matches[i].queryIdx].pt);	
		matched_1.push_back(keypoints_1[matches[i].trainIdx].pt);	
	}
	keypoints_next = keypoints_1;
	descriptors_next = descriptors_1;
	return findHomography(matched_0, matched_1, RANSAC);

}
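One common way to drive find_next_homography above is to chain the frame-to-frame homographies over a video; a hedged sketch under assumed names (the capture source, the SURF parameters and the accumulated H_total are not part of the original):

#include <opencv2/opencv.hpp>
#include <opencv2/nonfree/features2d.hpp>   // SURF lives in nonfree in OpenCV 2.4.x
#include <iostream>
using namespace cv;
using namespace std;

int main()
{
    VideoCapture cap("video.avi");           // hypothetical video source
    Mat prev, frame;
    cap >> prev;
    if (prev.empty())
        return 1;

    SurfFeatureDetector detector(400);
    SurfDescriptorExtractor extractor;
    BFMatcher matcher(NORM_L2);

    vector<KeyPoint> kp_prev;
    Mat desc_prev;
    detector.detect(prev, kp_prev);
    extractor.compute(prev, kp_prev, desc_prev);

    Mat H_total = Mat::eye(3, 3, CV_64F);    // maps the first frame into the current one
    while (cap.read(frame))
    {
        vector<KeyPoint> kp_next;
        Mat desc_next;
        Mat H = find_next_homography(prev, frame, kp_prev, desc_prev,
                                     detector, extractor, matcher, kp_next, desc_next);
        H_total = H * H_total;               // compose: first -> previous -> current
        prev = frame.clone();
        kp_prev = kp_next;
        desc_prev = desc_next;
    }
    cout << "Cumulative homography:\n" << H_total << endl;
    return 0;
}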
Example #10
bool compute(Mat CurrentImageGrayScale, Mat Kinverse, const int iteration){
    vector<KeyPoint> CurrentFeatures;
    SurfDetector.detect(CurrentImageGrayScale, CurrentFeatures);
    Mat CurrentFeatureDescriptors;
    SurfDescriptor.compute(CurrentImageGrayScale, CurrentFeatures, CurrentFeatureDescriptors);
    vector<DMatch> matches;
    matcher.match(PreviousFeatureDescriptors, CurrentFeatureDescriptors, matches);
    if (matches.size() > 200){
        nth_element(matches.begin(), matches.begin()+ 200, matches.end());
        matches.erase(matches.begin() + 200, matches.end());  // keep the 200 best matches
    }
    //Debug(matches, PreviousImageGrayScale, CurrentImageGrayScale, PreviousFeatures, CurrentFeatures);
    vector< pair<double,double> > FirstImageFeatures;
    vector< pair<double,double> > SecondImageFeatures;
    for(int i  = 0; i < matches.size(); i++){
        Point2f myft = PreviousFeatures[matches[i].queryIdx].pt;
        Mat FtMatForm = (Mat_<double>(3,1) << (double)myft.x, (double)myft.y, 1.0);
        FtMatForm = Kinverse*FtMatForm;       
        pair<double,double> tmp = make_pair(FtMatForm.at<double>(0,0), FtMatForm.at<double>(1,0));
        FirstImageFeatures.push_back(tmp);
        
        myft = CurrentFeatures[matches[i].trainIdx].pt;
        FtMatForm = (Mat_<double>(3,1) << (double)myft.x, (double)myft.y, 1.0);
        FtMatForm = Kinverse*FtMatForm;       
        tmp = make_pair(FtMatForm.at<double>(0,0), FtMatForm.at<double>(1,0));
        SecondImageFeatures.push_back(tmp);
    }
    vector<int> inliers_indexes;
    Mat RobustEssentialMatrix= Ransac(FirstImageFeatures, SecondImageFeatures, 0.00001, 8, 2000, inliers_indexes);
    //cout << RobustEssentialMatrix << endl;
    
    //Debug2(matches, PreviousImageGrayScale, CurrentImageGrayScale, PreviousFeatures, CurrentFeatures, inliers_indexes);
    
    Mat P = Mat::eye(3,4,CV_64F);
    if (!GetRotationAndTraslation(RobustEssentialMatrix, FirstImageFeatures, SecondImageFeatures, inliers_indexes, P)){
        cerr << "Recovering Translation and Rotation: Failed" << endl;
        return false;
    }
    //cout << P << endl;
    Mat Transformation = Mat::zeros(4,4, CV_64F);
    Transformation.at<double>(3,3) = 1.0;
    for(int i = 0 ; i < 3; i++)
        for(int j = 0; j < 4; j++)
            Transformation.at<double>(i, j) = P.at<double>(i, j);
    Mat TransformationInverse = Transformation.inv();
    Pose = Pose * TransformationInverse;
    cerr << Pose.at<double>(0, 3) << " " << Pose.at<double>(1, 3) << " " << Pose.at<double>(2, 3) << endl;    
    
    PreviousImageGrayScale = CurrentImageGrayScale;
    PreviousFeatures = CurrentFeatures;
    PreviousFeatureDescriptors = CurrentFeatureDescriptors;
    
    //old version
    
//    vector< pair<int,int> > correspondences = harrisFeatureMatcherMCC(PreviousImageGrayScale, CurrentImageGrayScale, PreviousFeatures, CurrentFeatures);
//    cout << "Iteracion" << iteration << "Cantidad de correspondencias " << correspondences.size() << endl;
//    vector< pair<double,double> > FirstImageFeatures;
//    vector< pair<double,double> > SecondImageFeatures;
//    for(int i  = 0; i < correspondences.size(); i++){
//        pair<int,int> myft = PreviousFeatures[correspondences[i].first];
//        Mat FtMatForm = (Mat_<double>(3,1) << (double)myft.first, (double)myft.second, 1.0);
//        FtMatForm = Kinverse*FtMatForm;       
//        pair<double,double> tmp = make_pair(FtMatForm.at<double>(0,0), FtMatForm.at<double>(1,0));
//        FirstImageFeatures.push_back(tmp);
//        
//        myft = CurrentFeatures[correspondences[i].second];
//        FtMatForm = (Mat_<double>(3,1) << (double)myft.first, (double)myft.second, 1.0);
//        FtMatForm = Kinverse*FtMatForm;       
//        tmp = make_pair(FtMatForm.at<double>(0,0), FtMatForm.at<double>(1,0));
//        SecondImageFeatures.push_back(tmp);
//    }
//    vector<int> inliers_indexes;
//    Mat RobustEssentialMatrix= Ransac(FirstImageFeatures, SecondImageFeatures, 0.98, 0.00001, 0.5, 8, FirstImageFeatures.size()/2, inliers_indexes);
//    cout << "Iteration" << iteration << "Final EssentialMatrix" << endl;
//    cout << RobustEssentialMatrix << endl;
//    
//    
//    vector<pair<int, int> > correspondences_inliers;
//    for(int i = 0; i < inliers_indexes.size(); i++)
//        correspondences_inliers.push_back(correspondences[inliers_indexes[i]]);
//    debugging2(PreviousImageGrayScale, CurrentImageGrayScale, PreviousFeatures, CurrentFeatures, correspondences_inliers);
//    
//    Mat P = Mat::eye(3,4,CV_64F);
//    if (!GetRotationAndTraslation(RobustEssentialMatrix, FirstImageFeatures, SecondImageFeatures, inliers_indexes, P))
//        return false;
//    cout << "Iteration" << iteration << "Camera Matrix" << endl;
//    cout << P << endl;
//    Mat Transformation = Mat::zeros(4,4, CV_64F);
//    Transformation.at<double>(3,3) = 1.0;
//    for(int i = 0 ; i < 3; i++)
//        for(int j = 0; j < 4; j++)
//            Transformation.at<double>(i, j) = P.at<double>(i, j);
//    Mat TransformationInverse = Transformation.inv();
//    Pose = Pose * TransformationInverse;
//    PreviousImageGrayScale = CurrentImageGrayScale;
//    PreviousFeatures = CurrentFeatures;
//    cerr << Pose.at<double>(0, 4) << Pose.at<double>(1, 4) << Pose.at<double>(2, 4) << endl;

    return true;
}
Example #11
/* perform 2D SURF feature matching */
void match (Mat img_1, Mat img_2, vector<KeyPoint> keypoints_1,
    vector<KeyPoint> keypoints_2, vector<DMatch> &good_matches,
    pcl::CorrespondencesPtr &correspondences)
{
  SurfDescriptorExtractor extractor;
  Mat descriptors_1, descriptors_2;

  extractor.compute (img_1, keypoints_1, descriptors_1);
  extractor.compute (img_2, keypoints_2, descriptors_2);

  //FlannBasedMatcher matcher;
  BFMatcher matcher (NORM_L2);
  std::vector<DMatch> matches;

  matcher.match (descriptors_1, descriptors_2, matches);

  double max_dist = 0;
  double min_dist = 100;

  for (int i = 0; i < descriptors_1.rows; i++)
  {
    double dist = matches[i].distance;

    if (dist < min_dist)
      min_dist = dist;
    if (dist > max_dist)
      max_dist = dist;
  }

  for (int i = 0; i < descriptors_1.rows; i++)
  {
    // the multiplier (3 here) may need tuning for different cases
    if (matches[i].distance < 3 * min_dist)  //may adapt for changes
    {
      good_matches.push_back (matches[i]);
    }
  }

  correspondences->resize (good_matches.size ());

  for (unsigned cIdx = 0; cIdx < good_matches.size (); cIdx++)
  {
    (*correspondences)[cIdx].index_query = good_matches[cIdx].queryIdx;
    (*correspondences)[cIdx].index_match = good_matches[cIdx].trainIdx;

    if (0)  // for debugging
    {
      cout << good_matches[cIdx].queryIdx << " " << good_matches[cIdx].trainIdx
          << " " << good_matches[cIdx].distance << endl;
      cout << good_matches.size () << endl;
    }
  }

  // change the constant value of SHOW_MATCHING to 1 if you want to visualize the matching result
  if (SHOW_MATCHING)
  {
    Mat img_matches;
    drawMatches (img_1, keypoints_1, img_2, keypoints_2, good_matches,
        img_matches, Scalar::all (-1), Scalar::all (-1), vector<char> (),
        DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

    //-- Show detected matches
    imshow ("Good Matches", img_matches);
    waitKey (0);
  }
}
Example #12
void computePoseDifference(Mat img1, Mat img2, CommandArgs args, Mat k, Mat& dist_coefficients, double& worldScale, Mat& R, Mat& t, Mat& img_matches)
{
   cout << "%===============================================%" << endl;

   Mat camera_matrix = k.clone();
   if (args.resize_factor > 1) 
   {
      resize(img1, img1, Size(img1.cols / args.resize_factor, 
               img1.rows / args.resize_factor)); // make smaller for performance and displayability
      resize(img2, img2, Size(img2.cols / args.resize_factor,
               img2.rows / args.resize_factor));
      // scale matrix down according to changed resolution
      camera_matrix = camera_matrix / args.resize_factor;
      camera_matrix.at<double>(2,2) = 1;
   }

   Mat K1, K2;
   K1 = K2 = camera_matrix;
   if (img1.rows > img1.cols) // it is assumed the camera has been calibrated in landscape mode, so undistortion must also be performed in landscape orientation, or the camera matrix must be modified (fx,fy and cx,cy need to be exchanged)
   {
      swap(K1.at<double>(0,0), K1.at<double>(1,1));
      swap(K1.at<double>(0,2), K1.at<double>(1,2));
   }
   if (img2.rows > img2.cols)
   {
      swap(K2.at<double>(0,0), K2.at<double>(1,1));
      swap(K2.at<double>(0,2), K2.at<double>(1,2));
   }

   // Feature detection + extraction
   vector<KeyPoint> KeyPoints_1, KeyPoints_2;
   Mat descriptors_1, descriptors_2;

   Ptr<Feature2D> feat_detector;
   if (args.detector == DETECTOR_KAZE) 
   {
      feat_detector = AKAZE::create(args.detector_data.upright ? AKAZE::DESCRIPTOR_MLDB_UPRIGHT : AKAZE::DESCRIPTOR_MLDB, 
            args.detector_data.descriptor_size,
            args.detector_data.descriptor_channels,
            args.detector_data.threshold,
            args.detector_data.nOctaves,
            args.detector_data.nOctaveLayersAkaze);

   } else if (args.detector == DETECTOR_SURF)
   {
      feat_detector = xfeatures2d::SURF::create(args.detector_data.minHessian, 
            args.detector_data.nOctaves, args.detector_data.nOctaveLayersAkaze, args.detector_data.extended, args.detector_data.upright);
   } else if (args.detector == DETECTOR_SIFT)
   {
      feat_detector = xfeatures2d::SIFT::create(args.detector_data.nFeatures, 
            args.detector_data.nOctaveLayersSift, args.detector_data.contrastThreshold, args.detector_data.sigma);
   }

   feat_detector->detectAndCompute(img1, noArray(), KeyPoints_1, descriptors_1);
   feat_detector->detectAndCompute(img2, noArray(), KeyPoints_2, descriptors_2);

   cout << "Number of feature points (img1, img2): " << "(" << KeyPoints_1.size() << ", " << KeyPoints_2.size() << ")" << endl;

   // Find correspondences
   BFMatcher matcher;
   vector<DMatch> matches;
   if (args.use_ratio_test) 
   {
      if (args.detector == DETECTOR_KAZE) 
         matcher = BFMatcher(NORM_HAMMING, false);
      else matcher = BFMatcher(NORM_L2, false);

      vector<vector<DMatch>> match_candidates;
      const float ratio = args.ratio;
      matcher.knnMatch(descriptors_1, descriptors_2, match_candidates, 2);
      for (int i = 0; i < match_candidates.size(); i++)
         if (match_candidates[i][0].distance < ratio * match_candidates[i][1].distance)
            matches.push_back(match_candidates[i][0]);

      cout << "Number of matches passing ratio test: " << matches.size() << endl;

   } else
   {
      if (args.detector == DETECTOR_KAZE) 
         matcher = BFMatcher(NORM_HAMMING, true);
      else matcher = BFMatcher(NORM_L2, true);
      matcher.match(descriptors_1, descriptors_2, matches);
      cout << "Number of matching feature points: " << matches.size() << endl;
   }


   // Convert correspondences to vectors
   vector<Point2f>imgpts1,imgpts2;

   for(unsigned int i = 0; i < matches.size(); i++) 
   {
      imgpts1.push_back(KeyPoints_1[matches[i].queryIdx].pt); 
      imgpts2.push_back(KeyPoints_2[matches[i].trainIdx].pt); 
   }

   Mat mask; // inlier mask
   if (args.undistort) 
   {
      undistortPoints(imgpts1, imgpts1, K1, dist_coefficients, noArray(), K1);
      undistortPoints(imgpts2, imgpts2, K2, dist_coefficients, noArray(), K2);
   } 

   double focal = camera_matrix.at<double>(0,0);
   Point2d principalPoint(camera_matrix.at<double>(0,2),camera_matrix.at<double>(1,2));

   Mat E = findEssentialMat(imgpts1, imgpts2, focal, principalPoint, RANSAC, 0.999, 1, mask);
   /* Mat F = camera_matrix.t().inv() * E * camera_matrix.inv(); */
   Mat F = findFundamentalMat(imgpts1, imgpts2, CV_FM_RANSAC);

   correctMatches(F, imgpts1, imgpts2, imgpts1, imgpts2);
   cout << "Reprojection error:\n " << computeReprojectionError(imgpts1, imgpts2, mask, F) << endl;

   int inliers = recoverPose(E, imgpts1, imgpts2, R, t, focal, principalPoint, mask);

   cout << "Matches used for pose recovery:\n " << inliers << endl;

   /* Mat R1, R2, ProjMat1, ProjMat2, Q; */
   /* stereoRectify(camera_matrix, dist_coefficients, camera_matrix, dist_coefficients, img1.size(), R, t, R1, R2, ProjMat1, ProjMat2, Q); */
   /* cout << "P1=" << ProjMat1 << endl; */
   /* cout << "P2=" << ProjMat2 << endl; */
   /* cout << "Q=" << Q << endl; */

   Mat mtxR, mtxQ;
   Mat Qx, Qy, Qz;
   Vec3d angles = RQDecomp3x3(R, mtxR, mtxQ, Qx, Qy, Qz);
   /* cout << "Qx:\n " << Qx << endl; */
   /* cout << "Qy:\n " << Qy << endl; */
   /* cout << "Qz:\n " << Qz << endl; */
   cout << "Translation:\n " << t.t() << endl;
   cout << "Euler angles [x y z] in degrees:\n " << angles.t() << endl;

   if (args.epilines)
   {
      drawEpilines(Mat(imgpts1), 1, F, img2);
      drawEpilines(Mat(imgpts2), 2, F, img1);
   }

   drawMatches(img1, KeyPoints_1, img2, KeyPoints_2, // draw only inliers given by mask
         matches, img_matches, Scalar::all(-1), Scalar::all(-1), mask);

   vector<Point2f> imgpts1_masked, imgpts2_masked;
   for (int i = 0; i < imgpts1.size(); i++) 
   {
      if (mask.at<uchar>(i,0) == 1) 
      {
         imgpts1_masked.push_back(imgpts1[i]);
         imgpts2_masked.push_back(imgpts2[i]);
      }
   }

   Mat pnts4D;
   Mat P1 = camera_matrix * Mat::eye(3, 4, CV_64FC1), P2;
   Mat p2[2] = { R, t }; 
   hconcat(p2, 2, P2);
   P2 = camera_matrix * P2;

#define USE_OPENCV_TRIANGULATION
#ifndef USE_OPENCV_TRIANGULATION // strangely, both methods yield identical results
   vector<Point3d> homogPoints1, homogPoints2;
   for (int i = 0; i < imgpts1_masked.size(); i++) 
   {
      Point2f currentPoint1 = imgpts1_masked[i];
      homogPoints1.push_back(Point3d(currentPoint1.x, currentPoint1.y, 1));
      Point2f currentPoint2 = imgpts2_masked[i];
      homogPoints2.push_back(Point3d(currentPoint2.x, currentPoint2.y, 1));
   }

   Mat dehomogenized(imgpts1_masked.size(), 3, CV_64FC1);
   for (int i = 0; i < imgpts1_masked.size(); i++) 
   {
      Mat_<double> triangulatedPoint = IterativeLinearLSTriangulation(homogPoints1[i], P1, homogPoints2[i], P2);
      Mat r = triangulatedPoint.t();
      r.colRange(0,3).copyTo(dehomogenized.row(i)); // directly assigning to dehomogenized.row(i) compiles but does nothing, wtf?
   }
#else
   triangulatePoints(P1, P2, imgpts1_masked, imgpts2_masked, pnts4D);
   pnts4D = pnts4D.t();
   Mat dehomogenized;
   convertPointsFromHomogeneous(pnts4D, dehomogenized);
   dehomogenized = dehomogenized.reshape(1); // instead of 3 channels and 1 col, we want 1 channel and 3 cols
#endif


   double mDist = 0;
   int n = 0;
   int pos = 0, neg = 0;

   /* Write ply file header */
   ofstream ply_file("points.ply", ios_base::trunc);
   ply_file << 
      "ply\n"
      "format ascii 1.0\n"
      "element vertex " << dehomogenized.rows << "\n"
      "property float x\n"
      "property float y\n"
      "property float z\n"
      "property uchar red\n"
      "property uchar green\n"
      "property uchar blue\n"
      "end_header" << endl;

   Mat_<double> row;
   for (int i = 0; i < dehomogenized.rows; i++) 
   {
      row = dehomogenized.row(i);
      double d = row(2);
      if (d > 0) 
      {
         pos++;
         mDist += norm(row);
         n++;
         /* float startx=imgpts1_masked[i].x - 1, starty=imgpts1_masked[i].y - 1, endx=imgpts1_masked[i].x + 1, endy=imgpts1_masked[i].y + 1; */
         /* cout << "startx,endx = " << startx << "," << endx << endl; */
         /* cout << "starty,endy = " << starty << "," << endy << endl; */
         Vec3b rgb = img1.at<Vec3b>((int)imgpts1_masked[i].y, (int)imgpts1_masked[i].x); // Mat::at takes (row, col) = (y, x)
         ply_file << row(0) << " " << row(1) << " " << row(2) << " " << (int)rgb[2] << " " << (int)rgb[1] << " " << (int)rgb[0] << "\n";
      } else
      {
         neg++;
         ply_file << 0 << " " << 0 << " " << 0 << " " << 0 << " " << 0 << " " << 0 << "\n"; 
      }
   }
   ply_file.close();
   mDist /= n;
   worldScale = mDist;
   cout << "Mean distance of " << n << " points to camera:\n " << mDist << " (dehomogenized)" << endl;
   cout << "pos=" << pos << ", neg=" << neg << endl;


   /* char filename[100]; */
   /* sprintf(filename, "mat_1%d", i+1); */

   /* Ptr<Formatter> formatter = Formatter::get(Formatter::FMT_CSV); */
   /* Ptr<Formatted> formatted = formatter->format(dehomogenized); */
   /* ofstream file(filename, ios_base::trunc); */
   /* file << formatted << endl; */

   /* Removed until cmake has been fathomed */
   /* vector< Point3d > points3D; */
   /* vector< vector< Point2d > > pointsImg; */
   /* int NPOINTS=dehomogenized.rows; // number of 3d points */
   /* int NCAMS=2; // number of cameras */

   /* points3D.resize(NPOINTS); */
   /* for (int i = 0; i < NPOINTS; i++) */ 
   /* { */
   /*    points3D[i] = Point3d(dehomogenized.at<double>(i,0), */
   /*          dehomogenized.at<double>(i,1), */
   /*          dehomogenized.at<double>(i,2) */
   /*          ); */
   /* } */
   /* // fill image projections */
   /* vector<vector<int> > visibility(2, vector<int>(NPOINTS, 1)); */
   /* vector<Mat> camera_matrices(2, camera_matrix); */
   /* vector<Mat> Rs(2); */
   /* Rodrigues(Mat::eye(3, 3, CV_64FC1), Rs[0]); */
   /* Rodrigues(R, Rs[0]); */
   /* vector<Mat> Ts = { Mat::zeros(3,1, CV_64FC1), t }; */
   /* vector<Mat> dist_coefficientss(2, dist_coefficients); */

   /* pointsImg.resize(NCAMS); */
   /* for(int i=0; i<NCAMS; i++) pointsImg[i].resize(NPOINTS); */
   /* for (int i = 0; i < NPOINTS; i++) */ 
   /* { */
   /*    pointsImg[0][i] = Point2d(imgpts1_masked[i].x, imgpts1_masked[i].y); */
   /*    pointsImg[1][i] = Point2d(imgpts2_masked[i].x, imgpts2_masked[i].y); */
   /* } */
   /*  cvsba::Sba sba; */
   /*   sba.run(points3D, pointsImg, visibility, camera_matrices, Rs, Ts, dist_coefficientss); */

   /*   cout<<"Initial error="<<sba.getInitialReprjError()<<". "<< */
   /*              "Final error="<<sba.getFinalReprjError()<<endl; */

   cout << "%===============================================%" << endl;
}
//--------------------------------------【main() function】-----------------------------------------
//          Description: entry point of the console application; execution starts here
//-----------------------------------------------------------------------------------------------
int main()
{
	//【0】Change the console text color
	system("color 5F"); 

	ShowHelpText();

	//【1】Load the image, display it and convert it to grayscale
	Mat trainImage = imread("1.jpg"), trainImage_gray;
	imshow("Original image",trainImage);
	cvtColor(trainImage, trainImage_gray, CV_BGR2GRAY);

	//【2】Detect SIFT keypoints and compute descriptors for the training image
	vector<KeyPoint> train_keyPoint;
	Mat trainDescription;
	SiftFeatureDetector featureDetector;
	featureDetector.detect(trainImage_gray, train_keyPoint);
	SiftDescriptorExtractor featureExtractor;
	featureExtractor.compute(trainImage_gray, train_keyPoint, trainDescription);

	//【3】Brute-force matching based on the descriptors
	BFMatcher matcher;
	vector<Mat> train_desc_collection(1, trainDescription);
	matcher.add(train_desc_collection);
	matcher.train();

	//【4】Create the video capture object and a frame counter
	VideoCapture cap(0);
	unsigned int frameCount = 0;// frame counter

	//【5】Loop until the 'q' key is pressed
	while(char(waitKey(1)) != 'q')
	{
		//<1> Parameter setup
		double time0 = static_cast<double>(getTickCount( ));// record the start time
		Mat captureImage, captureImage_gray;
		cap >> captureImage;// grab a frame from the camera into captureImage
		if(captureImage.empty())
			continue;

		//<2> Convert the image to grayscale
		cvtColor(captureImage, captureImage_gray, CV_BGR2GRAY);

		//<3> Detect SIFT keypoints and compute descriptors for the test image
		vector<KeyPoint> test_keyPoint;
		Mat testDescriptor;
		featureDetector.detect(captureImage_gray, test_keyPoint);
		featureExtractor.compute(captureImage_gray, test_keyPoint, testDescriptor);

		//<4> Match the training and test descriptors
		vector<vector<DMatch> > matches;
		matcher.knnMatch(testDescriptor, matches, 2);

		//<5> Keep only good matches using Lowe's ratio test
		vector<DMatch> goodMatches;
		for(unsigned int i = 0; i < matches.size(); i++)
		{
			if(matches[i][0].distance < 0.6 * matches[i][1].distance)
				goodMatches.push_back(matches[i][0]);
		}

		//<6> Draw the matches and show them in a window
		Mat dstImage;
		drawMatches(captureImage, test_keyPoint, trainImage, train_keyPoint, goodMatches, dstImage);
		imshow("Match window", dstImage);

		//<7> Print the frame rate
		cout << "\t>Current frame rate: " << getTickFrequency() / (getTickCount() - time0) << endl;
	}

	return 0;
}
Example #14
bool TrackerForProject::filterRANSAC(cv::Mat newFrame_, vector<Point2f> &corners, vector<Point2f> &nextCorners)
{
	int ransacReprojThreshold = 3;

	cv::Mat prev_(prevFrame_(position_));
	cv::Mat new_(newFrame_);

	// detecting keypoints
    SurfFeatureDetector detector;

	detector.detect(prev_, keypoints1);

    vector<KeyPoint> keypoints2;
    detector.detect(new_, keypoints2);

    // computing descriptors
    SurfDescriptorExtractor extractor;
    Mat descriptors1;
    extractor.compute(prev_, keypoints1, descriptors1);
    Mat descriptors2;
    extractor.compute(newFrame_, keypoints2, descriptors2);

    // matching descriptors
    BFMatcher matcher;
    vector<DMatch> matches;
    matcher.match(descriptors1, descriptors2, matches);
	
	std::cout << matches.size() << std::endl;

	vector<Point2f> points1, points2;

    // fill the arrays with the points
    for (int i = 0; i < matches.size(); i++)
    {
		points1.push_back(keypoints1[matches[i].queryIdx].pt);
    }
    for (int i = 0; i < matches.size(); i++)
    {
        points2.push_back(keypoints2[matches[i].trainIdx].pt);
    }

    Mat H = findHomography(Mat(points1), Mat(points2), CV_RANSAC, ransacReprojThreshold);

    Mat points1Projected;
    perspectiveTransform(Mat(points1), points1Projected, H);

	vector<KeyPoint> keypoints3;

	for(int i = 0; i < matches.size(); i++)
	{
		Point2f p1 = points1Projected.at<Point2f>(i); // points1/points1Projected are stored in match order
        Point2f p2 = keypoints2.at(matches[i].trainIdx).pt;
		if(((p2.x - p1.x) * (p2.x - p1.x) +
			(p2.y - p1.y) * (p2.y - p1.y) <= ransacReprojThreshold * ransacReprojThreshold)&& ((p2.x > position_.x - 10) 
			&& (p2.x < position_.x + position_.width + 10) && (p2.y > position_.y - 10) &&(p2.y < position_.y + position_.height + 10)) )
		{
			corners.push_back(keypoints1.at(matches[i].queryIdx).pt);
			nextCorners.push_back(keypoints2.at(matches[i].trainIdx).pt);

			keypoints3.push_back(keypoints2.at(matches[i].trainIdx));
		}		
	}

	for(int i = 0; i < corners.size(); i++)
	{
		corners[i].x += position_.x;
		corners[i].y += position_.y;
	}

	keypoints1 = keypoints3;

	for(int i = 0; i < keypoints1.size(); i++)
	{
		keypoints1[i].pt.x -= position_.x;
		keypoints1[i].pt.y -= position_.y;
	}

    if (keypoints1.empty())
    {
        return false;
    }

    return true;
}
JNIEXPORT void JNICALL Java_org_recg_writehomog_NativeCodeInterface_nativeLoop
(JNIEnv * jenv, jclass, jlong hataddr, jlong gray1, jlong gray2)
{
	clock_t t1, t2;
	t1 = clock();
	homogandtimer *hatinloop = (homogandtimer *) hataddr;
    LOGD("passed just entered nativeloop b4 trying");
    try
    {
    	LOGD("passed just entered the try in nativeloop");
    	LOGD("passed char jenv getutfchars");
    	string homogstring;//(jidentitystr); // <--this one
    	LOGD("passed making jidentitystr");

    	//output the matrices to the Log
    	Mat frame1 = *((Mat *)gray1);
    	Mat frame2 = *((Mat *)gray2);
    	LOGD("passed making mats");

    	int minHessian = 400;

    	//initial variable declaration
    	OrbFeatureDetector detector(minHessian);
    	LOGD("passed making detector");
    	std::vector<KeyPoint> keypoints1, keypoints2;
    	LOGD("passed making keypoints");
    	OrbDescriptorExtractor extractor;
    	LOGD("passed making extractor");
    	Mat descriptors1, descriptors2;
    	LOGD("passed making descriptors");

    	//process first frame
    	detector.detect(frame1, keypoints1);
    	LOGD("passed detecting1");
    	extractor.compute(frame1, keypoints1, descriptors1);
    	LOGD("passed computing1");

    	//process second frame
    	detector.detect(frame2, keypoints2);
    	LOGD("passed detecting2");
    	extractor.compute(frame2, keypoints2, descriptors2);
    	LOGD("passed computing2");

    	//in case frame has no features (eg if all-black from finger blocking lens)
    	if (keypoints1.size() == 0){
    		LOGD("passed keypointssize was zero!!");
			frame1 = frame2.clone();
			keypoints1 = keypoints2;
			descriptors1 = descriptors2;
			//go back to the javacode and continue with the next frame
			return;
    	}

    	LOGD("passed keypointssize not zero!");
    	//Now match the points on the successive images
    	//FlannBasedMatcher matcher;
    	BFMatcher matcher;
    	LOGD("passed creating matcher");
    	std::vector<DMatch> matches;
    	LOGD("passed creating matches");
    	if(descriptors1.empty()){
    		LOGD("passed descriptors1 is empty!");
    	}
    	if(descriptors2.empty()){
    		LOGD("passed descriptors2 is empty!");
    	}
    	LOGD("passed key1 size %d", keypoints1.size());
    	LOGD("passed key2 size %d", keypoints2.size());

    	matcher.match(descriptors1, descriptors2, matches);
    	LOGD("passed doing the matching");

    	//eliminate weaker matches
    	double maxdist = 0;
		double mindist = 100;
		for (int j = 0; j < descriptors1.rows; j++){
			DMatch match = matches[j];
			double dist = match.distance;
			if( dist < mindist ) mindist = dist;
			if( dist > maxdist ) maxdist = dist;
		}

		//build the list of "good" matches
		std::vector<DMatch> goodmatches;
		for( int k = 0; k < descriptors1.rows; k++ ){
			DMatch amatch = matches[k];
			if( amatch.distance <= 3*mindist ){
				goodmatches.push_back(amatch);
			}
		}

	//Now compute homography matrix between the stronger matches
		//-- Localize the object
		std::vector<Point2f> obj;
		std::vector<Point2f> scene;
		if (goodmatches.size() < 4){
			frame1 = frame2.clone();
			keypoints1 = keypoints2;
			descriptors1 = descriptors2;
			return;
		}

		for(int l = 0; l < goodmatches.size(); l++){
		//-- Get the keypoints from the good matches
			DMatch gm1 = goodmatches[l];
			KeyPoint kp1 = keypoints1[gm1.queryIdx];
			obj.push_back(kp1.pt);

			KeyPoint kp2 = keypoints2[gm1.trainIdx]; // trainIdx refers to the second frame's keypoints
			scene.push_back(kp2.pt);
		}

		Mat hmatrix = findHomography(obj,scene,CV_RANSAC);

		hatinloop->writehomogm << hmatrix.at<double>(0,0) << " ";
		LOGD("passed el00  %f",hmatrix.at<double>(0,0));
		LOGD("  ");
		hatinloop->writehomogm << hmatrix.at<double>(0,1) << " ";
		LOGD("passed el01  %f",hmatrix.at<double>(0,1));
		LOGD("  ");
		hatinloop->writehomogm << hmatrix.at<double>(0,2) << " ";

		hatinloop->writehomogm << hmatrix.at<double>(1,0) << " ";
		hatinloop->writehomogm << hmatrix.at<double>(1,1) << " ";
		hatinloop->writehomogm << hmatrix.at<double>(1,2) << " ";

		hatinloop->writehomogm << hmatrix.at<double>(2,0) << " ";
		hatinloop->writehomogm << hmatrix.at<double>(2,1) << " ";
		hatinloop->writehomogm << hmatrix.at<double>(2,2) << " endmatrix\n";

		t2 = clock();
		hatinloop->speedrecord << (float) (t2 - t1)/CLOCKS_PER_SEC << "\n";
		LOGD("passed timingstuff %f",(float) (t2 - t1)/CLOCKS_PER_SEC);

		hatinloop->writehomogm.flush();
		hatinloop->speedrecord.flush();

    }
    catch(cv::Exception& e)
    {
        LOGD("nativeCreateObject caught cv::Exception: %s", e.what());
        jclass je = jenv->FindClass("org/opencv/core/CvException");
        if(!je)
            je = jenv->FindClass("java/lang/Exception");
        jenv->ThrowNew(je, e.what());
    }
    catch (...)
    {
        LOGD("nativeDetect caught unknown exception");
        jclass je = jenv->FindClass("java/lang/Exception");
        jenv->ThrowNew(je, "Unknown exception in JNI nativeloop's code");
    }
    LOGD("Java_org_opencv_samples_facedetect_DetectionBasedTracker_nativeDetect exit");

}
Example #16
/**
 * @function main
 * @brief Main function
 */
int main( int argc, char** argv )
{
  if( argc != 3 )
  { readme(); return -1; }

  Mat img_object = imread( argv[1], IMREAD_GRAYSCALE );
  Mat img_scene = imread( argv[2], IMREAD_GRAYSCALE );

  if( !img_object.data || !img_scene.data )
  { std::cout<< " --(!) Error reading images " << std::endl; return -1; }

  //-- Step 1: Detect the keypoints using SURF Detector
  int minHessian = 100;

  SurfFeatureDetector detector( minHessian );

  std::vector<KeyPoint> keypoints_object, keypoints_scene;

  detector.detect( img_object, keypoints_object );
  detector.detect( img_scene, keypoints_scene );

  //-- Step 2: Calculate descriptors (feature vectors)
  SurfDescriptorExtractor extractor;

  Mat descriptors_object, descriptors_scene;

  extractor.compute( img_object, keypoints_object, descriptors_object );
  extractor.compute( img_scene, keypoints_scene, descriptors_scene );

  //-- Step 3: Matching descriptor vectors using brute force matcher
  BFMatcher matcher = BFMatcher(NORM_L2, false);
  std::vector< DMatch > matches;
  matcher.match( descriptors_object, descriptors_scene, matches );

  double max_dist = 0; double min_dist = 100;

  //-- Quick calculation of max and min distances between keypoints
  for( int i = 0; i < descriptors_object.rows; i++ )
  { double dist = matches[i].distance;
    if( dist < min_dist ) min_dist = dist;
    if( dist > max_dist ) max_dist = dist;
  }

  printf("-- Max dist : %f \n", max_dist );
  printf("-- Min dist : %f \n", min_dist );

  //-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
  std::vector< DMatch > good_matches;

  for( int i = 0; i < descriptors_object.rows; i++ )
  { if( matches[i].distance < 3*min_dist )
    { good_matches.push_back( matches[i]); }
  }

  Mat img_matches;
  drawMatches( img_object, keypoints_object, img_scene, keypoints_scene,
               good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
               vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );


  //-- Localize the object from img_1 in img_2
  std::vector<Point2f> obj;
  std::vector<Point2f> scene;

  for( size_t i = 0; i < good_matches.size(); i++ )
  {
    //-- Get the keypoints from the good matches
    obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
    scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
  }

  Mat H = findHomography( obj, scene, RANSAC );

  //-- Get the corners from the image_1 ( the object to be "detected" )
  std::vector<Point2f> obj_corners(4);
  obj_corners[0] = Point(0,0); obj_corners[1] = Point( img_object.cols, 0 );
  obj_corners[2] = Point( img_object.cols, img_object.rows ); obj_corners[3] = Point( 0, img_object.rows );
  std::vector<Point2f> scene_corners(4);

  perspectiveTransform( obj_corners, scene_corners, H);


  //-- Draw lines between the corners (the mapped object in the scene - image_2 )
  Point2f offset( (float)img_object.cols, 0);
  //line( img_matches, scene_corners[0] + offset, scene_corners[1] + offset, Scalar(0, 255, 0), 4 );
  //line( img_matches, scene_corners[1] + offset, scene_corners[2] + offset, Scalar( 0, 255, 0), 4 );
  //line( img_matches, scene_corners[2] + offset, scene_corners[3] + offset, Scalar( 0, 255, 0), 4 );
  //line( img_matches, scene_corners[3] + offset, scene_corners[0] + offset, Scalar( 0, 255, 0), 4 );

  //-- Show detected matches
  imshow( "Good Matches & Object detection", img_matches );

  waitKey(0);

  return 0;
}
Example #17
static Mat detect_table(Mat &frame, table_detection_params_t& params, control_panel_t& panel, const SubottoReference& reference, const SubottoMetrics &metrics, FrameAnalysis &frame_analysis) {

  dump_time(panel, "cycle", "detect table start");

	const Mat& reference_image = reference.image;
	const Mat& reference_mask = reference.mask;
	auto& reference_metrics = reference.metrics;

  vector< KeyPoint > frame_features, reference_features;
	Mat frame_features_descriptions, reference_features_descriptions;
	tie(frame_features, frame_features_descriptions) = get_features(frame, Mat(), params.frame_features_per_level, params.frame_features_levels);
  tie(reference_features, reference_features_descriptions) = get_features(reference_image, reference_mask, params.reference_features_per_level, params.reference_features_levels);

	vector<vector<DMatch>> matches_groups;

	BFMatcher dm;
	dm.knnMatch(reference_features_descriptions, frame_features_descriptions, matches_groups, params.features_knn, Mat());

	//if(will_show(panel, "table detect", "matches")) {
  if (true) {
    Mat &matches = frame_analysis.detect_table_matches;
    drawMatches(reference_image, reference_features, frame, frame_features, matches_groups, matches);
  }

	vector<Point2f> coarse_from, coarse_to;

	for (auto matches : matches_groups) {
		for (DMatch match : matches) {
			auto f = reference_features[match.queryIdx].pt;
			auto t = frame_features[match.trainIdx].pt;

			coarse_from.push_back(f);
			coarse_to.push_back(t);
		}
	}

	logger(panel, "table detect", INFO) <<
			"reference features: " << reference_features.size() <<
			" frame features: " << frame_features.size() <<
			" matches: " << coarse_from.size() << endl;

	Mat coarse_transform;
	if(coarse_from.size() < 6) {
		coarse_transform = Mat::eye(3, 3, CV_32F);
		logger(panel, "table detect", WARNING) << "phase 1 motion estimation - not enough features!" << endl;
	} else {
		RansacParams ransac_params(6, params.coarse_ransac_threshold, params.coarse_ransac_outliers_ratio, 0.99f);
		float rmse;
		int ninliers;
		coarse_transform = estimateGlobalMotionRansac(coarse_from, coarse_to, MM_SIMILARITY, ransac_params, &rmse, &ninliers);

		logger(panel, "table detect", INFO) <<
				"phase 1 motion estimation - rmse: " << rmse <<
				" inliers: " << ninliers << "/" << coarse_from.size() << endl;

	}

  dump_time(panel, "cycle", "detect table phase 1 finished");

	Mat &warped = frame_analysis.detect_table_after_matching;
	warpPerspective(frame, warped, coarse_transform, reference_image.size(), WARP_INVERSE_MAP | INTER_LINEAR);

	vector<KeyPoint> optical_flow_features;

  // As above
	//PyramidAdaptedFeatureDetector optical_flow_fd(new GoodFeaturesToTrackDetector(params.optical_flow_features_per_level), params.optical_flow_features_levels);
  auto optical_flow_fd = GFTTDetector::create(params.optical_flow_features_per_level);

	optical_flow_fd->detect(reference_image, optical_flow_features);

	vector<Point2f> optical_flow_from, optical_flow_to;
	vector<uchar> status;

	for(KeyPoint kp : optical_flow_features) {
		optical_flow_from.push_back(kp.pt);
	}

	vector<Point2f> good_optical_flow_from, good_optical_flow_to;

	if (!optical_flow_features.empty()) {
    calcOpticalFlowPyrLK(reference_image, warped, optical_flow_from, optical_flow_to, status, noArray());

		for (int i = 0; i < optical_flow_from.size(); i++) {
			if (!status[i]) {
				continue;
			}

			good_optical_flow_from.push_back(optical_flow_from[i]);
			good_optical_flow_to.push_back(optical_flow_to[i]);
		}

		logger(panel, "table detect", INFO) <<
				"detection optical flow features: " << good_optical_flow_from.size() << "/" << optical_flow_from.size() << endl;
	} else {
		logger(panel, "table detect", WARNING) << "detection optical flow - no features!" << endl;
	}

	Mat flow_correction;
	if (good_optical_flow_from.size() < 6) {
		flow_correction = Mat::eye(3, 3, CV_32F);
		logger(panel, "table detect", WARNING) << "detection optical flow - not enough features for flow correction!" << endl;
	} else {
		findHomography(good_optical_flow_from, good_optical_flow_to, RANSAC, params.optical_flow_ransac_threshold).convertTo(
				flow_correction, CV_32F);
	}

	Mat flow_transform = coarse_transform * flow_correction;

  /*logger(panel, "gio", DEBUG) << "flow_transform " << endl << flow_transform << endl;
  logger(panel, "gio", DEBUG) << "referenceToSize(...)" << endl << referenceToSize(reference_metrics, metrics) << endl;
  logger(panel, "gio", DEBUG) << "sizeToReference(...)" << endl << sizeToReference(reference_metrics, metrics) << endl;
  logger(panel, "gio", DEBUG) << "identity" << endl << referenceToSize(reference_metrics, metrics) * sizeToReference(reference_metrics, metrics) << endl;*/
	Mat transform = flow_transform * referenceToSize(reference_metrics, metrics);

  dump_time(panel, "cycle", "detect table phase 2 finished");

	return transform;
}
Example #18
int main(int argc, char **argv) {
	// load image
	Mat img1 = imread("input_1.jpg");
	Mat img2 = imread("input_2.jpg");

	// resize
	resize(img1, img1, Size(640, 480));
	resize(img2, img2, Size(640, 480));

	// to gray (optional)
	//cvtColor(img1, img1, CV_BGR2GRAY);
	//cvtColor(img2, img2, CV_BGR2GRAY);

	// get features
	Ptr<Feature2D> f2d = xfeatures2d::SIFT::create();
	vector<KeyPoint> kp1, kp2;
	Mat dp1, dp2;


	int step = 10; // 10 pixels spacing between kp's

	for (int i = step; i<img1.rows - step; i += step)
	{
		for (int j = step; j<img1.cols - step; j += step)
		{
			// x,y,radius
			kp1.push_back(KeyPoint(float(j), float(i), float(step)));
		}
	}

	for (int i = step; i<img2.rows - step; i += step)
	{
		for (int j = step; j<img2.cols - step; j += step)
		{
			// x,y,radius
			kp2.push_back(KeyPoint(float(j), float(i), float(step)));
		}
	}

	get_features(f2d, img1, kp1, dp1);
	get_features(f2d, img2, kp2, dp2);

	// display keypoints to canvas
	Mat cvs1, cvs2;
	drawKeypoints(img1, kp1, cvs1);
	drawKeypoints(img2, kp2, cvs2);

	// find matches
	BFMatcher matcher;
	std::vector< DMatch > matches;
	matcher.match(dp1, dp2, matches);

	// display matches
	Mat cvs3;
	drawMatches(img1, kp1, img2, kp2, matches, cvs3);

	// show
	imshow("keypoints 1", cvs1);
	imshow("keypoints 2", cvs2);
	imshow("matches", cvs3);
	waitKey(0);
}
Example #19
int main( int argc, char** argv )
{
    if( argc != 4 )
    { readme(); return -1; }
    namespace io = boost::iostreams;
    if(strncmp(argv[1],"detect",6)==0)
    {
        const char* fname_pic = argv[2];
        const char* fname_kps = argv[3];
        Mat img = imread(fname_pic,IMREAD_GRAYSCALE);
        if(!img.data)
        {
            cout<< "Error reading images!" << std::endl;
            return -1;
        }
        Ptr<SIFT> sift_detector = SIFT::create(siftPoints);
        Ptr<SURF> surf_detector = SURF::create(minHessian);
        vector<KeyPoint> surf_keypoints,sift_keypoints;
        Mat sift_descriptors,surf_descriptors;
        sift_detector->detectAndCompute(img, Mat(),sift_keypoints, sift_descriptors);
        surf_detector->detectAndCompute(img, Mat(),surf_keypoints, surf_descriptors);

        ofstream ofs(fname_kps,ios_base::binary);
        {
            io::filtering_streambuf<io::output> out;
            out.push(io::zlib_compressor(io::zlib::best_compression));
            out.push(ofs);
            binary_oarchive oa(out);
            ArchiveHelper<vector<KeyPoint> > sift_archiver(sift_keypoints);
            ArchiveHelper<vector<KeyPoint> > surf_archiver(surf_keypoints);
            ArchiveHelper<Mat> ar1(sift_descriptors);
            ArchiveHelper<Mat> ar2(surf_descriptors);
            oa << sift_archiver;
            oa << surf_archiver;
            oa << ar1;
            oa<<ar2;
        }
        ofs.close();
    }
    else if(strncmp(argv[1],"match",5)==0)
    {
        const char* fname_pic = argv[2];
        const char* fname_kps = argv[3];
        vector<KeyPoint> isift_keypoints,isurf_keypoints,psift_keypoints,psurf_keypoints;
        Mat isift_descriptors,isurf_descriptors;

        ifstream ifs(fname_kps,ios_base::binary);
        {
            io::filtering_streambuf<io::input> in;
            in.push(iostreams::zlib_decompressor());
            in.push(ifs);
            binary_iarchive ia(in);
            ArchiveHelper<vector<KeyPoint> > sift_archiver(isift_keypoints),surf_archiver(isurf_keypoints);
            ArchiveHelper<Mat> ar1(isift_descriptors),ar2(isurf_descriptors);

            ia>>sift_archiver;
            ia>>surf_archiver;
            ia>>ar1;
            ia>>ar2;
        }
        ifs.close();
        Mat img = imread(fname_pic,IMREAD_GRAYSCALE);
        Ptr<SIFT> sift_detector = SIFT::create(siftPoints);
        Ptr<SURF> surf_detector = SURF::create(minHessian);
        Mat psift_descriptors, psurf_descriptors;

        sift_detector->detectAndCompute(img, Mat(),psift_keypoints, psift_descriptors);
        surf_detector->detectAndCompute(img, Mat(),psurf_keypoints, psurf_descriptors);

        BFMatcher matcher;
        vector< DMatch > sift_matches,surf_matches;
        vector<vector<DMatch> > sift_knnMatches,surf_knnMatches;
        matcher.knnMatch(psift_descriptors,isift_descriptors,sift_knnMatches,2);
        matcher.knnMatch(psurf_descriptors,isurf_descriptors,surf_knnMatches,2);

        for( size_t i = 0; i < sift_knnMatches.size(); i++ )
        {
            const DMatch& bestMatch = sift_knnMatches[i][0];
            const DMatch& betterMatch1 = sift_knnMatches[i][1];
            float  distanceRatio = bestMatch.distance / betterMatch1.distance;
            if(distanceRatio<0.61)
            {
                sift_matches.push_back(bestMatch);
            }
        }
        for( size_t i = 0; i < surf_knnMatches.size(); i++ )
        {
            const DMatch& bestMatch = surf_knnMatches[i][0];
            const DMatch& betterMatch1 = surf_knnMatches[i][1];
            float  distanceRatio = bestMatch.distance/betterMatch1.distance;
            if(distanceRatio<0.65)
            {
                surf_matches.push_back(bestMatch);
            }
        }
        printf("-- SIFT KNN Matching rate:%f\n",sift_matches.size()/(0.0+psift_keypoints.size()));
        printf("-- SURF KNN Matching rate:%f\n\n",surf_matches.size()/(0.0+psurf_keypoints.size()));
        //-- Quick calculation of max and min distances between keypoints
        double mx_sift_dist = 0; double mn_sift_dist = 999;
        double mx_surf_dist = 0; double mn_surf_dist = 999;
        for( size_t i = 0; i < sift_matches.size(); i++ )
        {
            double dist = sift_matches[i].distance;
            if( dist < mn_sift_dist ) mn_sift_dist = dist;
            if( dist > mx_sift_dist ) mx_sift_dist = dist;
        }
        for( size_t i = 0; i < surf_matches.size(); i++ )
        {
            double dist = surf_matches[i].distance;
            if( dist < mn_surf_dist ) mn_surf_dist = dist;
            if( dist > mx_surf_dist ) mx_surf_dist = dist;
        }

        std::vector< DMatch > final_sift_matches,final_surf_matches;

        for( size_t i = 0; i < sift_matches.size(); i++ )
        {
            if( sift_matches[i].distance <= max(1.8*mn_sift_dist+1,90.0))
            {
                final_sift_matches.push_back(sift_matches[i]);
            }
        }
        for( size_t i = 0; i < surf_matches.size(); i++ )
        {
            if( surf_matches[i].distance <= max(1.8*mn_surf_dist+0.0016,0.16))
            {
                final_surf_matches.push_back(surf_matches[i]);
            }
        }

        printf("-- SIFT Max dist: %f \n", mx_sift_dist);
        printf("-- SIFT Min dist: %f \n", mn_sift_dist);
        printf("-- SIFT Total matches: %d, good matches: %d\n",sift_matches.size(),final_sift_matches.size());
        printf("-- SIFT Matching rate: %f\n\n",(final_sift_matches.size()+0.0) / sift_matches.size());

        printf("-- SURF Max dist: %f \n", mx_surf_dist);
        printf("-- SURF Min dist: %f \n", mn_surf_dist);
        printf("-- SURF Total matches: %d, good matches: %d\n",surf_matches.size(),final_surf_matches.size());
        printf("-- SURF Matching rate: %f\n\n",(final_surf_matches.size()+0.0) / surf_matches.size());

        Mat img_white(img.rows,img.cols,CV_8UC3,cv::Scalar(255,255,255));
        Mat img_matches1,img_matches2;
        drawMatches(img,psift_keypoints,img_white,isift_keypoints,final_sift_matches,img_matches1);
        //-- Show detected matches
        namedWindow("SIFT Matches",WINDOW_FREERATIO|WINDOW_NORMAL);
        imshow( "SIFT Matches", img_matches1 );

        drawMatches(img,psurf_keypoints,img_white,isurf_keypoints,final_surf_matches,img_matches2);
        //-- Show detected matches
        namedWindow("SURF Matches",WINDOW_FREERATIO|WINDOW_NORMAL);
        imshow( "SURF Matches", img_matches2 );
    }
    else if(strncmp(argv[1],"test",4)==0)
Example #20
//------------------------------------------------------------------------------
String PAN::authenticate(String CWD,String fileoutput){
	Point matchLoc;
	float percentage, threshold;
	float average = 0;
	int count = 0;
	Mat big_image;
	big_image = panimage.img->clone();//big image
	resize(big_image, big_image, Size(2000, 1500));
	if (!big_image.data)
	{
		std::cout << "Error reading images " << std::endl; return"";
	}
	Mat temp, temp1[3];
	if (big_image.channels() >= 2){
		cvtColor(big_image, temp, COLOR_BGR2GRAY);
	}
	//split(temp, temp1);
	big_image = temp.clone();
	/*img_1 = temp2.clone();
	resize(img_2, img_2, Size(600, 400));
	*///-- Step 1: Detect the keypoints using the FAST detector
	vector<KeyPoint> keypoints_big, keypoints_small;
	int minHessian = 200;
	//FeatureDetector * detector = new SURF();
	FastFeatureDetector detector;
	detector.detect(big_image, keypoints_big);
	cout << "big sift done\n\n";

	//-- Step 2: Calculate descriptors (feature vectors)
	int Threshl = 10;
	int Octaves = 3;
	// Octaves: the pyramid layers from which keypoints are extracted
	float PatternScales = 1.0f;
	//declare a variable BRISKD of the type cv::BRISK
	Mat descriptors_2, descriptors_small;
	BRISK BRISKD;

	//BRISKD.detect(img_1, keypoints_1);
	//BRISKD.detect(img_2, keypoints_2);
	BRISKD.compute(big_image, keypoints_big, descriptors_2);

	cout << "big brisk done\n\n";



	int i = 0;
	for ( i = 0; i < 7; i++){
		String path(CWD);
		// build the path of the i-th template image used for matching
		String temp = "win1";
		temp = temp + char(i + 48) + ".jpg";
		path = path + temp;
		Mat find = imread(path, CV_LOAD_IMAGE_UNCHANGED);
		//cout << path << "\n\n";
		if (find.data == NULL){ break; }
		//templateMatch(*panimage.img, find, matchLoc, threshold, percentage);
		//-------------------------------------------------------------------------------------
		if (!find.data)
		{
			std::cout << "Error reading images " << std::endl; return "";
		}

		if (find.channels() >= 2){
			cvtColor(find,find, COLOR_BGR2GRAY);
		}


		//img_1 = temp2.clone();
		resize(find ,find, Size(1200, 600));
		//-- Step 1: Detect the keypoints using the FAST detector
		vector<KeyPoint>  keypoints_small;
		int minHessian = 200;
		detector.detect(find, keypoints_small);
		cout << "small sift done\n\n";

		//-- Step 2: Calculate descriptors (feature vectors)
		int Threshl = 10;
		int Octaves = 3;
		// Octaves: the pyramid layers from which keypoints are extracted
		float PatternScales = 1.0f;
		//declare a variable BRISKD of the type cv::BRISK
		Mat descriptors_small;
		//BRISKD.detect(img_1, keypoints_1);
		//BRISKD.detect(img_2, keypoints_2);
		BRISKD.compute(find, keypoints_small, descriptors_small);
		cout << "brisk done\n\n";

		//-------------------------------------------------------------------------------------
		

		//-- Step 3: Matching descriptor vectors using a brute-force matcher
		//FlannBasedMatcher matcher;

		BFMatcher matcher;
		std::vector< DMatch > matches;
		matcher.match(descriptors_small, descriptors_2, matches);
		cv::Mat all_matches;
		drawMatches(find, keypoints_small, big_image, keypoints_big, matches, all_matches, cv::Scalar::all(-1), cv::Scalar::all(-1), vector<char>(), cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
		
		namedWindow("BRISK", CV_WINDOW_NORMAL);
			imshow("BRISK", all_matches);
		cv::waitKey(0);
		double max_dist = 0; double min_dist = 800;

		//-- Quick calculation of max and min distances between keypoints
		for (int i = 0; i < descriptors_small.rows; i++)
		{
			double dist = matches[i].distance;
			if (dist < min_dist) min_dist = dist;
			if (dist > max_dist) max_dist = dist;
		}

		//-- Keep only "good" matches, i.e. those whose distance is at most
		//-- 1.2*min_dist.
		//-- PS.- radiusMatch can also be used here.
		std::vector< DMatch > good_matches;

		for (int i = 0; i < descriptors_small.rows; i++)
		{
			if (matches[i].distance <= 1.2 * min_dist)		{
				good_matches.push_back(matches[i]);
			}
		}

		//-- Draw only "good" matches
		Mat img_matches;
		drawMatches(find, keypoints_small, big_image, keypoints_big,good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

		//-- Show detected matches
		namedWindow("Good Matches", CV_WINDOW_NORMAL);
		imshow("Good Matches", img_matches);
		waitKey();
		percentage = matches.empty() ? 0.0f : (float)good_matches.size() / (float)matches.size() * 100.0f;

		
		int width, height;
		width = find.size().width; height = find.size().height;
		//cout << i + 1 << "--LOC x=" << matchLoc.x << "  y=" << matchLoc.y << "  % match= " << percentage << "\n";
		if (percentage > 50){
			average += percentage;
			count++;
		}
		fileoutput = fileoutput + to_string(percentage) + "$";
		//cout << percentage << "$";
		//rectangle(find,Rect(matchLoc.x, matchLoc.y, width, height),0,1,4,0); //removed to prevent alteration of the find image
		find.release();
	}
	if (count != 0){
		average /= count;
	}
	else { average = 0; }
	
	//cout << "Authenticity is " << average <<"\n";
	authenticity = average;
	fileoutput = fileoutput + to_string(average) + "$";
	//cout << to_string(average) << "$";
	cout << fileoutput;
	if (i == 0){ cout << "database not loaded"; }
	return fileoutput;
}
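The scoring step inside the loop above reduces to: match BRISK descriptors with a brute-force Hamming matcher, keep the matches whose distance is at most 1.2 times the minimum distance, and report the kept fraction as a percentage. Below is a minimal, self-contained sketch of just that step; the helper name briskMatchPercentage is ours and is not part of the original PAN class.

#include <algorithm>
#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>

// Percentage of "good" matches between a template and a scene descriptor set,
// where "good" means distance <= 1.2 * smallest observed distance.
static float briskMatchPercentage(const cv::Mat& descTemplate, const cv::Mat& descScene)
{
	if (descTemplate.empty() || descScene.empty())
		return 0.0f;
	cv::BFMatcher matcher(cv::NORM_HAMMING); // BRISK descriptors are binary
	std::vector<cv::DMatch> matches;
	matcher.match(descTemplate, descScene, matches);
	if (matches.empty())
		return 0.0f;
	double minDist = matches[0].distance;
	for (size_t i = 1; i < matches.size(); ++i)
		minDist = std::min(minDist, (double)matches[i].distance);
	size_t good = 0;
	for (size_t i = 0; i < matches.size(); ++i)
		if (matches[i].distance <= 1.2 * minDist)
			++good;
	return 100.0f * (float)good / (float)matches.size();
}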
int main(int argc, char* argv[])
{
	//video input
	string videoName("A_kind_of_a_Show.avi");
	VideoCapture capture(videoName);
	if (!capture.isOpened())
	{
		cout << "!capture.isOpened()";
		return -1;
	}

	//path list
	vector<vector<Point2f>> pathList;
	vector<int> kpIdx2pathListIdx;
	
	vector<KeyPoint> kpTrackedPrev;
	Mat desTrackedPrev;
	vector<KeyPoint> kpEdgePrev;
	Mat desEdgePrev;

	//firstFrame init
	Mat firstFrame;
	Mat frame, framePrev;
	capture.read(firstFrame);
	keypointDetectorAnddescriptor.detect(firstFrame, kpTrackedPrev);
	keypointDetectorAnddescriptor.compute(firstFrame, kpTrackedPrev, desTrackedPrev);
	getEdgeKeypoint(firstFrame.cols, firstFrame.rows, 0.25,
		kpTrackedPrev, desTrackedPrev,
		kpEdgePrev, desEdgePrev);
	for (int i = 0; i < kpTrackedPrev.size(); ++i)
	{
		pathList.push_back(vector<Point2f>());
		pathList[i].push_back(kpTrackedPrev[i].pt);
		kpIdx2pathListIdx.push_back(i);
	}
	firstFrame.copyTo(framePrev);

	//video writer
	VideoWriter vw("result.avi", CV_FOURCC('M', 'J', 'P', 'G'), 12, Size(firstFrame.cols, firstFrame.rows));
	if (!vw.isOpened())
		return -1;

	//frame
	vector<KeyPoint> kpCur;
	Mat desCur;
	int frameIdx = 0;

	//processing
	while (capture.read(frame))
	{
		++frameIdx;

		keypointDetectorAnddescriptor.detect(frame, kpCur);
		keypointDetectorAnddescriptor.compute(frame, kpCur, desCur);

		//edge keypoint matching for homography
		vector<Point2f> ptEdgeCurMatched;
		vector<Point2f> ptEdgePrevMatched;
		vector<vector<DMatch>> vvmatchs;
		matcher.knnMatch(desEdgePrev, desCur, vvmatchs, 2);
		for (int i = 0; i < vvmatchs.size(); ++i)
		{
			if (vvmatchs[i][0].distance < vvmatchs[i][1].distance * 0.8)
			{
				ptEdgeCurMatched.push_back(kpCur[vvmatchs[i][0].trainIdx].pt);
				ptEdgePrevMatched.push_back(kpEdgePrev[vvmatchs[i][0].queryIdx].pt);
			}
		}

		//findHomography (fall back to identity if fewer than 4 matched pairs are available)
		Mat h = ptEdgePrevMatched.size() >= 4 ? findHomography(ptEdgePrevMatched, ptEdgeCurMatched, RANSAC) : Mat();
		if (h.empty()) h = Mat::eye(3, 3, CV_64F);
		cout << h << endl;
		
		// camera movement compensation
		for (vector<Point2f>& path : pathList){
			perspectiveTransform(path, path, h);
		}

		Mat warpedframe;
		warpPerspective(framePrev, warpedframe, h, frame.size());
		imshow("frame", frame);
		imshow("prev", framePrev);
		imshow("warpedframe", warpedframe);

		getEdgeKeypoint(frame.cols, frame.rows, 0.25,
			kpCur, desCur,
			kpEdgePrev, desEdgePrev);
		frame.copyTo(framePrev);

		//keypoint tracking for pathlist
		vector<int> kpIdx2pathListIdxTemp;
		vector<KeyPoint> kpTrackedCur;
		Mat desTrackedCur;
		set<int> curMatchedKpIdxSet;
		matcher.knnMatch(desTrackedPrev, desCur, vvmatchs, 2);
		for (int i = 0; i < vvmatchs.size(); ++i)
		{
			if (vvmatchs[i][0].distance < vvmatchs[i][1].distance * 0.6)
			{
				pathList[kpIdx2pathListIdx[i]].push_back(kpCur[vvmatchs[i][0].trainIdx].pt);
				kpTrackedCur.push_back(kpCur[vvmatchs[i][0].trainIdx]);
				desTrackedCur.push_back(desCur.row(vvmatchs[i][0].trainIdx));
				kpIdx2pathListIdxTemp.push_back(kpIdx2pathListIdx[i]);
				curMatchedKpIdxSet.insert(vvmatchs[i][0].trainIdx);
			}
		}
		if (frameIdx%5==0)
		{
			//add new keypoint
			for (int i = 0; i < kpCur.size(); ++i)
			{
				if (curMatchedKpIdxSet.find(i) == curMatchedKpIdxSet.end()){
					kpTrackedCur.push_back(kpCur[i]);
					desTrackedCur.push_back(desCur.row(i));
					pathList.push_back(vector<Point2f>());
					pathList.rbegin()->push_back(kpCur[i].pt);
					kpIdx2pathListIdxTemp.push_back(pathList.size() - 1);
				}
			}
		}

		kpIdx2pathListIdx = kpIdx2pathListIdxTemp;
		kpTrackedPrev = kpTrackedCur;
		desTrackedCur.copyTo(desTrackedPrev);
		Mat show;
		drawPathList(frame, show, pathList);
		imshow("pathlist", show);
		waitKey(1);

		vw << show;
	}
	vw.release();
	//uniform
	for (vector<Point2f>& path : pathList)
	{
		for (Point2f& pt : path)
		{
			pt.x /= firstFrame.cols;
			pt.y /= firstFrame.rows;
		}
	}

	vector<double> motionHist;
	calMotionHist(pathList, motionHist);
	cout << "h : " << endl;
	for (double h : motionHist)
		cout << h << " ";
	cout << endl;
	return 0;
}
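getEdgeKeypoint is not shown in this example. Judging from its call sites (frame width and height, a 0.25 fraction, keypoints and descriptors in, keypoints and descriptors out), it plausibly keeps only keypoints near the frame border so that the homography is estimated from background rather than foreground motion. The sketch below is a hypothetical reconstruction under that assumption, not the original implementation.

#include <vector>
#include <opencv2/core/core.hpp>

// Hypothetical reconstruction of getEdgeKeypoint: keep keypoints (and their
// descriptor rows) that lie within a border band of the frame, assuming the
// border is dominated by background and therefore by pure camera motion.
static void getEdgeKeypointSketch(int cols, int rows, float borderFraction,
	const std::vector<cv::KeyPoint>& kpIn, const cv::Mat& desIn,
	std::vector<cv::KeyPoint>& kpOut, cv::Mat& desOut)
{
	kpOut.clear();
	desOut = cv::Mat();
	const float mx = cols * borderFraction;
	const float my = rows * borderFraction;
	for (size_t i = 0; i < kpIn.size(); ++i)
	{
		const cv::Point2f& p = kpIn[i].pt;
		const bool nearEdge = (p.x < mx) || (p.x > cols - mx) ||
		                      (p.y < my) || (p.y > rows - my);
		if (nearEdge)
		{
			kpOut.push_back(kpIn[i]);
			desOut.push_back(desIn.row((int)i));
		}
	}
}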
int main( int argc, char** argv ) {
    
  if (argc != 2) { 
    cout << "Must provide directory argument.\n";
    return -1; 
  }


  vector<string> files;
  GetFilesInDirectory(files, argv[1]);

  int originalIndex = 0;
  int imgAindex = 0;
  int imgBindex = 0;

  std::set<int> indexesIncluded;
  std::map<int, std::pair<Mat, Mat>> knownRts;  // image index -> (R, t)

  // Find the first two images based on Snavely's method - set originalIndex, imgAindex, imgBindex

  indexesIncluded.insert(imgAindex);
  indexesIncluded.insert(imgBindex);

  while (indexesIncluded.size() != files.size()) {
      // find features in each image, find matches

      // findEssentialMat between the matched points (see the hedged sketch after this example)

      // recoverPose between A and B

      // convert R|t for B using original R|t value for A if we have it. (check knownRts map)

      // add new R|ts to the map for both images

      // triangulatePoints and add to cloud

      // find next B to use based on best match between remaining images (Snavely's method) and an included image.
  }



    // Create images
    string filepath1 = argv[1];
    Image image1 = Image(filepath1);
    string filepath2 = argv[2];
    Image image2 = Image(filepath2);
    
    // Detect keypoints
    FeatureDetectorSIFT siftDetector = FeatureDetectorSIFT();
    vector<KeypointDescriptor> keypoints1 = siftDetector.detect(image1);
    vector<KeypointDescriptor> keypoints2 = siftDetector.detect(image2);

    // Convert descriptors back to cv keypoints :(
    vector<KeyPoint> sift_keypoints1(keypoints1.begin(), keypoints1.end());
    vector<KeyPoint> sift_keypoints2(keypoints2.begin(), keypoints2.end());

    //STUFF FROM THE OPEN CV EXAMPLE BELOW
    // https://github.com/npinto/opencv/blob/master/samples/cpp/matcher_simple.cpp
    cv::Ptr<Feature2D> f2d = xfeatures2d::SIFT::create();
    
    Mat descriptors1, descriptors2; 
    f2d->compute(image1.matrix, sift_keypoints1, descriptors1);
    f2d->compute(image2.matrix, sift_keypoints2, descriptors2);
    
    BFMatcher matcher;
    vector<DMatch> matches;
    matcher.match(descriptors1, descriptors2, matches);
    
    vector<Point2f> ptList1;
    vector<Point2f> ptList2;
    
    vector<int> queryIdxs;
    vector<int> trainIdxs;
    
    for (vector<DMatch>::size_type i = 0; i != matches.size(); i++){
        queryIdxs.push_back(matches[i].queryIdx);
        trainIdxs.push_back(matches[i].trainIdx);
    }
    
    KeyPoint::convert(sift_keypoints1, ptList1, queryIdxs);
    KeyPoint::convert(sift_keypoints2, ptList2, trainIdxs);
    
    vector<uchar> funOut;
    
    // estimate the fundamental matrix with RANSAC (method code 8 == FM_RANSAC), keeping the inlier mask
    Mat F = findFundamentalMat(ptList1, ptList2, FM_RANSAC, 3, 0.99, funOut);
    
    vector<int> funOutInt(funOut.begin(), funOut.end());
    
    vector<DMatch> filteredMatches;
    for (vector<int>::size_type i = 0; i != funOut.size(); i++){
        if (funOutInt[i]==1){
            filteredMatches.push_back(matches[i]);
        }
    }
    
    namedWindow("filtered_matches", 1);
    drawMatches(image1.matrix, sift_keypoints1, image2.matrix, sift_keypoints2, emptyMatches, filtered_matches_matrix, matchColor, pointColor);
    imshow("filtered_matches", filtered_matches_matrix);

    cout << "^C to exit.\n";
    waitKey(0);

  return 0;
}
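The while loop in the last example only lists the incremental structure-from-motion steps as comments. Below is a minimal sketch of one A-B iteration using OpenCV 3's findEssentialMat, recoverPose, and triangulatePoints; the helper name and its parameters (a known intrinsic matrix K, already-matched pixel coordinates ptsA/ptsB, and A's known pose Ra|ta) are our assumptions, not part of the original program.

#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/calib3d/calib3d.hpp>

// One incremental step: estimate the relative pose of image B with respect to
// image A from matched points and triangulate the matched points into 3D.
// All matrices are assumed to be CV_64F; Ra|ta is identity|zero for the first pair.
// Note that the recovered translation is only defined up to scale.
static void addPairToCloudSketch(const std::vector<cv::Point2f>& ptsA,
    const std::vector<cv::Point2f>& ptsB,
    const cv::Mat& K, const cv::Mat& Ra, const cv::Mat& ta,
    cv::Mat& Rb, cv::Mat& tb, cv::Mat& points3D)
{
    // Essential matrix with RANSAC, then the relative pose of B w.r.t. A
    cv::Mat mask;
    cv::Mat E = cv::findEssentialMat(ptsA, ptsB, K, cv::RANSAC, 0.999, 1.0, mask);
    cv::Mat Rrel, trel;
    cv::recoverPose(E, ptsA, ptsB, K, Rrel, trel, mask);

    // Chain onto A's known pose so B is expressed in the common world frame
    Rb = Rrel * Ra;
    tb = Rrel * ta + trel;

    // Projection matrices P = K [R | t], then triangulation
    cv::Mat Pa(3, 4, CV_64F), Pb(3, 4, CV_64F);
    Ra.copyTo(Pa.colRange(0, 3)); ta.copyTo(Pa.col(3)); Pa = K * Pa;
    Rb.copyTo(Pb.colRange(0, 3)); tb.copyTo(Pb.col(3)); Pb = K * Pb;

    cv::Mat points4D;
    cv::triangulatePoints(Pa, Pb, ptsA, ptsB, points4D);
    cv::convertPointsFromHomogeneous(points4D.t(), points3D);
}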