void build_model() {
	vector<Mat> trainDesc;
	FeatureDetector *detector = new SurfFeatureDetector();
	DescriptorExtractor *extractor = new SurfDescriptorExtractor();

	// Generate descriptors from the image db
	fs::path p = fs::system_complete(IMAGE_DATABASE);
	fs::directory_iterator end_iter;
	for (fs::directory_iterator dir_itr(p); dir_itr != end_iter; ++dir_itr) {
		string img_name(dir_itr->path().string());
		Mat img = imread(img_name, CV_LOAD_IMAGE_GRAYSCALE);
		if (img.empty())
			continue;  // skip unreadable entries (e.g. subdirectories, non-images)

		// feature extraction
		vector<KeyPoint> keypoints;
		detector->detect(img, keypoints);

		// feature description
		Mat descriptor;
		extractor->compute(img, keypoints, descriptor);
		trainDesc.push_back(descriptor);
		imgNames.push_back(img_name);
	}

	// train the model
	matcher->add(trainDesc);
	matcher->train();

	// Clean up
	delete detector;
	delete extractor;
}
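build_model() relies on surrounding state that the snippet does not show (matcher, imgNames, IMAGE_DATABASE, and the detector/extractor later used by match_img). A minimal sketch of that assumed context follows; the FLANN choice, the database path, and the use of globals rather than class members are assumptions, not from the original.

// Hypothetical context for build_model(); only the names are taken from the snippet.
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/features2d.hpp>   // SURF lives here in OpenCV 2.4
#include <boost/filesystem.hpp>

namespace fs = boost::filesystem;
using namespace cv;
using namespace std;

static const string IMAGE_DATABASE = "./image_db";   // assumed path
static vector<string> imgNames;                      // filled by build_model()
static Ptr<DescriptorMatcher> matcher =
		DescriptorMatcher::create("FlannBased");     // assumed: FLANN suits SURF's float descriptors
static Ptr<FeatureDetector> detector = new SurfFeatureDetector();          // used by match_img()
static Ptr<DescriptorExtractor> extractor = new SurfDescriptorExtractor(); // used by match_img()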
Example #2
void DetectDescribe::performDescription(const cv::Mat& image,
		std::vector<cv::KeyPoint> &v, cv::Mat & descriptors,
		const int _descriptorType) {
	DescriptorExtractor *extractor = 0;  // zero-initialize: the LDB and SIFT cases never assign these
	LDB *ldb = 0;
	cv::SiftDescriptorExtractor extractorSift;

	switch (_descriptorType) {
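	// Selector: 0 = LDB, 1 = SIFT (stack object), 2 = SURF, 3 = ORB, 4 = BRIEF, 5 = BRISK, 6 = FREAK, default = ORB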
	case 0:
		ldb = new LDB();
		break;
	case 1:
		// SIFT is created statically
		break;
	case 2:
		extractor = new cv::SurfDescriptorExtractor();
		break;
	case 3:
		extractor = new cv::ORB();
		break;
	case 4:
		extractor = new cv::BriefDescriptorExtractor();
		break;
	case 5:
		extractor = new cv::BRISK();
		break;
	case 6:
		extractor = new cv::FREAK();
		break;
	default:
		extractor = new cv::ORB();
	}

	if (_descriptorType == 0) {
		cv::Mat dst;
		cv::cvtColor(image, dst, CV_BGR2GRAY);
		ldb->compute(dst, v, descriptors);
		delete ldb;
	} else if (_descriptorType == 1) {
		extractorSift.compute(image, v, descriptors);
	} else {
		extractor->compute(image, v, descriptors);
		delete extractor;
	}
}
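A hedged call-site sketch for performDescription(); the detector choice, the file name, and the assumption that the method is static are illustrative, not from the original.

// Hypothetical usage of DetectDescribe::performDescription():
cv::Mat frame = cv::imread("scene.jpg");     // BGR input, as the LDB branch's cvtColor implies
std::vector<cv::KeyPoint> keypoints;
cv::SiftFeatureDetector siftDetector;
siftDetector.detect(frame, keypoints);       // SIFT converts BGR to gray internally

cv::Mat descriptors;
DetectDescribe::performDescription(frame, keypoints, descriptors, 1);  // 1 selects SIFT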
Example #3
DescriptorExtractor* createDescriptorExtractor(const std::string& descriptorType) 
{
    DescriptorExtractor* extractor = 0;
    if(descriptorType == "SIFT") {
        extractor = new SiftDescriptorExtractor();/*( double magnification=SIFT::DescriptorParams::GET_DEFAULT_MAGNIFICATION(), bool isNormalize=true, bool recalculateAngles=true, int nOctaves=SIFT::CommonParams::DEFAULT_NOCTAVES, int nOctaveLayers=SIFT::CommonParams::DEFAULT_NOCTAVE_LAYERS, int firstOctave=SIFT::CommonParams::DEFAULT_FIRST_OCTAVE, int angleMode=SIFT::CommonParams::FIRST_ANGLE )*/
    }
    else if(descriptorType == "BRIEF") {
        extractor = new BriefDescriptorExtractor();
    }
    else if(descriptorType == "BRISK") {
        extractor = new cv::BRISK();/*brisk default: (int thresh=30, int octaves=3, float patternScale=1.0f)*/
    }
    else if(descriptorType == "FREAK") {
        extractor = new cv::FREAK();
    }
    else if(descriptorType == "SURF") {
        extractor = new SurfDescriptorExtractor();/*( int nOctaves=4, int nOctaveLayers=2, bool extended=false )*/
    }
    else if(descriptorType == "SURF128") {
        extractor = new SurfDescriptorExtractor();/*( int nOctaves=4, int nOctaveLayers=2, bool extended=false )*/
        extractor->set("extended", 1);
    }
#if CV_MAJOR_VERSION > 2 || CV_MINOR_VERSION >= 3
    else if(descriptorType == "ORB") {
        extractor = new OrbDescriptorExtractor();
    }
#endif
    else if(descriptorType == "SIFTGPU") {
      ROS_DEBUG("%s requested as extractor; creating a SURF descriptor extractor as fallback.", descriptorType.c_str());
      extractor = new SurfDescriptorExtractor();/*( int nOctaves=4, int nOctaveLayers=2, bool extended=false )*/
    }
    else {
      ROS_ERROR("No valid descriptor-extractor type given: %s. Using SURF instead.", descriptorType.c_str());
      extractor = createDescriptorExtractor("SURF");
    }
    assert(extractor != 0 && "No extractor could be created");
    return extractor;
}
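A short usage sketch for the factory; the file and detector choices are assumptions, and note that the caller owns the returned raw pointer.

// Hypothetical usage of createDescriptorExtractor():
cv::Mat gray = cv::imread("frame.png", CV_LOAD_IMAGE_GRAYSCALE);
std::vector<cv::KeyPoint> keypoints;
cv::SurfFeatureDetector surfDetector;
surfDetector.detect(gray, keypoints);

DescriptorExtractor* extractor = createDescriptorExtractor("ORB");
cv::Mat descriptors;
extractor->compute(gray, keypoints, descriptors);
delete extractor;   // raw-pointer factory: the caller must free the extractor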
		void match_img(string &query_img) {

			// save the query image into local disk
			// gettimeofday(&tp, NULL);
			// long int timestamp = tp.tv_sec * 1000000 + tp.tv_usec;
			// ostringstream sstream;
			// sstream << timestamp;
			// string image_path = "input-" + sstream.str() + ".jpg";
			// ofstream imagefile(image_path.c_str(), ios::binary);
			// imagefile.write(query_img.c_str(), query_img.size());
			// imagefile.close();
			cout << "image query is " << query_img << endl;
			string image_path = query_img;

			gettimeofday(&tv1, NULL);

			// feature extraction
			Mat imgInput = imread(image_path, CV_LOAD_IMAGE_GRAYSCALE);
			vector<KeyPoint> features;
			detector->detect(imgInput, features);

			// feature description
			Mat descriptors;
			extractor->compute(imgInput, features, descriptors);
			descriptors.convertTo(descriptors, CV_32F);  // FLANN-based matching expects CV_32F descriptors

			// image matching
			string response = exec_match(descriptors, MATCHING_METHOD);
			gettimeofday(&tv2, NULL);

			// tv1..tv2 spans detection, description, and matching
			long int runtimematching = (tv2.tv_sec - tv1.tv_sec) * 1000000 + (tv2.tv_usec - tv1.tv_usec);
			cout << "The matching image is " << response << endl;
			cout << "Query time (detect + describe + match): " << fixed << setprecision(2)
			     << (double)runtimematching / 1000 << " ms" << endl;
		}
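exec_match() is not shown in this example. A minimal sketch of what it might look like, assuming the FLANN matcher and imgNames filled by build_model(), an integer MATCHING_METHOD flag, and a simple per-image vote with Lowe's ratio test; all of these are assumptions, not the original implementation. Requires <map>.

// Hypothetical exec_match(): k-NN match against the trained index, then vote per database image.
string exec_match(const Mat &queryDesc, int /*method*/) {
	vector< vector<DMatch> > knn;
	matcher->knnMatch(queryDesc, knn, 2);

	map<int, int> votes;  // DMatch::imgIdx -> vote count
	for (size_t i = 0; i < knn.size(); ++i) {
		// Lowe's ratio test discards ambiguous matches.
		if (knn[i].size() == 2 && knn[i][0].distance < 0.7f * knn[i][1].distance)
			++votes[knn[i][0].imgIdx];
	}

	int best = -1, bestVotes = 0;
	for (map<int, int>::const_iterator it = votes.begin(); it != votes.end(); ++it)
		if (it->second > bestVotes) { bestVotes = it->second; best = it->first; }

	return best >= 0 ? imgNames[best] : string("no match");
}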
TEST_F(TestRansac, TestDrawInliers) {

  // Load two images and compute feature matches. Draw matches with and without
  // RANSAC.
  if (!FLAGS_ransac_draw_feature_matches) {
    return;
  }

  // Get some noisy feature matches.
  Image image1(test_image1.c_str());
  Image image2(test_image2.c_str());

  image1.Resize(0.25);
  image2.Resize(0.25);

  KeypointDetector detector;
  detector.SetDetector("SIFT");

  KeypointList keypoints1;
  KeypointList keypoints2;
  detector.DetectKeypoints(image1, keypoints1);
  detector.DetectKeypoints(image2, keypoints2);

  // DistanceMetric::Instance().SetMetric(DistanceMetric::Metric::SCALED_L2);
  DescriptorExtractor extractor;
  extractor.SetDescriptor("SIFT");

  std::vector<Feature> features1;
  std::vector<Feature> features2;
  std::vector<Descriptor> descriptors1;
  std::vector<Descriptor> descriptors2;
  extractor.DescribeFeatures(image1, keypoints1, features1, descriptors1);
  extractor.DescribeFeatures(image2, keypoints2, features2, descriptors2);

  FeatureMatcherOptions matcher_options;
  matcher_options.distance_metric = "SCALED_L2";
  NaiveMatcher2D2D feature_matcher;
  feature_matcher.AddImageFeatures(features1, descriptors1);
  feature_matcher.AddImageFeatures(features2, descriptors2);
  PairwiseImageMatchList image_matches;
  feature_matcher.MatchImages(matcher_options, image_matches);

  ASSERT_EQ(1u, image_matches.size());

  // RANSAC the feature matches to get inliers.
  FundamentalMatrixRansacProblem problem;
  problem.SetData(image_matches[0].feature_matches_);

  // Create the ransac solver, set options, and run RANSAC on the problem.
  Ransac<FeatureMatch, FundamentalMatrixRansacModel> solver;
  RansacOptions ransac_options;

  ransac_options.iterations = 5000;
  ransac_options.acceptable_error = 1e-3;
  ransac_options.minimum_num_inliers = 100;
  ransac_options.num_samples = 8;

  solver.SetOptions(ransac_options);
  solver.Run(problem);

  ASSERT_TRUE(problem.SolutionFound());

  drawing::DrawImageFeatureMatches(image1, image2,
                                   image_matches[0].feature_matches_,
                                   "Noisy Matched Features");

  const FeatureMatchList& inliers = problem.Inliers();
  drawing::DrawImageFeatureMatches(image1, image2, inliers,
                                   "Inlier Matched Features");
}
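The test drives a project-specific Ransac<> solver. For orientation, the same inlier-filtering step can be sketched with stock OpenCV, where cv::findFundamentalMat runs RANSAC internally and reports an inlier mask; extracting point pairs from the framework's FeatureMatch type is elided, and the threshold values are illustrative.

// OpenCV sketch of the equivalent inlier filtering (not the framework's API):
#include <opencv2/calib3d/calib3d.hpp>

std::vector<cv::Point2f> pts1, pts2;   // one matched point pair per feature match
// ... fill pts1/pts2 from image_matches[0].feature_matches_ ...

std::vector<uchar> inlier_mask;
cv::Mat F = cv::findFundamentalMat(pts1, pts2, cv::FM_RANSAC,
                                   1.0,    // max epipolar distance in pixels
                                   0.99,   // RANSAC confidence
                                   inlier_mask);
// inlier_mask[i] == 1 marks feature match i as an inlier.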