Example #1
// Collects the SURF descriptors of all keyframes and trains the
// FLANN-based matcher with them.
void trainSURFMatcher(
  const KeyframeVector& keyframes,
  cv::FlannBasedMatcher& matcher)
{
  std::vector<cv::Mat> descriptors_vector;

  // Gather one descriptor matrix per keyframe.
  for (unsigned int kf_idx = 0; kf_idx < keyframes.size(); ++kf_idx)
  {
    const RGBDKeyframe& keyframe = keyframes[kf_idx];
    descriptors_vector.push_back(keyframe.descriptors);
  }
  matcher.add(descriptors_vector);

  // Build the FLANN index over all added descriptors.
  matcher.train();
}
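Once trained, the matcher can answer queries against all keyframes at once. A minimal usage sketch (the helper name matchAgainstKeyframes is hypothetical, and it assumes query_descriptors was produced by the same SURF extractor as the keyframe descriptors):

void matchAgainstKeyframes(const cv::Mat& query_descriptors,
                           cv::FlannBasedMatcher& trained_matcher,
                           std::vector<cv::DMatch>& matches)
{
  // Each returned cv::DMatch carries imgIdx, the index of the keyframe
  // whose descriptor matrix produced the best match.
  trained_matcher.match(query_descriptors, matches);
}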
Example #2
// Variant of trainSURFMatcher that only trains the matcher on the keyframes
// in the index range [min, max).
void trainSURFMatcher_Iterative(
  const KeyframeVector& keyframes, unsigned int min, unsigned int max,
  cv::FlannBasedMatcher& matcher)
{
  std::vector<cv::Mat> descriptors_vector;

  for (unsigned int kf_idx = min; kf_idx < max; ++kf_idx)
  {
    const RGBDKeyframe& keyframe = keyframes[kf_idx];
    descriptors_vector.push_back(keyframe.descriptors);
  }
  matcher.add(descriptors_vector);

  matcher.train();
}
/*****************************************************************************
 // The knn matching with k = 2
 // This code performs the matching and the refinement.
 // @param query_image: the input image
 // @param matches_out: a pointer that stores the output matches. It is necessary for
 //                     pose estimation.
 */
int knn_match(cv::Mat& query_image, std::vector<cv::DMatch>* matches_out)
{
    // variables that hold the query keypoints and query descriptors
    std::vector<cv::KeyPoint>           keypointsQuery;
    cv::Mat                             descriptorQuery;
    
    // Temporary variables for the matching results
    std::vector< std::vector< cv::DMatch> > matches1;
    std::vector< std::vector< cv::DMatch> > matches2;
    std::vector< std::vector< cv::DMatch> > matches_opt1;
    
    
    //////////////////////////////////////////////////////////////////////
    // 1. Detect the keypoints
    // This line detects keypoints in the query image
    _detector->detect(query_image, keypointsQuery);
    
    // If keypoints were found, descriptors are extracted.
    if(keypointsQuery.size() > 0)
    {
        // extract descriptors
        _extractor->compute( query_image, keypointsQuery, descriptorQuery);
        
    }
    
    //////////////////////////////////////////////////////////////////////////////
    // 2. Match the query descriptors against the database descriptors
    // using k-nearest neighbors with k = 2.
    _matcher.knnMatch(descriptorQuery, matches1, 2);
    
#ifdef DEBUG_OUT
    std::cout << "Found " << matches1.size() << " matching feature descriptors out of " << _matcher.getTrainDescriptors().size() << " database descriptors."  << std::endl;
#endif
    
    
    //////////////////////////////////////////////////////////////////////////////
    // 3. Filter the matches.
    // Accept only matches (knn with k = 2) whose neighbors belong to one image.
    // The database tree within _matcher contains descriptors of all input images.
    // We expect both nearest neighbors to belong to the same image;
    // otherwise we remove the match.
    // Along with this, we count which reference image has the highest number of matches.
    // For now, we consider this image to be the searched image.
    
    // initialize the hit counters with 0
    std::vector<int> hits(_num_ref_images, 0);
    
    // The loop runs through all matches and compares the image index
    // imgIdx. The indices must be equal; otherwise the two nearest neighbors
    // belong to two different reference images.
    for (size_t i = 0; i < matches1.size(); i++)
    {
        // knnMatch may return fewer than two neighbors for a row; skip those.
        if (matches1[i].size() < 2) continue;

        // The comparison.
        if(matches1[i].at(0).imgIdx == matches1[i].at(1).imgIdx)
        {
            // we keep it
            matches_opt1.push_back(matches1[i]);
            // and count a hit
            hits[matches1[i].at(0).imgIdx]++;
        }
    }
    
#ifdef DEBUG_OUT
    std::cout << "Optimized " << matches_opt1.size() << " feature descriptors." << std::endl;
#endif
    
    // Now we search for the highest number of hits in our hit array.
    // The variable max_idx keeps the image id.
    // The variable max_value keeps the number of hits.
    int max_idx = -1;
    int max_value = 0;
    for (int i=0; i<_num_ref_images; i++)
    {
#ifdef DEBUG_OUT
        std::cout << "for " << i << " : " << hits[i] << std::endl;
#endif
        if(hits[i]  > max_value)
        {
            max_value = hits[i];
            max_idx = i;
        }
    }

    // If no image collected any hits, there is nothing to cross-match.
    if (max_idx < 0) return -1;
    
    ///////////////////////////////////////////////////////
    // 4. The cross-match
    // This time, we test the database against the query descriptors.
    // The variable max_idx stores the reference image id. Thus, we test only
    // the descriptors that belong to max_idx against the query descriptors.
    _matcher.knnMatch(_descriptorsRefDB[max_idx], descriptorQuery, matches2, 2);
    
    
    ///////////////////////////////////////////////////////
    // 5. Refinement; Ratio test
    // The ratio test only accepts matches that are unambiguous:
    // the best hit must be clearly closer to the query descriptor than the second hit.
    int removed = ratioTest(matches_opt1);
#ifdef DEBUG_OUT
    std::cout << "Removed " << removed << " matched."  << std::endl;
#endif
    
    removed = ratioTest(matches2);
#ifdef DEBUG_OUT
    std::cout << "Removed " << removed << " matched."  << std::endl;
#endif
    
    ///////////////////////////////////////////////////////
    // 6. Refinement; Symmetry test
    // We only accept matches that appear in both knn matchings.
    // It should not matter whether we test the database against the query descriptors
    // or the query descriptors against the database.
    // If we do not find the same correspondence in both directions, we discard the match.
    std::vector<cv::DMatch> symMatches;
    symmetryTest(  matches_opt1, matches2, symMatches);
#ifdef DEBUG_OUT
    std::cout << "Kept " << symMatches.size() << " matches after symetry test test."  << std::endl;
#endif
    
    ///////////////////////////////////////////////////////
    // 7. Refinement; Epipolar constraint
    // We perform an epipolar test using the RANSAC method.
    // matches_out is cleared first so that stale matches are never reported.
    matches_out->clear();
    if(symMatches.size() > 25)
    {
        ransacTest( symMatches, _keypointsRefDB[max_idx], keypointsQuery, *matches_out);
    }

#ifdef DEBUG_OUT
    std::cout << "Kept " << matches_out->size() << " matches after the RANSAC test."  << std::endl;
#endif
    
    ///////////////////////////////////////////////////////
    // 8.  Draw this image on screen.
    cv::Mat out;
    cv::drawMatches(feature_map_database[max_idx]._ref_image , _keypointsRefDB[max_idx], query_image, keypointsQuery, *matches_out, out, cv::Scalar(255,255,255), cv::Scalar(0,0,255));
    
    std::string num_matches_str;
    std::stringstream conv; // std::stringstream replaces the deprecated std::strstream
    conv << matches_out->size();
    conv >> num_matches_str;
    
    std::string text;
    text.append( num_matches_str);
    text.append("( " + _num_ref_features_in_db_str + " total)");
    text.append(" matches were found in reference image ");
    text.append( feature_map_database[max_idx]._ref_image_str);
    
    putText(out, text, cv::Point(20,20),
            cv::FONT_HERSHEY_COMPLEX_SMALL, 1.0, cv::Scalar(0,255,255), 1, CV_AA);
    
    cv::imshow("result", out);
    if (run_video) cv::waitKey(1);
    else cv::waitKey();
    
    
    
    // Release the images
    query_image.release();
    out.release();
    
    
    
    return max_idx;
    
}
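The refinement helpers used above are not part of this listing. First, a plausible sketch of ratioTest, following the standard ratio test described in step 5 (the 0.8 threshold is an assumption; implementations commonly use 0.7 to 0.8):

// Empties ambiguous rows instead of erasing them so indices stay aligned;
// returns the number of removed rows.
int ratioTest(std::vector<std::vector<cv::DMatch> >& matches)
{
    const float ratio = 0.8f; // assumed threshold
    int removed = 0;
    for (size_t i = 0; i < matches.size(); ++i)
    {
        // remove rows with fewer than two neighbors or an unclear best match
        if (matches[i].size() < 2 ||
            matches[i][0].distance > ratio * matches[i][1].distance)
        {
            matches[i].clear();
            ++removed;
        }
    }
    return removed;
}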
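A matching sketch of symmetryTest, which keeps only correspondences found in both matching directions, as step 6 requires:

// matches1: query -> database, matches2: database -> query.
void symmetryTest(const std::vector<std::vector<cv::DMatch> >& matches1,
                  const std::vector<std::vector<cv::DMatch> >& matches2,
                  std::vector<cv::DMatch>& symMatches)
{
    for (size_t i = 0; i < matches1.size(); ++i)
    {
        if (matches1[i].empty()) continue; // emptied by the ratio test
        for (size_t j = 0; j < matches2.size(); ++j)
        {
            if (matches2[j].empty()) continue;
            // the pair must be mirrored in the opposite direction
            if (matches1[i][0].queryIdx == matches2[j][0].trainIdx &&
                matches2[j][0].queryIdx == matches1[i][0].trainIdx)
            {
                symMatches.push_back(matches1[i][0]);
                break;
            }
        }
    }
}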
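Finally, a sketch of ransacTest using cv::findFundamentalMat with RANSAC for the epipolar constraint of step 7 (requires opencv2/calib3d/calib3d.hpp; the 3.0-pixel distance and 0.99 confidence are assumed values):

// keypoints1: reference keypoints (indexed by trainIdx),
// keypoints2: query keypoints (indexed by queryIdx), matching the call above.
void ransacTest(const std::vector<cv::DMatch>& matches,
                const std::vector<cv::KeyPoint>& keypoints1,
                const std::vector<cv::KeyPoint>& keypoints2,
                std::vector<cv::DMatch>& out_matches)
{
    std::vector<cv::Point2f> points1, points2;
    for (size_t i = 0; i < matches.size(); ++i)
    {
        points1.push_back(keypoints1[matches[i].trainIdx].pt);
        points2.push_back(keypoints2[matches[i].queryIdx].pt);
    }
    if (points1.size() < 8) return; // need at least 8 correspondences

    // inlier mask: non-zero for matches consistent with the epipolar geometry
    std::vector<uchar> inliers(points1.size(), 0);
    cv::findFundamentalMat(points1, points2, inliers, CV_FM_RANSAC, 3.0, 0.99);

    for (size_t i = 0; i < inliers.size(); ++i)
        if (inliers[i]) out_matches.push_back(matches[i]);
}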
/*****************************************************************************
 // MAIN
 */
int main(int argc, const char * argv[])
{
    
    
    
    //*************************************************************************
    // 1. Read the input files
    // This code reads the arguments from argv, which is expected to contain the
    // feature type and the paths of the reference and query databases.
    std::string teachdb_folder, querydb_folder;
    if (argc > 2)
    {
        std::string command = argv[2];
        std::string type   = argv[1];
        if(type.compare("-SIFT")== 0)
        {
            _ftype = SIFT;
            std::cout << "NFT with SIFT feature detector and extractor." << std::endl;
        }
        else if(type.compare("-SURF")== 0)
        {
            _ftype = SURF;
            std::cout << "NFT with SURF feature detector and extractor." << std::endl;
        }
        else if(type.compare("-ORB")== 0)
        {
            _ftype = ORB;
            std::cout << "NFT with ORB feature detector and extractor." << std::endl;
        }
        
        
        if(command.compare("-file") == 0)
        {
            if(argc > 4)
            {
                teachdb_folder = argv[3];
                querydb_folder = argv[4];
                run_video = false;
            }
            else
            {
                std::cout << "No folder with query or reference images has been specified" << std::endl;
                std::cout << "Call: ./HCI571X_Feature_Matching -file folder_reference folder_query" << std::endl;
                system("pause");
                exit(0);
            }
            
        }
        else if(command.compare("-video") == 0)
        {
            run_video = true;
            if(argc > 4)
            {
                teachdb_folder = argv[3];
                device_id = atoi(argv[4]);
            }
        }
    }
    else
    {
        std::cout << "No command has been specified. use -file or -video" << std::endl;
        system("pause");
        exit(0);
    }
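    // Example invocations implied by the parsing above (the paths and the
    // device id are placeholders, not taken from the original source):
    //   ./HCI571X_Feature_Matching -SURF -file  <folder_reference> <folder_query>
    //   ./HCI571X_Feature_Matching -ORB  -video <folder_reference> <device_id>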
    
    
    
    // Read the filenames inside the teach database directory.
    std::vector<std::string> ref_filename;
    readDirFiles(teachdb_folder, &ref_filename);
    
    
    // Read the filenames inside the query database directory (only set in file mode).
    std::vector<std::string> query_filename;
    if (!querydb_folder.empty()) readDirFiles(querydb_folder, &query_filename);
    
    
    //*************************************************************************
    // 2. Create a detector and a descriptor extractor
    // Depending on _ftype, the SIFT, SURF, or ORB implementations are used.
    
    // Keypoint detector
    if(_ftype == SIFT)_detector = new cv::SiftFeatureDetector(_num_feature, _octaves, _contrast_threshold, _edge_threshold, _sigma);
    else if(_ftype == SURF)_detector = new cv::SurfFeatureDetector( _hessianThreshold, _surf_Octaves, _surf_OctaveLayers, _surf_extended, _surf_upright );
    else if(_ftype == ORB)_detector = new cv::OrbFeatureDetector(1000);
    
    
    // Descriptor extractor
    if(_ftype == SIFT) _extractor = new cv::SiftDescriptorExtractor(_num_feature, _octaves, _contrast_threshold, _edge_threshold, _sigma);
    else if(_ftype == SURF) _extractor = new cv::SurfDescriptorExtractor( _hessianThreshold, _surf_Octaves, _surf_OctaveLayers, _surf_extended, _surf_upright );
    else if(_ftype == ORB)_extractor = new cv::OrbDescriptorExtractor(1000);
    

	// Check whether files are in the database list. 
	if(ref_filename.size() == 0)
	{
		std::cout << "STOP: no files in the reference database!!! Specify a folder or a set of files." << std::cout;
		system("pause");
		return -1;
	}

    //*************************************************************************
    // 3. Init the database
    // The code reads all the images in ref_filename, detects keypoints, extracts
    // descriptors, and stores them in the database variables.
    init_database(teachdb_folder, ref_filename);
    
    
    //*************************************************************************
    // 4. The data of the database _descriptorsRefDB is added to the feature matcher
    // and the matcher is trained.
    _matcher.add(_descriptorsRefDB);
    _matcher.train();
    
    // Read the number of reference images in the database
    _num_ref_images = _matcher.getTrainDescriptors().size();
    
    
    //*************************************************************************
    // 5. Here we run the matching.
    // for images from files
    if(!run_video)
    {
        if(_mtype == KNN)run_matching( querydb_folder, query_filename);
        else if(_mtype == BRUTEFORCE) run_bf_matching(querydb_folder, query_filename);
        else
        {
            std::cout << "No matching type specified. Specify a matching type" << std::endl;
            system("pause");
        }
        
    }
    else
    {
        // and images from a video camera
        if(_mtype == KNN)run_matching( device_id);
        else if(_mtype == BRUTEFORCE)  run_bf_matching(device_id);
        else
        {
            std::cout << "No matching type specified. Specify a matching type" << std::endl;
            system("pause");
        }
        
        
    }
    
    //*************************************************************************
    // 6. Cleanup: release the keypoint detector and feature descriptor extractor
    _extractor.release();
    _detector.release();
    
    
    return 0;
}