void trainSURFMatcher(const KeyframeVector& keyframes, cv::FlannBasedMatcher& matcher)
{
    std::vector<cv::Mat> descriptors_vector;

    for (unsigned int kf_idx = 0; kf_idx < keyframes.size(); ++kf_idx)
    {
        const RGBDKeyframe& keyframe = keyframes[kf_idx];
        descriptors_vector.push_back(keyframe.descriptors);
    }

    matcher.add(descriptors_vector);
    matcher.train();
}
void trainSURFMatcher_Iterative(const KeyframeVector& keyframes, u_int min, u_int max, cv::FlannBasedMatcher& matcher)
{
    std::vector<cv::Mat> descriptors_vector;

    for (unsigned int kf_idx = min; kf_idx < max; ++kf_idx)
    {
        const RGBDKeyframe& keyframe = keyframes[kf_idx];
        descriptors_vector.push_back(keyframe.descriptors);
    }

    matcher.add(descriptors_vector);
    matcher.train();
}
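
// The following helper is not part of the original code; it is a minimal sketch of how a
// cv::FlannBasedMatcher trained by the functions above is typically queried. The parameter
// query_descriptors and the 0.7 distance-ratio threshold are illustrative assumptions.
// knnMatch() searches the descriptor collection passed to add()/train(), and DMatch::imgIdx
// identifies which training image (keyframe) a match came from.
std::vector<cv::DMatch> matchAgainstTrainedMatcher(const cv::Mat& query_descriptors,
                                                   cv::FlannBasedMatcher& matcher)
{
    // Two nearest neighbors per query descriptor.
    std::vector<std::vector<cv::DMatch> > knn_matches;
    matcher.knnMatch(query_descriptors, knn_matches, 2);

    // Keep only matches that pass Lowe's distance-ratio test.
    std::vector<cv::DMatch> good_matches;
    for (size_t i = 0; i < knn_matches.size(); ++i)
    {
        if (knn_matches[i].size() == 2 &&
            knn_matches[i][0].distance < 0.7f * knn_matches[i][1].distance)
        {
            good_matches.push_back(knn_matches[i][0]);
        }
    }
    return good_matches;
}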
/*****************************************************************************
// MAIN
*/
int main(int argc, const char* argv[])
{
    //*************************************************************************
    // 1. Read the input files
    // This code reads the arguments from argv, which is expected to contain the feature type,
    // the command, and the paths of the reference and query databases.
    std::string teachdb_folder, querydb_folder;

    if (argc > 2)
    {
        std::string command = argv[2];
        std::string type = argv[1];

        if (type.compare("-SIFT") == 0)
        {
            _ftype = SIFT;
            std::cout << "NFT with SIFT feature detector and extractor." << std::endl;
        }
        else if (type.compare("-SURF") == 0)
        {
            _ftype = SURF;
            std::cout << "NFT with SURF feature detector and extractor." << std::endl;
        }
        else if (type.compare("-ORB") == 0)
        {
            _ftype = ORB;
            std::cout << "NFT with ORB feature detector and extractor." << std::endl;
        }

        if (command.compare("-file") == 0)
        {
            if (argc > 4)
            {
                teachdb_folder = argv[3];
                querydb_folder = argv[4];
                run_video = false;
            }
            else
            {
                std::cout << "No folder with query or reference images has been specified" << std::endl;
                std::cout << "Call: ./HCI571X_Feature_Matching -file folder_reference folder_query" << std::endl;
                system("pause");
                exit(0);
            }
        }
        else if (command.compare("-video") == 0)
        {
            run_video = true;
            if (argc > 4)
            {
                teachdb_folder = argv[3];
                device_id = atoi(argv[4]);
            }
        }
    }
    else
    {
        std::cout << "No command has been specified. Use -file or -video" << std::endl;
        system("pause");
        exit(0);
    }

    // Read the filenames inside the teach (reference) database directory.
    std::vector<std::string> ref_filename;
    readDirFiles(teachdb_folder, &ref_filename);

    // Read the filenames inside the query database directory.
    std::vector<std::string> query_filename;
    readDirFiles(querydb_folder, &query_filename);

    //*************************************************************************
    // 2. Create a keypoint detector and a descriptor extractor
    // Depending on the command-line argument, SIFT, SURF, or ORB is used.

    // Keypoint detector
    if (_ftype == SIFT)      _detector = new cv::SiftFeatureDetector(_num_feature, _octaves, _contrast_threshold, _edge_threshold, _sigma);
    else if (_ftype == SURF) _detector = new cv::SurfFeatureDetector(_hessianThreshold, _surf_Octaves, _surf_OctaveLayers, _surf_extended, _surf_upright);
    else if (_ftype == ORB)  _detector = new cv::OrbFeatureDetector(1000);

    // Descriptor extractor
    if (_ftype == SIFT)      _extractor = new cv::SiftDescriptorExtractor(_num_feature, _octaves, _contrast_threshold, _edge_threshold, _sigma);
    else if (_ftype == SURF) _extractor = new cv::SurfDescriptorExtractor(_hessianThreshold, _surf_Octaves, _surf_OctaveLayers, _surf_extended, _surf_upright);
    else if (_ftype == ORB)  _extractor = new cv::OrbDescriptorExtractor(1000);

    // Check whether there are any files in the reference database list.
    if (ref_filename.size() == 0)
    {
        std::cout << "STOP: no files in the reference database!!! Specify a folder or a set of files." << std::endl;
        system("pause");
        return -1;
    }

    //*************************************************************************
    // 3. Init the database
    // The code reads all the images listed in ref_filename, detects keypoints, extracts
    // descriptors, and stores them in the database variables.
    init_database(std::string(teachdb_folder), ref_filename);

    //*************************************************************************
    // 4. Train the matcher
    // The data of the reference database (_descriptorsRefDB) is added to the feature matcher
    // and the matcher is trained.
    _matcher.add(_descriptorsRefDB);
    _matcher.train();

    // Read the number of reference images in the database.
    _num_ref_images = _matcher.getTrainDescriptors().size();

    //*************************************************************************
    // 5. Run the matching
    // ... for images read from files
    if (!run_video)
    {
        if (_mtype == KNN)             run_matching(querydb_folder, query_filename);
        else if (_mtype == BRUTEFORCE) run_bf_matching(querydb_folder, query_filename);
        else
        {
            std::cout << "No matching type specified. Specify a matching type" << std::endl;
            system("pause");
        }
    }
    else // ... or for images from a video camera
    {
        if (_mtype == KNN)             run_matching(device_id);
        else if (_mtype == BRUTEFORCE) run_bf_matching(device_id);
        else
        {
            std::cout << "No matching type specified. Specify a matching type" << std::endl;
            system("pause");
        }
    }

    //*************************************************************************
    // 6. Cleanup: release the keypoint detector and feature descriptor extractor.
    _extractor.release();
    _detector.release();

    return 1;
}
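
// Hypothetical sketch, not the original run_matching() implementation (which is defined
// elsewhere in this project): it outlines one plausible per-image flow for step 5, assuming
// the globals _detector, _extractor, _matcher, and _num_ref_images used above. The grayscale
// loading, the 0.7 ratio threshold, and the per-reference-image vote count are illustrative
// choices, not values taken from the original code.
void run_matching_sketch(const std::string& querydb_folder,
                         const std::vector<std::string>& query_filename)
{
    for (size_t i = 0; i < query_filename.size(); ++i)
    {
        // Load the query image (as grayscale) and compute its keypoints and descriptors.
        cv::Mat query_image = cv::imread(querydb_folder + "/" + query_filename[i], 0);
        if (query_image.empty()) continue;

        std::vector<cv::KeyPoint> query_keypoints;
        cv::Mat query_descriptors;
        _detector->detect(query_image, query_keypoints);
        _extractor->compute(query_image, query_keypoints, query_descriptors);

        // k-nearest-neighbor search against the trained reference database.
        std::vector<std::vector<cv::DMatch> > knn_matches;
        _matcher.knnMatch(query_descriptors, knn_matches, 2);

        // Ratio test, then vote for the reference image that collects the most good matches.
        std::vector<int> votes(_num_ref_images, 0);
        for (size_t m = 0; m < knn_matches.size(); ++m)
        {
            if (knn_matches[m].size() == 2 &&
                knn_matches[m][0].distance < 0.7f * knn_matches[m][1].distance)
            {
                votes[knn_matches[m][0].imgIdx]++;
            }
        }

        int best_ref = 0;
        for (int r = 1; r < (int)votes.size(); ++r)
            if (votes[r] > votes[best_ref]) best_ref = r;

        std::cout << query_filename[i] << " -> best reference image: " << best_ref << std::endl;
    }
}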