void build_model() { vector<Mat> trainDesc; FeatureDetector *detector = new SurfFeatureDetector(); DescriptorExtractor *extractor = new SurfDescriptorExtractor(); // Generate descriptors from the image db fs::path p = fs::system_complete(IMAGE_DATABASE); fs::directory_iterator end_iter; for(fs::directory_iterator dir_itr(p); dir_itr != end_iter; ++dir_itr) { string img_name(dir_itr->path().string()); Mat img = imread(img_name, CV_LOAD_IMAGE_GRAYSCALE); // feature extraction vector<KeyPoint> keypoints; detector->detect(img, keypoints); // feature description Mat descriptor; extractor->compute(img, keypoints, descriptor); trainDesc.push_back(descriptor); imgNames.push_back(img_name); } // train the model matcher->add(trainDesc); matcher->train(); // Clean up delete detector; delete extractor; }
// Computes `descriptors` for the keypoints in `v` on `image`, using the
// descriptor algorithm selected by _descriptorType:
//   0 = LDB (run on a grayscale copy), 1 = SIFT, 2 = SURF, 3 = ORB,
//   4 = BRIEF, 5 = BRISK, 6 = FREAK, anything else = ORB (fallback).
// Note: `v` may be pruned by compute() for keypoints where no descriptor
// can be produced. The original left `extractor`/`ldb` as uninitialized raw
// pointers and leaked them if compute() threw; stack objects and cv::Ptr
// remove both hazards without changing which extractor runs for each type.
void DetectDescribe::performDescription(const cv::Mat& image,
                                        std::vector<cv::KeyPoint> &v,
                                        cv::Mat & descriptors,
                                        const int _descriptorType) {
    // LDB is not a cv::DescriptorExtractor, so it is handled separately.
    if (_descriptorType == 0) {
        LDB ldb; // stack object: released even if compute() throws
        cv::Mat gray;
        // LDB is fed a grayscale image (the original converted BGR->GRAY too).
        cv::cvtColor(image, gray, CV_BGR2GRAY);
        ldb.compute(gray, v, descriptors);
        return;
    }

    // SIFT was also constructed statically in the original.
    if (_descriptorType == 1) {
        cv::SiftDescriptorExtractor sift;
        sift.compute(image, v, descriptors);
        return;
    }

    // Remaining types share the cv::DescriptorExtractor interface.
    cv::Ptr<cv::DescriptorExtractor> extractor;
    switch (_descriptorType) {
    case 2: extractor = new cv::SurfDescriptorExtractor(); break;
    case 3: extractor = new cv::ORB(); break;
    case 4: extractor = new cv::BriefDescriptorExtractor(); break;
    case 5: extractor = new cv::BRISK(); break;
    case 6: extractor = new cv::FREAK(); break;
    default: extractor = new cv::ORB(); break; // unknown type: fall back to ORB
    }
    extractor->compute(image, v, descriptors);
    // cv::Ptr releases the extractor automatically.
}
// Matches the image at path `query_img` against the trained model, printing
// the best-match name and the end-to-end matching time in milliseconds.
// Uses file-level globals: `detector`, `extractor`, `tv1`/`tv2` (timeval),
// `exec_match`, and MATCHING_METHOD.
void match_img(string &query_img) {
    cout << "image query is " << query_img << endl;
    string image_path = query_img;

    gettimeofday(&tv1, NULL); // start of the timed extract+describe+match span

    // feature extraction
    Mat imgInput = imread(image_path, CV_LOAD_IMAGE_GRAYSCALE);
    if (imgInput.empty()) {
        // Bad path or unreadable image: detect() on an empty Mat would throw,
        // so fail loudly here instead.
        cerr << "match_img: could not read image " << image_path << endl;
        return;
    }
    vector<KeyPoint> features;
    detector->detect(imgInput, features);

    // feature description
    Mat descriptors;
    extractor->compute(imgInput, features, descriptors);
    // Converted to CV_32F before matching -- presumably the matcher is
    // FLANN-based, which requires float descriptors. TODO(review): confirm.
    descriptors.convertTo(descriptors, CV_32F);

    // image matching
    string response = exec_match(descriptors, MATCHING_METHOD);
    gettimeofday(&tv2, NULL);

    long int runtimematching =
        (tv2.tv_sec - tv1.tv_sec) * 1000000 + (tv2.tv_usec - tv1.tv_usec);
    cout << "The matching image is " << response << endl;
    cout << "Image Matching Time: " << fixed << setprecision(2)
         << (double)runtimematching / 1000 << "(ms)" << endl;
}