/*****************************************************************************
 * @brief   : orbFeatureDetect - detect ORB keypoints in an image, draw them
 *            onto the image, and write a small statistics report to a file.
 * @author  : Zhangle
 * @date    : 2014/9/8 11:17
 * @version : ver 1.0
 * @inparam : inputImageName  - path of the image to process
 *            outputImageName - path where the annotated image is saved
 *            outputTxtName   - path where the text report is written
 * @outparam: (none - results go to the two output files)
 *****************************************************************************/
void FeatureDetect::orbFeatureDetect(string inputImageName, string outputImageName, string outputTxtName)
{
    Mat image = imread(inputImageName);
    // Fix: the original passed a possibly empty Mat straight into
    // ORB::detect, which crashes when the input file is missing/unreadable.
    // Report the failure in the output text file and bail out.
    if (image.empty())
    {
        ofstream outTxt(outputTxtName);
        outTxt << "ORB" << endl;
        outTxt << "Error: could not read image " << inputImageName << endl;
        return;
    }

    vector<KeyPoint> keypoints;
    ORB orb;

    // time() has one-second resolution; kept because the report prints the
    // elapsed time in whole seconds.
    time_t beginTime = time(NULL);
    orb.detect(image, keypoints);
    time_t endTime = time(NULL);
    time_t runTime = endTime - beginTime;

    // Draw the keypoints in white directly onto the input image and save it.
    drawKeypoints(image, keypoints, image, Scalar(255, 255, 255));
    imwrite(outputImageName, image);

    // Statistics report; the report strings are kept byte-identical to the
    // original output format.
    ofstream outTxt(outputTxtName);
    outTxt << "ORB" << endl;
    outTxt << "影像尺寸:" << image.cols << " * " << image.rows << endl;
    outTxt << "特征点数目:" << keypoints.size() << "个" << endl;
    outTxt << "提取特征点耗费时间:" << runTime << "s" << endl;
    outTxt << "默认参数设置" << endl;
    outTxt.close();
}
void regressionTest() { assert( dextractor ); // Read the test image. string imgFilename = string(ts->get_data_path()) + FEATURES2D_DIR + "/" + IMAGE_FILENAME; Mat img = imread( imgFilename ); if( img.empty() ) { ts->printf( cvtest::TS::LOG, "Image %s can not be read.\n", imgFilename.c_str() ); ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA ); return; } vector<KeyPoint> keypoints; FileStorage fs( string(ts->get_data_path()) + FEATURES2D_DIR + "/keypoints.xml.gz", FileStorage::READ ); if( fs.isOpened() ) { read( fs.getFirstTopLevelNode(), keypoints ); Mat calcDescriptors; double t = (double)getTickCount(); dextractor->compute( img, keypoints, calcDescriptors ); t = getTickCount() - t; ts->printf(cvtest::TS::LOG, "\nAverage time of computing one descriptor = %g ms.\n", t/((double)getTickFrequency()*1000.)/calcDescriptors.rows); if( calcDescriptors.rows != (int)keypoints.size() ) { ts->printf( cvtest::TS::LOG, "Count of computed descriptors and keypoints count must be equal.\n" ); ts->printf( cvtest::TS::LOG, "Count of keypoints is %d.\n", (int)keypoints.size() ); ts->printf( cvtest::TS::LOG, "Count of computed descriptors is %d.\n", calcDescriptors.rows ); ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT ); return; } if( calcDescriptors.cols != dextractor->descriptorSize() || calcDescriptors.type() != dextractor->descriptorType() ) { ts->printf( cvtest::TS::LOG, "Incorrect descriptor size or descriptor type.\n" ); ts->printf( cvtest::TS::LOG, "Expected size is %d.\n", dextractor->descriptorSize() ); ts->printf( cvtest::TS::LOG, "Calculated size is %d.\n", calcDescriptors.cols ); ts->printf( cvtest::TS::LOG, "Expected type is %d.\n", dextractor->descriptorType() ); ts->printf( cvtest::TS::LOG, "Calculated type is %d.\n", calcDescriptors.type() ); ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT ); return; } // TODO read and write descriptor extractor parameters and check them Mat validDescriptors = readDescriptors(); if( 
!validDescriptors.empty() ) compareDescriptors( validDescriptors, calcDescriptors ); else { if( !writeDescriptors( calcDescriptors ) ) { ts->printf( cvtest::TS::LOG, "Descriptors can not be written.\n" ); ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA ); return; } } } else { ts->printf( cvtest::TS::LOG, "Compute and write keypoints.\n" ); fs.open( string(ts->get_data_path()) + FEATURES2D_DIR + "/keypoints.xml.gz", FileStorage::WRITE ); if( fs.isOpened() ) { ORB fd; fd.detect(img, keypoints); write( fs, "keypoints", keypoints ); } else { ts->printf(cvtest::TS::LOG, "File for writting keypoints can not be opened.\n"); ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA ); return; } } }
int main(int, char**) { char* imagesDirectory = "../../data/Louvre/samples/"; string descriptorsDirectory = "../../descriptors/Louvre/FULL/"; string vocFileName = "../../descriptors/words/ORB+ORB+100000.yml"; string histsFileName = "../../descriptors/words/samples-hists100000-bis.yml"; string upFileName = "../../descriptors/words/ORB1000+ORB+1000.yml"; string indexFileName = "../../descriptors/words/indexes.yml"; string matsFileName = "../..//descriptors/words/mats.yml"; //pre-compute: do this once //computeCenters(imagesDirectory, descriptorsDirectory, vocFileName, 100000); //bag(imagesDirectory, descriptorsDirectory, vocFileName, histsFileName); //upToBottom = bottomUp2(500, 2, vocFileName, upFileName, matsFileName, indexFileName); vector<Mat> mats; Mat upToBottom; FileStorage ifs(indexFileName, FileStorage::READ); ifs["index"] >> upToBottom; ifs.release(); FileStorage mfs(matsFileName, FileStorage::READ); read(mfs["mats"], mats); mfs.release(); //The histograms SparseMat hists; FileStorage fs(histsFileName, FileStorage::READ); fs["hists"] >> hists; fs.release(); //BOW //IndexParams* indexParams = new LshIndexParams(6, 12, 1); //Ptr<DescriptorMatcher> matcher(new FlannBasedMatcher(indexParams)); //Ptr<DescriptorExtractor> descex(new ORB(1000)); //Ptr<DescriptorMatcher> matcher(new BFMatcher(NORM_HAMMING)); //Mat vocabulary; //FileStorage voc(vocFileName, FileStorage::READ); //voc["centers"] >> vocabulary; //voc.release(); //vector<Mat> vocs; //vocs.push_back(vocabulary); /*matcher->add(vocs); matcher->train();*/ //BOWImgDescriptorExtractor bow(descex, matcher); //bow.setVocabulary(vocabulary); vector<KeyPoint> kp; vector<vector<int>> hist; const int numberOfInput = 10; string inputs[numberOfInput] = { "../input/cc.jpg", "../input/ex2.jpg", "../input/woman1.jpg", "../input/woman2.jpg", "../input/liberte-glass.jpg", "../input/chartres-input.jpg", "../input/corot-pearl-input.jpg", "../input/meduse.jpg", "../input/lebrun-input.jpg", "../input/lebrun2-input.jpg"}; Mat 
up; FileStorage u(upFileName, FileStorage::READ); u["centers"] >> up; u.release(); cout << up.cols << " " << up.rows << " " << up.type(); for(int k=0; k<numberOfInput; k++) { clock_t start = clock(); cout << inputs[k] << endl; Mat input = imread(inputs[k], CV_LOAD_IMAGE_GRAYSCALE); if(input.empty()) exit(-1); Mat descriptors; vector<KeyPoint> keypoints; ORB orb; orb.detect(input, keypoints); orb.compute(input, keypoints, descriptors); vector<DMatch> upMatches; BFMatcher(NORM_HAMMING).match(descriptors, up, upMatches); cout << ( clock() - start ) / (double) CLOCKS_PER_SEC << endl; vector<vector<int> > hist; for(int i=0; i<100000; i++) hist.push_back(vector<int>()); for(int i=0; i<descriptors.rows; i++) { vector<DMatch> bottomMatch; BFMatcher(NORM_HAMMING).match(descriptors.row(i), mats[upMatches[i].trainIdx], bottomMatch); hist[upToBottom.at<INT32>(upMatches[i].trainIdx,bottomMatch[0].trainIdx)].push_back(i); } cout << ( clock() - start ) / (double) CLOCKS_PER_SEC << " ... second matching" << endl; Mat nhist = normalise(hist); vector<pair<int, float> > imageResponse; Mat answer = mul(hists, nhist); for(int i=0; i<answer.rows; i++) { imageResponse.push_back(pair<int, float>(i, answer.at<float>(i))); } std::sort(imageResponse.begin(), imageResponse.end(), irComparer2); cout << ( clock() - start ) / (double) CLOCKS_PER_SEC << endl; cout << imageResponse[0].first << " " << imageResponse[1].first << " " << imageResponse[2].first << " " << imageResponse[3].first << " " << imageResponse[4].first << endl; //vector<pair<int, float> > pss = paintingSearch(input, hists, bow, kp, hist); //for(unsigned i=0; i<5; i++) // cout << (pss[i]).first << endl: cin.ignore(); } return 0; }
void processImage(ORB& detector, std::vector<KeyPoint> keypoints_object, Mat& descriptors_object, Mat& img_object, Mat& img_scene) { //-- Step 1: Detect the keypoints using ORB Detector std::vector<KeyPoint> keypoints_scene; detector.detect( img_scene, keypoints_scene ); //-- Step 2: Calculate descriptors (feature vectors) Mat descriptors_scene; detector.compute( img_scene, keypoints_scene, descriptors_scene ); descriptors_scene.convertTo(descriptors_scene, CV_32F); if(descriptors_scene.empty()) { //throw std::runtime_error("Missing Scene Descriptors"); imshow( "Camera", img_scene ); return; } //-- Step 3: Matching descriptor vectors using FLANN matcher FlannBasedMatcher matcher; std::vector< DMatch > matches; matcher.match( descriptors_object, descriptors_scene, matches ); // m1 - main match / m2 - closest neighbor //std::vector< std::vector< DMatch > > matches; //matcher.knnMatch( descriptors_object, descriptors_scene, matches, k ); std::sort(matches.begin(), matches.end(), [](const DMatch& l, const DMatch& r) -> bool { return l.distance < r.distance; }); //-- Quick calculation of max and min distances between keypoints //double max_dist = matches[matches.size()-1].distance; //printf("-- Max dist : %f \n", max_dist ); double min_dist = std::min(200.0f, matches[0].distance); //printf("-- Min dist : %f \n", min_dist ); /* double average = 0; for( int i = 0; i < descriptors_object.rows; i++ ) { average += matches[i].distance; } average /= descriptors_object.rows; printf("-- Avg dist : %f \n", average); double sd = 0; for( int i = 0; i < descriptors_object.rows; i++ ) { sd += pow((matches[i].distance - average), 2.0f); } sd /= descriptors_object.rows; printf("-- Avg dist : %f \n", sd ); */ //-- Draw only "good" matches - top N matches std::vector< DMatch > good_matches; for( unsigned i = 0; i < matches.size() && i < MAX_MATCH_COUNT; ++i ) { if(matches[i].distance < 1.15 * min_dist) { good_matches.push_back(matches[i]); } } Mat img_matches; drawMatches( img_object, 
keypoints_object, img_scene, keypoints_scene, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS ); if(match_list.front()) { --match_count; } match_list.pop_front(); if(good_matches.size() > MIN_MATCH_COUNT) { match_list.push_back(true); ++match_count; //std::cout << "-- matches : " << good_matches.size() << std::endl; std::vector<Point2f> scene; for( unsigned i = 0; i < good_matches.size(); i++ ) { //-- Get the keypoints from the good matches scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt ); } std::vector<Point2f> hull; convexHull(scene, hull); for(unsigned i = 0; i < hull.size()-1; ++i) { line( img_matches, hull[i] + Point2f( img_object.cols, 0), hull[i+1] + Point2f( img_object.cols, 0), Scalar(0, 255, 0), 4 ); } line( img_matches, hull[hull.size()-1] + Point2f( img_object.cols, 0), hull[0] + Point2f( img_object.cols, 0), Scalar(0, 255, 0), 4 ); } else { match_list.push_back(false); } if(match_count >= MATCH_THRESHOLD) { std::cout << "MATCH DETECTED: " << match_count << std::endl; } else { std::cout << "NO MATCH: " << match_count << std::endl; } //-- Show detected matches imshow( "Camera", img_matches ); }