// Match two descriptor sets, optionally enforcing a cross-check (mutual best match) filter.
void match(const cv::Mat& descriptors_0,
           const cv::Mat& descriptors_1,
           std::vector<cv::DMatch>& matches)
{
    switch (filter_)
    {
        case CROSS_CHECK_FILTER:
            crossCheckMatching(descriptors_0, descriptors_1, matches, 1);
            break;
        default:
            simpleMatching(descriptors_0, descriptors_1, matches);
    }
}
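// simpleMatching() and crossCheckMatching() are not shown in this excerpt. A minimal
// sketch of what they typically look like, modeled on OpenCV's
// descriptor_extractor_matcher.cpp sample, is given below. The signatures here take the
// matcher explicitly (as in the calls further down); the member-function variant above
// presumably uses a class-level matcher instead of the first parameter.
#include <opencv2/features2d/features2d.hpp>
#include <vector>

static void simpleMatching(cv::Ptr<cv::DescriptorMatcher>& descriptorMatcher,
                           const cv::Mat& descriptors1, const cv::Mat& descriptors2,
                           std::vector<cv::DMatch>& matches12)
{
    // One-directional nearest-neighbour matching.
    descriptorMatcher->match(descriptors1, descriptors2, matches12);
}

static void crossCheckMatching(cv::Ptr<cv::DescriptorMatcher>& descriptorMatcher,
                               const cv::Mat& descriptors1, const cv::Mat& descriptors2,
                               std::vector<cv::DMatch>& filteredMatches12, int knn = 1)
{
    filteredMatches12.clear();
    std::vector<std::vector<cv::DMatch> > matches12, matches21;
    descriptorMatcher->knnMatch(descriptors1, descriptors2, matches12, knn);
    descriptorMatcher->knnMatch(descriptors2, descriptors1, matches21, knn);
    // Keep a match only if it is also the best match in the reverse direction.
    for (size_t m = 0; m < matches12.size(); m++)
    {
        bool findCrossCheck = false;
        for (size_t fk = 0; fk < matches12[m].size(); fk++)
        {
            const cv::DMatch& forward = matches12[m][fk];
            for (size_t bk = 0; bk < matches21[forward.trainIdx].size(); bk++)
            {
                const cv::DMatch& backward = matches21[forward.trainIdx][bk];
                if (backward.trainIdx == forward.queryIdx)
                {
                    filteredMatches12.push_back(forward);
                    findCrossCheck = true;
                    break;
                }
            }
            if (findCrossCheck) break;
        }
    }
}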
void pkmDetector::update()
{
    if (bSetImageSearch && bSetImageTemplate)
    {
        if (!bInitialized)
        {
            // Do matching between the template and the search image without tracking
            // previous features, since none are initialized yet.
            filteredMatches.clear();
            switch (matcherFilterType)
            {
                case CROSS_CHECK_FILTER:
                    crossCheckMatching(descriptorMatcher, img_template_descriptors,
                                       img_search_descriptors, filteredMatches, 1);
                    break;
                default:
                    simpleMatching(descriptorMatcher, img_template_descriptors,
                                   img_search_descriptors, filteredMatches);
            }

            // Re-index the keypoints based on the found matches.
            vector<int> queryIdxs(filteredMatches.size()), trainIdxs(filteredMatches.size());
            for (size_t i = 0; i < filteredMatches.size(); i++)
            {
                queryIdxs[i] = filteredMatches[i].queryIdx;
                trainIdxs[i] = filteredMatches[i].trainIdx;
            }

            // Build the homography with RANSAC.
            vector<cv::Point2f> points1;
            cv::KeyPoint::convert(img_template_keypoints, points1, queryIdxs);
            vector<cv::Point2f> points2;
            cv::KeyPoint::convert(img_search_keypoints, points2, trainIdxs);
            H12 = findHomography(cv::Mat(points1), cv::Mat(points2),
                                 CV_RANSAC, ransacReprojThreshold);

            // Create a mask of the current inliers based on transform distance.
            vector<char> matchesMask(filteredMatches.size(), 0);
            printf("Matched %d features.\n", (int)filteredMatches.size());

            // Project the template points into the search image via the homography.
            // Although this is a transformation from template to search coordinates,
            // it estimates where each matched point should land, so matches that
            // reproject far from their measured position are treated as outliers.
            cv::Mat points1t;
            perspectiveTransform(cv::Mat(points1), points1t, H12);
            for (size_t i1 = 0; i1 < points1.size(); i1++)
            {
                if (norm(points2[i1] - points1t.at<cv::Point2f>((int)i1, 0)) < 4) // inlier
                {
                    matchesMask[i1] = 1;
                    img_search_points_inliers[1].push_back(points2[i1]);
                }
                else
                {
                    img_search_points_outliers[1].push_back(points2[i1]);
                }
            }

            // Update the bounding box.
            cv::Mat bb;
            perspectiveTransform(cv::Mat(img_template_boundingbox), bb, H12);
            for (int i = 0; i < 4; i++)
            {
                dst_corners[i] = bb.at<cv::Point2f>(i, 0);
                //img_template_boundingbox[i] = bb.at<cv::Point2f>(i,0);
            }

            /*
            // draw inliers
            drawMatches( img_template, img_template_keypoints, img_search, img_search_keypoints,
                         filteredMatches, drawImg, CV_RGB(0, 255, 0), CV_RGB(0, 0, 255), matchesMask
            #if DRAW_RICH_KEYPOINTS_MODE
                         , DrawMatchesFlags::DRAW_RICH_KEYPOINTS
            #endif
                       );
            #if DRAW_OUTLIERS_MODE
            // draw outliers
            for( size_t i1 = 0; i1 < matchesMask.size(); i1++ )
                matchesMask[i1] = !matchesMask[i1];
            drawMatches( img_template, img_template_keypoints, img_search, img_search_keypoints,
                         filteredMatches, drawImg, CV_RGB(0, 0, 255), CV_RGB(255, 0, 0), matchesMask,
                         DrawMatchesFlags::DRAW_OVER_OUTIMG | DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
            #endif
            imshow( winName, drawImg );
            */
        }
        else
        {
            // Track features from the previous frame into the current frame and see
            // which features are inliers and which are outliers. Among the outlier
            // features, check whether any were marked as inliers in the previous
            // frame and re-mark them as outliers.

            // Once the number of inliers drops below a certain threshold, compute
            // descriptors on the new features marked as outliers and perform matching
            // against the template again.

            // A patch-based approach seems powerful as well; building a manifold or
            // detecting planes in the set of inliers and tracking them as a whole may
            // be more powerful still. (A sketch of the frame-to-frame tracking step
            // follows this function.)
        }

        //std::swap(current_template_points, previous_template_points);
        std::swap(img_search_points_inliers[1], img_search_points_inliers[0]);
        std::swap(img_search_points_outliers[1], img_search_points_outliers[0]);
        swap(prev_img_search, img_search);
    } // end bSetImageSearch && bSetImageTemplate
}
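// The else-branch above is still a TODO. Below is a minimal sketch of the
// frame-to-frame tracking step it describes, using pyramidal Lucas-Kanade optical flow
// (cv::calcOpticalFlowPyrLK). The function name, parameters, and the simple
// status-based inlier/outlier split are illustrative assumptions, not part of
// pkmDetector; the frames are assumed to be 8-bit grayscale (e.g. prev_img_search and
// img_search), and a homography consistency check like the one in the initialization
// branch could be layered on top.
#include <opencv2/core/core.hpp>
#include <opencv2/video/tracking.hpp>
#include <vector>

static void trackInliersLK(const cv::Mat& prev_img, const cv::Mat& curr_img,
                           const std::vector<cv::Point2f>& prev_points,
                           std::vector<cv::Point2f>& curr_inliers,
                           std::vector<cv::Point2f>& curr_outliers)
{
    if (prev_points.empty())
        return;

    // Track last frame's points into the current frame.
    std::vector<cv::Point2f> curr_points;
    std::vector<uchar> status;
    std::vector<float> err;
    cv::calcOpticalFlowPyrLK(prev_img, curr_img, prev_points, curr_points, status, err);

    // Points that failed to track become outliers; the rest are provisional inliers
    // and could additionally be gated by reprojection error against an updated H12.
    for (size_t i = 0; i < curr_points.size(); i++)
    {
        if (status[i])
            curr_inliers.push_back(curr_points[i]);
        else
            curr_outliers.push_back(prev_points[i]);
    }
}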
static void doIteration(Mat& img1, Mat& img2, vector<int>& queryIdxs, vector<int>& trainIdxs,
                        Ptr<DescriptorMatcher>& descriptorMatcher, int matcherFilter,
                        vector<KeyPoint>& keypoints1, Mat& descriptors1,
                        vector<KeyPoint>& keypoints2, Mat& descriptors2,
                        Mat& matchedDesc1, Mat& matchedDesc2,
                        vector<Point2f>& matchedPoints1, vector<Point2f>& matchedPoints2,
                        vector<Point2f>& MP1, vector<KeyPoint>& tempkey)
{
    assert(!img2.empty());

    // Detect and describe SURF features in the current frame.
    cv::SURF mySURF;
    mySURF.extended = 0;
    Mat H12;
    mySURF.detect(img2, keypoints2);
    mySURF.compute(img2, keypoints2, descriptors2);

    // Match the reference descriptors against the current frame.
    vector<DMatch> filteredMatches;
    switch (matcherFilter)
    {
        case CROSS_CHECK_FILTER:
            crossCheckMatching(descriptorMatcher, descriptors1, descriptors2, filteredMatches, 1);
            break;
        default:
            simpleMatching(descriptorMatcher, descriptors1, descriptors2, filteredMatches);
    }

    trainIdxs.clear();
    queryIdxs.clear();
    for (size_t i = 0; i < filteredMatches.size(); i++)
    {
        queryIdxs.push_back(filteredMatches[i].queryIdx);
        trainIdxs.push_back(filteredMatches[i].trainIdx);
    }

    // Gather the matched descriptors in match order.
    Mat mDesc1, mDesc2;
    for (size_t i = 0; i < queryIdxs.size(); i++)
    {
        mDesc1.push_back(descriptors1.row(queryIdxs[i]));
        mDesc2.push_back(descriptors2.row(trainIdxs[i]));
    }

    vector<Point2f> points1;
    KeyPoint::convert(keypoints1, points1, queryIdxs);
    vector<Point2f> points2;
    KeyPoint::convert(keypoints2, points2, trainIdxs);

    vector<char> matchesMask(filteredMatches.size(), 0); //, matchesMask2( filteredMatches.size(), 1 );
    Mat drawImg; // drawImg2;

    cout << "points2.size \t" << points2.size() << endl;
    cout << "HELLO \t" << endl;

    if (RANSAC_THREHOLD >= 0)
    {
        if (points2.size() < 4)
        {
            // Too few matches to estimate a homography, so the previous ROI is
            // retained and all matches are accepted as-is.
            cout << "fewer than 4 matches, hence prev ROI is retained" << endl;
            for (size_t i1 = 0; i1 < points2.size(); i1++)
            {
                matchesMask[i1] = 1;
                matchedPoints1.push_back(points1[i1]);
                matchedPoints2.push_back(points2[i1]);
                matchedDesc1.push_back(descriptors1.row(queryIdxs[i1]));
                matchedDesc2.push_back(descriptors2.row(trainIdxs[i1]));
                tempkey.push_back(keypoints2[trainIdxs[i1]]);
                MP1.push_back(points2[i1]);
            }
        }
        else
        {
            H12 = findHomography(Mat(points1), Mat(points2), CV_RANSAC, RANSAC_THREHOLD);
            if (!H12.empty())
            {
                // Reproject the reference points into the current frame and keep
                // matches whose reprojection error is small.
                Mat points1t;
                perspectiveTransform(Mat(points1), points1t, H12);

                vector<Point2f> points2Shift(points2.size());
                points2Shift = points2;
                shiftPoints(points2Shift, box);
                Point2f boxCenter;
                boxCenter.x = box.x + box.width / 2;
                boxCenter.y = box.y + box.height / 2;

                for (size_t i1 = 0; i1 < points1.size(); i1++)
                {
                    // Squared descriptor distance, computed for the (commented-out)
                    // descriptor-distance gate below.
                    double descDiff = pow(norm(mDesc1.row(i1) - mDesc2.row(i1)), 2);
                    // if(descDiff < 0.08)
                    {
                        double diff = norm(points2[i1] - points1t.at<Point2f>((int)i1, 0));
                        if (diff <= 30)
                        {
                            // cout << diff << endl;
                            matchesMask[i1] = 1;
                            matchedPoints1.push_back(points1[i1]);
                            matchedPoints2.push_back(points2[i1]);
                            matchedDesc1.push_back(descriptors1.row(queryIdxs[i1]));
                            matchedDesc2.push_back(descriptors2.row(trainIdxs[i1]));
                            tempkey.push_back(keypoints2[trainIdxs[i1]]);
                            MP1.push_back(points2[i1]);
                        }
                    }
                }
            }

            // drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg2,
            //              CV_RGB(255, 255, 0), CV_RGB(255, 255, 255), matchesMask2
            // #if DRAW_RICH_KEYPOINTS_MODE
            //              , DrawMatchesFlags::DRAW_RICH_KEYPOINTS
            // #endif
            //            );

            drawMatches(img1, keypoints1, img2, keypoints2, filteredMatches, drawImg,
                        CV_RGB(0, 255, 0), CV_RGB(0, 0, 255), matchesMask
#if DRAW_RICH_KEYPOINTS_MODE
                        , DrawMatchesFlags::DRAW_RICH_KEYPOINTS
#endif
                       );
            cout << endl;
            imshow("doiter", drawImg);

            // Mat newimg = img1.clone();
            // KeyPoint::convert(keypoints1, points1);
            // for(size_t i=0;i<points1.size();i++)
            //     circle(newimg, points1[i], 2, Scalar(255,0,255), 2);
            // imshow( "doimg", newimg );
            // points1.clear();
            // waitKey(0);
        }
    }
    // waitKey(0);
}
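// shiftPoints() and the ROI `box` used above are not defined in this excerpt. A minimal
// sketch, under the assumption that shiftPoints simply offsets ROI-relative points by
// the ROI's top-left corner into full-image coordinates (the actual helper may do more):
#include <opencv2/core/core.hpp>
#include <vector>

extern cv::Rect box; // tracking ROI in the full image (assumed to be a global)

static void shiftPoints(std::vector<cv::Point2f>& points, const cv::Rect& roi)
{
    // Translate every point from ROI coordinates into full-image coordinates.
    const cv::Point2f offset((float)roi.x, (float)roi.y);
    for (size_t i = 0; i < points.size(); i++)
        points[i] += offset;
}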