Code Example #1
    void homography(
            const std::vector<cv::KeyPoint>& keypoints_0,
            const std::vector<cv::KeyPoint>& keypoints_1,
            const std::vector<cv::DMatch>& matches,
            cv::Mat& H)
    {
        const size_t N = matches.size();
        // Gather the query/train indices of the matched keypoint pairs
        std::vector<int> queryIdxs(N), trainIdxs(N);
        for (size_t i = 0; i < N; ++i)
        {
            queryIdxs[i] = matches[i].queryIdx;
            trainIdxs[i] = matches[i].trainIdx;
        }

        // A negative threshold_ disables homography estimation
        if (threshold_ >= 0)
        {
            std::vector<cv::Point2f> points1;
            cv::KeyPoint::convert(keypoints_0, points1, queryIdxs);

            std::vector<cv::Point2f> points2;
            cv::KeyPoint::convert(keypoints_1, points2, trainIdxs);

            // Estimate the homography with RANSAC; threshold_ is the maximum
            // allowed reprojection error for a match to count as an inlier
            H = cv::findHomography(
                    cv::Mat(points1), cv::Mat(points2),
                    cv::RANSAC, threshold_);
        }
    }
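This helper discards the inlier mask that RANSAC computes internally; the later examples recover it by re-projecting points with perspectiveTransform. Note that cv::findHomography can also return the mask directly through its optional output parameter. A minimal sketch, reusing the names points1, points2, and threshold_ from the snippet above:

    std::vector<cv::Point2f> points1, points2; // matched point pairs, as above
    cv::Mat inlierMask;                        // one entry per match, nonzero = inlier
    cv::Mat H = cv::findHomography(points1, points2,
                                   cv::RANSAC, threshold_, inlierMask);
    // inlierMask.at<uchar>(i) != 0 marks match i as a RANSAC inlier,
    // avoiding a separate perspectiveTransform pass.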
Code Example #2
void CameraPoseOptimization::computeCorrespondenceBetweenTwoFrames(
	vector<cv::KeyPoint>& keypointsInCurrFrame,
	cv::Mat& descriptorsInCurrFrame,
	vector<DMatch>& filteredMatches,
	vector<char>& matchesMask)
{
	FastFeatureDetector detector;       // note: constructed but unused in this function
	BriefDescriptorExtractor extractor; // note: constructed but unused in this function

	// Match the descriptors of the two images through cross-checking.
	// BRIEF descriptors are binary, so the Hamming norm is the appropriate
	// distance measure (NORM_L2 is meant for float descriptors).
	BFMatcher matcher(NORM_HAMMING);
	crossCheckMatching(matcher, m_descriptorsInLastFrame, descriptorsInCurrFrame, filteredMatches);

	// Given the corresponding keypoint pairs of the two images, compute the
	// homography matrix: a 3x3 projective transformation between the two
	// image planes.
	vector<int> queryIdxs(filteredMatches.size()), trainIdxs(filteredMatches.size());
	for (size_t i = 0; i < filteredMatches.size(); i++)
	{
		queryIdxs[i] = filteredMatches[i].queryIdx;
		trainIdxs[i] = filteredMatches[i].trainIdx;
	}
	vector<Point2f> points1, points2;
	KeyPoint::convert(m_keypointsInLastFrame, points1, queryIdxs);
	KeyPoint::convert(keypointsInCurrFrame, points2, trainIdxs);
	Mat H12 = findHomography(Mat(points1), Mat(points2), CV_RANSAC, g_thresholdRansacReproj);

	//std::cout << H12.cols << "," << H12.rows << std::endl;
	//for (int i = 0; i != H12.rows; ++i)
	//{
	//	for (int j = 0; j != H12.cols; ++j)
	//	{
	//		std::cout << H12.at<double>(i,j) << " ";
	//	}
	//	std::cout << std::endl;
	//}
	//

	// For each keypoint A in the first image, apply the homography to get its
	// transformed point B' in the second image. If B' lies close (within the
	// RANSAC reprojection threshold) to A's corresponding keypoint B found by
	// cross-checking, treat the pair as reasonable by setting its flag in
	// "matchesMask" to 1; otherwise treat it as unreasonable and set the flag to 0.
	matchesMask.clear();
	matchesMask.resize(filteredMatches.size(), 1);

	if (!H12.empty())
	{
		Mat points1t;
		perspectiveTransform(Mat(points1), points1t, H12);
		for (size_t i1 = 0; i1 < points1.size(); i1++)
		{
			// If the distance between A's transformed point B' and its
			// corresponding point B is too large, the pair (A,B) is unreasonable.
			if (norm(points2[i1] - points1t.at<Point2f>((int)i1, 0)) > g_thresholdRansacReproj)
				matchesMask[i1] = 0; // mask 0 for unreasonable pair
		}
	}
}
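The crossCheckMatching helper called above is not shown in this snippet. A hypothetical reconstruction, modeled on the matching filter in OpenCV's descriptor_extractor_matcher sample, keeps only matches that agree in both directions (query-to-train and train-to-query):

    void crossCheckMatching(cv::DescriptorMatcher& matcher,
                            const cv::Mat& descriptors1, const cv::Mat& descriptors2,
                            std::vector<cv::DMatch>& filteredMatches12, int knn = 1)
    {
        filteredMatches12.clear();
        std::vector<std::vector<cv::DMatch> > matches12, matches21;
        matcher.knnMatch(descriptors1, descriptors2, matches12, knn);
        matcher.knnMatch(descriptors2, descriptors1, matches21, knn);
        for (size_t m = 0; m < matches12.size(); m++)
        {
            // Accept a forward match only if one of its backward matches
            // points back to the same query descriptor.
            bool crossChecked = false;
            for (size_t fk = 0; fk < matches12[m].size() && !crossChecked; fk++)
            {
                const cv::DMatch& forward = matches12[m][fk];
                for (size_t bk = 0; bk < matches21[forward.trainIdx].size(); bk++)
                {
                    const cv::DMatch& backward = matches21[forward.trainIdx][bk];
                    if (backward.trainIdx == forward.queryIdx)
                    {
                        filteredMatches12.push_back(forward);
                        crossChecked = true;
                        break;
                    }
                }
            }
        }
    }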
Code Example #3
void pkmDetector::update()
{
	if (bSetImageSearch && bSetImageTemplate) 
	{
		if (!bInitialized) 
		{
			
			
			// do matching between the template and image search
			// without tracking previous features since none initialized
			filteredMatches.clear();
			switch( matcherFilterType )
			{
				case CROSS_CHECK_FILTER :
					crossCheckMatching( descriptorMatcher, img_template_descriptors, img_search_descriptors, filteredMatches, 1 );
					break;
				default :
					simpleMatching( descriptorMatcher, img_template_descriptors, img_search_descriptors, filteredMatches );
			}
			
			// reindex based on found matches
			vector<int> queryIdxs( filteredMatches.size() ), trainIdxs( filteredMatches.size() );
			for( size_t i = 0; i < filteredMatches.size(); i++ )
			{
				queryIdxs[i] = filteredMatches[i].queryIdx;
				trainIdxs[i] = filteredMatches[i].trainIdx;
			}
			
			// build homography w/ RANSAC
			vector<cv::Point2f> points1; cv::KeyPoint::convert(img_template_keypoints, points1, queryIdxs);
			vector<cv::Point2f> points2; cv::KeyPoint::convert(img_search_keypoints, points2, trainIdxs);
			H12 = findHomography( cv::Mat(points1), cv::Mat(points2), CV_RANSAC, ransacReprojThreshold );
			
			// create a mask of the current inliers based on transform distance
			vector<char> matchesMask( filteredMatches.size(), 0 );
			
			printf("Matched %d features.\n", filteredMatches.size());
			
			// Map the template image points into the current search image via
			// the homography; the transformed points estimate where the matched
			// template features should appear.
			cv::Mat points1t; perspectiveTransform(cv::Mat(points1), points1t, H12);
			for( size_t i1 = 0; i1 < points1.size(); i1++ )
			{
				if( norm(points2[i1] - points1t.at<cv::Point2f>((int)i1,0)) < 4 ) // inlier
				{
					matchesMask[i1] = 1;
					img_search_points_inliers[1].push_back(points2[i1]);
				}
				else {
					img_search_points_outliers[1].push_back(points2[i1]);
				}
			}
			
			
			// update bounding box
			cv::Mat bb;
			perspectiveTransform(cv::Mat(img_template_boundingbox), bb, H12);
			for( int i = 0; i < 4; i++ )
			{
				dst_corners[i] = bb.at<cv::Point2f>(i,0);
				//img_template_boundingbox[i] = bb.at<cv::Point2f>(i,0);
			}
			
			/*
			// draw inliers
			drawMatches( img_search, img_template_keypoints, 
						 img_template, img_search_keypoints, 
						filteredMatches, drawImg, 
						CV_RGB(0, 255, 0), CV_RGB(0, 0, 255), matchesMask
	#if DRAW_RICH_KEYPOINTS_MODE
						, DrawMatchesFlags::DRAW_RICH_KEYPOINTS
	#endif
						);
			
	#if DRAW_OUTLIERS_MODE
			// draw outliers
			for( size_t i1 = 0; i1 < matchesMask.size(); i1++ )
				matchesMask[i1] = !matchesMask[i1];
			drawMatches( img_template, img_template_keypoints, 
						img_search, img_search_keypoints, filteredMatches, drawImg, CV_RGB(0, 0, 255), CV_RGB(255, 0, 0), matchesMask,
						DrawMatchesFlags::DRAW_OVER_OUTIMG | DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
			
	#endif
			
			imshow( winName, drawImg );
			*/
			
		}
		else {
			// Track features from the previous frame into the current frame and
			// determine which are inliers and which are outliers. Among the
			// outliers, re-mark any features that were inliers in the previous
			// frame as outliers.

			// Once the number of inliers drops below a threshold, re-match the
			// descriptors of newly detected features against the template.

			// A patch-based approach also seems powerful: building a manifold or
			// detecting planes in the set of inliers and tracking them as a whole
			// may be more robust.
			// TODO: implement frame-to-frame tracking.
		}
		
		//std::swap(current_template_points, previous_template_points);
		std::swap(img_search_points_inliers[1], img_search_points_inliers[0]);
		std::swap(img_search_points_outliers[1], img_search_points_outliers[0]);
		swap(prev_img_search, img_search);
	} // end bSetImageSearch && bSetImageTemplate
	
}
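The simpleMatching fallback in the switch statement above is likewise not shown. Following the same OpenCV sample, it is presumably a plain nearest-neighbour match with no filtering; a minimal sketch:

    void simpleMatching(cv::Ptr<cv::DescriptorMatcher>& descriptorMatcher,
                        const cv::Mat& descriptors1, const cv::Mat& descriptors2,
                        std::vector<cv::DMatch>& matches12)
    {
        // One best match per query descriptor, no cross-checking.
        descriptorMatcher->match(descriptors1, descriptors2, matches12);
    }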
Code Example #4
    void callback(const sensor_msgs::ImageConstPtr& msg)
    {
        if (image_0_ == NULL)
        {
            // Take first image:
            try
            {
                image_0_ = cv_bridge::toCvCopy(msg,
                        sensor_msgs::image_encodings::isColor(msg->encoding) ?
                        sensor_msgs::image_encodings::BGR8 :
                        sensor_msgs::image_encodings::MONO8);
            }
            catch (cv_bridge::Exception& e)
            {
                ROS_ERROR_STREAM("Failed to take first image: " << e.what());
                return;
            }

            ROS_INFO("First image taken");

            // Detect keypoints:
            detector_->detect(image_0_->image, keypoints_0_);
            ROS_INFO_STREAM(keypoints_0_.size() << " points found.");

            // Extract keypoints' descriptors:
            extractor_->compute(image_0_->image, keypoints_0_, descriptors_0_);
        }
        else
        {
            // Take second image:
            try
            {
                image_1_ = cv_bridge::toCvShare(msg,
                        sensor_msgs::image_encodings::isColor(msg->encoding) ?
                        sensor_msgs::image_encodings::BGR8 :
                        sensor_msgs::image_encodings::MONO8);
            }
            catch (cv_bridge::Exception& e)
            {
                ROS_ERROR_STREAM("Failed to take image: " << e.what());
                return;
            }

            // Detect keypoints:
            std::vector<cv::KeyPoint> keypoints_1;
            detector_->detect(image_1_->image, keypoints_1);
            ROS_INFO_STREAM(keypoints_1.size() << " points found on the new image.");

            // Extract keypoints' descriptors:
            cv::Mat descriptors_1;
            extractor_->compute(image_1_->image, keypoints_1, descriptors_1);

            // Compute matches:
            std::vector<cv::DMatch> matches;
            match(descriptors_0_, descriptors_1, matches);

            // Compute homography:
            cv::Mat H;
            homography(keypoints_0_, keypoints_1, matches, H);

            // Draw matches:
            const int s = std::max(image_0_->image.rows, image_0_->image.cols);
            cv::Size size(s, s);
            cv::Mat draw_image;
            warped_image_ = boost::make_shared<cv_bridge::CvImage>(
                    image_0_->header, image_0_->encoding,
                    cv::Mat(size, image_0_->image.type()));
            if (!H.empty()) // filter outliers
            {
                std::vector<char> matchesMask(matches.size(), 0);

                const size_t N = matches.size();
                std::vector<int> queryIdxs(N), trainIdxs(N);
                for (size_t i = 0; i < N; ++i)
                {
                    queryIdxs[i] = matches[i].queryIdx;
                    trainIdxs[i] = matches[i].trainIdx;
                }

                std::vector<cv::Point2f> points1, points2;
                cv::KeyPoint::convert(keypoints_0_, points1, queryIdxs);
                cv::KeyPoint::convert(keypoints_1, points2, trainIdxs);

                cv::Mat points1t;
                cv::perspectiveTransform(cv::Mat(points1), points1t, H);

                double maxInlierDist = threshold_ < 0 ? 3 : threshold_;
                for (size_t i1 = 0; i1 < points1.size(); ++i1)
                {
                    if (cv::norm(points2[i1] - points1t.at<cv::Point2f>((int)i1,0)) <= maxInlierDist ) // inlier
                        matchesMask[i1] = 1;
                }
                // draw inliers
                cv::drawMatches(
                        image_0_->image, keypoints_0_,
                        image_1_->image, keypoints_1, matches,
                        draw_image, cv::Scalar(0, 255, 0), cv::Scalar(0, 0, 255),
                        matchesMask,
                        cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);

                // draw outliers
                for (size_t i1 = 0; i1 < matchesMask.size(); ++i1)
                    matchesMask[i1] = !matchesMask[i1];
                cv::drawMatches(
                        image_0_->image, keypoints_0_,
                        image_1_->image, keypoints_1, matches,
                        draw_image, cv::Scalar(0, 0, 255), cv::Scalar(255, 0, 0),
                        matchesMask,
                        cv::DrawMatchesFlags::DRAW_OVER_OUTIMG | cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

                ROS_INFO_STREAM("Number of inliers: " << cv::countNonZero(matchesMask));

                // Warp the new image using the homography,
                // so we should get something similar to the original image:
                warpPerspective(
                        image_1_->image, warped_image_->image, H, size,
                        cv::INTER_LINEAR | cv::WARP_INVERSE_MAP);

                // Print the homography found:
                ROS_INFO_STREAM("Homography = " << H);
            }
            else
            {
                cv::drawMatches(
                        image_0_->image, keypoints_0_,
                        image_1_->image, keypoints_1, matches,
                        draw_image);

                image_1_->image.copyTo(warped_image_->image);

                ROS_WARN_STREAM("No homography transformation found!");
            }

            // Publish warped image (using homography):
            warped_image_->header = image_1_->header;
            pub_.publish(warped_image_->toImageMsg());

            // Show images:
            cv::imshow("correspondences", draw_image);
            cv::imshow("homography", warped_image_->image);
            cv::waitKey(3);
        }
    }
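The match() helper used in this callback is not part of the snippet. A hypothetical sketch, assuming a brute-force matcher (the actual matcher type and norm used by the original are not shown):

    void match(const cv::Mat& descriptors_0, const cv::Mat& descriptors_1,
               std::vector<cv::DMatch>& matches)
    {
        // NORM_HAMMING suits binary descriptors (e.g. ORB/BRIEF); use
        // NORM_L2 for float descriptors such as SIFT or SURF. Enabling
        // cross-check keeps only mutually-best matches.
        cv::BFMatcher matcher(cv::NORM_HAMMING, true);
        matcher.match(descriptors_0, descriptors_1, matches);
    }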