Code example #1
void BR::Database::fixAndAdd(cv::Mat & image, const std::vector<cv::Point2f> & points, std::string title, std::string author, std::string isbn)
{
  // Warp the quadrilateral described by `points` (top-left, top-right,
  // bottom-right, bottom-left, matching the dst_points order below) to an
  // upright 480x640 cover image, then store it as a new Book.
  cv::Mat output;  // allocated by warpPerspective to the requested size
  std::vector<cv::Point2f> dst_points(4);
  dst_points[0] = cv::Point2f(0, 0);
  dst_points[1] = cv::Point2f(480, 0);
  dst_points[2] = cv::Point2f(480, 640);
  dst_points[3] = cv::Point2f(0, 640);
  cv::Mat transform = cv::getPerspectiveTransform(points, dst_points);
  cv::warpPerspective(image, output, transform, cv::Size(480, 640));
  addBook(new Book(isbn, title, author, output));
}
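
A minimal calling sketch (hypothetical: the Database instance, the image path, and the four corner coordinates are made up for illustration). The corners must be passed in the same order as dst_points above, i.e. top-left, top-right, bottom-right, bottom-left:

#include <opencv2/opencv.hpp>
#include <vector>

// Hypothetical caller: warp a photographed book cover and register it.
void registerCover(BR::Database & db)
{
    cv::Mat photo = cv::imread("cover_photo.jpg");   // hypothetical input image
    std::vector<cv::Point2f> corners(4);
    corners[0] = cv::Point2f(112, 60);    // top-left      (made-up coordinates,
    corners[1] = cv::Point2f(508, 84);    // top-right      e.g. from mouse clicks)
    corners[2] = cv::Point2f(532, 620);   // bottom-right
    corners[3] = cv::Point2f(96, 648);    // bottom-left
    db.fixAndAdd(photo, corners, "Some Title", "Some Author", "978-0000000000");
}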
Code example #2
File: features_matcher.cpp  Project: hrnr/m-explore
/* modified implementation of match from BestOf2NearestMatcher */
void AffineBestOf2NearestMatcher::match(
    const cv::detail::ImageFeatures &features1,
    const cv::detail::ImageFeatures &features2,
    cv::detail::MatchesInfo &matches_info)
{
    (*impl_)(features1, features2, matches_info);

    ROS_DEBUG("AffineMatcher: have %lu matches", matches_info.matches.size());

    // Check if it makes sense to find homography
    if (matches_info.matches.size() < static_cast<size_t>(num_matches_thresh1_))
        return;

    // Construct point-point correspondences for homography estimation
    // Points are centered so that the image center is (0,0). This mirrors the
    // other matchers and shows better results in practice (numerical stability?).
    cv::Mat src_points(1, static_cast<int>(matches_info.matches.size()),
                       CV_32FC2);
    cv::Mat dst_points(1, static_cast<int>(matches_info.matches.size()),
                       CV_32FC2);
    for (size_t i = 0; i < matches_info.matches.size(); ++i) {
        const cv::DMatch &m = matches_info.matches[i];

        cv::Point2f p = features1.keypoints[static_cast<size_t>(m.queryIdx)].pt;
        p.x -= features1.img_size.width * 0.5f;
        p.y -= features1.img_size.height * 0.5f;
        src_points.at<cv::Point2f>(0, static_cast<int>(i)) = p;

        p = features2.keypoints[static_cast<size_t>(m.trainIdx)].pt;
        p.x -= features2.img_size.width * 0.5f;
        p.y -= features2.img_size.height * 0.5f;
        dst_points.at<cv::Point2f>(0, static_cast<int>(i)) = p;
    }

    // Find pair-wise motion
    // this is my special modified version of estimateRigidTransform
    matches_info.H = estimateRigidTransform(src_points, dst_points,
                                            matches_info.inliers_mask, false);
    ROS_DEBUG_STREAM("estimate:\n" << matches_info.H);

    if (matches_info.H.empty()) {
        // could not find transformation
        matches_info.confidence = 0;
        matches_info.num_inliers = 0;
        return;
    }

    // extend H to represent the transformation in homogeneous coordinates
    matches_info.H.push_back(cv::Mat::zeros(1, 3, CV_64F));
    matches_info.H.at<double>(2, 2) = 1;

    /* TODO: should we handle determinant ~ 0 (can it happen due to aggressive
     * scaling?) */

    // Find number of inliers
    matches_info.num_inliers = 0;
    for (size_t i = 0; i < matches_info.inliers_mask.size(); ++i)
        if (matches_info.inliers_mask[i])
            matches_info.num_inliers++;

    ROS_DEBUG("num_inliers %d", matches_info.num_inliers);

    // These coeffs are from paper M. Brown and D. Lowe. "Automatic Panoramic
    // Image Stitching using Invariant Features"
    matches_info.confidence =
        matches_info.num_inliers / (8 + 0.3 * matches_info.matches.size());

    /* we don't want any cutoff for merging maps. TODO: allow setting this in
     * the constructor. */
    // Set zero confidence to remove matches between too close images, as they
    // don't provide additional information anyway. The threshold was set
    // experimentally.
    // matches_info.confidence =
    // matches_info.confidence > 3. ? 0. : matches_info.confidence;

    /* Unlike the other matchers, it makes no sense to rerun the estimation on
     * inliers only; estimateRigidTransform already did this for us, so we are
     * done here. */
}
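
A rough usage sketch for the matcher above, assuming an OpenCV 3.x-era cv::detail::OrbFeaturesFinder and that AffineBestOf2NearestMatcher is default-constructible like the stock BestOf2NearestMatcher (neither detail is shown in this excerpt). The matcher is invoked through the public operator() inherited from cv::detail::FeaturesMatcher, which forwards to the overridden match():

#include <iostream>
#include <opencv2/stitching/detail/matchers.hpp>

// Hypothetical driver: estimate the transform between two grayscale map images.
void matchMaps(const cv::Mat & map1, const cv::Mat & map2)
{
    cv::detail::OrbFeaturesFinder finder;
    cv::detail::ImageFeatures features1, features2;
    finder(map1, features1);   // detect ORB keypoints and descriptors
    finder(map2, features2);

    AffineBestOf2NearestMatcher matcher;   // assumed default constructor
    cv::detail::MatchesInfo matches_info;
    matcher(features1, features2, matches_info);   // calls match() internally

    if (!matches_info.H.empty()) {
        // H is the 3x3 homogeneous matrix (affine estimate plus the appended [0 0 1] row)
        std::cout << "transform:\n" << matches_info.H
                  << "\nconfidence: " << matches_info.confidence << std::endl;
    }
}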
Code example #3
File: matchers.cpp  Project: MasaMune692/alcexamples
void BestOf2NearestMatcher::match(const ImageFeatures &features1, const ImageFeatures &features2,
                                  MatchesInfo &matches_info)
{
    (*impl_)(features1, features2, matches_info);

    // Check if it makes sense to find homography
    if (matches_info.matches.size() < static_cast<size_t>(num_matches_thresh1_))
        return;

    // Construct point-point correspondences for homography estimation
    Mat src_points(1, static_cast<int>(matches_info.matches.size()), CV_32FC2);
    Mat dst_points(1, static_cast<int>(matches_info.matches.size()), CV_32FC2);
    for (size_t i = 0; i < matches_info.matches.size(); ++i)
    {
        const DMatch& m = matches_info.matches[i];

        Point2f p = features1.keypoints[m.queryIdx].pt;
        p.x -= features1.img_size.width * 0.5f;
        p.y -= features1.img_size.height * 0.5f;
        src_points.at<Point2f>(0, static_cast<int>(i)) = p;

        p = features2.keypoints[m.trainIdx].pt;
        p.x -= features2.img_size.width * 0.5f;
        p.y -= features2.img_size.height * 0.5f;
        dst_points.at<Point2f>(0, static_cast<int>(i)) = p;
    }

    // Find pair-wise motion
    matches_info.H = findHomography(src_points, dst_points, matches_info.inliers_mask, CV_RANSAC);
    if (std::abs(determinant(matches_info.H)) < numeric_limits<double>::epsilon())
        return;

    // Find number of inliers
    matches_info.num_inliers = 0;
    for (size_t i = 0; i < matches_info.inliers_mask.size(); ++i)
        if (matches_info.inliers_mask[i])
            matches_info.num_inliers++;

    // These coeffs are from paper M. Brown and D. Lowe. "Automatic Panoramic Image Stitching
    // using Invariant Features"
    matches_info.confidence = matches_info.num_inliers / (8 + 0.3 * matches_info.matches.size());

    // Set zero confidence to remove matches between too close images, as they don't provide
    // additional information anyway. The threshold was set experimentally.
    matches_info.confidence = matches_info.confidence > 3. ? 0. : matches_info.confidence;

    // Check if we should try to refine motion
    if (matches_info.num_inliers < num_matches_thresh2_)
        return;

    // Construct point-point correspondences for inliers only
    src_points.create(1, matches_info.num_inliers, CV_32FC2);
    dst_points.create(1, matches_info.num_inliers, CV_32FC2);
    int inlier_idx = 0;
    for (size_t i = 0; i < matches_info.matches.size(); ++i)
    {
        if (!matches_info.inliers_mask[i])
            continue;

        const DMatch& m = matches_info.matches[i];

        Point2f p = features1.keypoints[m.queryIdx].pt;
        p.x -= features1.img_size.width * 0.5f;
        p.y -= features1.img_size.height * 0.5f;
        src_points.at<Point2f>(0, inlier_idx) = p;

        p = features2.keypoints[m.trainIdx].pt;
        p.x -= features2.img_size.width * 0.5f;
        p.y -= features2.img_size.height * 0.5f;
        dst_points.at<Point2f>(0, inlier_idx) = p;

        inlier_idx++;
    }

    // Rerun motion estimation on inliers only
    matches_info.H = findHomography(src_points, dst_points, CV_RANSAC);
}
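
Both matchers above score a pairwise match with the same confidence formula from Brown and Lowe. A standalone restatement for illustration (not part of either file above):

#include <cstddef>

// Confidence from M. Brown and D. Lowe, "Automatic Panoramic Image Stitching
// using Invariant Features": inlier count divided by (8 + 0.3 * total matches).
double matchConfidence(int num_inliers, std::size_t num_matches)
{
    return num_inliers / (8 + 0.3 * static_cast<double>(num_matches));
}

// Example: 40 inliers out of 60 matches gives 40 / (8 + 18) ≈ 1.54.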