Example #1
File: main.cpp Project: bzobl/sfm
bool process_sfm(IFeatureDetect *fdetect, IPoseEstimation *poseestim, ITriangulation *triang,
                 Camera const &cam,
                 Frame *frame_last, Frame *frame_now, std::vector<Frame *> const &frames,
                 PointCloud &global_points, SfMTimes &times,
                 bool initial_pose_estimated, bool export_kp, bool show_matches, bool show_points)
{
    double const DISTANCE_THRESHOLD = 10;

    StopWatch fdsw;
    fdetect->findFeatures(frame_now);
    ViewCombination view = fdetect->matchFrames(frame_last, frame_now);

    if (export_kp) {
        export_keypoints(view);
    }
    if (show_matches) {
        test_show_features_colored(view, view.getMatchesSize(), true);
    }

    double average_distance = 0.0;
    // guard against a division by zero below when no matches were found
    if (view.getMatchesSize() == 0) {
        std::cerr << "no matches between frames" << std::endl;
        return false;
    }
    for (size_t i = 0; i < view.getMatchesSize(); i++) {
        double distance = ITriangulation::norm(view.getMatchingPointLeft(i),
                                               view.getMatchingPointRight(i));
        average_distance += distance;
    }
    average_distance /= view.getMatchesSize();
    fdsw.stop();
    std::cout << "average_distance between " << view.getMatchesSize()
              << " matches: " << average_distance << std::endl;

    if (average_distance < DISTANCE_THRESHOLD) {
        std::cerr << "camera has not moved" << std::endl;
        return false;
    }

    StopWatch pesw;
    // first frame pair: recover the pose from two-view geometry; afterwards use PnP against the existing cloud
    if (!initial_pose_estimated) {
        std::cout << "Estimating initial pose" << std::endl;
        if (!initial_pose_estimation(cam, view, poseestim, show_points)) {
            std::cerr << "Pose estimation failed" << std::endl;
            return false;
        }
    } else {
        std::vector<cv::Point2f> points_2d;
        std::vector<cv::Point3f> points_3d;

        std::cout << "finding 2D 3D correspondences" << std::endl;
        if (!find_2D_3D_correspndences(frame_now, frames, global_points,
                                       fdetect, points_2d, points_3d)) {
            std::cerr << "could not find enough correspondences" << std::endl;
            return false;
        }

        std::cout << "Starting PnP" << std::endl;
        if (!poseestim->estimatePose(cam, *frame_now, points_3d, points_2d)) {
            std::cerr << "PnP failed" << std::endl;
            return false;
        }
    }
    pesw.stop();

    std::cout << "Starting triangulation of " << view.getMatchesSize() << " keypoints" << std::endl;
    StopWatch trsw;
    if (!triang->triangulate(cam, view, global_points)) {
        std::cerr << "Triangulation failed" << std::endl;
        return false;
    }
    trsw.stop();
    /*
    OpticalFlowFeatures of;
    std::cout << "starting of" << std::endl;
    ViewCombination vof = of.matchFrames(frame_last, frame_now);
    std::cout << "matches:  " << vof.getMatchesSize() << std::endl
              << "kp left:  " << frame_last->getKeypointsSize() << std::endl
              << "kp right: " << frame_now->getKeypointsSize() << std::endl;
    std::cout << "starting triangulating of" << std::endl;
    triang->triangulate(cam, vof,
                        view.getImageLeft()->getProjectionMatrix(),
                        view.getImageRight()->getProjectionMatrix(),
                        global_points);
    //of_pts.RunVisualization();
    */
    /*
    OpticalFlowFeatures of;
    std::cout << "starting of" << std::endl;
    Frame f1(img_last);
    Frame f2(img_now);
    ViewCombination vof = of.matchFrames(&f1, &f2);
    triang->triangulate(cam, vof,
                        frame_last->getProjectionMatrix(),
                        frame_now->getProjectionMatrix(),
                        global_points);
                        */

    if (show_points) {
        global_points.RunVisualization("Global points in sfm loop");
    }

    times.feature_detection = fdsw.getDurationMs();
    times.pose_estimation = pesw.getDurationMs();
    times.triangulation = trsw.getDurationMs();
    return true;
}
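For context, a minimal sketch of how a caller might drive process_sfm frame by frame is shown below. The concrete IFeatureDetect, IPoseEstimation and ITriangulation implementations, the Camera setup and the frame loading are not part of this example and are assumed; only the process_sfm signature and the SfMTimes fields used above are taken from the code.

// Hypothetical driver loop around process_sfm; detector/estimator/triangulator
// construction and frame loading are assumed to happen elsewhere.
bool run_pipeline(IFeatureDetect *fdetect, IPoseEstimation *poseestim, ITriangulation *triang,
                  Camera const &cam, std::vector<Frame *> &frames, PointCloud &global_points)
{
    bool initial_pose_estimated = false;

    for (size_t i = 1; i < frames.size(); i++) {
        SfMTimes times;
        // consecutive frames form the view pair for this iteration
        if (!process_sfm(fdetect, poseestim, triang, cam,
                         frames[i - 1], frames[i], frames,
                         global_points, times, initial_pose_estimated,
                         /*export_kp=*/false, /*show_matches=*/false, /*show_points=*/false)) {
            std::cerr << "frame " << i << " skipped" << std::endl;
            continue;
        }
        initial_pose_estimated = true;

        std::cout << "feature detection: " << times.feature_detection << " ms, "
                  << "pose estimation: " << times.pose_estimation << " ms, "
                  << "triangulation: " << times.triangulation << " ms" << std::endl;
    }
    return initial_pose_estimated;
}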
Example #2
File: main.cpp Project: bzobl/sfm
bool initial_pose_estimation(Camera const &cam, ViewCombination &view, IPoseEstimation *poseestim,
                             bool show_points, bool debug = false)
{
    cv::Mat P_0, P_1;
    PointCloud pts;

    if (!poseestim->estimatePose(cam, view, P_0, P_1, pts)) {
        return false;
    }

    if (debug) {
        std::cout << "Initial Pose estimation successful" << std::endl;
        std::cout << "P_0" << std::endl << P_0 << std::endl;
        std::cout << "P_1" << std::endl << P_1 << std::endl;
        std::cout << "Triangulated " << pts.size() << " 3d points" << std::endl;
    }

    if (show_points) {
        pts.RunVisualization("Initial pose estimation");
    }

    // rank triangulated points by their error so only the most reliable ones feed the PnP step
    std::vector<std::pair<size_t, double>> minimalErrors;
    for (size_t i = 0; i < pts.size(); i++) {
        minimalErrors.emplace_back(i, pts.getError(i));
    }
    std::sort(minimalErrors.begin(), minimalErrors.end(),
              [](std::pair<size_t, double> const &lhs, std::pair<size_t, double> const &rhs)
    {
        return lhs.second < rhs.second;
    });

    std::vector<cv::Point2f> img_points_left;
    std::vector<cv::Point2f> img_points_right;
    std::vector<cv::Point3f> points_3d;
    for (size_t i = 0;
            (i < minimalErrors.size()) && (minimalErrors[i].second <= 50.0);
            i++) {
        size_t idx = minimalErrors[i].first;

        // skip points with a non-positive homogeneous coordinate
        if (pts.getPoint(idx)(3) <= 0) continue;

        img_points_left.emplace_back(view.getMatchingPointLeft(idx));
        img_points_right.emplace_back(view.getMatchingPointRight(idx));
        cv::Vec4d const &pt = pts.getPoint(idx);
        points_3d.emplace_back(pt(0), pt(1), pt(2));
    }

    if (debug) {
        std::cout << "found " << points_3d.size() << " points elegible for PnP" << std::endl;
    }

    if (points_3d.size() <= 6) {
        return false;
    }

    try {
        if (debug) {
            std::cout << "PnP of left view" << std::endl;
        }
        poseestim->estimatePose(cam, *view.getImageLeft(), points_3d, img_points_left);

        if (debug) {
            std::cout << "PnP of right view" << std::endl;
        }
        poseestim->estimatePose(cam, *view.getImageRight(), points_3d, img_points_right);
    } catch (std::exception const &e) {
        std::cerr << "PnP after initial pose estimation failed: " << e.what() << std::endl;
        return false;
    }

    return true;
}
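The core of initial_pose_estimation above is the filtering step: triangulated points are ranked by their error and only well-conditioned points are handed to PnP. A minimal self-contained sketch of that pattern, using plain std containers instead of the project's PointCloud type and a hypothetical error array, could look like this:

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <utility>
#include <vector>

int main()
{
    // hypothetical per-point errors; in the project they come from PointCloud::getError()
    std::vector<double> errors = {3.2, 120.0, 0.7, 49.9, 55.1};
    double const ERROR_THRESHOLD = 50.0;  // same cutoff as in initial_pose_estimation

    // pair each point index with its error, then sort ascending by error
    std::vector<std::pair<std::size_t, double>> ranked;
    for (std::size_t i = 0; i < errors.size(); i++) {
        ranked.emplace_back(i, errors[i]);
    }
    std::sort(ranked.begin(), ranked.end(),
              [](std::pair<std::size_t, double> const &lhs,
                 std::pair<std::size_t, double> const &rhs) { return lhs.second < rhs.second; });

    // keep only the indices whose error stays below the threshold
    std::vector<std::size_t> kept;
    for (auto const &entry : ranked) {
        if (entry.second > ERROR_THRESHOLD) break;  // sorted, so the remaining errors are larger
        kept.push_back(entry.first);
    }

    std::cout << kept.size() << " of " << errors.size() << " points kept for PnP" << std::endl;
    return 0;
}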