Example #1
bool findCorrectPose(Mat_<double> E, vector<KeyPoint> key1, vector<KeyPoint> key2, Mat_<double> K, Mat_<double> Kinv, Matx34d& out, vector<Point3d>& cloudOut) {
    Matx34d Peye(1.0, 0, 0, 0, 0, 1.0, 0, 0, 0, 0, 1.0, 0);
    // decompose E into the second camera matrix P' (HZ 9.19)
    cv::SVD svd(E, SVD::MODIFY_A);
    Mat svd_u = svd.u;
    Mat svd_vt = svd.vt;
    Mat svd_w = svd.w;
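    // HZ Result 9.19: from E = U diag(1,1,0) V^T there are four candidate poses for the second
    // camera, P' = [UWV^T | +u3], [UWV^T | -u3], [UW^TV^T | +u3] or [UW^TV^T | -u3]; only one of
    // them places the reconstructed points in front of both cameras, which is tested further below.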
    Matx33d W(0, -1, 0,   // HZ 9.13
              1, 0, 0,
              0, 0, 1);
    /////////////////////////////////////////////////////

    // Newly added (to be verified): S = U Z U^T is the skew-symmetric factor of E = SR (HZ 9.13);
    // it is computed here for reference but not used below
    Matx33d Z(0, 1, 0,
              -1, 0, 0,
              0, 0, 0);
    Mat_<double> S = svd_u * Mat(Z) * svd_u.t();
    ///////////////////////////////////
    Mat_<double> R = svd_u * Mat(W) * svd_vt; //HZ 9.19
    Mat_<double> R1 = svd_u * Mat(W).t() * svd_vt; //HZ 9.19
    // Newly added (following the MIT implementation): force det = +1 so R and R1 are proper rotations
    if(determinant(R)<0)
        R = -R;
    if(determinant(R1)<0)
        R1 = -R1;
    /////////////////////////////////////
    Mat_<double> t = svd_u.col(2); // u3
    Mat_<double> tneg = -1 * svd_u.col(2); // -u3
    // Snap R and R1 to the nearest orthonormal matrix by resetting their singular values to 1,
    // and normalise each translation by the mean singular value
    cv::SVD rSVD(R);
    R = rSVD.u * Mat::eye(3, 3, CV_64F) * rSVD.vt;
    double myscale = cv::sum(rSVD.w)[0] / 3; // mean of the three singular values
    t = t / myscale;
    cv::SVD r1SVD(R1);
    R1 = r1SVD.u * Mat::eye(3, 3, CV_64F) * r1SVD.vt;
    double myscale1 = cv::sum(r1SVD.w)[0] / 3;
    tneg = tneg / myscale1;
    if (!CheckCoherentRotation(R) || !CheckCoherentRotation(R1)) {
        out = 0;
        return false;
    }
    // Triangulate with each of the four candidate poses (R,t), (R,tneg), (R1,t), (R1,tneg)
    // and keep the one that puts the most points in front of both cameras
    Matx34d TempPose, poseHolder;
    vector<Point3d> pointCloud, pointCloudMax;

    createPoseFromRotationTranslation(R, t, out);
    double max = -1, temp = 0.0;
    max = getPointCloudAndPointsInFront(Peye, out, key1, key2, K, K.inv(), pointCloud);
    pointCloudMax = pointCloud;
    poseHolder = Matx34d(out);


    pointCloud.clear();
    out = TempPose;
    createPoseFromRotationTranslation(R, tneg, out);
    temp = getPointCloudAndPointsInFront(Peye, out, key1, key2, K, K.inv(), pointCloud);
    if(temp>max) {
        pointCloudMax = pointCloud;
        poseHolder = Matx34d(out);
        max = temp;
    }

    pointCloud.clear();
    out = TempPose;
    createPoseFromRotationTranslation(R1, t, out);
    temp = getPointCloudAndPointsInFront(Peye, out, key1, key2, K, K.inv(), pointCloud);
    if(temp>max) {
        pointCloudMax = pointCloud;
        poseHolder = Matx34d(out);
        max = temp;
    }

    pointCloud.clear();
    out = TempPose;
    createPoseFromRotationTranslation(R1, tneg, out);
    temp = getPointCloudAndPointsInFront(Peye, out, key1, key2, K, K.inv(), pointCloud);
    if(temp>max) {
        pointCloudMax = pointCloud;
        poseHolder = Matx34d(out);
        max = temp;
    }
    out = poseHolder;
    cloudOut = pointCloudMax;
    return true;
}
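
// The helpers used above (CheckCoherentRotation, createPoseFromRotationTranslation,
// getPointCloudAndPointsInFront) are not shown in this example. As a rough sketch of what the
// first two typically look like in this kind of structure-from-motion code (an assumption, not
// the original implementations):
//
//   bool CheckCoherentRotation(const Mat_<double>& R) {
//       // a proper rotation matrix must have determinant +1 (up to numerical error)
//       return fabs(determinant(R) - 1.0) < 1e-6;
//   }
//
//   void createPoseFromRotationTranslation(const Mat_<double>& R, const Mat_<double>& t, Matx34d& P) {
//       // pack [R | t] into a 3x4 camera pose matrix
//       P = Matx34d(R(0,0), R(0,1), R(0,2), t(0),
//                   R(1,0), R(1,1), R(1,2), t(1),
//                   R(2,0), R(2,1), R(2,2), t(2));
//   }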
int main(int argc, char **argv) {
    try {
        ParseArgs(argc, argv);

        int num_cameras = static_cast<int>(imgs.size());
        if (num_cameras < 1)
            throw runtime_error("Need at least one camera");

        // Find features

        cout << "Finding features...\n";
        Ptr<detail::FeaturesFinder> features_finder = features_finder_creator->Create();

        for (int i = 0; i < num_cameras; ++i) {
            int64 t = getTickCount();
            cout << "Finding features in '" << img_names[i] << "'... ";

            Ptr<detail::ImageFeatures> features = new detail::ImageFeatures();
            (*features_finder)(imgs[i], *features);
            features_collection[i] = features;

            cout << "#features = " << features_collection.find(i)->second->keypoints.size()
                 << ", time = " << (getTickCount() - t) / getTickFrequency() << " sec\n";
        }

        // Match all pairs

        cout << "Matching pairs... ";
        MatchesCollection matches_collection;
        Ptr<detail::FeaturesMatcher> matcher = features_matcher_creator.Create();

        FeaturesCollection::iterator from_iter = features_collection.begin();
        FeaturesCollection::iterator from_next_iter = from_iter; ++from_next_iter;
        FeaturesCollection::iterator to_iter;

        for (; from_next_iter != features_collection.end(); from_iter = from_next_iter++) {
            for (to_iter = from_next_iter; to_iter != features_collection.end(); ++to_iter) {
                cout << "(" << from_iter->first << "->" << to_iter->first << ") ";
                detail::MatchesInfo mi;
                (*matcher)(*(from_iter->second), *(to_iter->second), mi);
                matches_collection[make_pair(from_iter->first, to_iter->first)]
                        = new vector<DMatch>(mi.matches);
            }
        }
        cout << endl;

        // Estimate homographies

        HomographiesP2 Hs;
        HomographiesP2 good_Hs;
        vector<Mat> Hs_from_0;
        RelativeConfidences rel_confs;
        Mat keypoints1, keypoints2;

        cout << "Estimating Hs...\n";
        for (int from = 0; from < num_cameras - 1; ++from) {
            for (int to = from + 1; to < num_cameras; ++to) {
                const vector<DMatch> &matches = *(matches_collection.find(make_pair(from, to))->second);

                cout << "Estimating H between '" << img_names[from] << "' and '" << img_names[to]
                     << "'... #matches = " << matches.size();

                if (static_cast<int>(matches.size()) < min_num_matches) {
                    cout << ", not enough matches\n";
                    continue;
                }

                ExtractMatchedKeypoints(*(features_collection.find(from)->second),
                                        *(features_collection.find(to)->second),
                                        matches, keypoints1, keypoints2);
                vector<uchar> inliers_mask;
                Mat_<double> H = findHomography(keypoints1.reshape(2), keypoints2.reshape(2),
                                                inliers_mask, RANSAC, H_est_thresh);

                if (H.empty()) {
                    cout << ", can't estimate H\n";
                    continue;
                }

                Ptr<vector<DMatch> > inliers = new vector<DMatch>();
                for (size_t i = 0; i < matches.size(); ++i)
                    if (inliers_mask[i])
                        inliers->push_back(matches[i]);
                cout << ", #inliers = " << inliers->size();

                double rms_err = 0;
                for (size_t i = 0; i < matches.size(); ++i) {
                    const Point2d &kp1 = keypoints1.at<Point2d>(0, i);
                    const Point2d &kp2 = keypoints2.at<Point2d>(0, i);
                    double x = H(0, 0) * kp1.x + H(0, 1) * kp1.y + H(0, 2);
                    double y = H(1, 0) * kp1.x + H(1, 1) * kp1.y + H(1, 2);
                    double z = H(2, 0) * kp1.x + H(2, 1) * kp1.y + H(2, 2);
                    x /= z; y /= z;
                    rms_err += (kp2.x - x) * (kp2.x - x) + (kp2.y - y) * (kp2.y - y);
                }
                rms_err = sqrt(rms_err / matches.size());
                cout << ", RMS err = " << rms_err;

                // See "Automatic Panoramic Image Stitching using Invariant Features"
                // by Matthew Brown and David G. Lowe, IJCV 2007 for the explanation
                double confidence = inliers->size() / (8 + 0.3 * matches.size()) - 1;
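                // For instance, 200 matches with 150 inliers give 150 / (8 + 0.3 * 200) - 1 ≈ 1.21,
                // while only 50 inliers give 50 / 68 - 1 ≈ -0.26 and the pair is excluded from good_Hs below.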

                rel_confs[make_pair(from, to)] = confidence;
                cout << ", conf = " << confidence;                

                cout << endl;                                

                Hs[make_pair(from, to)] = H;
                matches_collection[make_pair(from, to)] = inliers;

                if (confidence > 0)
                    good_Hs[make_pair(from, to)] = H;
                if (from == 0)
                    Hs_from_0.push_back(H);
            }
        }

        // Linear calibration

        if (K_init.empty()) {
            cout << "Linear calibrating...\n";
            if (lin_est_skew)
                K_init = CalibRotationalCameraLinear(good_Hs);
            else
                K_init = CalibRotationalCameraLinearNoSkew(good_Hs);
            cout << "K_init =\n" << K_init << endl;
        }

        // Non-linear refinement

        cout << "Refining camera...\n";

        if (static_cast<int>(Hs_from_0.size()) != num_cameras - 1) {
            stringstream msg;
            msg << "Refinement requires Hs between the first and all other images, "
                << "but only " << Hs_from_0.size() << " were found";
            throw runtime_error(msg.str());
        }

        map<int, Mat> Rs;
        Rs[0] = Mat::eye(3, 3, CV_64F);
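        // For a camera rotating about its optical centre, H_0i ~ K * R_i * K^-1 (up to scale),
        // so the rotation of camera i relative to camera 0 is recovered as R_i = K^-1 * H_0i * K.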
        for (int i = 1; i < num_cameras; ++i)
            Rs[i] = K_init.inv() * Hs_from_0[i - 1] * K_init;

        Mat_<double> K_refined = K_init.clone();
        if (refine_skew)
            RefineRigidCamera(K_refined, Rs, features_collection, matches_collection);
        else {
            K_refined(0, 1) = 0;
            RefineRigidCamera(K_refined, Rs, features_collection, matches_collection,
                              REFINE_FLAG_K_ALL & ~REFINE_FLAG_K_SKEW);
        }
        cout << "K_refined =\n" << K_refined << endl;

        cout << "SUMMARY\n";
        cout << "K_init =\n" << K_init << endl;
        cout << "K_refined =\n" << K_refined << endl;
    }
    catch (const exception &e) {
        cout << "Error: " << e.what() << endl;
    }
    return 0;
}
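
// A minimal usage sketch for findCorrectPose (not part of the original sample). It assumes two
// already matched keypoint lists key1/key2 (key1[i] corresponds to key2[i]) and a known intrinsic
// matrix K, recovers E from the fundamental matrix (E = K^T F K), and lets findCorrectPose choose
// among the four candidate poses. Names and parameter values are illustrative only.
void exampleRecoverPose(const vector<KeyPoint>& key1, const vector<KeyPoint>& key2,
                        const Mat_<double>& K)
{
    // collect matched pixel coordinates
    vector<Point2f> pts1, pts2;
    for (size_t i = 0; i < key1.size(); ++i) {
        pts1.push_back(key1[i].pt);
        pts2.push_back(key2[i].pt);
    }

    // fundamental matrix with RANSAC, then E = K^T F K
    Mat F = findFundamentalMat(pts1, pts2, FM_RANSAC, 3.0, 0.99);
    if (F.empty())
        return;
    Mat_<double> E = K.t() * F * K;

    Matx34d P1;               // pose [R|t] of the second camera relative to the first
    vector<Point3d> cloud;    // triangulated points from the winning pose
    if (findCorrectPose(E, key1, key2, K, Mat_<double>(K.inv()), P1, cloud))
        cout << "P1 =\n" << Mat(P1) << "\n#points in front: " << cloud.size() << endl;
}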