Example #1
File: emvs.hpp Project: avanindra/emvs
 void SetRotation( const Matx33f &R )
 {
   m_R = R;
   // cache the inverse rotation: R is orthonormal, so its transpose is its inverse
   m_inv_R = R.t();
   // camera centre in world coordinates: C = -R^T * t
   m_C = -m_inv_R * m_t;
 }
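For context, here is a minimal sketch of the kind of camera class this setter could belong to. The member names m_R, m_inv_R, m_t and m_C come from the snippet above; the class name Camera, the SetTranslation helper and the use of Vec3f for the translation are assumptions, not part of the original project.

#include <opencv2/core.hpp>

using cv::Matx33f;
using cv::Vec3f;

class Camera {   // hypothetical wrapper around the members used above
public:
    void SetTranslation(const Vec3f& t) { m_t = t; }

    void SetRotation(const Matx33f& R)
    {
        m_R = R;
        // cache the inverse rotation; R is orthonormal, so R^-1 = R^T
        m_inv_R = R.t();
        // camera centre in world coordinates: C = -R^T * t
        m_C = -m_inv_R * m_t;
    }

private:
    Matx33f m_R, m_inv_R;  // world-to-camera rotation and its cached inverse
    Vec3f   m_t;           // translation of the world-to-camera transform
    Vec3f   m_C;           // camera centre in world coordinates
};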
Example #2
Matx33f CalculateEssentialMatrix(Matx33f f, Matx33f K1, Matx33f K2)
{
    // The essential matrix is related to the fundamental matrix f by the
    // two cameras' intrinsic matrices: E = K2^T * f * K1.
    Matx33f E = K2.t() * f * K1;
    return E;
}
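A hedged usage sketch of this function: the intrinsic matrices would normally come from calibration and the fundamental matrix from point correspondences (e.g. via cv::findFundamentalMat); the values below are placeholders and not taken from the original project.

#include <opencv2/core.hpp>
#include <iostream>

using cv::Matx33f;

Matx33f CalculateEssentialMatrix(Matx33f f, Matx33f K1, Matx33f K2);  // as defined above

int main()
{
    // Placeholder intrinsics: fx, 0, cx / 0, fy, cy / 0, 0, 1.
    const Matx33f K1(800.f,   0.f, 320.f,
                       0.f, 800.f, 240.f,
                       0.f,   0.f,   1.f);
    const Matx33f K2 = K1;  // e.g. both images taken with the same camera

    Matx33f F;  // in a real pipeline: estimated from matches, e.g. with cv::findFundamentalMat

    const Matx33f E = CalculateEssentialMatrix(F, K1, K2);
    std::cout << cv::Mat(E) << std::endl;  // E = K2^T * F * K1
    return 0;
}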
void SfMBundleAdjustmentUtils::adjustBundle(
        PointCloud&                  pointCloud,
        std::vector<Pose>&           cameraPoses,
        Intrinsics&                  intrinsics,
        const std::vector<Features>& image2dFeatures) {

    std::call_once(initLoggingFlag, initLogging);

    // Create residuals for each observation in the bundle adjustment problem. The
    // parameters for cameras and points are added automatically.
    ceres::Problem problem;

    //Convert camera pose parameters from [R|t] (3x4) to [angle-axis (3), translation (3)] (1x6);
    //the focal length is optimized separately as a single scalar parameter
    typedef cv::Matx<double, 1, 6> CameraVector;
    vector<CameraVector> cameraPoses6d;
    cameraPoses6d.reserve(cameraPoses.size());
    for (size_t i = 0; i < cameraPoses.size(); i++) {
        const Pose& pose = cameraPoses[i];

        if (pose(0, 0) == 0 and pose(1, 1) == 0 and pose(2, 2) == 0) {
            //This camera pose is empty, it should not be used in the optimization
            cameraPoses6d.push_back(CameraVector());
            continue;
        }
        Vec3f t(pose(0, 3), pose(1, 3), pose(2, 3));
        Matx33f R = pose.get_minor<3, 3>(0, 0);
        float angleAxis[3];
        ceres::RotationMatrixToAngleAxis<float>(R.t().val, angleAxis); //Ceres assumes col-major...

        cameraPoses6d.push_back(CameraVector(
                angleAxis[0],
                angleAxis[1],
                angleAxis[2],
                t(0),
                t(1),
                t(2)));
    }

    //focal length, optimized as a single scalar shared by all cameras
    double focal = intrinsics.K.at<float>(0, 0);

    vector<cv::Vec3d> points3d(pointCloud.size());

    for (size_t i = 0; i < pointCloud.size(); i++) {
        const Point3DInMap& p = pointCloud[i];
        points3d[i] = cv::Vec3d(p.p.x, p.p.y, p.p.z);

        for (const auto& kv : p.originatingViews) {
            //kv.first  = camera index
            //kv.second = 2d feature index
            Point2f p2d = image2dFeatures[kv.first].points[kv.second];

            //subtract center of projection, since the optimizer doesn't know what it is
            p2d.x -= intrinsics.K.at<float>(0, 2);
            p2d.y -= intrinsics.K.at<float>(1, 2);

            // Each Residual block takes a point and a camera as input and outputs a 2
            // dimensional residual. Internally, the cost function stores the observed
            // image location and compares the reprojection against the observation.
            ceres::CostFunction* cost_function = SimpleReprojectionError::Create(p2d.x, p2d.y);

            problem.AddResidualBlock(cost_function,
                    NULL /* squared loss */,
                    cameraPoses6d[kv.first].val,
                    points3d[i].val,
                    &focal);
        }
    }

    // Make Ceres automatically detect the bundle structure. Note that the
    // standard solver, SPARSE_NORMAL_CHOLESKY, also works fine but it is slower
    // for standard bundle adjustment problems.
    ceres::Solver::Options options;
    options.linear_solver_type = ceres::DENSE_SCHUR;
    options.minimizer_progress_to_stdout = true;
    options.max_num_iterations = 500;
    options.eta = 1e-2;
    options.max_solver_time_in_seconds = 10;
    options.logging_type = ceres::LoggingType::SILENT;
    ceres::Solver::Summary summary;
    ceres::Solve(options, &problem, &summary);
    std::cout << summary.BriefReport() << "\n";

    if (summary.termination_type != ceres::CONVERGENCE) {
        cerr << "Bundle adjustment failed." << endl;
        return;
    }

    //update optimized focal
    intrinsics.K.at<float>(0, 0) = focal;
    intrinsics.K.at<float>(1, 1) = focal;

    //Write the optimized camera poses and 3D points back into the reconstruction
    for (size_t i = 0; i < cameraPoses.size(); i++) {
        Pose& pose = cameraPoses[i];

        if (pose(0, 0) == 0 and pose(1, 1) == 0 and pose(2, 2) == 0) {
            //This camera pose is empty, it was not used in the optimization
            continue;
        }

        //Convert optimized Angle-Axis back to rotation matrix
        double rotationMat[9] = { 0 };
        ceres::AngleAxisToRotationMatrix(cameraPoses6d[i].val, rotationMat);

        for (int c = 0; c < 3; c++) {
            for (int r = 0; r < 3; r++) {
                //rotationMat is column-major: element (r, c) is stored at [c * 3 + r]
                pose(r, c) = rotationMat[c * 3 + r];
            }
        }

        //Translation
        pose(0, 3) = cameraPoses6d[i](3);
        pose(1, 3) = cameraPoses6d[i](4);
        pose(2, 3) = cameraPoses6d[i](5);
    }

    for (size_t i = 0; i < pointCloud.size(); i++) {
        pointCloud[i].p.x = points3d[i](0);
        pointCloud[i].p.y = points3d[i](1);
        pointCloud[i].p.z = points3d[i](2);
    }
}
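SimpleReprojectionError itself is not shown in this example. Judging from how it is used above (a 6-parameter camera block of angle-axis plus translation, a 3-D point block, a shared scalar focal length, a 2-D residual, and observations with the principal point already subtracted), an auto-differentiated Ceres functor along the following lines would fit; the project's actual implementation may differ in details.

#include <ceres/ceres.h>
#include <ceres/rotation.h>

// Sketch of a reprojection-error functor matching the parameter blocks passed
// to AddResidualBlock above: camera = [angle-axis(3), translation(3)], a 3-D
// point, and a shared focal length. The principal point is assumed to have
// been subtracted from the observation beforehand.
struct SimpleReprojectionError {
    SimpleReprojectionError(double observed_x, double observed_y)
        : observed_x(observed_x), observed_y(observed_y) {}

    template <typename T>
    bool operator()(const T* const camera,
                    const T* const point,
                    const T* const focal,
                    T* residuals) const {
        // Rotate the point by the angle-axis stored in camera[0..2].
        T p[3];
        ceres::AngleAxisRotatePoint(camera, point, p);

        // Apply the translation stored in camera[3..5].
        p[0] += camera[3];
        p[1] += camera[4];
        p[2] += camera[5];

        // Perspective division and scaling by the focal length.
        const T xp = p[0] / p[2];
        const T yp = p[1] / p[2];
        const T predicted_x = *focal * xp;
        const T predicted_y = *focal * yp;

        // Residual = predicted minus observed image position.
        residuals[0] = predicted_x - T(observed_x);
        residuals[1] = predicted_y - T(observed_y);
        return true;
    }

    static ceres::CostFunction* Create(double observed_x, double observed_y) {
        // 2 residuals, 6 camera parameters, 3 point parameters, 1 focal length.
        return new ceres::AutoDiffCostFunction<SimpleReprojectionError, 2, 6, 3, 1>(
                new SimpleReprojectionError(observed_x, observed_y));
    }

    double observed_x;
    double observed_y;
};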