Example #1
void calc_projective (const std::vector<double>& frame_ts,
                      const std::vector<Vec4>& gyro_quat,
                      const std::vector<Vec3>& acc_trans,
                      const std::vector<double>& gyro_ts,
                      CalibrationParams calib,
                      std::vector<Mat3>& projective)
{
    int index0 = 0;
    int index1 = 0;

    size_t frame_count = frame_ts.size();

    // Each iteration pairs frame fid with fid + 1, so stop one frame early
    // (the original bound read past the end of frame_ts).
    for (size_t fid = 0; fid + 1 < frame_count; fid++) {
        const double ts0 = frame_ts[fid] + calib.gyro_delay;
        Quatern quat0 = interp_gyro_quatern(ts0, gyro_quat, gyro_ts, index0) + Quatern(calib.gyro_drift);

        const double ts1 = frame_ts[fid + 1] + calib.gyro_delay;
        Quatern quat1 = interp_gyro_quatern(ts1, gyro_quat, gyro_ts, index1) + Quatern(calib.gyro_drift);

        Mat3 extr0 = calc_extrinsic(quat0);
        Mat3 extr1 = calc_extrinsic(quat1);

        Mat3 intrinsic = calc_intrinsic(calib.fx, calib.fy, calib.cx, calib.cy, calib.skew);

        Mat3 extrinsic0 = rotate_coordinate_system(AXIS_X, AXIS_MINUS_Z) * extr0  * mirror_coordinate_system(AXIS_Y);
        Mat3 extrinsic1 = rotate_coordinate_system(AXIS_X, AXIS_MINUS_Z) * extr1  * mirror_coordinate_system(AXIS_Y);

        projective[fid] = intrinsic * extrinsic0 * extrinsic1.transpose() * intrinsic.inverse();
    }
}
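
calc_intrinsic is not shown in this example; the sketch below assumes it assembles the standard upper-triangular pinhole matrix from (fx, fy, cx, cy, skew), using the same Eigen-style Mat3 stream initialization that appears elsewhere in this listing.

// Sketch only: assumed layout of the pinhole intrinsic matrix.
Mat3 calc_intrinsic (double fx, double fy, double cx, double cy, double skew)
{
    Mat3 K;
    K << fx,  skew, cx,
         0.0, fy,   cy,
         0.0, 0.0,  1.0;
    return K;
}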
Example #2
  // Camera triangulation using the iterated linear method
  Vec3 Triangulation::compute(int iter) const
  {
    const int nviews = int(views.size());
    assert(nviews>=2);

    // Iterative weighted linear least squares
    Mat3 AtA;
    Vec3 Atb, X;
    Vec weights = Vec::Constant(nviews,1.0);
    for (int it=0;it<iter;++it)
    {
      AtA.fill(0.0);
      Atb.fill(0.0);
      for (int i=0;i<nviews;++i)
      {
        const Mat34& PMat = views[i].first;
        const Vec2 & p = views[i].second;
        const double w = weights[i];

        Vec3 v1, v2;
        for (int j=0;j<3;j++)
        {
          v1[j] = w * ( PMat(0,j) - p(0) * PMat(2,j) );
          v2[j] = w * ( PMat(1,j) - p(1) * PMat(2,j) );
          Atb[j] += w * ( v1[j] * ( p(0) * PMat(2,3) - PMat(0,3) )
                 + v2[j] * ( p(1) * PMat(2,3) - PMat(1,3) ) );
        }

        for (int k=0;k<3;k++)
        {
          for (int j=0;j<=k;j++)
          {
            const double v = v1[j] * v1[k] + v2[j] * v2[k];
            AtA(j,k) += v;
            if (j<k) AtA(k,j) += v;
          }
        }
      }

      X = AtA.inverse() * Atb;

      // Compute reprojection error, min and max depth, and update weights
      zmin = std::numeric_limits<double>::max();
      zmax = - std::numeric_limits<double>::max();
      err = 0.0;
      for (int i=0;i<nviews;++i)
      {
        const Mat34& PMat = views[i].first;
        const Vec2 & p = views[i].second;
        const Vec3 xProj = PMat * X.homogeneous();
        const double z = xProj(2);
        // Track min and max depth independently (an 'else' would skip updating
        // zmax when the same point also lowers zmin, e.g. on the first view).
        if (z < zmin) zmin = z;
        if (z > zmax) zmax = z;
        err += (p - xProj.hnormalized()).norm(); // residual error
        weights[i] = 1.0 / z;
      }
    }
    return X;
  }
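
A minimal usage sketch for the triangulator above, assuming views holds (projection matrix, 2D observation) pairs as the loop shows; the add() accessor is a placeholder for whatever method actually fills views.

  // Sketch only: 'add' is a hypothetical accessor that appends a
  // (Mat34, Vec2) pair to 'views'; types follow the code above.
  Triangulation triangulator;
  Mat34 P0, P1;   // camera projection matrices (filled elsewhere)
  Vec2  x0, x1;   // matching image observations
  triangulator.add(P0, x0);
  triangulator.add(P1, x1);
  const Vec3 X = triangulator.compute(3); // three reweighting passes

Reweighting each view's rows by 1/z after every solve biases the linear system toward image-space (reprojection) error rather than the plain algebraic residual.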
Example #3
 Pinhole_Intrinsic(
   unsigned int w = 0, unsigned int h = 0,
   double focal_length_pix = 0.0,
   double ppx = 0.0, double ppy = 0.0)
   :IntrinsicBase(w,h)
 {
   _K << focal_length_pix, 0.,               ppx,
         0.,               focal_length_pix, ppy,
         0.,               0.,               1.;
   _Kinv = _K.inverse();
 }
  ACKernelAdaptorResection_K(const Mat &x2d, const Mat &x3D, const Mat3 & K)
    : x2d_(x2d.rows(), x2d.cols()), x3D_(x3D),
    N1_(K.inverse()),
    logalpha0_(log10(M_PI)), K_(K)
  {
    assert(2 == x2d_.rows());
    assert(3 == x3D_.rows());
    assert(x2d_.cols() == x3D_.cols());

    // Normalize points by inverse(K)
    ApplyTransformationToPoints(x2d, N1_, &x2d_);
  }
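
ApplyTransformationToPoints is not included in the snippet; a plausible sketch, assuming it maps each 2D column through the 3x3 transform in homogeneous coordinates (an illustration, not the library's implementation):

// Sketch only: apply a 3x3 transform to every 2D point in homogeneous form.
void ApplyTransformationToPoints (const Mat &points, const Mat3 &T, Mat *transformed)
{
  transformed->resize(2, points.cols());
  for (Mat::Index i = 0; i < points.cols(); ++i) {
    const Vec3 p = T * points.col(i).homogeneous(); // lift to (x, y, 1), then transform
    transformed->col(i) = p.hnormalized();          // drop back to inhomogeneous 2D
  }
}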
Example #5
Mat3 calc_projective (double* frame_ts,
                      Vec4* gyro_quat,
                      Vec3* acc_trans,
                      double* gyro_ts,
                      CalibrationParams calib)
{
    double ts0 = frame_ts[0] + calib.gyro_delay;
    Quatern rot0 = Quatern(gyro_quat[0] + calib.gyro_drift);
    double ts1 = frame_ts[1] + calib.gyro_delay;
    Quatern rot1 = Quatern(gyro_quat[1] + calib.gyro_drift);

    Vec3 trans0 = acc_trans[0];
    Vec3 trans1 = acc_trans[1];

    Mat3 extr0 = calc_extrinsic(rot0);
    Mat3 extr1 = calc_extrinsic(rot1);
    Mat3 intrin = calc_intrinsic(calib.fx, calib.fy, calib.cx, calib.cy, calib.skew);

    return intrin * extr0 * extr1.transpose() * intrin.inverse();
}
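
calc_extrinsic is also not shown; the sketch below assumes it is the standard unit-quaternion to rotation-matrix conversion, and the w/x/y/z member names on Quatern are assumptions.

// Sketch only: standard conversion of a unit quaternion (w, x, y, z) into a
// rotation matrix; the component names on Quatern are assumed.
Mat3 calc_extrinsic (const Quatern &q)
{
    const double w = q.w, x = q.x, y = q.y, z = q.z;
    Mat3 R;
    R << 1 - 2*(y*y + z*z), 2*(x*y - w*z),     2*(x*z + w*y),
         2*(x*y + w*z),     1 - 2*(x*x + z*z), 2*(y*z - w*x),
         2*(x*z - w*y),     2*(y*z + w*x),     1 - 2*(x*x + y*y);
    return R;
}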
// Generates all necessary inputs and expected outputs for EuclideanResection.
void CreateCameraSystem(const Mat3& KK,
                        const Mat3X& x_image,
                        const Vec& X_distances,
                        const Mat3& R_input,
                        const Vec3& T_input,
                        Mat2X *x_camera,
                        Mat3X *X_world,
                        Mat3  *R_expected,
                        Vec3  *T_expected) {
  int num_points = x_image.cols();

  Mat3X x_unit_cam(3, num_points);
  x_unit_cam = KK.inverse() * x_image;

  // Create normalized camera coordinates to be used as an input to the PnP
  // function, instead of using NormalizeColumnVectors(&x_unit_cam).
  *x_camera = x_unit_cam.block(0, 0, 2, num_points);
  for (int i = 0; i < num_points; ++i){
    x_unit_cam.col(i).normalize();
  }

  // Create the 3D points in the camera system.
  Mat X_camera(3, num_points);
  for (int i = 0; i < num_points; ++i) {
    X_camera.col(i) = X_distances(i) * x_unit_cam.col(i);
  }

  // Apply the transformation to the camera 3D points
  Mat translation_matrix(3, num_points);
  translation_matrix.row(0).setConstant(T_input(0));
  translation_matrix.row(1).setConstant(T_input(1));
  translation_matrix.row(2).setConstant(T_input(2));

  *X_world = R_input * X_camera + translation_matrix;

  // Create the expected result for comparison.
  *R_expected = R_input.transpose();
  *T_expected = *R_expected * ( - T_input);
}
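
The last two assignments encode the inverse of the synthetic pose: since X_world = R_input * X_camera + T_input, the pose that maps world points back into the camera frame is R_expected = R_input^T and T_expected = -R_expected * T_input. A small hypothetical check of that identity (the helper name and the assert are illustrative only):

// Sketch only: verify the inverse-pose identity used by CreateCameraSystem.
void CheckExpectedPose(const Mat3 &R_input, const Vec3 &T_input,
                       const Mat3X &X_camera, const Mat3X &X_world) {
  const Mat3 R_expected = R_input.transpose();
  const Vec3 T_expected = -R_expected * T_input;
  for (int i = 0; i < X_world.cols(); ++i) {
    // Mapping the generated world points with the expected pose should
    // reproduce the camera-frame points they were built from.
    const Vec3 back = R_expected * X_world.col(i) + T_expected;
    assert((back - X_camera.col(i)).norm() < 1e-9);
  }
}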
Example #7
void HomogeneousToNormalizedCamera(const Mat3X &x, const Mat3 &K, Mat2X *n) {
  Mat3X x_camera_h = K.inverse() * x;
  HomogeneousToEuclidean(x_camera_h, n);
}
Example #8
void EuclideanToNormalizedCamera(const Mat2X &x, const Mat3 &K, Mat2X *n) {
  Mat3X x_image_h;
  EuclideanToHomogeneous(x, &x_image_h);
  Mat3X x_camera_h = K.inverse() * x_image_h;
  HomogeneousToEuclidean(x_camera_h, n);
}
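
The homogeneous/Euclidean helpers used by Examples #7 and #8 are not part of the listing; minimal sketches, assuming they append a unit row and divide by the last row respectively:

// Sketch only: append a row of ones to lift 2D columns to homogeneous 3-vectors.
void EuclideanToHomogeneous (const Mat2X &x, Mat3X *h) {
  h->resize(3, x.cols());
  h->topRows(2) = x;
  h->row(2).setOnes();
}

// Sketch only: divide each column by its third entry to drop back to 2D.
void HomogeneousToEuclidean (const Mat3X &h, Mat2X *e) {
  *e = h.colwise().hnormalized();
}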
Example #9
TEST(Translation_Structure_L_Infinity_Noisy, Outlier_OSICLP_SOLVER) {

  const int nViews = 5;
  const int nbPoints = 5;
  const double focalValue = 1000;
  const double cx = 500, cy = 500;

  const NViewDataSet d =
    //NRealisticCamerasRing(nViews, nbPoints,
    NRealisticCamerasCardioid(nViews, nbPoints,
    nViewDatasetConfigurator(focalValue,focalValue,cx,cy,5,0));

  d.ExportToPLY("test_Before_Infinity.ply");
  //-- Test triangulation of all the points
  NViewDataSet d2 = d;

  //-- Reset the data that will be recomputed, so the results come from the new computation:
  d2._X.fill(0); // Zero the 3D points of dataset 2
  fill(d2._t.begin(), d2._t.end(), Vec3(0.0,0.0,0.0));

  {
    // Prepare the rotation matrices premultiplied by K
    Mat3 K;
    K << focalValue, 0, cx,
      0, focalValue, cy,
      0, 0, 1;
    std::vector<Mat3> vec_KRotation(nViews);
    for (size_t i = 0; i < nViews; ++i) {
      vec_KRotation[i] = K * d._R[i];
    }

    // Set two points as outliers
    d2._x[0].col(0)(0) +=10; //Camera 0, measurement 0, noise on X coord
    d2._x[3].col(3)(1) -=8;  //Camera 3, measurement 3, noise on Y coord

    //Create the mega matrix
    Mat megaMat(4, d._n*d._x[0].cols());
    {
      int cpt = 0;
      for (size_t i=0; i<d._n;++i)
      {
        const int camIndex = i;
        for (int j=0; j<d._x[0].cols(); ++j)
        {
          megaMat(0,cpt) = d2._x[camIndex].col(j)(0);
          megaMat(1,cpt) = d2._x[camIndex].col(j)(1);
          megaMat(2,cpt) = j;
          megaMat(3,cpt) = camIndex;
          cpt++;
        }
      }
    }

    double admissibleResidual = 1.0/focalValue;
    std::vector<double> vec_solution((nViews + nbPoints + megaMat.cols())*3);
    OSI_CLP_SolverWrapper wrapperOSICLPSolver(vec_solution.size());
    TiXi_withNoise_L1_ConstraintBuilder cstBuilder( vec_KRotation, megaMat);
    std::cout << std::endl << "Bisection returns : " << std::endl;
    std::cout<< BisectionLP<TiXi_withNoise_L1_ConstraintBuilder, LP_Constraints_Sparse>(
            wrapperOSICLPSolver,
            cstBuilder,
            &vec_solution,
            admissibleResidual,
            0.0, 1e-8);

    std::cout << "Found solution:\n";
    std::copy(vec_solution.begin(), vec_solution.end(), std::ostream_iterator<double>(std::cout, " "));

    //-- First the ti, then the Xi:

    //-- Fill the ti
    for (int i=0; i < nViews; ++i)  {
      d2._t[i] = K.inverse() * Vec3(vec_solution[3*i], vec_solution[3*i+1], vec_solution[3*i+2]);
      // Recover the camera center Ci = -Ri^-1 * ti
      d2._C[i] = -d2._R[i].inverse() * d2._t[i];
    }

    for (int i=0; i < nbPoints; ++i)  {
      size_t index = 3*nViews;
      d2._X.col(i) = Vec3(vec_solution[index+i*3], vec_solution[index+i*3+1], vec_solution[index+i*3+2]);
    }

    // Compute L2 residuals from the estimated parameter values:
    std::cout << std::endl << "Residual : " << std::endl;
    Vec2 xk;
    double xsum = 0.0;
    for (size_t i = 0; i < d2._n; ++i) {
        std::cout << "\nCamera : " << i << " \t:";
        for(int k = 0; k < d._x[0].cols(); ++k)
        {
          xk = Project(d2.P(i),  Vec3(d2._X.col(k)));
          double residual = (xk - d2._x[i].col(k)).norm();
          std::cout << Vec2(( xk - d2._x[i].col(k)).array().pow(2)).array().sqrt().mean() <<"\t";
          //-- Check that where the measurement is not noisy the residual is small,
          //   and where the measurement is noisy the residual is large
          //if ((i != 0 && k != 0) || (i!=3 && k !=3))
          if ((i != 0 && k != 0) && (i!=3 && k !=3)) {
            EXPECT_NEAR(0.0, residual, 1e-5);
            xsum += residual;
          }
        }
        std::cout << std::endl;
    }
    double dResidual = xsum / (d2._n*d._x[0].cols());
    std::cout << std::endl << "Residual mean in not noisy measurement: " << dResidual << std::endl;
    // Check that 2D re-projection and 3D point are near to GT.
    EXPECT_NEAR(0.0, dResidual, 1e-1);
  }

  d2.ExportToPLY("test_After_Infinity.ply");
}
Example #10
		// Denormalize the results. See HZ page 109.
		void UnnormalizerI::Unnormalize(const Mat3 &T1, const Mat3 &T2, Mat3 *H)  {
			*H = T2.inverse() * (*H) * T1;
		}
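
Unnormalize undoes the point conditioning of the normalized DLT (HZ, p. 109): if H was estimated between points transformed by T1 and T2, then T2^-1 * H * T1 maps the original points. A minimal sketch of where the call fits; NormalizePoints and EstimateHomographyNormalized are hypothetical placeholders, not functions from the code above.

// Sketch only: normalized estimation followed by denormalization.
Mat3 EstimateHomographyDenormalized(const Mat &x1, const Mat &x2)
{
  Mat x1n, x2n;
  Mat3 T1, T2;
  NormalizePoints(x1, &x1n, &T1);          // x1n = T1 * x1 (conditioning)
  NormalizePoints(x2, &x2n, &T2);          // x2n = T2 * x2

  Mat3 H = EstimateHomographyNormalized(x1n, x2n);  // maps x1n to x2n
  UnnormalizerI::Unnormalize(T1, T2, &H);  // H now maps the original x1 to x2
  return H;
}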