TEST(Resection_Kernel, Multiview) {

  const int views_num = 3;
  const int points_num = 10;
  const NViewDataSet d = NRealisticCamerasRing(views_num, points_num,
    NViewDatasetConfigurator(1,1,0,0,5,0)); // Assume a camera with the identity matrix as K

  const int nResectionCameraIndex = 2;

  // Solve the problem and check that the fitted values are good enough
  {
    Mat x = d.projected_points_[nResectionCameraIndex];
    Mat X = d.point_3d_;
    mvg::multiview::resection::PoseResectionKernel kernel(x, X);

    const size_t samples_[6] = {0, 1, 2, 3, 4, 5};
    const std::vector<size_t> samples(samples_, samples_ + 6);
    std::vector<Mat34> Ps;
    kernel.Fit(samples, &Ps);
    EXPECT_EQ(1, Ps.size());

    for (size_t i = 0; i < x.cols(); ++i) {
      EXPECT_NEAR(0.0, kernel.Error(i, Ps[0]), 1e-8);
    }

    // Check that the projection matrix is close to the ground truth:
    Mat34 GT_ProjectionMatrix = d.P(nResectionCameraIndex).array()
                                / d.P(nResectionCameraIndex).norm();
    Mat34 COMPUTED_ProjectionMatrix = Ps[0].array() / Ps[0].norm();
    EXPECT_MATRIX_NEAR(GT_ProjectionMatrix, COMPUTED_ProjectionMatrix, 1e-8);
  }
}
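
// A projection matrix is defined only up to scale, which is why the test above
// divides both the ground-truth and the estimated P by their Frobenius norm
// before comparing them. A minimal sketch of that normalization (it leaves the
// residual +/-P sign ambiguity untouched, which this synthetic test never hits):
inline Mat34 NormalizedProjection(const Mat34 &P) {
  return P / P.norm(); // Frobenius-norm scale normalization
}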
TEST(Resection_L_Infinity, Robust_OneOutlier) {

  const int nViews = 3;
  const int nbPoints = 20;
  const NViewDataSet d = NRealisticCamerasRing(nViews, nbPoints,
    nViewDatasetConfigurator(1,1,0,0,5,0)); // Assume a camera with the identity matrix as K

  d.ExportToPLY("test_Before_Infinity.ply");
  //-- Work on a copy of the dataset: zero the values to estimate, then assert the recomputed ones
  NViewDataSet d2 = d;

  const int nResectionCameraIndex = 2;
  //-- Zero out the data that will be recomputed, so results cannot come from stale values:
  d2._R[nResectionCameraIndex] = Mat3::Zero();
  d2._t[nResectionCameraIndex] = Vec3::Zero();

  // Turn 20% of the 3D points into outliers
  const int nbOutlier = static_cast<int>(nbPoints * 0.2);
  for (int i=0; i < nbOutlier; ++i)
  {
    d2._X.col(i)(0) += 120.0;
    d2._X.col(i)(1) += -60.0;
    d2._X.col(i)(2) += 80.0;
  }

  // Solve the problem and check that the fitted values are good enough
  {
    typedef lInfinityCV::kernel::l1PoseResectionKernel KernelType;
    const Mat & pt2D = d2._x[nResectionCameraIndex];
    const Mat & pt3D = d2._X;
    KernelType kernel(pt2D, pt3D);
    ScorerEvaluator<KernelType> scorer(Square(0.1)); // Deliberately strict threshold for this test
    Mat34 P = MaxConsensus(kernel, scorer, NULL, 128);

    // Check that the projection matrix is close to the ground truth:
    Mat34 GT_ProjectionMatrix = d.P(nResectionCameraIndex).array()
      / d.P(nResectionCameraIndex).norm();
    Mat34 COMPUTED_ProjectionMatrix = P.array() / P.norm();

    // Extract K[R|t]
    Mat3 R,K;
    Vec3 t;
    KRt_From_P(P, &K, &R, &t);

    d2._R[nResectionCameraIndex] = R;
    d2._t[nResectionCameraIndex] = t;

    // Check the matrix against the GT, and the residuals
    EXPECT_NEAR( 0.0, FrobeniusDistance(GT_ProjectionMatrix, COMPUTED_ProjectionMatrix), 1e-3 );
    Mat pt4D = VStack(pt3D, Mat(Vec::Ones(pt3D.cols()).transpose()));
    EXPECT_NEAR( 0.0, RootMeanSquareError(pt2D, pt4D, COMPUTED_ProjectionMatrix), 1e-1);
  }
  d2.ExportToPLY("test_After_Infinity.ply");
}
TEST(Triangulation, TriangulateDLT) {

  const NViewDataSet d = NRealisticCamerasRing(2, 12);

  for (int i = 0; i < d._X.cols(); ++i) {
    const Vec3 X_gt = d._X.col(i);
    Vec3 X_estimated;
    const Vec2 x1 = d._x[0].col(i);
    const Vec2 x2 = d._x[1].col(i);
    TriangulateDLT(d.P(0), x1.homogeneous(), d.P(1), x2.homogeneous(), &X_estimated);
    EXPECT_NEAR(0, DistanceLInfinity(X_estimated, X_gt), 1e-8);
  }
}
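
// For reference, a sketch of the textbook DLT construction exercised above
// (assuming the Eigen-based Mat34/Vec2/Vec3 typedefs used throughout; not
// necessarily the library's exact implementation). Each view contributes two
// rows of a homogeneous system A*X = 0 derived from x ~ P*X; X is the right
// singular vector of A associated with the smallest singular value.
inline Vec3 TriangulateDLT_Sketch(const Mat34 &P1, const Vec2 &x1,
                                  const Mat34 &P2, const Vec2 &x2) {
  Eigen::Matrix4d A;
  A.row(0) = x1(0) * P1.row(2) - P1.row(0);
  A.row(1) = x1(1) * P1.row(2) - P1.row(1);
  A.row(2) = x2(0) * P2.row(2) - P2.row(0);
  A.row(3) = x2(1) * P2.row(2) - P2.row(1);
  const Eigen::JacobiSVD<Eigen::Matrix4d> svd(A, Eigen::ComputeFullV);
  return svd.matrixV().col(3).hnormalized(); // homogeneous -> Euclidean
}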
TEST(translation_averaging, globalTi_from_tijs_softl1_Ceres) {

  const int focal = 1000;
  const int principal_Point = 500;
  //-- Setup a circular camera rig or "cardioid".
  const int iNviews = 12;
  const int iNbPoints = 6;

  const bool bCardiod = true;
  const bool bRelative_Translation_PerTriplet = false;
  std::vector<openMVG::relativeInfo > vec_relative_estimates;

  const NViewDataSet d =
    Setup_RelativeTranslations_AndNviewDataset
    (
      vec_relative_estimates,
      focal, principal_Point, iNviews, iNbPoints,
      bCardiod, bRelative_Translation_PerTriplet
    );

  d.ExportToPLY("global_translations_from_Tij_GT.ply");
  visibleCamPosToSVGSurface(d._C, "global_translations_from_Tij_GT.svg");

  // Solve the translation averaging problem:
  std::vector<Vec3> vec_translations;
  EXPECT_TRUE(solve_translations_problem_softl1(
    vec_relative_estimates, bRelative_Translation_PerTriplet, iNviews, vec_translations));

  EXPECT_EQ(iNviews, vec_translations.size());

  // Check accuracy of the found translations
  for (unsigned i = 0; i < iNviews; ++i)
  {
    const Vec3 t = vec_translations[i];
    const Mat3 & Ri = d._R[i];
    const Vec3 C_computed = - Ri.transpose() * t;

    const Vec3 C_GT = d._C[i] - d._C[0];

    //-- Check that the found camera position matches the GT value
    if (i == 0) {
      EXPECT_MATRIX_NEAR(C_computed, C_GT, 1e-6);
    }
    else {
      EXPECT_NEAR(0.0, DistanceLInfinity(C_computed.normalized(), C_GT.normalized()), 1e-6);
    }
  }
}
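
// Note: the camera center follows from t = -R*C, i.e. C = -R^T * t. Only
// directions are compared for i > 0 because translation averaging recovers
// the camera positions up to a global scale (the origin is pinned to C_0).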
NViewDataSet NRealisticCamerasCardioid(size_t nviews, size_t npoints,
                                        const nViewDatasetConfigurator config)
{
  //-- Setup a camera circle rig.
  NViewDataSet d;
  d._n = nviews;
  d._K.resize(nviews);
  d._R.resize(nviews);
  d._t.resize(nviews);
  d._C.resize(nviews);
  d._x.resize(nviews);
  d._x_ids.resize(nviews);

  d._X.resize(3, npoints);
  d._X.setRandom();
  d._X *= 0.6;

  Vecu all_point_ids(npoints);
  for (size_t j = 0; j < npoints; ++j)
    all_point_ids[j] = j;

  for (size_t i = 0; i < nviews; ++i) {
    Vec3 camera_center, t, jitter, lookdir;

    const double theta = i * 2 * M_PI / nviews;
    //-- Cardioid
    camera_center <<
      2*sin(theta)-(sin(2*theta)),
      0.0,
      2*cos(theta)-(cos(2*theta)); // Y axis UP
    camera_center *= config._dist;
    d._C[i] = camera_center;

    jitter.setRandom();
    jitter *= config._jitter_amount / camera_center.norm();
    lookdir = -camera_center + jitter;

    d._K[i] << config._fx,           0, config._cx,
      0,  config._fy, config._cy,
      0,           0,          1;
    d._R[i] = LookAt(lookdir);  // Y axis UP
    d._t[i] = -d._R[i] * camera_center; // t = -R*C, cf. Hartley & Zisserman
    d._x[i] = Project(d.P(i), d._X);
    d._x_ids[i] = all_point_ids;
  }
  return d;
}
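
// LookAt is assumed here to build a world-to-camera rotation whose z axis is
// the viewing direction, with world Y as the up vector (consistent with the
// "Y axis UP" remarks above). A minimal sketch of one common construction;
// the library's actual axis conventions may differ:
inline Mat3 LookAt_Sketch(const Vec3 &direction) {
  const Vec3 zc = direction.normalized();               // camera forward
  const Vec3 xc = Vec3::UnitY().cross(zc).normalized(); // camera right
  // (degenerate if `direction` is parallel to the world Y axis)
  const Vec3 yc = zc.cross(xc);                         // camera up
  Mat3 R;
  R.row(0) = xc.transpose();
  R.row(1) = yc.transpose();
  R.row(2) = zc.transpose();
  return R; // rows are the camera axes expressed in world coordinates
}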
NViewDataSet NRealisticCamerasCardioid(size_t nviews, size_t npoints,
                                       const NViewDatasetConfigurator config)
{
  // Set up the camera parameters
  NViewDataSet d;
  d.actual_camera_num_ = nviews;
  d.camera_matrix_.resize(nviews);
  d.rotation_matrix_.resize(nviews);
  d.translation_vector_.resize(nviews);
  d.camera_center_.resize(nviews);
  d.projected_points_.resize(nviews);
  d.projected_point_ids_.resize(nviews);

  d.point_3d_.resize(3, npoints);
  d.point_3d_.setRandom();
  d.point_3d_ *= 0.6;

  Vecu all_point_ids(npoints);
  for (size_t j = 0; j < npoints; ++j)
    all_point_ids[j] = j;

  for (size_t i = 0; i < nviews; ++i) {
    Vec3 camera_center, t, jitter, lookdir;

    const double theta = i * 2 * M_PI / nviews;
    // Cardioid equation: place the camera center on the curve
    camera_center <<
      2 * sin(theta) - (sin(2 * theta)),
      0.0,
      2 * cos(theta) - (cos(2 * theta)); // Y axis UP
    camera_center *= config._dist;
    d.camera_center_[i] = camera_center;

    jitter.setRandom();
    jitter *= config._jitter_amount / camera_center.norm();
    lookdir = -camera_center + jitter;

    d.camera_matrix_[i] << config._fx, 0, config._cx,
      0, config._fy, config._cy,
      0, 0, 1;
    d.rotation_matrix_[i] = LookAt(lookdir);  // Y axis UP
    d.translation_vector_[i] = -d.rotation_matrix_[i] * camera_center; // t = -R*C, cf. Hartley & Zisserman
    d.projected_points_[i] = Project(d.P(i), d.point_3d_);
    d.projected_point_ids_[i] = all_point_ids;
  }
  return d;
}
TEST(Resection_L_Infinity, Robust_OutlierFree) {

  const int nViews = 3;
  const int nbPoints = 10;
  const NViewDataSet d = NRealisticCamerasRing(nViews, nbPoints,
    nViewDatasetConfigurator(1,1,0,0,5,0)); // Assume a camera with the identity matrix as K

  //-- Work on a copy of the dataset: zero the values to estimate, then assert the recomputed ones
  NViewDataSet d2 = d;

  const int nResectionCameraIndex = 2;
  //-- Zero out the data that will be recomputed, so results cannot come from stale values:
  d2._R[nResectionCameraIndex] = Mat3::Zero();
  d2._t[nResectionCameraIndex] = Vec3::Zero();

  // Solve the problem and check that the fitted values are good enough
  {
    typedef lInfinityCV::kernel::l1PoseResectionKernel KernelType;
    const Mat & pt2D = d2._x[nResectionCameraIndex];
    const Mat & pt3D = d2._X;
    KernelType kernel(pt2D, pt3D);
    ScorerEvaluator<KernelType> scorer(2*Square(0.6));
    Mat34 P = MaxConsensus(kernel, scorer, NULL, 128);

    // Check that the projection matrix is close to the ground truth:
    Mat34 GT_ProjectionMatrix = d.P(nResectionCameraIndex).array()
                                / d.P(nResectionCameraIndex).norm();
    Mat34 COMPUTED_ProjectionMatrix = P.array() / P.norm();

    // Extract K[R|t]
    Mat3 R,K;
    Vec3 t;
    KRt_From_P(P, &K, &R, &t);

    d2._R[nResectionCameraIndex] = R;
    d2._t[nResectionCameraIndex] = t;

    // Check the matrix against the GT, and the residuals
    EXPECT_NEAR( 0.0, FrobeniusDistance(GT_ProjectionMatrix, COMPUTED_ProjectionMatrix), 1e-2 );
    Mat pt4D = VStack(pt3D, Mat(Vec::Ones(pt3D.cols()).transpose()));
    EXPECT_NEAR( 0.0, RootMeanSquareError(pt2D, pt4D, COMPUTED_ProjectionMatrix), 1e-2);
  }
}
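
// MaxConsensus is a RANSAC-style estimator: it fits candidate models on random
// minimal samples and keeps the one with the largest inlier count. A schematic
// of the idea, assuming the kernel interface used above (Fit/Error,
// MINIMUM_SAMPLES, NumSamples); RandomSample is a hypothetical helper and the
// real MaxConsensus signature differs:
template <typename Kernel>
typename Kernel::Model MaxConsensus_Sketch(const Kernel &kernel,
                                           double threshold, int iterations) {
  typename Kernel::Model best_model;
  size_t best_inlier_count = 0;
  for (int it = 0; it < iterations; ++it) {
    // Draw MINIMUM_SAMPLES distinct indices in [0, NumSamples()):
    const std::vector<size_t> sample =
        RandomSample(Kernel::MINIMUM_SAMPLES, kernel.NumSamples());
    std::vector<typename Kernel::Model> models;
    kernel.Fit(sample, &models);
    for (size_t m = 0; m < models.size(); ++m) {
      size_t inlier_count = 0;
      for (size_t i = 0; i < kernel.NumSamples(); ++i)
        if (kernel.Error(i, models[m]) < threshold)
          ++inlier_count;
      if (inlier_count > best_inlier_count) {
        best_inlier_count = inlier_count;
        best_model = models[m];
      }
    }
  }
  return best_model;
}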
TEST(P3P_Kneip_CVPR11, Multiview) {

  const int views_num = 3;
  const int points_num = 3;
  const NViewDataSet d = NRealisticCamerasRing(views_num, points_num,
    NViewDatasetConfigurator(1,1,0,0,5,0)); // Assume a camera with the identity matrix as K

  const int nResectionCameraIndex = 2;

  // Solve the problem and check that the fitted values are good enough
  {
    Mat x = d.projected_points_[nResectionCameraIndex];
    Mat X = d.point_3d_;
    mvg::multiview::P3P_ResectionKernel_K kernel(x, X, d.camera_matrix_[0]);

    const size_t samples_[3] = {0, 1, 2};
    const std::vector<size_t> samples(samples_, samples_ + 3);
    std::vector<Mat34> Ps;
    kernel.Fit(samples, &Ps);

    bool bFound = false;
    int index = -1;
    for (size_t i = 0; i < Ps.size(); ++i)  {
      Mat34 GT_ProjectionMatrix = d.P(nResectionCameraIndex).array()
                                / d.P(nResectionCameraIndex).norm();
      Mat34 COMPUTED_ProjectionMatrix = Ps[i].array() / Ps[i].norm();
      if ( NormLInfinity(GT_ProjectionMatrix - COMPUTED_ProjectionMatrix) < 1e-8 )
      {
        bFound = true;
        index = i;
      }
    }
    EXPECT_TRUE(bFound);

    // Check that the residual is small for the pose that matched the GT
    if (bFound) {
      for (size_t i = 0; i < x.cols(); ++i) {
        EXPECT_NEAR(0.0, kernel.Error(i, Ps[index]), 1e-8);
      }
    }
  }
}
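
// Note: Kneip's P3P admits up to four real pose solutions, hence the scan over
// Ps for the candidate matching the ground truth before checking residuals.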
TEST(translation_averaging, globalTi_from_tijs_Triplets_l2_chordal) {

  const int focal = 1000;
  const int principal_Point = 500;
  //-- Setup a circular camera rig or "cardioid".
  const int iNviews = 12;
  const int iNbPoints = 6;

  const bool bCardiod = true;
  const bool bRelative_Translation_PerTriplet = true;
  std::vector<openMVG::relativeInfo > vec_relative_estimates;

  const NViewDataSet d =
    Setup_RelativeTranslations_AndNviewDataset
    (
      vec_relative_estimates,
      focal, principal_Point, iNviews, iNbPoints,
      bCardiod, bRelative_Translation_PerTriplet
    );

  d.ExportToPLY("global_translations_from_Tij_GT.ply");
  visibleCamPosToSVGSurface(d._C, "global_translations_from_Tij_GT.svg");

  //-- Compute the global translations from the triplets of heading directions
  //-   with the L2 minimization of a Chordal distance
  std::vector<int> vec_edges;
  vec_edges.reserve(vec_relative_estimates.size() * 2);
  std::vector<double> vec_poses;
  vec_poses.reserve(vec_relative_estimates.size() * 3);
  std::vector<double> vec_weights;
  vec_weights.reserve(vec_relative_estimates.size());

  for(int i=0; i < vec_relative_estimates.size(); ++i)
  {
    const openMVG::relativeInfo & rel = vec_relative_estimates[i];
    vec_edges.push_back(rel.first.first);
    vec_edges.push_back(rel.first.second);

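    // With the usual relative-motion convention t_ij = t_j - R_ij * t_i
    // (expressed in frame j), -R_j^T * t_ij equals C_j - C_i up to scale:
    // the heading from camera i to camera j in the world frame.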
    const Vec3 EdgeDirection = -(d._R[rel.first.second].transpose() * rel.second.second.normalized());

    vec_poses.push_back(EdgeDirection(0));
    vec_poses.push_back(EdgeDirection(1));
    vec_poses.push_back(EdgeDirection(2));

    vec_weights.push_back(1.0);
  }

  const double function_tolerance=1e-7, parameter_tolerance=1e-8;
  const int max_iterations = 500;

  const double loss_width = 0.0;

  std::vector<double> X(iNviews*3);

  EXPECT_TRUE(
    solve_translations_problem_l2_chordal(
      &vec_edges[0],
      &vec_poses[0],
      &vec_weights[0],
      vec_relative_estimates.size(),
      loss_width,
      &X[0],
      function_tolerance,
      parameter_tolerance,
      max_iterations));

  // Get back the camera translations in the global frame:
  for (size_t i = 0; i < iNviews; ++i)
  {
    if (i==0) {  // First camera is assumed to sit at the origin
      const Vec3 C0(X[0], X[1], X[2]);
      EXPECT_NEAR(0.0, DistanceLInfinity(C0, Vec3(0,0,0)), 1e-6);
    }
    else  {
      const Vec3 t_GT = (d._C[i] - d._C[0]);

      const Vec3 CI(X[i*3], X[i*3+1], X[i*3+2]);
      const Vec3 C0(X[0], X[1], X[2]);
      const Vec3 t_computed = CI - C0;

      //-- Check that the vectors are collinear
      EXPECT_NEAR(0.0, DistanceLInfinity(t_computed.normalized(), t_GT.normalized()), 1e-6);
    }
  }
}
TEST(translation_averaging, globalTi_from_tijs_Triplets_ECCV14) {

  int focal = 1000;
  int principal_Point = 500;
  //-- Setup a circular camera rig or "cardioid".
  const int iNviews = 12;
  const int iNbPoints = 6;
  NViewDataSet d =
    //NRealisticCamerasRing(
    NRealisticCamerasCardioid(
    iNviews, iNbPoints,
    nViewDatasetConfigurator(focal,focal,principal_Point,principal_Point,5,0));

  d.ExportToPLY("global_translations_from_triplets_GT.ply");

  visibleCamPosToSVGSurface(d._C, "global_translations_from_triplets_GT.svg");

  // List the successive triplets of the large camera loop
  std::vector< graph::Triplet > vec_triplets;
  for (size_t i = 0; i < iNviews; ++i)
  {
    const size_t iPlus1 = modifiedMod(i+1,iNviews);
    const size_t iPlus2 = modifiedMod(i+2,iNviews);
    //-- sort the triplet indices to get a monotonically ascending series of values
    size_t triplet[3] = {i, iPlus1, iPlus2};
    std::sort(&triplet[0], &triplet[3]);
    vec_triplets.push_back(Triplet(triplet[0],triplet[1],triplet[2]));
  }

  //- For each triplet, compute the relative translation and rotation motions
  std::vector<openMVG::relativeInfo > vec_initialEstimates;

  for (size_t i = 0; i < vec_triplets.size(); ++i)
  {
    const graph::Triplet & triplet = vec_triplets[i];
    size_t I = triplet.i, J = triplet.j , K = triplet.k;

    //-- Build camera alias over GT translations and rotations:
    const Mat3 & RI = d._R[I];
    const Mat3 & RJ = d._R[J];
    const Mat3 & RK = d._R[K];
    const Vec3 & ti = d._t[I];
    const Vec3 & tj = d._t[J];
    const Vec3 & tk = d._t[K];

    //-- Build relative motions (these feed the linear-program formulation)
    {
      Mat3 RijGt;
      Vec3 tij;
      RelativeCameraMotion(RI, ti, RJ, tj, &RijGt, &tij);
      vec_initialEstimates.push_back(
        std::make_pair(std::make_pair(I, J), std::make_pair(RijGt, tij)));

      Mat3 RjkGt;
      Vec3 tjk;
      RelativeCameraMotion(RJ, tj, RK, tk, &RjkGt, &tjk);
      vec_initialEstimates.push_back(
        std::make_pair(std::make_pair(J, K), std::make_pair(RjkGt, tjk)));

      Mat3 RikGt;
      Vec3 tik;
      RelativeCameraMotion(RI, ti, RK, tk, &RikGt, &tik);
      vec_initialEstimates.push_back(
        std::make_pair(std::make_pair(I, K), std::make_pair(RikGt, tik)));
    }
  }

  //-- Compute the global translations from the triplets of heading directions
  //-   with Kyle Wilson's method (ECCV 2014)
  std::vector<int> vec_edges;
  vec_edges.reserve(vec_initialEstimates.size() * 2);
  std::vector<double> vec_poses;
  vec_poses.reserve(vec_initialEstimates.size() * 3);
  std::vector<double> vec_weights;
  vec_weights.reserve(vec_initialEstimates.size());

  for(int i=0; i < vec_initialEstimates.size(); ++i)
  {
    const openMVG::relativeInfo & rel = vec_initialEstimates[i];
    vec_edges.push_back(rel.first.first);
    vec_edges.push_back(rel.first.second);

    const Vec3 EdgeDirection = -(d._R[rel.first.second].transpose() * rel.second.second.normalized());

    vec_poses.push_back(EdgeDirection(0));
    vec_poses.push_back(EdgeDirection(1));
    vec_poses.push_back(EdgeDirection(2));

    vec_weights.push_back(1.0);
  }

  const double function_tolerance=1e-7, parameter_tolerance=1e-8;
  const int max_iterations = 500;

  const double loss_width = 0.0;

  std::vector<double> X(iNviews*3);

  EXPECT_TRUE(
    solve_translations_problem(
      &vec_edges[0],
      &vec_poses[0],
      &vec_weights[0],
      vec_initialEstimates.size(),
      loss_width,
      &X[0],
      function_tolerance,
      parameter_tolerance,
      max_iterations));

  // Get back the camera translations in the global frame:
  for (size_t i = 0; i < iNviews; ++i)
  {
    if (i==0) {  // First camera is assumed to sit at the origin
      const Vec3 C0(X[0], X[1], X[2]);
      EXPECT_NEAR(0.0, DistanceLInfinity(C0, Vec3(0,0,0)), 1e-6);
    }
    else  {
      const Vec3 t_GT = (d._C[i] - d._C[0]);

      const Vec3 CI(X[i*3], X[i*3+1], X[i*3+2]);
      const Vec3 C0(X[0], X[1], X[2]);
      const Vec3 t_computed = CI - C0;

      //-- Check that the vectors are collinear
      EXPECT_NEAR(0.0, DistanceLInfinity(t_computed.normalized(), t_GT.normalized()), 1e-6);
    }
  }
}
TEST(Translation_Structure_L_Infinity_Noisy, Outlier_OSICLP_SOLVER) {

  const int nViews = 5;
  const int nbPoints = 5;
  const double focalValue = 1000;
  const double cx = 500, cy = 500;

  const NViewDataSet d =
    //NRealisticCamerasRing(nViews, nbPoints,
    NRealisticCamerasCardioid(nViews, nbPoints,
    nViewDatasetConfigurator(focalValue,focalValue,cx,cy,5,0));

  d.ExportToPLY("test_Before_Infinity.ply");
  //-- Test triangulation of all the points
  NViewDataSet d2 = d;

  //-- Zero out the data that will be recomputed, so results cannot come from stale values:
  d2._X.fill(0); // Reset the 3D points of dataset 2 to force fresh estimates
  fill(d2._t.begin(), d2._t.end(), Vec3(0.0,0.0,0.0));

  {
    // Prepare the rotation matrices premultiplied by K
    Mat3 K;
    K << focalValue, 0, cx,
      0, focalValue, cy,
      0, 0, 1;
    std::vector<Mat3> vec_KRotation(nViews);
    for (size_t i = 0; i < nViews; ++i) {
      vec_KRotation[i] = K * d._R[i];
    }

    // Set two measurements as outliers
    d2._x[0].col(0)(0) += 10; // Camera 0, measurement 0: noise on the X coord
    d2._x[3].col(3)(1) -= 8;  // Camera 3, measurement 3: noise on the Y coord

    //Create the mega matrix
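    // Layout: one column per 2D observation; rows are
    // (x, y, 3D point index, camera index).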
    Mat megaMat(4, d._n*d._x[0].cols());
    {
      int cpt = 0;
      for (size_t i=0; i<d._n;++i)
      {
        const int camIndex = i;
        for (int j=0; j<d._x[0].cols(); ++j)
        {
          megaMat(0,cpt) = d2._x[camIndex].col(j)(0);
          megaMat(1,cpt) = d2._x[camIndex].col(j)(1);
          megaMat(2,cpt) = j;
          megaMat(3,cpt) = camIndex;
          cpt++;
        }
      }
    }

    double admissibleResidual = 1.0/focalValue;
    std::vector<double> vec_solution((nViews + nbPoints + megaMat.cols())*3);
    OSI_CLP_SolverWrapper wrapperOSICLPSolver(vec_solution.size());
    TiXi_withNoise_L1_ConstraintBuilder cstBuilder( vec_KRotation, megaMat);
    std::cout << std::endl << "Bisection returns: " << std::endl;
    std::cout << BisectionLP<TiXi_withNoise_L1_ConstraintBuilder, LP_Constraints_Sparse>(
            wrapperOSICLPSolver,
            cstBuilder,
            &vec_solution,
            admissibleResidual,
            0.0, 1e-8);

    std::cout << "Found solution:\n";
    std::copy(vec_solution.begin(), vec_solution.end(), std::ostream_iterator<double>(std::cout, " "));

    //-- First the ti, then the Xi:

    //-- Fill the ti
    for (int i=0; i < nViews; ++i)  {
      d2._t[i] = K.inverse() * Vec3(vec_solution[3*i], vec_solution[3*i+1], vec_solution[3*i+2]);
      // Recover the camera center: C = -R^(-1) * t (since t = -R*C)
      d2._C[i] = -d2._R[i].inverse() * d2._t[i];
    }

    for (int i=0; i < nbPoints; ++i)  {
      size_t index = 3*nViews;
      d2._X.col(i) = Vec3(vec_solution[index+i*3], vec_solution[index+i*3+1], vec_solution[index+i*3+2]);
    }

    // Compute the L2 residuals from the estimated parameter values:
    std::cout << std::endl << "Residual : " << std::endl;
    Vec2 xk;
    double xsum = 0.0;
    for (size_t i = 0; i < d2._n; ++i) {
        std::cout << "\nCamera : " << i << " \t:";
        for(int k = 0; k < d._x[0].cols(); ++k)
        {
          xk = Project(d2.P(i),  Vec3(d2._X.col(k)));
          double residual = (xk - d2._x[i].col(k)).norm();
          std::cout << Vec2(( xk - d2._x[i].col(k)).array().pow(2)).array().sqrt().mean() <<"\t";
          //-- Check that the residual is small where the measurements are not
          //   noisy; where they are noisy (camera 0 / point 0 and camera 3 /
          //   point 3), the residual is expected to be large
          if (!((i == 0 && k == 0) || (i == 3 && k == 3))) {
            EXPECT_NEAR(0.0, residual, 1e-5);
            xsum += residual;
          }
        }
        std::cout << std::endl;
    }
    const double dResidual = xsum / (d2._n * d._x[0].cols() - 2); // two noisy measurements excluded from the sum
    std::cout << std::endl << "Mean residual over the non-noisy measurements: " << dResidual << std::endl;
    // Check that 2D re-projection and 3D point are near to GT.
    EXPECT_NEAR(0.0, dResidual, 1e-1);
  }

  d2.ExportToPLY("test_After_Infinity.ply");
}
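
// BisectionLP searches for the smallest admissible residual bound by
// bisection: each step asks the LP solver whether the constraint system is
// feasible for the current bound and halves the bracket accordingly. A
// schematic under an assumed feasibility callback (the real BisectionLP
// drives the solver/constraint-builder pair directly):
template <typename FeasibilityCheck>
double Bisection_Sketch(double lo, double hi, double eps,
                        FeasibilityCheck feasible) {
  while (hi - lo > eps) {
    const double gamma = 0.5 * (lo + hi);
    if (feasible(gamma))
      hi = gamma; // feasible: tighten the upper bound
    else
      lo = gamma; // infeasible: raise the lower bound
  }
  return hi; // smallest bound found feasible, within eps
}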
TEST(translation_averaging, globalTi_from_tijs_Triplets) {

  int focal = 1000;
  int principal_Point = 500;
  //-- Setup a circular camera rig or "cardioid".
  const int iNviews = 12;
  const int iNbPoints = 6;
  NViewDataSet d =
    //NRealisticCamerasRing(
      NRealisticCamerasCardioid(
      iNviews, iNbPoints,
      nViewDatasetConfigurator(focal,focal,principal_Point,principal_Point,5,0));

  d.ExportToPLY("global_translations_from_triplets_GT.ply");

  visibleCamPosToSVGSurface(d._C, "global_translations_from_triplets_GT.svg");

  // List the successive triplets of the large camera loop
  std::vector< graphUtils::Triplet > vec_triplets;
  for (size_t i = 0; i < iNviews; ++i)
  {
    const size_t iPlus1 = modifiedMod(i+1,iNviews);
    const size_t iPlus2 = modifiedMod(i+2,iNviews);
    //-- sort the triplet indices to get a monotonically ascending series of values
    size_t triplet[3] = {i, iPlus1, iPlus2};
    std::sort(&triplet[0], &triplet[3]);
    vec_triplets.push_back(Triplet(triplet[0],triplet[1],triplet[2]));
  }

  //- For each triplet, compute the relative translation and rotation motions
  std::vector<openMVG::lInfinityCV::relativeInfo > vec_initialEstimates;

  for (size_t i = 0; i < vec_triplets.size(); ++i)
  {
    const graphUtils::Triplet & triplet = vec_triplets[i];
    size_t I = triplet.i, J = triplet.j , K = triplet.k;

    //-- Build camera alias over GT translations and rotations:
    const Mat3 & RI = d._R[I];
    const Mat3 & RJ = d._R[J];
    const Mat3 & RK = d._R[K];
    const Vec3 & ti = d._t[I];
    const Vec3 & tj = d._t[J];
    const Vec3 & tk = d._t[K];

    //-- Build relative motions (these feed the linear-program formulation)
    {
      Mat3 RijGt;
      Vec3 tij;
      RelativeCameraMotion(RI, ti, RJ, tj, &RijGt, &tij);
      vec_initialEstimates.push_back(
        std::make_pair(std::make_pair(I, J), std::make_pair(RijGt, tij)));

      Mat3 RjkGt;
      Vec3 tjk;
      RelativeCameraMotion(RJ, tj, RK, tk, &RjkGt, &tjk);
      vec_initialEstimates.push_back(
        std::make_pair(std::make_pair(J, K), std::make_pair(RjkGt, tjk)));

      Mat3 RikGt;
      Vec3 tik;
      RelativeCameraMotion(RI, ti, RK, tk, &RikGt, &tik);
      vec_initialEstimates.push_back(
        std::make_pair(std::make_pair(I, K), std::make_pair(RikGt, tik)));
    }
  }

  //-- Compute the global translations from the triplets of heading directions
  //-   with the L_infinity optimization

  std::vector<double> vec_solution(iNviews*3 + vec_initialEstimates.size()/3 + 1);
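  // Solution layout: 3 translation components per view, then one scale factor
  // (lambda) per triplet (each triplet contributed 3 relative estimates), and
  // finally the L_infinity residual bound gamma.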
  double gamma = -1.0;

  //- a. Setup the LP solver,
  //- b. Setup the constraints generator (for the dedicated L_inf problem),
  //- c. Build constraints and solve the problem,
  //- d. Get back the estimated parameters.

  //- a. Setup the LP solver,
  OSI_CLP_SolverWrapper solverLP(vec_solution.size());

  //- b. Setup the constraints generator (for the dedicated L_inf problem),
  Tifromtij_ConstraintBuilder_OneLambdaPerTrif cstBuilder(vec_initialEstimates);

  //- c. Build constraints and solve the problem (Setup constraints and solver)
  LP_Constraints_Sparse constraint;
  cstBuilder.Build(constraint);
  solverLP.setup(constraint);
  //-- Solving
  EXPECT_TRUE(solverLP.solve()); // the linear program must have a solution

  //- d. Get back the estimated parameters.
  solverLP.getSolution(vec_solution);
  gamma = vec_solution[vec_solution.size()-1];

  //--
  //-- Unit test checking about the found solution
  //--
  EXPECT_NEAR(0.0, gamma, 1e-6); // Gamma must be 0: perfect, noise-free data was used

  std::cout << "Found solution with gamma = " << gamma << std::endl;

  //-- Get back computed camera translations
  std::vector<double> vec_camTranslation(iNviews*3,0);
  std::copy(&vec_solution[0], &vec_solution[iNviews*3], &vec_camTranslation[0]);

  //-- Get back computed lambda factors
  std::vector<double> vec_camRelLambdas(&vec_solution[iNviews*3], &vec_solution[iNviews*3 + vec_initialEstimates.size()/3]);
  // lambda factors must be equal to 1.0 (no compression, no dilation);
  EXPECT_NEAR(vec_initialEstimates.size()/3, std::accumulate (vec_camRelLambdas.begin(), vec_camRelLambdas.end(), 0.0), 1e-6);

  // Ground-truth camera centers
  std::cout << std::endl << "Camera centers (Ground truth): " << std::endl;
  for (size_t i = 0; i < iNviews; ++i)
  {
    std::cout << i << ": " << d._C[i].transpose() - d._C[0].transpose() << std::endl;
  }

  // Get back the camera translations in the global frame:
  std::cout << std::endl << "Camera centers (Computed): " << std::endl;
  for (size_t i = 0; i < iNviews; ++i)
  {
    const Vec3 C_GT = d._C[i].transpose() - d._C[0].transpose(); // First camera is taken as the origin

    Vec3 t(vec_camTranslation[i*3], vec_camTranslation[i*3+1], vec_camTranslation[i*3+2]);
    const Mat3 & Ri = d._R[i];
    const Vec3 C_computed = - Ri.transpose() * t;

    //-- Check that the found camera position equals the GT value
    EXPECT_NEAR(0.0, DistanceLInfinity(C_computed, C_GT), 1e-6);

    std::cout << i << ": " << C_computed.transpose() << std::endl;
  }
}
TEST(Translation_Structure_L_Infinity, OSICLP_SOLVER) {

  const size_t kViewNum = 3;
  const size_t kPointsNum = 6;
  const NViewDataSet d = NRealisticCamerasRing(kViewNum, kPointsNum,
    NViewDatasetConfigurator(1,1,0,0,5,0)); // Assume the camera intrinsics K are the identity matrix

  d.ExportToPLY("test_Before_Infinity.ply");
  // Test triangulation of all the points
  NViewDataSet d2 = d;

  //-- Zero out the data that will be recomputed, so results cannot come from stale values:
  d2.point_3d_.fill(0); // Reset the 3D points of dataset 2 to force fresh estimates
  std::fill(d2.translation_vector_.begin(), d2.translation_vector_.end(), Vec3(0.0,0.0,0.0));

  //Create the mega matrix
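  // One column per observation: rows are (x, y, point index, camera index).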
  Mat megaMat(4, d.actual_camera_num_*d.projected_points_[0].cols());
  {
    size_t cpt = 0;
    for (size_t i=0; i<d.actual_camera_num_;++i)
    {
      const size_t camera_index = i;
      for (size_t j=0; j < d.projected_points_[0].cols(); ++j)
      {
        megaMat(0,cpt) = d.projected_points_[camera_index].col(j)(0);
        megaMat(1,cpt) = d.projected_points_[camera_index].col(j)(1);
        megaMat(2,cpt) = j;
        megaMat(3,cpt) = camera_index;
        cpt++;
      }
    }
  }

  // Solve the problem and check that the fitted values are good enough
  {
    std::vector<double> vec_solution((kViewNum + kPointsNum)*3);

    OSI_CLP_SolverWrapper wrapper_osiclp_solver(vec_solution.size());
    Translation_Structure_L1_ConstraintBuilder constraint_builder( d.rotation_matrix_, megaMat);
    EXPECT_TRUE(
      (BisectionLinearProgramming<Translation_Structure_L1_ConstraintBuilder, LP_Constraints_Sparse>(
      wrapper_osiclp_solver,
      constraint_builder,
      &vec_solution,
      1.0,
      0.0))
    );

    // Move the computed values into the dataset for residual estimation.
    {
      //-- fill the ti
      for (size_t i=0; i < kViewNum; ++i)
      {
        size_t index = i*3;
        d2.translation_vector_[i] = Vec3(vec_solution[index], vec_solution[index+1], vec_solution[index+2]);
        // Recover the camera center: C = -R^T * t (since t = -R*C)
        d2.camera_center_[i] = -d2.rotation_matrix_[i].transpose() * d2.translation_vector_[i];
      }

      //-- Now the Xi :
      for (size_t i=0; i < kPointsNum; ++i) {
        size_t index = 3*kViewNum;
        d2.point_3d_.col(i) = Vec3(vec_solution[index+i*3], vec_solution[index+i*3+1], vec_solution[index+i*3+2]);
      }
    }

    // Compute the L2 residuals from the estimated parameter values:
    Vec2 xk, xsum(0.0,0.0);
    for (size_t i = 0; i < d2.actual_camera_num_; ++i) {
      for(size_t k = 0; k < d.projected_points_[0].cols(); ++k)
      {
        xk = Project(d2.P(i), Vec3(d2.point_3d_.col(k)));
        xsum += Vec2(( xk - d2.projected_points_[i].col(k)).array().pow(2));
      }
    }
    double dResidual2D = (xsum.array().sqrt().sum());

    // Check that 2D re-projection and 3D point are near to GT.
    EXPECT_NEAR(0.0, dResidual2D, 1e-4);
  }

  d2.ExportToPLY("test_After_Infinity.ply");
}