libmv_Reconstruction *libmv_solveReconstructionImpl(
  const std::vector<std::string> &images,
  const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options,
  libmv_ReconstructionOptions* libmv_reconstruction_options)
{
  // Detect up to 10000 ORB keypoints per image and describe them with DAISY
  Ptr<Feature2D> edetector = ORB::create(10000);
  Ptr<Feature2D> edescriber = xfeatures2d::DAISY::create();
  //Ptr<Feature2D> edescriber = xfeatures2d::LATCH::create(64, true, 4);

  cout << "Initialize nViewMatcher ... ";
  libmv::correspondence::nRobustViewMatching nViewMatcher(edetector, edescriber);

  cout << "OK" << endl << "Performing Cross Matching ... ";
  nViewMatcher.computeCrossMatch(images);
  cout << "OK" << endl;

  // Building tracks
  libmv::Tracks tracks;
  libmv::Matches matches = nViewMatcher.getMatches();
  parser_2D_tracks( matches, tracks );

  // Perform reconstruction
  return libmv_solveReconstruction(tracks,
                                   libmv_camera_intrinsics_options,
                                   libmv_reconstruction_options);
}
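/* A minimal usage sketch, not part of the original source: the image-based
 * pipeline above is reached from cv::sfm::reconstruct() when it is given a
 * list of image paths. The paths and intrinsics below are assumed
 * placeholder values.
 */
#include <opencv2/core.hpp>
#include <opencv2/sfm.hpp>
#include <string>
#include <vector>

int main()
{
  // Placeholder image paths; any ordered sequence of views of a static scene works.
  std::vector<cv::String> images_paths = { "frame0001.png", "frame0002.png", "frame0003.png" };

  // Assumed pinhole intrinsics (focal length 800, principal point at 400, 300).
  cv::Matx33d K(800,   0, 400,
                  0, 800, 300,
                  0,   0,   1);

  std::vector<cv::Mat> Rs_est, ts_est, points3d_estimated;

  // Euclidean reconstruction from the image list; internally this ends up in
  // libmv_solveReconstructionImpl() shown above.
  cv::sfm::reconstruct(images_paths, Rs_est, ts_est, K, points3d_estimated, true);

  return 0;
}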
  virtual void run(InputArrayOfArrays _points2d)
  {
    std::vector<Mat> points2d;
    _points2d.getMatVector(points2d);
    CV_Assert( _points2d.total() >= 2 );

    // Parse 2d points to Tracks
    Tracks tracks;
    parser_2D_tracks(points2d, tracks);

    // Set libmv logs level
    libmv_initLogging("");

    if (libmv_reconstruction_options_.verbosity_level >= 0)
    {
      libmv_startDebugLogging();
      libmv_setLoggingVerbosity(
        libmv_reconstruction_options_.verbosity_level);
    }

    // Perform reconstruction
    libmv_reconstruction_ =
      *libmv_solveReconstruction(tracks,
                                 &libmv_camera_intrinsics_options_,
                                 &libmv_reconstruction_options_);
  }
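/* A minimal sketch of the expected input layout, not part of the original
 * source: the run() overload above receives one 2 x n_tracks matrix per view,
 * which is how the 2D-points overload of cv::sfm::reconstruct() packs its
 * input. The intrinsics and the random track coordinates are assumptions;
 * real tracked features are needed for a meaningful reconstruction.
 */
#include <opencv2/core.hpp>
#include <opencv2/sfm.hpp>
#include <vector>

int main()
{
  const int n_views = 10, n_tracks = 20;

  // One 2 x n_tracks matrix per view; column j holds the (x, y) position of
  // track j in that view. Random values stand in for real tracker output.
  std::vector<cv::Mat> points2d;
  for (int v = 0; v < n_views; ++v)
  {
    cv::Mat frame_tracks(2, n_tracks, CV_64F);
    cv::randu(frame_tracks, cv::Scalar(0), cv::Scalar(640));
    points2d.push_back(frame_tracks);
  }

  cv::Matx33d K(800,   0, 400,   // assumed intrinsics
                  0, 800, 300,
                  0,   0,   1);

  std::vector<cv::Mat> Rs_est, ts_est, points3d_estimated;
  cv::sfm::reconstruct(points2d, Rs_est, ts_est, K, points3d_estimated, true);

  return 0;
}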
/* Solve camera/object motion and reconstruct 3D marker positions
 * from a prepared reconstruction context.
 *
 * stop is not actually used at the moment, so the reconstruction
 * job cannot be stopped.
 *
 * do_update, progress and stats_message are filled by the reconstruction
 * callback on the libmv side and passed on to the interface.
 */
void BKE_tracking_reconstruction_solve(MovieReconstructContext *context, short *stop, short *do_update,
                                       float *progress, char *stats_message, int message_size)
{
	float error;

	ReconstructProgressData progressdata;

	libmv_CameraIntrinsicsOptions camera_intrinsics_options;
	libmv_ReconstructionOptions reconstruction_options;

	progressdata.stop = stop;
	progressdata.do_update = do_update;
	progressdata.progress = progress;
	progressdata.stats_message = stats_message;
	progressdata.message_size = message_size;

	camraIntrincicsOptionsFromContext(&camera_intrinsics_options, context);
	reconstructionOptionsFromContext(&reconstruction_options, context);

	if (context->motion_flag & TRACKING_MOTION_MODAL) {
		context->reconstruction = libmv_solveModal(context->tracks,
		                                           &camera_intrinsics_options,
		                                           &reconstruction_options,
		                                           reconstruct_update_solve_cb, &progressdata);
	}
	else {
		context->reconstruction = libmv_solveReconstruction(context->tracks,
		                                                    &camera_intrinsics_options,
		                                                    &reconstruction_options,
		                                                    reconstruct_update_solve_cb, &progressdata);

		if (context->select_keyframes) {
			/* store actual keyframes used for reconstruction to update them in the interface later */
			context->keyframe1 = reconstruction_options.keyframe1;
			context->keyframe2 = reconstruction_options.keyframe2;
		}
	}

	error = libmv_reprojectionError(context->reconstruction);

	context->reprojection_error = error;
}
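/* A minimal, hypothetical caller sketch, not taken from Blender itself: it
 * only shows how the arguments of BKE_tracking_reconstruction_solve() are
 * wired up. The context is assumed to have been created and filled by the
 * usual reconstruction-context setup beforehand.
 */
static void solve_reconstruction_job(MovieReconstructContext *context)
{
	short stop = 0, do_update = 0;
	float progress = 0.0f;
	char stats_message[256] = "";

	BKE_tracking_reconstruction_solve(context, &stop, &do_update,
	                                  &progress, stats_message, sizeof(stats_message));

	/* After the call, stats_message holds the last status string reported by
	 * the solver callback and the average reprojection error is stored in
	 * context->reprojection_error. */
}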