コード例 #1
0
  virtual void run(InputArrayOfArrays _points2d)
  {
    // Unpack the input arrays into one Mat of 2D points per frame.
    std::vector<Mat> frames2d;
    _points2d.getMatVector(frames2d);
    CV_Assert( _points2d.total() >= 2 );

    // Build libmv track structures from the per-frame 2D correspondences.
    Tracks feature_tracks;
    parser_2D_tracks(frames2d, feature_tracks);

    // Initialize libmv logging; raise verbosity only when requested.
    libmv_initLogging("");

    if (libmv_reconstruction_options_.verbosity_level >= 0)
    {
      libmv_startDebugLogging();
      libmv_setLoggingVerbosity(
        libmv_reconstruction_options_.verbosity_level);
    }

    // Run the libmv solver and keep a copy of the resulting reconstruction.
    libmv_reconstruction_ =
      *libmv_solveReconstruction(feature_tracks,
                                 &libmv_camera_intrinsics_options_,
                                 &libmv_reconstruction_options_);
  }
コード例 #2
0
// Detect/describe features across the given images, match them pairwise,
// assemble the matches into tracks and hand them to the libmv solver.
libmv_Reconstruction *libmv_solveReconstructionImpl(
  const std::vector<std::string> &images,
  const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options,
  libmv_ReconstructionOptions* libmv_reconstruction_options)
{
  // ORB keypoints described with DAISY descriptors.
  Ptr<Feature2D> detector = ORB::create(10000);
  Ptr<Feature2D> describer = xfeatures2d::DAISY::create();
  //Ptr<Feature2D> describer = xfeatures2d::LATCH::create(64, true, 4);

  cout << "Initialize nViewMatcher ... ";
  libmv::correspondence::nRobustViewMatching matcher(detector, describer);

  cout << "OK" << endl << "Performing Cross Matching ... ";
  matcher.computeCrossMatch(images);
  cout << "OK" << endl;

  // Convert the pairwise matches into libmv track structures.
  libmv::Tracks tracks;
  libmv::Matches matches = matcher.getMatches();
  parser_2D_tracks( matches, tracks );

  // Run the reconstruction pipeline on the assembled tracks.
  return libmv_solveReconstruction(tracks,
                                   libmv_camera_intrinsics_options,
                                   libmv_reconstruction_options);
}
コード例 #3
0
ファイル: ofApp.cpp プロジェクト: dotchang/myOcvSfm
//--------------------------------------------------------------
// Load 2D feature tracks from disk, run SfM reconstruction, and
// prepare the recovered point cloud and camera path for rendering.
void ofApp::setup(){
	camera_pov = false;

	// Read input parameters

	// Read 2D points from text file
	tracks_filename = "data/desktop_tracks.txt";
	f = 1914.0;  // focal length in pixels -- presumably matches the source footage; TODO confirm
	cx = 640;    // principal point (image center of a 1280x720 sequence, presumably)
	cy = 360;
	std::vector<cv::Mat> points2d;
	parser_2D_tracks(tracks_filename, points2d);

	// Set the camera calibration matrix
	cv::Matx33d K = cv::Matx33d(f, 0, cx,
								0, f, cy,
								0, 0, 1);

	/// Reconstruct the scene using the 2d correspondences

	is_projective = true;
	cv::sfm::reconstruct(points2d, Rs_est, ts_est, K, points3d_estimated, is_projective);

	// Print output

	cout << "\n----------------------------\n" << endl;
	cout << "Reconstruction: " << endl;
	cout << "============================" << endl;
	cout << "Estimated 3D points: " << points3d_estimated.size() << endl;
	cout << "Estimated cameras: " << Rs_est.size() << endl;
	cout << "Refined intrinsics: " << endl << K << endl << endl;

	cout << "3D Visualization: " << endl;
	cout << "============================" << endl;

	/// Create 3D windows
	ofSetWindowTitle("Estimation Coordinate Frame");
	ofSetBackgroundColor(ofColor::black);

	// Create the pointcloud
	cout << "Recovering points  ... ";

	// recover estimated points3d
	// size_t index: fixes the signed/unsigned comparison against size()
	// (and matches the camera-recovery loop below)
	for (size_t i = 0; i < points3d_estimated.size(); ++i)
		point_cloud_est.push_back(cv::Vec3f(points3d_estimated[i]));

	cout << "[DONE]" << endl;

	/// Recovering cameras
	cout << "Recovering cameras ... ";

	for (size_t i = 0; i < Rs_est.size(); ++i)
		path_est.push_back(cv::Affine3d(Rs_est[i], ts_est[i]));

	cout << "[DONE]" << endl;

	/// Add cameras
	cout << "Rendering Trajectory  ... ";

	/// Wait for key 'q' to close the window
	cout << endl << "Press:                       " << endl;
	cout << " 's' to switch the camera pov" << endl;
	cout << " 'q' to close the windows    " << endl;

	if (path_est.size() > 0)
	{
		// animated trajectory: start at the first camera, walking backwards
		idx = 0;
		forw = -1;
		n = static_cast<int>(path_est.size());
	}

	cam.setNearClip(0.001f);
	cam.setDistance(5.0f);
}