Example no. 1
FrameProcessor::FrameProcessor(std::string intrinsicsFile, std::string extrinsicsFile, Size frameSize){
    // Load the stereo calibration from the files supplied by the caller.
    FileStorage intrinsics(intrinsicsFile, FileStorage::READ);
    FileStorage extrinsics(extrinsicsFile, FileStorage::READ);

    intrinsics["M1"] >> M1;
    intrinsics["M2"] >> M2;
    intrinsics["D1"] >> D1;
    intrinsics["D2"] >> D2;
    extrinsics["R1"] >> R1;
    extrinsics["R2"] >> R2;
    extrinsics["P1"] >> P1;
    extrinsics["P2"] >> P2;
    extrinsics["Q"] >> Q;

    intrinsics.release();
    extrinsics.release();

    initUndistortRectifyMap(M1, D1, R1, P1, frameSize, CV_16SC2, rmap[0][0], rmap[0][1]);
    initUndistortRectifyMap(M2, D2, R2, P2, frameSize, CV_16SC2, rmap[1][0], rmap[1][1]);

    // Block matcher tuning (commented-out settings left for experimentation).
    // BlockMatcher.state->preFilterType = CV_STEREO_BM_XSOBEL; // CV_STEREO_BM_NORMALIZED_RESPONSE;
    // BlockMatcher.state->preFilterSize = 9;
    BlockMatcher.state->preFilterCap = 45;
    BlockMatcher.state->SADWindowSize = 31;
    // BlockMatcher.state->minDisparity = 0;
    BlockMatcher.state->numberOfDisparities = 128;
    // BlockMatcher.state->textureThreshold = 10;
    // BlockMatcher.state->uniquenessRatio = 15;
    // BlockMatcher.state->speckleRange = 60;
    // BlockMatcher.state->speckleWindowSize = 20;
    // BlockMatcher.state->trySmallerWindows = 0;
    // BlockMatcher.state->roi1 = BlockMatcher.state->roi2 = cvRect(160,120,480,360);
    // BlockMatcher.state->disp12MaxDiff = -1;
}
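For context, the maps and matcher configured here would typically be applied once per stereo pair. A minimal sketch, assuming hypothetical grayscale inputs leftRaw and rightRaw and the same pre-3.0 OpenCV StereoBM API used above:

// Hypothetical per-frame use of the rectification maps and block matcher
// built in the constructor; leftRaw/rightRaw are assumed 8-bit grayscale.
cv::Mat leftRect, rightRect, disparity;
cv::remap(leftRaw,  leftRect,  rmap[0][0], rmap[0][1], cv::INTER_LINEAR);
cv::remap(rightRaw, rightRect, rmap[1][0], rmap[1][1], cv::INTER_LINEAR);
BlockMatcher(leftRect, rightRect, disparity, CV_16S); // old-style StereoBM functor call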
Example no. 2
#include <stdio.h>
#include <math.h>

// extrinsics(), intrinsics(), intersect(), camera_to_world(), and the
// globals tilt and world_point are defined elsewhere in this file.
int main(int argc, char **argv)
{
  if (argc != 2)
  {
    printf("usage: ./borg LASERFILE\n");
    return 1;
  }
  const char *laserfile = argv[1];

  FILE *f = fopen(laserfile,"r");
  if (!f)
  {
    printf("couldn't open [%s]\n", laserfile);
    return 1;
  }
  FILE *out = fopen("points.txt","w");
  if (!out)
  {
    printf("couldn't open points.txt for writing\n");
    fclose(f);
    return 1;
  }
  int line = 0;
  const double encoder_offset = 240;
  while (!feof(f))
  {
    line++;
    double laser_ang, row, col;
    int fields = fscanf(f, "%lf %lf %lf\n", &laser_ang, &row, &col);
    if (fields == EOF)
      break; // clean end of file, not a parse error
    if (fields != 3)
    {
      printf("error parsing line %d\n", line);
      break;
    }
    // subsampling hook: keep every Nth line (N = 1 keeps every line)
    if ((line % 1) != 0)
      continue;
    extrinsics(tilt, (encoder_offset - laser_ang)*M_PI/180,
               0, 135*M_PI/180,
               0.48, 0.02, 0.22);
    intrinsics(col, row);
    intersect();
    camera_to_world();
    fprintf(out, "%f %f %f\n", 
      world_point[0], 
      world_point[1], 
      world_point[2]);
  }

  fclose(f);
  fclose(out);

  return 0;
}
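As noted above, the geometry helpers live elsewhere in the file. For orientation, a pinhole intrinsics() step usually back-projects the pixel to a viewing ray; a minimal hypothetical sketch, where fx, fy, cx, cy are assumed calibration values, not this project's:

/* Hypothetical sketch: back-project pixel (col, row) to a unit-depth
 * ray in the camera frame using assumed pinhole parameters. */
static const double fx = 600.0, fy = 600.0, cx = 320.0, cy = 240.0;
static double camera_ray[3];

void intrinsics_sketch(double col, double row)
{
  camera_ray[0] = (col - cx) / fx;
  camera_ray[1] = (row - cy) / fy;
  camera_ray[2] = 1.0;
}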
Example no. 3
Status KeyframeVisualOdometry::InitializeSecondView(
    const std::vector<Feature>& features,
    const std::vector<Descriptor>& descriptors) {
  CHECK_EQ(features.size(), descriptors.size());

  // Try to match features and descriptors from the first image against those
  // from this image.
  View::Ptr first_view = View::GetView(current_keyframe_);
  CHECK_NOTNULL(first_view.get());

  std::vector<Feature> old_features;
  std::vector<Descriptor> old_descriptors;
  first_view->GetFeaturesAndDescriptors(&old_features, &old_descriptors);

  NaiveMatcher2D2D feature_matcher;
  feature_matcher.AddImageFeatures(old_features, old_descriptors);
  feature_matcher.AddImageFeatures(features, descriptors);

  // Use all matches for 2D to 2D feature matching (not just the best n).
  const bool kTempOption = options_.matcher_options.only_keep_best_matches;
  options_.matcher_options.only_keep_best_matches = false;

  PairwiseImageMatchList image_matches;
  if (!feature_matcher.MatchImages(options_.matcher_options, image_matches)) {
    options_.matcher_options.only_keep_best_matches = kTempOption;
    return Status::Cancelled("Failed to match first and second images.");
  }
  options_.matcher_options.only_keep_best_matches = kTempOption;

  CHECK_EQ(1, image_matches.size());
  PairwiseImageMatch image_match = image_matches[0];
  FeatureMatchList feature_matches = image_match.feature_matches_;

  // Check for sufficient matches.
  if (feature_matches.size() < 8) {
    return Status::Cancelled(
        "Not enough matched points to compute a fundamental matrix.");
  }

  // Get the fundamental matrix between the first two cameras using RANSAC.
  FundamentalMatrixRansacProblem f_problem;
  f_problem.SetData(feature_matches);

  Ransac<FeatureMatch, FundamentalMatrixRansacModel> f_solver;
  f_solver.SetOptions(options_.fundamental_matrix_ransac_options);
  f_solver.Run(f_problem);

  if (!f_problem.SolutionFound()) {
    return Status::Cancelled(
        "Failed to compute a fundamental matrix with RANSAC.");
  }
  Matrix3d F = f_problem.Model().F_;

  // Get the essential matrix between the first two cameras.
  EssentialMatrixSolver e_solver;
  Matrix3d E = e_solver.ComputeEssentialMatrix(F, intrinsics_, intrinsics_);

  // Extract a relative pose from the essential matrix.
  Pose relative_pose;
  if (!e_solver.ComputeExtrinsics(E, feature_matches, intrinsics_, intrinsics_,
                                  relative_pose)) {
    return Status::Cancelled("Failed to decompose essential matrix.");
  }

  // If we got here, we can initialize the second view.
  CameraExtrinsics extrinsics(relative_pose);

  Camera camera2;
  camera2.SetExtrinsics(extrinsics);
  camera2.SetIntrinsics(intrinsics_);
  View::Ptr second_view = View::Create(camera2);
  second_view->CreateAndAddObservations(features, descriptors);

  // Initialize tracks for all observations in the second view.
  tracks_.clear();
  int triangulated_count = 0;
  for (auto& observation : second_view->Observations()) {
    Landmark::Ptr track = Landmark::Create();

    // If the same feature was seen in the first view, triangulate it.
    // TODO: This is pretty inefficient, even though we only do it once.
    bool found_matched_observation = false;
    for (const auto& feature_match : f_problem.Inliers()) {
      if (feature_match.feature2_ == observation->Feature()) {
        for (const auto& old_observation : first_view->Observations()) {
          if (feature_match.feature1_ == old_observation->Feature()) {
            track->IncorporateObservation(old_observation);
            found_matched_observation = true;
            break;
          }
        }
      }
      if (found_matched_observation) break;
    }

    // Now add the observation from the second view.
    if (track->IncorporateObservation(observation)) {
      if (found_matched_observation) {
        CHECK(track->IsEstimated());
        triangulated_count++;
      }
    }
    track->SetDescriptor(observation->Descriptor());
    tracks_.push_back(track->Index());
  }
  printf("triangulated %d\n", triangulated_count);

  // Annotate tracks and features in the second frame.
  if (options_.draw_features) {
    annotator_.AnnotateFeatures(features);
  }
  if (options_.draw_tracks) {
    annotator_.AnnotateTracks(tracks_);
  }
  if (options_.draw_tracks || options_.draw_features) {
    annotator_.Draw();
  }

  if (triangulated_count < options_.num_landmarks_to_initialize) {
    // Delete all landmarks and return false.
    Landmark::ResetLandmarks();
    tracks_.clear();
    return Status::Cancelled("Did not triangulate enough landmarks to initialize.");
  }

  // We successfully initialized! Store the new view.
  current_keyframe_ = second_view->Index();
  view_indices_.push_back(second_view->Index());
  return Status::Ok();
}
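The essential-matrix step above relies on the standard epipolar relation E = K2^T F K1 between the fundamental matrix and the two cameras' intrinsic matrices (identical here, since both views come from the same camera). A minimal Eigen sketch of that conversion; the names are illustrative, not this repo's API:

#include <Eigen/Dense>

// Standard epipolar relation: E = K2^T * F * K1.
Eigen::Matrix3d EssentialFromFundamental(const Eigen::Matrix3d& F,
                                         const Eigen::Matrix3d& K1,
                                         const Eigen::Matrix3d& K2) {
  return K2.transpose() * F * K1;
}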
Example no. 4
int main(int argc, char **argv)
{
    QApplication app(argc, argv);

    osg::ref_ptr<PoLAR::Image<unsigned char> > myImage;
    osg::ref_ptr<PoLAR::VideoPlayer> camera;
    int width = 1024;
    int height = 768;
    int cameraNumber = 0; // the device number: /dev/videoX

    if(argc < 2)
    {
        std::cerr << "Syntax: " << argv[0] << " <3D model> [<texture>]" << std::endl;
        exit(1);
    }

    if(argc > 2)
    {
        // load static image. First check if the file exists
        if(PoLAR::Util::fileExists(argv[2]))
            myImage = new PoLAR::Image<unsigned char>(argv[2], true);
        else
        {
            std::cerr << "File " << argv[2] << " not found" << std::endl;
            exit(1);
        }
    }
    else
    {
        // create webcam manager
        camera = new PoLAR::VideoPlayer(cameraNumber);
        if(!camera.get())
        {
            std::cerr << "Camera initialisation failed" << std::endl;
            exit(1);
        }
        // create texture from webcam stream
        myImage = new PoLAR::Image<unsigned char>(camera.get());
    }

    // create viewer
    SphereViewer viewer(myImage.get(), camera.get());
    viewer.resize(width, height);

    // load 3D model
    osg::ref_ptr<osg::Node> loadedModel = osgDB::readNodeFile(argv[1]);
    if(!loadedModel)
    {
        std::cerr << "Unable to load model" << std::endl;
        exit(1);
    }
    osgUtil::SmoothingVisitor smooth;
    osgUtil::Optimizer optimizer;
    optimizer.optimize(loadedModel.get());
    loadedModel->accept(smooth);
    osg::ref_ptr<PoLAR::Object3D> object = new PoLAR::Object3D(loadedModel.get());
    object->setEditOn();
    // add texture to 3D model
    bool flipTexture;
#if defined(WIN32) || defined(_WIN32)
    flipTexture = false;
#else
    flipTexture = true;
#endif
    object->setObjectTextureState(myImage.get(), flipTexture);

    // add window/level support to texture
    osg::ref_ptr<osg::Program> program = new osg::Program;
    PoLAR::ShaderManager::instanciateWLShaderSources(program.get());
    PoLAR::ShaderManager::instanciateWLShaderUniforms(object->getOriginalNode()->getOrCreateStateSet(), myImage.get());
    object->getOriginalNode()->getOrCreateStateSet()->setAttributeAndModes(program, osg::StateAttribute::ON|osg::StateAttribute::OVERRIDE);

    // add 3D model to viewer
    viewer.addObject3D(object.get());

    // set default projection
    osg::Matrixd proj = viewer.createVisionProjection(width, height);
    viewer.setProjection(proj);
    // create custom projection
    osg::Matrixd extrinsics(1, 0, 0, 0,
                            0, 0, 1, 0,
                            0, -1, 0, 0,
                            0, 0, proj(3,2), 1);
    viewer.setPose(extrinsics);

    // show the viewer
    viewer.center();
    viewer.show();

    // launch webcam stream
    if(camera.valid()) camera->play();

    // start image editing mode
    viewer.startEditImageSlot();
    viewer.startWindowLevelSlot(myImage.get());

    app.connect(&app, SIGNAL(lastWindowClosed()), &app, SLOT(quit()));
    return app.exec();
}
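osg::Matrixd is row-major and OSG multiplies row vectors on the left (v' = v * M), so the custom extrinsics above map a camera-space point (x, y, z) to (x, -z, y + proj(3,2)): a quarter-turn about the X axis plus a Z offset taken from the projection matrix. A minimal standalone check, with tz standing in for proj(3,2):

#include <osg/Matrixd>
#include <osg/Vec3d>
#include <iostream>

int main()
{
    const double tz = -2.0; // stands in for proj(3,2)
    osg::Matrixd extrinsics(1, 0, 0, 0,
                            0, 0, 1, 0,
                            0, -1, 0, 0,
                            0, 0, tz, 1);
    osg::Vec3d p(1.0, 2.0, 3.0);
    osg::Vec3d q = p * extrinsics; // row-vector convention: (x, -z, y + tz)
    std::cout << q.x() << " " << q.y() << " " << q.z() << std::endl; // prints: 1 -3 0
    return 0;
}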