Example #1
/// List the view indexes that have valid camera intrinsic and pose.
static std::set<IndexT> Get_Valid_Views
(
  const SfM_Data & sfm_data
)
{
  std::set<IndexT> valid_idx;
  for (Views::const_iterator it = sfm_data.getViews().begin();
    it != sfm_data.getViews().end(); ++it)
  {
    const View * v = it->second.get();
    const IndexT id_view = v->id_view;
    const IndexT id_intrinsic = v->id_intrinsic;
    const IndexT id_pose = v->id_pose;

    bool bDefined =
      id_intrinsic != UndefinedIndexT &&
      sfm_data.getIntrinsics().find(id_intrinsic) != sfm_data.getIntrinsics().end() &&
      id_pose != UndefinedIndexT &&
      sfm_data.getPoses().find(id_pose) != sfm_data.getPoses().end();

    if (bDefined)
    {
      valid_idx.insert(id_view);
    }
  }
  return valid_idx;
}
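
A minimal usage sketch for the helper above, assuming a scene loaded with the usual openMVG Load / ESfM_Data helpers from sfm_data_io.hpp (exact names may vary slightly between library versions):

SfM_Data sfm_data;
if (Load(sfm_data, "scene.json", ESfM_Data(VIEWS | INTRINSICS | EXTRINSICS)))
{
  // Count the views that can actually contribute to the reconstruction.
  const std::set<IndexT> valid_views = Get_Valid_Views(sfm_data);
  std::cout << valid_views.size() << " of " << sfm_data.getViews().size()
            << " views have both a known intrinsic and a known pose." << std::endl;
}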
Example #2
/// Check that every declared intrinsic and pose is referenced by at least one existing View (no orphan data)
bool ValidIds(const SfM_Data & sfm_data, ESfM_Data flags_part)
{
  const bool bCheck_Intrinsic = (flags_part & INTRINSICS) == INTRINSICS;
  const bool bCheck_Extrinsic = (flags_part & EXTRINSICS) == EXTRINSICS;

  std::set<IndexT> set_id_intrinsics;
  transform(sfm_data.getIntrinsics().begin(), sfm_data.getIntrinsics().end(),
    std::inserter(set_id_intrinsics, set_id_intrinsics.begin()), std::RetrieveKey());

  std::set<IndexT> set_id_extrinsics; // ids are unique, so a set can be used
  transform(sfm_data.getPoses().begin(), sfm_data.getPoses().end(),
    std::inserter(set_id_extrinsics, set_id_extrinsics.begin()), std::RetrieveKey());

  // Collect existing id_intrinsic && id_extrinsic from views
  std::set<IndexT> reallyDefined_id_intrinsics;
  std::set<IndexT> reallyDefined_id_extrinsics;
  for (Views::const_iterator iter = sfm_data.getViews().begin();
    iter != sfm_data.getViews().end();
    ++iter)
  {
    // If a pose is defined, at least the intrinsic must also be valid
    // in order to build a valid camera.
    const IndexT id_pose = iter->second.get()->id_pose;
    const IndexT id_intrinsic = iter->second.get()->id_intrinsic;

    if (set_id_extrinsics.count(id_pose))
      reallyDefined_id_extrinsics.insert(id_pose); //at least it exists

    if (set_id_intrinsics.count(id_intrinsic))
      reallyDefined_id_intrinsics.insert(id_intrinsic); //at least it exists
  }
  // Check if defined intrinsic & extrinsic are at least connected to views
  bool bRet = true;
  if (bCheck_Intrinsic)
    bRet &= set_id_intrinsics.size() == reallyDefined_id_intrinsics.size();

  if (bCheck_Extrinsic)
    bRet &= set_id_extrinsics.size() == reallyDefined_id_extrinsics.size();

  if (!bRet)
    std::cout << "There are orphan intrinsics or poses (not referenced by any view)" << std::endl;

  return bRet;
}
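
A hedged usage sketch: run the orphan check right after loading, with the same ESfM_Data flags that were requested (same assumption about the openMVG Load helper as above):

SfM_Data sfm_data;
const ESfM_Data flags = ESfM_Data(VIEWS | INTRINSICS | EXTRINSICS);
if (!Load(sfm_data, "scene.json", flags) || !ValidIds(sfm_data, flags))
{
  // The file is missing requested data or contains orphan intrinsics/poses.
  std::cerr << "Invalid SfM_Data scene" << std::endl;
}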
Example #3
 /// Triangulate a given track from a selection of observations
 Vec3 track_sample_triangulation(
   const SfM_Data & sfm_data,
   const Observations & obs,
   const std::set<IndexT> & samples) const
 {
   Triangulation trianObj;
   for (const IndexT idx : samples)
   {
     Observations::const_iterator itObs = obs.begin();
     std::advance(itObs, idx);
     const View * view = sfm_data.views.at(itObs->first).get();
     const IntrinsicBase * cam = sfm_data.getIntrinsics().at(view->id_intrinsic).get();
     const Pose3 & pose = sfm_data.poses.at(view->id_pose);
     trianObj.add(
       cam->get_projective_equivalent(pose),
       cam->get_ud_pixel(itObs->second.x));
   }
   return trianObj.compute();
 }
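
Note that samples holds positions inside the Observations container (the code walks to them with std::advance), not view ids. A sketch of how this helper could be exercised directly if it is exposed by the surrounding triangulation object; engine and landmark are placeholders introduced here purely for illustration:

// Hypothetical call: triangulate a landmark from its first three observations.
const Observations & obs = landmark.obs;     // 'landmark' is a placeholder Landmark
const std::set<IndexT> samples = {0, 1, 2};  // positions inside obs, not view ids
const Vec3 X = engine.track_sample_triangulation(sfm_data, obs, samples);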
Example #4
/// Compute the Root Mean Square Error of the residuals
double RMSE(const SfM_Data & sfm_data)
{
  // Compute residuals for each observation
  std::vector<double> vec;
  for(Landmarks::const_iterator iterTracks = sfm_data.getLandmarks().begin();
      iterTracks != sfm_data.getLandmarks().end();
      ++iterTracks)
  {
    const Observations & obs = iterTracks->second.obs;
    for(Observations::const_iterator itObs = obs.begin();
      itObs != obs.end(); ++itObs)
    {
      const View * view = sfm_data.getViews().find(itObs->first)->second.get();
      const Pose3 & pose = sfm_data.getPoses().find(view->id_pose)->second;
      const std::shared_ptr<IntrinsicBase> intrinsic = sfm_data.getIntrinsics().find(view->id_intrinsic)->second;
      const Vec2 residual = intrinsic->residual(pose, iterTracks->second.X, itObs->second.x);
      vec.push_back( residual(0) );
      vec.push_back( residual(1) );
    }
  }
  const Eigen::Map<Eigen::RowVectorXd> residuals(&vec[0], vec.size());
  const double RMSE = std::sqrt(residuals.squaredNorm() / vec.size());
  return RMSE;
}
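
Since every 2D residual pushes two entries into vec, the returned value is sqrt(sum of squared residual components / (2 * number of observations)), i.e. the per-coordinate RMS reprojection error in pixels. A minimal reporting sketch (same assumption about the openMVG Load helper as above):

SfM_Data sfm_data;
if (Load(sfm_data, "reconstruction.json", ESfM_Data(ALL)))
{
  // Root mean square of the reprojection residuals, in pixels.
  std::cout << "Residual RMSE: " << RMSE(sfm_data) << std::endl;
}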
Example #5
  /// Use guided matching to find 2-view correspondences
  void match(
    const SfM_Data & sfm_data,
    const Pair_Set & pairs,
    const std::shared_ptr<Regions_Provider> & regions_provider)
  {
    C_Progress_display my_progress_bar( pairs.size(), std::cout,
      "Compute pairwise fundamental guided matching:\n" );
  #ifdef OPENMVG_USE_OPENMP
    #pragma omp parallel
  #endif // OPENMVG_USE_OPENMP
    for (Pair_Set::const_iterator it = pairs.begin(); it != pairs.end(); ++it)
    {
  #ifdef OPENMVG_USE_OPENMP
      #pragma omp single nowait
  #endif // OPENMVG_USE_OPENMP
      {
      // --
      // Perform GUIDED MATCHING
      // --
      // Use the computed model to check valid correspondences
      // - by considering geometric error and descriptor distance ratio.
      std::vector<IndMatch> vec_corresponding_indexes;

      const View * viewL = sfm_data.getViews().at(it->first).get();
      const Poses::const_iterator iterPoseL = sfm_data.getPoses().find(viewL->id_pose);
      const Intrinsics::const_iterator iterIntrinsicL = sfm_data.getIntrinsics().find(viewL->id_intrinsic);
      const View * viewR = sfm_data.getViews().at(it->second).get();
      const Poses::const_iterator iterPoseR = sfm_data.getPoses().find(viewR->id_pose);
      const Intrinsics::const_iterator iterIntrinsicR = sfm_data.getIntrinsics().find(viewR->id_intrinsic);

      Mat xL, xR;
      PointsToMat(iterIntrinsicL->second.get(), regions_provider->regions_per_view.at(it->first)->GetRegionsPositions(), xL);
      PointsToMat(iterIntrinsicR->second.get(), regions_provider->regions_per_view.at(it->second)->GetRegionsPositions(), xR);

      const Mat34 P_L = iterIntrinsicL->second.get()->get_projective_equivalent(iterPoseL->second);
      const Mat34 P_R = iterIntrinsicR->second.get()->get_projective_equivalent(iterPoseR->second);

      const Mat3 F_lr = F_from_P(P_L, P_R);
      const double thresholdF = 4.0;

#if defined(EXHAUSTIVE_MATCHING)
      // Guided matching considering geometric error and descriptor distance ratio
      geometry_aware::GuidedMatching
        <Mat3, openMVG::fundamental::kernel::EpipolarDistanceError,
        DescriptorT, L2_Vectorized<DescriptorT::bin_type> >(
        F_lr, xL, desc_provider.at(it->first), xR, desc_provider.at(it->second),
        Square(thresholdF), Square(0.8),
        vec_corresponding_indexes);
#else
      const Vec3 epipole2  = epipole_from_P(P_R, iterPoseL->second);

      const features::Regions * regions = regions_provider->regions_per_view.at(it->first).get();
      if (regions->IsScalar())
      {
        // L2 Metric (Handle descriptor internal type)
        if(regions->Type_id() == typeid(unsigned char).name())
        {
          geometry_aware::GuidedMatching_Fundamental_Fast<
          openMVG::fundamental::kernel::EpipolarDistanceError,
          L2_Vectorized<unsigned char> >
          ( F_lr,
            epipole2,
            regions_provider->regions_per_view.at(it->first).get(),
            iterIntrinsicR->second.get()->w(), iterIntrinsicR->second.get()->h(),
            regions_provider->regions_per_view.at(it->second).get(),
            Square(thresholdF), Square(0.8),
            vec_corresponding_indexes);
        }
        else
        if(regions->Type_id() == typeid(float).name())
        {
          geometry_aware::GuidedMatching_Fundamental_Fast<
          openMVG::fundamental::kernel::EpipolarDistanceError,
          L2_Vectorized<float> >
          ( F_lr,
            epipole2,
            regions_provider->regions_per_view.at(it->first).get(),
            iterIntrinsicR->second.get()->w(), iterIntrinsicR->second.get()->h(),
            regions_provider->regions_per_view.at(it->second).get(),
            Square(thresholdF), Square(0.8),
            vec_corresponding_indexes);
        }
        else
        if(regions->Type_id() == typeid(double).name())
        {
          geometry_aware::GuidedMatching_Fundamental_Fast<
          openMVG::fundamental::kernel::EpipolarDistanceError,
          L2_Vectorized<double> >
          ( F_lr,
            epipole2,
            regions_provider->regions_per_view.at(it->first).get(),
            iterIntrinsicR->second.get()->w(), iterIntrinsicR->second.get()->h(),
            regions_provider->regions_per_view.at(it->second).get(),
            Square(thresholdF), Square(0.8),
            vec_corresponding_indexes);
        }
      }
      else
      if (regions->IsBinary() && regions->Type_id() == typeid(unsigned char).name())
      {
        // Hamming metric
        geometry_aware::GuidedMatching_Fundamental_Fast<
        openMVG::fundamental::kernel::EpipolarDistanceError,
        Hamming<unsigned char> >
        ( F_lr,
          epipole2,
          regions_provider->regions_per_view.at(it->first).get(),
          iterIntrinsicR->second.get()->w(), iterIntrinsicR->second.get()->h(),
          regions_provider->regions_per_view.at(it->second).get(),
          Square(thresholdF), 0.8,
          vec_corresponding_indexes);
      }

#endif

  #ifdef OPENMVG_USE_OPENMP
      #pragma omp critical
  #endif // OPENMVG_USE_OPENMP
        {
          ++my_progress_bar;
          for (size_t i = 0; i < vec_corresponding_indexes.size(); ++i)
            putatives_matches[*it].push_back(vec_corresponding_indexes[i]);
        }
      }
    }
  }
Example #6
  /// Filter inconsistent correspondences by using 3-view correspondences on view triplets
  void filter(
    const SfM_Data & sfm_data,
    const Pair_Set & pairs,
    const std::shared_ptr<Regions_Provider> & regions_provider)
  {
    // Compute triplets
    // Triangulate triplet tracks
    //  - keep only the valid ones

    typedef std::vector< graphUtils::Triplet > Triplets;
    const Triplets triplets = graphUtils::tripletListing(pairs);

    C_Progress_display my_progress_bar( triplets.size(), std::cout,
      "Per triplet tracks validation (discard spurious correspondences):\n" );
  #ifdef OPENMVG_USE_OPENMP
      #pragma omp parallel
  #endif // OPENMVG_USE_OPENMP
    for( Triplets::const_iterator it = triplets.begin(); it != triplets.end(); ++it)
    {
  #ifdef OPENMVG_USE_OPENMP
      #pragma omp single nowait
  #endif // OPENMVG_USE_OPENMP
      {
        #ifdef OPENMVG_USE_OPENMP
          #pragma omp critical
        #endif // OPENMVG_USE_OPENMP
        {++my_progress_bar;}

        const graphUtils::Triplet & triplet = *it;
        const IndexT I = triplet.i, J = triplet.j , K = triplet.k;

        openMVG::tracks::STLMAPTracks map_tracksCommon;
        openMVG::tracks::TracksBuilder tracksBuilder;
        {
          PairWiseMatches map_matchesIJK;
          if(putatives_matches.find(std::make_pair(I,J)) != putatives_matches.end())
            map_matchesIJK.insert(*putatives_matches.find(std::make_pair(I,J)));

          if(putatives_matches.find(std::make_pair(I,K)) != putatives_matches.end())
            map_matchesIJK.insert(*putatives_matches.find(std::make_pair(I,K)));

          if(putatives_matches.find(std::make_pair(J,K)) != putatives_matches.end())
            map_matchesIJK.insert(*putatives_matches.find(std::make_pair(J,K)));

          if (map_matchesIJK.size() >= 2) {
            tracksBuilder.Build(map_matchesIJK);
            tracksBuilder.Filter(3);
            tracksBuilder.ExportToSTL(map_tracksCommon);
          }

          // Triangulate the tracks
          for (tracks::STLMAPTracks::const_iterator iterTracks = map_tracksCommon.begin();
            iterTracks != map_tracksCommon.end(); ++iterTracks) {
            {
              const tracks::submapTrack & subTrack = iterTracks->second;
              Triangulation trianObj;
              for (tracks::submapTrack::const_iterator iter = subTrack.begin(); iter != subTrack.end(); ++iter) {
                const size_t imaIndex = iter->first;
                const size_t featIndex = iter->second;
                const View * view = sfm_data.getViews().at(imaIndex).get();
                const IntrinsicBase * cam = sfm_data.getIntrinsics().at(view->id_intrinsic).get();
                const Pose3 & pose = sfm_data.poses.at(view->id_pose);
                const Vec2 pt = regions_provider->regions_per_view.at(imaIndex)->GetRegionPosition(featIndex);
                trianObj.add(cam->get_projective_equivalent(pose), cam->get_ud_pixel(pt));
              }
              const Vec3 Xs = trianObj.compute();
              if (trianObj.minDepth() > 0 && trianObj.error() < 4.0)
              // TODO: Add an angular check ?
              {
                #ifdef OPENMVG_USE_OPENMP
                  #pragma omp critical
                #endif // OPENMVG_USE_OPENMP
                {
                  openMVG::tracks::submapTrack::const_iterator iterI, iterJ, iterK;
                  iterI = iterJ = iterK = subTrack.begin();
                  std::advance(iterJ,1);
                  std::advance(iterK,2);

                  triplets_matches[std::make_pair(I,J)].push_back(IndMatch(iterI->second, iterJ->second));
                  triplets_matches[std::make_pair(J,K)].push_back(IndMatch(iterJ->second, iterK->second));
                  triplets_matches[std::make_pair(I,K)].push_back(IndMatch(iterI->second, iterK->second));
                }
              }
            }
          }
        }
      }
    }
    // Clear the putative matches since they are no longer required
    matching::PairWiseMatches().swap(putatives_matches);
  }
Example #7
  virtual void triangulate(SfM_Data & sfm_data) const
  {
    std::deque<IndexT> rejectedId;
    std::unique_ptr<C_Progress_display> my_progress_bar;
    if (_bConsoleVerbose)
     my_progress_bar.reset( new C_Progress_display(
      sfm_data.structure.size(),
      std::cout,
      "Blind triangulation progress:\n" ));
#ifdef OPENMVG_USE_OPENMP
    #pragma omp parallel
#endif
    for(Landmarks::iterator iterTracks = sfm_data.structure.begin();
      iterTracks != sfm_data.structure.end();
      ++iterTracks)
    {
#ifdef OPENMVG_USE_OPENMP
    #pragma omp single nowait
#endif
      {
        if (_bConsoleVerbose)
        {
#ifdef OPENMVG_USE_OPENMP
    #pragma omp critical
#endif
          ++(*my_progress_bar);
        }
        // Triangulate each landmark
        Triangulation trianObj;
        const Observations & obs = iterTracks->second.obs;
        for(Observations::const_iterator itObs = obs.begin();
          itObs != obs.end(); ++itObs)
        {
          const View * view = sfm_data.views.at(itObs->first).get();
          const IntrinsicBase * cam = sfm_data.getIntrinsics().at(view->id_intrinsic).get();
          const Pose3 & pose = sfm_data.poses.at(view->id_pose);
          trianObj.add(
            cam->get_projective_equivalent(pose),
            cam->get_ud_pixel(itObs->second.x));
        }
        // Compute the 3D point
        const Vec3 X = trianObj.compute();
        if (trianObj.minDepth() > 0) // Keep the point only if it has a positive depth
        {
          iterTracks->second.X = X;
        }
        else
        {
#ifdef OPENMVG_USE_OPENMP
          #pragma omp critical
#endif
          {
            rejectedId.push_front(iterTracks->first);
          }
        }
      }
    }
    // Erase the unsuccessfully triangulated tracks
    for (auto& it : rejectedId)
    {
      sfm_data.structure.erase(it);
    }
  }
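
In openMVG this triangulate method is part of the blind structure-computation engine; a minimal usage sketch, assuming the surrounding class is (a version of) SfM_Data_Structure_Computation_Blind whose constructor takes the console-verbosity flag:

// Re-triangulate every landmark of the scene from its observations.
SfM_Data_Structure_Computation_Blind structure_estimator(true /* verbose */);
structure_estimator.triangulate(sfm_data);
std::cout << sfm_data.structure.size()
          << " landmarks kept after blind triangulation." << std::endl;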
Example #8
  /// Robustly try to estimate the best 3D point using a RANSAC scheme
  /// Return true for a successful triangulation
  bool robust_triangulation(
    const SfM_Data & sfm_data,
    const Observations & obs,
    Vec3 & X,
    const IndexT min_required_inliers = 3,
    const IndexT min_sample_index = 3) const
  {
    const double dThresholdPixel = 4.0; // TODO: make this parameter customizable

    const IndexT nbIter = obs.size(); // TODO: automatic computation of the number of iterations?

    // - Ransac variables
    Vec3 best_model;
    std::set<IndexT> best_inlier_set;
    double best_error = std::numeric_limits<double>::max();

    // - Ransac loop
    for (IndexT i = 0; i < nbIter; ++i)
    {
      std::vector<size_t> vec_samples;
      robust::UniformSample(min_sample_index, obs.size(), &vec_samples);
      const std::set<IndexT> samples(vec_samples.begin(), vec_samples.end());

      // Hypothesis generation.
      const Vec3 current_model = track_sample_triangulation(sfm_data, obs, samples);

      // Test validity of the hypothesis
      // - cheirality (for the samples)
      // - residual error

      // Cheirality (check that the point is in front of the sampled cameras)
      bool bCheirality = true;
      for (const IndexT idx : samples)
      {
        Observations::const_iterator itObs = obs.begin();
        std::advance(itObs, idx);
        const View * view = sfm_data.views.at(itObs->first).get();
        const IntrinsicBase * cam = sfm_data.getIntrinsics().at(view->id_intrinsic).get();
        const Pose3 & pose = sfm_data.poses.at(view->id_pose);
        const double z = pose.depth(current_model); // TODO: cam->depth(pose(X));
        bCheirality &= z > 0;
      }

      if (!bCheirality)
        continue;

      std::set<IndexT> inlier_set;
      double current_error = 0.0;
      // Classify each observation as inlier/outlier according to its pixel residual error.
      for (Observations::const_iterator itObs = obs.begin();
          itObs != obs.end(); ++itObs)
      {
        const View * view = sfm_data.views.at(itObs->first).get();
        const IntrinsicBase * intrinsic = sfm_data.getIntrinsics().at(view->id_intrinsic).get();
        const Pose3 & pose = sfm_data.poses.at(view->id_pose);
        const Vec2 residual = intrinsic->residual(pose, current_model, itObs->second.x);
        const double residual_d = residual.norm();
        if (residual_d < dThresholdPixel)
        {
          inlier_set.insert(itObs->first);
          current_error += residual_d;
        }
        else
        {
          current_error += dThresholdPixel;
        }
      }
      // Keep this hypothesis if it is the best seen so far and has enough inliers.
      if (current_error < best_error && inlier_set.size() >= min_required_inliers)
      {
        X = best_model = current_model;
        best_inlier_set = inlier_set;
        best_error = current_error;
      }
    }
    return !best_inlier_set.empty();
  }
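
Two details worth noting: the score is a truncated cost (each outlier contributes the fixed dThresholdPixel penalty instead of its residual), and hypothesis generation reuses track_sample_triangulation from Example #3. A hedged usage sketch, with robust_engine and landmark as placeholder names for the surrounding robust triangulation object and a track of sfm_data.structure:

// Try to robustly triangulate one track; keep the estimate only on success.
Vec3 X;
if (robust_engine.robust_triangulation(sfm_data, landmark.obs, X))
{
  landmark.X = X;  // accept the RANSAC estimate
}
// else: too few consistent observations, the track should be discarded.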