Ejemplo n.º 1
0
void GuidedMatching(
  const ModelArg & mod, // The model
  const Mat & xLeft,    // The left data points
  const Mat & xRight,   // The right data points
  double errorTh,       // Maximal authorized error threshold
  std::vector<IndMatch> & vec_corresponding_index) // Ouput corresponding index
{
  assert(xLeft.rows() == xRight.rows());

  // Looking for the corresponding points that have
  //  the smallest distance (smaller than the provided Threshold)

  for (size_t i = 0; i < xLeft.cols(); ++i) {

    double min = std::numeric_limits<double>::max();
    IndMatch match;
    for (size_t j = 0; j < xRight.cols(); ++j) {
      // Compute error to the model
      double err = ErrorArg::Error(
        mod,  // The model
        xLeft.col(i), xRight.col(j)); // The corresponding points
      // if smaller error update corresponding index
      if (err < errorTh && err < min) {
        min = err;
        match = IndMatch(i,j);
      }
    }
    if (min < errorTh)  {
      // save the best corresponding index
      vec_corresponding_index.push_back(match);
    }
  }

  // Remove duplicates (when multiple points at same position exist)
  IndMatch::getDeduplicated(vec_corresponding_index);
}
Ejemplo n.º 2
0
  /// Filter inconsistent correspondences by using 3-view correspondences on view triplets.
  ///
  /// For each view triplet enumerated from the input pair graph, the putative
  /// two-view matches of the triplet edges are fused into tracks; tracks kept by
  /// Filter(3) are triangulated, and only matches supported by a valid 3D point
  /// (positive minimal depth and triangulation error < 4.0) are stored into the
  /// triplets_matches member.
  ///
  /// @param sfm_data         Scene container; views, intrinsics and poses are read.
  /// @param pairs            View pairs used to enumerate the view triplets.
  /// @param regions_provider Per-view regions, used to fetch feature positions.
  ///
  /// Side effects: fills triplets_matches and releases putatives_matches at the end.
  void filter(
    const SfM_Data & sfm_data,
    const Pair_Set & pairs,
    const std::shared_ptr<Regions_Provider> & regions_provider)
  {
    // Compute triplets
    // Triangulate triplet tracks
    //  - keep valid one

    typedef std::vector< graphUtils::Triplet > Triplets;
    const Triplets triplets = graphUtils::tripletListing(pairs);

    C_Progress_display my_progress_bar( triplets.size(), std::cout,
      "Per triplet tracks validation (discard spurious correspondences):\n" );
    // One thread team is spawned; each loop iteration is handed to a single
    // thread via "omp single nowait" so triplets are processed concurrently.
  #ifdef OPENMVG_USE_OPENMP
      #pragma omp parallel
  #endif // OPENMVG_USE_OPENMP
    for( Triplets::const_iterator it = triplets.begin(); it != triplets.end(); ++it)
    {
  #ifdef OPENMVG_USE_OPENMP
      #pragma omp single nowait
  #endif // OPENMVG_USE_OPENMP
      {
        // The progress bar is shared state -> guarded update
        #ifdef OPENMVG_USE_OPENMP
          #pragma omp critical
        #endif // OPENMVG_USE_OPENMP
        {++my_progress_bar;}

        const graphUtils::Triplet & triplet = *it;
        const IndexT I = triplet.i, J = triplet.j , K = triplet.k;

        openMVG::tracks::STLMAPTracks map_tracksCommon;
        openMVG::tracks::TracksBuilder tracksBuilder;
        {
          // Collect the putative matches of the three triplet edges (when present)
          PairWiseMatches map_matchesIJK;
          if(putatives_matches.find(std::make_pair(I,J)) != putatives_matches.end())
            map_matchesIJK.insert(*putatives_matches.find(std::make_pair(I,J)));

          if(putatives_matches.find(std::make_pair(I,K)) != putatives_matches.end())
            map_matchesIJK.insert(*putatives_matches.find(std::make_pair(I,K)));

          if(putatives_matches.find(std::make_pair(J,K)) != putatives_matches.end())
            map_matchesIJK.insert(*putatives_matches.find(std::make_pair(J,K)));

          // Build tracks from the collected pairwise matches and keep only the
          // tracks that pass Filter(3) (at least two edges are required to link
          // three views)
          if (map_matchesIJK.size() >= 2) {
            tracksBuilder.Build(map_matchesIJK);
            tracksBuilder.Filter(3);
            tracksBuilder.ExportToSTL(map_tracksCommon);
          }

          // Triangulate the tracks
          for (tracks::STLMAPTracks::const_iterator iterTracks = map_tracksCommon.begin();
            iterTracks != map_tracksCommon.end(); ++iterTracks) {
            {
              const tracks::submapTrack & subTrack = iterTracks->second;
              Triangulation trianObj;
              for (tracks::submapTrack::const_iterator iter = subTrack.begin(); iter != subTrack.end(); ++iter) {
                const size_t imaIndex = iter->first;
                const size_t featIndex = iter->second;
                const View * view = sfm_data.getViews().at(imaIndex).get();
                const IntrinsicBase * cam = sfm_data.getIntrinsics().at(view->id_intrinsic).get();
                const Pose3 & pose = sfm_data.poses.at(view->id_pose);
                const Vec2 pt = regions_provider->regions_per_view.at(imaIndex)->GetRegionPosition(featIndex);
                // Observation is undistorted (get_ud_pixel) before being added
                trianObj.add(cam->get_projective_equivalent(pose), cam->get_ud_pixel(pt));
              }
              // NOTE(review): Xs itself is unused; compute() appears to perform the
              // triangulation that minDepth()/error() rely on — confirm before removal.
              const Vec3 Xs = trianObj.compute();
              if (trianObj.minDepth() > 0 && trianObj.error() < 4.0)
              // TODO: Add an angular check ?
              {
                // triplets_matches is shared across threads -> guarded insertion
                #ifdef OPENMVG_USE_OPENMP
                  #pragma omp critical
                #endif // OPENMVG_USE_OPENMP
                {
                  // Advancing by 1 and 2 assumes the track holds (at least) three
                  // observations ordered as I, J, K — presumably guaranteed by
                  // Filter(3) and the map ordering of submapTrack; TODO confirm.
                  openMVG::tracks::submapTrack::const_iterator iterI, iterJ, iterK;
                  iterI = iterJ = iterK = subTrack.begin();
                  std::advance(iterJ,1);
                  std::advance(iterK,2);

                  triplets_matches[std::make_pair(I,J)].push_back(IndMatch(iterI->second, iterJ->second));
                  triplets_matches[std::make_pair(J,K)].push_back(IndMatch(iterJ->second, iterK->second));
                  triplets_matches[std::make_pair(I,K)].push_back(IndMatch(iterI->second, iterK->second));
                }
              }
            }
          }
        }
      }
    }
    // Clear putatives matches since they are no longer required
    matching::PairWiseMatches().swap(putatives_matches);
  }
Ejemplo n.º 3
0
  /// Match hashed descriptors of set 1 against hashed descriptors of set 2
  /// (cascade hashing): candidates sharing a bucket id with the query are
  /// ranked by Hamming distance on the comp hash code, the best ones are
  /// re-ranked with the L2 metric, and the NN nearest are reported.
  ///
  /// @param hashed_descriptions1 Hashed representation of the query descriptors.
  /// @param descriptions1        Raw query descriptors (one per row).
  /// @param hashed_descriptions2 Hashed representation of the database descriptors.
  /// @param descriptions2        Raw database descriptors (one per row).
  /// @param pvec_indices         Output: (query, database) index pairs (NN per kept query).
  /// @param pvec_distances       Output: L2 distances parallel to pvec_indices.
  /// @param NN                   Number of nearest neighbors to keep per query.
  void Match_HashedDescriptions
  (
    const HashedDescriptions& hashed_descriptions1,
    const MatrixT & descriptions1,
    const HashedDescriptions& hashed_descriptions2,
    const MatrixT & descriptions2,
    IndMatches * pvec_indices,
    std::vector<DistanceType> * pvec_distances,
    const int NN = 2
  ) const
  {
    typedef L2_Vectorized<typename MatrixT::Scalar> MetricT;
    MetricT metric;

    // Unsigned copy of NN for clean comparisons against container sizes.
    const size_t minimum_neighbors = static_cast<size_t>(NN);

    // Upper bound on the number of candidates refined with the L2 metric.
    static const size_t kNumTopCandidates = 10;

    // Preallocate the candidate descriptors container.
    std::vector<int> candidate_descriptors;
    candidate_descriptors.reserve(hashed_descriptions2.hashed_desc.size());

    // Preallocated hamming distances. Each column indicates the hamming distance
    // and the rows collect the descriptor ids with that
    // distance. num_descriptors_with_hamming_distance keeps track of how many
    // descriptors have that distance.
    Eigen::MatrixXi candidate_hamming_distances(
      hashed_descriptions2.hashed_desc.size(), nb_hash_code_ + 1);
    Eigen::VectorXi num_descriptors_with_hamming_distance(nb_hash_code_ + 1);

    // Preallocate the container for keeping euclidean distances
    // (clear() below keeps the capacity, so one reserve is enough).
    std::vector<std::pair<DistanceType, int> > candidate_euclidean_distances;
    candidate_euclidean_distances.reserve(kNumTopCandidates);

    // A preallocated vector to determine if we have already used a particular
    // feature for matching (i.e., prevents duplicates).
    std::vector<bool> used_descriptor(hashed_descriptions2.hashed_desc.size());

    typedef matching::Hamming<stl::dynamic_bitset::BlockType> HammingMetricType;
    static const HammingMetricType metricH = {};
    for (size_t i = 0; i < hashed_descriptions1.hashed_desc.size(); ++i)
    {
      candidate_descriptors.clear();
      num_descriptors_with_hamming_distance.setZero();
      candidate_euclidean_distances.clear();

      const auto& hashed_desc = hashed_descriptions1.hashed_desc[i];

      // Accumulate all descriptors in each bucket group that are in the same
      // bucket id as the query descriptor.
      for (int j = 0; j < nb_bucket_groups_; ++j)
      {
        const uint16_t bucket_id = hashed_desc.bucket_ids[j];
        for (const auto& feature_id : hashed_descriptions2.buckets[j][bucket_id])
        {
          candidate_descriptors.push_back(feature_id);
          used_descriptor[feature_id] = false; // reset the duplicate marker lazily
        }
      }

      // Skip matching this descriptor if there are not at least NN candidates.
      if (candidate_descriptors.size() <= minimum_neighbors)
      {
        continue;
      }

      // Compute the hamming distance of all candidates based on the comp hash
      // code. Put the descriptors into buckets corresponding to their hamming
      // distance.
      for (const int candidate_id : candidate_descriptors)
      {
        if (!used_descriptor[candidate_id]) // avoid selecting the same candidate multiple times
        {
          used_descriptor[candidate_id] = true;

          const HammingMetricType::ResultType hamming_distance = metricH(
            hashed_desc.hash_code.data(),
            hashed_descriptions2.hashed_desc[candidate_id].hash_code.data(),
            hashed_desc.hash_code.num_blocks());
          // Append candidate_id to the (counting-sort style) bucket of its distance.
          candidate_hamming_distances(
              num_descriptors_with_hamming_distance(hamming_distance)++,
              hamming_distance) = candidate_id;
        }
      }

      // Compute the euclidean distance of the k descriptors with the best hamming
      // distance, walking the buckets from smallest distance upward.
      for (int j = 0; j < candidate_hamming_distances.cols() &&
        (candidate_euclidean_distances.size() < kNumTopCandidates); ++j)
      {
        for(int k = 0; k < num_descriptors_with_hamming_distance(j) &&
          (candidate_euclidean_distances.size() < kNumTopCandidates); ++k)
        {
          const int candidate_id = candidate_hamming_distances(k, j);
          const DistanceType distance = metric(
            descriptions2.row(candidate_id).data(),
            descriptions1.row(i).data(),
            descriptions1.cols());

          candidate_euclidean_distances.push_back(std::make_pair(distance, candidate_id));
        }
      }

      // Assert that each query is having at least NN retrieved neighbors
      if (candidate_euclidean_distances.size() >= minimum_neighbors)
      {
        // Find the top NN candidates based on euclidean distance.
        std::partial_sort(candidate_euclidean_distances.begin(),
          candidate_euclidean_distances.begin() + NN,
          candidate_euclidean_distances.end());
        // save resulting neighbors
        for (int l = 0; l < NN; ++l)
        {
          pvec_distances->push_back(candidate_euclidean_distances[l].first);
          pvec_indices->push_back(IndMatch(i,candidate_euclidean_distances[l].second));
        }
      }
      //else -> too few candidates... (save no one)
    }
  }