bool WHeadPositionCorrection::computeInverseOperation( MatrixT* const g, const MatrixT& lf ) const
{
    WLTimeProfiler profiler( CLASS, __func__, true );

    const float snr = 25;
    const MatrixT noiseCov = MatrixT::Identity( lf.rows(), lf.rows() );

    // Leadfield transpose matrix
    const MatrixT LT = lf.transpose();

    // WinvLT = W^-1 * LT
    SpMatrixT w = SpMatrixT( lf.cols(), lf.cols() );
    w.setIdentity();
    w.makeCompressed();

    SparseLU< SpMatrixT > spSolver;
    spSolver.compute( w );
    if( spSolver.info() != Eigen::Success )
    {
        wlog::error( CLASS ) << "spSolver.compute( weighting ) not succeeded: " << spSolver.info();
        return false;
    }
    const MatrixT WinvLT = spSolver.solve( LT ); // needs dense matrix, returns dense matrix
    if( spSolver.info() != Eigen::Success )
    {
        wlog::error( CLASS ) << "spSolver.solve( LT ) not succeeded: " << spSolver.info();
        return false;
    }
    wlog::debug( CLASS ) << "WinvLT " << WinvLT.rows() << " x " << WinvLT.cols();

    // LWL = L * W^-1 * LT
    const MatrixT LWL = lf * WinvLT;
    wlog::debug( CLASS ) << "LWL " << LWL.rows() << " x " << LWL.cols();

    // alpha = sqrt(trace(LWL)/(snr * num_sensors));
    double alpha = sqrt( LWL.trace() / ( snr * lf.rows() ) );

    // G = W^-1 * LT * inv( (L W^-1 * LT) + alpha^2 * Cn )
    const MatrixT toInv = LWL + pow( alpha, 2 ) * noiseCov;
    const MatrixT inv = toInv.inverse();
    *g = WinvLT * inv;

    WAssertDebug( g->rows() == lf.cols() && g->cols() == lf.rows(), "Dimensions of G and L do not match." );
    return true;
}
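For reference, the routine above computes the Tikhonov-regularized minimum-norm inverse G = W^-1 * L^T * (L * W^-1 * L^T + alpha^2 * Cn)^-1, with both the weighting W and the noise covariance Cn set to identity. A minimal standalone Eigen sketch of the same closed form, using hypothetical dimensions and double precision:

#include <Eigen/Dense>
#include <cmath>
#include <iostream>

int main()
{
    typedef Eigen::MatrixXd MatrixT;
    const int nSensors = 8, nSources = 32;                    // hypothetical sizes
    const MatrixT L = MatrixT::Random( nSensors, nSources );  // leadfield
    const double snr = 25.0;

    // With W = I, L * W^-1 * L^T reduces to L * L^T.
    const MatrixT LWL = L * L.transpose();
    const double alpha = std::sqrt( LWL.trace() / ( snr * nSensors ) );
    const MatrixT Cn = MatrixT::Identity( nSensors, nSensors );

    // G = L^T * ( L * L^T + alpha^2 * Cn )^-1
    const MatrixT G = L.transpose() * ( LWL + alpha * alpha * Cn ).inverse();
    std::cout << "G: " << G.rows() << " x " << G.cols() << std::endl; // 32 x 8
    return 0;
}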
Example #2
 static Eigen::VectorXf GetZeroMeanDescriptor
 (
   const MatrixT & descriptions
 )
 {
   Eigen::VectorXf zero_mean_descriptor;
   if (descriptions.rows() == 0) {
     return zero_mean_descriptor;
   }
   // Compute the mean descriptor (subtracted elsewhere to zero-mean the data).
   zero_mean_descriptor.setZero(descriptions.cols());
   const typename MatrixT::Index nbDescriptions = descriptions.rows();
   for (int i = 0; i < nbDescriptions; ++i)
   {
     for (int j = 0; j < descriptions.cols(); ++j)
       zero_mean_descriptor(j) += descriptions(i,j);
   }
   return zero_mean_descriptor / static_cast<float>(nbDescriptions);
 }
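Despite its name, the helper returns the mean descriptor; subtracting it elsewhere is what zero-means the data. A usage sketch, assuming the helper is reachable as a free function and MatrixT is Eigen::MatrixXf; the double loop above is just an unrolled column-wise mean:

#include <Eigen/Dense>
#include <cassert>

int main()
{
  const Eigen::MatrixXf descriptions = Eigen::MatrixXf::Random( 100, 128 ); // 100 descriptors, 128-D
  const Eigen::VectorXf mean_loop = GetZeroMeanDescriptor( descriptions );  // helper above
  const Eigen::VectorXf mean_eigen = descriptions.colwise().mean().transpose();
  assert( mean_loop.isApprox( mean_eigen ) );
  return 0;
}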
Example #3
  void Match_HashedDescriptions
  (
    const HashedDescriptions& hashed_descriptions1,
    const MatrixT & descriptions1,
    const HashedDescriptions& hashed_descriptions2,
    const MatrixT & descriptions2,
    IndMatches * pvec_indices,
    std::vector<DistanceType> * pvec_distances,
    const int NN = 2
  ) const
  {
    typedef L2_Vectorized<typename MatrixT::Scalar> MetricT;
    MetricT metric;

    static const int kNumTopCandidates = 10;

    // Preallocate the candidate descriptors container.
    std::vector<int> candidate_descriptors;
    candidate_descriptors.reserve(hashed_descriptions2.hashed_desc.size());

    // Preallocated Hamming distances. Each column corresponds to one Hamming
    // distance and its rows collect the descriptor ids at that distance;
    // num_descriptors_with_hamming_distance keeps track of how many
    // descriptors have each distance.
    Eigen::MatrixXi candidate_hamming_distances(
      hashed_descriptions2.hashed_desc.size(), nb_hash_code_ + 1);
    Eigen::VectorXi num_descriptors_with_hamming_distance(nb_hash_code_ + 1);

    // Preallocate the container for keeping the Euclidean distances.
    std::vector<std::pair<DistanceType, int> > candidate_euclidean_distances;
    candidate_euclidean_distances.reserve(kNumTopCandidates);

    // A preallocated vector to determine if we have already used a particular
    // feature for matching (i.e., prevents duplicates).
    std::vector<bool> used_descriptor(hashed_descriptions2.hashed_desc.size());

    typedef matching::Hamming<stl::dynamic_bitset::BlockType> HammingMetricType;
    static const HammingMetricType metricH = {};
    for (int i = 0; i < hashed_descriptions1.hashed_desc.size(); ++i)
    {
      candidate_descriptors.clear();
      num_descriptors_with_hamming_distance.setZero();
      candidate_euclidean_distances.clear();

      const auto& hashed_desc = hashed_descriptions1.hashed_desc[i];

      // Accumulate all descriptors in each bucket group that are in the same
      // bucket id as the query descriptor.
      for (int j = 0; j < nb_bucket_groups_; ++j)
      {
        const uint16_t bucket_id = hashed_desc.bucket_ids[j];
        for (const auto& feature_id : hashed_descriptions2.buckets[j][bucket_id])
        {
          candidate_descriptors.push_back(feature_id);
          used_descriptor[feature_id] = false;
        }
      }

      // Skip matching this descriptor if there are not at least NN candidates.
      if (candidate_descriptors.size() <= static_cast<size_t>(NN))
      {
        continue;
      }

      // Compute the Hamming distance of all candidates based on the compact hash
      // code. Put the descriptors into buckets corresponding to their Hamming
      // distance.
      for (const int candidate_id : candidate_descriptors)
      {
        if (!used_descriptor[candidate_id]) // avoid selecting the same candidate multiple times
        {
          used_descriptor[candidate_id] = true;

          const HammingMetricType::ResultType hamming_distance = metricH(
            hashed_desc.hash_code.data(),
            hashed_descriptions2.hashed_desc[candidate_id].hash_code.data(),
            hashed_desc.hash_code.num_blocks());
          candidate_hamming_distances(
              num_descriptors_with_hamming_distance(hamming_distance)++,
              hamming_distance) = candidate_id;
        }
      }

      // Compute the Euclidean distance of the k descriptors with the best
      // Hamming distance.
      for (int j = 0; j < candidate_hamming_distances.cols() &&
        (candidate_euclidean_distances.size() < kNumTopCandidates); ++j)
      {
        for(int k = 0; k < num_descriptors_with_hamming_distance(j) &&
          (candidate_euclidean_distances.size() < kNumTopCandidates); ++k)
        {
          const int candidate_id = candidate_hamming_distances(k, j);
          const DistanceType distance = metric(
            descriptions2.row(candidate_id).data(),
            descriptions1.row(i).data(),
            descriptions1.cols());

          candidate_euclidean_distances.push_back(std::make_pair(distance, candidate_id));
        }
      }

      // Keep this query only if it has at least NN retrieved neighbors.
      if (candidate_euclidean_distances.size() >= static_cast<size_t>(NN))
      {
        // Find the top NN candidates based on euclidean distance.
        std::partial_sort(candidate_euclidean_distances.begin(),
          candidate_euclidean_distances.begin() + NN,
          candidate_euclidean_distances.end());
        // Save the resulting neighbors.
        for (int l = 0; l < NN; ++l)
        {
          pvec_distances->push_back(candidate_euclidean_distances[l].first);
          pvec_indices->push_back(IndMatch(i,candidate_euclidean_distances[l].second));
        }
      }
      // else: too few candidates, so no match is saved.
    }
  }
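With the default NN = 2, the two output vectors hold, per retained query, the best and second-best Euclidean distances in order. A common follow-up is Lowe's ratio test, sketched below; RatioFilter and the 0.8f threshold are illustrative, not part of this API, and the squared ratio assumes the metric returns squared L2 distances:

#include <cstddef>
#include <vector>

// Keep a match only when the best distance clearly beats the second best.
template <typename DistanceType, typename IndMatchesT>
void RatioFilter
(
  const std::vector<DistanceType>& vec_distances, // 2 entries per query (NN = 2)
  const IndMatchesT& vec_indices,                 // 2 entries per query
  const float ratio,                              // e.g. 0.8f
  IndMatchesT* filtered
)
{
  for (std::size_t i = 0; i + 1 < vec_distances.size(); i += 2)
  {
    // Squared distances, so the ratio is squared as well.
    if (vec_distances[i] < ratio * ratio * vec_distances[i + 1])
      filtered->push_back(vec_indices[i]);
  }
}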
Example #4
  HashedDescriptions CreateHashedDescriptions
  (
    const MatrixT & descriptions,
    const Eigen::VectorXf & zero_mean_descriptor
  ) const
  {
    // Steps:
    //   1) Compute the hash code and bucket ids for each descriptor
    //      (based on the zero_mean_descriptor).
    //   2) Construct the buckets.

    HashedDescriptions hashed_descriptions;
    if (descriptions.rows() == 0) {
      return hashed_descriptions;
    }

    // Create hash codes for each description.
    {
      // Allocate space for hash codes.
      const typename MatrixT::Index nbDescriptions = descriptions.rows();
      hashed_descriptions.hashed_desc.resize(nbDescriptions);
      Eigen::VectorXf descriptor(descriptions.cols());
      for (int i = 0; i < nbDescriptions; ++i)
      {
        // Allocate space for each bucket id.
        hashed_descriptions.hashed_desc[i].bucket_ids.resize(nb_bucket_groups_);

        // Copy the (possibly non-float) descriptor into a float vector.
        for (int k = 0; k < descriptions.cols(); ++k)
        {
          descriptor(k) = descriptions(i,k);
        }
        descriptor -= zero_mean_descriptor;

        auto& hash_code = hashed_descriptions.hashed_desc[i].hash_code;
        hash_code = stl::dynamic_bitset(descriptions.cols());

        // Compute hash code.
        const Eigen::VectorXf primary_projection = primary_hash_projection_ * descriptor;
        for (int j = 0; j < nb_hash_code_; ++j)
        {
          hash_code[j] = primary_projection(j) > 0;
        }

        // Determine the bucket index for each group.
        Eigen::VectorXf secondary_projection;
        for (int j = 0; j < nb_bucket_groups_; ++j)
        {
          uint16_t bucket_id = 0;
          secondary_projection = secondary_hash_projection_[j] * descriptor;

          for (int k = 0; k < nb_bits_per_bucket_; ++k)
          {
            bucket_id = (bucket_id << 1) + (secondary_projection(k) > 0 ? 1 : 0);
          }
          hashed_descriptions.hashed_desc[i].bucket_ids[j] = bucket_id;
        }
      }
    }
    // Build the buckets.
    {
      hashed_descriptions.buckets.resize(nb_bucket_groups_);
      for (int i = 0; i < nb_bucket_groups_; ++i)
      {
        hashed_descriptions.buckets[i].resize(nb_buckets_per_group_);

        // Add the descriptor ID to the proper bucket group and id.
        for (int j = 0; j < hashed_descriptions.hashed_desc.size(); ++j)
        {
          const uint16_t bucket_id = hashed_descriptions.hashed_desc[j].bucket_ids[i];
          hashed_descriptions.buckets[i][bucket_id].push_back(j);
        }
      }
    }
    return hashed_descriptions;
  }
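Taken together, the three members form a simple pipeline: compute the mean descriptor, hash each descriptor set once, then match. A hypothetical driver, assuming it lives in the same class so nb_bucket_groups_, the projection matrices, and the remaining parameters are already initialized:

  // Hypothetical convenience wrapper (not part of the original class).
  void MatchTwoSets
  (
    const MatrixT & descriptions1,
    const MatrixT & descriptions2,
    IndMatches * pvec_indices,
    std::vector<DistanceType> * pvec_distances
  ) const
  {
    // A shared zero-mean descriptor keeps the hash codes of both sets comparable.
    const Eigen::VectorXf zero_mean = GetZeroMeanDescriptor( descriptions1 );
    const HashedDescriptions hashed1 = CreateHashedDescriptions( descriptions1, zero_mean );
    const HashedDescriptions hashed2 = CreateHashedDescriptions( descriptions2, zero_mean );
    Match_HashedDescriptions( hashed1, descriptions1, hashed2, descriptions2,
                              pvec_indices, pvec_distances );
  }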