Example #1
File: banded_solve.hpp Project: svark/asti
void
banded_lu_decompose(MatrixT & _A)
{
    size_t n = _A.rows();
    // In-place LU of a tridiagonal matrix: eliminate the single subdiagonal
    // entry below each pivot, then update the next diagonal entry.
    for(size_t k = 0; k < n - 1; ++k) {
        _A(k + 1, k)     = _A(k + 1, k) / _A(k, k);       // multiplier, stored in L's slot
        _A(k + 1, k + 1) -= _A(k + 1, k) * _A(k, k + 1);  // Schur-complement update of the next pivot
    }
}
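The factorization is done in place: afterwards the strict subdiagonal of _A holds the multipliers of the unit lower factor L, while the diagonal and superdiagonal hold U. A minimal usage sketch, assuming MatrixT is an Eigen-style dense type such as Eigen::MatrixXd (the typedef is not shown in the excerpt):

#include <Eigen/Dense>
#include <iostream>

int main()
{
    Eigen::MatrixXd A(2, 2);
    A << 2, 1,
         1, 2;
    banded_lu_decompose(A);
    // A(1,0) is now the multiplier 0.5 (subdiagonal of L) and A(1,1) the
    // updated pivot 1.5 (diagonal of U); L*U reproduces the original A.
    std::cout << A << "\n";
}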
Example #2
 static Eigen::VectorXf GetZeroMeanDescriptor
 (
   const MatrixT & descriptions
 )
 {
   Eigen::VectorXf zero_mean_descriptor;
   if (descriptions.rows() == 0) {
     return zero_mean_descriptor;
   }
   // Accumulate the per-dimension sum; the mean is taken on return.
   zero_mean_descriptor.setZero(descriptions.cols());
   const typename MatrixT::Index nbDescriptions = descriptions.rows();
   for (int i = 0; i < nbDescriptions; ++i)
   {
     for (int j = 0; j < descriptions.cols(); ++j)
       zero_mean_descriptor(j) += descriptions(i,j);
   }
   return zero_mean_descriptor / static_cast<float>(nbDescriptions); // float matches the VectorXf scalar
 }
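The loop is a per-column (per-dimension) sum over all descriptors, divided by the row count on return, i.e. the mean descriptor. Assuming MatrixT is an Eigen matrix with one descriptor per row, the same result could be written as a one-liner (a sketch, not the original API):

 Eigen::VectorXf mean =
   descriptions.template cast<float>().colwise().mean().transpose();

The explicit cast guards against overflow when MatrixT uses a narrow integer scalar such as unsigned char.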
Example #3
File: banded_solve.hpp Project: svark/asti
void
banded_lu_solve(const MatrixT & _A, VectorT & b)
{
    size_t n = _A.rows();
    // Forward substitution with the unit lower factor L (multipliers stored
    // on the subdiagonal by banded_lu_decompose). The bound is j + 1 < n so
    // b[j + 1] stays in range.
    for(size_t j = 0; j + 1 < n; ++j)
    {
        b[j + 1] -= _A(j + 1, j) * b[j];
    }
    // Back substitution with the upper factor U. The loop stops at 1 so the
    // unsigned index cannot wrap; row 0 is finished afterwards.
    for(size_t j = n - 1; j > 0; --j)
    {
        b[j] = b[j] / _A(j, j);
        b[j - 1] -= _A(j - 1, j) * b[j];
    }
    b[0] = b[0] / _A(0, 0);
}
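A minimal end-to-end sketch pairing the two routines above, again assuming MatrixT and VectorT are Eigen-style types such as Eigen::MatrixXd and Eigen::VectorXd:

#include <Eigen/Dense>

int main()
{
    Eigen::MatrixXd A(3, 3);
    A << 2, 1, 0,
         1, 2, 1,
         0, 1, 2;
    Eigen::VectorXd b(3);
    b << 1, 2, 3;

    banded_lu_decompose(A); // factor A = L*U in place
    banded_lu_solve(A, b);  // b now holds the solution of the original A x = b
}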
Example #4
SMPMatrixElf::MatrixT SMPMatrixElf::multiply(const MatrixT& left, const MatrixT& right)
{
    if (left.empty() || right.empty()) return MatrixT();
    MatrixT c(left.rows(), right.columns());

    // Aim for roughly 10*10 = 100 output cells per block, then round the
    // block grid so it covers the whole result matrix.
    size_t elementsPerBlock = 10*10;
    size_t columnsPerBlock = (size_t)ceil(sqrt(elementsPerBlock));
    columnsPerBlock = min(columnsPerBlock, c.columns());

    size_t rowsPerBlock = div_ceil(elementsPerBlock, columnsPerBlock);

    size_t blocksPerRow = div_ceil(c.columns(), columnsPerBlock);
    size_t blocksPerColumns = div_ceil(c.rows(), rowsPerBlock);

    size_t blockCount = blocksPerRow * blocksPerColumns;

    // Transpose the right operand once so the inner dot product reads both
    // matrices row-wise.
    MatrixT transposed = right.transposed();

    #pragma omp parallel for
    for(size_t index=0; index<blockCount; index++)
    {
        size_t rowStart = (index / blocksPerRow) * rowsPerBlock;
        size_t columnStart = (index % blocksPerRow) * columnsPerBlock;

        size_t columnEnd = min(columnStart+columnsPerBlock, c.columns());
        size_t rowEnd = min(rowStart+rowsPerBlock, c.rows());

        for(size_t y=rowStart; y<rowEnd; y++)
        {
            for(size_t x=columnStart; x<columnEnd; x++)
            {
                float val = 0;
                for(size_t i=0; i<left.columns(); i++)
                {
                    val += left(y,i) * transposed(x,i);
                }
                c(y,x) = val;
            }
        }

    }

    return c;
}
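The helper div_ceil is used but not defined in the excerpt; judging from its call sites it is the usual rounding-up integer division, e.g. (an assumed definition):

#include <cstddef>

static size_t div_ceil(size_t a, size_t b)
{
    return (a + b - 1) / b;
}

Transposing right up front is the main cache optimization here: the innermost loop then reads both left(y,i) and transposed(x,i) along rows, which are contiguous in row-major storage.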
Example #5
bool WHeadPositionCorrection::computeInverseOperation( MatrixT* const g, const MatrixT& lf ) const
{
    WLTimeProfiler profiler( CLASS, __func__, true );

    const float snr = 25;
    const MatrixT noiseCov = MatrixT::Identity( lf.rows(), lf.rows() );

    // Leadfield transpose matrix
    const MatrixT LT = lf.transpose();

    // WinvLT = W^-1 * LT
    SpMatrixT w = SpMatrixT( lf.cols(), lf.cols() );
    w.setIdentity();
    w.makeCompressed();

    SparseLU< SpMatrixT > spSolver;
    spSolver.compute( w );
    if( spSolver.info() != Eigen::Success )
    {
        wlog::error( CLASS ) << "spSolver.compute( weighting ) not succeeded: " << spSolver.info();
        return false;
    }
    const MatrixT WinvLT = spSolver.solve( LT ); // needs dense matrix, returns dense matrix
    if( spSolver.info() != Eigen::Success )
    {
        wlog::error( CLASS ) << "spSolver.solve( LT ) not succeeded: " << spSolver.info();
        return false;
    }
    wlog::debug( CLASS ) << "WinvLT " << WinvLT.rows() << " x " << WinvLT.cols();

    // LWL = L * W^-1 * LT
    const MatrixT LWL = lf * WinvLT;
    wlog::debug( CLASS ) << "LWL " << LWL.rows() << " x " << LWL.cols();

    // alpha = sqrt(trace(LWL)/(snr * num_sensors));
    double alpha = sqrt( LWL.trace() / ( snr * lf.rows() ) );

    // G = W^-1 * LT * inv( (L W^-1 * LT) + alpha^2 * Cn )
    const MatrixT toInv = LWL + pow( alpha, 2 ) * noiseCov;
    const MatrixT inv = toInv.inverse();
    *g = WinvLT * inv;

    WAssertDebug( g->rows() == lf.cols() && g->cols() == lf.rows(), "Dimensions of G and L do not match." );
    return true;
}
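Since w is fixed to the identity above, the routine reduces to the standard SNR-regularized minimum-norm inverse G = L^T * (L*L^T + alpha^2 * Cn)^-1 with Cn = I. A condensed dense-Eigen sketch of that special case (illustrative names, not the project's API):

#include <Eigen/Dense>

Eigen::MatrixXd minimumNormInverse( const Eigen::MatrixXd& lf, double snr )
{
    const Eigen::MatrixXd lwl = lf * lf.transpose();
    const double alpha2 = lwl.trace() / ( snr * lf.rows() ); // alpha^2 from the SNR heuristic
    const Eigen::MatrixXd noiseCov = Eigen::MatrixXd::Identity( lf.rows(), lf.rows() );
    return lf.transpose() * ( lwl + alpha2 * noiseCov ).inverse();
}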
Example #6
  HashedDescriptions CreateHashedDescriptions
  (
    const MatrixT & descriptions,
    const Eigen::VectorXf & zero_mean_descriptor
  ) const
  {
    // Steps:
    //   1) Compute hash code and hash buckets (based on the zero_mean_descriptor).
    //   2) Construct buckets.

    HashedDescriptions hashed_descriptions;
    if (descriptions.rows() == 0) {
      return hashed_descriptions;
    }

    // Create hash codes for each description.
    {
      // Allocate space for hash codes.
      const typename MatrixT::Index nbDescriptions = descriptions.rows();
      hashed_descriptions.hashed_desc.resize(nbDescriptions);
      Eigen::VectorXf descriptor(descriptions.cols());
      for (int i = 0; i < nbDescriptions; ++i)
      {
        // Allocate space for each bucket id.
        hashed_descriptions.hashed_desc[i].bucket_ids.resize(nb_bucket_groups_);

        for (int k = 0; k < descriptions.cols(); ++k)
        {
          descriptor(k) = descriptions(i,k);
        }
        descriptor -= zero_mean_descriptor;

        auto& hash_code = hashed_descriptions.hashed_desc[i].hash_code;
        hash_code = stl::dynamic_bitset(descriptions.cols());

        // Compute hash code.
        const Eigen::VectorXf primary_projection = primary_hash_projection_ * descriptor;
        for (int j = 0; j < nb_hash_code_; ++j)
        {
          hash_code[j] = primary_projection(j) > 0;
        }

        // Determine the bucket index for each group.
        Eigen::VectorXf secondary_projection;
        for (int j = 0; j < nb_bucket_groups_; ++j)
        {
          uint16_t bucket_id = 0;
          secondary_projection = secondary_hash_projection_[j] * descriptor;

          for (int k = 0; k < nb_bits_per_bucket_; ++k)
          {
            bucket_id = (bucket_id << 1) + (secondary_projection(k) > 0 ? 1 : 0);
          }
          hashed_descriptions.hashed_desc[i].bucket_ids[j] = bucket_id;
        }
      }
    }
    // Build the Buckets
    {
      hashed_descriptions.buckets.resize(nb_bucket_groups_);
      for (int i = 0; i < nb_bucket_groups_; ++i)
      {
        hashed_descriptions.buckets[i].resize(nb_buckets_per_group_);

        // Add the descriptor ID to the proper bucket group and id.
        for (int j = 0; j < hashed_descriptions.hashed_desc.size(); ++j)
        {
          const uint16_t bucket_id = hashed_descriptions.hashed_desc[j].bucket_ids[i];
          hashed_descriptions.buckets[i][bucket_id].push_back(j);
        }
      }
    }
    return hashed_descriptions;
  }
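The scheme is sign-of-projection locality-sensitive hashing: the primary projection yields an nb_hash_code_-bit signature (used for Hamming-distance ranking), and each secondary projection packs nb_bits_per_bucket_ sign bits into a bucket id. An implicit invariant of the indexing into buckets[i][bucket_id] is therefore that each group holds exactly 2^nb_bits_per_bucket_ buckets. A standalone sketch of the bit packing, with hypothetical values:

#include <cassert>
#include <cstdint>

int main()
{
    const int nb_bits_per_bucket = 4;                  // hypothetical value
    const float projection[4] = { 0.3f, -1.2f, 0.7f, -0.1f };
    uint16_t bucket_id = 0;
    for (int k = 0; k < nb_bits_per_bucket; ++k)
      bucket_id = (bucket_id << 1) + (projection[k] > 0 ? 1 : 0);
    assert(bucket_id == 0xA);                          // sign bits 1,0,1,0
    return 0;
}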