Example #1
void testVectorType(const VectorType& base)
{
  typedef typename internal::traits<VectorType>::Index Index;
  typedef typename internal::traits<VectorType>::Scalar Scalar;

  const Index size = base.size();
  
  Scalar high = internal::random<Scalar>(-500,500);
  Scalar low = (size == 1 ? high : internal::random<Scalar>(-500,500));
  if (low>high) std::swap(low,high);

  const Scalar step = ((size == 1) ? 1 : (high-low)/(size-1));

  // check whether the result yields what we expect
  VectorType m(base);
  m.setLinSpaced(size,low,high);

  VectorType n(size);
  for (int i=0; i<size; ++i)
    n(i) = low+i*step;

  VERIFY_IS_APPROX(m,n);

  // random access version
  m = VectorType::LinSpaced(size,low,high);
  VERIFY_IS_APPROX(m,n);

  // Assignment of a RowVectorXd to a MatrixXd (regression test for bug #79).
  VERIFY( (MatrixXd(RowVectorXd::LinSpaced(3, 0, 1)) - RowVector3d(0, 0.5, 1)).norm() < std::numeric_limits<Scalar>::epsilon() );

  // These guys sometimes fail! This is not good. Any ideas how to fix them!?
  //VERIFY( m(m.size()-1) == high );
  //VERIFY( m(0) == low );

  // sequential access version
  m = VectorType::LinSpaced(Sequential,size,low,high);
  VERIFY_IS_APPROX(m,n);

  // These guys sometimes fail! This is not good. Any ideas how to fix them!?
  //VERIFY( m(m.size()-1) == high );
  //VERIFY( m(0) == low );

  // check whether everything works with row and col major vectors
  // (note: despite their names, row_vector is a column vector and col_vector a row vector)
  Matrix<Scalar,Dynamic,1> row_vector(size);
  Matrix<Scalar,1,Dynamic> col_vector(size);
  row_vector.setLinSpaced(size,low,high);
  col_vector.setLinSpaced(size,low,high);
  VERIFY( row_vector.isApprox(col_vector.transpose(), NumTraits<Scalar>::epsilon()));

  Matrix<Scalar,Dynamic,1> size_changer(size+50);
  size_changer.setLinSpaced(size,low,high);
  VERIFY( size_changer.size() == size );

  typedef Matrix<Scalar,1,1> ScalarMatrix;
  ScalarMatrix scalar;
  scalar.setLinSpaced(1,low,high);
  VERIFY_IS_APPROX( scalar, ScalarMatrix::Constant(high) );
  VERIFY_IS_APPROX( ScalarMatrix::LinSpaced(1,low,high), ScalarMatrix::Constant(high) );
}
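
What this test exercises is Eigen's LinSpaced semantics: size evenly spaced values running from low to high inclusive, with setLinSpaced also resizing the vector. A minimal stand-alone illustration:

#include <iostream>
#include <Eigen/Dense>

int main()
{
  // 5 evenly spaced values from 0 to 1: 0 0.25 0.5 0.75 1
  Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(5, 0.0, 1.0);
  std::cout << v.transpose() << std::endl;

  // setLinSpaced resizes and fills in place; w ends up with size 5
  Eigen::VectorXd w(10);
  w.setLinSpaced(5, 0.0, 1.0);
  std::cout << w.size() << std::endl;
  return 0;
}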
Example #2
 /**
  * Computes the trace of the product of two kernel operators, i.e.
  * \f$ tr( X_1 Y_1 D_1 Y_1^\dagger X_1^\dagger  X_2 Y_2 D_2 Y_2^\dagger X_2^\dagger ) \f$
  */
 static Scalar traceProduct(const FSpaceCPtr &fs,
                            
                            const FMatrixCPtr &mX1, 
                            const ScalarAltMatrix  &mY1,
                            const RealVector &mD1,
                            
                            const FMatrixCPtr &mX2, 
                            const ScalarAltMatrix  &mY2,
                            const RealVector &mD2) {
     
     // inner products between the two images: m = (X1 Y1)^dagger (X2 Y2)
     ScalarMatrix m = fs->k(mX1, mY1, mX2, mY2);
     
     // by cyclicity of the trace, the full product collapses to tr(m^dagger D1 m D2)
     return (m.adjoint() * mD1.asDiagonal() * m * mD2.asDiagonal()).trace();
 }
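
The one-liner works by cyclic invariance of the trace: with m = (X1 Y1)^dagger (X2 Y2), the full operator trace collapses to tr(m^dagger D1 m D2). A self-contained numerical check of that identity in plain Eigen (random stand-ins, none of the project's FeatureSpace machinery):

#include <iostream>
#include <Eigen/Dense>

int main()
{
    using Eigen::MatrixXd;
    using Eigen::VectorXd;
    const int n = 6, k1 = 3, k2 = 4;
    MatrixXd X1 = MatrixXd::Random(n, k1), Y1 = MatrixXd::Random(k1, k1);
    MatrixXd X2 = MatrixXd::Random(n, k2), Y2 = MatrixXd::Random(k2, k2);
    VectorXd D1 = VectorXd::Random(k1), D2 = VectorXd::Random(k2);

    // full operators O1 = (X1 Y1) D1 (X1 Y1)^T and O2 = (X2 Y2) D2 (X2 Y2)^T
    MatrixXd U1 = X1 * Y1, U2 = X2 * Y2;
    double full = (U1 * D1.asDiagonal() * U1.transpose()
                   * U2 * D2.asDiagonal() * U2.transpose()).trace();

    // reduced form used by traceProduct, with m = (X1 Y1)^T (X2 Y2)
    MatrixXd m = U1.transpose() * U2;
    double reduced = (m.transpose() * D1.asDiagonal() * m * D2.asDiagonal()).trace();

    std::cout << "difference: " << full - reduced << std::endl; // ~0 up to rounding
    return 0;
}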
Example #3
 // Returns the (i,j) block of m, where offsets[k] gives the first row/column of
 // block k (so offsets has one more entry than there are blocks per dimension).
 static inline Eigen::Block<ScalarMatrix> getBlock(ScalarMatrix &m, const std::vector<Index> &offsets, size_t i, size_t j) {
     return m.block(offsets[i], offsets[j], offsets[i+1] - offsets[i], offsets[j+1] - offsets[j]);
 }
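
A minimal usage sketch, assuming ScalarMatrix and Index stand for Eigen::MatrixXd and Eigen::Index (the surrounding project supplies its own typedefs): offsets {0, 2, 5} splits a 5x5 matrix into row/column ranges [0,2) and [2,5).

#include <iostream>
#include <vector>
#include <Eigen/Dense>

typedef Eigen::MatrixXd ScalarMatrix;
typedef Eigen::Index Index;

static inline Eigen::Block<ScalarMatrix> getBlock(ScalarMatrix &m, const std::vector<Index> &offsets, size_t i, size_t j) {
    return m.block(offsets[i], offsets[j], offsets[i+1] - offsets[i], offsets[j+1] - offsets[j]);
}

int main()
{
    ScalarMatrix m = ScalarMatrix::Random(5, 5);
    std::vector<Index> offsets = {0, 2, 5}; // two blocks per dimension
    getBlock(m, offsets, 1, 0).setZero();   // zero the lower-left 3x2 block
    std::cout << m << std::endl;
    return 0;
}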
Example #4
            int run(const log4cxx::LoggerPtr &logger, KernelEVD<Scalar> &builder) const {
                KQP_SCALAR_TYPEDEFS(Scalar);
                
                KQP_LOG_INFO_F(logger, "Kernel EVD with dense vectors and builder \"%s\" (pre-images = %d, linear combination = %d)", %KQP_DEMANGLE(builder) %max_preimages %max_lc);
                
                ScalarMatrix matrix(n,n);
                matrix.setConstant(0);
                
                // Construction
                for(int i = 0; i < nb_add; i++) {
                    
                    Scalar alpha = Eigen::internal::abs(Eigen::internal::random_impl<Scalar>::run()) + 1e-3;
                    
                    int k = (int)std::abs(Eigen::internal::random_impl<double>::run() * (double)(max_preimages-min_preimages)) + min_preimages;
                    int p = (int)std::abs(Eigen::internal::random_impl<double>::run() * (double)(max_lc-min_lc)) + min_lc;
                    KQP_LOG_INFO(logger, boost::format("Pre-images (%dx%d) and linear combination (%dx%d)") % n % k % k % p);
                    
                    // Generate a number of pre-images
                    ScalarMatrix m = ScalarMatrix::Random(n, k);
                    
                    // Generate the linear combination matrix
                    ScalarMatrix mA = ScalarMatrix::Random(k, p);
                    
                    matrix.template selfadjointView<Eigen::Lower>().rankUpdate(m * mA, alpha);
                    
                    
                    builder.add(alpha, FMatrixPtr(new Dense<Scalar>(m)), mA);
                }
                
                // Computing via EVD
                KQP_LOG_INFO(logger, "Computing an LDLT decomposition");
                
                typedef Eigen::LDLT<ScalarMatrix> LDLT;
                LDLT ldlt = matrix.template selfadjointView<Eigen::Lower>().ldlt();
                ScalarMatrix mL = ldlt.matrixL();
                // undo the pivoting so that matrix = mL * D * mL^H
                mL = ldlt.transpositionsP().transpose() * mL;
                FMatrixPtr  mU(Dense<Scalar>::create(mL));
                Eigen::Matrix<Scalar,Dynamic,1> mU_d = ldlt.vectorD();
                
                // Comparing the results
                
                KQP_LOG_INFO(logger, "Retrieving the decomposition");
                
                auto kevd = builder.getDecomposition();
                
                ScalarAltMatrix mUY = Eigen::Identity<Scalar>(mL.rows(), mL.rows());
                
                KQP_LOG_DEBUG(logger, "=== Decomposition ===");
//                KQP_LOG_DEBUG(logger, "X = " << kevd.mX);
                KQP_LOG_DEBUG(logger, "Y = " << kevd.mY);
                KQP_LOG_DEBUG(logger, "D = " << kevd.mD);
                
                
                // Computing the difference between operators || U1 - U2 ||^2
                
                KQP_LOG_INFO(logger, "Comparing the decompositions");
                double error = KernelOperators<Scalar>::difference(kevd.fs, kevd.mX, kevd.mY, kevd.mD, mU, mUY, mU_d);
                
                KQP_LOG_INFO_F(logger, "Squared error is %e", %error);
                return error < tolerance ? 0 : 1;
            }
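
The comparison above hinges on the LDLT factors reproducing the original matrix: Eigen's LDLT gives A = P^T L D L^H P, so mL = P^T L satisfies A = mL D mL^H. A self-contained sketch of that identity in plain Eigen (illustrative names only):

#include <iostream>
#include <Eigen/Dense>

int main()
{
    const int n = 8;
    Eigen::MatrixXd B = Eigen::MatrixXd::Random(n, n);
    Eigen::MatrixXd A = B * B.transpose(); // symmetric positive semi-definite

    Eigen::LDLT<Eigen::MatrixXd> ldlt = A.selfadjointView<Eigen::Lower>().ldlt();
    Eigen::MatrixXd mL = ldlt.matrixL();
    mL = ldlt.transpositionsP().transpose() * mL; // undo the pivoting
    Eigen::VectorXd d = ldlt.vectorD();

    // reconstruction error should be ~0 up to rounding
    std::cout << (A - mL * d.asDiagonal() * mL.transpose()).norm() << std::endl;
    return 0;
}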
Example #5
File: nullary.cpp Project: 1k5/eigen
void testVectorType(const VectorType& base)
{
  typedef typename VectorType::Scalar Scalar;

  const Index size = base.size();
  
  Scalar high = internal::random<Scalar>(-500,500);
  Scalar low = (size == 1 ? high : internal::random<Scalar>(-500,500));
  if (low>high) std::swap(low,high);

  const Scalar step = ((size == 1) ? 1 : (high-low)/(size-1));

  // check whether the result yields what we expect
  VectorType m(base);
  m.setLinSpaced(size,low,high);

  if(!NumTraits<Scalar>::IsInteger)
  {
    VectorType n(size);
    for (int i=0; i<size; ++i)
      n(i) = low+i*step;
    VERIFY_IS_APPROX(m,n);
  }

  VectorType n(size);
  for (int i=0; i<size; ++i)
    n(i) = size==1 ? low : (low + ((high-low)*Scalar(i))/(size-1));
  VERIFY_IS_APPROX(m,n);

  // random access version
  m = VectorType::LinSpaced(size,low,high);
  VERIFY_IS_APPROX(m,n);

  VERIFY( internal::isApprox(m(m.size()-1),high) );
  VERIFY( size==1 || internal::isApprox(m(0),low) );

  // sequential access version
  m = VectorType::LinSpaced(Sequential,size,low,high);
  VERIFY_IS_APPROX(m,n);

  VERIFY( internal::isApprox(m(m.size()-1),high) );
  VERIFY( size==1 || internal::isApprox(m(0),low) );

  // check whether everything works with row and col major vectors
  // (note: despite their names, row_vector is a column vector and col_vector a row vector)
  Matrix<Scalar,Dynamic,1> row_vector(size);
  Matrix<Scalar,1,Dynamic> col_vector(size);
  row_vector.setLinSpaced(size,low,high);
  col_vector.setLinSpaced(size,low,high);
  // when using the extended precision (e.g., FPU) the relative error might exceed 1 bit
  // when computing the squared sum in isApprox, thus the 2x factor.
  VERIFY( row_vector.isApprox(col_vector.transpose(), Scalar(2)*NumTraits<Scalar>::epsilon()));

  Matrix<Scalar,Dynamic,1> size_changer(size+50);
  size_changer.setLinSpaced(size,low,high);
  VERIFY( size_changer.size() == size );

  typedef Matrix<Scalar,1,1> ScalarMatrix;
  ScalarMatrix scalar;
  scalar.setLinSpaced(1,low,high);
  VERIFY_IS_APPROX( scalar, ScalarMatrix::Constant(high) );
  VERIFY_IS_APPROX( ScalarMatrix::LinSpaced(1,low,high), ScalarMatrix::Constant(high) );

  // regression test for bug 526 (linear vectorized transversal)
  if (size > 1) {
    m.tail(size-1).setLinSpaced(low, high);
    VERIFY_IS_APPROX(m(size-1), high);
  }
}
Example #6
IGL_INLINE ScalarMatrix igl::embree::line_mesh_intersection
(
 const ScalarMatrix & V_source,
 const ScalarMatrix  & N_source,
 const ScalarMatrix & V_target,
 const IndexMatrix  & F_target
)
{

  double tol = 0.00001;

  Eigen::MatrixXd ray_pos = V_source;
  Eigen::MatrixXd ray_dir = N_source;

  // Allocate matrix for the result
  ScalarMatrix R;
  R.resize(V_source.rows(), 3);

  // Initialize embree
  EmbreeIntersector embree;
  embree.init(V_target.template cast<float>(),F_target.template cast<int>());

  // Shoot rays from the source to the target
  for (unsigned i=0; i<ray_pos.rows(); ++i)
  {
    igl::Hit A,B;
    // Shoot ray A
    Eigen::RowVector3d A_pos = ray_pos.row(i) + tol * ray_dir.row(i);
    Eigen::RowVector3d A_dir = -ray_dir.row(i);

    bool A_hit = embree.intersectBeam(A_pos.cast<float>(), A_dir.cast<float>(),A);

    Eigen::RowVector3d B_pos = ray_pos.row(i) - tol * ray_dir.row(i);
    Eigen::RowVector3d B_dir = ray_dir.row(i);

    bool B_hit = embree.intersectBeam(B_pos.cast<float>(), B_dir.cast<float>(),B);


    // pick the closer hit: 0 selects ray A, 1 selects ray B, -1 means no hit
    int choice = -1;

    if (A_hit && !B_hit)
      choice = 0;
    else if (!A_hit && B_hit)
      choice = 1;
    else if (A_hit && B_hit)
      choice = A.t > B.t; // both hit: prefer B when its intersection is closer

    Eigen::RowVector3d temp;

    if (choice == -1)
      temp << -1, 0, 0;
    else if (choice == 0)
      temp << A.id, A.u, A.v;
    else if (choice == 1)
      temp << B.id, B.u, B.v;

    R.row(i) = temp;

  }

  return R;

}
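
A hedged usage sketch, assuming libigl is built with its embree module (the file names are illustrative): each row of the result is (face id, u, v) for the chosen hit on the target mesh, or (-1, 0, 0) when neither ray direction hits anything.

#include <igl/readOFF.h>
#include <igl/per_vertex_normals.h>
#include <igl/embree/line_mesh_intersection.h>

int main()
{
  Eigen::MatrixXd Vs, Vt, Ns;
  Eigen::MatrixXi Fs, Ft;
  igl::readOFF("source.off", Vs, Fs); // illustrative file names
  igl::readOFF("target.off", Vt, Ft);
  igl::per_vertex_normals(Vs, Fs, Ns);

  // shoot a ray from each source vertex along its normal at the target mesh
  Eigen::MatrixXd R = igl::embree::line_mesh_intersection(Vs, Ns, Vt, Ft);
  return 0;
}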
Example #7
  bool SolverSLAM2DLinear::solveOrientation()
  {
    assert(_optimizer->indexMapping().size() + 1 == _optimizer->vertices().size() && "Needs to operate on full graph");
    assert(_optimizer->vertex(0)->fixed() && "Graph is not fixed by vertex 0");
    VectorXD b, x; // will be used for theta and x/y update
    b.setZero(_optimizer->indexMapping().size());
    x.setZero(_optimizer->indexMapping().size());

    typedef Eigen::Matrix<double, 1, 1, Eigen::ColMajor> ScalarMatrix;

    ScopedArray<int> blockIndices(new int[_optimizer->indexMapping().size()]);
    for (size_t i = 0; i < _optimizer->indexMapping().size(); ++i)
      blockIndices[i] = i+1;

    SparseBlockMatrix<ScalarMatrix> H(blockIndices.get(), blockIndices.get(), _optimizer->indexMapping().size(), _optimizer->indexMapping().size());

    // building the structure, diagonal for each active vertex
    for (size_t i = 0; i < _optimizer->indexMapping().size(); ++i) {
      OptimizableGraph::Vertex* v = _optimizer->indexMapping()[i];
      int poseIdx = v->hessianIndex();
      ScalarMatrix* m = H.block(poseIdx, poseIdx, true);
      m->setZero();
    }

    HyperGraph::VertexSet fixedSet;

    // off diagonal for each edge
    for (SparseOptimizer::EdgeContainer::const_iterator it = _optimizer->activeEdges().begin(); it != _optimizer->activeEdges().end(); ++it) {
#    ifndef NDEBUG
      EdgeSE2* e = dynamic_cast<EdgeSE2*>(*it);
      assert(e && "Active edges contain non-odometry edge");
#    else
      EdgeSE2* e = static_cast<EdgeSE2*>(*it);
#    endif
      OptimizableGraph::Vertex* from = static_cast<OptimizableGraph::Vertex*>(e->vertices()[0]);
      OptimizableGraph::Vertex* to   = static_cast<OptimizableGraph::Vertex*>(e->vertices()[1]);

      int ind1 = from->hessianIndex();
      int ind2 = to->hessianIndex();
      if (ind1 == -1 || ind2 == -1) {
        if (ind1 == -1) fixedSet.insert(from); // collect the fixed vertices
        if (ind2 == -1) fixedSet.insert(to);
        continue;
      }

      bool transposedBlock = ind1 > ind2;
      if (transposedBlock){ // make sure, we allocate the upper triangle block
        std::swap(ind1, ind2);
      }

      ScalarMatrix* m = H.block(ind1, ind2, true);
      m->setZero();
    }

    // walk along the Minimal Spanning Tree to compute the guess for the robot orientation
    assert(fixedSet.size() == 1);
    VertexSE2* root = static_cast<VertexSE2*>(*fixedSet.begin());
    VectorXD thetaGuess;
    thetaGuess.setZero(_optimizer->indexMapping().size());
    UniformCostFunction uniformCost;
    HyperDijkstra hyperDijkstra(_optimizer);
    hyperDijkstra.shortestPaths(root, &uniformCost);

    HyperDijkstra::computeTree(hyperDijkstra.adjacencyMap());
    ThetaTreeAction thetaTreeAction(thetaGuess.data());
    HyperDijkstra::visitAdjacencyMap(hyperDijkstra.adjacencyMap(), &thetaTreeAction);

    // construct for the orientation
    for (SparseOptimizer::EdgeContainer::const_iterator it = _optimizer->activeEdges().begin(); it != _optimizer->activeEdges().end(); ++it) {
      EdgeSE2* e = static_cast<EdgeSE2*>(*it);
      VertexSE2* from = static_cast<VertexSE2*>(e->vertices()[0]);
      VertexSE2* to   = static_cast<VertexSE2*>(e->vertices()[1]);

      double omega = e->information()(2,2);

      double fromThetaGuess = from->hessianIndex() < 0 ? 0. : thetaGuess[from->hessianIndex()];
      double toThetaGuess   = to->hessianIndex() < 0 ? 0. : thetaGuess[to->hessianIndex()];
      double error          = normalize_theta(-e->measurement().rotation().angle() + toThetaGuess - fromThetaGuess);

      bool fromNotFixed = !(from->fixed());
      bool toNotFixed   = !(to->fixed());

      if (fromNotFixed || toNotFixed) {
        double omega_r = - omega * error;
        if (fromNotFixed) {
          b(from->hessianIndex()) -= omega_r;
          (*H.block(from->hessianIndex(), from->hessianIndex()))(0,0) += omega;
          if (toNotFixed) {
            if (from->hessianIndex() > to->hessianIndex())
              (*H.block(to->hessianIndex(), from->hessianIndex()))(0,0) -= omega;
            else
              (*H.block(from->hessianIndex(), to->hessianIndex()))(0,0) -= omega;
          }
        } 
        if (toNotFixed ) {
          b(to->hessianIndex()) += omega_r;
          (*H.block(to->hessianIndex(), to->hessianIndex()))(0,0) += omega;
        }
      }
    }

    // solve orientation
    typedef LinearSolverCSparse<ScalarMatrix> SystemSolver;
    SystemSolver linearSystemSolver;
    linearSystemSolver.init();
    bool ok = linearSystemSolver.solve(H, x.data(), b.data());
    if (!ok) {
      cerr << __PRETTY_FUNCTION__ << ": Failure while solving linear system" << endl;
      return false;
    }

    // update the orientation of the 2D poses and set translation to 0, GN shall solve that
    root->setToOrigin();
    for (size_t i = 0; i < _optimizer->indexMapping().size(); ++i) {
      VertexSE2* v = static_cast<VertexSE2*>(_optimizer->indexMapping()[i]);
      int poseIdx = v->hessianIndex();
      SE2 poseUpdate(0, 0, normalize_theta(thetaGuess(poseIdx) + x(poseIdx)));
      v->setEstimate(poseUpdate);
    }

    return true;
  }
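
To make the assembled system concrete: each relative-orientation measurement delta between poses i and j with information omega contributes the residual normalize_theta(theta_j - theta_i - delta), and the normal equations H x = b are solved once for the angle corrections. A minimal stand-alone sketch for a three-pose chain with pose 0 fixed (plain Eigen, illustrative numbers, a dense H instead of the sparse block matrix above):

#include <cmath>
#include <iostream>
#include <Eigen/Dense>

// wrap an angle to (-pi, pi]
static double normalize_theta(double th) {
  return std::atan2(std::sin(th), std::cos(th));
}

int main()
{
  // chain 0 -> 1 -> 2, pose 0 fixed at theta = 0
  const double meas01 = 0.5, meas12 = 0.3; // measured relative rotations
  const double omega  = 1.0;               // information (inverse variance)

  Eigen::Vector2d guess(0.45, 0.85);       // initial guesses for theta_1, theta_2
  Eigen::Matrix2d H = Eigen::Matrix2d::Zero();
  Eigen::Vector2d b = Eigen::Vector2d::Zero();

  // edge 0 -> 1: only theta_1 is free
  double e01 = normalize_theta(guess(0) - 0.0 - meas01);
  H(0,0) += omega;
  b(0)   -= omega * e01;

  // edge 1 -> 2: both theta_1 and theta_2 are free
  double e12 = normalize_theta(guess(1) - guess(0) - meas12);
  H(0,0) += omega; H(1,1) += omega;
  H(0,1) -= omega; H(1,0) -= omega;
  b(0)   += omega * e12;
  b(1)   -= omega * e12;

  Eigen::Vector2d dx = H.ldlt().solve(b);
  std::cout << "theta_1 = " << normalize_theta(guess(0) + dx(0))   // 0.5
            << ", theta_2 = " << normalize_theta(guess(1) + dx(1)) // 0.8
            << std::endl;
  return 0;
}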