output fc_rnn::execute(input const& in)
{
    // Set activation of input neurons
    auto const num_input = in.size();
    for(size_t n = 0; n < num_input; ++n)
    {
        vInput[n] = in[n];
    }

    // Summation for hidden neurons
    Eigen::VectorXd vHiddenSums = wmInput * vInput + wmHidden * vHidden;

    // Transfer function
    vHidden = evaluate(af_hidden, vHiddenSums.array());

    // TODO: Maybe should just store as a single vector?
    Eigen::VectorXd joined(input_layer_count() + hidden_count());
    joined << vInput, vHidden;
    Eigen::VectorXd vOutputSums = wmOutput * joined;
    Eigen::VectorXd vOutput = evaluate(af_output, vOutputSums.array());

    // Return the output values
    output out{ output_count() };
    std::copy(vOutput.data(), vOutput.data() + output_count(), out.begin());
    return out;
}
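// In equation form, execute() above performs one step of a fully connected
// recurrent network:
//   h_t = f(W_in * x_t + W_h * h_{t-1})   (the vHidden update)
//   y_t = g(W_out * [x_t; h_t])           (output from the joined vector)
// where f and g are the af_hidden / af_output activation functions.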
// A function to bounce the MCMC proposal off the hard boundaries.
Eigen::VectorXd bouncyBounds(const Eigen::VectorXd& val,
                             const Eigen::VectorXd& min,
                             const Eigen::VectorXd& max)
{
    Eigen::VectorXd delta = max - min;
    Eigen::VectorXd result = val;
    Eigen::Matrix<bool, Eigen::Dynamic, 1> tooBig = (val.array() > max.array());
    Eigen::Matrix<bool, Eigen::Dynamic, 1> tooSmall = (val.array() < min.array());

    for (Eigen::Index i = 0; i < result.size(); i++)
    {
        if (tooBig(i))
        {
            double overstep = val(i) - max(i);
            int nSteps = (int)(overstep / delta(i));
            double stillToGo = overstep - nSteps * delta(i);
            if (nSteps % 2 == 0)
                result(i) = max(i) - stillToGo;
            else
                result(i) = min(i) + stillToGo;
        }
        if (tooSmall(i))
        {
            double understep = min(i) - val(i);
            int nSteps = (int)(understep / delta(i));
            double stillToGo = understep - nSteps * delta(i);
            if (nSteps % 2 == 0)
                result(i) = min(i) + stillToGo;
            else
                result(i) = max(i) - stillToGo;
        }
    }
    return result;
}
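// A minimal usage sketch (hypothetical, assuming the bouncyBounds above):
// a 1-D proposal that oversteps the upper bound by 0.3 reflects to 0.7.
#include <Eigen/Dense>
#include <iostream>

int main()
{
    Eigen::VectorXd lo(1), hi(1), proposal(1);
    lo << 0.0;
    hi << 1.0;
    proposal << 1.3;                                      // 0.3 past the max
    std::cout << bouncyBounds(proposal, lo, hi) << "\n";  // prints 0.7
    return 0;
}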
void corrClass::initialize(Eigen::VectorXd a, Eigen::VectorXd b)
{
    vecA = a.array();
    vecB = b.array();
    sumA = vecA.sum();
    sumB = vecB.sum();
    sumA2 = vecA.square().sum();
    sumB2 = vecB.square().sum();
    sumAB = (vecA * vecB).sum();
}
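// A hypothetical helper (not part of the original class) showing why
// initialize() caches these five sums: they are exactly the sufficient
// statistics of the Pearson correlation. Assumes n = vecA.size() > 1 and
// non-constant inputs so the denominator is nonzero.
double corrClass::pearson() const
{
    const double n   = static_cast<double>(vecA.size());
    const double num = n * sumAB - sumA * sumB;
    const double den = std::sqrt((n * sumA2 - sumA * sumA) *
                                 (n * sumB2 - sumB * sumB));
    return num / den; // in [-1, 1]
}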
double numericalDampingForce(const EnergyCondition<double> &c,
                             const Eigen::VectorXd &uv,
                             const Eigen::VectorXd &x,
                             const Eigen::VectorXd &v,
                             double d, int i, double dx)
{
    // find dC/dt
    Eigen::VectorXd dCdt = numericalCTimeDerivative(c, x, v, uv, dx);

    // find dC/dx
    Eigen::VectorXd dCdx = numericalFirstCDerivative(c, x, uv, i, dx);

    // fd = -d * sum( i, dC_i/dx * dC_i/dt )
    return -d * (dCdx.array() * dCdt.array()).sum();
}
int main(int argc, char *argv[])
{
  using namespace Eigen;
  using namespace std;
  cout<<"Usage:"<<endl;
  cout<<"[space] toggle showing input mesh, output mesh or slice "<<endl;
  cout<<" through tet-mesh of convex hull."<<endl;
  cout<<"'.'/',' push back/pull forward slicing plane."<<endl;
  cout<<endl;
  // Load mesh: (V,T) tet-mesh of convex hull, F contains facets of input
  // surface mesh _after_ self-intersection resolution
  igl::readMESH(TUTORIAL_SHARED_PATH "/big-sigcat.mesh",V,T,F);
  // Compute barycenters of all tets
  igl::barycenter(V,T,BC);
  // Compute generalized winding number at all barycenters
  cout<<"Computing winding number over all "<<T.rows()<<" tets..."<<endl;
  igl::winding_number(V,F,BC,W);
  // Extract interior tets
  MatrixXi CT((W.array()>0.5).count(),4);
  {
    size_t k = 0;
    for(size_t t = 0;t<T.rows();t++)
    {
      if(W(t)>0.5)
      {
        CT.row(k) = T.row(t);
        k++;
      }
    }
  }
  // Find boundary facets of interior tets
  igl::boundary_facets(CT,G);
  // boundary_facets seems to be reversed...
  G = G.rowwise().reverse().eval();
  // Normalize
  W = (W.array() - W.minCoeff())/(W.maxCoeff()-W.minCoeff());
  // Plot the generated mesh
  igl::opengl::glfw::Viewer viewer;
  update_visualization(viewer);
  viewer.callback_key_down = &key_down;
  viewer.launch();
}
std::vector<double> ClassifyMotion::classify_motion( const Eigen::MatrixXd& motion )
{
    double realmin = numeric_limits<double>::min();
    std::vector<double> result( m_nb_classes );
    std::vector<Eigen::MatrixXd> Pxi( m_nb_classes );

    // Compute probability of each class
    for(int i=0;i<m_nb_classes;i++)
    {
        Pxi[i].setZero( motion.cols(), m_nb_states );
        for(int j=0;j<m_nb_states;j++)
        {
            // Compute the new probability p(x|i)
            Pxi[i].col(j) = gauss_pdf( motion, i, j );
        }

        // Compute the log likelihood of the class
        Eigen::VectorXd F = Pxi[i]*m_priors[i];
        for(int k=0;k<F.size();k++)
            if( F(k) < realmin || std::isnan(F(k)) )
                F(k) = realmin;

        result[i] = F.array().log().sum()/F.size();
    }
    return result;
}
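// result[i] above is the mean per-frame log-likelihood of the motion under
// class i's Gaussian mixture: each column of Pxi[i] holds p(x_t | state j),
// so Pxi[i]*m_priors[i] is the mixture density per frame, floored at
// realmin so the log never sees zero or NaN.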
Eigen::VectorXd TrajectoryThread::varianceToWeights(Eigen::VectorXd& desiredVariance, const double beta)
{
    desiredVariance /= maximumVariance;
    // Cap the normalized variance at varianceThresh so the weights stay positive.
    desiredVariance = desiredVariance.array().min(varianceThresh);
    Eigen::VectorXd desiredWeights =
        (Eigen::VectorXd::Ones(desiredVariance.rows()) - desiredVariance) / beta;
    return desiredWeights;
}
void KukaRMControllerRTNET::estimateF(Eigen::VectorXd& F)
{
    // Rolling sum over a circular buffer of the last m_delta force samples
    for(unsigned int i=0;i<LWRDOF;i++)
    {
        m_bufSum(i) = m_bufSum(i) - m_FTab(i,m_Fidx);
        m_FTab(i,m_Fidx) = F(i);
        m_bufSum(i) = m_bufSum(i) + F(i);
    }

    m_Fidx = m_Fidx + 1;
    if(m_Fidx > m_delta-1)
    {
        m_Fidx = 0;
        m_FBoucle = true;
    }

    // Average over the full window once it has wrapped,
    // otherwise over the samples seen so far
    if(m_FBoucle)
    {
        F = m_bufSum/m_delta;
    }
    else
    {
        F = m_bufSum/m_Fidx;
    }

    // Saturate at +/- m_FSat
    for(unsigned int i=0;i<LWRDOF;i++)
    {
        if((F.array().abs())(i) > m_FSat)
        {
            F(i) = (F(i) > 0.0) ? m_FSat : -m_FSat;
        }
    }
}
Mat Mat::inv()
{
    if (nrow() != ncol())
        throw runtime_error("Mat::inv() error: only symmetric positive definite matrices can be inverted with Mat::inv()");

    if (mattype == MatType::DIAGONAL)
    {
        Eigen::VectorXd diag = matrix.diagonal();
        diag = 1.0 / diag.array();
        vector<Eigen::Triplet<double>> triplet_list;
        for (int i = 0; i != diag.size(); ++i)
        {
            triplet_list.push_back(Eigen::Triplet<double>(i, i, diag[i]));
        }
        Eigen::SparseMatrix<double> inv_mat(triplet_list.size(), triplet_list.size());
        inv_mat.setZero();
        inv_mat.setFromTriplets(triplet_list.begin(), triplet_list.end());
        return Mat(row_names, col_names, inv_mat);
    }

    //Eigen::ConjugateGradient<Eigen::SparseMatrix<double>> solver;
    Eigen::SimplicialLDLT<Eigen::SparseMatrix<double>> solver;
    solver.compute(matrix);
    Eigen::SparseMatrix<double> I(nrow(), nrow());
    I.setIdentity();
    Eigen::SparseMatrix<double> inv_mat = solver.solve(I);
    return Mat(row_names, col_names, inv_mat);
}
void gaussian_process::evaluate( const Eigen::MatrixXd& domains,
                                 Eigen::VectorXd& means,
                                 Eigen::VectorXd& variances ) const
{
    if( domains.cols() != domains_.cols() )
    {
        COMMA_THROW( comma::exception, "expected " << domains_.cols()
                     << " column(s) in domains, got " << domains.cols() << std::endl );
    }
    Eigen::MatrixXd Kxsx = Eigen::MatrixXd::Zero( domains.rows(), domains_.rows() );
    for( std::size_t r = 0; r < std::size_t( domains.rows() ); ++r )
    {
        const Eigen::VectorXd& row = domains.row( r );
        for( std::size_t c = 0; c < std::size_t( domains_.rows() ); ++c )
        {
            Kxsx( r, c ) = covariance_( row, domains_.row( c ) );
        }
    }
    means = Kxsx * alpha_;
    means.array() += offset_;
    Eigen::MatrixXd Kxxs = Kxsx.transpose();
    L_.matrixL().solveInPlace( Kxxs );
    Eigen::MatrixXd& variance = Kxxs;
    variance = variance.array() * variance.array();
    variances = variance.colwise().sum();
    // for each diagonal variance, set v(r) = -v(r,r) + Kxsxs
    for( std::size_t r = 0; r < std::size_t( domains.rows() ); ++r )
    {
        variances( r ) = -variances( r ) + self_covariance_;
    }
}
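// The algebra above is the standard Gaussian-process predictive equation
// pair, assuming alpha_ caches K^{-1}(y - offset) and L_ is the Cholesky
// factor of the training covariance K (as the member names suggest):
//   mean(r)     = K_*x(r,:) * alpha_ + offset_
//   variance(r) = k(x_r, x_r) - || L^{-1} K_x*(:,r) ||^2
// Squaring Kxxs elementwise and summing its columns computes that norm for
// every query point without forming the full posterior covariance.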
void KukaRMControllerRTNET::nextDesVal()
{
    Eigen::VectorXd period = m_freq*m_time;
    Eigen::VectorXd amp = Eigen::VectorXd::Zero(LWRDOF);

    // Ramp the amplitude up over the first second to avoid a step at t=0
    if(m_time < 1.0)
    {
        amp = m_amp*m_time;
    }
    else
    {
        amp = m_amp;
    }

    // Desired position, velocity and acceleration of the sinusoidal reference
    for(unsigned int i=0;i<LWRDOF;i++)
    {
        m_q_des(i)   = amp(i)*(period.array().sin())(i) + m_bias(i);
        m_qp_des(i)  = amp(i)*m_freq(i)*(period.array().cos())(i);
        m_qpp_des(i) = -amp(i)*m_freq(i)*m_freq(i)*(period.array().sin())(i);
    }
}
TimeIntegrator::TimeIntegrator(Eigen::VectorXd &napp)
{
    _napp = napp;

    // Determine nstep, dt, F
    _nstep = 10;
    _dt = 1.0/_nstep;
    Eigen::VectorXd x;
    x.setLinSpaced(_nstep, 0.0, 60.0);
    F1 = 200*x.array().sin();
}
Eigen::VectorXd Shrinkage( const Eigen::VectorXd& vec, const double kappa ) const
{
    Eigen::ArrayXd zero_vec(vec.size());
    zero_vec.setZero();
    return zero_vec.max( vec.array() - kappa ) - zero_vec.max( -vec.array() - kappa );
}
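// Shrinkage is the elementwise soft-thresholding operator found in
// ADMM/Lasso-style solvers: S_kappa(v)_i = max(v_i - kappa, 0) - max(-v_i - kappa, 0).
// A quick worked example (a sketch, assuming the member above): with
// kappa = 1, v = (3, -0.5, -2) maps to (2, 0, -1). Entries inside
// [-kappa, kappa] collapse to zero; the rest move toward zero by kappa.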
double polyfit(size_t n, size_t deg, const double* xp, const double* yp,
               const double* wp, double* pp)
{
    ConstMappedVector x(xp, n);
    Eigen::VectorXd y = ConstMappedVector(yp, n);
    MappedVector p(pp, deg+1);

    if (deg >= n) {
        throw CanteraError("polyfit", "Polynomial degree ({}) must be less "
            "than number of input data points ({})", deg, n);
    }

    // Construct A such that each row i of A has the elements
    // 1, x[i], x[i]^2, x[i]^3 ... + x[i]^deg
    Eigen::MatrixXd A(n, deg+1);
    A.col(0).setConstant(1.0);

    if (deg > 0) {
        A.col(1) = x;
    }
    for (size_t i = 1; i < deg; i++) {
        A.col(i+1) = A.col(i).array() * x.array();
    }

    if (wp != nullptr && wp[0] > 0) {
        // For compatibility with old Fortran dpolft, input weights are the
        // squares of the weight vector used in this algorithm
        Eigen::VectorXd w = ConstMappedVector(wp, n).cwiseSqrt().eval();

        // Multiply by the weights on both sides
        A = w.asDiagonal() * A;
        y.array() *= w.array();
    }

    // Solve W*A*p = W*y to find the polynomial coefficients
    p = A.colPivHouseholderQr().solve(y);

    // Evaluate the computed polynomial at the input x coordinates to compute
    // the RMS error as the return value
    return (A*p - y).eval().norm() / sqrt(n);
}
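// A minimal usage sketch of the routine above: fit a quadratic through
// four points with no weighting (wp = nullptr skips the weighted branch).
double xs[] = {0.0, 1.0, 2.0, 3.0};
double ys[] = {1.0, 3.0, 9.0, 19.0}; // exactly y = 1 + 2x^2
double coeffs[3];
double rms = polyfit(4, 2, xs, ys, nullptr, coeffs);
// coeffs ends up ~{1, 0, 2} and rms ~0, since the data are exactly quadratic.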
normal_fullrank operator/=(const normal_fullrank& rhs)
{
    static const char* function =
        "stan::variational::normal_fullrank::operator/=";

    stan::math::check_size_match(function,
                                 "Dimension of lhs", dimension_,
                                 "Dimension of rhs", rhs.dimension());
    mu_.array() /= rhs.mu().array();
    L_chol_.array() /= rhs.L_chol().array();
    return *this;
}
void NormalVarianceMixtureBaseError::gradient(const int i, const Eigen::VectorXd& res)
{
    counter++;
    Eigen::VectorXd res_ = res;
    Eigen::VectorXd iV = Vs[i].cwiseInverse();
    //res_.array() *= iV.array();
    dsigma += -res.size()/sigma + (res_.array().square()*iV.array()).sum() / pow(sigma, 3);

    // Expected Fisher information
    // res.size()/pow(sigma, 2) - 3 * E[res.array().square().sum()] / pow(sigma, 4);
    ddsigma += -2 * res.size()/pow(sigma, 2);
}
IGL_INLINE bool igl::linprog(
  const Eigen::VectorXd & f,
  const Eigen::MatrixXd & A,
  const Eigen::VectorXd & b,
  const Eigen::MatrixXd & B,
  const Eigen::VectorXd & c,
  Eigen::VectorXd & x)
{
  using namespace Eigen;
  using namespace std;
  const int m = A.rows();
  const int n = A.cols();
  const int p = B.rows();
  MatrixXd Im = MatrixXd::Identity(m,m);
  MatrixXd AS(m,n+m);
  AS<<A,Im;
  MatrixXd bS = b.array().abs();
  for(int i = 0;i<m;i++)
  {
    const auto & sign = [](double x)->double
    {
      return (x<0?-1:(x>0?1:0));
    };
    AS.row(i) *= sign(b(i));
  }
  MatrixXd In = MatrixXd::Identity(n,n);
  MatrixXd P(n+m,2*n+m);
  P<<                    In, -In, MatrixXd::Zero(n,m),
     MatrixXd::Zero(m,2*n),                        Im;
  MatrixXd ASP = AS*P;
  MatrixXd BSP(0,2*n+m);
  if(p>0)
  {
    MatrixXd BS(p,2*n);
    BS<<B,MatrixXd::Zero(p,n);
    BSP = BS*P;
  }
  VectorXd fSP = VectorXd::Ones(2*n+m);
  fSP.head(2*n) = P.block(0,0,n,2*n).transpose()*f;
  const VectorXd & cc = fSP;
  MatrixXd AA(m+p,2*n+m);
  AA<<ASP,BSP;
  VectorXd bb(m+p);
  bb<<bS,c;
  VectorXd xxs;
  bool ret = linprog(cc,AA,bb,0,xxs);
  x = P.block(0,0,n,2*n+m)*xxs;
  return ret;
}
void Mat::inv_ip(Logger *log)
{
    if (nrow() != ncol())
        throw runtime_error("Mat::inv_ip() error: only symmetric positive definite matrices can be inverted with Mat::inv_ip()");

    if (mattype == MatType::DIAGONAL)
    {
        log->log("inverting diagonal matrix in place");
        log->log("extracting diagonal");
        Eigen::VectorXd diag = matrix.diagonal();
        log->log("inverting diagonal");
        diag = 1.0 / diag.array();
        log->log("building triplets");
        vector<Eigen::Triplet<double>> triplet_list;
        for (int i = 0; i != diag.size(); ++i)
        {
            triplet_list.push_back(Eigen::Triplet<double>(i, i, diag[i]));
        }
        log->log("resizing matrix to size " + std::to_string(triplet_list.size()));
        matrix.resize(triplet_list.size(), triplet_list.size());
        matrix.setZero();
        log->log("setting matrix from triplets");
        matrix.setFromTriplets(triplet_list.begin(), triplet_list.end());
        return;
    }

    //Eigen::ConjugateGradient<Eigen::SparseMatrix<double>> solver;
    log->log("inverting non-diagonal matrix in place");
    log->log("instantiate solver");
    Eigen::SimplicialLDLT<Eigen::SparseMatrix<double>> solver;
    log->log("computing inverse");
    solver.compute(matrix);
    log->log("getting identity instance for solution");
    Eigen::SparseMatrix<double> I(nrow(), nrow());
    I.setIdentity();
    log->log("solving");
    matrix = solver.solve(I);
}
void checkPseudoDerivatives(const Eigen::SparseMatrix<double> &dampingPseudoDerivatives,
                            const EnergyCondition<double> &c,
                            const Eigen::VectorXd &uv,
                            Eigen::VectorXd &x, Eigen::VectorXd &v,
                            double k, double d, double dx, double tol)
{
    Eigen::VectorXd dCdt = numericalCTimeDerivative(c, x, v, uv, dx);

    for (int i = 0; i < x.size(); ++i)
    {
        for (int j = 0; j < x.size(); ++j)
        {
            Eigen::VectorXd d2Cdidj = numericalSecondCDerivative(c, x, uv, i, j, dx);
            double expectedCoeff = -d * (d2Cdidj.array() * dCdt.array()).sum();
            double actualCoeff = dampingPseudoDerivatives.coeff(i, j);
            Microsoft::VisualStudio::CppUnitTestFramework::Assert::AreEqual(
                expectedCoeff, actualCoeff, tol);
        }
    }
}
void IGMeasurementError::gradient(const int i, const Eigen::VectorXd& res)
{
    NormalVarianceMixtureBaseError::gradient(i, res);
    Eigen::VectorXd iV = Vs[i].cwiseInverse();
    if(common_V == 0)
    {
        double logV = Vs[i].array().log().sum();
        dnu  += res.size() * (1. + log(nu) - digamma_nu) - logV - iV.array().sum();
        ddnu += res.size() * (1/nu - trigamma_nu);
    }
    else
    {
        double logV = log(Vs[i][0]);
        dnu  += (1. + log(nu) - digamma_nu) - logV - iV[0];
        ddnu += (1/nu - trigamma_nu);
    }
}
Eigen::VectorXd NormalVarianceMixtureBaseError::simulate(const Eigen::VectorXd& Y)
{
    Eigen::VectorXd residual = sigma * (Rcpp::as< Eigen::VectorXd >(Rcpp::rnorm( Y.size() )));
    if(common_V == 0)
    {
        for(int ii = 0; ii < residual.size(); ii++)
        {
            double V = simulate_V();
            residual[ii] *= sqrt(V);
        }
    }
    else
    {
        double V = simulate_V();
        residual.array() *= sqrt(V);
    }
    return residual;
}
void NormalVarianceMixtureBaseError::sampleV(const int i, const Eigen::VectorXd& res, int n_s)
{
    if(n_s == -1)
        n_s = Vs[i].size();

    if(common_V == 0)
    {
        for(int j = 0; j < n_s; j++)
        {
            double res2 = pow(res[j]/sigma, 2);
            Vs[i][j] = sample_V(res2, -1);
        }
    }
    else
    {
        double tmp = res.array().square().sum()/pow(sigma, 2);
        double cv = sample_V(tmp, n_s);
        for(int j = 0; j < n_s; j++)
            Vs[i][j] = cv;
    }
}
void do_residuals_stats(const Eigen::VectorXd& r, Eigen::VectorXd& stats, std::string& file_hdr)
{
    file_hdr = "% MAX_ABS_ERR MIN_ABS_ERR AVERAGE_ERR STD_DEV RMSE MEDIAN\n";
    const size_t N = r.size();
    stats.resize(6);
    if (!N) return;

    // Note: maxCoeff()/minCoeff() match the MAX_ABS_ERR/MIN_ABS_ERR header
    // only if the residuals are already non-negative.
    stats[0] = r.maxCoeff();
    stats[1] = r.minCoeff();
    mrpt::math::meanAndStd(r, stats[2], stats[3]);

    // RMSE:
    stats[4] = std::sqrt( r.array().square().sum() / N );

    // Median (for even N this picks the upper of the two middle values):
    std::vector<double> v(N);
    for (size_t i=0;i<N;i++) v[i]=r[i];
    nth_element(v.begin(), v.begin()+(N/2), v.end());
    stats[5] = v[N/2];
}
Eigen::MatrixXd sqExp(const Eigen::MatrixXd& x1, const Eigen::MatrixXd& x2,
                      const Eigen::VectorXd& lengthScale, bool noisy)
{
    // assert(x1.rows() == x2.rows())
    int n1 = x1.cols(), n2 = x2.cols();

    // Compute the weighted square distances
    Eigen::VectorXd w = (lengthScale.array().square().cwiseInverse()).matrix();
    Eigen::MatrixXd xx1 = w.replicate(1, n1).cwiseProduct(x1).cwiseProduct(x1).colwise().sum();
    Eigen::MatrixXd xx2 = w.replicate(1, n2).cwiseProduct(x2).cwiseProduct(x2).colwise().sum();
    Eigen::MatrixXd x1x2 = w.replicate(1, n1).cwiseProduct(x1).transpose() * x2;

    // Compute the covariance matrix
    Eigen::MatrixXd K = (-0.5 * Eigen::MatrixXd::Zero(n1, n2).cwiseMax(
        xx1.transpose().replicate(1, n2) + xx2.replicate(n1, 1) - 2 * x1x2)).array().exp();

    if (noisy)
    {
        K += K.colwise().sum().asDiagonal();
    }
    return K;
}
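// Usage sketch (assumes the sqExp above): points are stored column-wise,
// so x1 is d x n1. With unit length scales, each point's kernel value with
// itself is exp(0) = 1, giving ones on the diagonal before the optional
// noise term.
Eigen::MatrixXd X(1, 3);
X << 0.0, 1.0, 2.0;
Eigen::VectorXd ls = Eigen::VectorXd::Ones(1);
Eigen::MatrixXd K = sqExp(X, X, ls, /*noisy=*/false); // 3x3, unit diagonal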
TEST(qslim,cylinder)
{
  using namespace igl;
  const int axis_devisions = 5;
  const int height_devisions = 2+10;
  Eigen::MatrixXd V;
  Eigen::MatrixXi F;
  cylinder(axis_devisions,height_devisions,V,F);
  Eigen::MatrixXd U;
  Eigen::MatrixXi G;
  Eigen::VectorXi I,J;
  qslim(V,F,2*axis_devisions,U,G,I,J);
  ASSERT_EQ(axis_devisions*2,U.rows());
  double l,u;
  igl::writePLY("qslim-cylinder-vf.ply",V,F);
  igl::writePLY("qslim-cylinder-ug.ply",U,G);
  const auto & hausdorff_lower_bound = [](
    const Eigen::MatrixXd & V,
    const Eigen::MatrixXi & F,
    Eigen::MatrixXd & U,
    Eigen::MatrixXi & G)->double
  {
    Eigen::MatrixXd Vk;
    Eigen::MatrixXi Fk;
    igl::upsample(V,F,Vk,Fk,5);
    Eigen::MatrixXd C;
    Eigen::VectorXi I;
    Eigen::VectorXd D;
    igl::point_mesh_squared_distance(Vk,U,G,D,I,C);
    return D.array().sqrt().maxCoeff();
  };
  //igl::hausdorff(V,F,U,G,1e-14,l,u);
  ASSERT_NEAR(hausdorff_lower_bound(V,F,U,G),0,2e-10);
  //igl::hausdorff(U,G,V,F,1e-14,l,u);
  ASSERT_NEAR(hausdorff_lower_bound(U,G,V,F),0,2e-10);
}
template <typename DerivedV, typename DerivedF, typename DerivedS,
          typename DerivedSF, typename DerivedE>
IGL_INLINE double igl::polyvector_field_poisson_reconstruction(
  const Eigen::PlainObjectBase<DerivedV> &Vcut,
  const Eigen::PlainObjectBase<DerivedF> &Fcut,
  const Eigen::PlainObjectBase<DerivedS> &sol3D_combed,
  Eigen::PlainObjectBase<DerivedSF> &scalars,
  Eigen::PlainObjectBase<DerivedS> &sol3D_recon,
  Eigen::PlainObjectBase<DerivedE> &max_error)
{
  Eigen::SparseMatrix<typename DerivedV::Scalar> gradMatrix;
  igl::grad(Vcut, Fcut, gradMatrix);

  Eigen::VectorXd FAreas;
  igl::doublearea(Vcut, Fcut, FAreas);
  FAreas = FAreas.array() * .5;

  int nf = FAreas.rows();
  Eigen::SparseMatrix<typename DerivedV::Scalar> M, M1;
  Eigen::VectorXi II = igl::colon<int>(0, nf-1);
  igl::sparse(II, II, FAreas, M1);
  igl::repdiag(M1, 3, M);

  int half_degree = sol3D_combed.cols()/3;
  sol3D_recon.setZero(sol3D_combed.rows(), sol3D_combed.cols());
  int numF = Fcut.rows();
  scalars.setZero(Vcut.rows(), half_degree);

  Eigen::SparseMatrix<typename DerivedV::Scalar> Q = gradMatrix.transpose()*M*gradMatrix;

  // Fix one point at Ik=fix, value at fixed xk=0
  int fix = 0;
  Eigen::VectorXi Ik(1); Ik<<fix;
  Eigen::VectorXd xk(1); xk<<0;

  // Unknown indices
  Eigen::VectorXi Iu(Vcut.rows()-1,1);
  Iu<<igl::colon<int>(0, fix-1),
      igl::colon<int>(fix+1, Vcut.rows()-1);

  Eigen::SparseMatrix<typename DerivedV::Scalar> Quu, Quk;
  igl::slice(Q, Iu, Iu, Quu);
  igl::slice(Q, Iu, Ik, Quk);

  Eigen::SimplicialLDLT<Eigen::SparseMatrix<typename DerivedV::Scalar> > solver;
  solver.compute(Quu);

  Eigen::VectorXd vec;
  vec.setZero(3*numF,1);
  for (int i = 0; i < half_degree; ++i)
  {
    vec<<sol3D_combed.col(i*3+0),
         sol3D_combed.col(i*3+1),
         sol3D_combed.col(i*3+2);
    Eigen::VectorXd b = gradMatrix.transpose()*M*vec;
    Eigen::VectorXd bu = igl::slice(b, Iu);
    Eigen::VectorXd rhs = bu - Quk*xk;
    Eigen::MatrixXd yu = solver.solve(rhs);
    Eigen::VectorXi index = i*Eigen::VectorXi::Ones(Iu.rows(),1);
    igl::slice_into(yu, Iu, index, scalars);
    scalars(Ik[0],i) = xk[0];
  }

  // Evaluate gradient of found scalar function
  for (int i = 0; i < half_degree; ++i)
  {
    Eigen::VectorXd vec_poisson = gradMatrix*scalars.col(i);
    sol3D_recon.col(i*3+0) = vec_poisson.segment(0*numF, numF);
    sol3D_recon.col(i*3+1) = vec_poisson.segment(1*numF, numF);
    sol3D_recon.col(i*3+2) = vec_poisson.segment(2*numF, numF);
  }

  max_error.setZero(numF,1);
  for (int i = 0; i < half_degree; ++i)
  {
    Eigen::VectorXd diff = (sol3D_recon.block(0, i*3, numF, 3)
                          - sol3D_combed.block(0, i*3, numF, 3)).rowwise().norm();
    diff = diff.array() / sol3D_combed.block(0, i*3, numF, 3).rowwise().norm().array();
    max_error = max_error.cwiseMax(diff.cast<typename DerivedE::Scalar>());
  }
  return max_error.mean();
}
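// The solve above is the usual area-weighted Poisson reconstruction: each
// scalar field s minimizes || G s - v ||_M^2, i.e. solves the normal
// equations (G^T M G) s = G^T M v, with one vertex pinned to zero to remove
// the constant null space of the gradient operator G.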
bool insupport(const Uniform& d, const Eigen::VectorXd& x)
{
    // Check that all dimensions are within the (open) bounds of the distribution.
    return ((d.min().array() < x.array()) && (x.array() < d.max().array())).all();
}
double LogSumExp(const Eigen::VectorXd& x)
{
    const double x_max = x.maxCoeff();
    return log((x.array() - x_max).exp().sum()) + x_max;
}
double SumExp(const Eigen::VectorXd& x)
{
    const double x_max = x.maxCoeff();
    return (x.array() - x_max).exp().sum() * exp(x_max);
}
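// Both helpers use the standard max-shift trick,
//   log(sum_i exp(x_i)) = x_max + log(sum_i exp(x_i - x_max)),
// so the exponentials never overflow. For example, with x = (1000, 1000)
// the naive form overflows a double, while LogSumExp(x) returns
// 1000 + log(2). Note that SumExp still multiplies by exp(x_max) at the
// end, so it only defers, rather than removes, overflow for large inputs.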
void test_schur_dense_vs_sparse(
  const TGraphInitRandom *init_random,
  const TGraphInitManual *init_manual,
  const double lambda = 1e3 )
{
  // A linear system object holds the sparse Jacobians for a set of observations.
  my_rba_t::rba_problem_state_t::TLinearSystem lin_system;

  size_t nUnknowns_k2k=0, nUnknowns_k2f=0;

  if (init_random)
  {
    randomGenerator.randomize(init_random->random_seed);
    nUnknowns_k2k=init_random->nUnknowns_k2k;
    nUnknowns_k2f=init_random->nUnknowns_k2f;
  }
  else
  {
    nUnknowns_k2k=init_manual->nUnknowns_k2k;
    nUnknowns_k2f=init_manual->nUnknowns_k2f;
  }

  // Fill example Jacobians for the test:
  //  * 2 keyframes -> 1 k2k edge (edges=unknowns)
  //  * 6 features with unknown positions.
  //  * 6*2 observations: each feature seen once from each keyframe
  //  Note: 6*2*2 = 24 is the minimum >= 1*6+3*6=24 unknowns so
  //  Hessians are invertible
  // -----------------------------------------------------------------
  // Create observations:
  // Don't populate the symbolic structure, just the numeric part.
  static char valid_true = 1; // Just to initialize valid bit pointers to this one.
  {
    lin_system.dh_dAp.setColCount(nUnknowns_k2k);
    lin_system.dh_df.setColCount(nUnknowns_k2f);

    size_t idx_obs = 0;
    for (size_t nKF=0;nKF<=nUnknowns_k2k;nKF++)
    {
      my_rba_t::jacobian_traits_t::TSparseBlocksJacobians_dh_dAp::col_t * dh_dAp_i =
        (nKF==0) ? NULL : (&lin_system.dh_dAp.getCol(nKF-1));

      for (size_t nLM=0;nLM<nUnknowns_k2f;nLM++)
      {
        // Do we have an observation of feature "nLM" from keyframe "nKF"??
        bool add_this;
        if (init_random)
             add_this = (randomGenerator.drawUniform(0.0,1.0)<=init_random->PROB_OBS);
        else add_this = init_manual->visible[nKF*nUnknowns_k2f + nLM];

        if (add_this)
        {
          // Create observation: KF[nKF] -> LM[nLM]
          my_rba_t::jacobian_traits_t::TSparseBlocksJacobians_dh_df::col_t & dh_df_j =
            lin_system.dh_df.getCol(nLM);

          // Random is ok for this test:
          if (dh_dAp_i)
          {
            randomGenerator.drawGaussian1DMatrix( (*dh_dAp_i)[idx_obs].num );
            (*dh_dAp_i)[idx_obs].sym.is_valid = &valid_true;
          }
          randomGenerator.drawGaussian1DMatrix( dh_df_j[idx_obs].num.setRandom() );
          dh_df_j[idx_obs].sym.is_valid = &valid_true;

          idx_obs++;
        }
      }
    }
    // Debug point:
    //if ( idx_obs != (1+nUnknowns_k2k)*nUnknowns_k2f )
    cout << "#k2k: " << nUnknowns_k2k << " #k2f: " << nUnknowns_k2f
         << " #obs: " << idx_obs
         << " out of max.=" << (1+nUnknowns_k2k)*nUnknowns_k2f << endl;
  }

  // A default gradient:
  Eigen::VectorXd minus_grad; // The negative of the gradient.
  const size_t idx_start_f = 6*nUnknowns_k2k;
  const size_t nLMs_scalars = 3*nUnknowns_k2f;
  const size_t nUnknowns_scalars = idx_start_f + nLMs_scalars;

  minus_grad.resize(nUnknowns_scalars);
  minus_grad.setOnes();

#if 0
  lin_system.dh_dAp.saveToTextFileAsDense("test_dh_dAp.txt");
  lin_system.dh_df.saveToTextFileAsDense("test_dh_df.txt");
#endif

  // ------------------------------------------------------------
  // 1st) Evaluate Schur naively with dense matrices
  // ------------------------------------------------------------
  CMatrixDouble dense_dh_dAp, dense_dh_df;
  lin_system.dh_dAp.getAsDense(dense_dh_dAp);
  lin_system.dh_df.getAsDense (dense_dh_df);

  const CMatrixDouble dense_HAp  = dense_dh_dAp.transpose() * dense_dh_dAp;
  const CMatrixDouble dense_Hf   = dense_dh_df.transpose() * dense_dh_df;
  const CMatrixDouble dense_HApf = dense_dh_dAp.transpose() * dense_dh_df;

  CMatrixDouble dense_Hf_plus_lambda = dense_Hf;
  for (size_t i=0;i<nLMs_scalars;i++)
    dense_Hf_plus_lambda(i,i)+=lambda;

  // Schur: naive dense computation:
  const CMatrixDouble dense_HAp_schur =
    dense_HAp - dense_HApf*dense_Hf_plus_lambda.inv()*dense_HApf.transpose();
  const Eigen::VectorXd dense_minus_grad_schur =
    minus_grad.head(idx_start_f)
    - dense_HApf*(dense_Hf_plus_lambda.inv())*minus_grad.tail(nLMs_scalars);

#if 0
  dense_HAp.saveToTextFile("dense_HAp.txt");
  dense_Hf.saveToTextFile("dense_Hf.txt");
  dense_HApf.saveToTextFile("dense_HApf.txt");
#endif

  // ------------------------------------------------------------
  // 2nd) Evaluate using sparse Schur implementation
  // ------------------------------------------------------------
  // Build a list with ALL the unknowns:
  vector<my_rba_t::jacobian_traits_t::TSparseBlocksJacobians_dh_dAp::col_t*> dh_dAp;
  vector<my_rba_t::jacobian_traits_t::TSparseBlocksJacobians_dh_df::col_t*>  dh_df;

  for (size_t i=0;i<lin_system.dh_dAp.getColCount();i++)
    dh_dAp.push_back( & lin_system.dh_dAp.getCol(i) );
  for (size_t i=0;i<lin_system.dh_df.getColCount();i++)
    dh_df.push_back( & lin_system.dh_df.getCol(i) );

#if 0
  {
    CMatrixDouble Jbin;
    lin_system.dh_dAp.getBinaryBlocksRepresentation(Jbin);
    Jbin.saveToTextFile("test_sparse_jacobs_blocks.txt", mrpt::math::MATRIX_FORMAT_INT );
    lin_system.dh_dAp.saveToTextFileAsDense("test_sparse_jacobs.txt");
  }
#endif

  my_rba_t::hessian_traits_t::TSparseBlocksHessian_Ap  HAp;
  my_rba_t::hessian_traits_t::TSparseBlocksHessian_f   Hf;
  my_rba_t::hessian_traits_t::TSparseBlocksHessian_Apf HApf; // This one stores in row-compressed form (i.e. it's stored transposed!!!)

  // This resizes and fills in the structs HAp,Hf,HApf from Jacobians:
  my_rba_t::sparse_hessian_build_symbolic( HAp,Hf,HApf, dh_dAp,dh_df );

  my_rba_t rba;
  rba.sparse_hessian_update_numeric(HAp);
  rba.sparse_hessian_update_numeric(Hf);
  rba.sparse_hessian_update_numeric(HApf);

#if 0
  HAp.saveToTextFileAsDense("HAp.txt", true, true );
  Hf.saveToTextFileAsDense("Hf.txt", true, true);
  HApf.saveToTextFileAsDense("HApf.txt",false, false);
  minus_grad.saveToTextFile("minus_grad_Ap.txt");
  { ofstream f("lambda.txt"); f << lambda << endl; }
#endif

  // The constructor builds the symbolic Schur.
  SchurComplement<
    my_rba_t::hessian_traits_t::TSparseBlocksHessian_Ap,
    my_rba_t::hessian_traits_t::TSparseBlocksHessian_f,
    my_rba_t::hessian_traits_t::TSparseBlocksHessian_Apf
    >
    schur_compl(
      HAp,Hf,HApf,             // The different symbolic/numeric Hessian
      &minus_grad[0],          // minus gradient of the Ap part
      &minus_grad[idx_start_f] // minus gradient of the features part
      );

  schur_compl.numeric_build_reduced_system(lambda);
  //cout << "getNumFeaturesFullRank: " << schur_compl.getNumFeaturesFullRank() << endl;

  // ------------------------------------------------------------
  // 3rd) Both must match!
  // ------------------------------------------------------------
  // * HAp: Holds the updated Schur complement Hessian.
  // * minus_grad: Holds the updated Schur complement -gradient.
  CMatrixDouble final_HAp_schur;
  HAp.getAsDense(final_HAp_schur, true /* force symmetry, i.e. replicate the half matrix not stored in sparse form */ );

#if 0
  final_HAp_schur.saveToTextFile("schur_HAp.txt");
#endif

  EXPECT_NEAR( (dense_HAp_schur-final_HAp_schur).array().abs().maxCoeff()/(dense_HAp_schur.array().abs().maxCoeff()), 0, 1e-10)
    << "nUnknowns_k2k=" << nUnknowns_k2k << endl
    << "nUnknowns_k2f=" << nUnknowns_k2f << endl
    << "final_HAp_schur:\n" << final_HAp_schur << endl
    << "dense_HAp_schur:\n" << dense_HAp_schur << endl;

  const Eigen::VectorXd final_minus_grad_Ap = minus_grad.head(idx_start_f);
  const double Ap_minus_grad_Ap_max_error =
    (dense_minus_grad_schur-final_minus_grad_Ap).array().abs().maxCoeff();
  EXPECT_NEAR( Ap_minus_grad_Ap_max_error/dense_minus_grad_schur.array().abs().maxCoeff(), 0, 1e-10)
    << "nUnknowns_k2k=" << nUnknowns_k2k << endl
    << "nUnknowns_k2f=" << nUnknowns_k2f << endl
    //<< "dense_minus_grad_schur:\n" << dense_minus_grad_schur
    //<< "final_minus_grad_Ap:\n" << final_minus_grad_Ap << endl
    ;

#if 0
  HAp.saveToTextFileAsDense("test_HAp_sparse_schur.txt");
  dense_HAp_schur.saveToTextFile("test_HAp_sparse_schur2.txt");
#endif
}
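// For reference, the identity verified by both paths above is the damped
// Schur complement that eliminates the feature (landmark) block:
//   HAp_schur = HAp - HApf (Hf + lambda*I)^{-1} HApf^T
//   gAp_schur = gAp - HApf (Hf + lambda*I)^{-1} gf
// computed once densely and once with the sparse block implementation.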