// Scatter a dense matrix X into Y: Y(R(i),C(j)) = X(i,j).
//
// Inputs:
//   X  xm by xn matrix of values to scatter
//   R  list of xm row indices into Y (one per row of X)
//   C  list of xn column indices into Y (one per column of X)
// Output:
//   Y  matrix written in place; entries not addressed by (R,C) keep their
//      previous values, so Y must already be sized large enough.
IGL_INLINE void igl::slice_into(
  const Eigen::PlainObjectBase<DerivedX> & X,
  const Eigen::Matrix<int,Eigen::Dynamic,1> & R,
  const Eigen::Matrix<int,Eigen::Dynamic,1> & C,
  Eigen::PlainObjectBase<DerivedX> & Y)
{
  int xm = X.rows();
  int xn = X.cols();
  assert(R.size() == xm);
  assert(C.size() == xn);
#ifndef NDEBUG
  // BUG FIX: both bounds used to be checked against Y.size() (the total
  // element count), which silently accepted out-of-range row indices on
  // wide matrices and rejected valid ones on tall matrices. Compare row
  // indices to Y.rows() and column indices to Y.cols().
  int ym = Y.rows();
  int yn = Y.cols();
  assert(R.minCoeff() >= 0);
  assert(R.maxCoeff() < ym);
  assert(C.minCoeff() >= 0);
  assert(C.maxCoeff() < yn);
#endif
  // (The unused reindexing-map scaffolding that used to live here has been
  // removed; a direct double loop is all this scatter needs.)
  for(int i = 0;i<xm;i++)
  {
    for(int j = 0;j<xn;j++)
    {
      Y(R(i),C(j)) = X(i,j);
    }
  }
}
// Scatter a sparse matrix X into sparse Y: Y(R(i),C(j)) = X(i,j) for each
// stored entry of X.
//
// Inputs:
//   X  xm by xn sparse matrix of values to scatter
//   R  list of xm row indices into Y
//   C  list of xn column indices into Y
// Output:
//   Y  sparse matrix updated in place; existing entries of Y that are not
//      overwritten by X are preserved.
IGL_INLINE void igl::slice_into(
  const Eigen::SparseMatrix<T>& X,
  const Eigen::Matrix<int,Eigen::Dynamic,1> & R,
  const Eigen::Matrix<int,Eigen::Dynamic,1> & C,
  Eigen::SparseMatrix<T>& Y)
{
  int xm = X.rows();
  int xn = X.cols();
  assert(R.size() == xm);
  assert(C.size() == xn);
#ifndef NDEBUG
  // BUG FIX: both bounds used to be checked against Y.size() (total element
  // count) instead of the respective dimension; use Y.rows()/Y.cols().
  int ym = Y.rows();
  int yn = Y.cols();
  assert(R.minCoeff() >= 0);
  assert(R.maxCoeff() < ym);
  assert(C.minCoeff() >= 0);
  assert(C.maxCoeff() < yn);
#endif
  // Seed a temporary dynamic sparse matrix with Y's current contents so
  // untouched entries survive the scatter.
  Eigen::DynamicSparseMatrix<T, Eigen::RowMajor> dyn_Y(Y);
  // Iterate over outside
  for(int k=0; k<X.outerSize(); ++k)
  {
    // Iterate over inside
    for(typename Eigen::SparseMatrix<T>::InnerIterator it (X,k); it; ++it)
    {
      dyn_Y.coeffRef(R(it.row()),C(it.col())) = it.value();
    }
  }
  Y = Eigen::SparseMatrix<T>(dyn_Y);
}
void rootBounds( double &lb, double &ub ) { Eigen::Matrix<double,deg,1> mycoef = coef.tail(deg).array().abs(); mycoef /= fabs(coef(0)); mycoef(0) += 1.; ub = mycoef.maxCoeff(); lb = -ub; }
TEST_F(MaterialLibSolidsKelvinVector6, DeviatoricSphericalProjections)
{
    auto const& P_dev = Invariants<size>::deviatoric_projection;
    auto const& P_sph = Invariants<size>::spherical_projection;

    double const eps = std::numeric_limits<double>::epsilon();

    // The projections are complementary: composing them in either order
    // annihilates every Kelvin vector, so both products must vanish.
    Eigen::Matrix<double, 6, 6> const prod_dev_sph = P_dev * P_sph;
    EXPECT_LT(prod_dev_sph.norm(), eps);
    EXPECT_LT(prod_dev_sph.maxCoeff(), eps);

    Eigen::Matrix<double, 6, 6> const prod_sph_dev = P_sph * P_dev;
    EXPECT_LT(prod_sph_dev.norm(), eps);
    EXPECT_LT(prod_sph_dev.maxCoeff(), eps);

    // Together the two projections decompose the identity exactly.
    EXPECT_EQ(P_dev + P_sph, (Eigen::Matrix<double, size, size>::Identity()));
}
// Return the softmax of the input vector:
//   softmax(v)(i) = exp(v(i)) / sum_j exp(v(j)).
// The maximum entry is subtracted before exponentiating so the largest
// exponent is 0, avoiding overflow for large inputs. The hand-written loops
// (rather than vectorized sum()) keep the operation count explicit for
// autodiff scalar types T, as the original comments noted.
inline Eigen::Matrix<T, Eigen::Dynamic, 1> softmax(const Eigen::Matrix<T, Eigen::Dynamic, 1>& v) {
  using std::exp;
  check_nonzero_size("softmax", "v", v);
  const int n = v.size();
  Eigen::Matrix<T, Eigen::Dynamic, 1> out(n);
  const T shift = v.maxCoeff();
  T total(0.0);
  for (int i = 0; i < n; ++i) {
    // extra work when v[i] == shift, but numerically safe
    out(i) = exp(v(i) - shift);
    total += out(i);  // extra work vs. sum() with autodiff
  }
  for (int i = 0; i < n; ++i)
    out(i) /= total;
  return out;
}
// Return the largest coefficient of m. Following the convention that the
// maximum over the empty set is -infinity, an empty matrix yields -inf.
// NOTE(review): converting -infinity to an integral T would be undefined —
// presumably this is only instantiated with floating-point (or autodiff)
// scalar types; confirm at the call sites.
inline T max(const Eigen::Matrix<T, R, C>& m) {
  if (m.size() > 0)
    return m.maxCoeff();
  return -std::numeric_limits<double>::infinity();
}
// Gather a sparse matrix: Y = X(R,C), i.e. Y(i,j) = X(R(i),C(j)).
//
// Inputs:
//   X  xm by xn sparse matrix
//   R  list of ym row indices into X (may repeat or omit rows)
//   C  list of yn column indices into X (may repeat or omit columns)
// Output:
//   Y  ym by yn sparse matrix of the selected entries
IGL_INLINE void igl::slice(
  const Eigen::SparseMatrix<TX>& X,
  const Eigen::Matrix<int,Eigen::Dynamic,1> & R,
  const Eigen::Matrix<int,Eigen::Dynamic,1> & C,
  Eigen::SparseMatrix<TY>& Y)
{
#if 1
  int xm = X.rows();
  int xn = X.cols();
  int ym = R.size();
  int yn = C.size();
  // special case when R or C is empty
  if(ym == 0 || yn == 0)
  {
    Y.resize(ym,yn);
    return;
  }
  assert(R.minCoeff() >= 0);
  assert(R.maxCoeff() < xm);
  assert(C.minCoeff() >= 0);
  assert(C.maxCoeff() < xn);
  // Build reindexing maps for columns and rows: RI[i] lists the output rows
  // fed by input row i (supports repeated and omitted indices).
  std::vector<std::vector<int> > RI;
  RI.resize(xm);
  for(int i = 0;i<ym;i++)
  {
    RI[R(i)].push_back(i);
  }
  std::vector<std::vector<int> > CI;
  CI.resize(xn);
  for(int i = 0;i<yn;i++)
  {
    CI[C(i)].push_back(i);
  }
  // Resize output
  Eigen::DynamicSparseMatrix<TY, Eigen::RowMajor> dyn_Y(ym,yn);
  // Take a guess at the number of nonzeros (this assumes uniform distribution
  // not banded or heavily diagonal).
  // BUG FIX: the previous integer division X.nonZeros()/(X.rows()*X.cols())
  // truncated to 0 for any proper sparse matrix, making the reserve a no-op;
  // compute the density in floating point (this also avoids int overflow in
  // X.rows()*X.cols() for large matrices).
  dyn_Y.reserve((int)(
    (double(X.nonZeros())/(double(X.rows())*double(X.cols()))) *
    (double(ym)*double(yn))));
  // Iterate over outside
  for(int k=0; k<X.outerSize(); ++k)
  {
    // Iterate over inside
    for(typename Eigen::SparseMatrix<TX>::InnerIterator it (X,k); it; ++it)
    {
      std::vector<int>::iterator rit, cit;
      for(rit = RI[it.row()].begin();rit != RI[it.row()].end(); rit++)
      {
        for(cit = CI[it.col()].begin();cit != CI[it.col()].end(); cit++)
        {
          dyn_Y.coeffRef(*rit,*cit) = it.value();
        }
      }
    }
  }
  Y = Eigen::SparseMatrix<TY>(dyn_Y);
#else
  // Alec: This is _not_ valid for arbitrary R,C since they don't necessary
  // representation a strict permutation of the rows and columns: rows or
  // columns could be removed or replicated. The removal of rows seems to be
  // handled here (although it's not clear if there is a performance gain when
  // the #removals >> #remains). If this is sufficiently faster than the
  // correct code above, one could test whether all entries in R and C are
  // unique and apply the permutation version if appropriate.
  //
  int xm = X.rows();
  int xn = X.cols();
  int ym = R.size();
  int yn = C.size();
  // special case when R or C is empty
  if(ym == 0 || yn == 0)
  {
    Y.resize(ym,yn);
    return;
  }
  assert(R.minCoeff() >= 0);
  assert(R.maxCoeff() < xm);
  assert(C.minCoeff() >= 0);
  assert(C.maxCoeff() < xn);
  // initialize row and col permutation vectors
  Eigen::VectorXi rowIndexVec = Eigen::VectorXi::LinSpaced(xm,0,xm-1);
  Eigen::VectorXi rowPermVec  = Eigen::VectorXi::LinSpaced(xm,0,xm-1);
  for(int i=0;i<ym;i++)
  {
    int pos = rowIndexVec.coeffRef(R(i));
    if(pos != i)
    {
      int& val = rowPermVec.coeffRef(i);
      std::swap(rowIndexVec.coeffRef(val),rowIndexVec.coeffRef(R(i)));
      std::swap(rowPermVec.coeffRef(i),rowPermVec.coeffRef(pos));
    }
  }
  Eigen::PermutationMatrix<Eigen::Dynamic,Eigen::Dynamic,int> rowPerm(rowIndexVec);

  Eigen::VectorXi colIndexVec = Eigen::VectorXi::LinSpaced(xn,0,xn-1);
  Eigen::VectorXi colPermVec  = Eigen::VectorXi::LinSpaced(xn,0,xn-1);
  for(int i=0;i<yn;i++)
  {
    int pos = colIndexVec.coeffRef(C(i));
    if(pos != i)
    {
      int& val = colPermVec.coeffRef(i);
      std::swap(colIndexVec.coeffRef(val),colIndexVec.coeffRef(C(i)));
      std::swap(colPermVec.coeffRef(i),colPermVec.coeffRef(pos));
    }
  }
  Eigen::PermutationMatrix<Eigen::Dynamic,Eigen::Dynamic,int> colPerm(colPermVec);

  Eigen::SparseMatrix<T> M = (rowPerm * X);
  Y = (M * colPerm).block(0,0,ym,yn);
#endif
}
// Recompute the ARAP-DOF solve data after the fixed degrees of freedom or the
// linear equality constraints change.
//
// Inputs:
//   fixed_dim  list of fixed degree-of-freedom indices (into the
//              m*dim*(dim+1) stacked transformation entries)
//   A_eq       sparse linear equality constraint matrix
// In/out:
//   data       precomputation struct; this fills data.M_FullSolve, data.CSM,
//              data.CSolveBlock1 (and data.Pi_1 when dynamics are enabled)
// Returns true on success.
IGL_INLINE bool igl::arap_dof_recomputation(
  const Eigen::Matrix<int,Eigen::Dynamic,1> & fixed_dim,
  const Eigen::SparseMatrix<double> & A_eq,
  ArapDOFData<LbsMatrixType, SSCALAR> & data)
{
  using namespace Eigen;
  typedef Matrix<SSCALAR, Dynamic, Dynamic> MatrixXS;
  // Q points either at data.Q directly, or at Qdyn when dynamics terms are
  // folded in below.
  LbsMatrixType * Q;
  LbsMatrixType Qdyn;
  if(data.with_dynamics)
  {
    // multiply by 1/timestep and to quadratic coefficients matrix
    // Might be missing a 0.5 here
    LbsMatrixType Q_copy = data.Q;
    Qdyn = Q_copy + (1.0/(data.h*data.h))*data.Mass_tilde;
    Q = &Qdyn;

    // This may/should be superfluous
    //printf("is_symmetric()\n");
    if(!is_symmetric(*Q))
    {
      //printf("Fixing symmetry...\n");
      // "Fix" symmetry by averaging Q with its transpose.
      // NOTE(review): this inner Q_copy shadows the outer Q_copy above —
      // legal, but easy to misread.
      LbsMatrixType QT = (*Q).transpose();
      LbsMatrixType Q_copy = *Q;
      *Q = 0.5*(Q_copy+QT);
      // Check that ^^^ this really worked. It doesn't always
      //assert(is_symmetric(*Q));
    }
  }else
  {
    Q = &data.Q;
  }

  assert((int)data.CSM_M.size() == data.dim);
  assert(A_eq.cols() == data.m*data.dim*(data.dim+1));
  data.fixed_dim = fixed_dim;
  // Fixed DOF indices must lie inside the stacked DOF vector.
  if(fixed_dim.size() > 0)
  {
    assert(fixed_dim.maxCoeff() < data.m*data.dim*(data.dim+1));
    assert(fixed_dim.minCoeff() >= 0);
  }
#ifdef EXTREME_VERBOSE
  cout<<"data.fixed_dim=["<<endl<<data.fixed_dim<<endl<<"]+1;"<<endl;
#endif
  // Compute dense solve matrix (alternative of matrix factorization)
  //printf("min_quad_dense_precompute()\n");
  MatrixXd Qfull;
  full(*Q, Qfull);
  MatrixXd A_eqfull;
  full(A_eq, A_eqfull);
  MatrixXd M_Solve;

  double timer0_start = get_seconds_hires();
  // LU is usable except in the effectively-2D case.
  bool use_lu = data.effective_dim != 2;
  //use_lu = false;
  //printf("use_lu: %s\n",(use_lu?"TRUE":"FALSE"));
  min_quad_dense_precompute(Qfull, A_eqfull, use_lu,M_Solve);
  double timer0_end = get_seconds_hires();
  verbose("Bob timing: %.20f\n", (timer0_end - timer0_start)*1000.0);

  // Precompute full solve matrix:
  const int fsRows = data.m * data.dim * (data.dim + 1); // 12 * number_of_bones
  const int fsCols1 = data.M_KG.cols(); // 9 * number_of_posConstraints
  const int fsCols2 = A_eq.rows(); // number_of_posConstraints
  data.M_FullSolve.resize(fsRows, fsCols1 + fsCols2);
  // note the magical multiplicative constant "-0.5", I've no idea why it has
  // to be there :)
  data.M_FullSolve <<
    (-0.5 * M_Solve.block(0, 0, fsRows, fsRows) * data.M_KG).template cast<SSCALAR>(),
    M_Solve.block(0, fsRows, fsRows, fsCols2).template cast<SSCALAR>();

  if(data.with_dynamics)
  {
    printf(
      "---------------------------------------------------------------------\n"
      "\n\n\nWITH DYNAMICS recomputation\n\n\n"
      "---------------------------------------------------------------------\n"
      );
    // Also need to save Π1 before it gets multiplied by Ktilde (aka M_KG)
    data.Pi_1 = M_Solve.block(0, 0, fsRows, fsRows).template cast<SSCALAR>();
  }

  // Precompute condensed matrices,
  // first CSM: cast the per-dimension CSM blocks to the solve scalar type
  // and condense them.
  std::vector<MatrixXS> CSM_M_SSCALAR;
  CSM_M_SSCALAR.resize(data.dim);
  for (int i=0; i<data.dim; i++) CSM_M_SSCALAR[i] = data.CSM_M[i].template cast<SSCALAR>();
  SSCALAR maxErr1 = condense_CSM(CSM_M_SSCALAR, data.m, data.dim, data.CSM);
  verbose("condense_CSM maxErr = %.15f (this should be close to zero)\n", maxErr1);
  assert(fabs(maxErr1) < 1e-5);

  // and then solveBlock1:
  // number of groups
  const int k = data.CSM_M[0].rows()/data.dim;
  MatrixXS SolveBlock1 = data.M_FullSolve.block(0, 0, data.M_FullSolve.rows(), data.dim * data.dim * k);
  SSCALAR maxErr2 = condense_Solve1(SolveBlock1, data.m, k, data.dim, data.CSolveBlock1);
  verbose("condense_Solve1 maxErr = %.15f (this should be close to zero)\n", maxErr2);
  assert(fabs(maxErr2) < 1e-5);

  return true;
}