template <typename Derived, class MatC>
IGL_INLINE void igl::cat(
  const int dim,
  const Eigen::MatrixBase<Derived> & A,
  const Eigen::MatrixBase<Derived> & B,
  MatC & C)
{
  assert(dim == 1 || dim == 2);
  // Special case if B or A is empty
  if(A.size() == 0)
  {
    C = B;
    return;
  }
  if(B.size() == 0)
  {
    C = A;
    return;
  }

  if(dim == 1)
  {
    assert(A.cols() == B.cols());
    C.resize(A.rows()+B.rows(),A.cols());
    C << A,B;
  }else if(dim == 2)
  {
    assert(A.rows() == B.rows());
    C.resize(A.rows(),A.cols()+B.cols());
    C << A,B;
  }else
  {
    fprintf(stderr,"cat.h: Error: Unsupported dimension %d\n",dim);
  }
}
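// Usage sketch for igl::cat above -- a hypothetical driver, not part of the
// library. Assumes <Eigen/Dense>, <cassert> and the definition above are in
// scope. dim == 1 stacks A on top of B; dim == 2 places them side by side.
void cat_example()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Ones(2,3);
  Eigen::MatrixXd B = Eigen::MatrixXd::Zero(1,3);
  Eigen::MatrixXd C;
  igl::cat(1,A,B,C);                       // vertical concatenation
  assert(C.rows() == 3 && C.cols() == 3);  // two rows of ones over one of zeros
}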
template <typename Derived1, typename Derived2, typename Derived3,
          typename Derived4, typename UnaryFunc>
void forward_propagation(Eigen::MatrixBase<Derived1> const &input,
                         Eigen::MatrixBase<Derived2> const &weight,
                         Eigen::MatrixBase<Derived3> const &bias,
                         Eigen::MatrixBase<Derived4> &output,
                         bool no_overlap = true,
                         UnaryFunc func = UnaryFunc())
{
  static_assert(std::is_same<typename Derived1::Scalar,
                             typename Derived2::Scalar>::value &&
                std::is_same<typename Derived2::Scalar,
                             typename Derived3::Scalar>::value &&
                std::is_same<typename Derived3::Scalar,
                             typename Derived4::Scalar>::value,
                "Data type of matrix input, weight, bias and output should be the same");

  if(input.rows() != 0 && weight.rows() != 0 && bias.rows() != 0){
    if(no_overlap){
      output.noalias() = weight * input;
    }else{
      output = weight * input;
    }

    using Scalar = typename Derived3::Scalar;
    using MatType = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
    using Mapper = Eigen::Map<const MatType, Eigen::Aligned>;

    // view the bias as a column vector and add it to every column
    Mapper bias_map(&bias(0, 0), bias.size());
    output.colwise() += bias_map;
    func(output);
  }
}
template <typename Derived_T>
inline Eigen::Matrix<typename Eigen::internal::traits<Derived_T>::Scalar, 3, 3>
crossMx(Eigen::MatrixBase<Derived_T> const & v)
{
  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Eigen::MatrixBase<Derived_T>, 3);
  assert((v.cols() == 3 && v.rows() == 1) || (v.rows() == 3 && v.cols() == 1));
  // linear indexing works for both row and column vectors; v(1,0) would be
  // out of range for a 1x3 row vector
  return crossMx(v(0), v(1), v(2));
}
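// Sanity check for crossMx above -- a hypothetical test, assuming the
// three-scalar crossMx(x,y,z) overload it delegates to builds the usual
// skew-symmetric matrix. Then crossMx(a) * b reproduces a.cross(b).
void crossMx_example()
{
  const Eigen::Vector3d a(1.0, -2.0, 3.0), b(0.5, 4.0, -1.0);
  assert((crossMx(a) * b - a.cross(b)).norm() < 1e-12);
}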
template <typename DerivedF, typename DerivedVF, typename DerivedNI>
IGL_INLINE void igl::vertex_triangle_adjacency(
  const Eigen::MatrixBase<DerivedF> & F,
  const int n,
  Eigen::PlainObjectBase<DerivedVF> & VF,
  Eigen::PlainObjectBase<DerivedNI> & NI)
{
  typedef Eigen::Matrix<typename DerivedVF::Scalar,Eigen::Dynamic,1> VectorXI;
  // vfd  #V list so that vfd(i) contains the vertex-face degree (number of
  // faces incident on vertex i)
  VectorXI vfd = VectorXI::Zero(n);
  for (int i = 0; i < F.rows(); i++)
  {
    for (int j = 0; j < 3; j++)
    {
      vfd[F(i,j)]++;
    }
  }
  igl::cumsum(vfd,1,NI);
  // Prepend a zero
  NI = (DerivedNI(n+1) << 0, NI).finished();
  // vfd now acts as a counter
  vfd = NI;

  VF.derived() = Eigen::VectorXi(3*F.rows());
  for (int i = 0; i < F.rows(); i++)
  {
    for (int j = 0; j < 3; j++)
    {
      VF[vfd[F(i,j)]] = i;
      vfd[F(i,j)]++;
    }
  }
}
template <typename DerivedV, typename DerivedF, typename DerivedZ, typename DerivedN>
IGL_INLINE void igl::per_face_normals(
  const Eigen::MatrixBase<DerivedV>& V,
  const Eigen::MatrixBase<DerivedF>& F,
  const Eigen::MatrixBase<DerivedZ> & Z,
  Eigen::PlainObjectBase<DerivedN> & N)
{
  N.resize(F.rows(),3);
  // loop over faces
  int Frows = F.rows();
#pragma omp parallel for if (Frows>10000)
  for(int i = 0; i < Frows;i++)
  {
    const Eigen::Matrix<typename DerivedV::Scalar, 1, 3> v1 =
      V.row(F(i,1)) - V.row(F(i,0));
    const Eigen::Matrix<typename DerivedV::Scalar, 1, 3> v2 =
      V.row(F(i,2)) - V.row(F(i,0));
    N.row(i) = v1.cross(v2);
    typename DerivedV::Scalar r = N.row(i).norm();
    if(r == 0)
    {
      // degenerate face: fall back to the user-supplied normal Z
      N.row(i) = Z;
    }else
    {
      N.row(i) /= r;
    }
  }
}
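// Usage sketch for igl::per_face_normals above -- an assumed driver, not from
// the original source. Z is the fallback written for degenerate (zero-area)
// faces, whose cross product has zero norm.
void per_face_normals_example(const Eigen::MatrixXd & V, const Eigen::MatrixXi & F)
{
  Eigen::MatrixXd N;
  const Eigen::RowVector3d Z(0,0,1);  // degenerate faces get +z
  igl::per_face_normals(V,F,Z,N);     // N is #F by 3 with unit rows
}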
template <typename DerivedA, typename DerivedB, typename DerivedDA, typename DerivedDB>
Eigen::Matrix<typename DerivedA::Scalar,
              matGradMultMatNumRows(DerivedA::RowsAtCompileTime, DerivedB::ColsAtCompileTime),
              DerivedDA::ColsAtCompileTime>
matGradMultMat(
    const Eigen::MatrixBase<DerivedA>& A,
    const Eigen::MatrixBase<DerivedB>& B,
    const Eigen::MatrixBase<DerivedDA>& dA,
    const Eigen::MatrixBase<DerivedDB>& dB) {
  assert(dA.cols() == dB.cols());
  const int nq = dA.cols();
  const int nq_at_compile_time = DerivedDA::ColsAtCompileTime;

  Eigen::Matrix<typename DerivedA::Scalar,
                matGradMultMatNumRows(DerivedA::RowsAtCompileTime, DerivedB::ColsAtCompileTime),
                DerivedDA::ColsAtCompileTime> ret(A.rows() * B.cols(), nq);
  for (int col = 0; col < B.cols(); col++) {
    auto block = ret.template block<DerivedA::RowsAtCompileTime, nq_at_compile_time>(
        col * A.rows(), 0, A.rows(), nq);

    // A * dB part:
    block.noalias() = A * dB.template block<DerivedA::ColsAtCompileTime, nq_at_compile_time>(
        col * A.cols(), 0, A.cols(), nq);

    for (int row = 0; row < B.rows(); row++) {
      // B * dA part:
      block.noalias() += B(row, col) *
          dA.template block<DerivedA::RowsAtCompileTime, nq_at_compile_time>(
              row * A.rows(), 0, A.rows(), nq);
    }
  }
  return ret;

  // much slower and requires eigen/unsupported:
  // return Eigen::kroneckerProduct(Eigen::MatrixXd::Identity(B.cols(), B.cols()), A) * dB +
  //        Eigen::kroneckerProduct(B.transpose(), Eigen::MatrixXd::Identity(A.rows(), A.rows())) * dA;
}
template <typename Derived>
MatrixSuccessCode LUPartialPivotDecompositionSuccessful(Eigen::MatrixBase<Derived> const& LU)
{
  #ifndef BERTINI_DISABLE_ASSERTS
    assert(LU.rows() == LU.cols() && "non-square matrix in LUPartialPivotDecompositionSuccessful");
    assert(LU.rows() > 0 && "empty matrix in LUPartialPivotDecompositionSuccessful");
  #endif

  // this loop won't test entry (0,0).  it's tested separately after.
  for (unsigned int ii = LU.rows()-1; ii > 0; ii--)
  {
    if (IsSmallValue(LU(ii,ii)))
    {
      return MatrixSuccessCode::SmallValue;
    }
    if (IsLargeChange(LU(ii-1,ii-1),LU(ii,ii)))
    {
      return MatrixSuccessCode::LargeChange;
    }
  }

  // this line is the reason for the above assert on non-empty matrix.
  if (IsSmallValue(LU(0,0)))
  {
    return MatrixSuccessCode::SmallValue;
  }

  return MatrixSuccessCode::Success;
}
template <typename Derived>
void normalizeVec(
    const Eigen::MatrixBase<Derived>& x,
    typename Derived::PlainObject& x_norm,
    typename Gradient<Derived, Derived::RowsAtCompileTime, 1>::type* dx_norm,
    typename Gradient<Derived, Derived::RowsAtCompileTime, 2>::type* ddx_norm) {
  typename Derived::Scalar xdotx = x.squaredNorm();
  typename Derived::Scalar norm_x = std::sqrt(xdotx);

  x_norm = x / norm_x;

  if (dx_norm) {
    dx_norm->setIdentity(x.rows(), x.rows());
    (*dx_norm) -= x * x.transpose() / xdotx;
    (*dx_norm) /= norm_x;

    if (ddx_norm) {
      auto dx_norm_transpose = transposeGrad(*dx_norm, x.rows());
      auto ddx_norm_times_norm =
          -matGradMultMat(x_norm, x_norm.transpose(), (*dx_norm), dx_norm_transpose);
      auto dnorm_inv = -x.transpose() / (xdotx * norm_x);
      (*ddx_norm) = ddx_norm_times_norm / norm_x;
      auto temp = (*dx_norm) * norm_x;
      int n = x.rows();
      for (int col = 0; col < n; col++) {
        auto column_as_matrix = (dnorm_inv(0, col) * temp);
        for (int row_block = 0; row_block < n; row_block++) {
          ddx_norm->block(row_block * n, col, n, 1) += column_as_matrix.col(row_block);
        }
      }
    }
  }
}
template <typename DerivedV, typename DerivedF, typename Scalar>
IGL_INLINE void igl::hessian(
  const Eigen::MatrixBase<DerivedV> & V,
  const Eigen::MatrixBase<DerivedF> & F,
  Eigen::SparseMatrix<Scalar>& H)
{
  typedef typename DerivedV::Scalar denseScalar;
  typedef typename Eigen::Matrix<denseScalar, Eigen::Dynamic, 1> VecXd;
  typedef typename Eigen::SparseMatrix<Scalar> SparseMat;
  typedef typename Eigen::DiagonalMatrix
                   <Scalar, Eigen::Dynamic, Eigen::Dynamic> DiagMat;

  int dim = V.cols();
  assert((dim==2 || dim==3) &&
         "The dimension of the vertices should be 2 or 3");

  // Construct the combined gradient matrix
  SparseMat G;
  igl::grad(Eigen::PlainObjectBase<DerivedV>(V),
            Eigen::PlainObjectBase<DerivedF>(F),
            G, false);
  SparseMat GG(F.rows(), dim*V.rows());
  GG.reserve(G.nonZeros());
  for(int i=0; i<dim; ++i)
    GG.middleCols(i*G.cols(),G.cols()) = G.middleRows(i*F.rows(),F.rows());
  SparseMat D;
  igl::repdiag(GG,dim,D);

  // Compute area matrix
  VecXd areas;
  igl::doublearea(V, F, areas);
  DiagMat A = (0.5*areas).replicate(dim,1).asDiagonal();

  // Compute FEM Hessian
  H = D.transpose()*A*G;
}
template <typename DerivedR, typename DerivedDR>
typename Gradient<Eigen::Matrix<typename DerivedR::Scalar, RPY_SIZE, 1>,
                  DerivedDR::ColsAtCompileTime>::type
drotmat2rpy(
    const Eigen::MatrixBase<DerivedR>& R,
    const Eigen::MatrixBase<DerivedDR>& dR) {
  EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Eigen::MatrixBase<DerivedR>,
                                           SPACE_DIMENSION, SPACE_DIMENSION);
  EIGEN_STATIC_ASSERT(Eigen::MatrixBase<DerivedDR>::RowsAtCompileTime == RotmatSize,
                      THIS_METHOD_IS_ONLY_FOR_MATRICES_OF_A_SPECIFIC_SIZE);

  const int nq = dR.cols();
  typedef typename DerivedR::Scalar Scalar;
  typedef typename Gradient<Eigen::Matrix<Scalar, RPY_SIZE, 1>,
                            DerivedDR::ColsAtCompileTime>::type ReturnType;
  ReturnType drpy(RPY_SIZE, nq);

  auto dR11_dq = getSubMatrixGradient(dR, 0, 0, R.rows());
  auto dR21_dq = getSubMatrixGradient(dR, 1, 0, R.rows());
  auto dR31_dq = getSubMatrixGradient(dR, 2, 0, R.rows());
  auto dR32_dq = getSubMatrixGradient(dR, 2, 1, R.rows());
  auto dR33_dq = getSubMatrixGradient(dR, 2, 2, R.rows());

  Scalar sqterm = R(2,1) * R(2,1) + R(2,2) * R(2,2);

  using namespace std;
  // droll_dq
  drpy.row(0) = (R(2, 2) * dR32_dq - R(2, 1) * dR33_dq) / sqterm;

  // dpitch_dq
  Scalar sqrt_sqterm = sqrt(sqterm);
  drpy.row(1) = (-sqrt_sqterm * dR31_dq +
                 R(2, 0) / sqrt_sqterm * (R(2, 1) * dR32_dq + R(2, 2) * dR33_dq)) /
                (R(2, 0) * R(2, 0) + R(2, 1) * R(2, 1) + R(2, 2) * R(2, 2));

  // dyaw_dq
  sqterm = R(0, 0) * R(0, 0) + R(1, 0) * R(1, 0);
  drpy.row(2) = (R(0, 0) * dR21_dq - R(1, 0) * dR11_dq) / sqterm;
  return drpy;
}
template <class DerivedA, class DerivedB>
bool allGreaterEquals(const Eigen::MatrixBase<DerivedA> & A,
                      const Eigen::MatrixBase<DerivedB> & B,
                      double tolerance = math::NUMERICAL_ZERO_DIFFERENCE)
{
  assertion(A.rows() == B.rows(), "Matrices with different number of rows can't be compared.");
  assertion(A.cols() == B.cols(), "Matrices with different number of cols can't be compared.");

  return ((A-B).array() >= tolerance).all();
}
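// Usage sketch for allGreaterEquals above -- an assumed caller, not from the
// original source: every entry of A must exceed the matching entry of B by at
// least `tolerance`.
void allGreaterEquals_example()
{
  Eigen::Matrix2d A, B;
  A << 1.0, 2.0, 3.0, 4.0;
  B << 0.0, 1.0, 2.0, 3.0;
  assert( allGreaterEquals(A, B, 0.5));  // every gap is 1.0 >= 0.5
  assert(!allGreaterEquals(A, B, 2.0));  // no gap reaches 2.0
}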
template <typename Derived>
void assert_size(const Eigen::MatrixBase<Derived> &X, int rows_expected, int cols_expected)
{
  common_assert_msg_ex(
      X.rows() == rows_expected && X.cols() == cols_expected,
      "matrix [row, col] mismatch" << std::endl
          << "actual: [" << X.rows() << ", " << X.cols() << "]" << std::endl
          << "expected: [" << rows_expected << ", " << cols_expected << "]",
      eigen_utilities::assert_error);
}
template <typename T1, typename T2, typename T3>
void ba81NormalQuad::layer::detectTwoTier(Eigen::ArrayBase<T1> &param,
                                          Eigen::MatrixBase<T2> &mean,
                                          Eigen::MatrixBase<T3> &cov)
{
  if (mean.rows() < 3) return;

  std::vector<int> orthogonal;

  // count the nonzero entries in each column of the covariance
  Eigen::Matrix<Eigen::DenseIndex, Eigen::Dynamic, 1>
    numCov((cov.array() != 0.0).matrix().colwise().count());
  std::vector<int> candidate;
  for (int fx=0; fx < numCov.rows(); ++fx) {
    if (numCov(fx) == 1) candidate.push_back(fx);
  }
  if (candidate.size() > 1) {
    std::vector<bool> mask(numItems());
    for (int cx=candidate.size() - 1; cx >= 0; --cx) {
      std::vector<bool> loading(numItems());
      for (int ix=0; ix < numItems(); ++ix) {
        loading[ix] = param(candidate[cx], itemsMap[ix]) != 0;
      }
      std::vector<bool> overlap(loading.size());
      std::transform(loading.begin(), loading.end(),
                     mask.begin(), overlap.begin(),
                     std::logical_and<bool>());
      if (std::find(overlap.begin(), overlap.end(), true) == overlap.end()) {
        std::transform(loading.begin(), loading.end(),
                       mask.begin(), mask.begin(),
                       std::logical_or<bool>());
        orthogonal.push_back(candidate[cx]);
      }
    }
  }
  std::reverse(orthogonal.begin(), orthogonal.end());
  if (orthogonal.size() == 1) orthogonal.clear();
  if (orthogonal.size() && orthogonal[0] != mean.rows() - int(orthogonal.size())) {
    mxThrow("Independent specific factors must be given after general dense factors");
  }
  numSpecific = orthogonal.size();
  if (numSpecific) {
    Sgroup.assign(numItems(), 0);
    for (int ix=0; ix < numItems(); ix++) {
      for (int dx=orthogonal[0]; dx < mean.rows(); ++dx) {
        if (param(dx, itemsMap[ix]) != 0) {
          Sgroup[ix] = dx - orthogonal[0];
          continue;
        }
      }
    }
    //Eigen::Map< Eigen::ArrayXi > foo(Sgroup.data(), param.cols());
    //mxPrintMat("sgroup", foo);
  }
}
template <typename DerivedV, typename DerivedF, typename DerivedN>
IGL_INLINE void igl::per_face_normals_stable(
  const Eigen::MatrixBase<DerivedV>& V,
  const Eigen::MatrixBase<DerivedF>& F,
  Eigen::PlainObjectBase<DerivedN> & N)
{
  using namespace Eigen;
  typedef Matrix<typename DerivedV::Scalar,1,3> RowVectorV3;
  typedef typename DerivedV::Scalar Scalar;
  const size_t m = F.rows();
  N.resize(F.rows(),3);
  // Grab all points
  for(size_t f = 0;f<m;f++)
  {
    const RowVectorV3 p0 = V.row(F(f,0));
    const RowVectorV3 p1 = V.row(F(f,1));
    const RowVectorV3 p2 = V.row(F(f,2));
    const RowVectorV3 n0 = (p1 - p0).cross(p2 - p0);
    const RowVectorV3 n1 = (p2 - p1).cross(p0 - p1);
    const RowVectorV3 n2 = (p0 - p2).cross(p1 - p2);
    // careful sum
    for(int d = 0;d<3;d++)
    {
      // This is a little _silly_ in terms of complexity, but its recursive
      // implementation is clean looking...
      const std::function<Scalar(Scalar,Scalar,Scalar)> sum3 =
        [&sum3](Scalar a, Scalar b, Scalar c)->Scalar
        {
          if(fabs(c)>fabs(a))
          {
            return sum3(c,b,a);
          }
          // c < a
          if(fabs(c)>fabs(b))
          {
            return sum3(a,c,b);
          }
          // c < a, c < b
          if(fabs(b)>fabs(a))
          {
            return sum3(b,a,c);
          }
          return (a+b)+c;
        };
      N(f,d) = sum3(n0(d),n1(d),n2(d));
    }
    // sum better not be zero, or else NaN
    N.row(f) /= N.row(f).norm();
  }
}
template <typename Derived1, typename Derived2>
static void run(float a,
                const Eigen::MatrixBase<Derived2> & A,
                const Eigen::MatrixBase<Derived1> & x,
                float b,
                Eigen::MatrixBase<Derived1> & y)
{
  EIGEN_STATIC_ASSERT(sizeof(PREC) == sizeof(typename Derived1::Scalar),
    YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY);
  ASSERTMSG(A.cols() == x.rows() && A.rows() == y.rows(),
            "ERROR: Vector/Matrix wrong dimension");

  // computes y = a * A * x + b * y
#if USE_INTEL_BLAS == 1
  CBLAS_ORDER order;
  CBLAS_TRANSPOSE trans;
  if(Derived1::Flags & Eigen::RowMajorBit) {
    order = CblasRowMajor;
  } else {
    order = CblasColMajor;
  }
  trans = CblasNoTrans;

  mkl_set_dynamic(false);
  mkl_set_num_threads(BLAS_NUM_THREADS);
  // single-precision gemv takes float pointers (the casts to double* in the
  // original were a type mismatch)
  cblas_sgemv(order, trans,
              A.rows(), A.cols(),
              a,
              const_cast<float*>(&(A.operator()(0,0))), A.outerStride(),
              const_cast<float*>(&(x.operator()(0,0))), 1,
              b,
              &(y.operator()(0,0)), 1);
#else
#if USE_GOTO_BLAS == 1
  char trans = 'N';
  BLAS_INT idx = 1;
  BLAS_INT m = A.rows();
  BLAS_INT n = A.cols();
  sgemv(&trans, &m, &n,
        &a, &(A.operator()(0,0)), &m,
        &(x.operator()(0,0)), &idx,
        &b, &(y.operator()(0,0)), &idx);
#else
  ASSERTMSG(false, "No implementation for BLAS defined!");
#endif
#endif
}
template <typename data1__, typename data2__, typename data3__,
          typename data4__, typename data5__>
void least_squares_eqcon(Eigen::MatrixBase<data1__> &x,
                         const Eigen::MatrixBase<data2__> &A,
                         const Eigen::MatrixBase<data3__> &b,
                         const Eigen::MatrixBase<data4__> &B,
                         const Eigen::MatrixBase<data5__> &d)
{
  typedef Eigen::Matrix<typename Eigen::MatrixBase<data4__>::Scalar, Eigen::Dynamic, Eigen::Dynamic> B_matrixD;
  B_matrixD Q, R, temp_B, temp_R;
  typedef Eigen::Matrix<typename Eigen::MatrixBase<data2__>::Scalar, Eigen::Dynamic, Eigen::Dynamic> A_matrixD;
  A_matrixD A1, A2, temp_A;
  typedef Eigen::Matrix<typename Eigen::MatrixBase<data5__>::Scalar, Eigen::Dynamic, Eigen::Dynamic> d_matrixD;
  d_matrixD y;
  typedef Eigen::Matrix<typename Eigen::MatrixBase<data3__>::Scalar, Eigen::Dynamic, Eigen::Dynamic> b_matrixD;
  b_matrixD z, rhs;
  typedef Eigen::Matrix<typename Eigen::MatrixBase<data1__>::Scalar, Eigen::Dynamic, Eigen::Dynamic> x_matrixD;
  x_matrixD x_temp(A.cols(), b.cols());

  typename A_matrixD::Index p(B.rows()), n(A.cols());
#ifdef DEBUG
  typename A_matrixD::Index m(b.rows());
#endif

  // build Q and R
  Eigen::HouseholderQR<B_matrixD> qr(B.transpose());
  Q = qr.householderQ();
  temp_B = qr.matrixQR();
  temp_R = Eigen::TriangularView<B_matrixD, Eigen::Upper>(temp_B);
  R = temp_R.topRows(p);
  assert((R.rows()==p) && (R.cols()==p));
  assert((Q.rows()==n) && (Q.cols()==n));

  // build A1 and A2
  temp_A = A*Q;
  A1 = temp_A.leftCols(p);
  A2 = temp_A.rightCols(n-p);
#ifdef DEBUG
  assert((A1.rows()==m) && (A1.cols()==p));
  assert((A2.rows()==m) && (A2.cols()==n-p));
#endif
  assert(A1.cols()==p);
  assert(A2.cols()==n-p);

  // solve for y
  y = R.transpose().lu().solve(d);

  // setup the unconstrained optimization
  rhs = b - A1*y;
  least_squares_uncon(z, A2, rhs);

  // build the solution
  x_temp.topRows(p) = y;
  x_temp.bottomRows(n-p) = z;
  x = Q*x_temp;
}
template <typename Derived>
typename Derived::PlainObject transposeGrad(
    const Eigen::MatrixBase<Derived>& dX, int rows_X) {
  typename Derived::PlainObject dX_transpose(dX.rows(), dX.cols());
  int numel = dX.rows();
  int index = 0;
  for (int i = 0; i < numel; i++) {
    dX_transpose.row(i) = dX.row(index);
    index += rows_X;
    if (index >= numel) {
      index = (index % numel) + 1;
    }
  }
  return dX_transpose;
}
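// Illustration for transposeGrad above -- a hypothetical check, not from the
// original source. dX stores d vec(X)/dq with one row per entry of X in
// column-major order, so transposing X only permutes the rows of dX.
void transposeGrad_example()
{
  Eigen::Matrix<double,6,1> dX;
  dX << 0,1,2,3,4,5;                      // d vec(X)/dq for a 2x3 X, one q
  const auto dXt = transposeGrad(dX, 2);  // rows_X == 2
  Eigen::Matrix<double,6,1> expected;
  expected << 0,2,4,1,3,5;                // column-major order of X^T
  assert((dXt - expected).norm() == 0);
}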
template <typename DerivedDA, typename Derivedb>
Eigen::Matrix<typename DerivedDA::Scalar,
              matGradMultNumRows(DerivedDA::RowsAtCompileTime, Derivedb::RowsAtCompileTime),
              DerivedDA::ColsAtCompileTime>
matGradMult(const Eigen::MatrixBase<DerivedDA>& dA, const Eigen::MatrixBase<Derivedb>& b) {
  const int nq = dA.cols();
  assert(b.cols() == 1);
  assert(dA.rows() % b.rows() == 0);
  const int A_rows = dA.rows() / b.rows();

  Eigen::Matrix<typename DerivedDA::Scalar,
                matGradMultNumRows(DerivedDA::RowsAtCompileTime, Derivedb::RowsAtCompileTime),
                DerivedDA::ColsAtCompileTime> ret(A_rows, nq);
  ret.setZero();
  for (int row = 0; row < b.rows(); row++) {
    ret += b(row, 0) * dA.block(row * A_rows, 0, A_rows, nq);
  }
  return ret;
}
template <typename T>
void printEigen(const Eigen::MatrixBase<T> &b)
{
  for (int i = 0; i < b.rows(); ++i) {
    printf("(");
    for (int j = 0; j < b.cols(); ++j) {
      if (j > 0) {
        printf(",");
      }
      printf("%.3f", static_cast<double>(b(i, j)));
    }
    printf(")%s\n", i + 1 < b.rows() ? "," : "");
  }
}
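// Usage sketch for printEigen above -- an assumed driver, not from the
// original source. Each row prints as a parenthesized, comma-separated tuple;
// every row except the last gets a trailing comma:
//   (1.000,2.000),
//   (3.000,4.000)
void printEigen_example()
{
  Eigen::Matrix2f m;
  m << 1, 2, 3, 4;
  printEigen(m);
}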
template <typename DerivedL, typename DerivedK>
IGL_INLINE void igl::internal_angles_using_edge_lengths(
  const Eigen::MatrixBase<DerivedL>& L,
  Eigen::PlainObjectBase<DerivedK> & K)
{
  // Note:
  //   Usage of internal_angles_using_squared_edge_lengths() is preferred to
  //   internal_angles_using_edge_lengths().
  //   This function is deprecated and probably will be removed in future versions.
  typedef typename DerivedL::Index Index;
  assert(L.cols() == 3 && "Edge-lengths should come from triangles");
  const Index m = L.rows();
  K.resize(m,3);
  parallel_for(
    m,
    [&L,&K](const Index f)
    {
      for(size_t d = 0;d<3;d++)
      {
        const auto & s1 = L(f,d);
        const auto & s2 = L(f,(d+1)%3);
        const auto & s3 = L(f,(d+2)%3);
        // law of cosines: angle opposite edge s1
        K(f,d) = acos((s3*s3 + s2*s2 - s1*s1)/(2.*s3*s2));
      }
    },
    1000l);
}
template <typename DerivedA>
bool checkRange(const Eigen::MatrixBase<DerivedA>& mm,
                typename DerivedA::Scalar minVal,
                typename DerivedA::Scalar maxVal)
{
  assert(minVal < maxVal);

  // Go through each element in the matrix and ensure they are not
  // NaN and fall within the specified min and max values.
  for (int ii = 0; ii < mm.rows(); ii++)
  {
    for (int jj = 0; jj < mm.cols(); jj++)
    {
      if (std::isnan(mm(ii, jj)))
      {
        std::stringstream ss;
        ss << "NaN detected at index (" << ii << ", " << jj << "), returning false!";
        ROS_WARN_STREAM(ss.str());
        return false;
      }

      if (mm(ii, jj) > maxVal || mm(ii, jj) < minVal)
      {
        std::stringstream ss;
        ss << "Value out of range at index (" << ii << ", " << jj << "), returning false.\n"
              " - value = " << mm(ii, jj) << "\n"
              " - minVal = " << minVal << "\n"
              " - maxVal = " << maxVal;
        ROS_WARN_STREAM(ss.str());
        return false;
      }
    }
  }
  return true;
}
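// Usage sketch for checkRange above -- an assumed caller, not from the
// original source (needs a ROS logging context for ROS_WARN_STREAM and
// <limits> for the NaN): rejects NaNs and entries outside [minVal, maxVal].
void checkRange_example()
{
  Eigen::Vector3d v(0.1, 0.5, 0.9);
  assert(checkRange(v, 0.0, 1.0));             // all entries within range
  v(1) = std::numeric_limits<double>::quiet_NaN();
  assert(!checkRange(v, 0.0, 1.0));            // NaN is logged and rejected
}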
template <typename DerivedL, typename DerivedA>
IGL_INLINE void igl::face_areas(
  const Eigen::MatrixBase<DerivedL>& L,
  const typename DerivedL::Scalar doublearea_nan_replacement,
  Eigen::PlainObjectBase<DerivedA>& A)
{
  using namespace Eigen;
  assert(L.cols() == 6);
  const int m = L.rows();
  // (unsigned) face Areas (opposite vertices: 1 2 3 4)
  Matrix<typename DerivedA::Scalar,Dynamic,1>
    A0(m,1), A1(m,1), A2(m,1), A3(m,1);
  Matrix<typename DerivedA::Scalar,Dynamic,3>
    L0(m,3), L1(m,3), L2(m,3), L3(m,3);
  L0 << L.col(1), L.col(2), L.col(3);
  L1 << L.col(0), L.col(2), L.col(4);
  L2 << L.col(0), L.col(1), L.col(5);
  L3 << L.col(3), L.col(4), L.col(5);
  doublearea(L0,doublearea_nan_replacement,A0);
  doublearea(L1,doublearea_nan_replacement,A1);
  doublearea(L2,doublearea_nan_replacement,A2);
  doublearea(L3,doublearea_nan_replacement,A3);
  A.resize(m,4);
  A.col(0) = 0.5*A0;
  A.col(1) = 0.5*A1;
  A.col(2) = 0.5*A2;
  A.col(3) = 0.5*A3;
}
template <class Derived>
double SLHA_io::read_vector(const std::string& block_name,
                            Eigen::MatrixBase<Derived>& vector) const
{
   if (vector.cols() != 1)
      throw SetupError("Vector has more than 1 column");

   auto block = data.find(data.cbegin(), data.cend(), block_name);

   const int rows = vector.rows();
   double scale = 0.;

   while (block != data.cend()) {
      for (const auto& line: *block) {
         if (!line.is_data_line()) {
            // read scale from block definition
            if (line.size() > 3 &&
                to_lower(line[0]) == "block" && line[2] == "Q=")
               scale = convert_to<double>(line[3]);
            continue;
         }

         if (line.size() >= 2) {
            const int i = convert_to<int>(line[0]) - 1;
            if (0 <= i && i < rows) {
               const double value = convert_to<double>(line[1]);
               vector(i,0) = value;
            }
         }
      }

      ++block;
      block = data.find(block, data.cend(), block_name);
   }

   return scale;
}
template <class Derived>
void SLHA_io::set_block_imag(const std::string& name,
                             const Eigen::MatrixBase<Derived>& matrix,
                             const std::string& symbol, double scale)
{
   std::ostringstream ss;
   ss << "Block " << name;
   if (scale != 0.)
      ss << " Q= " << FORMAT_SCALE(scale);
   ss << '\n';

   const int rows = matrix.rows();
   const int cols = matrix.cols();
   for (int i = 1; i <= rows; ++i) {
      if (cols == 1) {
         ss << boost::format(vector_formatter) % i % Im(matrix(i-1,0))
            % ("Im(" + symbol + "(" + ToString(i) + "))");
      } else {
         for (int k = 1; k <= cols; ++k) {
            ss << boost::format(mixing_matrix_formatter) % i % k
               % Im(matrix(i-1,k-1))
               % ("Im(" + symbol + "(" + ToString(i) + "," + ToString(k) + "))");
         }
      }
   }

   set_block(ss);
}
template <typename DerivedP, typename DerivedS, typename DerivedD,
          typename Derivedt, typename DerivedsqrD>
IGL_INLINE void igl::project_to_line_segment(
  const Eigen::MatrixBase<DerivedP> & P,
  const Eigen::MatrixBase<DerivedS> & S,
  const Eigen::MatrixBase<DerivedD> & D,
  Eigen::PlainObjectBase<Derivedt> & t,
  Eigen::PlainObjectBase<DerivedsqrD> & sqrD)
{
  project_to_line(P,S,D,t,sqrD);
  const int np = P.rows();
  // loop over points and fix those that projected beyond endpoints
#pragma omp parallel for if (np>10000)
  for(int p = 0;p<np;p++)
  {
    const DerivedP Pp = P.row(p);
    if(t(p)<0)
    {
      sqrD(p) = (Pp-S).squaredNorm();
      t(p) = 0;
    }else if(t(p)>1)
    {
      sqrD(p) = (Pp-D).squaredNorm();
      t(p) = 1;
    }
  }
}
template <typename DerivedW>
IGL_INLINE bool igl::writeDMAT(
  const std::string file_name,
  const Eigen::MatrixBase<DerivedW> & W,
  const bool ascii)
{
  FILE * fp = fopen(file_name.c_str(),"wb");
  if(fp == NULL)
  {
    fprintf(stderr,"IOError: writeDMAT() could not open %s...",file_name.c_str());
    return false;
  }
  if(ascii)
  {
    // first line contains number of rows and number of columns
    fprintf(fp,"%d %d\n",(int)W.cols(),(int)W.rows());
    // Loop over columns slowly
    for(int j = 0;j < W.cols();j++)
    {
      // loop over rows (down columns) quickly
      for(int i = 0;i < W.rows();i++)
      {
        fprintf(fp,"%0.17lg\n",(double)W(i,j));
      }
    }
  }else
  {
    // an ascii header of "0 0" signals that a binary block follows
    fprintf(fp,"0 0\n");
    // next line contains the dimensions of the binary part
    fprintf(fp,"%d %d\n",(int)W.cols(),(int)W.rows());
    // reader assumes the binary part is double precision
    Eigen::MatrixXd Wd = W.template cast<double>();
    fwrite(Wd.data(),sizeof(double),Wd.size(),fp);
  }
  fclose(fp);
  return true;
}
template <typename C>
template <typename DERIVED>
void ErrorTermFs<C>::setInvR(const Eigen::MatrixBase<DERIVED>& invR)
{
  SM_ASSERT_EQ(Exception, invR.rows(), invR.cols(),
               "The covariance matrix must be square");
  SM_ASSERT_EQ(Exception, invR.rows(), (int)dimension(),
               "The covariance matrix does not match the size of the error");
  // http://eigen.tuxfamily.org/dox-devel/classEigen_1_1LDLT.html#details
  // LDLT seems to work on positive semidefinite matrices.
  sm::eigen::computeMatrixSqrt(invR, _sqrtInvR);
}
template <typename Derived>
void Precision(Eigen::MatrixBase<Derived> & v, unsigned prec)
{
  using bertini::Precision;
  for (int ii=0; ii<v.rows(); ++ii)
    for (int jj=0; jj<v.cols(); ++jj)
      Precision(v(ii,jj),prec);
}
template<typename Rhs>
inline const Eigen::internal::solve_retval<MyJacobiPreconditioner, Rhs>
solve(const Eigen::MatrixBase<Rhs>& b) const
{
  eigen_assert(m_isInitialized && "MyJacobiPreconditioner is not initialized.");
  eigen_assert(m_invdiag.size() == b.rows() &&
               "MyJacobiPreconditioner::solve(): invalid number of rows of the right hand side matrix b");
  return Eigen::internal::solve_retval<MyJacobiPreconditioner, Rhs>(*this, b.derived());
}
template <typename DerivedV, typename DerivedF, typename IndexType>
IGL_INLINE void igl::vertex_triangle_adjacency(
  const Eigen::MatrixBase<DerivedV>& V,
  const Eigen::MatrixBase<DerivedF>& F,
  std::vector<std::vector<IndexType> >& VF,
  std::vector<std::vector<IndexType> >& VFi)
{
  return vertex_triangle_adjacency(V.rows(),F,VF,VFi);
}