/// Solve the linear system Ax = b, with A being the
/// combined derivative matrix of the residual and b
/// being the residual itself.
/// \param[in] residual   residual object containing A and b.
/// \return               the solution x
NewtonIterationBlackoilSimple::SolutionVector
NewtonIterationBlackoilSimple::computeNewtonIncrement(const LinearisedBlackoilResidual& residual) const
{
    typedef LinearisedBlackoilResidual::ADB ADB;
    const int np = residual.material_balance_eq.size();
    ADB mass_res = residual.material_balance_eq[0];
    for (int phase = 1; phase < np; ++phase) {
        mass_res = vertcat(mass_res, residual.material_balance_eq[phase]);
    }
    const ADB well_res = vertcat(residual.well_flux_eq, residual.well_eq);
    const ADB total_residual = collapseJacs(vertcat(mass_res, well_res));

    Eigen::SparseMatrix<double, Eigen::RowMajor> matr;
    total_residual.derivative()[0].toSparse(matr);

    SolutionVector dx(SolutionVector::Zero(total_residual.size()));
    Opm::LinearSolverInterface::LinearSolverReport rep
        = linsolver_->solve(matr.rows(), matr.nonZeros(),
                            matr.outerIndexPtr(), matr.innerIndexPtr(), matr.valuePtr(),
                            total_residual.value().data(), dx.data(),
                            parallelInformation_);

    // store iterations
    iterations_ = rep.iterations;

    if (!rep.converged) {
        OPM_THROW(LinearSolverProblem,
                  "FullyImplicitBlackoilSolver::solveJacobianSystem(): "
                  "Linear solver convergence failure.");
    }
    return dx;
}
Eigen::SparseMatrix<double> Condi2Joint(Eigen::SparseMatrix<double> Condi, Eigen::SparseVector<double> Pa)
{
    // second dimension of Condi is the parent
    Eigen::SparseMatrix<double> Joint;
    Joint.resize(Condi.rows(), Condi.cols());

    for (int cols = 0; cols < Condi.cols(); cols++)
    {
        Eigen::SparseVector<double> tmp_vec = Condi.block(0, cols, Condi.rows(), 1) * Pa.coeff(cols);
        for (int id_rows = 0; id_rows < tmp_vec.size(); id_rows++)
        {
            Joint.coeffRef(id_rows, cols) = tmp_vec.coeff(id_rows);
        }
    }
    Joint.prune(TOLERANCE);
    return Joint;
}
// Compute (a0 ⊗ a1) * y without forming the Kronecker product explicitly.
Eigen::VectorXd KronProdSPMat2(
    Eigen::SparseMatrix<double, Eigen::RowMajor> a0,
    Eigen::SparseMatrix<double, Eigen::RowMajor> a1,
    Eigen::VectorXd y)
{
    signal(SIGSEGV, handler);   // install our handler

    Eigen::VectorXd retvec;
    retvec.setZero(a0.rows() * a1.rows());

    // loop rows a0
    for (int row_idx0 = 0; row_idx0 < a0.outerSize(); ++row_idx0) {
        int row_offset1 = row_idx0;
        row_offset1 *= a1.rows();

        // loop rows a1
        for (int row_idx1 = 0; row_idx1 < a1.outerSize(); ++row_idx1) {

            // loop cols a0 (non-zero elements only)
            for (Eigen::SparseMatrix<double, Eigen::RowMajor>::InnerIterator it0(a0, row_idx0); it0; ++it0) {
                int col_offset1 = it0.index();
                col_offset1 *= a1.innerSize();
                double factor1 = it0.value();

                // loop cols a1
                for (Eigen::SparseMatrix<double, Eigen::RowMajor>::InnerIterator it1(a1, row_idx1); it1; ++it1) {
                    retvec(row_offset1 + row_idx1) += factor1 * it1.value() * y(col_offset1 + it1.index());
                }
            }
        }
    }
    return retvec;
}
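// Hedged usage sketch (not part of the original source): checks KronProdSPMat2 against
// an explicitly formed Kronecker product on small random inputs. Assumes Eigen's
// unsupported KroneckerProduct module is available and that KronProdSPMat2 (above) is
// in scope; checkKronProdSPMat2 is a hypothetical helper name.
#include <unsupported/Eigen/KroneckerProduct>
#include <iostream>

void checkKronProdSPMat2()
{
    Eigen::MatrixXd d0 = Eigen::MatrixXd::Random(3, 4);
    Eigen::MatrixXd d1 = Eigen::MatrixXd::Random(2, 5);
    Eigen::VectorXd y  = Eigen::VectorXd::Random(d0.cols() * d1.cols());

    Eigen::SparseMatrix<double, Eigen::RowMajor> a0 = d0.sparseView();
    Eigen::SparseMatrix<double, Eigen::RowMajor> a1 = d1.sparseView();

    // Reference: form (d0 ⊗ d1) densely, then multiply by y.
    Eigen::MatrixXd K = Eigen::kroneckerProduct(d0, d1);
    Eigen::VectorXd reference = K * y;

    // Matrix-free version under test.
    Eigen::VectorXd fast = KronProdSPMat2(a0, a1, y);

    std::cout << "max abs error: " << (reference - fast).cwiseAbs().maxCoeff() << "\n";
}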
/// return the extremal eigenvalues of Ax=cBx
std::pair<double, double> generalized_extreme_eigenvalues(const Eigen::SparseMatrix<double> &Ain,
                                                          const Eigen::SparseMatrix<double> &Bin)
{
    assert(Ain.rows() == Ain.cols());
    assert(Bin.rows() == Bin.cols());
    assert(Ain.rows() == Bin.rows());
    assert(Ain.isCompressed());
    assert(Bin.isCompressed());
    const int N = static_cast<int>(Ain.rows());

    /* mkl_sparse_d_gv input parameters */
    char which = 'S';  /* Which eigenvalues to calculate. ('L' - largest (algebraic) eigenvalues,
                          'S' - smallest (algebraic) eigenvalues) */
    int pm[128];       /* This array is used to pass various parameters to
                          Extended Eigensolver Extensions routines. */
    int k0 = 1;        /* Desired number of max/min eigenvalues */

    /* mkl_sparse_d_gv output parameters */
    int k;             /* Number of eigenvalues found (might be less than k0). */
    double E_small[3]; /* Eigenvalues */
    double E_large[3]; /* Eigenvalues */
    double X[3];       /* Eigenvectors */
    double res[3];     /* Residual */

    /* Local variables */
    int compute_vectors = 0; /* Flag to compute eigenvectors */
    int tol = 7;             /* Tolerance */

    /* Sparse BLAS IE variables */
    sparse_status_t status;

    ConvertToMklResult A = to_mkl(Ain, status); // TODO: check A.status;
    ConvertToMklResult B = to_mkl(Bin, status); // TODO: check B.status;

    /* Step 2. Call mkl_sparse_ee_init to define default input values */
    mkl_sparse_ee_init(pm);
    pm[1] = tol;             /* Set tolerance */
    pm[6] = compute_vectors;

    /* Step 3. Solve the generalized eigenvalue problem Ax = cBx. */
    which = 'S';
    const int infoS = mkl_sparse_d_gv(&which, pm, A.matrix, A.descr, B.matrix, B.descr,
                                      k0, &k, E_small, X, res);
    assert(infoS == 0);

    which = 'L';
    const int infoL = mkl_sparse_d_gv(&which, pm, A.matrix, A.descr, B.matrix, B.descr,
                                      k0, &k, E_large, X, res);
    assert(infoL == 0);

    mkl_sparse_destroy(A.matrix);
    mkl_sparse_destroy(B.matrix);

    return {E_small[0], E_large[0]}; // todo: return the right thing
}
IGL_INLINE void igl::components(
  const Eigen::SparseMatrix<AScalar> & A,
  Eigen::PlainObjectBase<DerivedC> & C,
  Eigen::PlainObjectBase<Derivedcounts> & counts)
{
  using namespace Eigen;
  using namespace std;
  assert(A.rows() == A.cols() && "A should be square.");
  const size_t n = A.rows();
  Array<bool,Dynamic,1> seen = Array<bool,Dynamic,1>::Zero(n,1);
  C.resize(n,1);
  typename DerivedC::Scalar id = 0;
  vector<typename Derivedcounts::Scalar> vcounts;
  // breadth first search
  for(int k=0; k<A.outerSize(); ++k)
  {
    if(seen(k))
    {
      continue;
    }
    queue<int> Q;
    Q.push(k);
    vcounts.push_back(0);
    while(!Q.empty())
    {
      const int f = Q.front();
      Q.pop();
      if(seen(f))
      {
        continue;
      }
      seen(f) = true;
      C(f,0) = id;
      vcounts[id]++;
      // Iterate over inside
      for(typename SparseMatrix<AScalar>::InnerIterator it (A,f); it; ++it)
      {
        const int g = it.index();
        if(!seen(g) && it.value())
        {
          Q.push(g);
        }
      }
    }
    id++;
  }
  assert((size_t) id == vcounts.size());
  const size_t ncc = vcounts.size();
  assert((size_t)C.maxCoeff()+1 == ncc);
  counts.resize(ncc,1);
  for(size_t i = 0;i<ncc;i++)
  {
    counts(i) = vcounts[i];
  }
}
matrix<Type> invertSparseMatrix(Eigen::SparseMatrix<Type> A){
  matrix<Type> I(A.rows(), A.cols());
  I.setIdentity();
  Eigen::SimplicialLDLT< Eigen::SparseMatrix<Type> > ldlt(A);
  matrix<Type> ans = ldlt.solve(I);
  return ans;
}
ColCompressedMatrix convert_from_Eigen(const Eigen::SparseMatrix<double> &m)
{
    assert(m.rows() == m.cols());
    assert(m.isCompressed());
    return ColCompressedMatrix(m.rows(), m.nonZeros(), m.valuePtr(),
                               m.outerIndexPtr(), m.innerIndexPtr());
}
IGL_INLINE void igl::matlab::prepare_lhs_double(
  const Eigen::SparseMatrix<Vtype> & M,
  mxArray *plhs[])
{
  using namespace std;
  const int m = M.rows();
  const int n = M.cols();
  // THIS WILL NOT WORK FOR ROW-MAJOR
  assert(n==M.outerSize());
  const int nzmax = M.nonZeros();
  plhs[0] = mxCreateSparse(m, n, nzmax, mxREAL);
  mxArray * mx_data = plhs[0];
  // Copy data immediately
  double * pr = mxGetPr(mx_data);
  mwIndex * ir = mxGetIr(mx_data);
  mwIndex * jc = mxGetJc(mx_data);
  // Iterate over outside
  int k = 0;
  for(int j=0; j<M.outerSize(); j++)
  {
    jc[j] = k;
    // Iterate over inside
    for(typename Eigen::SparseMatrix<Vtype>::InnerIterator it (M,j); it; ++it)
    {
      // copy (cast to double)
      pr[k] = it.value();
      ir[k] = it.row();
      k++;
    }
  }
  jc[M.outerSize()] = k;
}
void place::erodeSparse(const Eigen::SparseMatrix<double> &src,
                        Eigen::SparseMatrix<double> &dst) {
  dst = Eigen::SparseMatrix<double>(src.rows(), src.cols());
  std::vector<Eigen::Triplet<double>> tripletList;
  Eigen::MatrixXd srcNS = Eigen::MatrixXd(src);

  for (int i = 0; i < srcNS.cols(); ++i) {
    for (int j = 0; j < srcNS.rows(); ++j) {
      double value = 0.0;
      for (int k = -1; k < 1; ++k) {
        for (int l = -1; l < 1; ++l) {
          if (i + k < 0 || i + k >= srcNS.cols() ||
              j + l < 0 || j + l >= srcNS.rows())
            continue;
          else
            value = std::max(value, srcNS(j + l, i + k));
        }
      }

      if (value != 0)
        tripletList.push_back(Eigen::Triplet<double>(j, i, value));
    }
  }
  dst.setFromTriplets(tripletList.begin(), tripletList.end());
}
IGL_INLINE void igl::min(
  const Eigen::SparseMatrix<AType> & A,
  const int dim,
  Eigen::PlainObjectBase<DerivedB> & B,
  Eigen::PlainObjectBase<DerivedI> & I)
{
  const int n = A.cols();
  const int m = A.rows();
  B.resize(dim==1?n:m);
  B.setConstant(std::numeric_limits<typename DerivedB::Scalar>::max());
  I.resize(dim==1?n:m);
  for_each(A,[&B,&I,&dim](int i, int j, const typename DerivedB::Scalar v)
  {
    if(dim == 2)
    {
      std::swap(i,j);
    }
    // Coded as if dim == 1, assuming swap for dim == 2
    if(v < B(j))
    {
      B(j) = v;
      I(j) = i;
    }
  });
  Eigen::VectorXi Z;
  find_zero(A,dim,Z);
  for(int j = 0;j<I.size();j++)
  {
    if(Z(j) != (dim==1?m:n) && 0 < B(j))
    {
      B(j) = 0;
      I(j) = Z(j);
    }
  }
}
IGL_INLINE void igl::slice_into(
  const Eigen::SparseMatrix<T>& X,
  const Eigen::Matrix<int,Eigen::Dynamic,1> & R,
  const Eigen::Matrix<int,Eigen::Dynamic,1> & C,
  Eigen::SparseMatrix<T>& Y)
{
  int xm = X.rows();
  int xn = X.cols();
  assert(R.size() == xm);
  assert(C.size() == xn);
#ifndef NDEBUG
  int ym = Y.rows();
  int yn = Y.cols();
  assert(R.minCoeff() >= 0);
  assert(R.maxCoeff() < ym);
  assert(C.minCoeff() >= 0);
  assert(C.maxCoeff() < yn);
#endif
  // create temporary dynamic sparse matrix
  Eigen::DynamicSparseMatrix<T, Eigen::RowMajor> dyn_Y(Y);
  // Iterate over outside
  for(int k=0; k<X.outerSize(); ++k)
  {
    // Iterate over inside
    for(typename Eigen::SparseMatrix<T>::InnerIterator it (X,k); it; ++it)
    {
      dyn_Y.coeffRef(R(it.row()),C(it.col())) = it.value();
    }
  }
  Y = Eigen::SparseMatrix<T>(dyn_Y);
}
CollOfScalar EquelleRuntimeCPU::solveForUpdate(const CollOfScalar& residual) const
{
    Eigen::SparseMatrix<double, Eigen::RowMajor> matr = residual.derivative()[0];

    CollOfScalar::V du = CollOfScalar::V::Zero(residual.size());

    Opm::time::StopWatch clock;
    clock.start();

    // solve(n, # nonzero values ("val"), ptr to col indices
    // ("col_ind"), ptr to row locations in val array ("row_ind")
    // (these two may be swapped, not sure about the naming convention
    // here...), array of actual values ("val") (I guess... '*sa'...),
    // rhs, solution)
    Opm::LinearSolverInterface::LinearSolverReport rep
        = linsolver_.solve(matr.rows(), matr.nonZeros(),
                           matr.outerIndexPtr(), matr.innerIndexPtr(), matr.valuePtr(),
                           residual.value().data(), du.data());

    if (verbose_ > 2) {
        std::cout << " solveForUpdate: Linear solver took: "
                  << clock.secsSinceLast() << " seconds." << std::endl;
    }
    if (!rep.converged) {
        OPM_THROW(std::runtime_error, "Linear solver convergence failure.");
    }
    return du;
}
void NewtonIterationBlackoilInterleaved::formInterleavedSystem(const std::vector<ADB>& eqs,
                                                               const Eigen::SparseMatrix<double, Eigen::RowMajor>& A,
                                                               Mat& istlA) const
{
    const int np = eqs.size();

    // Find sparsity structure as union of basic block sparsity structures,
    // corresponding to the jacobians with respect to pressure.
    // Use addition to get to the union structure.
    Eigen::SparseMatrix<double> structure = eqs[0].derivative()[0];
    for (int phase = 0; phase < np; ++phase) {
        structure += eqs[phase].derivative()[0];
    }
    Eigen::SparseMatrix<double, Eigen::RowMajor> s = structure;

    // Create ISTL matrix with interleaved rows and columns (block structured).
    assert(np == 3);
    istlA.setSize(s.rows(), s.cols(), s.nonZeros());
    istlA.setBuildMode(Mat::row_wise);
    const int* ia = s.outerIndexPtr();
    const int* ja = s.innerIndexPtr();
    for (Mat::CreateIterator row = istlA.createbegin(); row != istlA.createend(); ++row) {
        int ri = row.index();
        for (int i = ia[ri]; i < ia[ri + 1]; ++i) {
            row.insert(ja[i]);
        }
    }
    const int size = s.rows();
    Span span[3] = { Span(size, 1, 0),
                     Span(size, 1, size),
                     Span(size, 1, 2*size) };
    for (int row = 0; row < size; ++row) {
        for (int col_ix = ia[row]; col_ix < ia[row + 1]; ++col_ix) {
            const int col = ja[col_ix];
            MatrixBlockType block;
            for (int p1 = 0; p1 < np; ++p1) {
                for (int p2 = 0; p2 < np; ++p2) {
                    block[p1][p2] = A.coeff(span[p1][row], span[p2][col]);
                }
            }
            istlA[row][col] = block;
        }
    }
}
// Compute (a0 ⊗ a1 ⊗ a2 ⊗ a3) * y without forming the Kronecker product explicitly.
Eigen::VectorXd KronProdSPMat4(
    Eigen::SparseMatrix<double, Eigen::RowMajor> a0,
    Eigen::SparseMatrix<double, Eigen::RowMajor> a1,
    Eigen::SparseMatrix<double, Eigen::RowMajor> a2,
    Eigen::SparseMatrix<double, Eigen::RowMajor> a3,
    Eigen::VectorXd y)
{
    signal(SIGSEGV, handler);   // install our handler

    Eigen::VectorXd retvec;
    retvec.setZero(a0.rows() * a1.rows() * a2.rows() * a3.rows());

    // loop rows a0
    for (int row_idx0 = 0; row_idx0 < a0.outerSize(); ++row_idx0) {
        int row_offset1 = row_idx0;
        row_offset1 *= a1.rows();

        // loop rows a1
        for (int row_idx1 = 0; row_idx1 < a1.outerSize(); ++row_idx1) {
            int row_offset2 = row_offset1 + row_idx1;
            row_offset2 *= a2.rows();

            // loop rows a2
            for (int row_idx2 = 0; row_idx2 < a2.outerSize(); ++row_idx2) {
                int row_offset3 = row_offset2 + row_idx2;
                row_offset3 *= a3.rows();

                // loop rows a3
                for (int row_idx3 = 0; row_idx3 < a3.outerSize(); ++row_idx3) {

                    // loop cols a0 (non-zero elements only)
                    for (Eigen::SparseMatrix<double, Eigen::RowMajor>::InnerIterator it0(a0, row_idx0); it0; ++it0) {
                        int col_offset1 = it0.index();
                        col_offset1 *= a1.innerSize();
                        double factor1 = it0.value();

                        // loop cols a1
                        for (Eigen::SparseMatrix<double, Eigen::RowMajor>::InnerIterator it1(a1, row_idx1); it1; ++it1) {
                            int col_offset2 = col_offset1 + it1.index();
                            col_offset2 *= a2.innerSize();
                            double factor2 = factor1 * it1.value();

                            // loop cols a2
                            for (Eigen::SparseMatrix<double, Eigen::RowMajor>::InnerIterator it2(a2, row_idx2); it2; ++it2) {
                                int col_offset3 = col_offset2 + it2.index();
                                col_offset3 *= a3.innerSize();
                                double factor3 = factor2 * it2.value();

                                // loop cols a3
                                for (Eigen::SparseMatrix<double, Eigen::RowMajor>::InnerIterator it3(a3, row_idx3); it3; ++it3) {
                                    retvec(row_offset3 + row_idx3) += factor3 * it3.value() * y(col_offset3 + it3.index());
                                }
                            }
                        }
                    }
                }
            }
        }
    }
    return retvec;
}
Mat::Mat(vector<string> _row_names, vector<string> _col_names,
         Eigen::SparseMatrix<double> _matrix, MatType _mattype)
{
    row_names = _row_names;
    col_names = _col_names;
    assert(row_names.size() == _matrix.rows());
    assert(col_names.size() == _matrix.cols());
    matrix = _matrix;
    mattype = _mattype;
}
inline void space_operator(
    Eigen::SparseMatrix<double>& result,
    Eigen::SparseMatrix<double>& laplace,
    const double multiplier,
    Eigen::SparseMatrix<double>& unit_matrix)
{
    result.resize(unit_matrix.rows(), unit_matrix.cols());
    result = laplace*multiplier + unit_matrix;
}
Eigen::SparseMatrix<double> normProbMatrix(Eigen::SparseMatrix<double> P)
{
    // each column is a probability simplex
    Eigen::SparseMatrix<double> P_norm;
    P_norm.resize(P.rows(), P.cols());

    for (int col = 0; col < P.cols(); col++)
    {
        //SparseVector<double> A_col_sparse = A_sparse.block(0, i, A_sparse.rows(),1);
        Eigen::SparseVector<double> P_vec = P.block(0, col, P.rows(), 1);
        Eigen::SparseVector<double> P_vec_norm;
        P_vec_norm.resize(P_vec.size());
        P_vec_norm = normProbVector(P_vec);
        for (int id_row = 0; id_row < P.rows(); id_row++)
        {
            P_norm.coeffRef(id_row, col) = P_vec_norm.coeff(id_row);
        }
    }
    P_norm.makeCompressed();
    P_norm.prune(TOLERANCE);
    return P_norm;
}
TEST(slice, sparse_identity)
{
  Eigen::SparseMatrix<double> A = Eigen::MatrixXd::Random(10,9).sparseView();
  Eigen::VectorXi I = igl::LinSpaced<Eigen::VectorXi >(A.rows(),0,A.rows()-1);
  Eigen::VectorXi J = igl::LinSpaced<Eigen::VectorXi >(A.cols(),0,A.cols()-1);
  {
    Eigen::SparseMatrix<double> B;
    igl::slice(A,I,J,B);
    test_common::assert_eq(A,B);
  }
  {
    Eigen::SparseMatrix<double> B;
    igl::slice(A,I,1,B);
    test_common::assert_eq(A,B);
  }
  {
    Eigen::SparseMatrix<double> B;
    igl::slice(A,J,2,B);
    test_common::assert_eq(A,B);
  }
}
TEST(slice_into, sparse_identity)
{
  Eigen::SparseMatrix<double> A = Eigen::MatrixXd::Random(10,9).sparseView();
  Eigen::VectorXi I = Eigen::VectorXi::LinSpaced(A.rows(),0,A.rows()-1);
  Eigen::VectorXi J = Eigen::VectorXi::LinSpaced(A.cols(),0,A.cols()-1);
  {
    Eigen::SparseMatrix<double> B(I.maxCoeff()+1,J.maxCoeff()+1);
    igl::slice_into(A,I,J,B);
    test_common::assert_eq(A,B);
  }
  {
    Eigen::SparseMatrix<double> B(I.maxCoeff()+1,A.cols());
    igl::slice_into(A,I,1,B);
    test_common::assert_eq(A,B);
  }
  {
    Eigen::SparseMatrix<double> B(A.rows(),J.maxCoeff()+1);
    igl::slice_into(A,J,2,B);
    test_common::assert_eq(A,B);
  }
}
Eigen::SparseMatrix<double> joint2conditional(Eigen::SparseMatrix<double> edgePot) // pa is the second dimension
{
    // second dimension of edgePot is the parent
    Eigen::SparseMatrix<double> Conditional;
    Conditional.resize(edgePot.rows(), edgePot.cols());

    Eigen::SparseVector<double> Parent_Marginal;
    Parent_Marginal.resize(edgePot.cols());

    for (int id_col = 0; id_col < edgePot.cols(); id_col++)
    {
        Eigen::SparseVector<double> tmp_vec = edgePot.block(0, id_col, edgePot.rows(), 1);
        Parent_Marginal.coeffRef(id_col) = tmp_vec.sum();
        if (Parent_Marginal.coeff(id_col) > TOLERANCE)
        {
            for (int id_row = 0; id_row < edgePot.rows(); id_row++)
            {
                Conditional.coeffRef(id_row, id_col) = edgePot.coeff(id_row, id_col) / Parent_Marginal.coeff(id_col);
            }
        }
    }
    Conditional.makeCompressed();
    Conditional.prune(TOLERANCE);
    return Conditional;
}
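// Hedged usage sketch (not part of the original source): round-trips a small joint
// distribution through joint2conditional and Condi2Joint (both defined above),
// assuming TOLERANCE is defined as in those snippets. Columns index the parent, so
// joint(x, pa) == conditional(x, pa) * parent_marginal(pa); checkJointConditionalRoundTrip
// is a hypothetical helper name.
#include <iostream>

void checkJointConditionalRoundTrip()
{
    Eigen::SparseMatrix<double> joint(2, 2);
    joint.insert(0, 0) = 0.1;
    joint.insert(1, 0) = 0.3;
    joint.insert(0, 1) = 0.4;
    joint.insert(1, 1) = 0.2;

    // Parent marginal: column sums of the joint table.
    Eigen::SparseVector<double> parent_marginal(joint.cols());
    for (int col = 0; col < joint.cols(); ++col) {
        parent_marginal.coeffRef(col) = joint.col(col).sum();
    }

    Eigen::SparseMatrix<double> conditional = joint2conditional(joint);
    Eigen::SparseMatrix<double> recovered = Condi2Joint(conditional, parent_marginal);

    // recovered should match joint up to TOLERANCE-level pruning.
    std::cout << "max abs error: "
              << (Eigen::MatrixXd(recovered) - Eigen::MatrixXd(joint)).cwiseAbs().maxCoeff()
              << "\n";
}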
IGL_INLINE void igl::cat(
  const int dim,
  const Eigen::SparseMatrix<Scalar> & A,
  const Eigen::SparseMatrix<Scalar> & B,
  Eigen::SparseMatrix<Scalar> & C)
{
  assert(dim == 1 || dim == 2);
  using namespace Eigen;
  // Special case if B or A is empty
  if(A.size() == 0)
  {
    C = B;
    return;
  }
  if(B.size() == 0)
  {
    C = A;
    return;
  }

  DynamicSparseMatrix<Scalar, RowMajor> dyn_C;
  if(dim == 1)
  {
    assert(A.cols() == B.cols());
    dyn_C.resize(A.rows()+B.rows(),A.cols());
  }else if(dim == 2)
  {
    assert(A.rows() == B.rows());
    dyn_C.resize(A.rows(),A.cols()+B.cols());
  }else
  {
    fprintf(stderr,"cat.h: Error: Unsupported dimension %d\n",dim);
  }

  dyn_C.reserve(A.nonZeros()+B.nonZeros());

  // Iterate over outside of A
  for(int k=0; k<A.outerSize(); ++k)
  {
    // Iterate over inside
    for(typename SparseMatrix<Scalar>::InnerIterator it (A,k); it; ++it)
    {
      dyn_C.coeffRef(it.row(),it.col()) += it.value();
    }
  }

  // Iterate over outside of B
  for(int k=0; k<B.outerSize(); ++k)
  {
    // Iterate over inside
    for(typename SparseMatrix<Scalar>::InnerIterator it (B,k); it; ++it)
    {
      int r = (dim == 1 ? A.rows()+it.row() : it.row());
      int c = (dim == 2 ? A.cols()+it.col() : it.col());
      dyn_C.coeffRef(r,c) += it.value();
    }
  }

  C = SparseMatrix<Scalar>(dyn_C);
}
IGL_INLINE void igl::repdiag(
  const Eigen::SparseMatrix<T>& A,
  const int d,
  Eigen::SparseMatrix<T>& B)
{
  using namespace std;
  using namespace Eigen;
  int m = A.rows();
  int n = A.cols();

  vector<Triplet<T> > IJV;
  IJV.reserve(A.nonZeros()*d);
  // Loop outer level
  for (int k=0; k<A.outerSize(); ++k)
  {
    // loop inner level
    for (typename Eigen::SparseMatrix<T>::InnerIterator it(A,k); it; ++it)
    {
      for(int i = 0;i<d;i++)
      {
        IJV.push_back(Triplet<T>(i*m+it.row(),i*n+it.col(),it.value()));
      }
    }
  }
  B.resize(m*d,n*d);
  B.setFromTriplets(IJV.begin(),IJV.end());

  // Q: Why is this **Very** slow?
  //int m = A.rows();
  //int n = A.cols();
  //B.resize(m*d,n*d);
  //// Reserve enough space for new non zeros
  //B.reserve(d*A.nonZeros());
  //// loop over reps
  //for(int i=0;i<d;i++)
  //{
  //  // Loop outer level
  //  for (int k=0; k<A.outerSize(); ++k)
  //  {
  //    // loop inner level
  //    for (typename Eigen::SparseMatrix<T>::InnerIterator it(A,k); it; ++it)
  //    {
  //      B.insert(i*m+it.row(),i*n+it.col()) = it.value();
  //    }
  //  }
  //}
  //B.makeCompressed();
}
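// Hedged usage sketch (not part of the original source): replicates a small sparse
// matrix three times along the diagonal and prints the result densely. Assumes
// libigl's repdiag.h is on the include path; checkRepdiag is a hypothetical helper name.
#include <igl/repdiag.h>
#include <iostream>

void checkRepdiag()
{
    Eigen::SparseMatrix<double> A = Eigen::MatrixXd::Random(2, 3).sparseView();
    Eigen::SparseMatrix<double> B;
    igl::repdiag(A, 3, B);   // B is 6 x 9 with three copies of A on its diagonal blocks
    std::cout << Eigen::MatrixXd(B) << "\n";
}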
std::vector<double> getRowSum(const Eigen::SparseMatrix<int,Eigen::RowMajor>& adjacencyMatrix)
{
    int rowSize = adjacencyMatrix.rows();
    std::vector<double> rowSum;
    rowSum.reserve(rowSize);

    for(int i=0; i<rowSize; ++i){
        double value = adjacencyMatrix.innerVector(i).sum();
        rowSum.push_back(value);
    }
    return rowSum;
}
IGL_INLINE void igl::components(
  const Eigen::SparseMatrix<AScalar> & A,
  Eigen::PlainObjectBase<DerivedC> & C)
{
  assert(A.rows() == A.cols());
  using namespace Eigen;
  // THIS IS DENSE:
  //boost::adjacency_matrix<boost::undirectedS> bA(A.rows());
  boost::adjacency_list<boost::vecS,boost::vecS,boost::undirectedS> bA(A.rows());
  for(int j=0; j<A.outerSize();j++)
  {
    // Iterate over inside
    for(typename SparseMatrix<AScalar>::InnerIterator it (A,j); it; ++it)
    {
      if(0 != it.value())
      {
        boost::add_edge(it.row(),it.col(),bA);
      }
    }
  }
  C.resize(A.rows(),1);
  boost::connected_components(bA,C.data());
}
Eigen::SparseMatrix<Type> kronecker(Eigen::SparseMatrix<Type> x, Eigen::SparseMatrix<Type> y){
  typedef Eigen::Triplet<Type> T;
  typedef typename Eigen::SparseMatrix<Type>::InnerIterator Iterator;
  std::vector<T> tripletList;
  int n1=x.rows(), n2=x.cols(), n3=y.rows(), n4=y.cols();
  int i,j,k,l;
  // Loop over nonzeros of x
  for (int cx=0; cx<x.outerSize(); cx++)
    for (Iterator itx(x,cx); itx; ++itx)
      // Loop over nonzeros of y
      for (int cy=0; cy<y.outerSize(); cy++)
        for (Iterator ity(y,cy); ity; ++ity)
        {
          i=itx.row();
          j=itx.col();
          k=ity.row();
          l=ity.col();
          tripletList.push_back(T(i*n3+k, j*n4+l, itx.value()*ity.value()));
        }
  Eigen::SparseMatrix<Type> mat(n1*n3, n2*n4);
  mat.setFromTriplets(tripletList.begin(), tripletList.end());
  return mat;
}
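// Hedged usage sketch (not part of the original source): forms the Kronecker product
// of two small sparse matrices with the kronecker() function above and spot-checks its
// block structure, i.e. mat(i*n3 + k, j*n4 + l) == x(i, j) * y(k, l).
// checkKronecker is a hypothetical helper name.
#include <algorithm>
#include <cmath>
#include <iostream>

void checkKronecker()
{
    Eigen::SparseMatrix<double> x = Eigen::MatrixXd::Random(2, 3).sparseView();
    Eigen::SparseMatrix<double> y = Eigen::MatrixXd::Random(3, 2).sparseView();

    Eigen::SparseMatrix<double> k = kronecker(x, y);   // (2*3) x (3*2)

    // Compare every block entry against the defining formula.
    Eigen::MatrixXd xd(x), yd(y), kd(k);
    double max_err = 0.0;
    for (int i = 0; i < xd.rows(); ++i)
        for (int j = 0; j < xd.cols(); ++j)
            for (int r = 0; r < yd.rows(); ++r)
                for (int c = 0; c < yd.cols(); ++c)
                    max_err = std::max(max_err,
                        std::abs(kd(i * yd.rows() + r, j * yd.cols() + c) - xd(i, j) * yd(r, c)));
    std::cout << "max abs error: " << max_err << "\n";
}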
ConvertToMklResult to_mkl(const Eigen::SparseMatrix<double> &Ain, sparse_status_t &status) {
  ConvertToMklResult result;
  const int N = static_cast<int>(Ain.rows());

  // const-cast to work with C-api.
  int *row_starts = const_cast<int *>(Ain.outerIndexPtr());
  int *col_index = const_cast<int *>(Ain.innerIndexPtr());
  double *values = const_cast<double *>(Ain.valuePtr());

  result.descr.type = SPARSE_MATRIX_TYPE_GENERAL; /* Full matrix is stored */
  result.status = mkl_sparse_d_create_csr(&result.matrix, SPARSE_INDEX_BASE_ZERO, N, N,
                                          row_starts, row_starts + 1, col_index, values);
  return result;
}
IGL_INLINE void igl::full(
  const Eigen::SparseMatrix<T> & A,
  Eigen::PlainObjectBase<DerivedB>& B)
{
  assert(false && "Obsolete. Just call B = Matrix(A)");
  using namespace Eigen;
  B = PlainObjectBase<DerivedB >::Zero(A.rows(),A.cols());
  // Iterate over outside
  for(int k=0; k<A.outerSize(); ++k)
  {
    // Iterate over inside
    for(typename SparseMatrix<T>::InnerIterator it (A,k); it; ++it)
    {
      B(it.row(),it.col()) = it.value();
    }
  }
}
inline std::unique_ptr<giss::VectorSparseMatrix> Eigen_to_giss(
    Eigen::SparseMatrix<double> const &mat)
{
    std::unique_ptr<giss::VectorSparseMatrix> ret(
        new giss::VectorSparseMatrix(giss::SparseDescr(mat.rows(), mat.cols())));

    int nz = mat.nonZeros();
    ret->reserve(nz);
    for (int k=0; k<mat.outerSize(); ++k) {
        for (Eigen::SparseMatrix<double>::InnerIterator it(mat,k); it; ++it) {
            ret->set(it.row(), it.col(), it.value(), SparseMatrix::DuplicatePolicy::ADD);
        }
    }
    return ret;
}
// update random Vectors
void updateMatrix(Eigen::SparseMatrix<double,Eigen::RowMajor>& randomMatrix,
                  const Eigen::SparseMatrix<int,Eigen::RowMajor>& adjacencyMatrix, int itr)
{
    int numberOfRVector = randomMatrix.rows();
    int numberOfVertice = randomMatrix.cols();
    std::vector<double> rowSum = getRowSum(adjacencyMatrix);

    for(int i=0; i<numberOfRVector; ++i){
        for(int h=0; h<itr; ++h){
            Eigen::SparseMatrix<double,Eigen::RowMajor> middleValueMatrix(numberOfVertice,1);
            middleValueMatrix.reserve(1);
            for(int j = 0; j<numberOfVertice; ++j){
                Eigen::SparseMatrix<double,Eigen::RowMajor> v1(1,numberOfVertice), v2(numberOfVertice,1);
                v1.reserve(numberOfVertice);
                v2.reserve(1);
                v1 = adjacencyMatrix.cast<double>().innerVector(j);
                v2 = randomMatrix.innerVector(h).transpose();
                Eigen::SparseMatrix<double,Eigen::RowMajor> middleValue1 = v1*v2;
                double middleValue = 0;
                if(rowSum[j] != 0){
                    middleValue = (*middleValue1.valuePtr())/rowSum[j];
                } else {
                    middleValue = 1;
                }
                middleValueMatrix.insert(j,0) = middleValue;
            }
            Eigen::SparseMatrix<double,Eigen::RowMajor> right = middleValueMatrix.transpose();
            Eigen::SparseMatrix<double,Eigen::RowMajor> left = randomMatrix.innerVector(i);
            randomMatrix.innerVector(i) = 0.5*(left + right);
        }
    }
}
double L2R_Huber_SVC::predict(const std::string fileNameLibSVMFormat, Eigen::VectorXd &w) {
  Eigen::SparseMatrix<double, 1> vX;
  Eigen::ArrayXd vy;
  const char *fn = fileNameLibSVMFormat.c_str();
  read_LibSVMdata1(fn, vX, vy);
  // loadLib(fileNameLibSVMFormat, vX, vy);
  int vl = vX.rows();
  int vn = vX.cols();
  int success = 0;

  if (vn != w.size()) {
    w.conservativeResize(vn);
  }

  Eigen::ArrayXd prob = vy * (vX * w).array();
  for (int i = 0; i < vl; ++i) {
    if (prob.coeffRef(i) >= 0.0)
      ++success;
  }
  return (double)success / vl;
}