void test3() {
#ifdef MAKE_TEST3
    std::cout << "Multidimensional test:" << std::endl;

    // Construct data for the IVP
    double T = 1;
    // Many dimensions
    int n = 5;
    // Initial value
    Eigen::VectorXd y0(2*n);
    for(int i = 0; i < n; ++i) {
        y0(i)   = (i+1.)/n;
        y0(i+n) = -1;
    }
    // Multidimensional rhs
    auto f = [n] (Eigen::VectorXd y) {
        Eigen::VectorXd fy(2*n);
        Eigen::VectorXd g(n);
        g(0)   = y(0)*(y(1)+y(0));
        g(n-1) = y(n-1)*(y(n-1)+y(n-2));
        for(int i = 1; i < n-1; ++i) {
            g(i) = y(i)*(y(i-1)+y(i+1));
        }
        // Tridiagonal sparse matrix C
        Eigen::SparseMatrix<double> C(n,n);
        C.reserve(3);
        for(int i = 0; i < n; ++i) {
            C.insert(i,i) = 2;
            if(i < n-1) C.insert(i,i+1) = -1;
            if(i >= 1)  C.insert(i,i-1) = -1;
        }
        C.makeCompressed();

        fy.head(n) = y.head(n);
        Eigen::SparseLU< Eigen::SparseMatrix<double> > solver;
        solver.analyzePattern(C);
        solver.compute(C);
        fy.tail(n) = solver.solve(g);
        return fy;
    };

    // Constructor:
    ode45<Eigen::VectorXd> O(f);

    // Setup options
    O.options.do_statistics = true;

    // Solve
    auto sol = O.solve(y0, T);

    // Print info
    O.print();
    std::cout << "T = " << sol.back().second << std::endl;
    std::cout << "y(T) = " << std::endl
              << sol.back().first << std::endl;
#endif
}
NatCSI(const std::vector<double> & t, const std::vector<double> & y)
    : t(t), y(y), h(t.size()-1), c(t.size()) {
    // Size check
    assert( ( t.size() == y.size() ) && "Error: mismatched size of t and y!");

    // m is the number of conditions (t goes from t_0 to t_n)
    // WARNING: m = n+1
    m = t.size();

    // Vector containing increments (from the right)
    for(int i = 0; i < (m - 1); ++i) {
        h(i) = t[i+1] - t[i];
        // Check that t is sorted
        assert( ( h(i) > 0 ) && "Error: array t must be sorted!");
    }

    // System matrix and rhs as in 3.5.9; the first and last rows are
    // replaced by the natural boundary conditions 3.5.10
    Eigen::SparseMatrix<double> A(m,m);
    Eigen::VectorXd b(m);
    // WARNING: sparse reserve space
    A.reserve(3);

    // Fill in natural conditions 3.5.10 for matrix
    A.coeffRef(0,0)     = 2 / h(0);
    A.coeffRef(0,1)     = 1 / h(0);
    A.coeffRef(m-1,m-2) = 1 / h(m-2);
    A.coeffRef(m-1,m-1) = 2 / h(m-2);

    // Reuse computation for rhs
    double bold = (y[1] - y[0]) / (h(0)*h(0));
    b(0) = 3*bold; // Fill in natural conditions 3.5.10

    // Fill matrix A and rhs b
    for(int i = 1; i < m-1; ++i) {
        // Precompute 1/h(i)
        double hinv = 1./h(i);

        // Fill in row i of A
        A.coeffRef(i,i-1) = hinv;
        A.coeffRef(i,i)   = 2./h(i) + 2./h(i-1);
        A.coeffRef(i,i+1) = hinv;

        // Reuse computation for rhs b
        double bnew = (y[i+1] - y[i]) / (h(i)*h(i));
        b(i) = 3. * (bnew + bold);
        bold = bnew;
    }
    b(m-1) = 3*bold; // Fill in natural conditions 3.5.10

    // Compress the matrix
    A.makeCompressed();
    std::cout << A; // Debug output of the assembled matrix

    // Factorize A and solve the system A*c = b
    Eigen::SparseLU<Eigen::SparseMatrix<double>> lu;
    lu.compute(A);
    c = lu.solve(b);
}
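// Usage sketch for the constructor above. This is not part of the original
// class: it only relies on the NatCSI(t, y) interface shown here, and the knot
// positions and data values are made up for illustration.
#include <vector>

void natcsi_usage_sketch() {
    std::vector<double> t = {0.0, 0.5, 1.0, 2.0};  // strictly increasing knots
    std::vector<double> y = {1.0, 0.2, 0.4, 1.5};  // data values at the knots
    // The constructor assembles the sparse tridiagonal system for the slopes c
    // and solves it with Eigen::SparseLU.
    NatCSI spline(t, y);
    (void) spline;
}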
void testEigen(int m, int n, int nnz, std::vector<int>& rows, std::vector<int>& cols,
               std::vector<double>& values, double* matB)
{
    double start, stop, time_to_solve, time_to_build;
    double tol = 1e-9;

    Eigen::SparseMatrix<double> A;
    std::vector< Eigen::Triplet<double> > trips;
    trips.reserve(nnz);
    for (int k = 0; k < nnz; k++){
        double _val = values[k];
        int i = rows[k];
        int j = cols[k];
        if (fabs(_val) > tol){
            // Input indices are 1-based; Eigen expects 0-based indices
            trips.push_back(Eigen::Triplet<double>(i-1, j-1, _val));
        }
    }
    // NOTE: setFromTriplets() accumulates contributions to the same (i,j)!
    A.resize(m, n);
    start = second();
    A.setFromTriplets(trips.begin(), trips.end());
    stop = second();
    time_to_build = stop - start;

    Eigen::SparseLU< Eigen::SparseMatrix<double>, Eigen::COLAMDOrdering<int> > solverLU;

    Eigen::VectorXd b;
    b.resize(m);
    for (int i = 0; i < m; i++ ) b(i) = matB[i];

    printf("\nProcessing in Eigen using LU...\n");
    start = second();
    solverLU.compute(A);
    Eigen::VectorXd X = solverLU.solve(b);
    stop = second();
    time_to_solve = stop - start;

    // Residual check: |b - A*x| in the infinity norm
    Eigen::VectorXd ax = A * X;
    Eigen::VectorXd bMinusAx = b - ax;

    std::vector<double> h_r(m);
    for (int i = 0; i < m; i++) h_r[i] = bMinusAx(i);

    double r_inf = vec_norminf(m, h_r.data());

    printf("(Eigen) |b - A*x| = %E \n", r_inf);
    printf("(Eigen) Time to build(sec): %f\n", time_to_build);
    printf("(Eigen) Time (sec): %f\n", time_to_solve);
}
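// Minimal, self-contained sketch of the same SparseLU workflow used above
// (triplet assembly, factorization, solve), with the solver.info() checks that
// the snippet above omits. Illustrative only; the 3x3 system is made up.
#include <Eigen/Sparse>
#include <iostream>
#include <vector>

int sparse_lu_sketch() {
    std::vector<Eigen::Triplet<double>> trips = {
        {0, 0, 4.0}, {0, 1, -1.0},
        {1, 0, -1.0}, {1, 1, 4.0}, {1, 2, -1.0},
        {2, 1, -1.0}, {2, 2, 4.0}
    };
    Eigen::SparseMatrix<double> A(3, 3);
    A.setFromTriplets(trips.begin(), trips.end());  // duplicate entries are summed

    Eigen::SparseLU<Eigen::SparseMatrix<double>, Eigen::COLAMDOrdering<int>> solver;
    solver.analyzePattern(A);   // symbolic factorization (reusable for the same pattern)
    solver.factorize(A);        // numeric factorization
    if (solver.info() != Eigen::Success) {
        std::cerr << "factorization failed" << std::endl;
        return 1;
    }
    Eigen::VectorXd b = Eigen::VectorXd::Ones(3);
    Eigen::VectorXd x = solver.solve(b);
    if (solver.info() != Eigen::Success) {
        std::cerr << "solve failed" << std::endl;
        return 1;
    }
    std::cout << "residual = " << (A * x - b).norm() << std::endl;
    return 0;
}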
//************************************
// Method:    Solve
// FullName:  PFCal::PFC::Solve
// Access:    public
// Returns:   PFVoid
// Qualifier: Power flow calculation routine; assembles the linear system and
//            solves it with the configured method
//************************************
PFVoid PFC::Solve(){
    PFDouble opml(0.0);
    Eigen::SparseLU<PFCore::PFSMatrixXD, PFCore::PFCOLAMDOrdering> solver;
    PFCIter = 0;

    // 0. Initialize the sizes of the calculation matrices and vectors
    this->_InitPFCalMatrix();

    do {
        // 1. Compute the power mismatch vector
        cout << "QG DISPATCH..." << endl;
        _MakeDPQVector();

        // 2. Exit if the convergence criterion is met
        cout << "MAXDISPATCH\t" << PFCIter << "\t" << dPFCPQ.cwiseAbs().maxCoeff() << endl;
        if (dPFCPQ.cwiseAbs().maxCoeff() < PFCEpsm){
            this->PFCResult = PF_RESULT_CONVERG;
            return;
        }

        // 3. Build the Jacobian matrix
        _MakeJacoMatrix();

        // 4. Solve for the voltage correction
        solver.compute(PFCJaco);
        if (solver.info() != Eigen::Success){
            cout << "Solving<stage_1> Failed!" << endl;
            this->PFCResult = PF_RESULT_DIVERGE_FAILURE;
            return;
        }
        dPFCVA = solver.solve(dPFCPQ);
        if (solver.info() != Eigen::Success){
            cout << "Solving<stage_2> Failed!" << endl;
            this->PFCResult = PF_RESULT_DIVERGE_FAILURE;
            return;
        }

        // 5. Check whether NaN appeared
        if(dPFCVA.hasNaN()){
            this->PFCResult = PF_RESULT_DIVERGE_NAN;
            return;
        }

        // 6. Update the system state
        cout << "VOL DISPATCH..." << endl;
        opml = _CalOptimalMultiplier();
        dPFCVA *= opml;
        _UpdateSysState();

        // 7. Increment the iteration counter
        ++PFCIter;
    } while (PFCIter <= PFCMaxIter);

    // Iteration limit exceeded
    if (PFCIter > PFCMaxIter){
        this->PFCResult = PF_RESULT_DIVERGE_OVER_ITER;
        return;
    }
    this->PFCResult = PF_RESULT_DIVERGE;
    return;
}
Tvector<T> Tsparse_matrix<T>::solve( const Tvector<T>& b ) const
{
    assert( ROWS == b.size() );                       // Check dimensions are compatible

    // Convert Tvector to an Eigen matrix
    Eigen::Matrix<T, -1, 1> B;
    B.resize(b.size(),1);
    for (std::size_t i=0; i<b.size(); ++i)
    {
        B(i,0) = b.CONTAINER[i];
    }

    // Convert Tsparse_matrix to an Eigen::SparseMatrix
    Eigen::SparseMatrix<T> A(ROWS,COLS);              // Declare Eigen sparse matrix
    std::vector<Eigen::Triplet<T>> triplet_list;      // Declare list of triplets
    for ( std::size_t i=0; i<ROWS; ++i )
    {
        if ( !S_MATRIX[i].isempty() )                 // Check that the row is not empty
        {
            std::vector<std::size_t> index;
            std::vector<T> element;
            index = S_MATRIX[i].index_list();
            element = S_MATRIX[i].element_list();
            std::size_t J = index.size();
            for ( std::size_t j=0; j<J; ++j )
            {
                triplet_list.push_back( Eigen::Triplet<T>( i, index[j], element[j] ));
            }
        }
    }
    A.setFromTriplets(triplet_list.begin(), triplet_list.end());

    // Setup and solve the system
    Eigen::SparseLU< Eigen::SparseMatrix<T> > solverA;
    //Eigen::BiCGSTAB<Eigen::SparseMatrix<T>> solverA;
    //solverA.analyzePattern(A);
    //solverA.factorize(A);
    solverA.compute(A);
    Eigen::Matrix<T, -1, 1> X;
    X.resize(b.size(),1);
    X = solverA.solve(B);

    // Convert back to a Tvector
    Tvector<T> x(COLS,0.0);
    for (std::size_t i=0; i<COLS; ++i)
    {
        x.CONTAINER[i] = X(i,0);
    }
    return x;
}
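// Sketch (not part of the original class): the commented-out BiCGSTAB line
// above suggests an iterative alternative to SparseLU. A drop-in variant of
// the solve step could look like this; the tolerance and iteration cap are
// illustrative assumptions, and the helper name is hypothetical.
#include <Eigen/Sparse>

template <typename T>
Eigen::Matrix<T, Eigen::Dynamic, 1>
solve_bicgstab_sketch(const Eigen::SparseMatrix<T>& A,
                      const Eigen::Matrix<T, Eigen::Dynamic, 1>& B)
{
    Eigen::BiCGSTAB<Eigen::SparseMatrix<T>> solver;
    solver.setTolerance(1e-10);       // assumed tolerance
    solver.setMaxIterations(1000);    // assumed iteration cap
    solver.compute(A);
    Eigen::Matrix<T, Eigen::Dynamic, 1> X = solver.solve(B);
    // solver.iterations() and solver.error() report the convergence history;
    // check solver.info() == Eigen::Success before trusting X.
    return X;
}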
int main() {
    // Construct data for RK order 4
    MatrixXd A = MatrixXd::Zero(4,4);
    A(1,0) = .5;
    A(2,1) = .5;
    A(3,2) = 1;
    VectorXd b(4);
    b << 1./6, 1./3, 1./3, 1./6;

    // Construct data for the IVP
    double T = 1;
    int n = 5;
    VectorXd y0(2*n);
    for(int i = 0; i < n; ++i) {
        y0(i)   = (i+1.)/n;
        y0(i+n) = -1;
    }
    auto f = [n] (VectorXd y) {
        VectorXd fy(2*n);
        VectorXd g(n);
        g(0)   = y(0)*(y(1)+y(0));
        g(n-1) = y(n-1)*(y(n-1)+y(n-2));
        for(int i = 1; i < n-1; ++i) {
            g(i) = y(i)*(y(i-1)+y(i+1));
        }
        Eigen::SparseMatrix<double> C(n,n);
        C.reserve(3);
        for(int i = 0; i < n; ++i) {
            C.insert(i,i) = 2;
            if(i < n-1) C.insert(i,i+1) = -1;
            if(i >= 1)  C.insert(i,i-1) = -1;
        }
        C.makeCompressed();

        fy.head(n) = y.head(n);
        Eigen::SparseLU< Eigen::SparseMatrix<double> > solver;
        solver.analyzePattern(C);
        solver.compute(C);
        fy.tail(n) = solver.solve(g);
        return fy;
    };

    errors(f, T, y0, A, b);
}
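// Design note (sketch, not from the original code): the tridiagonal matrix C
// inside the lambda above does not depend on y, so its SparseLU factorization
// can be hoisted out and reused for every right-hand-side evaluation.
// SparseLU is not copyable, hence the solver is owned by the caller and
// captured by reference. The helper name make_rhs is hypothetical; assumes
// C++14 return-type deduction.
#include <Eigen/Dense>
#include <Eigen/Sparse>

auto make_rhs(int n, Eigen::SparseLU<Eigen::SparseMatrix<double>>& solver) {
    // Assemble the constant tridiagonal matrix once
    Eigen::SparseMatrix<double> C(n, n);
    C.reserve(Eigen::VectorXi::Constant(n, 3)); // up to 3 nonzeros per column
    for (int i = 0; i < n; ++i) {
        C.insert(i, i) = 2;
        if (i < n - 1) C.insert(i, i + 1) = -1;
        if (i >= 1)    C.insert(i, i - 1) = -1;
    }
    C.makeCompressed();
    solver.compute(C); // analyzePattern + factorize, done once

    // Same right-hand side as above, but reusing the precomputed factorization
    return [n, &solver](const Eigen::VectorXd& y) {
        Eigen::VectorXd g(n);
        g(0)     = y(0) * (y(1) + y(0));
        g(n - 1) = y(n - 1) * (y(n - 1) + y(n - 2));
        for (int i = 1; i < n - 1; ++i) g(i) = y(i) * (y(i - 1) + y(i + 1));
        Eigen::VectorXd fy(2 * n);
        fy.head(n) = y.head(n);
        fy.tail(n) = solver.solve(g);
        return fy;
    };
}
// Possible use:
//   Eigen::SparseLU<Eigen::SparseMatrix<double>> solver;
//   auto f = make_rhs(n, solver);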
void ReliefGeneration::CompressHeightField(std::vector<double>& heightField, int resX, int resY)
{
    double bbScale = 2;
    double alpha = bbScale * 300.0;
    double threshold = bbScale * 0.05;
    int vertNum = (resX + 1) * (resY + 1);

    // Gradient compression: suppress large gradients, logarithmically attenuate the rest
    std::vector<MagicMath::Vector3> DeltaVector(vertNum);
    for (int xid = 0; xid < resX; xid++)
    {
        for (int yid = 0; yid < resY; yid++)
        {
            int index = xid * (resY + 1) + yid;
            MagicMath::Vector3 deltaT(0, 0, 0);
            deltaT[0] = heightField.at(index + resY + 1) - heightField.at(index);
            deltaT[1] = heightField.at(index + 1) - heightField.at(index);
            double deltaMag = deltaT.Normalise();
            if (deltaMag > threshold)
            {
                deltaMag = 0;
            }
            else
            {
                deltaMag = log(1 + alpha * deltaMag) / alpha;
            }
            DeltaVector.at(index) = deltaT * deltaMag;
        }
    }

    // Divergence of the compressed gradient field
    std::vector<double> LaplaceField(vertNum);
    for (int xid = 1; xid < resX; xid++)
    {
        for (int yid = 1; yid < resY; yid++)
        {
            int index = xid * (resY + 1) + yid;
            LaplaceField.at(index) = DeltaVector.at(index)[0] - DeltaVector.at(index - resY - 1)[0]
                                   + DeltaVector.at(index)[1] - DeltaVector.at(index - 1)[1];
        }
    }

    DebugLog << "Relief: Construct Matrix" << std::endl;
    // Poisson system: boundary vertices pinned to 0, interior uses the 5-point Laplacian
    std::vector< Eigen::Triplet<double> > tripletList;
    Eigen::VectorXd b(vertNum, 1);
    for (int xid = 0; xid < resX + 1; xid++)
    {
        for (int yid = 0; yid < resY + 1; yid++)
        {
            int index = xid * (resY + 1) + yid;
            if (xid == 0 || xid == resX || yid == 0 || yid == resY)
            {
                tripletList.push_back( Eigen::Triplet<double>(index, index, 1) );
                b(index) = 0;
            }
            else
            {
                tripletList.push_back( Eigen::Triplet<double>(index, index, -4.0) );
                tripletList.push_back( Eigen::Triplet<double>(index, index + 1, 1.0) );
                tripletList.push_back( Eigen::Triplet<double>(index, index - 1, 1.0) );
                tripletList.push_back( Eigen::Triplet<double>(index, index + resY + 1, 1.0) );
                tripletList.push_back( Eigen::Triplet<double>(index, index - resY - 1, 1.0) );
                b(index) = LaplaceField.at(index);
            }
        }
    }

    DebugLog << "Relief: Solve Matrix" << std::endl;
    Eigen::SparseMatrix<double, Eigen::ColMajor> matA(vertNum, vertNum);
    matA.setFromTriplets(tripletList.begin(), tripletList.end());
    Eigen::SparseLU<Eigen::SparseMatrix<double, Eigen::ColMajor> > solver;
    solver.compute(matA);
    if(solver.info() != Eigen::Success)
    {
        DebugLog << "Relief: SparseLU Failed" << std::endl;
    }
    Eigen::VectorXd res = solver.solve(b);

    // Copy results
    for (int i = 0; i < vertNum; i++)
    {
        heightField.at(i) = res(i);
    }
}
IGL_INLINE void igl::PolyVectorFieldFinder<DerivedV, DerivedF>::
minQuadWithKnownMini(const Eigen::SparseMatrix<std::complex<typename DerivedV::Scalar> > &Q,
                     const Eigen::SparseMatrix<std::complex<typename DerivedV::Scalar> > &f,
                     const Eigen::VectorXi isConstrained,
                     const Eigen::Matrix<std::complex<typename DerivedV::Scalar>, Eigen::Dynamic, 1> &xknown,
                     Eigen::Matrix<std::complex<typename DerivedV::Scalar>, Eigen::Dynamic, 1> &x)
{
    int N = Q.rows();

    int nc = xknown.rows();
    Eigen::VectorXi known; known.setZero(nc,1);
    Eigen::VectorXi unknown; unknown.setZero(N-nc,1);

    // Split indices into constrained (known) and free (unknown)
    int indk = 0, indu = 0;
    for (int i = 0; i<N; ++i)
        if (isConstrained[i])
        {
            known[indk] = i;
            indk++;
        }
        else
        {
            unknown[indu] = i;
            indu++;
        }

    Eigen::SparseMatrix<std::complex<typename DerivedV::Scalar>> Quu, Quk;
    igl::slice(Q,unknown, unknown, Quu);
    igl::slice(Q,unknown, known, Quk);

    std::vector<typename Eigen::Triplet<std::complex<typename DerivedV::Scalar> > > tripletList;

    Eigen::SparseMatrix<std::complex<typename DerivedV::Scalar> > fu(N-nc,1);
    igl::slice(f,unknown, Eigen::VectorXi::Zero(1,1), fu);

    Eigen::SparseMatrix<std::complex<typename DerivedV::Scalar> > rhs = (Quk*xknown).sparseView()+.5*fu;

    Eigen::SparseLU< Eigen::SparseMatrix<std::complex<typename DerivedV::Scalar>>> solver;
    solver.compute(-Quu);
    if(solver.info()!=Eigen::Success)
    {
        std::cerr<<"Decomposition failed!"<<std::endl;
        return;
    }
    Eigen::SparseMatrix<std::complex<typename DerivedV::Scalar>> b = solver.solve(rhs);
    if(solver.info()!=Eigen::Success)
    {
        std::cerr<<"Solving failed!"<<std::endl;
        return;
    }

    // Scatter known and solved values back into the full vector
    indk = 0, indu = 0;
    x.setZero(N,1);
    for (int i = 0; i<N; ++i)
        if (isConstrained[i])
            x[i] = xknown[indk++];
        else
            x[i] = b.coeff(indu++,0);
}
void poisson_blend(mve::FloatImage::ConstPtr src, mve::ByteImage::ConstPtr mask,
    mve::FloatImage::Ptr dest, float alpha)
{
    assert(src->width() == mask->width() && mask->width() == dest->width());
    assert(src->height() == mask->height() && mask->height() == dest->height());
    assert(src->channels() == 3 && dest->channels() == 3);
    assert(mask->channels() == 1);
    assert(valid_mask(mask));

    const int n = dest->get_pixel_amount();
    const int width = dest->width();
    const int height = dest->height();
    const int channels = dest->channels();

    mve::Image<int>::Ptr indices = mve::Image<int>::create(width, height, 1);
    indices->fill(-1);
    int index = 0;
    for (int i = 0; i < n; ++i) {
        if (mask->at(i) != 0) {
            indices->at(i) = index;
            index++;
        }
    }
    const int nnz = index;

    std::vector<math::Vec3f> coefficients_b;
    coefficients_b.resize(nnz);

    std::vector<Eigen::Triplet<float, int> > coefficients_A;
    coefficients_A.reserve(nnz); //TODO better estimate...

    for (int i = 0; i < n; ++i) {
        const int row = indices->at(i);
        if (mask->at(i) == 126 || mask->at(i) == 128) {
            /* Border condition: keep the destination value. */
            Eigen::Triplet<float, int> t(row, row, 1.0f);
            coefficients_A.push_back(t);
            coefficients_b[row] = math::Vec3f(&dest->at(i, 0));
        }

        if (mask->at(i) == 255) {
            const int i01 = indices->at(i - width);
            const int i10 = indices->at(i - 1);
            const int i11 = indices->at(i);
            const int i12 = indices->at(i + 1);
            const int i21 = indices->at(i + width);

            /* All neighbours should be either border conditions or
             * part of the optimization. */
            assert(i01 != -1 && i10 != -1 && i11 != -1 && i12 != -1 && i21 != -1);

            Eigen::Triplet<float, int> t01(row, i01, 1.0f);
            Eigen::Triplet<float, int> t10(row, i10, 1.0f);
            Eigen::Triplet<float, int> t11(row, i11, -4.0f);
            Eigen::Triplet<float, int> t12(row, i12, 1.0f);
            Eigen::Triplet<float, int> t21(row, i21, 1.0f);
            Eigen::Triplet<float, int> triplets[] = {t01, t10, t11, t12, t21};

            coefficients_A.insert(coefficients_A.end(), triplets, triplets + 5);

            math::Vec3f l_d = simple_laplacian(i, dest);
            math::Vec3f l_s = simple_laplacian(i, src);

            coefficients_b[row] = (alpha * l_s + (1.0f - alpha) * l_d);
        }
    }

    SpMat A(nnz, nnz);
    A.setFromTriplets(coefficients_A.begin(), coefficients_A.end());

    Eigen::SparseLU<SpMat, Eigen::COLAMDOrdering<int> > solver;
    solver.compute(A);

    /* The factorization is reused for all three color channels. */
    for (int channel = 0; channel < channels; ++channel) {
        Eigen::VectorXf b(nnz);
        for (std::size_t i = 0; i < coefficients_b.size(); ++i)
            b[i] = coefficients_b[i][channel];

        Eigen::VectorXf x(n);
        x = solver.solve(b);

        for (int i = 0; i < n; ++i) {
            int index = indices->at(i);
            if (index != -1) dest->at(i, channel) = x[index];
        }
    }
}
int main()
{
    cout << "----- TESTING SparseMatrix -----" << endl;

    /* ----- TESTING sparse linear system solver ----- */

    /* initialize random seed: */
    srand (time(NULL));

    size_t N = 2000;
    TSL::SparseMatrix<double> A_matrix(N,N);
    /*for (size_t i=0; i<N; ++i)
    {
        A_matrix( i, i )  = rand() * (i + rand());
        A_matrix( i, 10 ) = rand() * (i + rand());
        A_matrix( i, 57 ) = rand() * (i + rand());
    }*/
    for (size_t i = 0; i < N; ++i)
    {
        for (size_t j = 0; j < N; ++j)
        {
            A_matrix( i, j ) = rand() * i + rand() * j;
        }
    }

    //cout << "A_matrix.rows() = " << A_matrix.rows() << endl;
    //cout << "A_matrix.cols() = " << A_matrix.cols() << endl;
    //cout << "A_matrix.size() = " << A_matrix.size() << endl;
    //cout << "A_matrix.numel() = " << A_matrix.numel() << endl;
    //A_matrix(33, 100) = 2.0;
    //cout << "A_matrix.numel_row( 33 ) = " << A_matrix.numel_row( 33 ) << endl;

    // Test copy constructor
    //TSL::SparseMatrix<double> A_copy( A_matrix );
    //cout << "A_copy.numel() = " << A_copy.numel() << endl;

    TSL::Vector<double> B_vector(N,0.435);
    TSL::Vector<double> X_vector(N,0.0);

    // Test SparseLU solve method
    cout << " * Solving using Eigen and SparseLU " << endl;
    TSL::Timer timer;
    timer.start();
    X_vector = A_matrix.solve( B_vector ); // SparseLU solve
    //cout << "B_vector = " << endl << B_vector << endl;
    //cout << "X_vector = " << endl << X_vector << endl;
    //cout << "X[N-1] = " << X_vector[ N - 1 ] << endl;
    //cout << "1 / N = " << 1.0 / N << endl;
    timer.print();
    timer.stop();

    // Test PETSc solve
    cout << " * Solving using PETSc " << endl;
    PetscInitialize(NULL,NULL,(char*)0,(char*)0);
#ifdef PETSC_Z
    // Convert to a complex matrix
    TSL::SparseMatrix< std::complex<double> > A_cmplx( N, N );
    for (size_t i = 0; i < N; ++i)
    {
        for (size_t j = 0; j < N; ++j)
        {
            A_cmplx( i, j ) = A_matrix( i, j );
        }
    }
    TSL::Vector< std::complex<double> > B_cmplx( N, 0.435 );
    TSL::PETScSparseLinearSystem< std::complex<double> > petsc_system( &A_cmplx, &B_cmplx );
#endif
#ifdef PETSC_D
    TSL::PETScSparseLinearSystem<double> petsc_system( &A_matrix, &B_vector );
#endif
    timer.start();
    try
    {
        petsc_system.solve();
    }
    catch ( const std::runtime_error& )
    {
        cout << " \033[1;31;48m * FAILED THROUGH EXCEPTION BEING RAISED \033[0m\n";
        return 1;
    }
    timer.print();
    timer.stop();

    // Compare the Eigen and PETSc solutions
    TSL::Vector<double> diff( N, 0.0 );
    for (size_t i = 0; i < N; ++i)
    {
#ifdef PETSC_Z
        diff[i] = X_vector[i] - B_cmplx[i].real();
#endif
#ifdef PETSC_D
        diff[i] = X_vector[i] - B_vector[i];
#endif
    }
    cout << "diff.norm_inf() = " << diff.norm_inf() << endl;

    //cout << "A_matrix( 10, 10 ) = " << A_matrix( 10, 10 ) << endl;
    //A_matrix.clear();
    //cout << "A_matrix( 10, 10 ) = " << A_matrix( 10, 10 ) << endl;
    //A_matrix.eye();
    //cout << "A_matrix( 10, 10 ) = " << A_matrix( 10, 10 ) << endl;
    //A_matrix.scale( 2.0 );
    //cout << "A_matrix( 10, 10 ) = " << A_matrix( 10, 10 ) << endl;
    //A_matrix.print();

    TSL::SparseMatrix<double> B_matrix(N,N);
    B_matrix = A_matrix;
    B_matrix = A_matrix * 2.0;
    B_matrix = 2.0 * A_matrix;
    //B_matrix.print();

    TSL::SparseMatrix<double> C_matrix(3,3);
    C_matrix(0,0) = 1.0; C_matrix(0,1) = -2.0; //C_matrix(0,2) = 3.0;
    C_matrix(1,0) = 5.0; C_matrix(1,1) = 8.0;  C_matrix(1,2) = -1.0;
    //C_matrix(2,0) = 2.0;
    C_matrix(2,1) = 1.0; C_matrix(2,2) = 1.0;
    //C_matrix.print();
    //C_matrix.output( "./C_matrix" );
    //B_matrix = C_matrix;
    //cout << "B_matrix = ";
    //B_matrix.print();

    TSL::SparseMatrix<double> D_matrix(N,N);

    Eigen::SparseMatrix<double, Eigen::ColMajor, long long> C_matrix_Eigen(3,3);
    C_matrix_Eigen = C_matrix.convert_to_Eigen();
    /*for (int k = 0; k < C_matrix_Eigen.outerSize(); ++k){
        for (Eigen::SparseMatrix<double, Eigen::ColMajor, long long>::InnerIterator it(C_matrix_Eigen, k); it; ++it){
            std::cout << it.row() << "\t";
            std::cout << it.col() << "\t";
            std::cout << it.value() << std::endl;
        }
    }*/
    Eigen::SparseLU< Eigen::SparseMatrix<double, Eigen::ColMajor, long long> > solver;
    solver.compute( C_matrix_Eigen );

    // Test complex sparse matrices
    TSL::SparseMatrix<std::complex<double>> E_matrix( 2, 2 );
    TSL::Vector<std::complex<double>> E_vector( 2, 0.0 );
    E_matrix( 0, 0 ) = std::complex<double>(1.0,1.0);
    E_matrix( 0, 1 ) = std::complex<double>(-1.0,0.0);
    E_matrix( 1, 0 ) = std::complex<double>(1.0,-1.0);
    E_matrix( 1, 1 ) = std::complex<double>(1.0,1.0);
    E_vector[ 0 ] = std::complex<double>(0.0,1.0);
    E_vector[ 1 ] = std::complex<double>(1.0,0.0);
    //E_matrix.print();
    TSL::Vector<std::complex<double>> E_sol( 2, 0.0 );
    E_sol = E_matrix.solve( E_vector );
    //cout << " E_sol = " << endl << E_sol << endl;

    cout << "FINISHED" << endl;
}