// =================================================== // Methods // =================================================== Int PreconditionerIfpack::buildPreconditioner( operator_type& matrix ) { M_operator = matrix->matrixPtr(); M_overlapLevel = this->M_list.get( "overlap level", -1 ); M_precType = this->M_list.get( "prectype", "Amesos" ); Ifpack factory; M_preconditioner.reset( factory.Create( M_precType, M_operator.get(), M_overlapLevel ) ); M_precType += "_Ifpack"; if ( !M_preconditioner.get() ) { ERROR_MSG( "Preconditioner not set, something went wrong in its computation\n" ); } IFPACK_CHK_ERR( M_preconditioner->SetParameters( this->M_list ) ); IFPACK_CHK_ERR( M_preconditioner->Initialize() ); IFPACK_CHK_ERR( M_preconditioner->Compute() ); this->M_preconditionerCreated = true; return ( EXIT_SUCCESS ); }
void IfpackSmoother::Setup(Level ¤tLevel) { FactoryMonitor m(*this, "Setup Smoother", currentLevel); if (SmootherPrototype::IsSetup() == true) GetOStream(Warnings0, 0) << "Warning: MueLu::IfpackSmoother::Setup(): Setup() has already been called"; A_ = Factory::Get< RCP<Matrix> >(currentLevel, "A"); double lambdaMax = -1.0; if (type_ == "Chebyshev") { std::string maxEigString = "chebyshev: max eigenvalue"; std::string eigRatioString = "chebyshev: ratio eigenvalue"; try { lambdaMax = Teuchos::getValue<Scalar>(this->GetParameter(maxEigString)); this->GetOStream(Statistics1, 0) << maxEigString << " (cached with smoother parameter list) = " << lambdaMax << std::endl; } catch (Teuchos::Exceptions::InvalidParameterName) { lambdaMax = A_->GetMaxEigenvalueEstimate(); if (lambdaMax != -1.0) { this->GetOStream(Statistics1, 0) << maxEigString << " (cached with matrix) = " << lambdaMax << std::endl; this->SetParameter(maxEigString, ParameterEntry(lambdaMax)); } } // Calculate the eigenvalue ratio const Scalar defaultEigRatio = 20; Scalar ratio = defaultEigRatio; try { ratio = Teuchos::getValue<Scalar>(this->GetParameter(eigRatioString)); } catch (Teuchos::Exceptions::InvalidParameterName) { this->SetParameter(eigRatioString, ParameterEntry(ratio)); } if (currentLevel.GetLevelID()) { // Update ratio to be // ratio = max(number of fine DOFs / number of coarse DOFs, defaultValue) // // NOTE: We don't need to request previous level matrix as we know for sure it was constructed RCP<const Matrix> fineA = currentLevel.GetPreviousLevel()->Get<RCP<Matrix> >("A"); size_t nRowsFine = fineA->getGlobalNumRows(); size_t nRowsCoarse = A_->getGlobalNumRows(); ratio = std::max(ratio, as<Scalar>(nRowsFine)/nRowsCoarse); this->GetOStream(Statistics1, 0) << eigRatioString << " (computed) = " << ratio << std::endl; this->SetParameter(eigRatioString, ParameterEntry(ratio)); } } RCP<Epetra_CrsMatrix> epA = Utils::Op2NonConstEpetraCrs(A_); Ifpack factory; prec_ = rcp(factory.Create(type_, &(*epA), 
overlap_)); TEUCHOS_TEST_FOR_EXCEPTION(prec_.is_null(), Exceptions::RuntimeError, "Could not create an Ifpack preconditioner with type = \"" << type_ << "\""); SetPrecParameters(); prec_->Compute(); SmootherPrototype::IsSetup(true); if (type_ == "Chebyshev" && lambdaMax == -1.0) { Teuchos::RCP<Ifpack_Chebyshev> chebyPrec = rcp_dynamic_cast<Ifpack_Chebyshev>(prec_); if (chebyPrec != Teuchos::null) { lambdaMax = chebyPrec->GetLambdaMax(); A_->SetMaxEigenvalueEstimate(lambdaMax); this->GetOStream(Statistics1, 0) << "chebyshev: max eigenvalue (calculated by Ifpack)" << " = " << lambdaMax << std::endl; } TEUCHOS_TEST_FOR_EXCEPTION(lambdaMax == -1.0, Exceptions::RuntimeError, "MueLu::IfpackSmoother::Setup(): no maximum eigenvalue estimate"); } this->GetOStream(Statistics0, 0) << description() << std::endl; }
// Driver: read a linear system from file and solve it with Belos Flexible
// Block GMRES, right-preconditioned by an Ifpack ILU factorization.
// Returns EXIT_SUCCESS only if the solver converges and all normalized
// residuals are below the requested tolerance.
int main(int argc, char *argv[]) {
  //
  int MyPID = 0;
#ifdef EPETRA_MPI
  // Initialize MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
  MyPID = Comm.MyPID();
#else
  Epetra_SerialComm Comm;
#endif
  //
  // Shorthand types for the Belos traits interfaces (Epetra backend).
  typedef double ST;
  typedef Teuchos::ScalarTraits<ST> SCT;
  typedef SCT::magnitudeType MT;
  typedef Epetra_MultiVector MV;
  typedef Epetra_Operator OP;
  typedef Belos::MultiVecTraits<ST,MV> MVT;
  typedef Belos::OperatorTraits<ST,MV,OP> OPT;

  using Teuchos::ParameterList;
  using Teuchos::RCP;
  using Teuchos::rcp;

  bool success = false;
  bool verbose = false;
  try {
    bool proc_verbose = false;
    int frequency = -1;    // frequency of status test output.
    int blocksize = 1;     // blocksize
    int numrhs = 1;        // number of right-hand sides to solve for
    int maxrestarts = 15;  // maximum number of restarts allowed
    int maxiters = -1;     // maximum number of iterations allowed per linear system
    int maxsubspace = 25;  // maximum number of blocks the solver can use for the subspace
    std::string filename("orsirr1.hb");
    MT tol = 1.0e-5;       // relative residual tolerance

    // Specify whether to use RHS as initial guess. If false, use zero.
    bool useRHSAsInitialGuess = false;

    // Command-line options override the defaults above.
    Teuchos::CommandLineProcessor cmdp(false,true);
    cmdp.setOption("verbose","quiet",&verbose,"Print messages and results.");
    cmdp.setOption("use-rhs","use-zero",&useRHSAsInitialGuess,"Use RHS as initial guess.");
    cmdp.setOption("frequency",&frequency,"Solvers frequency for printing residuals (#iters).");
    cmdp.setOption("filename",&filename,"Filename for test matrix. Acceptable file extensions: *.hb,*.mtx,*.triU,*.triS");
    cmdp.setOption("tol",&tol,"Relative residual tolerance used by GMRES solver.");
    cmdp.setOption("num-rhs",&numrhs,"Number of right-hand sides to be solved for.");
    cmdp.setOption("block-size",&blocksize,"Block size used by GMRES.");
    cmdp.setOption("max-iters",&maxiters,"Maximum number of iterations per linear system (-1 = adapted to problem/block size).");
    cmdp.setOption("max-subspace",&maxsubspace,"Maximum number of blocks the solver can use for the subspace.");
    cmdp.setOption("max-restarts",&maxrestarts,"Maximum number of restarts allowed for GMRES solver.");
    if (cmdp.parse(argc,argv) != Teuchos::CommandLineProcessor::PARSE_SUCCESSFUL) {
      return EXIT_FAILURE;
    }
    if (!verbose)
      frequency = -1;  // reset frequency if test is not verbose
    //
    // *************Get the problem*********************
    //
    RCP<Epetra_Map> Map;
    RCP<Epetra_CrsMatrix> A;
    RCP<Epetra_MultiVector> B, X;
    RCP<Epetra_Vector> vecB, vecX;
    EpetraExt::readEpetraLinearSystem(filename, Comm, &A, &Map, &vecX, &vecB);
    A->OptimizeStorage();
    proc_verbose = verbose && (MyPID==0);  /* Only print on the zero processor */

    // Check to see if the number of right-hand sides is the same as requested.
    if (numrhs>1) {
      // Multiple RHS: build a random exact solution, form a consistent
      // right-hand side B = A*X, then zero the initial guess.
      X = rcp( new Epetra_MultiVector( *Map, numrhs ) );
      B = rcp( new Epetra_MultiVector( *Map, numrhs ) );
      X->Seed();
      X->Random();
      OPT::Apply( *A, *X, *B );
      X->PutScalar( 0.0 );
    }
    else {
      // Single RHS: use the vectors read from file directly.
      X = Teuchos::rcp_implicit_cast<Epetra_MultiVector>(vecX);
      B = Teuchos::rcp_implicit_cast<Epetra_MultiVector>(vecB);
    }

    // If requested, use a copy of B as initial guess
    if (useRHSAsInitialGuess) {
      X->Update(1.0, *B, 0.0);
    }
    //
    // ************Construct preconditioner*************
    //
    ParameterList ifpackList;

    // allocates an IFPACK factory. No data is associated
    // to this object (only method Create()).
    Ifpack Factory;

    // create the preconditioner. For valid PrecType values,
    // please check the documentation
    std::string PrecType = "ILU"; // incomplete LU
    int OverlapLevel = 1; // must be >= 0. If Comm.NumProc() == 1,
                          // it is ignored.

    RCP<Ifpack_Preconditioner> Prec = Teuchos::rcp( Factory.Create(PrecType, &*A, OverlapLevel) );
    // NOTE(review): assert() disappears under NDEBUG, after which a null
    // Prec would be dereferenced below — consider an explicit check.
    assert(Prec != Teuchos::null);

    // specify parameters for ILU
    ifpackList.set("fact: level-of-fill", 1);
    // the combine mode is on the following:
    // "Add", "Zero", "Insert", "InsertAdd", "Average", "AbsMax"
    // Their meaning is as defined in file Epetra_CombineMode.h
    ifpackList.set("schwarz: combine mode", "Add");

    // sets the parameters
    IFPACK_CHK_ERR(Prec->SetParameters(ifpackList));

    // initialize the preconditioner. At this point the matrix must
    // have been FillComplete()'d, but actual values are ignored.
    IFPACK_CHK_ERR(Prec->Initialize());

    // Builds the preconditioners, by looking for the values of
    // the matrix.
    IFPACK_CHK_ERR(Prec->Compute());

    // Create the Belos preconditioned operator from the Ifpack preconditioner.
    // NOTE: This is necessary because Belos expects an operator to apply the
    // preconditioner with Apply() NOT ApplyInverse().
    RCP<Belos::EpetraPrecOp> belosPrec = rcp( new Belos::EpetraPrecOp( Prec ) );
    //
    // *****Create parameter list for the block GMRES solver manager*****
    //
    const int NumGlobalElements = B->GlobalLength();
    if (maxiters == -1)
      maxiters = NumGlobalElements/blocksize - 1; // maximum number of iterations to run
    //
    ParameterList belosList;
    belosList.set( "Flexible Gmres", true );          // Flexible Gmres will be used to solve this problem
    belosList.set( "Num Blocks", maxsubspace );       // Maximum number of blocks in Krylov factorization
    belosList.set( "Block Size", blocksize );         // Blocksize to be used by iterative solver
    belosList.set( "Maximum Iterations", maxiters );  // Maximum number of iterations allowed
    belosList.set( "Maximum Restarts", maxrestarts ); // Maximum number of restarts allowed
    belosList.set( "Convergence Tolerance", tol );    // Relative convergence tolerance requested
    if (numrhs > 1) {
      belosList.set( "Show Maximum Residual Norm Only", true );  // Show only the maximum residual norm
    }
    if (verbose) {
      belosList.set( "Verbosity", Belos::Errors + Belos::Warnings + Belos::TimingDetails + Belos::StatusTestDetails );
      if (frequency > 0)
        belosList.set( "Output Frequency", frequency );
    }
    else
      belosList.set( "Verbosity", Belos::Errors + Belos::Warnings );
    //
    // *******Construct a preconditioned linear problem********
    //
    RCP<Belos::LinearProblem<double,MV,OP> > problem
      = rcp( new Belos::LinearProblem<double,MV,OP>( A, X, B ) );
    // The Ifpack preconditioner is applied from the right.
    problem->setRightPrec( belosPrec );

    bool set = problem->setProblem();
    if (set == false) {
      if (proc_verbose)
        std::cout << std::endl << "ERROR: Belos::LinearProblem failed to set up correctly!" << std::endl;
      return EXIT_FAILURE;
    }

    // Create an iterative solver manager.
    RCP< Belos::SolverManager<double,MV,OP> > solver
      = rcp( new Belos::BlockGmresSolMgr<double,MV,OP>(problem, rcp(&belosList,false)));
    //
    // *******************************************************************
    // *************Start the block Gmres iteration*************************
    // *******************************************************************
    //
    if (proc_verbose) {
      std::cout << std::endl << std::endl;
      std::cout << "Dimension of matrix: " << NumGlobalElements << std::endl;
      std::cout << "Number of right-hand sides: " << numrhs << std::endl;
      std::cout << "Block size used by solver: " << blocksize << std::endl;
      std::cout << "Number of restarts allowed: " << maxrestarts << std::endl;
      std::cout << "Max number of Gmres iterations per restart cycle: " << maxiters << std::endl;
      std::cout << "Relative residual tolerance: " << tol << std::endl;
      std::cout << std::endl;
    }
    //
    // Perform solve
    //
    Belos::ReturnType ret = solver->solve();
    //
    // Compute actual residuals (r = B - A*X) and normalize by ||B||.
    //
    bool badRes = false;
    std::vector<double> actual_resids( numrhs );
    std::vector<double> rhs_norm( numrhs );
    Epetra_MultiVector resid(*Map, numrhs);
    OPT::Apply( *A, *X, resid );
    MVT::MvAddMv( -1.0, resid, 1.0, *B, resid );
    MVT::MvNorm( resid, actual_resids );
    MVT::MvNorm( *B, rhs_norm );
    if (proc_verbose) {
      std::cout<< "---------- Actual Residuals (normalized) ----------"<<std::endl<<std::endl;
      for ( int i=0; i<numrhs; i++) {
        double actRes = actual_resids[i]/rhs_norm[i];
        std::cout<<"Problem "<<i<<" : \t"<< actRes <<std::endl;
        if (actRes > tol) badRes = true;
      }
    }

    // NOTE(review): badRes is only updated inside the proc_verbose block,
    // so the residual check is skipped in quiet runs — confirm intent.
    success = ret==Belos::Converged && !badRes;
    if (success) {
      if (proc_verbose)
        std::cout << "End Result: TEST PASSED" << std::endl;
    } else {
      if (proc_verbose)
        std::cout << "End Result: TEST FAILED" << std::endl;
    }
  }
  TEUCHOS_STANDARD_CATCH_STATEMENTS(verbose, std::cerr, success);

#ifdef EPETRA_MPI
  MPI_Finalize();
#endif

  return ( success ? EXIT_SUCCESS : EXIT_FAILURE );
}
int main(int argc, char *argv[]) { // int MyPID = 0; #ifdef EPETRA_MPI // Initialize MPI MPI_Init(&argc,&argv); Epetra_MpiComm Comm(MPI_COMM_WORLD); MyPID = Comm.MyPID(); #else Epetra_SerialComm Comm; #endif // typedef double ST; typedef Teuchos::ScalarTraits<ST> SCT; typedef SCT::magnitudeType MT; typedef Epetra_MultiVector MV; typedef Epetra_Operator OP; typedef Belos::MultiVecTraits<ST,MV> MVT; typedef Belos::OperatorTraits<ST,MV,OP> OPT; using Teuchos::ParameterList; using Teuchos::RCP; using Teuchos::rcp; bool verbose = false, debug = false, proc_verbose = false, strict_conv = false; int frequency = -1; // frequency of status test output. int blocksize = 1; // blocksize int numrhs = 1; // number of right-hand sides to solve for int maxiters = -1; // maximum number of iterations allowed per linear system int maxsubspace = 50; // maximum number of blocks the solver can use for the subspace int maxrestarts = 15; // number of restarts allowed std::string filename("orsirr1.hb"); std::string precond("none"); MT tol = 1.0e-5; // relative residual tolerance MT polytol = tol/10; // relative residual tolerance for polynomial construction Teuchos::CommandLineProcessor cmdp(false,true); cmdp.setOption("verbose","quiet",&verbose,"Print messages and results."); cmdp.setOption("debug","nondebug",&debug,"Print debugging information from solver."); cmdp.setOption("strict-conv","not-strict-conv",&strict_conv,"Require solver to strictly adhere to convergence tolerance."); cmdp.setOption("frequency",&frequency,"Solvers frequency for printing residuals (#iters)."); cmdp.setOption("filename",&filename,"Filename for test matrix. 
Acceptable file extensions: *.hb,*.mtx,*.triU,*.triS"); cmdp.setOption("precond",&precond,"Preconditioning type (none, left, right)."); cmdp.setOption("tol",&tol,"Relative residual tolerance used by GMRES solver."); cmdp.setOption("poly-tol",&polytol,"Relative residual tolerance used to construct the GMRES polynomial."); cmdp.setOption("num-rhs",&numrhs,"Number of right-hand sides to be solved for."); cmdp.setOption("block-size",&blocksize,"Block size used by GMRES."); cmdp.setOption("max-iters",&maxiters,"Maximum number of iterations per linear system (-1 = adapted to problem/block size)."); cmdp.setOption("max-subspace",&maxsubspace,"Maximum number of blocks the solver can use for the subspace."); cmdp.setOption("max-restarts",&maxrestarts,"Maximum number of restarts allowed for GMRES solver."); if (cmdp.parse(argc,argv) != Teuchos::CommandLineProcessor::PARSE_SUCCESSFUL) { return -1; } if (!verbose) frequency = -1; // reset frequency if test is not verbose // // Get the problem // RCP<Epetra_Map> Map; RCP<Epetra_CrsMatrix> A; RCP<Epetra_MultiVector> B, X; RCP<Epetra_Vector> vecB, vecX; EpetraExt::readEpetraLinearSystem(filename, Comm, &A, &Map, &vecX, &vecB); A->OptimizeStorage(); proc_verbose = verbose && (MyPID==0); /* Only print on the zero processor */ // Check to see if the number of right-hand sides is the same as requested. if (numrhs>1) { X = rcp( new Epetra_MultiVector( *Map, numrhs ) ); B = rcp( new Epetra_MultiVector( *Map, numrhs ) ); X->Seed(); X->Random(); OPT::Apply( *A, *X, *B ); X->PutScalar( 0.0 ); } else { X = Teuchos::rcp_implicit_cast<Epetra_MultiVector>(vecX); B = Teuchos::rcp_implicit_cast<Epetra_MultiVector>(vecB); } // // ************Construct preconditioner************* // RCP<Belos::EpetraPrecOp> belosPrec; if (precond != "none") { ParameterList ifpackList; // allocates an IFPACK factory. No data is associated // to this object (only method Create()). Ifpack Factory; // create the preconditioner. 
For valid PrecType values, // please check the documentation std::string PrecType = "ILU"; // incomplete LU int OverlapLevel = 1; // must be >= 0. If Comm.NumProc() == 1, // it is ignored. RCP<Ifpack_Preconditioner> Prec = Teuchos::rcp( Factory.Create(PrecType, &*A, OverlapLevel) ); assert(Prec != Teuchos::null); // specify parameters for ILU ifpackList.set("fact: drop tolerance", 1e-9); ifpackList.set("fact: ilut level-of-fill", 1.0); // the combine mode is on the following: // "Add", "Zero", "Insert", "InsertAdd", "Average", "AbsMax" // Their meaning is as defined in file Epetra_CombineMode.h ifpackList.set("schwarz: combine mode", "Add"); // sets the parameters IFPACK_CHK_ERR(Prec->SetParameters(ifpackList)); // initialize the preconditioner. At this point the matrix must // have been FillComplete()'d, but actual values are ignored. IFPACK_CHK_ERR(Prec->Initialize()); // Builds the preconditioners, by looking for the values of // the matrix. IFPACK_CHK_ERR(Prec->Compute()); // Create the Belos preconditioned operator from the Ifpack preconditioner. // NOTE: This is necessary because Belos expects an operator to apply the // preconditioner with Apply() NOT ApplyInverse(). 
belosPrec = rcp( new Belos::EpetraPrecOp( Prec ) ); } // // ********Other information used by block solver*********** // *****************(can be user specified)****************** // const int NumGlobalElements = B->GlobalLength(); if (maxiters == -1) maxiters = NumGlobalElements/blocksize - 1; // maximum number of iterations to run // ParameterList belosList; belosList.set( "Num Blocks", maxsubspace); // Maximum number of blocks in Krylov factorization belosList.set( "Block Size", blocksize ); // Blocksize to be used by iterative solver belosList.set( "Maximum Iterations", maxiters ); // Maximum number of iterations allowed belosList.set( "Maximum Restarts", maxrestarts ); // Maximum number of restarts allowed belosList.set( "Convergence Tolerance", tol ); // Relative convergence tolerance requested belosList.set( "Polynomial Tolerance", polytol ); // Polynomial convergence tolerance requested belosList.set( "Strict Convergence", strict_conv ); // Whether solver must strictly reach requested tolerance int verbosity = Belos::Errors + Belos::Warnings; if (verbose) { verbosity += Belos::TimingDetails + Belos::StatusTestDetails; if (frequency > 0) belosList.set( "Output Frequency", frequency ); } if (debug) { verbosity += Belos::Debug; } belosList.set( "Verbosity", verbosity ); // // Construct an unpreconditioned linear problem instance. // Belos::LinearProblem<double,MV,OP> problem( A, X, B ); if (precond == "left") { problem.setLeftPrec( belosPrec ); } else if (precond == "right") { problem.setRightPrec( belosPrec ); } bool set = problem.setProblem(); if (set == false) { if (proc_verbose) std::cout << std::endl << "ERROR: Belos::LinearProblem failed to set up correctly!" 
<< std::endl; return -1; } // // ******************************************************************* // *************Start the block Gmres iteration************************* // ******************************************************************* // Belos::OutputManager<double> My_OM(); // Create an iterative solver manager. RCP< Belos::SolverManager<double,MV,OP> > newSolver //= rcp( new Belos::BlockGmresSolMgr<double,MV,OP>(rcp(&problem,false), rcp(&belosList,false))); = rcp( new Belos::GmresPolySolMgr<double,MV,OP>(rcp(&problem,false), rcp(&belosList,false))); // // **********Print out information about problem******************* // if (proc_verbose) { std::cout << std::endl << std::endl; std::cout << "Dimension of matrix: " << NumGlobalElements << std::endl; std::cout << "Number of right-hand sides: " << numrhs << std::endl; std::cout << "Block size used by solver: " << blocksize << std::endl; std::cout << "Max number of restarts allowed: " << maxrestarts << std::endl; std::cout << "Max number of Gmres iterations per restart cycle: " << maxiters << std::endl; std::cout << "Relative residual tolerance: " << tol << std::endl; std::cout << std::endl; } // // Perform solve // Belos::ReturnType ret = newSolver->solve(); // // Compute actual residuals. 
// bool badRes = false; std::vector<double> actual_resids( numrhs ); std::vector<double> rhs_norm( numrhs ); Epetra_MultiVector resid(*Map, numrhs); OPT::Apply( *A, *X, resid ); MVT::MvAddMv( -1.0, resid, 1.0, *B, resid ); MVT::MvNorm( resid, actual_resids ); MVT::MvNorm( *B, rhs_norm ); if (proc_verbose) { std::cout<< "---------- Actual Residuals (normalized) ----------"<<std::endl<<std::endl; for ( int i=0; i<numrhs; i++) { double actRes = actual_resids[i]/rhs_norm[i]; std::cout<<"Problem "<<i<<" : \t"<< actRes <<std::endl; if (actRes > tol) badRes = true; } } #ifdef EPETRA_MPI MPI_Finalize(); #endif if ((ret!=Belos::Converged || badRes) && strict_conv) { if (proc_verbose) std::cout << std::endl << "ERROR: Belos did not converge!" << std::endl; return -1; } // // Default return value // if (proc_verbose) std::cout << std::endl << "SUCCESS: Belos converged!" << std::endl; return 0; // }
// Driver: read a linear system from file, build a known solution and a
// consistent right-hand side, and solve with the Belos LSQR solver manager,
// left- or right-preconditioned by an Ifpack ILU factorization.  LSQR also
// applies the transposed operator, so the preconditioner's transpose-apply
// support is probed up front.
int main(int argc, char *argv[]) {
  //
  int MyPID = 0;
#ifdef EPETRA_MPI
  // Initialize MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
  MyPID = Comm.MyPID();
#else
  Epetra_SerialComm Comm;
#endif
  //
  // Shorthand types for the Belos traits interfaces (Epetra backend).
  typedef double ST;
  typedef Teuchos::ScalarTraits<ST> SCT;
  typedef SCT::magnitudeType MT;
  typedef Epetra_MultiVector MV;
  typedef Epetra_Operator OP;
  typedef Belos::MultiVecTraits<ST,MV> MVT;
  typedef Belos::OperatorTraits<ST,MV,OP> OPT;

  using Teuchos::ParameterList;
  using Teuchos::RCP;
  using Teuchos::rcp;

  bool verbose = false;
  bool success = true;
  try {
    bool proc_verbose = false;
    bool leftprec = true;  // left preconditioning or right.
    // LSQR applies the operator and the transposed operator.
    // A preconditioner must support transpose multiply.
    int frequency = -1;  // frequency of status test output.
    int blocksize = 1;   // blocksize
    // LSQR as currently implemented is a single vector algorithm.
    // However some of the parameters that would be used by a block version
    // have not been removed from this file.
    int numrhs = 1;      // number of right-hand sides to solve for
    int maxiters = -1;   // maximum number of iterations allowed per linear system
    std::string filename("orsirr1_scaled.hb");
    MT relResTol = 1.0e-5;     // relative residual tolerance for the preconditioned linear system
    MT resGrowthFactor = 1.0;  // In this example, warn if |resid| > resGrowthFactor * relResTol
    MT relMatTol = 1.e-10;     // relative Matrix error, default value sqrt(eps)
    MT maxCond = 1.e+5;        // maximum condition number default value 1/eps
    MT damp = 0.;              // regularization (or damping) parameter

    // Command-line options override the defaults above.
    Teuchos::CommandLineProcessor cmdp(false,true);
    cmdp.setOption("verbose","quiet",&verbose,"Print messages and results.");
    cmdp.setOption("left-prec","right-prec",&leftprec,"Left preconditioning or right.");
    cmdp.setOption("frequency",&frequency,"Solvers frequency for printing residuals (#iters).");
    cmdp.setOption("filename",&filename,"Filename for test matrix. Acceptable file extensions: *.hb,*.mtx,*.triU,*.triS");
    cmdp.setOption("lambda",&damp,"Regularization parameter");
    cmdp.setOption("tol",&relResTol,"Relative residual tolerance");
    cmdp.setOption("matrixTol",&relMatTol,"Relative error in Matrix");
    cmdp.setOption("max-cond",&maxCond,"Maximum condition number");
    cmdp.setOption("num-rhs",&numrhs,"Number of right-hand sides to be solved for.");
    cmdp.setOption("block-size",&blocksize,"Block size used by LSQR.");
    cmdp.setOption("max-iters",&maxiters,"Maximum number of iterations per linear system (-1 = adapted to problem/block size).");
    if (cmdp.parse(argc,argv) != Teuchos::CommandLineProcessor::PARSE_SUCCESSFUL) {
      return -1;
    }
    if (!verbose)
      frequency = -1;  // reset frequency if test is not verbose
    //
    // *************Get the problem*********************
    //
    RCP<Epetra_Map> Map;
    RCP<Epetra_CrsMatrix> A;
    RCP<Epetra_MultiVector> B, X;
    RCP<Epetra_Vector> vecB, vecX;
    EpetraExt::readEpetraLinearSystem(filename, Comm, &A, &Map, &vecX, &vecB);
    A->OptimizeStorage();
    proc_verbose = verbose && (MyPID==0);  /* Only print on the zero processor */

    // Check to see if the number of right-hand sides is the same as requested.
    if (numrhs>1) {
      // Multiple RHS: random exact solution, consistent B = A*X, zero guess.
      X = rcp( new Epetra_MultiVector( *Map, numrhs ) );
      B = rcp( new Epetra_MultiVector( *Map, numrhs ) );
      X->Random();
      OPT::Apply( *A, *X, *B );
      X->PutScalar( 0.0 );
    }
    else {
      // Single RHS: overwrite the vector read from file with a known
      // solution x[gid] = globNumCol - 1 - gid, then form b = A*x.
      int locNumCol = Map->MaxLID() + 1; // Create a known solution
      int globNumCol = Map->MaxAllGID() + 1;
      for( int li = 0; li < locNumCol; li++){ // assume consecutive lid
        int gid = Map->GID(li);
        double value = (double) ( globNumCol -1 - gid );
        int numEntries = 1;
        vecX->ReplaceGlobalValues( numEntries, &value, &gid );
      }
      bool Trans = false;
      A->Multiply( Trans, *vecX, *vecB ); // Create a consistent linear system
      // At this point, the initial guess is exact.
      bool zeroInitGuess = false; // annihilate initial guess
      bool goodInitGuess = true; // initial guess near solution
      if( zeroInitGuess ) {
        vecX->PutScalar( 0.0 );
      }
      else {
        if( goodInitGuess ) {
          // Perturb the exact solution slightly so the solver has work to do.
          double value = 1.e-2; // "Rel RHS Err" and "Rel Mat Err" apply to the residual equation,
          int numEntries = 1;   // norm( b - A x_k ) ?<? relResTol norm( b- Axo).
          int index = 0;        // norm(b) is inaccessible to LSQR.
          vecX->SumIntoMyValues( numEntries, &value, &index);
        }
      }
      X = Teuchos::rcp_implicit_cast<Epetra_MultiVector>(vecX);
      B = Teuchos::rcp_implicit_cast<Epetra_MultiVector>(vecB);
    }
    //
    // ************Construct preconditioner*************
    //
    ParameterList ifpackList;

    // allocates an IFPACK factory. No data is associated
    // to this object (only method Create()).
    Ifpack Factory; // do support transpose multiply

    // create the preconditioner. For valid PrecType values,
    // please check the documentation
    std::string PrecType = "ILU"; // incomplete LU
    int OverlapLevel = 1; // nonnegative

    RCP<Ifpack_Preconditioner> Prec = Teuchos::rcp( Factory.Create(PrecType, &*A, OverlapLevel) );
    // NOTE(review): assert() disappears under NDEBUG, after which a null
    // Prec would be dereferenced below — consider an explicit check.
    assert(Prec != Teuchos::null);

    // specify parameters for ILU
    ifpackList.set("fact: level-of-fill", 1);
    // the combine mode is on the following:
    // "Add", "Zero", "Insert", "InsertAdd", "Average", "AbsMax"
    // Their meaning is as defined in file Epetra_CombineMode.h
    ifpackList.set("schwarz: combine mode", "Add");

    // sets the parameters
    IFPACK_CHK_ERR(Prec->SetParameters(ifpackList));

    // initialize the preconditioner. At this point the matrix must
    // have been FillComplete()'d, but actual values are ignored.
    IFPACK_CHK_ERR(Prec->Initialize());

    // Builds the preconditioners, by looking for the values of
    // the matrix.
    IFPACK_CHK_ERR(Prec->Compute());

    // Probe transpose support: LSQR needs it, so fail loudly if absent.
    {
      const int errcode = Prec->SetUseTranspose (true);
      if (errcode != 0) {
        throw std::logic_error ("Oh hai! Ifpack_Preconditioner doesn't know how to apply its transpose.");
      } else {
        (void) Prec->SetUseTranspose (false);
      }
    }

    // Create the Belos preconditioned operator from the Ifpack preconditioner.
    // NOTE: This is necessary because Belos expects an operator to apply the
    // preconditioner with Apply() NOT ApplyInverse().
    RCP<Belos::EpetraPrecOp> belosPrec = rcp( new Belos::EpetraPrecOp( Prec ) );
    //
    // *****Create parameter list for the LSQR solver manager*****
    //
    const int NumGlobalElements = B->GlobalLength();
    if (maxiters == -1)
      maxiters = NumGlobalElements/blocksize - 1; // maximum number of iterations to run
    //
    ParameterList belosList;
    belosList.set( "Block Size", blocksize );       // Blocksize to be used by iterative solver
    belosList.set( "Lambda", damp );                // Regularization parameter
    belosList.set( "Rel RHS Err", relResTol );      // Relative convergence tolerance requested
    belosList.set( "Rel Mat Err", relMatTol );      // Maximum number of restarts allowed
    belosList.set( "Condition Limit", maxCond);     // upper bound for cond(A)
    belosList.set( "Maximum Iterations", maxiters );// Maximum number of iterations allowed
    if (numrhs > 1) {
      belosList.set( "Show Maximum Residual Norm Only", true );  // Show only the maximum residual norm
    }
    if (verbose) {
      belosList.set( "Verbosity", Belos::Errors + Belos::Warnings + Belos::TimingDetails + Belos::StatusTestDetails );
      if (frequency > 0)
        belosList.set( "Output Frequency", frequency );
    }
    else
      belosList.set( "Verbosity", Belos::Errors + Belos::Warnings );
    //
    // *******Construct a preconditioned linear problem********
    //
    RCP<Belos::LinearProblem<double,MV,OP> > problem
      = rcp( new Belos::LinearProblem<double,MV,OP>( A, X, B ) );
    if (leftprec) {
      problem->setLeftPrec( belosPrec );
    }
    else {
      problem->setRightPrec( belosPrec );
    }
    bool set = problem->setProblem();
    if (set == false) {
      if (proc_verbose)
        std::cout << std::endl << "ERROR: Belos::LinearProblem failed to set up correctly!" << std::endl;
      return -1;
    }

    // Create an iterative solver manager.
    RCP< Belos::LSQRSolMgr<double,MV,OP> > solver
      = rcp( new Belos::LSQRSolMgr<double,MV,OP>(problem, rcp(&belosList,false)));
    //
    // *******************************************************************
    // ******************Start the LSQR iteration*************************
    // *******************************************************************
    //
    if (proc_verbose) {
      std::cout << std::endl << std::endl;
      std::cout << "Dimension of matrix: " << NumGlobalElements << std::endl;
      std::cout << "Number of right-hand sides: " << numrhs << std::endl;
      std::cout << "Block size used by solver: " << blocksize << std::endl;
      std::cout << "Max number of Gmres iterations per restart cycle: " << maxiters << std::endl;
      std::cout << "Relative residual tolerance: " << relResTol << std::endl;
      std::cout << std::endl;
      std::cout << "Solver's Description: " << std::endl;
      std::cout << solver->description() << std::endl; // visually verify the parameter list
    }
    //
    // Perform solve
    //
    Belos::ReturnType ret = solver->solve();
    //
    // Get the number of iterations for this solve.
    //
    std::vector<double> solNorm( numrhs );      // get solution norm
    MVT::MvNorm( *X, solNorm );
    int numIters = solver->getNumIters();
    MT condNum = solver->getMatCondNum();
    MT matrixNorm= solver->getMatNorm();
    MT resNorm = solver->getResNorm();
    MT lsResNorm = solver->getMatResNorm();

    if (proc_verbose)
      std::cout << "Number of iterations performed for this solve: " << numIters << std::endl
        << "matrix condition number: " << condNum << std::endl
        << "matrix norm: " << matrixNorm << std::endl
        << "residual norm: " << resNorm << std::endl
        << "solution norm: " << solNorm[0] << std::endl
        << "least squares residual Norm: " << lsResNorm << std::endl;
    //
    // Compute actual residuals (r = B - A*X) and normalize by ||B||.
    //
    bool badRes = false;
    std::vector<double> actual_resids( numrhs );
    std::vector<double> rhs_norm( numrhs );
    Epetra_MultiVector resid(*Map, numrhs);
    OPT::Apply( *A, *X, resid );
    MVT::MvAddMv( -1.0, resid, 1.0, *B, resid );
    MVT::MvNorm( resid, actual_resids );
    MVT::MvNorm( *B, rhs_norm );
    if (proc_verbose) {
      std::cout<< "---------- Actual Residuals (normalized) ----------"<<std::endl<<std::endl;
      for ( int i=0; i<numrhs; i++) {
        double actRes = actual_resids[i]/rhs_norm[i];
        std::cout<<"Problem "<<i<<" : \t"<< actRes <<std::endl;
        if (actRes > relResTol * resGrowthFactor ) badRes = true;
      }
    }

    if (ret!=Belos::Converged || badRes) {
      success = false;
      if (proc_verbose)
        std::cout << std::endl << "ERROR: Belos did not converge!" << std::endl;
    } else {
      success = true;
      if (proc_verbose)
        std::cout << std::endl << "SUCCESS: Belos converged!" << std::endl;
    }
  }
  TEUCHOS_STANDARD_CATCH_STATEMENTS(verbose, std::cerr, success);

#ifdef EPETRA_MPI
  MPI_Finalize();
#endif

  return success ? EXIT_SUCCESS : EXIT_FAILURE;
}
// Test driver: solves a 1D nonlinear PDE by a hand-rolled Newton loop,
// reusing a single Ifpack ILU preconditioner (built once from the initial
// Jacobian) across all Newton steps.  Passes iff the iteration converges
// and (in serial) the total linear-iteration count matches the recorded
// baseline of 157.
int main(int argc, char *argv[])
{
  // Initialize MPI
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
#endif

  // Create a communicator for Epetra objects
#ifdef HAVE_MPI
  Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
  Epetra_SerialComm Comm;
#endif

  // Get the process ID and the total number of processors
  int MyPID = Comm.MyPID();
  int NumProc = Comm.NumProc();

  // Check verbosity level: "-v" as the first argument enables it
  bool verbose = false;
  if (argc > 1)
    if (argv[1][0]=='-' && argv[1][1]=='v')
      verbose = true;

  // Get the number of elements from the command line.  The size argument is
  // argv[2] when "-v" was given, argv[1] otherwise; default is 101.
  int NumGlobalElements = 0;
  if ((argc > 2) && (verbose))
    NumGlobalElements = atoi(argv[2]) + 1;
  else if ((argc > 1) && (!verbose))
    NumGlobalElements = atoi(argv[1]) + 1;
  else
    NumGlobalElements = 101;

  // The number of unknowns must be at least equal to the
  // number of processors.
  if (NumGlobalElements < NumProc) {
    std::cout << "numGlobalBlocks = " << NumGlobalElements
              << " cannot be < number of processors = " << NumProc << std::endl;
    std::cout << "Test failed!" << std::endl;
    throw "NOX Error";
  }

  bool success = false;
  try {
    // Create the interface between NOX and the application.
    // This object is derived from NOX::Epetra::Interface.
    Teuchos::RCP<Interface> interface =
      Teuchos::rcp(new Interface(NumGlobalElements, Comm));

    // Set the PDE factor (for nonlinear forcing term). This could be specified
    // via user input.
    interface->setPDEfactor(1000.0);

    // Begin Nonlinear Solver ************************************

    // Create the top level parameter list
    // NOTE(review): IfpackParamsPtr is created but never used below — confirm
    // whether it was meant to be passed to the preconditioner.
    Teuchos::RCP<Teuchos::ParameterList> IfpackParamsPtr =
      Teuchos::rcp(new Teuchos::ParameterList);

    // Set the printing parameters in the "Printing" sublist
    Teuchos::ParameterList printParams;
    printParams.set("MyPID", MyPID);
    printParams.set("Output Precision", 3);
    printParams.set("Output Processor", 0);
    if (verbose)
      printParams.set("Output Information",
          NOX::Utils::OuterIteration +
          NOX::Utils::OuterIterationStatusTest +
          NOX::Utils::InnerIteration +
          NOX::Utils::LinearSolverDetails +
          NOX::Utils::Parameters +
          NOX::Utils::Details +
          NOX::Utils::Warning +
          NOX::Utils::Debug +
          NOX::Utils::TestDetails +
          NOX::Utils::Error);
    else
      printParams.set("Output Information",
          NOX::Utils::Error +
          NOX::Utils::TestDetails);

    // Create a print class for controlling output below
    NOX::Utils p(printParams);

    // *******************************
    // Setup Test Objects
    // *******************************

    // Create Linear Objects: get the vector from the Problem and clone it
    // for the right- and left-hand sides of the linear solves.
    if (verbose) p.out() << "Creating Vectors and Matrices" << std::endl;
    Teuchos::RCP<Epetra_Vector> solution_vec = interface->getSolution();
    Teuchos::RCP<Epetra_Vector> rhs_vec =
      Teuchos::rcp(new Epetra_Vector(*solution_vec));
    Teuchos::RCP<Epetra_Vector> lhs_vec =
      Teuchos::rcp(new Epetra_Vector(*solution_vec));
    Teuchos::RCP<Epetra_CrsMatrix> jacobian_matrix = interface->getJacobian();

    // Evaluate the initial residual F and Jacobian J at x = 1.
    // rhs_vec holds -F so the Newton system is J*dx = -F.
    if (verbose) p.out() << "Evaluating F and J" << std::endl;
    solution_vec->PutScalar(1.0);
    interface->computeF(*solution_vec, *rhs_vec);
    rhs_vec->Scale(-1.0);
    interface->computeJacobian(*solution_vec, *jacobian_matrix);

    double norm =0.0;
    rhs_vec->Norm2(&norm);
    if (verbose) p.out() << "Step 0, ||F|| = " << norm << std::endl;

    // Build the ILU preconditioner ONCE from the initial Jacobian; it is
    // deliberately never recomputed (this is the "reuse" being tested).
    if (verbose) p.out() << "Creating Ifpack preconditioner" << std::endl;
    Ifpack Factory;
    Teuchos::RCP<Ifpack_Preconditioner> PreconditionerPtr;
    PreconditionerPtr = Teuchos::rcp(Factory.Create("ILU",
          jacobian_matrix.get(),0));
    Teuchos::ParameterList teuchosParams;
    PreconditionerPtr->SetParameters(teuchosParams);
    PreconditionerPtr->Initialize();
    PreconditionerPtr->Compute();

    if (verbose) p.out() << "Creating Aztec Solver" << std::endl;
    Teuchos::RCP<AztecOO> aztecSolverPtr = Teuchos::rcp(new AztecOO());
    if (verbose)
      aztecSolverPtr->SetAztecOption(AZ_output, AZ_last);
    else
      aztecSolverPtr->SetAztecOption(AZ_output, AZ_none);

    // *******************************
    // Reuse Test
    // *******************************
    if (verbose) {
      p.out() << "**********************************************" << std::endl;
      p.out() << "Testing Newton solve with prec reuse" << std::endl;
      p.out() << "**********************************************" << std::endl;
    }

    int step_number = 0;
    int max_steps = 20;
    bool converged = false;
    int total_linear_iterations = 0;
    // Newton loop: each pass solves J*dx = -F with the *stale* preconditioner,
    // updates x += dx, and re-evaluates F and J.
    while (norm > 1.0e-8 && step_number < max_steps) {
      step_number++;
      if (verbose)
        p.out() << "Step " << step_number << ", ||F|| = " << norm << std::endl;
      aztecSolverPtr->SetUserMatrix(jacobian_matrix.get(), false);
      aztecSolverPtr->SetPrecOperator(PreconditionerPtr.get());
      aztecSolverPtr->SetRHS(rhs_vec.get());
      aztecSolverPtr->SetLHS(lhs_vec.get());
      aztecSolverPtr->Iterate(400, 1.0e-4);
      solution_vec->Update(1.0, *lhs_vec, 1.0);
      interface->computeF(*solution_vec, *rhs_vec);
      rhs_vec->Scale(-1.0);
      interface->computeJacobian(*solution_vec, *jacobian_matrix);
      rhs_vec->Norm2(&norm);
      total_linear_iterations += aztecSolverPtr->NumIters();
      if (norm < 1.0e-8)
        converged = true;
    }

    if (verbose) {
      p.out() << "Final Step " << step_number << ", ||F|| = " << norm << std::endl;
      if (converged)
        p.out() << "Converged!!" << std::endl;
      else
        p.out() << "Failed!!" << std::endl;
    }

    // Tests: status 1 = wrong serial iteration count, 2 = no convergence.
    int status = 0; // Converged
    if (verbose)
      p.out() << "Total Number of Linear Iterations = "
              << total_linear_iterations << std::endl;
    // The 157 baseline only holds in serial; parallel runs skip this check.
    if (Comm.NumProc() == 1 && total_linear_iterations != 157)
      status = 1;
    if (!converged)
      status = 2;
    success = converged && status == 0;

    // Summarize test results
    if (success)
      p.out() << "Test passed!" << std::endl;
    else
      p.out() << "Test failed!" << std::endl;
    if (verbose)
      p.out() << "Status = " << status << std::endl;
  }
  TEUCHOS_STANDARD_CATCH_STATEMENTS(verbose, std::cerr, success);

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return ( success ? EXIT_SUCCESS : EXIT_FAILURE );
}
int main(int argc, char *argv[]) { #ifdef HAVE_MPI MPI_Init(&argc,&argv); Epetra_MpiComm comm(MPI_COMM_WORLD); #else Epetra_SerialComm comm; #endif Galeri::core::Workspace::setNumDimensions(3); Galeri::grid::Loadable domain, boundary; int numGlobalElementsX = 2 * comm.NumProc(); int numGlobalElementsY = 2; int numGlobalElementsZ = 2; int mx = comm.NumProc(); int my = 1; int mz = 1; Galeri::grid::Generator:: getCubeWithHexs(comm, numGlobalElementsX, numGlobalElementsY, numGlobalElementsZ, mx, my, mz, domain, boundary); Epetra_Map matrixMap(domain.getNumGlobalVertices(), 0, comm); Epetra_FECrsMatrix A(Copy, matrixMap, 0); Epetra_FEVector LHS(matrixMap); Epetra_FEVector RHS(matrixMap); Galeri::problem::ScalarLaplacian<Laplacian> problem("Hex", 1, 8); problem.integrate(domain, A, RHS); LHS.PutScalar(0.0); problem.imposeDirichletBoundaryConditions(boundary, A, RHS, LHS); // ============================================================ // // Solving the linear system is the next step, using the IFPACK // // factory. This is done by using the IFPACK factory, then // // asking for IC preconditioner, and setting few parameters // // using a Teuchos::ParameterList. // // ============================================================ // Ifpack Factory; Ifpack_Preconditioner* Prec = Factory.Create("IC", &A, 0); Teuchos::ParameterList list; list.set("fact: level-of-fill", 1); IFPACK_CHK_ERR(Prec->SetParameters(list)); IFPACK_CHK_ERR(Prec->Initialize()); IFPACK_CHK_ERR(Prec->Compute()); Epetra_LinearProblem linearProblem(&A, &LHS, &RHS); AztecOO solver(linearProblem); solver.SetAztecOption(AZ_solver, AZ_cg); solver.SetPrecOperator(Prec); solver.Iterate(1550, 1e-9); // visualization using MEDIT -- a VTK module is available as well Galeri::viz::MEDIT::write(domain, "sol", LHS); // now compute the norm of the solution problem.computeNorms(domain, LHS); #ifdef HAVE_MPI MPI_Finalize(); #endif }
// Builds and returns an IFPACK smoother for ML, configured from `List`.
// Dispatches on "smoother: type" to one of: Chebyshev (point or block),
// point relaxation (Jacobi/GS/SGS), block relaxation, incomplete
// factorizations (ILU/IC/ILUT/ICT/SILU), or SORa.  The returned operator is
// heap-allocated and owned by the caller; returns 0 (NULL) on any failure
// or when zero sweeps are requested.  `InvDiagonal`, if non-NULL, supplies
// a precomputed inverse diagonal for the Chebyshev eigenvalue estimation;
// otherwise one is computed from A (which must then be an Epetra_CrsMatrix).
Epetra_Operator* ML_Gen_Smoother_Ifpack_Epetra(const Epetra_Operator *A,const Epetra_Vector *InvDiagonal, Teuchos::ParameterList & List,string printMsg,bool verbose){
  /* Variables */
  double lambda_min = 0.0;
  double lambda_max = 0.0;
  Teuchos::ParameterList IFPACKList=List.sublist("smoother: ifpack list");

  /* Parameter-list Options */
  string PreOrPostSmoother = List.get("smoother: pre or post","both");
  string SmooType = List.get("smoother: type", "Chebyshev");
  if(SmooType=="IFPACK") SmooType=List.get("smoother: ifpack type","Chebyshev");
  int Sweeps = List.get("smoother: sweeps", 3);
  int IfpackOverlap = List.get("smoother: ifpack overlap",0);
  double omega = List.get("smoother: damping factor",1.0);
  Ifpack_Chebyshev* SmootherC_=0;
  Ifpack_Preconditioner* SmootherP_=0;

  /* Sanity Check*/
  if(Sweeps==0) return 0;

  /* Early Output*/
  int Nrows=A->OperatorDomainMap().NumGlobalElements();
  int Nnz=-1;
  const Epetra_RowMatrix *Arow=dynamic_cast<const Epetra_RowMatrix*>(A);
  if(Arow) Nnz=Arow->NumGlobalNonzeros();
  if(verbose && !A->Comm().MyPID())
    cout <<printMsg<<"# global rows = "<<Nrows<<" # estim. global nnz = "<<Nnz<<endl;

  /**********************************************/
  /***      Chebyshev (Including Block)       ***/
  /**********************************************/
  if(SmooType=="Chebyshev" || SmooType=="MLS" || SmooType=="IFPACK-Chebyshev" || SmooType=="IFPACK-Block Chebyshev"){
    bool allocated_inv_diagonal=false;
    int MaximumIterations = List.get("eigen-analysis: max iters", 10);
    string EigenType_ = List.get("eigen-analysis: type", "cg");
    double boost = List.get("eigen-analysis: boost for lambda max", 1.0);
    double alpha = List.get("chebyshev: alpha",30.0001);
    Epetra_Vector *InvDiagonal_=0;

    /* Block Chebyshev stuff if needed */
    int MyCheby_nBlocks= List.get("smoother: Block Chebyshev number of blocks",0);
    int* MyCheby_blockIndices=List.get("smoother: Block Chebyshev block list",(int*)0);
    int* MyCheby_blockStarts= List.get("smoother: Block Chebyshev block starts",(int*)0);
    bool MyCheby_NE= List.get("smoother: chebyshev solve normal equations",false);

    if(SmooType == "IFPACK-Block Chebyshev" && MyCheby_blockIndices && MyCheby_blockStarts){
      // If we're using Block Chebyshev, it can compute it's own eigenvalue estimate..
      Teuchos::ParameterList PermuteList,BlockList;
      BlockList.set("apply mode","invert");
      PermuteList.set("number of local blocks",MyCheby_nBlocks);
      PermuteList.set("block start index",MyCheby_blockStarts);
      //        if(is_lid) PermuteList.set("block entry lids",Blockids_);
      //NTS: Add LID support
      PermuteList.set("block entry gids",MyCheby_blockIndices);
      PermuteList.set("blockdiagmatrix: list",BlockList);

      IFPACKList.set("chebyshev: use block mode",true);
      IFPACKList.set("chebyshev: block list",PermuteList);
      IFPACKList.set("chebyshev: eigenvalue max iterations",10);

      // EXPERIMENTAL: Cheby-NE
      IFPACKList.set("chebyshev: solve normal equations",MyCheby_NE);
    }
    else {
      /* Non-Blocked Chebyshev */
      /* Grab Diagonal & invert if not provided */
      if(InvDiagonal) InvDiagonal_=const_cast<Epetra_Vector *>(InvDiagonal);
      else{
        const Epetra_CrsMatrix* Acrs=dynamic_cast<const Epetra_CrsMatrix*>(A);
        if(!Acrs) return 0;
        allocated_inv_diagonal=true;
        InvDiagonal_ = new Epetra_Vector(Acrs->RowMap());
        Acrs->ExtractDiagonalCopy(*InvDiagonal_);
        // Zero diagonal entries are left at zero rather than inverted.
        for (int i = 0; i < InvDiagonal_->MyLength(); ++i)
          if ((*InvDiagonal_)[i] != 0.0)
            (*InvDiagonal_)[i] = 1.0 / (*InvDiagonal_)[i];
      }

      /* Do the eigenvalue estimation*/
      if (EigenType_ == "power-method") Ifpack_Chebyshev::PowerMethod(*A,*InvDiagonal_,MaximumIterations,lambda_max);
      else if(EigenType_ == "cg") Ifpack_Chebyshev::CG(*A,*InvDiagonal_,MaximumIterations,lambda_min,lambda_max);
      else ML_CHK_ERR(0); // not recognized
      // lambda_min is derived from lambda_max via the requested ratio, not
      // taken from the eigen-solver.
      lambda_min=lambda_max / alpha;

      /* Setup the Smoother's List*/
      IFPACKList.set("chebyshev: min eigenvalue", lambda_min);
      IFPACKList.set("chebyshev: max eigenvalue", boost * lambda_max);
      IFPACKList.set("chebyshev: ratio eigenvalue",alpha);
      IFPACKList.set("chebyshev: operator inv diagonal", InvDiagonal_);
    }

    /* Setup the Smoother's List*/
    IFPACKList.set("chebyshev: ratio eigenvalue", alpha);
    IFPACKList.set("chebyshev: degree", Sweeps);
    IFPACKList.set("chebyshev: zero starting solution",false);

    // Setup
    SmootherC_= new Ifpack_Chebyshev(A);
    if (SmootherC_ == 0) return 0;
    SmootherC_->SetParameters(IFPACKList);
    SmootherC_->Initialize();
    SmootherC_->Compute();

    // Grab the lambda's if needed (block mode estimated them itself)
    if(SmooType=="IFPACK-Block Chebyshev"){
      lambda_min=SmootherC_->GetLambdaMin();
      lambda_max=SmootherC_->GetLambdaMax();
    }

    // Smoother Info Output
    if(verbose && !A->Comm().MyPID()){
      if(SmooType=="IFPACK-Block Chebyshev") {
        cout << printMsg << "MLS/Block-Chebyshev, polynomial order = " << Sweeps
             << ", alpha = " << alpha << endl;
        cout << printMsg << "lambda_min = " << lambda_min
             << ", lambda_max = " << lambda_max << endl;
      }
      else {
        cout << printMsg << "MLS/Chebyshev, polynomial order = " << Sweeps
             << ", alpha = " << alpha << endl;
        cout << printMsg << "lambda_min = " << lambda_min
             << ", lambda_max = " << boost*lambda_max << endl;
      }
    }

    // Cleanup:  Since Chebyshev will keep it's own copy of the Inverse Diagonal...
    if (allocated_inv_diagonal) delete InvDiagonal_;

    return SmootherC_;
  }
  /**********************************************/
  /***           Point Relaxation             ***/
  /**********************************************/
  else if(SmooType=="Gauss-Seidel" || SmooType=="symmetric Gauss-Seidel" || SmooType=="Jacobi"
          || SmooType=="point relaxation stand-alone" || SmooType=="point relaxation" ){
    const Epetra_CrsMatrix* Acrs=dynamic_cast<const Epetra_CrsMatrix*>(A);
    if(!Acrs) return 0;
    string MyIfpackType="point relaxation stand-alone";
    if(IfpackOverlap > 0) MyIfpackType="point relaxation";
    string MyRelaxType="symmetric Gauss-Seidel";
    // NOTE(review): plain "Gauss-Seidel" is not handled here and therefore
    // falls through to the "symmetric Gauss-Seidel" default — unlike the
    // block-relaxation branch below, which maps "block Gauss-Seidel" to
    // "Gauss-Seidel".  Confirm whether this asymmetry is intended.
    if(SmooType=="symmetric Gauss-Seidel") MyRelaxType=SmooType;
    else if(SmooType=="Jacobi") MyRelaxType=SmooType;

    IFPACKList.set("relaxation: type", IFPACKList.get("relaxation: type",MyRelaxType));
    IFPACKList.set("relaxation: sweeps", Sweeps);
    IFPACKList.set("relaxation: damping factor", omega);
    IFPACKList.set("relaxation: zero starting solution",false);

    if(verbose && !A->Comm().MyPID()){
      cout << printMsg << IFPACKList.get("relaxation: type",MyRelaxType).c_str()
           <<" (sweeps=" << Sweeps << ",omega=" << omega << ")" <<endl;
    }

    Ifpack Factory;
    SmootherP_ = Factory.Create(MyIfpackType,const_cast<Epetra_CrsMatrix*>(Acrs),IfpackOverlap);
    if (SmootherP_ == 0) return 0;
    SmootherP_->SetParameters(IFPACKList);
    SmootherP_->Initialize();
    SmootherP_->Compute();
    return SmootherP_;
  }
  /**********************************************/
  /***           Block Relaxation             ***/
  /**********************************************/
  else if(SmooType=="block Gauss-Seidel" || SmooType=="symmetric block Gauss-Seidel" || SmooType=="block Jacobi"
          || SmooType=="block relaxation stand-alone" || SmooType=="block relaxation" ){
    const Epetra_CrsMatrix* Acrs=dynamic_cast<const Epetra_CrsMatrix*>(A);
    if(!Acrs) return 0;
    string MyIfpackType="block relaxation stand-alone";
    if(IfpackOverlap > 0) MyIfpackType="block relaxation";
    string MyRelaxType="symmetric Gauss-Seidel";
    if(SmooType=="block Gauss-Seidel") MyRelaxType="Gauss-Seidel";
    else if(SmooType=="block Jacobi") MyRelaxType="Jacobi";

    IFPACKList.set("relaxation: type", IFPACKList.get("relaxation: type",MyRelaxType));
    IFPACKList.set("relaxation: sweeps", Sweeps);
    IFPACKList.set("relaxation: damping factor", omega);
    IFPACKList.set("relaxation: zero starting solution",false);

    if(verbose && !A->Comm().MyPID()){
      cout << printMsg << "block " << IFPACKList.get("relaxation: type",MyRelaxType).c_str()
           <<" (sweeps=" << Sweeps << ",omega=" << omega << ")" <<endl;
    }

    Ifpack Factory;
    SmootherP_ = Factory.Create(MyIfpackType,const_cast<Epetra_CrsMatrix*>(Acrs),IfpackOverlap);
    if (SmootherP_ == 0) return 0;
    SmootherP_->SetParameters(IFPACKList);
    SmootherP_->Initialize();
    SmootherP_->Compute();
    return SmootherP_;
  }
  /**********************************************/
  /***        Incomplete Factorization        ***/
  /**********************************************/
  else if(SmooType == "ILU" || SmooType == "IC" || SmooType == "ILUT" ||
          SmooType == "ICT" || SmooType == "SILU") {
    const Epetra_RowMatrix* Arow=dynamic_cast<const Epetra_RowMatrix*>(A);
    double MyLOF=0.0;
    if(SmooType=="ILUT" || SmooType=="ICT") MyLOF=List.get("smoother: ifpack level-of-fill",1.0);
    else MyLOF=List.get("smoother: ifpack level-of-fill",0.0);
    int MyIfpackOverlap = List.get("smoother: ifpack overlap", 0);
    double MyIfpackRT = List.get("smoother: ifpack relative threshold", 1.0);
    double MyIfpackAT = List.get("smoother: ifpack absolute threshold", 0.0);
    IFPACKList.set("ILU: sweeps",Sweeps);

    // Set the fact: LOF options, but only if they're not set already... All this sorcery is because level-of-fill
    // is an int for ILU and a double for ILUT.  Lovely.
    if(SmooType=="ILUT" || SmooType=="ICT"){
      IFPACKList.set("fact: level-of-fill", IFPACKList.get("fact: level-of-fill",MyLOF));
      IFPACKList.set("fact: ilut level-of-fill", IFPACKList.get("fact: ilut level-of-fill",MyLOF));
      IFPACKList.set("fact: ict level-of-fill", IFPACKList.get("fact: ict level-of-fill",MyLOF));
      MyLOF=IFPACKList.get("fact: level-of-fill",MyLOF);
    }
    else{
      IFPACKList.set("fact: level-of-fill", (int) IFPACKList.get("fact: level-of-fill",(int)MyLOF));
      MyLOF=IFPACKList.get("fact: level-of-fill",(int)MyLOF);
    }

    IFPACKList.set("fact: relative threshold", MyIfpackRT);
    IFPACKList.set("fact: absolute threshold", MyIfpackAT);

    if(verbose && !A->Comm().MyPID()){
      cout << printMsg << "IFPACK, type=`" << SmooType << "'," << endl
           << printMsg << PreOrPostSmoother << ",overlap=" << MyIfpackOverlap << endl;
      cout << printMsg << "level-of-fill=" << MyLOF;
      cout << ",rel. threshold=" << MyIfpackRT
           << ",abs. threshold=" << MyIfpackAT << endl;
    }

    Ifpack Factory;
    // NOTE(review): Create() is passed IfpackOverlap, while MyIfpackOverlap
    // was read from the list and is what gets printed above.  If the two
    // keys ever differ this is inconsistent — confirm which is intended.
    SmootherP_ = Factory.Create(SmooType,const_cast<Epetra_RowMatrix*>(Arow),IfpackOverlap);
    if (SmootherP_ == 0) return 0;
    SmootherP_->SetParameters(IFPACKList);
    SmootherP_->Initialize();
    SmootherP_->Compute();
    return SmootherP_;
  }
  /**********************************************/
  /***                  SORa                  ***/
  /**********************************************/
  else if(SmooType=="SORa"){
    const Epetra_RowMatrix* Arow=dynamic_cast<const Epetra_RowMatrix*>(A);
    if(verbose && !A->Comm().MyPID()){
      cout << printMsg << "IFPACK/SORa("<<IFPACKList.get("sora: alpha",1.5)<<","<<IFPACKList.get("sora: gamma",1.0)<<")"
           << ", sweeps = " <<IFPACKList.get("sora: sweeps",1)<<endl;
      if(IFPACKList.get("sora: oaz boundaries",false))
        cout << printMsg << "oaz boundary handling enabled"<<endl;
      if(IFPACKList.get("sora: use interproc damping",false))
        cout << printMsg << "interproc damping enabled"<<endl;
      if(IFPACKList.get("sora: use global damping",false))
        cout << printMsg << "global damping enabled"<<endl;
    }
    Ifpack Factory;
    SmootherP_ = Factory.Create(SmooType,const_cast<Epetra_RowMatrix*>(Arow),IfpackOverlap);
    if (SmootherP_ == 0) return 0;
    SmootherP_->SetParameters(IFPACKList);
    SmootherP_->Initialize();
    SmootherP_->Compute();
    return SmootherP_;
  }
  else{
    printf("ML_Gen_Smoother_Ifpack_New: Unknown preconditioner\n");
    ML_CHK_ERR(0);
  }

  return 0;
}/*ML_Gen_Smoother_Ifpack_New*/
int Ifpack_IHSS::Compute(){ if(!IsInitialized_) Initialize(); int rv; Ifpack Factory; Epetra_CrsMatrix *Askew=0,*Aherm=0; Ifpack_Preconditioner *Pskew=0, *Pherm=0; Time_.ResetStartTime(); // Create Aherm (w/o diagonal) rv=EpetraExt::MatrixMatrix::Add(*A_,false,.5,*A_,true,.5,Aherm); Aherm->FillComplete(); if(rv) IFPACK_CHK_ERR(-1); // Grab Aherm's diagonal Epetra_Vector avec(Aherm->RowMap()); IFPACK_CHK_ERR(Aherm->ExtractDiagonalCopy(avec)); // Compute alpha using the Bai, Golub & Ng 2003 formula, not the more multigrid-appropriate Hamilton, Benzi and Haber 2007. // PowerMethod(Aherm, EigMaxIters_,LambdaMax_); // Alpha_=LambdaMax_ / sqrt(EigRatio_); // Try something more Hamilton inspired, using the maximum diagonal value of Aherm. avec.MaxValue(&Alpha_); // Add alpha to the diagonal of Aherm for(int i=0;i<Aherm->NumMyRows();i++) avec[i]+=Alpha_; IFPACK_CHK_ERR(Aherm->ReplaceDiagonalValues(avec)); Aherm_=rcp(Aherm); // Compute Askew (and add diagonal) Askew=new Epetra_CrsMatrix(Copy,A_->RowMap(),0); rv=EpetraExt::MatrixMatrix::Add(*A_,false,.5,*A_,true,-.5,Askew); if(rv) IFPACK_CHK_ERR(-2); for(int i=0;i<Askew->NumMyRows();i++) { int gid=Askew->GRID(i); Askew->InsertGlobalValues(gid,1,&Alpha_,&gid); } Askew->FillComplete(); Askew_=rcp(Askew); // Compute preconditioner for Aherm Teuchos::ParameterList PLh=List_.sublist("ihss: hermetian list"); string htype=List_.get("ihss: hermetian type","ILU"); Pherm= Factory.Create(htype, Aherm); Pherm->SetParameters(PLh); IFPACK_CHK_ERR(Pherm->Compute()); Pherm_=rcp(Pherm); // Compute preconditoner for Askew Teuchos::ParameterList PLs=List_.sublist("ihss: skew hermetian list"); string stype=List_.get("ihss: skew hermetian type","ILU"); Pskew= Factory.Create(stype, Askew); Pskew->SetParameters(PLs); IFPACK_CHK_ERR(Pskew->Compute()); Pskew_=rcp(Pskew); // Label sprintf(Label_, "IFPACK IHSS (H,S)=(%s/%s)",htype.c_str(),stype.c_str()); // Counters IsComputed_=true; NumCompute_++; ComputeTime_ += Time_.ElapsedTime(); return 0; }
// Test driver: reads a Harwell-Boeing matrix, builds random right-hand
// sides, constructs an IFPACK ICT (incomplete Cholesky) preconditioner, and
// solves with Belos block CG.  Prints PASSED/FAILED based on convergence
// and the true (recomputed) residuals.
int main(int argc, char *argv[]) {
  //
#ifdef EPETRA_MPI
  // Initialize MPI
  MPI_Init(&argc,&argv);
  Belos::MPIFinalize mpiFinalize; // Will call finalize with *any* return
  (void)mpiFinalize;
#endif
  //
  using Teuchos::RCP;
  using Teuchos::rcp;
  //
  // Get test parameters from command-line processor
  //
  bool verbose = false, proc_verbose = false;
  int frequency = -1;        // how often residuals are printed by solver
  int numrhs = 15;           // total number of right-hand sides to solve for
  int blocksize = 10;        // blocksize used by solver
  int maxiters = -1;         // maximum number of iterations for the solver to use
  std::string filename("bcsstk14.hb");
  double tol = 1.0e-5;       // relative residual tolerance

  Teuchos::CommandLineProcessor cmdp(false,true);
  cmdp.setOption("verbose","quiet",&verbose,"Print messages and results.");
  cmdp.setOption("frequency",&frequency,"Solvers frequency for printing residuals (#iters).");
  cmdp.setOption("filename",&filename,"Filename for Harwell-Boeing test matrix.");
  cmdp.setOption("tol",&tol,"Relative residual tolerance used by CG solver.");
  cmdp.setOption("num-rhs",&numrhs,"Number of right-hand sides to be solved for.");
  cmdp.setOption("block-size",&blocksize,"Block size to be used by CG solver.");
  cmdp.setOption("max-iters",&maxiters,"Maximum number of iterations per linear system (-1 := adapted to problem/block size).");
  if (cmdp.parse(argc,argv) != Teuchos::CommandLineProcessor::PARSE_SUCCESSFUL) {
    return -1;
  }
  if (!verbose)
    frequency = -1;  // Reset frequency if verbosity is off
  //
  // Get the problem
  //
  int MyPID;
  RCP<Epetra_CrsMatrix> A;
  RCP<Epetra_MultiVector> X, B;
  int return_val =Belos::createEpetraProblem(filename,NULL,&A,&B,&X,&MyPID);
  if(return_val != 0) return return_val;
  proc_verbose = ( verbose && (MyPID==0) );
  //
  // Solve using Belos
  //
  typedef double                            ST;
  typedef Epetra_Operator                   OP;
  typedef Epetra_MultiVector                MV;
  typedef Belos::OperatorTraits<ST,MV,OP>  OPT;
  typedef Belos::MultiVecTraits<ST,MV>     MVT;
  //
  // *****Construct initial guess and random right-hand-sides *****
  //
  // For numrhs != 1 the file's single RHS is replaced: X is randomized,
  // B = A*X is computed, then X is zeroed to serve as the initial guess.
  if (numrhs != 1) {
    X = rcp( new Epetra_MultiVector( A->Map(), numrhs ) );
    MVT::MvRandom( *X );
    B = rcp( new Epetra_MultiVector( A->Map(), numrhs ) );
    OPT::Apply( *A, *X, *B );
    MVT::MvInit( *X, 0.0 );
  }
  //
  // ************Construct preconditioner*************
  //
  ParameterList ifpackList;

  // allocates an IFPACK factory. No data is associated
  // to this object (only method Create()).
  Ifpack Factory;

  // create the preconditioner. For valid PrecType values,
  // please check the documentation
  std::string PrecType = "ICT"; // incomplete Cholesky
  int OverlapLevel = 0; // must be >= 0. If Comm.NumProc() == 1,
                        // it is ignored.

  RCP<Ifpack_Preconditioner> Prec = Teuchos::rcp( Factory.Create(PrecType, &*A, OverlapLevel) );
  // NOTE(review): assert() disappears in NDEBUG builds; a null factory
  // result would then only fail at first use below.
  assert(Prec != Teuchos::null);

  // specify parameters for ICT
  ifpackList.set("fact: drop tolerance", 1e-9);
  ifpackList.set("fact: ict level-of-fill", 1.0);
  // the combine mode is on the following:
  // "Add", "Zero", "Insert", "InsertAdd", "Average", "AbsMax"
  // Their meaning is as defined in file Epetra_CombineMode.h
  ifpackList.set("schwarz: combine mode", "Add");
  // sets the parameters
  IFPACK_CHK_ERR(Prec->SetParameters(ifpackList));

  // initialize the preconditioner. At this point the matrix must
  // have been FillComplete()'d, but actual values are ignored.
  IFPACK_CHK_ERR(Prec->Initialize());

  // Builds the preconditioners, by looking for the values of
  // the matrix.
  IFPACK_CHK_ERR(Prec->Compute());

  // Create the Belos preconditioned operator from the Ifpack preconditioner.
  // NOTE:  This is necessary because Belos expects an operator to apply the
  //        preconditioner with Apply() NOT ApplyInverse().
  RCP<Belos::EpetraPrecOp> belosPrec = rcp( new Belos::EpetraPrecOp( Prec ) );
  //
  // *****Create parameter list for the block CG solver manager*****
  //
  const int NumGlobalElements = B->GlobalLength();
  if (maxiters == -1)
    maxiters = NumGlobalElements/blocksize - 1; // maximum number of iterations to run
  //
  ParameterList belosList;
  belosList.set( "Block Size", blocksize );              // Blocksize to be used by iterative solver
  belosList.set( "Maximum Iterations", maxiters );       // Maximum number of iterations allowed
  belosList.set( "Convergence Tolerance", tol );         // Relative convergence tolerance requested
  if (verbose) {
    belosList.set( "Verbosity", Belos::Errors + Belos::Warnings +
        Belos::TimingDetails + Belos::FinalSummary + Belos::StatusTestDetails );
    if (frequency > 0)
      belosList.set( "Output Frequency", frequency );
  }
  else
    belosList.set( "Verbosity", Belos::Errors + Belos::Warnings );
  //
  // *******Construct a preconditioned linear problem********
  //
  RCP<Belos::LinearProblem<double,MV,OP> > problem
    = rcp( new Belos::LinearProblem<double,MV,OP>( A, X, B ) );
  problem->setLeftPrec( belosPrec );

  bool set = problem->setProblem();
  if (set == false) {
    if (proc_verbose)
      std::cout << std::endl << "ERROR:  Belos::LinearProblem failed to set up correctly!" << std::endl;
    return -1;
  }

  // Create an iterative solver manager.
  RCP< Belos::SolverManager<double,MV,OP> > solver
    = rcp( new Belos::BlockCGSolMgr<double,MV,OP>(problem, rcp(&belosList,false)) );

  //
  // *******************************************************************
  // *************Start the block CG iteration*************************
  // *******************************************************************
  //
  if (proc_verbose) {
    std::cout << std::endl << std::endl;
    std::cout << "Dimension of matrix: " << NumGlobalElements << std::endl;
    std::cout << "Number of right-hand sides: " << numrhs << std::endl;
    std::cout << "Block size used by solver: " << blocksize << std::endl;
    std::cout << "Max number of CG iterations: " << maxiters << std::endl;
    std::cout << "Relative residual tolerance: " << tol << std::endl;
    std::cout << std::endl;
  }
  //
  // Perform solve
  //
  Belos::ReturnType ret = solver->solve();
  //
  // Compute actual residuals (recomputed from A, X, B — not the solver's
  // internal recursive residuals).
  //
  bool badRes = false;
  std::vector<double> actual_resids( numrhs );
  std::vector<double> rhs_norm( numrhs );
  Epetra_MultiVector resid(A->Map(), numrhs);
  OPT::Apply( *A, *X, resid );
  MVT::MvAddMv( -1.0, resid, 1.0, *B, resid );
  MVT::MvNorm( resid, actual_resids );
  MVT::MvNorm( *B, rhs_norm );
  if (proc_verbose) {
    std::cout<< "---------- Actual Residuals (normalized) ----------"<<std::endl<<std::endl;
    for ( int i=0; i<numrhs; i++) {
      double actRes = actual_resids[i]/rhs_norm[i];
      std::cout<<"Problem "<<i<<" : \t"<< actRes <<std::endl;
      if (actRes > tol) badRes = true;
    }
  }

  if (ret!=Belos::Converged || badRes) {
    if (proc_verbose)
      std::cout << std::endl << "End Result: TEST FAILED" << std::endl;
    return -1;
  }
  //
  // Default return value
  //
  if (proc_verbose)
    std::cout << std::endl << "End Result: TEST PASSED" << std::endl;
  return 0;
  //
} // end test_bl_pcg_hb.cpp
void MxGeoMultigridPrec::setup() { std::string smoothType = pList.get("linear solver : smoother type", "Gauss-Seidel"); std::string coarseSmoothType = pList.get("linear solver : coarse smoother", "Amesos"); int smoothSweeps = pList.get("linear solver : smoother sweeps", 1); int overlap = 1; levels = pList.get("linear solver : levels", 1); cycles = pList.get("linear solver : cycles", 1); output = pList.get("linear solver : output", 0); rcf = pList.get("linear solver : remove const field", false); // Gauss-Seidel preconditioner options Teuchos::ParameterList GSList; GSList.set("relaxation: type", "Gauss-Seidel"); //GSList.set("relaxation: type", "Jacobi"); GSList.set("relaxation: sweeps", smoothSweeps); GSList.set("relaxation: damping factor", 1.); GSList.set("relaxation: zero starting solution", false); // Chebyshev preconditioner options Teuchos::ParameterList ChebList; ChebList.set("chebyshev: degree", smoothSweeps); ChebList.set("chebyshev: zero starting solution", false); ChebList.set("chebyshev: ratio eigenvalue", 30.); // Amesos direct solver parameters Teuchos::ParameterList AmesosList; AmesosList.set("amesos: solver type", "Amesos_Klu"); AmesosList.set("AddToDiag", 1.e-12); // smoother parameters for intermediate levels Teuchos::ParameterList vSmootherList; std::string vPrecType; if (smoothType == "Gauss-Seidel") { vPrecType = "point relaxation"; vSmootherList = GSList; } else if (smoothType == "Chebyshev") { vPrecType = smoothType; vSmootherList = ChebList; } else { std::cout << "MxGeoMultigridPrec::setup(): unknown smoother type, '" << smoothType << "' for intermediate smoothers. 
Using gauss-seidel.\n"; vSmootherList = GSList; } // smoother parameters for coarsest level Teuchos::ParameterList cSmootherList; std::string cPrecType; if (coarseSmoothType == "Gauss-Seidel") { cSmootherList = GSList; cPrecType = "point relaxation"; } else if (coarseSmoothType == "Chebyshev") { cSmootherList = ChebList; cPrecType = coarseSmoothType; } else if (coarseSmoothType == "Amesos") { cSmootherList = AmesosList; cPrecType = coarseSmoothType; } else { std::cout << "MxGeoMultigridPrec::setup(): unknown smoother type, '" << coarseSmoothType << "', for coarsest smoother. Using Amesos_Klu.\n"; cSmootherList = AmesosList; cPrecType = "Amesos"; } Teuchos::ParameterList smootherList; std::string precType; Ifpack factory; double maxEig, minEig; // special case for when Amesos wants to alter our original operator if (levels == 1 and coarseSmoothType == "Amesos") { fineOpCopy = Teuchos::rcp(new Epetra_CrsMatrix(*ops[0])); //apparently Amesos changes the input matrix? smoothers.push_back(Teuchos::rcp(new Ifpack_Amesos(&*fineOpCopy))); smoothers[0]->SetParameters(AmesosList); smoothers[0]->Initialize(); smoothers[0]->Compute(); } else { //level 0 is original operator for (int level = 0; level < levels; ++level) { std::cout << " Initializing multigrid level " << level << std::endl; if (level == levels - 1) { smootherList = cSmootherList; precType = cPrecType; } else { smootherList = vSmootherList; precType = vPrecType; } // always find max eigenvalue for at least the prolongator smoother, // and possibly the cheyshev smoother { Epetra_Vector diag(ops[level]->RangeMap()); ops[level]->ExtractDiagonalCopy(diag); diag.Reciprocal(diag); std::cout << " Inf norm: " << ops[level]->NormInf() << "\n"; //diag.PutScalar(1.0); Ifpack_Chebyshev::CG(*ops[level], diag, 100, minEig, maxEig); std::cout << "unscaled operator: lambda max = " << maxEig << ", lambda min: " << minEig << "\n"; maxEigs.push_back(maxEig); if (precType == "Chebyshev") { smootherList.set("chebyshev: max 
eigenvalue", maxEig); smootherList.set("chebyshev: min eigenvalue", minEig); } } smoothers.push_back(Teuchos::rcp(factory.Create(precType, &*ops[level], overlap))); smoothers[level]->SetParameters(smootherList); smoothers[level]->Initialize(); smoothers[level]->Compute(); std::cout << *smoothers[level]; #if 0 // check klu solver if (level == levels - 1) { std::cout << "Checking KLU solver with Ax = 0\n"; Epetra_MultiVector zero(ops[level]->RowMap(), 1), work(ops[level]->RowMap(), 1); zero.PutScalar(0.0); work.Random(); smoothers[level]->ApplyInverse(zero, work); std::cout << work; } #endif } } }
// Example driver: build a 2D convection-diffusion problem with Galeri and
// solve it twice with AztecOO/GMRES, once preconditioned with Ifpack's IHSS
// (Inexact Hermitian/Skew-Hermitian Splitting) and once with SORa.
// Returns EXIT_SUCCESS; IFPACK_CHK_ERR aborts with the error code on failure.
int main(int argc, char *argv[]) {
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  // Build a nonsymmetric 2D "UniFlow" operator on an nx x nx Cartesian grid,
  // partitioned one strip of rows per processor (mx=1, my=NumProc).
  Teuchos::ParameterList GaleriList;
  int nx = 30;
  GaleriList.set("nx", nx);
  // GaleriList.set("ny", nx * Comm.NumProc());
  GaleriList.set("ny", nx);
  GaleriList.set("mx", 1);
  GaleriList.set("my", Comm.NumProc());
  GaleriList.set("alpha", .0);     // rotation angle of the flow field
  GaleriList.set("diff", 1.0);     // diffusion coefficient
  GaleriList.set("conv", 100.0);   // convection coefficient (makes A nonsymmetric)
  Teuchos::RefCountPtr<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap64("Cartesian2D", Comm, GaleriList) );
  Teuchos::RefCountPtr<Epetra_CrsMatrix> A = Teuchos::rcp( Galeri::CreateCrsMatrix("UniFlow2D", &*Map, GaleriList) );

  // Zero initial guess, random right-hand side.
  Teuchos::RefCountPtr<Epetra_MultiVector> LHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) );
  Teuchos::RefCountPtr<Epetra_MultiVector> RHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) );
  LHS->PutScalar(0.0);
  RHS->Random();

  Ifpack Factory;
  int Niters = 100;

  // ============================= //
  // Construct IHSS preconditioner //
  // ============================= //
  // Third Create() argument is the overlap level (0 = no overlap).
  Teuchos::RefCountPtr<Ifpack_Preconditioner> Prec =
    Teuchos::rcp( Factory.Create("IHSS", &*A,0) );
  Teuchos::ParameterList List;
  // NOTE: "hermetian" is the spelling used by the Ifpack parameter keys.
  List.set("ihss: hermetian type","ILU");        // inner solver for the Hermitian part
  List.set("ihss: skew hermetian type","ILU");   // inner solver for the skew-Hermitian part
  List.set("ihss: ratio eigenvalue",100.0);
  // Could set sublist values here to better control the ILU, but this isn't
  // needed for this example.
  IFPACK_CHK_ERR(Prec->SetParameters(List));
  IFPACK_CHK_ERR(Prec->Compute());

  // ============================= //
  // Create solver Object          //
  // ============================= //
  AztecOO solver;
  solver.SetUserMatrix(&*A);
  solver.SetLHS(&*LHS);
  solver.SetRHS(&*RHS);
  solver.SetAztecOption(AZ_solver,AZ_gmres);
  solver.SetPrecOperator(&*Prec);
  solver.SetAztecOption(AZ_output, 1);  // print residual every iteration
  solver.Iterate(Niters, 1e-8);

  // ============================= //
  // Construct SORa preconditioner //
  // ============================= //
  Teuchos::RefCountPtr<Ifpack_Preconditioner> Prec2 =
    Teuchos::rcp( Factory.Create("SORa", &*A,0) );
  Teuchos::ParameterList List2;
  List2.set("sora: sweeps",1);
  // Could set sublist values here to better control the ILU, but this isn't
  // needed for this example.
  IFPACK_CHK_ERR(Prec2->SetParameters(List2));
  IFPACK_CHK_ERR(Prec2->Compute());

  // ============================= //
  // Create solver Object          //
  // ============================= //
  AztecOO solver2;
  LHS->PutScalar(0.0);  // reset the initial guess so both runs start identically
  solver2.SetUserMatrix(&*A);
  solver2.SetLHS(&*LHS);
  solver2.SetRHS(&*RHS);
  solver2.SetAztecOption(AZ_solver,AZ_gmres);
  solver2.SetPrecOperator(&*Prec2);
  solver2.SetAztecOption(AZ_output, 1);
  solver2.Iterate(Niters, 1e-8);

#ifdef HAVE_MPI
  MPI_Finalize() ;
#endif
  return(EXIT_SUCCESS);
}
int main(int argc, char *argv[]) { using std::cout; using std::endl; // bool haveM = true; #ifdef EPETRA_MPI // Initialize MPI MPI_Init(&argc,&argv); Epetra_MpiComm Comm( MPI_COMM_WORLD ); #else Epetra_SerialComm Comm; #endif int MyPID = Comm.MyPID(); bool verbose=false; bool isHermitian=false; std::string k_filename = "bfw782a.mtx"; std::string m_filename = "bfw782b.mtx"; std::string which = "LR"; Teuchos::CommandLineProcessor cmdp(false,true); cmdp.setOption("verbose","quiet",&verbose,"Print messages and results."); cmdp.setOption("sort",&which,"Targetted eigenvalues (SM,LM,SR,or LR)."); cmdp.setOption("K-filename",&k_filename,"Filename and path of the stiffness matrix."); cmdp.setOption("M-filename",&m_filename,"Filename and path of the mass matrix."); if (cmdp.parse(argc,argv) != Teuchos::CommandLineProcessor::PARSE_SUCCESSFUL) { #ifdef HAVE_MPI MPI_Finalize(); #endif return -1; } // //********************************************************************** //******************Set up the problem to be solved********************* //********************************************************************** // // *****Read in matrix from file****** // Teuchos::RCP<Epetra_Map> Map; Teuchos::RCP<Epetra_CrsMatrix> K, M; EpetraExt::readEpetraLinearSystem( k_filename, Comm, &K, &Map ); if (haveM) { EpetraExt::readEpetraLinearSystem( m_filename, Comm, &M, &Map ); } // // Build Preconditioner // Ifpack factory; std::string ifpack_type = "ILUT"; int overlap = 0; Teuchos::RCP<Ifpack_Preconditioner> ifpack_prec = Teuchos::rcp( factory.Create( ifpack_type, K.get(), overlap ) ); // // Set parameters and compute preconditioner // Teuchos::ParameterList ifpack_params; double droptol = 1e-2; double fill = 2.0; ifpack_params.set("fact: drop tolerance",droptol); ifpack_params.set("fact: ilut level-of-fill",fill); ifpack_prec->SetParameters(ifpack_params); ifpack_prec->Initialize(); ifpack_prec->Compute(); // // GeneralizedDavidson expects preconditioner to be applied with // "Apply" 
rather than "Apply_Inverse" // Teuchos::RCP<Epetra_Operator> prec = Teuchos::rcp( new Epetra_InvOperator(ifpack_prec.get()) ); // //************************************ // Start the block Davidson iteration //*********************************** // // Variables used for the Block Arnoldi Method // int nev = 5; int blockSize = 5; int maxDim = 40; int maxRestarts = 10; double tol = 1.0e-8; // Set verbosity level int verbosity = Anasazi::Errors + Anasazi::Warnings; if (verbose) { verbosity += Anasazi::FinalSummary + Anasazi::TimingDetails; } // // Create parameter list to pass into solver // Teuchos::ParameterList MyPL; MyPL.set( "Verbosity", verbosity ); MyPL.set( "Which", which ); MyPL.set( "Block Size", blockSize ); MyPL.set( "Maximum Subspace Dimension", maxDim ); MyPL.set( "Maximum Restarts", maxRestarts ); MyPL.set( "Convergence Tolerance", tol ); MyPL.set( "Initial Guess", "User" ); typedef Epetra_MultiVector MV; typedef Epetra_Operator OP; typedef Anasazi::MultiVecTraits<double, MV> MVT; typedef Anasazi::OperatorTraits<double, MV, OP> OPT; // // Create the eigenproblem to be solved. 
// Teuchos::RCP<Epetra_MultiVector> ivec = Teuchos::rcp( new Epetra_MultiVector(*Map, blockSize) ); ivec->Random(); Teuchos::RCP<Anasazi::BasicEigenproblem<double, MV, OP> > MyProblem; if (haveM) { MyProblem = Teuchos::rcp( new Anasazi::BasicEigenproblem<double, MV, OP>() ); MyProblem->setA(K); MyProblem->setM(M); MyProblem->setPrec(prec); MyProblem->setInitVec(ivec); } else { MyProblem = Teuchos::rcp( new Anasazi::BasicEigenproblem<double, MV, OP>() ); MyProblem->setA(K); MyProblem->setPrec(prec); MyProblem->setInitVec(ivec); } // Inform the eigenproblem that the (K,M) is Hermitian MyProblem->setHermitian( isHermitian ); // Set the number of eigenvalues requested MyProblem->setNEV( nev ); // Inform the eigenproblem that you are finished passing it information bool boolret = MyProblem->setProblem(); if (boolret != true) { if (verbose && MyPID == 0) { cout << "Anasazi::BasicEigenproblem::setProblem() returned with error." << endl; } #ifdef HAVE_MPI MPI_Finalize() ; #endif return -1; } // Initialize the Block Arnoldi solver Anasazi::GeneralizedDavidsonSolMgr<double, MV, OP> MySolverMgr(MyProblem, MyPL); // Solve the problem to the specified tolerances or length Anasazi::ReturnType returnCode = MySolverMgr.solve(); if (returnCode != Anasazi::Converged && MyPID==0 && verbose) { cout << "Anasazi::EigensolverMgr::solve() returned unconverged." << endl; } // Get the eigenvalues and eigenvectors from the eigenproblem Anasazi::Eigensolution<double,MV> sol = MyProblem->getSolution(); std::vector<Anasazi::Value<double> > evals = sol.Evals; Teuchos::RCP<MV> evecs = sol.Evecs; std::vector<int> index = sol.index; int numev = sol.numVecs; if (numev > 0) { // Compute residuals. Teuchos::LAPACK<int,double> lapack; std::vector<double> normR(numev); // The problem is non-Hermitian. 
int i=0; std::vector<int> curind(1); std::vector<double> resnorm(1), tempnrm(1); Teuchos::RCP<MV> tempKevec, Mevecs; Teuchos::RCP<const MV> tempeveci, tempMevec; Epetra_MultiVector Kevecs(*Map,numev); // Compute K*evecs OPT::Apply( *K, *evecs, Kevecs ); if (haveM) { Mevecs = Teuchos::rcp( new Epetra_MultiVector(*Map,numev) ); OPT::Apply( *M, *evecs, *Mevecs ); } else { Mevecs = evecs; } Teuchos::SerialDenseMatrix<int,double> Breal(1,1), Bimag(1,1); while (i<numev) { if (index[i]==0) { // Get a view of the M*evecr curind[0] = i; tempMevec = MVT::CloneView( *Mevecs, curind ); // Get a copy of A*evecr tempKevec = MVT::CloneCopy( Kevecs, curind ); // Compute K*evecr - lambda*M*evecr Breal(0,0) = evals[i].realpart; MVT::MvTimesMatAddMv( -1.0, *tempMevec, Breal, 1.0, *tempKevec ); // Compute the norm of the residual and increment counter MVT::MvNorm( *tempKevec, resnorm ); normR[i] = resnorm[0]; i++; } else { // Get a view of the real part of M*evecr curind[0] = i; tempMevec = MVT::CloneView( *Mevecs, curind ); // Get a copy of K*evecr tempKevec = MVT::CloneCopy( Kevecs, curind ); // Get a view of the imaginary part of the eigenvector (eveci) curind[0] = i+1; tempeveci = MVT::CloneView( *Mevecs, curind ); // Set the eigenvalue into Breal and Bimag Breal(0,0) = evals[i].realpart; Bimag(0,0) = evals[i].imagpart; // Compute K*evecr - M*evecr*lambdar + M*eveci*lambdai MVT::MvTimesMatAddMv( -1.0, *tempMevec, Breal, 1.0, *tempKevec ); MVT::MvTimesMatAddMv( 1.0, *tempeveci, Bimag, 1.0, *tempKevec ); MVT::MvNorm( *tempKevec, tempnrm ); // Get a copy of K*eveci tempKevec = MVT::CloneCopy( Kevecs, curind ); // Compute K*eveci - M*eveci*lambdar - M*evecr*lambdai MVT::MvTimesMatAddMv( -1.0, *tempMevec, Bimag, 1.0, *tempKevec ); MVT::MvTimesMatAddMv( -1.0, *tempeveci, Breal, 1.0, *tempKevec ); MVT::MvNorm( *tempKevec, resnorm ); // Compute the norms and scale by magnitude of eigenvalue normR[i] = lapack.LAPY2( tempnrm[0], resnorm[0] ); normR[i+1] = normR[i]; i=i+2; } } // Output 
computed eigenvalues and their direct residuals if (verbose && MyPID==0) { cout.setf(std::ios_base::right, std::ios_base::adjustfield); cout<<endl<< "Actual Residuals"<<endl; cout<< std::setw(16) << "Real Part" << std::setw(16) << "Imag Part" << std::setw(20) << "Direct Residual"<< endl; cout<<"-----------------------------------------------------------"<<endl; for (int j=0; j<numev; j++) { cout<< std::setw(16) << evals[j].realpart << std::setw(16) << evals[j].imagpart << std::setw(20) << normR[j] << endl; } cout<<"-----------------------------------------------------------"<<endl; } } #ifdef EPETRA_MPI MPI_Finalize() ; #endif return 0; } // end BlockKrylovSchurEpetraExFile.cpp
// Regression check: build an incomplete-Cholesky preconditioner two ways —
// directly via Ifpack_CrsIct, then via the Ifpack factory ("IC") with the
// same parameters — and verify both give the identical CG iteration count.
// Returns EXIT_SUCCESS, or an IFPACK error code if the counts differ.
int main(int argc, char *argv[]) {
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  int MyPID = Comm.MyPID();
  bool verbose = false;
  if (MyPID==0) verbose = true;

  // The problem is defined on a 2D grid, global size is nx * nx.
  int nx = 30;
  Teuchos::ParameterList GaleriList;
  GaleriList.set("nx", nx);
  GaleriList.set("ny", nx * Comm.NumProc());
  GaleriList.set("mx", 1);
  GaleriList.set("my", Comm.NumProc());
  Teuchos::RefCountPtr<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap("Cartesian2D", Comm, GaleriList) );
  Teuchos::RefCountPtr<Epetra_CrsMatrix> A = Teuchos::rcp( Galeri::CreateCrsMatrix("Laplace2D", &*Map, GaleriList) );
  Teuchos::RefCountPtr<Epetra_MultiVector> LHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) );
  Teuchos::RefCountPtr<Epetra_MultiVector> RHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) );
  LHS->PutScalar(0.0);
  RHS->Random();

  // ============================ //
  // Construct ILU preconditioner //
  // ---------------------------- //
  // I wanna test funky values to be sure that they have the same
  // influence on the algorithms, both old and new
  int LevelFill = 2;
  double DropTol = 0.3333;
  double Condest;

  Teuchos::RefCountPtr<Ifpack_CrsIct> ICT;
  ICT = Teuchos::rcp( new Ifpack_CrsIct(*A,DropTol,LevelFill) );
  ICT->SetAbsoluteThreshold(0.00123);
  ICT->SetRelativeThreshold(0.9876);
  // Init values from A
  ICT->InitValues(*A);
  // compute the factors
  ICT->Factor();
  // and now estimate the condition number
  ICT->Condest(false,Condest);
  if( Comm.MyPID() == 0 ) {
    cout << "Condition number estimate (level-of-fill = "
         << LevelFill << ") = " << Condest << endl;
  }

  // Define label for printing out during the solve phase
  string label = "Ifpack_CrsIct Preconditioner: LevelFill = " + toString(LevelFill) +
    " Overlap = 0";
  ICT->SetLabel(label.c_str());

  // Here we create an AztecOO object
  LHS->PutScalar(0.0);
  int Niters = 1200;
  AztecOO solver;
  solver.SetUserMatrix(&*A);
  solver.SetLHS(&*LHS);
  solver.SetRHS(&*RHS);
  solver.SetAztecOption(AZ_solver,AZ_cg);
  solver.SetPrecOperator(&*ICT);
  solver.SetAztecOption(AZ_output, 16);
  solver.Iterate(Niters, 5.0e-5);

  int OldIters = solver.NumIters();

  // now rebuild the same preconditioner using ICT, we expect the same
  // number of iterations
  Ifpack Factory;
  Teuchos::RefCountPtr<Ifpack_Preconditioner> Prec =
    Teuchos::rcp( Factory.Create("IC", &*A) );

  Teuchos::ParameterList List;
  // NOTE: get(name, default) is used deliberately here — Teuchos'
  // ParameterList::get with a default value records that default in the
  // list when the key is absent, so these calls populate List just like
  // set() would while mirroring the thresholds used for ICT above.
  List.get("fact: level-of-fill", 2);
  List.get("fact: drop tolerance", 0.3333);
  List.get("fact: absolute threshold", 0.00123);
  List.get("fact: relative threshold", 0.9876);
  List.get("fact: relaxation value", 0.0);

  IFPACK_CHK_ERR(Prec->SetParameters(List));
  IFPACK_CHK_ERR(Prec->Compute());

  // Here we create an AztecOO object
  LHS->PutScalar(0.0);
  solver.SetUserMatrix(&*A);
  solver.SetLHS(&*LHS);
  solver.SetRHS(&*RHS);
  solver.SetAztecOption(AZ_solver,AZ_cg);
  solver.SetPrecOperator(&*Prec);
  solver.SetAztecOption(AZ_output, 16);
  solver.Iterate(Niters, 5.0e-5);

  int NewIters = solver.NumIters();

  // Both preconditioners should behave identically.
  if (OldIters != NewIters)
    IFPACK_CHK_ERR(-1);

#ifdef HAVE_MPI
  MPI_Finalize() ;
#endif
  return(EXIT_SUCCESS);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Example driver: build a small 2D strip mesh, assemble a random
// block (VBR) Jacobian, wrap it for Ifpack/Belos, and solve one linear
// system J*delta_x = f with ILU-preconditioned block GMRES.
// Returns 0; exceptions are caught and reported below.
int main(int argc, char *argv[]) {
  using namespace std;
  using namespace Teuchos;
  using namespace PHX;

#ifdef HAVE_MPI
  MPI_Init(&argc, &argv);
#endif

  try {
    RCP<Time> total_time = TimeMonitor::getNewTimer("Total Run Time");
    TimeMonitor tm(*total_time);

    bool print_debug_info = true;

    cout << "\nStarting Epetra_VBR_Test Example!\n" << endl;

    // *********************************************************
    // * Build the Finite Element data structures
    // *********************************************************

    // Create the mesh, one strip of 2D elements.
    const std::size_t num_local_cells = 5;
    double domain_length = 1.0;
    double dx = domain_length / static_cast<double>(num_local_cells);
    // Node numbering for the first quad; counter-clockwise corners.
    std::vector<int> global_id(4);
    global_id[0] = 0;
    global_id[1] = 2;
    global_id[2] = 3;
    global_id[3] = 1;
    std::vector<double> x_coords(4);
    std::vector<double> y_coords(4);
    std::vector<Element_Linear2D> cells;
    for (std::size_t i = 0; i < num_local_cells; ++i) {
      x_coords[0] = static_cast<double>(i) * dx;
      x_coords[1] = x_coords[0] + dx;
      x_coords[2] = x_coords[0] + dx;
      x_coords[3] = static_cast<double>(i) * dx;
      y_coords[0] = 0.0;
      y_coords[1] = 0.0;
      y_coords[2] = 1.0;
      y_coords[3] = 1.0;
      Element_Linear2D e(global_id, i, i, x_coords, y_coords);
      cells.push_back(e);
      // update global indices for next element
      // (inner i deliberately shadows the cell loop index)
      for (std::size_t i=0; i < global_id.size(); ++i)
        global_id[i] += 2;
    }

    // Divide mesh into workset blocks of at most workset_size cells each.
    const std::size_t workset_size = 5;
    std::vector<MyWorkset> worksets;
    {
      std::vector<Element_Linear2D>::iterator cell_it = cells.begin();
      std::size_t count = 0;
      MyWorkset w;
      w.local_offset = cell_it->localElementIndex();
      w.begin = cell_it;
      for (; cell_it != cells.end(); ++cell_it) {
        ++count;
        std::vector<Element_Linear2D>::iterator next = cell_it;
        ++next;
        // close this workset when it is full or the mesh is exhausted
        if ( count == workset_size || next == cells.end()) {
          w.end = next;
          w.num_cells = count;
          worksets.push_back(w);
          count = 0;
          if (next != cells.end()) {
            w.local_offset = next->localElementIndex();
            w.begin = next;
          }
        }
      }
    }

    if (print_debug_info) {
      cout << "Printing Element Information" << endl;
      for (std::size_t i = 0; i < worksets.size(); ++i) {
        std::vector<Element_Linear2D>::iterator it = worksets[i].begin;
        for (; it != worksets[i].end; ++it)
          cout << *it << endl;
      }
    }

    if (print_debug_info) {
      for (std::size_t i = 0; i < worksets.size(); ++i) {
        cout << "Printing Workset Information" << endl;
        cout << "worksets[" << i << "]" << endl;
        cout << " num_cells =" << worksets[i].num_cells << endl;
        cout << " local_offset =" << worksets[i].local_offset << endl;
        std::vector<Element_Linear2D>::iterator it = worksets[i].begin;
        for (; it != worksets[i].end; ++it)
          cout << " cell_local_index =" << it->localElementIndex() << endl;
      }
      cout << endl;
    }

    // *********************************************************
    // * Build the Newton solver data structures
    // *********************************************************

    // Setup Nonlinear Problem (build Epetra_Vector and Epetra_CrsMatrix)
    // Newton's method: J delta_x = -f
    const std::size_t num_eq = 2;                         // unknowns per node
    const std::size_t num_nodes = 2 * (num_local_cells +1);
    const std::size_t num_dof = num_nodes * num_eq;

    RCP<Epetra_Vector> x;
    RCP<Epetra_Vector> delta_x;
    RCP<Epetra_Vector> f;
    RCP<Epetra_VbrRowMatrix> Jac;
    {
      Epetra_SerialComm comm;
      // One block row per node, each block num_eq x num_eq.
      Epetra_BlockMap map(static_cast<int>(num_nodes), static_cast<int>(num_eq),
                          0, comm);
      Epetra_DataAccess copy = ::Copy;
      Epetra_CrsGraph graph(copy, map, 3);

      // Insert the element connectivity into the graph.
      std::vector<Element_Linear2D>::iterator e = cells.begin();
      for (; e != cells.end(); ++e) {
        for (int row = 0; row < e->numNodes(); ++row) {
          for (int col = 0; col < e->numNodes(); ++col) {
            int global_row = e->globalNodeId(row);
            int global_col = e->globalNodeId(col);
            graph.InsertGlobalIndices(global_row, 1, &global_col);
          }
        }
      }
      graph.FillComplete();
      graph.Print(cout);

      Epetra_SerialDenseMatrix block_matrix(2,2);
      Epetra_SerialDenseMatrix diag_block_matrix(2,2);
      RCP<Epetra_VbrMatrix> Jac_vbr = rcp(new Epetra_VbrMatrix(copy,graph));
      Epetra_Util util;

      // Fill the VBR matrix with random blocks; diagonal blocks get a
      // 100x boost on their own diagonal to keep the system well conditioned.
      e = cells.begin();
      for (; e != cells.end(); ++e) {
        for (int row = 0; row < e->numNodes(); ++row) {
          int global_row = e->globalNodeId(row);
          block_matrix(0,0) = util.RandomDouble();
          block_matrix(0,1) = util.RandomDouble();
          block_matrix(1,0) = util.RandomDouble();
          block_matrix(1,1) = util.RandomDouble();
          diag_block_matrix(0,0) = 100.0*util.RandomDouble();
          diag_block_matrix(0,1) = util.RandomDouble();
          diag_block_matrix(1,0) = util.RandomDouble();
          diag_block_matrix(1,1) = 100.0*util.RandomDouble();
          for (int col = 0; col < e->numNodes(); ++col) {
            int global_col = e->globalNodeId(col);
            // Begin/Submit/End must stay in exactly this sequence.
            Jac_vbr->BeginReplaceMyValues(global_row, 1, &global_col);
            if (global_row==global_col)
              Jac_vbr->SubmitBlockEntry(diag_block_matrix);
            else
              Jac_vbr->SubmitBlockEntry(block_matrix);
            Jac_vbr->EndSubmitEntries();
          }
        }
      }
      Jac_vbr->FillComplete();

      x = rcp(new Epetra_Vector(map));
      delta_x = rcp(new Epetra_Vector(map));
      f = rcp(new Epetra_Vector(map));

      // Manufacture a right-hand side: f = J * ones.
      x->PutScalar(1.0);
      Jac_vbr->Apply(*x,*f);
      // Wrap the VBR matrix as a row matrix; embedding Jac_vbr in the RCP
      // keeps it alive for the lifetime of the wrapper.
      Jac = rcpWithEmbeddedObjPostDestroy(new Epetra_VbrRowMatrix(Jac_vbr.get()),
                                          Jac_vbr);
    }

    if (print_debug_info) {
      x->Print(cout);
      Jac->Print(cout);
      f->Print(cout);
    }

    // *********************************************************
    // * Build Preconditioner (Ifpack)
    // *********************************************************
    Ifpack Factory;
    std::string PrecType = "ILU";
    int OverlapLevel = 1;
    RCP<Ifpack_Preconditioner> Prec =
      Teuchos::rcp( Factory.Create(PrecType, &*Jac, OverlapLevel) );
    ParameterList ifpackList;
    ifpackList.set("fact: drop tolerance", 1e-9);
    ifpackList.set("fact: level-of-fill", 1);
    ifpackList.set("schwarz: combine mode", "Add");
    IFPACK_CHK_ERR(Prec->SetParameters(ifpackList));
    IFPACK_CHK_ERR(Prec->Initialize());
    IFPACK_CHK_ERR(Prec->Compute());
    IFPACK_CHK_ERR(Prec->Condest());
    RCP<Belos::EpetraPrecOp> belosPrec = rcp( new Belos::EpetraPrecOp( Prec ) );

    // *********************************************************
    // * Build linear solver (Belos)
    // *********************************************************

    // Linear solver parameters
    typedef double ST;
    typedef Teuchos::ScalarTraits<ST> SCT;
    typedef SCT::magnitudeType MT;
    typedef Epetra_MultiVector MV;
    typedef Epetra_Operator OP;
    typedef Belos::MultiVecTraits<ST,MV> MVT;
    typedef Belos::OperatorTraits<ST,MV,OP> OPT;

    RCP<ParameterList> belosList = rcp(new ParameterList);
    // Krylov space as large as the problem -> no restarts needed.
    belosList->set<int>("Num Blocks", num_dof);
    belosList->set<int>("Block Size", 1);
    belosList->set<int>("Maximum Iterations", 400);
    belosList->set<int>("Maximum Restarts", 0);
    belosList->set<MT>( "Convergence Tolerance", 1.0e-4);
    int verbosity = Belos::Errors + Belos::Warnings;
    if (false) {
      verbosity += Belos::TimingDetails + Belos::StatusTestDetails;
      belosList->set<int>( "Output Frequency", -1);
    }
    if (print_debug_info) {
      verbosity += Belos::Debug;
      belosList->set<int>( "Output Frequency", -1);
    }
    belosList->set( "Verbosity", verbosity );

    RCP<Epetra_MultiVector> F =
      Teuchos::rcp_implicit_cast<Epetra_MultiVector>(f);
    RCP<Epetra_MultiVector> DX =
      Teuchos::rcp_implicit_cast<Epetra_MultiVector>(delta_x);
    RCP<Belos::LinearProblem<double,MV,OP> > problem =
      rcp(new Belos::LinearProblem<double,MV,OP>(Jac, DX, F) );
    problem->setRightPrec( belosPrec );

    RCP< Belos::SolverManager<double,MV,OP> > gmres_solver =
      rcp( new Belos::BlockGmresSolMgr<double,MV,OP>(problem, belosList) );

    // *********************************************************
    // * Solve the system
    // *********************************************************
    RCP<Time> linear_solve_time = TimeMonitor::getNewTimer("Linear Solve Time");

    std::size_t num_gmres_iterations = 0;
    {
      TimeMonitor t(*linear_solve_time);
      delta_x->PutScalar(0.0);
      IFPACK_CHK_ERR(Prec->Compute());
      problem->setProblem();
      Belos::ReturnType ret = gmres_solver->solve();
      int num_iters = gmres_solver->getNumIters();
      num_gmres_iterations += num_iters;
      if (print_debug_info)
        std::cout << "Number of gmres iterations performed for this solve: "
                  << num_iters << std::endl;
      if (ret!=Belos::Converged) {
        std::cout << std::endl << "WARNING: Belos did not converge!"
                  << std::endl;
      }
    }

    delta_x->Print(cout);

    // *********************************************************************
    // Finished all testing
    // *********************************************************************
    std::cout << "\nRun has completed successfully!\n" << std::endl;
    // *********************************************************************
    // *********************************************************************
  }
  catch (const std::exception& e) {
    std::cout << "************************************************" << endl;
    std::cout << "************************************************" << endl;
    std::cout << "Exception Caught!" << endl;
    std::cout << "Error message is below\n " << e.what() << endl;
    std::cout << "************************************************" << endl;
  }
  catch (...) {
    std::cout << "************************************************" << endl;
    std::cout << "************************************************" << endl;
    std::cout << "Unknown Exception Caught!" << endl;
    std::cout << "************************************************" << endl;
  }

  TimeMonitor::summarize();

#ifdef HAVE_MPI
  MPI_Finalize();
#endif
  return 0;
}
// Example driver: build a 2D Laplacian with Galeri and solve it with
// AztecOO/GMRES preconditioned by an Ifpack additive-Schwarz "Amesos"
// preconditioner (exact subdomain solves via Amesos' KLU).
// Returns EXIT_SUCCESS; IFPACK_CHK_ERR aborts with the error code on failure.
int main(int argc, char *argv[]) {
  // initialize MPI and Epetra communicator
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
  Epetra_SerialComm Comm;
#endif

  Teuchos::ParameterList GaleriList;

  // The problem is defined on a 2D grid, global size is nx * nx.
  int nx = 30;
  GaleriList.set("nx", nx);
  GaleriList.set("ny", nx * Comm.NumProc());
  GaleriList.set("mx", 1);
  GaleriList.set("my", Comm.NumProc());
  Teuchos::RefCountPtr<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap("Cartesian2D", Comm, GaleriList) );
  Teuchos::RefCountPtr<Epetra_RowMatrix> A = Teuchos::rcp( Galeri::CreateCrsMatrix("Laplace2D", &*Map, GaleriList) );

  // =============================================================== //
  // B E G I N N I N G   O F   I F P A C K   C O N S T R U C T I O N //
  // =============================================================== //

  Teuchos::ParameterList List;

  // allocates an IFPACK factory. No data is associated
  // to this object (only method Create()).
  Ifpack Factory;

  // create the preconditioner. For valid PrecType values,
  // please check the documentation
  std::string PrecType = "Amesos";
  int OverlapLevel = 2; // must be >= 0. If Comm.NumProc() == 1,
                        // it is ignored.

  Teuchos::RefCountPtr<Ifpack_Preconditioner> Prec =
    Teuchos::rcp( Factory.Create(PrecType, &*A, OverlapLevel) );
  assert(Prec != Teuchos::null);

  // specify the Amesos solver to be used.
  // If the selected solver is not available,
  // IFPACK will try to use Amesos' KLU (which is usually always
  // compiled). Amesos' serial solvers are:
  // "Amesos_Klu", "Amesos_Umfpack", "Amesos_Superlu"
  List.set("amesos: solver type", "Amesos_Klu");

  // sets the parameters
  IFPACK_CHK_ERR(Prec->SetParameters(List));

  // initialize the preconditioner. At this point the matrix must
  // have been FillComplete()'d, but actual values are ignored.
  // At this call, Amesos will perform the symbolic factorization.
  IFPACK_CHK_ERR(Prec->Initialize());

  // Builds the preconditioners, by looking for the values of
  // the matrix. At this call, Amesos will perform the
  // numeric factorization.
  IFPACK_CHK_ERR(Prec->Compute());

  // =================================================== //
  // E N D   O F   I F P A C K   C O N S T R U C T I O N //
  // =================================================== //

  // At this point, we need some additional objects
  // to define and solve the linear system.

  // defines LHS and RHS
  Epetra_Vector LHS(A->OperatorDomainMap());
  Epetra_Vector RHS(A->OperatorDomainMap());

  // solution is constant
  LHS.PutScalar(1.0);
  // now build corresponding RHS
  A->Apply(LHS,RHS);

  // now randomize the solution
  // NOTE(review): the comment says "randomize the solution" but this
  // randomizes RHS, discarding the A*LHS product computed just above —
  // presumably LHS.Random() was intended; confirm before changing.
  RHS.Random();

  // need an Epetra_LinearProblem to define AztecOO solver
  Epetra_LinearProblem Problem(&*A,&LHS,&RHS);

  // now we can allocate the AztecOO solver
  AztecOO Solver(Problem);

  // specify solver
  Solver.SetAztecOption(AZ_solver,AZ_gmres);
  Solver.SetAztecOption(AZ_output,32);

  // HERE WE SET THE IFPACK PRECONDITIONER
  Solver.SetPrecOperator(&*Prec);

  // .. and here we solve
  // NOTE: with one process, the solver must converge in
  // one iteration.
  Solver.Iterate(1550,1e-8);

#ifdef HAVE_MPI
  MPI_Finalize() ;
#endif
  return(EXIT_SUCCESS);
}
int main(int argc, char *argv[]) { #ifdef HAVE_MPI MPI_Init(&argc,&argv); Epetra_MpiComm Comm (MPI_COMM_WORLD); #else Epetra_SerialComm Comm; #endif int MyPID = Comm.MyPID(); bool verbose = false; if (MyPID==0) verbose = true; /*int npRows = -1; int npCols = -1; bool useTwoD = false; int randomize = 1; std::string matrix = "Laplacian"; Epetra_CrsMatrix *AK = NULL; std::string filename = "email.mtx"; read_matrixmarket_file((char*) filename.c_str(), Comm, AK, useTwoD, npRows, npCols, randomize, false, (matrix.find("Laplacian")!=std::string::npos)); Teuchos::RCP<Epetra_CrsMatrix> A(AK); const Epetra_Map *AMap = &(AK->DomainMap()); Teuchos::RCP<const Epetra_Map> Map(AMap, false);*/ int nx = 30; Teuchos::ParameterList GaleriList; GaleriList.set("nx", nx); GaleriList.set("ny", nx * Comm.NumProc()); GaleriList.set("mx", 1); GaleriList.set("my", Comm.NumProc()); Teuchos::RefCountPtr<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap("Cartesian2D", Comm, GaleriList) ); Teuchos::RefCountPtr<Epetra_CrsMatrix> A = Teuchos::rcp( Galeri::CreateCrsMatrix("Laplace2D", &*Map, GaleriList) ); Teuchos::RefCountPtr<Epetra_MultiVector> LHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) ); Teuchos::RefCountPtr<Epetra_MultiVector> RHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) ); LHS->PutScalar(0.0); RHS->Random(); // ==================================================== // // Compare support graph preconditioners to no precond. // // ---------------------------------------------------- // const double tol = 1e-5; const int maxIter = 500; // Baseline: No preconditioning // Compute number of iterations, to compare to IC later. 
// Here we create an AztecOO object LHS->PutScalar(0.0); AztecOO solver; solver.SetUserMatrix(&*A); solver.SetLHS(&*LHS); solver.SetRHS(&*RHS); solver.SetAztecOption(AZ_solver,AZ_cg); solver.SetAztecOption(AZ_output, 16); solver.Iterate(maxIter, tol); int Iters = solver.NumIters(); int SupportIters; Ifpack Factory; Teuchos::ParameterList List; #ifdef HAVE_IFPACK_AMESOS ////////////////////////////////////////////////////// // Same test with Ifpack_SupportGraph // Factored with Amesos Teuchos::RefCountPtr<Ifpack_Preconditioner> PrecSupportAmesos = Teuchos::rcp( Factory.Create("MSF Amesos", &*A) ); List.set("amesos: solver type","Klu"); List.set("MST: keep diagonal", 1.0); List.set("MST: randomize", 1); //List.set("fact: absolute threshold", 3.0); IFPACK_CHK_ERR(PrecSupportAmesos->SetParameters(List)); IFPACK_CHK_ERR(PrecSupportAmesos->Initialize()); IFPACK_CHK_ERR(PrecSupportAmesos->Compute()); // Here we create an AztecOO object LHS->PutScalar(0.0); //AztecOO solver; solver.SetUserMatrix(&*A); solver.SetLHS(&*LHS); solver.SetRHS(&*RHS); solver.SetAztecOption(AZ_solver,AZ_cg); solver.SetPrecOperator(&*PrecSupportAmesos); solver.SetAztecOption(AZ_output, 16); solver.Iterate(maxIter, tol); SupportIters = solver.NumIters(); // Compare to no preconditioning if (SupportIters > 2*Iters) IFPACK_CHK_ERR(-1); #endif ////////////////////////////////////////////////////// // Same test with Ifpack_SupportGraph // Factored with IC Teuchos::RefCountPtr<Ifpack_Preconditioner> PrecSupportIC = Teuchos::rcp( Factory.Create("MSF IC", &*A) ); IFPACK_CHK_ERR(PrecSupportIC->SetParameters(List)); IFPACK_CHK_ERR(PrecSupportIC->Compute()); // Here we create an AztecOO object LHS->PutScalar(0.0); //AztecOO solver; solver.SetUserMatrix(&*A); solver.SetLHS(&*LHS); solver.SetRHS(&*RHS); solver.SetAztecOption(AZ_solver,AZ_cg); solver.SetPrecOperator(&*PrecSupportIC); solver.SetAztecOption(AZ_output, 16); solver.Iterate(maxIter, tol); SupportIters = solver.NumIters(); // Compare to no 
preconditioning if (SupportIters > 2*Iters) IFPACK_CHK_ERR(-1); #ifdef HAVE_MPI MPI_Finalize() ; #endif return(EXIT_SUCCESS); }
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Driver for the Phalanx FEM_Nonlinear example.  Builds a 2D, two-equation
// finite element problem, assembles residual / Jacobian / Jv with Phalanx
// field managers, constructs an Ifpack (or ML) preconditioner plus a Belos
// GMRES solver, then runs a fixed number of timed evaluation/matvec sweeps.
// The full Newton solve is retained below in a block comment for future
// Jacobian-free Newton-Krylov testing.
int main(int argc, char *argv[])
{

#ifdef HAVE_MPI
  MPI_Init(&argc, &argv);
#endif

  using namespace std;
  using namespace Teuchos;
  using namespace PHX;

  try {

    // Timers for each phase of the run; summarized at the end of main().
    RCP<Time> total_time = TimeMonitor::getNewTimer("Total Run Time");
    TimeMonitor tm(*total_time);
    RCP<Time> residual_eval_time = TimeMonitor::getNewTimer("Residual Evaluation Time");
    RCP<Time> jacobian_eval_time = TimeMonitor::getNewTimer("Jacobian Evaluation Time");
    RCP<Time> linear_solve_time = TimeMonitor::getNewTimer("Linear Solve Time");
    RCP<Time> nonlinear_solve_time = TimeMonitor::getNewTimer("Nonlinear Solve Time");
    RCP<Time> preconditioner_solve_time = TimeMonitor::getNewTimer("Preconditioner Time");
    RCP<Time> setup_time = TimeMonitor::getNewTimer("Setup Time (not scalable)");
    RCP<Time> jv_eval_time = TimeMonitor::getNewTimer("Jv (AD)");
    RCP<Time> matvec = TimeMonitor::getNewTimer("Matvec");

    setup_time->start();

    bool print_debug_info = false;

#ifdef HAVE_MPI
    RCP<Epetra_Comm> comm = rcp(new Epetra_MpiComm(MPI_COMM_WORLD));
#else
    RCP<Epetra_Comm> comm = rcp(new Epetra_SerialComm);
#endif

    Teuchos::basic_FancyOStream<char> os(rcp(&std::cout,false));
    os.setShowProcRank(true);
    os.setProcRankAndSize(comm->MyPID(), comm->NumProc());

    if (comm->MyPID() == 0)
      cout << "\nStarting FEM_Nonlinear Example!\n" << endl;

    // *********************************************************
    // * Build the Finite Element data structures
    // *********************************************************

    // Problem dimension - a 2D problem
    const static int dim = 2;

    // Create the mesh
    // NOTE(review): MeshBuilder arguments appear to be element counts and
    // physical extents plus a partitioning/debug parameter -- confirm
    // against the MeshBuilder constructor.
    MeshBuilder mb(comm, 10, 3, 1.0, 1.0, 8);
    if (print_debug_info)
      os << mb;

    std::vector<Element_Linear2D>& cells = *(mb.myElements());

    // Divide mesh into workset blocks of at most workset_size cells each.
    const std::size_t workset_size = 15;
    std::vector<MyWorkset> worksets;
    {
      std::vector<Element_Linear2D>::iterator cell_it = cells.begin();
// Driver for a generalized eigenvalue test: solves K x = lambda M x for the
// 2D modal Laplace problem using Anasazi Block Krylov-Schur, with the
// inverse of K applied through a Belos Block CG inner solve that is
// preconditioned by an Ifpack ICT factorization.
int main(int argc, char *argv[])
{
  using std::cout;
  using std::endl;

  int i;

#ifdef EPETRA_MPI
  // Initialize MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  int MyPID = Comm.MyPID();

  // Number of dimension of the domain
  int space_dim = 2;

  // Size of each of the dimensions of the domain
  std::vector<double> brick_dim( space_dim );
  brick_dim[0] = 1.0;
  brick_dim[1] = 1.0;

  // Number of elements in each of the dimensions of the domain
  std::vector<int> elements( space_dim );
  elements[0] = 10;
  elements[1] = 10;

  // Create problem
  Teuchos::RCP<ModalProblem> testCase =
    Teuchos::rcp( new ModeLaplace2DQ2(Comm, brick_dim[0], elements[0], brick_dim[1], elements[1]) );

  // Get the stiffness and mass matrices.  Non-owning RCPs ("false"):
  // the matrices remain owned by testCase.
  Teuchos::RCP<Epetra_CrsMatrix> K =
    Teuchos::rcp( const_cast<Epetra_CrsMatrix *>(testCase->getStiffness()), false );
  Teuchos::RCP<Epetra_CrsMatrix> M =
    Teuchos::rcp( const_cast<Epetra_CrsMatrix *>(testCase->getMass()), false );

  //
  // ************Construct preconditioner*************
  //
  Teuchos::ParameterList ifpackList;

  // allocates an IFPACK factory. No data is associated
  // to this object (only method Create()).
  Ifpack Factory;

  // create the preconditioner. For valid PrecType values,
  // please check the documentation
  std::string PrecType = "ICT"; // incomplete Cholesky
  int OverlapLevel = 0; // must be >= 0. If Comm.NumProc() == 1,
                        // it is ignored.

  Teuchos::RCP<Ifpack_Preconditioner> Prec =
    Teuchos::rcp( Factory.Create(PrecType, &*K, OverlapLevel) );
  assert(Prec != Teuchos::null);

  // specify parameters for ICT
  ifpackList.set("fact: drop tolerance", 1e-4);
  ifpackList.set("fact: ict level-of-fill", 0.);
  // the combine mode is on the following:
  // "Add", "Zero", "Insert", "InsertAdd", "Average", "AbsMax"
  // Their meaning is as defined in file Epetra_CombineMode.h
  ifpackList.set("schwarz: combine mode", "Add");

  // sets the parameters
  IFPACK_CHK_ERR(Prec->SetParameters(ifpackList));

  // initialize the preconditioner. At this point the matrix must
  // have been FillComplete()'d, but actual values are ignored.
  IFPACK_CHK_ERR(Prec->Initialize());

  // Builds the preconditioners, by looking for the values of
  // the matrix.
  IFPACK_CHK_ERR(Prec->Compute());

  //
  //*******************************************************/
  // Set up Belos Block CG operator for inner iteration
  //*******************************************************/
  //
  int blockSize = 3; // block size used by linear solver and eigensolver [ not required to be the same ]
  int maxits = K->NumGlobalRows(); // maximum number of iterations to run

  //
  // Create the Belos::LinearProblem
  //
  Teuchos::RCP<Belos::LinearProblem<double,Epetra_MultiVector,Epetra_Operator> > My_LP =
    Teuchos::rcp( new Belos::LinearProblem<double,Epetra_MultiVector,Epetra_Operator>() );
  My_LP->setOperator( K );

  // Create the Belos preconditioned operator from the Ifpack preconditioner.
  // NOTE: This is necessary because Belos expects an operator to apply the
  //       preconditioner with Apply() NOT ApplyInverse().
  Teuchos::RCP<Epetra_Operator> belosPrec = Teuchos::rcp( new Epetra_InvOperator( Prec.get() ) );
  My_LP->setLeftPrec( belosPrec );

  //
  // Create the ParameterList for the Belos Operator
  //
  Teuchos::RCP<Teuchos::ParameterList> My_List = Teuchos::rcp( new Teuchos::ParameterList() );
  My_List->set( "Solver", "BlockCG" );
  My_List->set( "Maximum Iterations", maxits );
  My_List->set( "Block Size", 1 );
  My_List->set( "Convergence Tolerance", 1e-12 );

  //
  // Create the Belos::EpetraOperator
  //
  Teuchos::RCP<Belos::EpetraOperator> BelosOp =
    Teuchos::rcp( new Belos::EpetraOperator( My_LP, My_List ));

  //
  // ************************************
  // Start the block Arnoldi iteration
  // ************************************
  //
  //  Variables used for the Block Arnoldi Method
  //
  double tol = 1.0e-8;
  int nev = 10;
  int numBlocks = 3*nev/blockSize;
  int maxRestarts = 5;
  //int step = 5;
  std::string which = "LM";
  int verbosity = Anasazi::Errors + Anasazi::Warnings + Anasazi::FinalSummary;

  //
  // Create parameter list to pass into solver
  //
  Teuchos::ParameterList MyPL;
  MyPL.set( "Verbosity", verbosity );
  MyPL.set( "Which", which );
  MyPL.set( "Block Size", blockSize );
  MyPL.set( "Num Blocks", numBlocks );
  MyPL.set( "Maximum Restarts", maxRestarts );
  MyPL.set( "Convergence Tolerance", tol );
  //MyPL.set( "Step Size", step );

  typedef Epetra_MultiVector MV;
  typedef Epetra_Operator OP;
  typedef Anasazi::MultiVecTraits<double, MV> MVT;
  typedef Anasazi::OperatorTraits<double, MV, OP> OPT;

  // Create an Epetra_MultiVector for an initial vector to start the solver.
  // Note: This needs to have the same number of columns as the blocksize.
  Teuchos::RCP<Epetra_MultiVector> ivec =
    Teuchos::rcp( new Epetra_MultiVector(K->Map(), blockSize) );
  MVT::MvRandom( *ivec );

  // Call the ctor that calls the petra ctor for a matrix
  Teuchos::RCP<Anasazi::EpetraGenOp> Aop =
    Teuchos::rcp( new Anasazi::EpetraGenOp(BelosOp, M, false) );

  Teuchos::RCP<Anasazi::BasicEigenproblem<double,MV,OP> > MyProblem =
    Teuchos::rcp( new Anasazi::BasicEigenproblem<double,MV,OP>(Aop, M, ivec) );

  // Inform the eigenproblem that the matrix pencil (K,M) is symmetric
  MyProblem->setHermitian(true);

  // Set the number of eigenvalues requested
  MyProblem->setNEV( nev );

  // Inform the eigenproblem that you are finished passing it information
  bool boolret = MyProblem->setProblem();
  if (boolret != true) {
    if (MyPID == 0) {
      cout << "Anasazi::BasicEigenproblem::setProblem() returned with error." << endl;
    }
#ifdef HAVE_MPI
    MPI_Finalize() ;
#endif
    return -1;
  }

  // Initialize the Block Arnoldi solver
  Anasazi::BlockKrylovSchurSolMgr<double, MV, OP> MySolverMgr(MyProblem, MyPL);

  // Solve the problem to the specified tolerances or length
  Anasazi::ReturnType returnCode = MySolverMgr.solve();
  if (returnCode != Anasazi::Converged && MyPID==0) {
    cout << "Anasazi::EigensolverMgr::solve() returned unconverged." << endl;
  }

  // Get the eigenvalues and eigenvectors from the eigenproblem
  Anasazi::Eigensolution<double,MV> sol = MyProblem->getSolution();
  std::vector<Anasazi::Value<double> > evals = sol.Evals;
  Teuchos::RCP<MV> evecs = sol.Evecs;
  int numev = sol.numVecs;

  if (numev > 0) {
    // Rayleigh quotients: dmatr = evecs^T * K * evecs.  Since the solver
    // worked on the (shifted/inverted) operator, the Rayleigh error below
    // compares dmatr(i,i) against 1/lambda_i.
    Teuchos::SerialDenseMatrix<int,double> dmatr(numev,numev);
    Epetra_MultiVector tempvec(K->Map(), MVT::GetNumberVecs( *evecs ));
    OPT::Apply( *K, *evecs, tempvec );
    MVT::MvTransMv( 1.0, tempvec, *evecs, dmatr );

    if (MyPID==0) {
      double compeval = 0.0;
      cout.setf(std::ios_base::right, std::ios_base::adjustfield);
      cout<<"Actual Eigenvalues (obtained by Rayleigh quotient) : "<<endl;
      cout<<"------------------------------------------------------"<<endl;
      cout<<std::setw(16)<<"Real Part"
          <<std::setw(16)<<"Rayleigh Error"<<endl;
      cout<<"------------------------------------------------------"<<endl;
      for (i=0; i<numev; i++) {
        compeval = dmatr(i,i);
        cout<<std::setw(16)<<compeval
            <<std::setw(16)<<Teuchos::ScalarTraits<double>::magnitude(compeval-1.0/evals[i].realpart)
            <<endl;
      }
      cout<<"------------------------------------------------------"<<endl;
    }
  }

#ifdef EPETRA_MPI
  MPI_Finalize();
#endif

  return 0;
}
// ======================================================================= // GOAL: test that the names in the factory do not change. This test // will not solve any linear system. // int main(int argc, char *argv[]) { #ifdef HAVE_MPI MPI_Init(&argc,&argv); Epetra_MpiComm Comm( MPI_COMM_WORLD ); #else Epetra_SerialComm Comm; #endif Teuchos::ParameterList GaleriList; const int n = 9; GaleriList.set("n", n); Teuchos::RefCountPtr<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap("Linear", Comm, GaleriList) ); Teuchos::RefCountPtr<Epetra_CrsMatrix> A = Teuchos::rcp( Galeri::CreateCrsMatrix("Minij", &*Map, GaleriList) ); Ifpack Factory; Teuchos::RefCountPtr<Ifpack_Preconditioner> Prec; Prec = Teuchos::rcp( Factory.Create("point relaxation", &*A) ); assert (Prec != Teuchos::null); IFPACK_CHK_ERR(Prec->Initialize()); IFPACK_CHK_ERR(Prec->Compute()); cout << *Prec; Prec = Teuchos::rcp( Factory.Create("point relaxation stand-alone", &*A) ); assert (Prec != Teuchos::null); IFPACK_CHK_ERR(Prec->Initialize()); IFPACK_CHK_ERR(Prec->Compute()); cout << *Prec; Prec = Teuchos::rcp( Factory.Create("block relaxation", &*A) ); assert (Prec != Teuchos::null); IFPACK_CHK_ERR(Prec->Initialize()); IFPACK_CHK_ERR(Prec->Compute()); cout << *Prec; Prec = Teuchos::rcp( Factory.Create("block relaxation stand-alone", &*A) ); assert (Prec != Teuchos::null); IFPACK_CHK_ERR(Prec->Initialize()); IFPACK_CHK_ERR(Prec->Compute()); cout << *Prec; Prec = Teuchos::rcp( Factory.Create("IC", &*A) ); assert (Prec != Teuchos::null); IFPACK_CHK_ERR(Prec->Initialize()); IFPACK_CHK_ERR(Prec->Compute()); cout << *Prec; Prec = Teuchos::rcp( Factory.Create("ICT", &*A) ); assert (Prec != Teuchos::null); IFPACK_CHK_ERR(Prec->Initialize()); IFPACK_CHK_ERR(Prec->Compute()); cout << *Prec; Prec = Teuchos::rcp( Factory.Create("ILU", &*A) ); assert (Prec != Teuchos::null); IFPACK_CHK_ERR(Prec->Initialize()); IFPACK_CHK_ERR(Prec->Compute()); cout << *Prec; Prec = Teuchos::rcp( Factory.Create("ILUT", &*A) ); assert (Prec 
!= Teuchos::null); IFPACK_CHK_ERR(Prec->Initialize()); IFPACK_CHK_ERR(Prec->Compute()); cout << *Prec; Prec = Teuchos::rcp( Factory.Create("IC stand-alone", &*A) ); assert (Prec != Teuchos::null); IFPACK_CHK_ERR(Prec->Initialize()); IFPACK_CHK_ERR(Prec->Compute()); cout << *Prec; Prec = Teuchos::rcp( Factory.Create("ICT stand-alone", &*A) ); assert (Prec != Teuchos::null); IFPACK_CHK_ERR(Prec->Initialize()); IFPACK_CHK_ERR(Prec->Compute()); cout << *Prec; Prec = Teuchos::rcp( Factory.Create("ILU stand-alone", &*A) ); assert (Prec != Teuchos::null); IFPACK_CHK_ERR(Prec->Initialize()); IFPACK_CHK_ERR(Prec->Compute()); cout << *Prec; Prec = Teuchos::rcp( Factory.Create("ILUT stand-alone", &*A) ); assert (Prec != Teuchos::null); IFPACK_CHK_ERR(Prec->Initialize()); IFPACK_CHK_ERR(Prec->Compute()); cout << *Prec; #ifdef HAVE_IFPACK_AMESOS Prec = Teuchos::rcp( Factory.Create("Amesos", &*A) ); assert (Prec != Teuchos::null); IFPACK_CHK_ERR(Prec->Initialize()); IFPACK_CHK_ERR(Prec->Compute()); cout << *Prec; Prec = Teuchos::rcp( Factory.Create("Amesos stand-alone", &*A) ); assert (Prec != Teuchos::null); IFPACK_CHK_ERR(Prec->Initialize()); IFPACK_CHK_ERR(Prec->Compute()); cout << *Prec; #endif Prec = Teuchos::rcp( Factory.Create("Chebyshev", &*A) ); assert (Prec != Teuchos::null); IFPACK_CHK_ERR(Prec->Initialize()); IFPACK_CHK_ERR(Prec->Compute()); cout << *Prec; Prec = Teuchos::rcp( Factory.Create("Polynomial", &*A) ); assert (Prec != Teuchos::null); IFPACK_CHK_ERR(Prec->Initialize()); IFPACK_CHK_ERR(Prec->Compute()); cout << *Prec; Prec = Teuchos::rcp( Factory.Create("Krylov", &*A) ); assert (Prec != Teuchos::null); IFPACK_CHK_ERR(Prec->Initialize()); IFPACK_CHK_ERR(Prec->Compute()); cout << *Prec; if (Comm.MyPID() == 0) cout << "Test `PrecondititonerFactory.exe' passed!" << endl; #ifdef HAVE_MPI MPI_Finalize() ; #endif return(EXIT_SUCCESS); }
// Regression driver: solve a 2D Laplacian with CG, first with no
// preconditioner, then with the factory-built "IC" (Crout incomplete
// Cholesky) preconditioner, and require that IC at least halves the
// iteration count. Returns EXIT_SUCCESS, or an Ifpack error code.
int main(int argc, char *argv[]) {

#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  // The problem is defined on a 2D grid, global size is nx * nx.
  // Strip decomposition: each process owns nx rows of the grid.
  int nx = 30;
  Teuchos::ParameterList GaleriList;
  GaleriList.set("nx", nx);
  GaleriList.set("ny", nx * Comm.NumProc());
  GaleriList.set("mx", 1);
  GaleriList.set("my", Comm.NumProc());
  Teuchos::RefCountPtr<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap("Cartesian2D", Comm, GaleriList) );
  Teuchos::RefCountPtr<Epetra_CrsMatrix> A = Teuchos::rcp( Galeri::CreateCrsMatrix("Laplace2D", &*Map, GaleriList) );
  // Zero initial guess, random right-hand side.
  Teuchos::RefCountPtr<Epetra_MultiVector> LHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) );
  Teuchos::RefCountPtr<Epetra_MultiVector> RHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) );
  LHS->PutScalar(0.0); RHS->Random();

  // ========================================= //
  // Compare IC preconditioners to no precond. //
  // ----------------------------------------- //

  const double tol = 1e-5;
  const int maxIter = 500;

  // Baseline: No preconditioning.
  // Compute number of iterations, to compare to IC later.

  // Here we create an AztecOO object (reused for every solve below).
  LHS->PutScalar(0.0);

  AztecOO solver;
  solver.SetUserMatrix(&*A);
  solver.SetLHS(&*LHS);
  solver.SetRHS(&*RHS);
  solver.SetAztecOption(AZ_solver,AZ_cg);
  //solver.SetPrecOperator(&*PrecDiag);
  solver.SetAztecOption(AZ_output, 16);
  solver.Iterate(maxIter, tol);

  // Unpreconditioned iteration count: the yardstick for the IC runs.
  int Iters = solver.NumIters();
  //cout << "No preconditioner iterations: " << Iters << endl;

#if 0
  // Not sure how to use Ifpack_CrsRick - leave out for now.
  //
  // I wanna test funky values to be sure that they have the same
  // influence on the algorithms, both old and new
  int LevelFill = 2;
  double DropTol = 0.3333;
  double Condest;

  Teuchos::RefCountPtr<Ifpack_CrsRick> IC;
  Ifpack_IlukGraph mygraph (A->Graph(), 0, 0);
  IC = Teuchos::rcp( new Ifpack_CrsRick(*A, mygraph) );
  IC->SetAbsoluteThreshold(0.00123);
  IC->SetRelativeThreshold(0.9876);
  // Init values from A
  IC->InitValues(*A);
  // compute the factors
  IC->Factor();
  // and now estimate the condition number
  IC->Condest(false,Condest);

  if( Comm.MyPID() == 0 ) {
    cout << "Condition number estimate (level-of-fill = "
         << LevelFill << ") = " << Condest << endl;
  }

  // Define label for printing out during the solve phase
  std::string label = "Ifpack_CrsRick Preconditioner: LevelFill = "
    + toString(LevelFill) + " Overlap = 0";
  IC->SetLabel(label.c_str());

  // Here we create an AztecOO object
  LHS->PutScalar(0.0);

  AztecOO solver;
  solver.SetUserMatrix(&*A);
  solver.SetLHS(&*LHS);
  solver.SetRHS(&*RHS);
  solver.SetAztecOption(AZ_solver,AZ_cg);
  solver.SetPrecOperator(&*IC);
  solver.SetAztecOption(AZ_output, 16);
  solver.Iterate(maxIter, tol);

  int RickIters = solver.NumIters();
  //cout << "Ifpack_Rick iterations: " << RickIters << endl;

  // Compare to no preconditioning
  if (RickIters > Iters/2)
    IFPACK_CHK_ERR(-1);
#endif

  //////////////////////////////////////////////////////
  // Same test with Ifpack_IC
  // This is Crout threshold Cholesky, so different than IC(0)

  Ifpack Factory;
  Teuchos::RefCountPtr<Ifpack_Preconditioner> PrecIC = Teuchos::rcp( Factory.Create("IC", &*A) );

  // All parameters left at their defaults (the overrides below are
  // intentionally disabled).
  Teuchos::ParameterList List;
  //List.get("fact: ict level-of-fill", 2.);
  //List.get("fact: drop tolerance", 0.3333);
  //List.get("fact: absolute threshold", 0.00123);
  //List.get("fact: relative threshold", 0.9876);
  //List.get("fact: relaxation value", 0.0);

  // NOTE(review): Initialize() is not called explicitly; this relies on
  // Compute() performing initialization when needed -- confirm against
  // the Ifpack_IC implementation.
  IFPACK_CHK_ERR(PrecIC->SetParameters(List));
  IFPACK_CHK_ERR(PrecIC->Compute());

  // Reset the initial guess and reuse the solver configured above.
  LHS->PutScalar(0.0);

  //AztecOO solver;
  solver.SetUserMatrix(&*A);
  solver.SetLHS(&*LHS);
  solver.SetRHS(&*RHS);
  solver.SetAztecOption(AZ_solver,AZ_cg);
  solver.SetPrecOperator(&*PrecIC);
  solver.SetAztecOption(AZ_output, 16);
  solver.Iterate(maxIter, tol);

  int ICIters = solver.NumIters();
  //cout << "Ifpack_IC iterations: " << ICIters << endl;

  // Compare to no preconditioning: IC must at least halve the count.
  if (ICIters > Iters/2)
    IFPACK_CHK_ERR(-1);

#if 0
  //////////////////////////////////////////////////////
  // Same test with Ifpack_ICT
  // This is another threshold Cholesky

  Teuchos::RefCountPtr<Ifpack_Preconditioner> PrecICT = Teuchos::rcp( Factory.Create("ICT", &*A) );

  //Teuchos::ParameterList List;
  //List.get("fact: level-of-fill", 2);
  //List.get("fact: drop tolerance", 0.3333);
  //List.get("fact: absolute threshold", 0.00123);
  //List.get("fact: relative threshold", 0.9876);
  //List.get("fact: relaxation value", 0.0);

  IFPACK_CHK_ERR(PrecICT->SetParameters(List));
  IFPACK_CHK_ERR(PrecICT->Compute());

  // Reset the initial guess and reuse the same solver again.
  LHS->PutScalar(0.0);

  solver.SetUserMatrix(&*A);
  solver.SetLHS(&*LHS);
  solver.SetRHS(&*RHS);
  solver.SetAztecOption(AZ_solver,AZ_cg);
  solver.SetPrecOperator(&*PrecICT);
  solver.SetAztecOption(AZ_output, 16);
  solver.Iterate(maxIter, tol);

  int ICTIters = solver.NumIters();
  //cout << "Ifpack_ICT iterations: " << ICTIters << endl;

  // Compare to no preconditioning
  if (ICTIters > Iters/2)
    IFPACK_CHK_ERR(-1);
#endif

#ifdef HAVE_MPI
  MPI_Finalize() ;
#endif

  return(EXIT_SUCCESS);
}
// Usage example: build an "ILU" preconditioner through the Ifpack
// factory and use it inside an AztecOO GMRES solve of a 2D Laplacian.
int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
  Epetra_SerialComm Comm;
#endif

  Teuchos::ParameterList GaleriList;

  // The problem is defined on a 2D grid, global size is nx * nx.
  int nx = 30;
  GaleriList.set("n", nx * nx);
  GaleriList.set("nx", nx);
  GaleriList.set("ny", nx);
  // CreateMap64: map with 64-bit global indices.
  Teuchos::RefCountPtr<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap64("Linear", Comm, GaleriList) );
  Teuchos::RefCountPtr<Epetra_RowMatrix> A = Teuchos::rcp( Galeri::CreateCrsMatrix("Laplace2D", &*Map, GaleriList) );

  // =============================================================== //
  // B E G I N N I N G   O F   I F P A C K   C O N S T R U C T I O N //
  // =============================================================== //

  Teuchos::ParameterList List;

  // allocates an IFPACK factory. No data is associated
  // to this object (only method Create()).
  Ifpack Factory;

  // create the preconditioner. For valid PrecType values,
  // please check the documentation
  string PrecType = "ILU"; // incomplete LU
  int OverlapLevel = 1; // must be >= 0. If Comm.NumProc() == 1,
                        // it is ignored.

  Teuchos::RefCountPtr<Ifpack_Preconditioner> Prec = Teuchos::rcp( Factory.Create(PrecType, &*A, OverlapLevel) );
  assert(Prec != Teuchos::null);

  // specify parameters for ILU
  List.set("fact: drop tolerance", 1e-9);
  List.set("fact: level-of-fill", 1);
  // the combine mode is one of the following:
  // "Add", "Zero", "Insert", "InsertAdd", "Average", "AbsMax"
  // Their meaning is as defined in file Epetra_CombineMode.h
  List.set("schwarz: combine mode", "Add");
  // sets the parameters
  IFPACK_CHK_ERR(Prec->SetParameters(List));

  // initialize the preconditioner. At this point the matrix must
  // have been FillComplete()'d, but actual values are ignored.
  IFPACK_CHK_ERR(Prec->Initialize());

  // Builds the preconditioners, by looking for the values of
  // the matrix.
  IFPACK_CHK_ERR(Prec->Compute());

  // =================================================== //
  // E N D   O F   I F P A C K   C O N S T R U C T I O N //
  // =================================================== //

  // At this point, we need some additional objects
  // to define and solve the linear system.

  // defines LHS and RHS
  Epetra_Vector LHS(A->OperatorDomainMap());
  Epetra_Vector RHS(A->OperatorDomainMap());

  // solution is constant
  LHS.PutScalar(1.0);
  // now build corresponding RHS
  A->Apply(LHS,RHS);

  // NOTE(review): this overwrites the RHS just manufactured from the
  // all-ones solution, so the exact solution is discarded and the solve
  // below uses a random RHS (with LHS = 1.0 as initial guess) -- confirm
  // this is intended before relying on the solution's value.
  RHS.Random();

  // need an Epetra_LinearProblem to define AztecOO solver
  Epetra_LinearProblem Problem(&*A,&LHS,&RHS);

  // now we can allocate the AztecOO solver
  AztecOO Solver(Problem);

  // specify solver
  Solver.SetAztecOption(AZ_solver,AZ_gmres);
  Solver.SetAztecOption(AZ_output,32);

  // HERE WE SET THE IFPACK PRECONDITIONER
  Solver.SetPrecOperator(&*Prec);

  // .. and here we solve
  Solver.Iterate(1550,1e-8);

  cout << *Prec;

#ifdef HAVE_MPI
  MPI_Finalize() ;
#endif

  return(EXIT_SUCCESS);
}
int main(int argc, char *argv[]) { #ifdef HAVE_MPI MPI_Init(&argc,&argv); Epetra_MpiComm Comm (MPI_COMM_WORLD); #else Epetra_SerialComm Comm; #endif int MyPID = Comm.MyPID(); bool verbose = false; if (MyPID==0) verbose = true; Teuchos::ParameterList GaleriList; int nx = 30; GaleriList.set("nx", nx); GaleriList.set("ny", nx * Comm.NumProc()); GaleriList.set("mx", 1); GaleriList.set("my", Comm.NumProc()); Teuchos::RefCountPtr<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap("Cartesian2D", Comm, GaleriList) ); Teuchos::RefCountPtr<Epetra_CrsMatrix> A = Teuchos::rcp( Galeri::CreateCrsMatrix("Laplace2D", &*Map, GaleriList) ); Teuchos::RefCountPtr<Epetra_MultiVector> LHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) ); Teuchos::RefCountPtr<Epetra_MultiVector> RHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) ); LHS->PutScalar(0.0); RHS->Random(); // ============================ // // Construct ILU preconditioner // // ---------------------------- // // I wanna test funky values to be sure that they have the same // influence on the algorithms, both old and new int LevelFill = 2; double DropTol = 0.3333; double Athresh = 0.0123; double Rthresh = 0.9876; double Relax = 0.1; int Overlap = 2; Teuchos::RefCountPtr<Ifpack_IlukGraph> Graph; Teuchos::RefCountPtr<Ifpack_CrsRiluk> RILU; Graph = Teuchos::rcp( new Ifpack_IlukGraph(A->Graph(), LevelFill, Overlap) ); int ierr; ierr = Graph->ConstructFilledGraph(); IFPACK_CHK_ERR(ierr); RILU = Teuchos::rcp( new Ifpack_CrsRiluk(*Graph) ); RILU->SetAbsoluteThreshold(Athresh); RILU->SetRelativeThreshold(Rthresh); RILU->SetRelaxValue(Relax); int initerr = RILU->InitValues(*A); if (initerr!=0) cout << Comm << "*ERR* InitValues = " << initerr; RILU->Factor(); // Define label for printing out during the solve phase string label = "Ifpack_CrsRiluk Preconditioner: LevelFill = " + toString(LevelFill) + " Overlap = " + toString(Overlap) + " Athresh = " + toString(Athresh) + " Rthresh = " + toString(Rthresh); // Here we create an AztecOO 
object LHS->PutScalar(0.0); int Niters = 1200; AztecOO solver; solver.SetUserMatrix(&*A); solver.SetLHS(&*LHS); solver.SetRHS(&*RHS); solver.SetAztecOption(AZ_solver,AZ_gmres); solver.SetPrecOperator(&*RILU); solver.SetAztecOption(AZ_output, 16); solver.Iterate(Niters, 5.0e-5); int OldIters = solver.NumIters(); // now rebuild the same preconditioner using RILU, we expect the same // number of iterations Ifpack Factory; Teuchos::RefCountPtr<Ifpack_Preconditioner> Prec = Teuchos::rcp( Factory.Create("ILU", &*A, Overlap) ); Teuchos::ParameterList List; List.get("fact: level-of-fill", LevelFill); List.get("fact: drop tolerance", DropTol); List.get("fact: absolute threshold", Athresh); List.get("fact: relative threshold", Rthresh); List.get("fact: relax value", Relax); IFPACK_CHK_ERR(Prec->SetParameters(List)); IFPACK_CHK_ERR(Prec->Compute()); // Here we create an AztecOO object LHS->PutScalar(0.0); solver.SetUserMatrix(&*A); solver.SetLHS(&*LHS); solver.SetRHS(&*RHS); solver.SetAztecOption(AZ_solver,AZ_gmres); solver.SetPrecOperator(&*Prec); solver.SetAztecOption(AZ_output, 16); solver.Iterate(Niters, 5.0e-5); int NewIters = solver.NumIters(); if (OldIters != NewIters) IFPACK_CHK_ERR(-1); #ifdef HAVE_IFPACK_SUPERLU // Now test w/ SuperLU's ILU, if we've got it Teuchos::RefCountPtr<Ifpack_Preconditioner> Prec2 = Teuchos::rcp( Factory.Create("SILU", &*A,0) ); Teuchos::ParameterList SList; SList.set("fact: drop tolerance",1e-4); SList.set("fact: zero pivot threshold",.1); SList.set("fact: maximum fill factor",10.0); // NOTE: There is a bug in SuperLU 4.0 which will crash the code if the maximum fill factor is set too low. // This bug was reported to Sherry Li on 4/8/10. SList.set("fact: silu drop rule",9); IFPACK_CHK_ERR(Prec2->SetParameters(SList)); IFPACK_CHK_ERR(Prec2->Compute()); LHS->PutScalar(0.0); solver.SetPrecOperator(&*Prec2); solver.Iterate(Niters, 5.0e-5); Prec2->Print(cout); #endif #ifdef HAVE_MPI MPI_Finalize() ; #endif return(EXIT_SUCCESS); }