void set_options_pack6() {
  solver->SetAztecOption(AZ_solver, AZ_gmres);
  solver->SetAztecOption(AZ_kspace, 10000);
  solver->SetAztecOption(AZ_precond, AZ_Jacobi);
  // solver->SetAztecOption(AZ_precond, AZ_none);
}
void set_options_pack5() {
  solver->SetAztecOption(AZ_solver, AZ_gmres);
  solver->SetAztecOption(AZ_kspace, 10000);
  solver->SetAztecOption(AZ_precond, AZ_Neumann);
  // solver->SetAztecOption(AZ_solver, AZ_gmres_condnum);
  // solver->SetAztecOption(AZ_kspace, 1000);
  // solver->SetAztecOption(AZ_output, AZ_none);
}
void set_options_pack3() {
  solver->SetAztecOption(AZ_solver, AZ_tfqmr);
  solver->SetAztecOption(AZ_scaling, AZ_none);
  solver->SetAztecOption(AZ_precond, AZ_ls);
  solver->SetAztecOption(AZ_conv, AZ_r0);
  // solver->SetAztecOption(AZ_output, 1);
  solver->SetAztecOption(AZ_pre_calc, AZ_calc);
  solver->SetAztecOption(AZ_max_iter, 1550);
  solver->SetAztecOption(AZ_poly_ord, 5);
  solver->SetAztecOption(AZ_overlap, AZ_none);
  solver->SetAztecOption(AZ_kspace, 60);
  solver->SetAztecOption(AZ_aux_vec, AZ_resid);
  solver->SetAztecParam(AZ_tol, 4.00e-9);
  solver->SetAztecParam(AZ_drop, 0.0);
  solver->SetAztecParam(AZ_ilut_fill, 1.50);
  solver->SetAztecParam(AZ_omega, 1.);
  // solver->SetAztecOption(AZ_output, AZ_none);
}
int main(int argc, char *argv[]) { int n = 32; // spatial discretization (per dimension) int num_KL = 2; // number of KL terms int p = 3; // polynomial order double mu = 0.1; // mean of exponential random field double s = 0.2; // std. dev. of exponential r.f. bool nonlinear_expansion = false; // nonlinear expansion of diffusion coeff // (e.g., log-normal) bool symmetric = false; // use symmetric formulation double g_mean_exp = 0.172988; // expected response mean double g_std_dev_exp = 0.0380007; // expected response std. dev. double g_tol = 1e-6; // tolerance on determining success // Initialize MPI #ifdef HAVE_MPI MPI_Init(&argc,&argv); #endif int MyPID; try { { TEUCHOS_FUNC_TIME_MONITOR("Total PCE Calculation Time"); // Create a communicator for Epetra objects Teuchos::RCP<const Epetra_Comm> globalComm; #ifdef HAVE_MPI globalComm = Teuchos::rcp(new Epetra_MpiComm(MPI_COMM_WORLD)); #else globalComm = Teuchos::rcp(new Epetra_SerialComm); #endif MyPID = globalComm->MyPID(); // Create Stochastic Galerkin basis and expansion Teuchos::Array< Teuchos::RCP<const Stokhos::OneDOrthogPolyBasis<int,double> > > bases(num_KL); for (int i=0; i<num_KL; i++) bases[i] = Teuchos::rcp(new Stokhos::LegendreBasis<int,double>(p,true)); Teuchos::RCP<const Stokhos::CompletePolynomialBasis<int,double> > basis = Teuchos::rcp(new Stokhos::CompletePolynomialBasis<int,double>(bases, 1e-12)); int sz = basis->size(); Teuchos::RCP<Stokhos::Sparse3Tensor<int,double> > Cijk; if (nonlinear_expansion) Cijk = basis->computeTripleProductTensor(sz); else Cijk = basis->computeTripleProductTensor(num_KL+1); Teuchos::RCP<Stokhos::OrthogPolyExpansion<int,double> > expansion = Teuchos::rcp(new Stokhos::AlgebraicOrthogPolyExpansion<int,double>(basis, Cijk)); if (MyPID == 0) std::cout << "Stochastic Galerkin expansion size = " << sz << std::endl; // Create stochastic parallel distribution int num_spatial_procs = -1; Teuchos::ParameterList parallelParams; parallelParams.set("Number of Spatial Processors", num_spatial_procs); // parallelParams.set("Rebalance Stochastic Graph", true); // Teuchos::ParameterList& isorropia_params = // parallelParams.sublist("Isorropia"); // isorropia_params.set("Balance objective", "nonzeros"); Teuchos::RCP<Stokhos::ParallelData> sg_parallel_data = Teuchos::rcp(new Stokhos::ParallelData(basis, Cijk, globalComm, parallelParams)); Teuchos::RCP<const EpetraExt::MultiComm> sg_comm = sg_parallel_data->getMultiComm(); Teuchos::RCP<const Epetra_Comm> app_comm = sg_parallel_data->getSpatialComm(); // Create application Teuchos::RCP<twoD_diffusion_ME> model = Teuchos::rcp(new twoD_diffusion_ME(app_comm, n, num_KL, mu, s, basis, nonlinear_expansion, symmetric)); // Setup stochastic Galerkin algorithmic parameters Teuchos::RCP<Teuchos::ParameterList> sgParams = Teuchos::rcp(new Teuchos::ParameterList); if (!nonlinear_expansion) { sgParams->set("Parameter Expansion Type", "Linear"); sgParams->set("Jacobian Expansion Type", "Linear"); } Teuchos::ParameterList precParams; precParams.set("default values", "SA"); precParams.set("ML output", 0); precParams.set("max levels",5); precParams.set("increasing or decreasing","increasing"); precParams.set("aggregation: type", "Uncoupled"); precParams.set("smoother: type","ML symmetric Gauss-Seidel"); precParams.set("smoother: sweeps",2); precParams.set("smoother: pre or post", "both"); precParams.set("coarse: max size", 200); //precParams.set("PDE equations",sz); #ifdef HAVE_ML_AMESOS precParams.set("coarse: type","Amesos-KLU"); #else precParams.set("coarse: type","Jacobi"); 
#endif // Create stochastic Galerkin model evaluator Teuchos::RCP<Stokhos::SGModelEvaluator_Interlaced> sg_model = Teuchos::rcp(new Stokhos::SGModelEvaluator_Interlaced( model, basis, Teuchos::null, expansion, sg_parallel_data, sgParams)); // Set up stochastic parameters Teuchos::RCP<Stokhos::EpetraVectorOrthogPoly> sg_p_poly = sg_model->create_p_sg(0); for (int i=0; i<num_KL; i++) { sg_p_poly->term(i,0)[i] = 0.0; sg_p_poly->term(i,1)[i] = 1.0; } // Create vectors and operators Teuchos::RCP<const Epetra_Vector> sg_p = sg_p_poly->getBlockVector(); Teuchos::RCP<Epetra_Vector> sg_x = Teuchos::rcp(new Epetra_Vector(*(sg_model->get_x_map()))); sg_x->PutScalar(0.0); Teuchos::RCP<Epetra_Vector> sg_f = Teuchos::rcp(new Epetra_Vector(*(sg_model->get_f_map()))); Teuchos::RCP<Epetra_Vector> sg_dx = Teuchos::rcp(new Epetra_Vector(*(sg_model->get_x_map()))); Teuchos::RCP<Epetra_CrsMatrix> sg_J = Teuchos::rcp_dynamic_cast<Epetra_CrsMatrix>(sg_model->create_W()); Teuchos::RCP<ML_Epetra::MultiLevelPreconditioner> sg_M = Teuchos::rcp(new ML_Epetra::MultiLevelPreconditioner(*sg_J, precParams, false)); // Setup InArgs and OutArgs EpetraExt::ModelEvaluator::InArgs sg_inArgs = sg_model->createInArgs(); EpetraExt::ModelEvaluator::OutArgs sg_outArgs = sg_model->createOutArgs(); sg_inArgs.set_p(1, sg_p); sg_inArgs.set_x(sg_x); sg_outArgs.set_f(sg_f); sg_outArgs.set_W(sg_J); // Evaluate model sg_model->evalModel(sg_inArgs, sg_outArgs); sg_M->ComputePreconditioner(); // Print initial residual norm double norm_f; sg_f->Norm2(&norm_f); if (MyPID == 0) std::cout << "\nInitial residual norm = " << norm_f << std::endl; // Setup AztecOO solver AztecOO aztec; if (symmetric) aztec.SetAztecOption(AZ_solver, AZ_cg); else aztec.SetAztecOption(AZ_solver, AZ_gmres); aztec.SetAztecOption(AZ_precond, AZ_none); aztec.SetAztecOption(AZ_kspace, 20); aztec.SetAztecOption(AZ_conv, AZ_r0); aztec.SetAztecOption(AZ_output, 1); aztec.SetUserOperator(sg_J.get()); aztec.SetPrecOperator(sg_M.get()); aztec.SetLHS(sg_dx.get()); aztec.SetRHS(sg_f.get()); // Solve linear system aztec.Iterate(1000, 1e-12); // Update x sg_x->Update(-1.0, *sg_dx, 1.0); // Save solution to file EpetraExt::VectorToMatrixMarketFile("stochastic_solution_interlaced.mm", *sg_x); // Save RHS to file EpetraExt::VectorToMatrixMarketFile("stochastic_RHS_interlaced.mm", *sg_f); // Save operator to file EpetraExt::RowMatrixToMatrixMarketFile("stochastic_operator_interlaced.mm", *sg_J); // Save mean and variance to file Teuchos::RCP<Stokhos::EpetraVectorOrthogPoly> sg_x_poly = sg_model->create_x_sg(View, sg_x.get()); Epetra_Vector mean(*(model->get_x_map())); Epetra_Vector std_dev(*(model->get_x_map())); sg_x_poly->computeMean(mean); sg_x_poly->computeStandardDeviation(std_dev); EpetraExt::VectorToMatrixMarketFile("mean_gal_interlaced.mm", mean); EpetraExt::VectorToMatrixMarketFile("std_dev_gal_interlaced.mm", std_dev); // Compute new residual & response function EpetraExt::ModelEvaluator::OutArgs sg_outArgs2 = sg_model->createOutArgs(); Teuchos::RCP<Epetra_Vector> sg_g = Teuchos::rcp(new Epetra_Vector(*(sg_model->get_g_map(0)))); sg_f->PutScalar(0.0); sg_outArgs2.set_f(sg_f); sg_outArgs2.set_g(0, sg_g); sg_model->evalModel(sg_inArgs, sg_outArgs2); // Print initial residual norm sg_f->Norm2(&norm_f); if (MyPID == 0) std::cout << "\nFinal residual norm = " << norm_f << std::endl; // Print mean and standard deviation of responses Teuchos::RCP<Stokhos::EpetraVectorOrthogPoly> sg_g_poly = sg_model->create_g_sg(0, View, sg_g.get()); Epetra_Vector g_mean(*(model->get_g_map(0))); 
Epetra_Vector g_std_dev(*(model->get_g_map(0))); sg_g_poly->computeMean(g_mean); sg_g_poly->computeStandardDeviation(g_std_dev); std::cout.precision(16); // std::cout << "\nResponse Expansion = " << std::endl; // std::cout.precision(12); // sg_g_poly->print(std::cout); std::cout << "\nResponse Mean = " << std::endl << g_mean << std::endl; std::cout << "Response Std. Dev. = " << std::endl << g_std_dev << std::endl; // Determine if example passed bool passed = false; if (norm_f < 1.0e-10 && std::abs(g_mean[0]-g_mean_exp) < g_tol && std::abs(g_std_dev[0]-g_std_dev_exp) < g_tol) passed = true; if (MyPID == 0) { if (passed) std::cout << "Example Passed!" << std::endl; else std::cout << "Example Failed!" << std::endl; } } Teuchos::TimeMonitor::summarize(std::cout); Teuchos::TimeMonitor::zeroOutTimers(); } catch (std::exception& e) { std::cout << e.what() << std::endl; } catch (string& s) { std::cout << s << std::endl; } catch (char *s) { std::cout << s << std::endl; } catch (...) { std::cout << "Caught unknown exception!" <<std:: endl; } #ifdef HAVE_MPI MPI_Finalize() ; #endif }
int main(int argc, char *argv[]) { #ifdef HAVE_MPI MPI_Init(&argc,&argv); Epetra_MpiComm Comm (MPI_COMM_WORLD); #else Epetra_SerialComm Comm; #endif // The problem is defined on a 2D grid, global size is nx * nx. int nx = 30; Teuchos::ParameterList GaleriList; GaleriList.set("nx", nx); GaleriList.set("ny", nx * Comm.NumProc()); GaleriList.set("mx", 1); GaleriList.set("my", Comm.NumProc()); Teuchos::RefCountPtr<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap("Cartesian2D", Comm, GaleriList) ); Teuchos::RefCountPtr<Epetra_CrsMatrix> A = Teuchos::rcp( Galeri::CreateCrsMatrix("Laplace2D", &*Map, GaleriList) ); Teuchos::RefCountPtr<Epetra_MultiVector> LHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) ); Teuchos::RefCountPtr<Epetra_MultiVector> RHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) ); LHS->PutScalar(0.0); RHS->Random(); // ========================================= // // Compare IC preconditioners to no precond. // // ----------------------------------------- // const double tol = 1e-5; const int maxIter = 500; // Baseline: No preconditioning // Compute number of iterations, to compare to IC later. // Here we create an AztecOO object LHS->PutScalar(0.0); AztecOO solver; solver.SetUserMatrix(&*A); solver.SetLHS(&*LHS); solver.SetRHS(&*RHS); solver.SetAztecOption(AZ_solver,AZ_cg); //solver.SetPrecOperator(&*PrecDiag); solver.SetAztecOption(AZ_output, 16); solver.Iterate(maxIter, tol); int Iters = solver.NumIters(); //cout << "No preconditioner iterations: " << Iters << endl; #if 0 // Not sure how to use Ifpack_CrsRick - leave out for now. // // I wanna test funky values to be sure that they have the same // influence on the algorithms, both old and new int LevelFill = 2; double DropTol = 0.3333; double Condest; Teuchos::RefCountPtr<Ifpack_CrsRick> IC; Ifpack_IlukGraph mygraph (A->Graph(), 0, 0); IC = Teuchos::rcp( new Ifpack_CrsRick(*A, mygraph) ); IC->SetAbsoluteThreshold(0.00123); IC->SetRelativeThreshold(0.9876); // Init values from A IC->InitValues(*A); // compute the factors IC->Factor(); // and now estimate the condition number IC->Condest(false,Condest); if( Comm.MyPID() == 0 ) { cout << "Condition number estimate (level-of-fill = " << LevelFill << ") = " << Condest << endl; } // Define label for printing out during the solve phase std::string label = "Ifpack_CrsRick Preconditioner: LevelFill = " + toString(LevelFill) + " Overlap = 0"; IC->SetLabel(label.c_str()); // Here we create an AztecOO object LHS->PutScalar(0.0); AztecOO solver; solver.SetUserMatrix(&*A); solver.SetLHS(&*LHS); solver.SetRHS(&*RHS); solver.SetAztecOption(AZ_solver,AZ_cg); solver.SetPrecOperator(&*IC); solver.SetAztecOption(AZ_output, 16); solver.Iterate(maxIter, tol); int RickIters = solver.NumIters(); //cout << "Ifpack_Rick iterations: " << RickIters << endl; // Compare to no preconditioning if (RickIters > Iters/2) IFPACK_CHK_ERR(-1); #endif ////////////////////////////////////////////////////// // Same test with Ifpack_IC // This is Crout threshold Cholesky, so different than IC(0) Ifpack Factory; Teuchos::RefCountPtr<Ifpack_Preconditioner> PrecIC = Teuchos::rcp( Factory.Create("IC", &*A) ); Teuchos::ParameterList List; //List.get("fact: ict level-of-fill", 2.); //List.get("fact: drop tolerance", 0.3333); //List.get("fact: absolute threshold", 0.00123); //List.get("fact: relative threshold", 0.9876); //List.get("fact: relaxation value", 0.0); IFPACK_CHK_ERR(PrecIC->SetParameters(List)); IFPACK_CHK_ERR(PrecIC->Compute()); // Here we create an AztecOO object LHS->PutScalar(0.0); //AztecOO solver; 
solver.SetUserMatrix(&*A); solver.SetLHS(&*LHS); solver.SetRHS(&*RHS); solver.SetAztecOption(AZ_solver,AZ_cg); solver.SetPrecOperator(&*PrecIC); solver.SetAztecOption(AZ_output, 16); solver.Iterate(maxIter, tol); int ICIters = solver.NumIters(); //cout << "Ifpack_IC iterations: " << ICIters << endl; // Compare to no preconditioning if (ICIters > Iters/2) IFPACK_CHK_ERR(-1); #if 0 ////////////////////////////////////////////////////// // Same test with Ifpack_ICT // This is another threshold Cholesky Teuchos::RefCountPtr<Ifpack_Preconditioner> PrecICT = Teuchos::rcp( Factory.Create("ICT", &*A) ); //Teuchos::ParameterList List; //List.get("fact: level-of-fill", 2); //List.get("fact: drop tolerance", 0.3333); //List.get("fact: absolute threshold", 0.00123); //List.get("fact: relative threshold", 0.9876); //List.get("fact: relaxation value", 0.0); IFPACK_CHK_ERR(PrecICT->SetParameters(List)); IFPACK_CHK_ERR(PrecICT->Compute()); // Here we create an AztecOO object LHS->PutScalar(0.0); solver.SetUserMatrix(&*A); solver.SetLHS(&*LHS); solver.SetRHS(&*RHS); solver.SetAztecOption(AZ_solver,AZ_cg); solver.SetPrecOperator(&*PrecICT); solver.SetAztecOption(AZ_output, 16); solver.Iterate(maxIter, tol); int ICTIters = solver.NumIters(); //cout << "Ifpack_ICT iterations: " << ICTIters << endl; // Compare to no preconditioning if (ICTIters > Iters/2) IFPACK_CHK_ERR(-1); #endif #ifdef HAVE_MPI MPI_Finalize() ; #endif return(EXIT_SUCCESS); }
int main(int argc, char *argv[]) {
#ifdef HAVE_MPI
  MPI_Init(&argc, &argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  int ierr = HIPS_Initialize(1);
  HIPS_ExitOnError(ierr);

  int MyPID = Comm.MyPID();
  bool verbose = false;
  if (MyPID == 0) verbose = true;

  Teuchos::ParameterList GaleriList;
  int nx = 100;
  GaleriList.set("nx", nx);
  GaleriList.set("ny", nx * Comm.NumProc());
  // GaleriList.set("ny", nx);
  GaleriList.set("mx", 1);
  GaleriList.set("my", Comm.NumProc());

  Teuchos::RefCountPtr<Epetra_Map> Map =
    Teuchos::rcp( Galeri::CreateMap("Cartesian2D", Comm, GaleriList) );
  Teuchos::RefCountPtr<Epetra_CrsMatrix> A =
    Teuchos::rcp( Galeri::CreateCrsMatrix("Laplace2D", &*Map, GaleriList) );
  Teuchos::RefCountPtr<Epetra_MultiVector> LHS =
    Teuchos::rcp( new Epetra_MultiVector(*Map, 1) );
  Teuchos::RefCountPtr<Epetra_MultiVector> RHS =
    Teuchos::rcp( new Epetra_MultiVector(*Map, 1) );
  LHS->PutScalar(0.0);
  RHS->Random();

  // ============================ //
  // Construct ILU preconditioner //
  // ---------------------------- //
  Teuchos::RefCountPtr<Ifpack_HIPS> RILU;
  RILU = Teuchos::rcp( new Ifpack_HIPS(&*A) );
  Teuchos::ParameterList List;
  List.set("hips: id", 0);
  List.set("hips: setup output", 2);
  List.set("hips: iteration output", 0);
  List.set("hips: drop tolerance", 5e-3);
  List.set("hips: graph symmetric", 1);
  RILU->SetParameters(List);
  RILU->Initialize();
  RILU->Compute();

  // Here we create an AztecOO object
  LHS->PutScalar(0.0);
  int Niters = 50;
  AztecOO solver;
  solver.SetUserMatrix(&*A);
  solver.SetLHS(&*LHS);
  solver.SetRHS(&*RHS);
  solver.SetAztecOption(AZ_solver, AZ_gmres);
  solver.SetPrecOperator(&*RILU);
  solver.SetAztecOption(AZ_output, 1);
  solver.Iterate(Niters, 1.0e-8);
  int OldIters = solver.NumIters();

  HIPS_Finalize();
#ifdef HAVE_MPI
  MPI_Finalize();
#endif
  return(EXIT_SUCCESS);
}
int main(int argc, char *argv[]) { #ifdef HAVE_MPI MPI_Init(&argc,&argv); Epetra_MpiComm Comm (MPI_COMM_WORLD); #else Epetra_SerialComm Comm; #endif int MyPID = Comm.MyPID(); bool verbose = false; if (MyPID==0) verbose = true; /*int npRows = -1; int npCols = -1; bool useTwoD = false; int randomize = 1; std::string matrix = "Laplacian"; Epetra_CrsMatrix *AK = NULL; std::string filename = "email.mtx"; read_matrixmarket_file((char*) filename.c_str(), Comm, AK, useTwoD, npRows, npCols, randomize, false, (matrix.find("Laplacian")!=std::string::npos)); Teuchos::RCP<Epetra_CrsMatrix> A(AK); const Epetra_Map *AMap = &(AK->DomainMap()); Teuchos::RCP<const Epetra_Map> Map(AMap, false);*/ int nx = 30; Teuchos::ParameterList GaleriList; GaleriList.set("nx", nx); GaleriList.set("ny", nx * Comm.NumProc()); GaleriList.set("mx", 1); GaleriList.set("my", Comm.NumProc()); Teuchos::RefCountPtr<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap("Cartesian2D", Comm, GaleriList) ); Teuchos::RefCountPtr<Epetra_CrsMatrix> A = Teuchos::rcp( Galeri::CreateCrsMatrix("Laplace2D", &*Map, GaleriList) ); Teuchos::RefCountPtr<Epetra_MultiVector> LHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) ); Teuchos::RefCountPtr<Epetra_MultiVector> RHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) ); LHS->PutScalar(0.0); RHS->Random(); // ==================================================== // // Compare support graph preconditioners to no precond. // // ---------------------------------------------------- // const double tol = 1e-5; const int maxIter = 500; // Baseline: No preconditioning // Compute number of iterations, to compare to IC later. // Here we create an AztecOO object LHS->PutScalar(0.0); AztecOO solver; solver.SetUserMatrix(&*A); solver.SetLHS(&*LHS); solver.SetRHS(&*RHS); solver.SetAztecOption(AZ_solver,AZ_cg); solver.SetAztecOption(AZ_output, 16); solver.Iterate(maxIter, tol); int Iters = solver.NumIters(); int SupportIters; Ifpack Factory; Teuchos::ParameterList List; #ifdef HAVE_IFPACK_AMESOS ////////////////////////////////////////////////////// // Same test with Ifpack_SupportGraph // Factored with Amesos Teuchos::RefCountPtr<Ifpack_Preconditioner> PrecSupportAmesos = Teuchos::rcp( Factory.Create("MSF Amesos", &*A) ); List.set("amesos: solver type","Klu"); List.set("MST: keep diagonal", 1.0); List.set("MST: randomize", 1); //List.set("fact: absolute threshold", 3.0); IFPACK_CHK_ERR(PrecSupportAmesos->SetParameters(List)); IFPACK_CHK_ERR(PrecSupportAmesos->Initialize()); IFPACK_CHK_ERR(PrecSupportAmesos->Compute()); // Here we create an AztecOO object LHS->PutScalar(0.0); //AztecOO solver; solver.SetUserMatrix(&*A); solver.SetLHS(&*LHS); solver.SetRHS(&*RHS); solver.SetAztecOption(AZ_solver,AZ_cg); solver.SetPrecOperator(&*PrecSupportAmesos); solver.SetAztecOption(AZ_output, 16); solver.Iterate(maxIter, tol); SupportIters = solver.NumIters(); // Compare to no preconditioning if (SupportIters > 2*Iters) IFPACK_CHK_ERR(-1); #endif ////////////////////////////////////////////////////// // Same test with Ifpack_SupportGraph // Factored with IC Teuchos::RefCountPtr<Ifpack_Preconditioner> PrecSupportIC = Teuchos::rcp( Factory.Create("MSF IC", &*A) ); IFPACK_CHK_ERR(PrecSupportIC->SetParameters(List)); IFPACK_CHK_ERR(PrecSupportIC->Compute()); // Here we create an AztecOO object LHS->PutScalar(0.0); //AztecOO solver; solver.SetUserMatrix(&*A); solver.SetLHS(&*LHS); solver.SetRHS(&*RHS); solver.SetAztecOption(AZ_solver,AZ_cg); solver.SetPrecOperator(&*PrecSupportIC); solver.SetAztecOption(AZ_output, 16); 
solver.Iterate(maxIter, tol); SupportIters = solver.NumIters(); // Compare to no preconditioning if (SupportIters > 2*Iters) IFPACK_CHK_ERR(-1); #ifdef HAVE_MPI MPI_Finalize() ; #endif return(EXIT_SUCCESS); }
void switch_to_full_output_mode() {
  solver->SetAztecOption(AZ_output, AZ_all);
}
// [CK] I took this function out of the solve function.
void switch_to_no_output_mode() {
  solver->SetAztecOption(AZ_output, AZ_none);
}
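// A minimal usage sketch of how these helpers and the option packs above fit
// together (assumptions: `solver` is a class member of type AztecOO*, and
// `A`, `lhs`, `rhs` are already-assembled Epetra_CrsMatrix* / Epetra_Vector*
// members; everything except the AztecOO calls is hypothetical).
void solve_with_pack6_sketch() {
  solver->SetUserMatrix(A);        // matrix of the linear system
  solver->SetLHS(lhs);             // initial guess / solution vector
  solver->SetRHS(rhs);             // right-hand side
  set_options_pack6();             // GMRES with Jacobi preconditioning
  switch_to_no_output_mode();      // silence AztecOO during the solve
  solver->Iterate(1000, 1e-8);     // max iterations, convergence tolerance
}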
int shylu_dist_solve<Epetra_CrsMatrix,Epetra_MultiVector>( shylu_symbolic<Epetra_CrsMatrix,Epetra_MultiVector> *ssym, shylu_data<Epetra_CrsMatrix,Epetra_MultiVector> *data, shylu_config<Epetra_CrsMatrix,Epetra_MultiVector> *config, const Epetra_MultiVector& X, Epetra_MultiVector& Y ) { int err; AztecOO *solver = 0; assert(X.Map().SameAs(Y.Map())); //assert(X.Map().SameAs(A_->RowMap())); const Epetra_MultiVector *newX; newX = &X; //rd_->redistribute(X, newX); int nvectors = newX->NumVectors(); // May have to use importer/exporter Epetra_Map BsMap(-1, data->Snr, data->SRowElems, 0, X.Comm()); Epetra_Map BdMap(-1, data->Dnr, data->DRowElems, 0, X.Comm()); Epetra_MultiVector Bs(BsMap, nvectors); Epetra_Import BsImporter(BsMap, newX->Map()); assert(BsImporter.SourceMap().SameAs(newX->Map())); assert((newX->Map()).SameAs(BsImporter.SourceMap())); Bs.Import(*newX, BsImporter, Insert); Epetra_MultiVector Xs(BsMap, nvectors); Epetra_SerialComm LComm; // Use Serial Comm for the local vectors. Epetra_Map LocalBdMap(-1, data->Dnr, data->DRowElems, 0, LComm); Epetra_MultiVector localrhs(LocalBdMap, nvectors); Epetra_MultiVector locallhs(LocalBdMap, nvectors); Epetra_MultiVector Z(BdMap, nvectors); Epetra_MultiVector Bd(BdMap, nvectors); Epetra_Import BdImporter(BdMap, newX->Map()); assert(BdImporter.SourceMap().SameAs(newX->Map())); assert((newX->Map()).SameAs(BdImporter.SourceMap())); Bd.Import(*newX, BdImporter, Insert); int lda; double *values; err = Bd.ExtractView(&values, &lda); assert (err == 0); int nrows = ssym->C->RowMap().NumMyElements(); // copy to local vector //TODO: OMP ? assert(lda == nrows); for (int v = 0; v < nvectors; v++) { for (int i = 0; i < nrows; i++) { err = localrhs.ReplaceMyValue(i, v, values[i+v*lda]); assert (err == 0); } } // TODO : Do we need to reset the lhs and rhs here ? if (config->amesosForDiagonal) { ssym->LP->SetRHS(&localrhs); ssym->LP->SetLHS(&locallhs); ssym->Solver->Solve(); } else { ssym->ifSolver->ApplyInverse(localrhs, locallhs); } err = locallhs.ExtractView(&values, &lda); assert (err == 0); // copy to distributed vector //TODO: OMP ? assert(lda == nrows); for (int v = 0; v < nvectors; v++) { for (int i = 0; i < nrows; i++) { err = Z.ReplaceMyValue(i, v, values[i+v*lda]); assert (err == 0); } } Epetra_MultiVector temp1(BsMap, nvectors); ssym->R->Multiply(false, Z, temp1); Bs.Update(-1.0, temp1, 1.0); Xs.PutScalar(0.0); Epetra_LinearProblem Problem(data->Sbar.get(), &Xs, &Bs); if (config->schurSolver == "Amesos") { Amesos_BaseSolver *solver2 = data->dsolver; data->LP2->SetLHS(&Xs); data->LP2->SetRHS(&Bs); //cout << "Calling solve *****************************" << endl; solver2->Solve(); //cout << "Out of solve *****************************" << endl; } else { if (config->libName == "Belos") { solver = data->innersolver; solver->SetLHS(&Xs); solver->SetRHS(&Bs); } else { // See the comment above on why we are not able to reuse the solver // when outer solve is AztecOO as well. solver = new AztecOO(); //solver.SetPrecOperator(precop_); solver->SetAztecOption(AZ_solver, AZ_gmres); // Do not use AZ_none solver->SetAztecOption(AZ_precond, AZ_dom_decomp); //solver->SetAztecOption(AZ_precond, AZ_none); //solver->SetAztecOption(AZ_precond, AZ_Jacobi); ////solver->SetAztecOption(AZ_precond, AZ_Neumann); //solver->SetAztecOption(AZ_overlap, 3); //solver->SetAztecOption(AZ_subdomain_solve, AZ_ilu); //solver->SetAztecOption(AZ_output, AZ_all); //solver->SetAztecOption(AZ_diagnostics, AZ_all); solver->SetProblem(Problem); } // What should be a good inner_tolerance :-) ? 
solver->Iterate(config->inner_maxiters, config->inner_tolerance); } Epetra_MultiVector temp(BdMap, nvectors); ssym->C->Multiply(false, Xs, temp); temp.Update(1.0, Bd, -1.0); //Epetra_SerialComm LComm; // Use Serial Comm for the local vectors. //Epetra_Map LocalBdMap(-1, data->Dnr, data->DRowElems, 0, LComm); //Epetra_MultiVector localrhs(LocalBdMap, nvectors); //Epetra_MultiVector locallhs(LocalBdMap, nvectors); //int lda; //double *values; err = temp.ExtractView(&values, &lda); assert (err == 0); //int nrows = data->Cptr->RowMap().NumMyElements(); // copy to local vector //TODO: OMP ? assert(lda == nrows); for (int v = 0; v < nvectors; v++) { for (int i = 0; i < nrows; i++) { err = localrhs.ReplaceMyValue(i, v, values[i+v*lda]); assert (err == 0); } } if (config->amesosForDiagonal) { ssym->LP->SetRHS(&localrhs); ssym->LP->SetLHS(&locallhs); ssym->Solver->Solve(); } else { ssym->ifSolver->ApplyInverse(localrhs, locallhs); } err = locallhs.ExtractView(&values, &lda); assert (err == 0); // copy to distributed vector //TODO: OMP ? assert(lda == nrows); for (int v = 0; v < nvectors; v++) { for (int i = 0; i < nrows; i++) { err = temp.ReplaceMyValue(i, v, values[i+v*lda]); assert (err == 0); } } // For checking faults //if (NumApplyInverse_ == 5) temp.ReplaceMyValue(0, 0, 0.0); Epetra_Export XdExporter(BdMap, Y.Map()); Y.Export(temp, XdExporter, Insert); Epetra_Export XsExporter(BsMap, Y.Map()); Y.Export(Xs, XsExporter, Insert); if (config->libName == "Belos" || config->schurSolver == "Amesos") { // clean up } else { delete solver; } return 0; }//end shylu_dist_solve <epetra,epetra>
//
// [JW] I prefer this option pack: ILUT with GMRES and a 1000-vector Krylov space.
// Note: 10000 Krylov vectors is a RAM killer; the AztecOO documentation suggests
// keeping the Krylov space around the maximum iteration count, i.e. ~1000 here.
//
void set_options_pack7() {
  solver->SetAztecOption(AZ_solver, AZ_gmres);
  solver->SetAztecOption(AZ_kspace, 1000);
}
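// A hedged sketch of the memory-friendly alternative the note above alludes to
// (assumption: same `solver` member; this variant is not in the original code):
// restarted GMRES stores only AZ_kspace Krylov vectors, so a modest restart
// length bounds memory while the iteration budget passed to Iterate() stays large.
void set_options_pack7_restarted() {
  solver->SetAztecOption(AZ_solver, AZ_gmres);
  solver->SetAztecOption(AZ_kspace, 100);      // restart length: 100 Krylov vectors
  solver->SetAztecOption(AZ_max_iter, 1000);   // overall iteration budget
}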
/* aztec/examples
 * AZ_defaults(options, params);
 *
 * options[AZ_solver]   = AZ_cgs;
 * options[AZ_scaling]  = AZ_none;
 * options[AZ_precond]  = AZ_ls;
 * options[AZ_conv]     = AZ_r0;
 * options[AZ_output]   = 1;
 * options[AZ_pre_calc] = AZ_calc;
 * options[AZ_max_iter] = 1550;
 * options[AZ_poly_ord] = 5;
 * options[AZ_overlap]  = AZ_none;
 * options[AZ_kspace]   = 60;
 * options[AZ_aux_vec]  = AZ_resid;
 * params[AZ_tol]       = 4.00e-9;
 * params[AZ_drop]      = 0.0;
 * params[AZ_ilut_fill] = 1.5;
 * params[AZ_omega]     = 1.;
 */
void set_options_pack4() {
  solver->SetAztecOption(AZ_solver, AZ_bicgstab);
  // solver->SetAztecOption(AZ_output, AZ_none);
}
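// Hedged sketch of loading the raw Aztec defaults quoted above and handing them
// to AztecOO in one shot (assumptions: same `solver` member; AztecOO's
// SetAllAztecOptions/SetAllAztecParams and Aztec's AZ_defaults are available,
// as declared in AztecOO.h / az_aztec.h).
void set_options_from_aztec_defaults() {
  int    options[AZ_OPTIONS_SIZE];
  double params[AZ_PARAMS_SIZE];
  AZ_defaults(options, params);          // fill both arrays with the Aztec defaults
  options[AZ_solver] = AZ_bicgstab;      // override the solver as in pack 4
  solver->SetAllAztecOptions(options);
  solver->SetAllAztecParams(params);
}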
int main(int argc, char *argv[]) { #ifdef HAVE_MPI MPI_Init(&argc,&argv); Epetra_MpiComm Comm (MPI_COMM_WORLD); #else Epetra_SerialComm Comm; #endif Teuchos::ParameterList GaleriList; int nx = 30; GaleriList.set("nx", nx); // GaleriList.set("ny", nx * Comm.NumProc()); GaleriList.set("ny", nx); GaleriList.set("mx", 1); GaleriList.set("my", Comm.NumProc()); GaleriList.set("alpha", .0); GaleriList.set("diff", 1.0); GaleriList.set("conv", 100.0); Teuchos::RefCountPtr<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap64("Cartesian2D", Comm, GaleriList) ); Teuchos::RefCountPtr<Epetra_CrsMatrix> A = Teuchos::rcp( Galeri::CreateCrsMatrix("UniFlow2D", &*Map, GaleriList) ); Teuchos::RefCountPtr<Epetra_MultiVector> LHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) ); Teuchos::RefCountPtr<Epetra_MultiVector> RHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) ); LHS->PutScalar(0.0); RHS->Random(); Ifpack Factory; int Niters = 100; // ============================= // // Construct IHSS preconditioner // // ============================= // Teuchos::RefCountPtr<Ifpack_Preconditioner> Prec = Teuchos::rcp( Factory.Create("IHSS", &*A,0) ); Teuchos::ParameterList List; List.set("ihss: hermetian type","ILU"); List.set("ihss: skew hermetian type","ILU"); List.set("ihss: ratio eigenvalue",100.0); // Could set sublist values here to better control the ILU, but this isn't needed for this example. IFPACK_CHK_ERR(Prec->SetParameters(List)); IFPACK_CHK_ERR(Prec->Compute()); // ============================= // // Create solver Object // // ============================= // AztecOO solver; solver.SetUserMatrix(&*A); solver.SetLHS(&*LHS); solver.SetRHS(&*RHS); solver.SetAztecOption(AZ_solver,AZ_gmres); solver.SetPrecOperator(&*Prec); solver.SetAztecOption(AZ_output, 1); solver.Iterate(Niters, 1e-8); // ============================= // // Construct SORa preconditioner // // ============================= // Teuchos::RefCountPtr<Ifpack_Preconditioner> Prec2 = Teuchos::rcp( Factory.Create("SORa", &*A,0) ); Teuchos::ParameterList List2; List2.set("sora: sweeps",1); // Could set sublist values here to better control the ILU, but this isn't needed for this example. IFPACK_CHK_ERR(Prec2->SetParameters(List2)); IFPACK_CHK_ERR(Prec2->Compute()); // ============================= // // Create solver Object // // ============================= // AztecOO solver2; LHS->PutScalar(0.0); solver2.SetUserMatrix(&*A); solver2.SetLHS(&*LHS); solver2.SetRHS(&*RHS); solver2.SetAztecOption(AZ_solver,AZ_gmres); solver2.SetPrecOperator(&*Prec2); solver2.SetAztecOption(AZ_output, 1); solver2.Iterate(Niters, 1e-8); #ifdef HAVE_MPI MPI_Finalize() ; #endif return(EXIT_SUCCESS); }
int shylu_local_solve<Epetra_CrsMatrix, Epetra_MultiVector> ( shylu_symbolic<Epetra_CrsMatrix,Epetra_MultiVector> *ssym, shylu_data<Epetra_CrsMatrix,Epetra_MultiVector> *data, shylu_config<Epetra_CrsMatrix,Epetra_MultiVector> *config, const Epetra_MultiVector& X, Epetra_MultiVector& Y ) { int err; #ifndef NDEBUG int nvectors = X.NumVectors(); assert (nvectors == data->localrhs->NumVectors()); #endif // NDEBUG // Initialize the X vector for iterative solver data->Xs->PutScalar(0.0); // Get local portion of X data->localrhs->Import(X, *(data->BdImporter), Insert); data->localrhs->Print(std::cout); std::cout << " " << std::endl; data->locallhs->Print(std::cout); // locallhs is z in paper if (config->amesosForDiagonal) { std::cout << "calling amesos for diagon" << endl; ssym->OrigLP->SetRHS((data->localrhs).getRawPtr()); ssym->OrigLP->SetLHS((data->locallhs).getRawPtr()); std::cout << "set RHS and LHS " << std::endl; ssym->ReIdx_LP->fwd(); ssym->Solver->Solve(); } else { ssym->ifSolver->ApplyInverse(*(data->localrhs), *(data->locallhs)); } err = ssym->R->Multiply(false, *(data->locallhs), *(data->temp1)); assert (err == 0); // Export temp1 to a dist vector - temp2 data->temp2->Import(*(data->temp1), *(data->DistImporter), Insert); //Epetra_MultiVector Bs(SMap, nvectors); // b_2 - R * z in ShyLU paper data->Bs->Import(X, *(data->BsImporter), Insert); data->Bs->Update(-1.0, *(data->temp2), 1.0); AztecOO *solver = 0; Epetra_LinearProblem Problem(data->Sbar.get(), (data->Xs).getRawPtr(), (data->Bs).getRawPtr()); if ((config->schurSolver == "G") || (config->schurSolver == "IQR")) { IFPACK_CHK_ERR(data->iqrSolver->Solve(*(data->schur_op), *(data->Bs), *(data->Xs))); } else if (config->schurSolver == "Amesos") { Amesos_BaseSolver *solver2 = data->dsolver; data->OrigLP2->SetLHS((data->Xs).getRawPtr()); data->OrigLP2->SetRHS((data->Bs).getRawPtr()); data->ReIdx_LP2->fwd(); //cout << "Calling solve *****************************" << endl; solver2->Solve(); //cout << "Out of solve *****************************" << endl; } else { if (config->libName == "Belos") { solver = data->innersolver; solver->SetLHS((data->Xs).getRawPtr()); solver->SetRHS((data->Bs).getRawPtr()); } else { // See the comment above on why we are not able to reuse the solver // when outer solve is AztecOO as well. solver = new AztecOO(); //solver.SetPrecOperator(precop_); solver->SetAztecOption(AZ_solver, AZ_gmres); // Do not use AZ_none solver->SetAztecOption(AZ_precond, AZ_dom_decomp); //solver->SetAztecOption(AZ_precond, AZ_none); //solver->SetAztecOption(AZ_precond, AZ_Jacobi); ////solver->SetAztecOption(AZ_precond, AZ_Neumann); //solver->SetAztecOption(AZ_overlap, 3); //solver->SetAztecOption(AZ_subdomain_solve, AZ_ilu); //solver->SetAztecOption(AZ_output, AZ_all); //solver->SetAztecOption(AZ_diagnostics, AZ_all); solver->SetProblem(Problem); } // What should be a good inner_tolerance :-) ? 
solver->Iterate(config->inner_maxiters, config->inner_tolerance); } // Import Xs locally data->LocalXs->Import(*(data->Xs), *(data->XsImporter), Insert); err = ssym->C->Multiply(false, *(data->LocalXs), *(data->temp3)); assert (err == 0); data->temp3->Update(1.0, *(data->localrhs), -1.0); if (config->amesosForDiagonal) { ssym->OrigLP->SetRHS((data->temp3).getRawPtr()); ssym->OrigLP->SetLHS((data->locallhs).getRawPtr()); ssym->ReIdx_LP->fwd(); ssym->Solver->Solve(); } else { ssym->ifSolver->ApplyInverse(*(data->temp3), *(data->locallhs)); } Y.Export(*(data->locallhs), *(data->XdExporter), Insert); Y.Export(*(data->LocalXs), *(data->XsExporter), Insert); if (config->libName == "Belos" || config->schurSolver == "Amesos") { // clean up } else { delete solver; } return 0; }
int main(int argc, char *argv[]) { #ifdef HAVE_MPI MPI_Init(&argc,&argv); Epetra_MpiComm Comm (MPI_COMM_WORLD); #else Epetra_SerialComm Comm; #endif int MyPID = Comm.MyPID(); bool verbose = false; if (MyPID==0) verbose = true; // The problem is defined on a 2D grid, global size is nx * nx. int nx = 30; Teuchos::ParameterList GaleriList; GaleriList.set("nx", nx); GaleriList.set("ny", nx * Comm.NumProc()); GaleriList.set("mx", 1); GaleriList.set("my", Comm.NumProc()); Teuchos::RefCountPtr<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap("Cartesian2D", Comm, GaleriList) ); Teuchos::RefCountPtr<Epetra_CrsMatrix> A = Teuchos::rcp( Galeri::CreateCrsMatrix("Laplace2D", &*Map, GaleriList) ); Teuchos::RefCountPtr<Epetra_MultiVector> LHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) ); Teuchos::RefCountPtr<Epetra_MultiVector> RHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) ); LHS->PutScalar(0.0); RHS->Random(); // ============================ // // Construct ILU preconditioner // // ---------------------------- // // I wanna test funky values to be sure that they have the same // influence on the algorithms, both old and new int LevelFill = 2; double DropTol = 0.3333; double Condest; Teuchos::RefCountPtr<Ifpack_CrsIct> ICT; ICT = Teuchos::rcp( new Ifpack_CrsIct(*A,DropTol,LevelFill) ); ICT->SetAbsoluteThreshold(0.00123); ICT->SetRelativeThreshold(0.9876); // Init values from A ICT->InitValues(*A); // compute the factors ICT->Factor(); // and now estimate the condition number ICT->Condest(false,Condest); if( Comm.MyPID() == 0 ) { cout << "Condition number estimate (level-of-fill = " << LevelFill << ") = " << Condest << endl; } // Define label for printing out during the solve phase string label = "Ifpack_CrsIct Preconditioner: LevelFill = " + toString(LevelFill) + " Overlap = 0"; ICT->SetLabel(label.c_str()); // Here we create an AztecOO object LHS->PutScalar(0.0); int Niters = 1200; AztecOO solver; solver.SetUserMatrix(&*A); solver.SetLHS(&*LHS); solver.SetRHS(&*RHS); solver.SetAztecOption(AZ_solver,AZ_cg); solver.SetPrecOperator(&*ICT); solver.SetAztecOption(AZ_output, 16); solver.Iterate(Niters, 5.0e-5); int OldIters = solver.NumIters(); // now rebuild the same preconditioner using ICT, we expect the same // number of iterations Ifpack Factory; Teuchos::RefCountPtr<Ifpack_Preconditioner> Prec = Teuchos::rcp( Factory.Create("IC", &*A) ); Teuchos::ParameterList List; List.get("fact: level-of-fill", 2); List.get("fact: drop tolerance", 0.3333); List.get("fact: absolute threshold", 0.00123); List.get("fact: relative threshold", 0.9876); List.get("fact: relaxation value", 0.0); IFPACK_CHK_ERR(Prec->SetParameters(List)); IFPACK_CHK_ERR(Prec->Compute()); // Here we create an AztecOO object LHS->PutScalar(0.0); solver.SetUserMatrix(&*A); solver.SetLHS(&*LHS); solver.SetRHS(&*RHS); solver.SetAztecOption(AZ_solver,AZ_cg); solver.SetPrecOperator(&*Prec); solver.SetAztecOption(AZ_output, 16); solver.Iterate(Niters, 5.0e-5); int NewIters = solver.NumIters(); if (OldIters != NewIters) IFPACK_CHK_ERR(-1); #ifdef HAVE_MPI MPI_Finalize() ; #endif return(EXIT_SUCCESS); }
void set_options_pack_fem() {
  solver->SetAztecOption(AZ_precond, AZ_dom_decomp);
  solver->SetAztecOption(AZ_subdomain_solve, AZ_lu);
  solver->SetAztecOption(AZ_solver, AZ_cg);
}
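// Hedged sketch of the related subdomain-solver knobs for the AZ_dom_decomp
// preconditioner used above (assumption: same `solver` member; the option values
// are illustrative, not tuned): an incomplete subdomain factorization with
// overlap instead of the exact sparse LU.
void set_options_pack_fem_ilut() {
  solver->SetAztecOption(AZ_precond, AZ_dom_decomp);
  solver->SetAztecOption(AZ_subdomain_solve, AZ_ilut);  // ILUT on each subdomain
  solver->SetAztecOption(AZ_overlap, 1);                // one layer of subdomain overlap
  solver->SetAztecParam(AZ_ilut_fill, 1.5);             // ILUT fill factor
  solver->SetAztecOption(AZ_solver, AZ_cg);
}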
int main(int argc, char *argv[]) { #ifdef HAVE_MPI MPI_Init(&argc,&argv); Epetra_MpiComm Comm (MPI_COMM_WORLD); #else Epetra_SerialComm Comm; #endif int MyPID = Comm.MyPID(); bool verbose = false; if (MyPID==0) verbose = true; // matrix downloaded from MatrixMarket char FileName[] = "../HBMatrices/fidap005.rua"; Epetra_Map * readMap; // Pointers because of Trilinos_Util_ReadHb2Epetra Epetra_CrsMatrix * readA; Epetra_Vector * readx; Epetra_Vector * readb; Epetra_Vector * readxexact; // Call routine to read in HB problem Trilinos_Util_ReadHb2Epetra(FileName, Comm, readMap, readA, readx, readb, readxexact); int NumGlobalElements = readMap->NumGlobalElements(); // Create uniform distributed map Epetra_Map map(NumGlobalElements, 0, Comm); // Create Exporter to distribute read-in matrix and vectors Epetra_Export exporter(*readMap, map); Epetra_CrsMatrix A(Copy, map, 0); Epetra_Vector x(map); Epetra_Vector b(map); Epetra_Vector xexact(map); Epetra_Time FillTimer(Comm); A.Export(*readA, exporter, Add); x.Export(*readx, exporter, Add); b.Export(*readb, exporter, Add); xexact.Export(*readxexact, exporter, Add); A.FillComplete(); delete readA; delete readx; delete readb; delete readxexact; delete readMap; // ============================ // // Construct ILU preconditioner // // ---------------------------- // // modify those parameters int LevelFill = 1; double DropTol = 0.0; double Condest; Ifpack_CrsIct * ICT = NULL; ICT = new Ifpack_CrsIct(A,DropTol,LevelFill); // Init values from A ICT->InitValues(A); // compute the factors ICT->Factor(); // and now estimate the condition number ICT->Condest(false,Condest); cout << Condest << endl; if( Comm.MyPID() == 0 ) { cout << "Condition number estimate (level-of-fill = " << LevelFill << ") = " << Condest << endl; } // Define label for printing out during the solve phase string label = "Ifpack_CrsIct Preconditioner: LevelFill = " + toString(LevelFill) + " Overlap = 0"; ICT->SetLabel(label.c_str()); // Here we create an AztecOO object AztecOO solver; solver.SetUserMatrix(&A); solver.SetLHS(&x); solver.SetRHS(&b); solver.SetAztecOption(AZ_solver,AZ_cg); // Here we set the IFPACK preconditioner and specify few parameters solver.SetPrecOperator(ICT); int Niters = 1200; solver.SetAztecOption(AZ_kspace, Niters); solver.SetAztecOption(AZ_output, 20); solver.Iterate(Niters, 5.0e-5); if (ICT!=0) delete ICT; #ifdef HAVE_MPI MPI_Finalize() ; #endif return 0 ; }
int main(int argc, char *argv[]) { #ifdef HAVE_MPI MPI_Init(&argc,&argv); Epetra_MpiComm Comm (MPI_COMM_WORLD); #else Epetra_SerialComm Comm; #endif int MyPID = Comm.MyPID(); bool verbose = false; if (MyPID==0) verbose = true; Teuchos::ParameterList GaleriList; int nx = 30; GaleriList.set("nx", nx); GaleriList.set("ny", nx * Comm.NumProc()); GaleriList.set("mx", 1); GaleriList.set("my", Comm.NumProc()); Teuchos::RefCountPtr<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap("Cartesian2D", Comm, GaleriList) ); Teuchos::RefCountPtr<Epetra_CrsMatrix> A = Teuchos::rcp( Galeri::CreateCrsMatrix("Laplace2D", &*Map, GaleriList) ); Teuchos::RefCountPtr<Epetra_MultiVector> LHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) ); Teuchos::RefCountPtr<Epetra_MultiVector> RHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) ); LHS->PutScalar(0.0); RHS->Random(); // ============================ // // Construct ILU preconditioner // // ---------------------------- // // I wanna test funky values to be sure that they have the same // influence on the algorithms, both old and new int LevelFill = 2; double DropTol = 0.3333; double Athresh = 0.0123; double Rthresh = 0.9876; double Relax = 0.1; int Overlap = 2; Teuchos::RefCountPtr<Ifpack_IlukGraph> Graph; Teuchos::RefCountPtr<Ifpack_CrsRiluk> RILU; Graph = Teuchos::rcp( new Ifpack_IlukGraph(A->Graph(), LevelFill, Overlap) ); int ierr; ierr = Graph->ConstructFilledGraph(); IFPACK_CHK_ERR(ierr); RILU = Teuchos::rcp( new Ifpack_CrsRiluk(*Graph) ); RILU->SetAbsoluteThreshold(Athresh); RILU->SetRelativeThreshold(Rthresh); RILU->SetRelaxValue(Relax); int initerr = RILU->InitValues(*A); if (initerr!=0) cout << Comm << "*ERR* InitValues = " << initerr; RILU->Factor(); // Define label for printing out during the solve phase string label = "Ifpack_CrsRiluk Preconditioner: LevelFill = " + toString(LevelFill) + " Overlap = " + toString(Overlap) + " Athresh = " + toString(Athresh) + " Rthresh = " + toString(Rthresh); // Here we create an AztecOO object LHS->PutScalar(0.0); int Niters = 1200; AztecOO solver; solver.SetUserMatrix(&*A); solver.SetLHS(&*LHS); solver.SetRHS(&*RHS); solver.SetAztecOption(AZ_solver,AZ_gmres); solver.SetPrecOperator(&*RILU); solver.SetAztecOption(AZ_output, 16); solver.Iterate(Niters, 5.0e-5); int OldIters = solver.NumIters(); // now rebuild the same preconditioner using RILU, we expect the same // number of iterations Ifpack Factory; Teuchos::RefCountPtr<Ifpack_Preconditioner> Prec = Teuchos::rcp( Factory.Create("ILU", &*A, Overlap) ); Teuchos::ParameterList List; List.get("fact: level-of-fill", LevelFill); List.get("fact: drop tolerance", DropTol); List.get("fact: absolute threshold", Athresh); List.get("fact: relative threshold", Rthresh); List.get("fact: relax value", Relax); IFPACK_CHK_ERR(Prec->SetParameters(List)); IFPACK_CHK_ERR(Prec->Compute()); // Here we create an AztecOO object LHS->PutScalar(0.0); solver.SetUserMatrix(&*A); solver.SetLHS(&*LHS); solver.SetRHS(&*RHS); solver.SetAztecOption(AZ_solver,AZ_gmres); solver.SetPrecOperator(&*Prec); solver.SetAztecOption(AZ_output, 16); solver.Iterate(Niters, 5.0e-5); int NewIters = solver.NumIters(); if (OldIters != NewIters) IFPACK_CHK_ERR(-1); #ifdef HAVE_IFPACK_SUPERLU // Now test w/ SuperLU's ILU, if we've got it Teuchos::RefCountPtr<Ifpack_Preconditioner> Prec2 = Teuchos::rcp( Factory.Create("SILU", &*A,0) ); Teuchos::ParameterList SList; SList.set("fact: drop tolerance",1e-4); SList.set("fact: zero pivot threshold",.1); SList.set("fact: maximum fill factor",10.0); // NOTE: There is a bug 
  // in SuperLU 4.0 which will crash the code if the maximum fill factor is set too low.
  // This bug was reported to Sherry Li on 4/8/10.
  SList.set("fact: silu drop rule", 9);
  IFPACK_CHK_ERR(Prec2->SetParameters(SList));
  IFPACK_CHK_ERR(Prec2->Compute());
  LHS->PutScalar(0.0);
  solver.SetPrecOperator(&*Prec2);
  solver.Iterate(Niters, 5.0e-5);
  Prec2->Print(cout);
#endif
#ifdef HAVE_MPI
  MPI_Finalize();
#endif
  return(EXIT_SUCCESS);
}