int main(int argc, char *argv[]) { Teuchos::GlobalMPISession mpiSession(&argc, &argv); bool success = false; bool verbose = false; try { // Parse the command line using Teuchos::CommandLineProcessor; CommandLineProcessor clp; clp.throwExceptions(false); clp.addOutputSetupOptions(true); clp.setOption( "v", "disable-verbosity", &verbose, "Enable verbosity" ); CommandLineProcessor::EParseCommandLineReturn parse_return = clp.parse(argc,argv,&std::cerr); if( parse_return != CommandLineProcessor::PARSE_SUCCESSFUL ) return parse_return; if (verbose) std::cout << "Verbosity Activated" << std::endl; else std::cout << "Verbosity Disabled" << std::endl; // Create a communicator for Epetra objects #ifdef HAVE_MPI Epetra_MpiComm Comm( MPI_COMM_WORLD ); #else Epetra_SerialComm Comm; #endif const int num_elements = 400; // Check we have only one processor since this problem doesn't work // for more than one proc if (Comm.NumProc() > num_elements) { std::cerr << "Error! Number of elements must be greate than number of processors!" 
<< std::endl; return EXIT_FAILURE; } // Create the model evaluator object double paramC = 0.99; Teuchos::RCP<ModelEvaluatorHeq<double> > model = modelEvaluatorHeq<double>(Teuchos::rcp(&Comm,false),num_elements,paramC); ::Stratimikos::DefaultLinearSolverBuilder builder; Teuchos::RCP<Teuchos::ParameterList> p = Teuchos::rcp(new Teuchos::ParameterList); p->set("Linear Solver Type", "AztecOO"); p->sublist("Linear Solver Types").sublist("AztecOO").sublist("Forward Solve").sublist("AztecOO Settings").set("Output Frequency",20); p->set("Preconditioner Type", "Ifpack"); builder.setParameterList(p); Teuchos::RCP< ::Thyra::LinearOpWithSolveFactoryBase<double> > lowsFactory = builder.createLinearSolveStrategy(""); model->set_W_factory(lowsFactory); // Create the initial guess Teuchos::RCP< ::Thyra::VectorBase<double> > initial_guess = model->getNominalValues().get_x()->clone_v(); Thyra::V_S(initial_guess.ptr(),Teuchos::ScalarTraits<double>::one()); Teuchos::RCP<NOX::Thyra::Group> nox_group = Teuchos::rcp(new NOX::Thyra::Group(*initial_guess, model)); //Teuchos::rcp(new NOX::Thyra::Group(*initial_guess, model, model->create_W_op(), lowsFactory, Teuchos::null, Teuchos::null)); nox_group->computeF(); // Create the NOX status tests and the solver // Create the convergence tests Teuchos::RCP<NOX::StatusTest::NormF> absresid = Teuchos::rcp(new NOX::StatusTest::NormF(1.0e-8)); Teuchos::RCP<NOX::StatusTest::NormWRMS> wrms = Teuchos::rcp(new NOX::StatusTest::NormWRMS(1.0e-2, 1.0e-8)); Teuchos::RCP<NOX::StatusTest::Combo> converged = Teuchos::rcp(new NOX::StatusTest::Combo(NOX::StatusTest::Combo::AND)); converged->addStatusTest(absresid); converged->addStatusTest(wrms); Teuchos::RCP<NOX::StatusTest::MaxIters> maxiters = Teuchos::rcp(new NOX::StatusTest::MaxIters(20)); Teuchos::RCP<NOX::StatusTest::FiniteValue> fv = Teuchos::rcp(new NOX::StatusTest::FiniteValue); Teuchos::RCP<NOX::StatusTest::Combo> combo = Teuchos::rcp(new NOX::StatusTest::Combo(NOX::StatusTest::Combo::OR)); 
combo->addStatusTest(fv); combo->addStatusTest(converged); combo->addStatusTest(maxiters); // Create nox parameter list Teuchos::RCP<Teuchos::ParameterList> nl_params = Teuchos::rcp(new Teuchos::ParameterList); nl_params->set("Nonlinear Solver", "Anderson Accelerated Fixed-Point"); nl_params->sublist("Anderson Parameters").set("Storage Depth", 5); nl_params->sublist("Anderson Parameters").set("Mixing Parameter", 1.0); nl_params->sublist("Anderson Parameters").set("Acceleration Start Iteration", 5); nl_params->sublist("Anderson Parameters").sublist("Preconditioning").set("Precondition", false); nl_params->sublist("Direction").sublist("Newton").sublist("Linear Solver").set("Tolerance", 1.0e-4); nl_params->sublist("Printing").sublist("Output Information").set("Details",true); nl_params->sublist("Printing").sublist("Output Information").set("Outer Iteration",true); //nl_params->sublist("Printing").sublist("Output Information").set("Outer Iteration StatusTest",true); // Create the solver Teuchos::RCP<NOX::Solver::Generic> solver = NOX::Solver::buildSolver(nox_group, combo, nl_params); NOX::StatusTest::StatusType solvStatus = solver->solve(); // 1. Convergence int status = 0; if (solvStatus != NOX::StatusTest::Converged) status = 1; // 2. Number of iterations if (const_cast<Teuchos::ParameterList&>(solver->getList()).sublist("Output").get("Nonlinear Iterations", 0) != 14) status = 2; success = status==0; if (success) std::cout << "Test passed!" << std::endl; else std::cout << "Test failed!" << std::endl; } TEUCHOS_STANDARD_CATCH_STATEMENTS(verbose, std::cerr, success); return ( success ? EXIT_SUCCESS : EXIT_FAILURE ); }
// Regression test for Epetra_CrsMatrix memory management (bug #5499:
// invalid free / memory leak when FillComplete() is called on a
// static-profile matrix with no inserted entries). Failures manifest
// under a memory checker (e.g. valgrind), not through the return code,
// which is why ierr is always 0 on a normal run.
int main(int argc, char *argv[])
{
  int ierr = 0;
#ifdef EPETRA_MPI
  // Initialize MPI
  MPI_Init(&argc, &argv);
  int rank; // My process ID
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
  int rank = 0;
  Epetra_SerialComm Comm;
#endif

  bool verbose = false;

  // Check if we should print results to standard out
  if (argc>1)
    if (argv[1][0]=='-' && argv[1][1]=='v')
      verbose = true;

  // Broadcast rank 0's verbosity choice so all ranks agree.
  int verbose_int = verbose ? 1 : 0;
  Comm.Broadcast(&verbose_int, 1, 0);
  verbose = verbose_int==1 ? true : false;

  Comm.SetTracebackMode(0); // This should shut down any error traceback reporting
  int MyPID = Comm.MyPID();
  int NumProc = Comm.NumProc();

  if(verbose && MyPID==0)
    std::cout << Epetra_Version() << std::endl << std::endl;

  if (verbose) std::cout << "Processor "<<MyPID<<" of "<< NumProc
                         << " is alive."<< std::endl;

  // unused: bool verbose1 = verbose;

  // Redefine verbose to only print on PE 0
  if(verbose && rank!=0)
    verbose = false;

  if (verbose) std::cout << "Test the memory management system of the class CrsMatrix (memory leak, invalid free)" << std::endl;

  //
  // Test 1: code initially proposed to illustrate bug #5499
  //
  if(Comm.NumProc() == 1) { // this is a sequential test

    if (verbose) std::cout << "* Using Copy, ColMap, Variable number of indices per row and Static profile (cf. bug #5499)." << std::endl;

    // Row Map
    Epetra_Map RowMap(2, 0, Comm);

    // ColMap
    std::vector<int> colids(2);
    colids[0]=0;
    colids[1]=1;
    Epetra_Map ColMap(-1, 2, &colids[0], 0, Comm);

    // NumEntriesPerRow
    std::vector<int> NumEntriesPerRow(2);
    NumEntriesPerRow[0]=2;
    NumEntriesPerRow[1]=2;

    // Test
    Epetra_CrsMatrix A(Copy, RowMap, ColMap, &NumEntriesPerRow[0], true);
    // Bug #5499 shows up because InsertGlobalValues() is not called
    // (CrsMatrix::Values_ not allocated but freed)
    A.FillComplete();
  }

  //
  // Test 1 Bis: same as Test 1, but without ColMap and variable number of
  // indices per row. Does not seem to matter.
  //
  if(Comm.NumProc() == 1) { // this is a sequential test

    if (verbose) std::cout << "* Using Copy, Fixed number of indices per row and Static profile" << std::endl;

    Epetra_Map RowMap(2, 0, Comm);

    // Test
    Epetra_CrsMatrix A(Copy, RowMap, 1, true);
    // Bug #5499 shows up because InsertGlobalValues() is not called
    // (CrsMatrix::Values_ not allocated but freed)
    A.FillComplete();
  }

  //
  // Test 2: same as Test 1 Bis but with one call to InsertGlobalValues.
  //
  if(Comm.NumProc() == 1) {

    if (verbose) std::cout << "* Using Copy, Fixed number of indices per row and Static profile + InsertGlobalValues()." << std::endl;

    Epetra_Map RowMap(2, 0, Comm);

    // Test
    Epetra_CrsMatrix A(Copy, RowMap, 1, true);
    std::vector<int> Indices(1);
    std::vector<double> Values(1);
    Values[0] = 2;
    Indices[0] = 0;
    A.InsertGlobalValues(0, 1, &Values[0], &Indices[0]); // Memory leak if CrsMatrix::Values not freed
    A.FillComplete();
  }

  //
  // Test 3: check if the patch is not introducing some obvious regression
  //
  if(Comm.NumProc() == 1) {

    if (verbose) std::cout << "* Using Copy, Fixed number of indices per row and Dynamic profile" << std::endl;

    Epetra_Map RowMap(2, 0, Comm);

    // Test
    Epetra_CrsMatrix A(Copy, RowMap, 1, false);
    A.FillComplete();
  }

  //
  // Test 4: idem but with one call to InsertGlobalValues.
  //
  if(Comm.NumProc() == 1) {

    if (verbose) std::cout << "* Using Copy, Fixed number of indices per row and Dynamic profile + InsertGlobalValues()." << std::endl;

    Epetra_Map RowMap(2, 0, Comm);

    // Test
    Epetra_CrsMatrix A(Copy, RowMap, 1, false);
    std::vector<int> Indices(1);
    std::vector<double> Values(1);
    Values[0] = 2;
    Indices[0] = 0;
    A.InsertGlobalValues(0, 1, &Values[0], &Indices[0]);
    A.FillComplete();
  }

  //
  // Test 5: matrix built on top of a pre-built static Epetra_CrsGraph.
  //
  if(Comm.NumProc() == 1) {

    if (verbose) std::cout << "* Using Copy, Static Graph()." << std::endl;

    Epetra_Map RowMap(1, 0, Comm);

    // Test
    Epetra_CrsGraph G(Copy, RowMap, 1);
    std::vector<int> Indices(1);
    Indices[0] = 0;
    G.InsertGlobalIndices(0, 1, &Indices[0]);
    G.FillComplete();
    Epetra_CrsMatrix A(Copy, G);
    std::vector<double> Values(1);
    Values[0] = 2;
    A.ReplaceGlobalValues(0, 1, &Values[0], &Indices[0]);
    A.FillComplete();
    double norminf = A.NormInf();
    if (verbose) std::cout << "** Inf Norm of Matrix = " << norminf << "." << std::endl;
    std::cout << A << std::endl;
  }

  //
  // Test 6: single-row static profile with one insertion.
  //
  if(Comm.NumProc() == 1) {

    if (verbose) std::cout << "* Using Copy, Fixed number of indices per row and static profile + InsertGlobalValues() for a single row." << std::endl;

    Epetra_Map RowMap(1, 0, Comm);

    // Test
    Epetra_CrsMatrix A(Copy, RowMap, 1, true);
    std::vector<int> Indices(1);
    std::vector<double> Values(1);
    Values[0] = 2;
    Indices[0] = 0;
    A.InsertGlobalValues(0, 1, &Values[0], &Indices[0]);
    A.FillComplete();
  }

  // The FAILED branch is intentionally disabled: this test has no
  // in-process failure detection (see the header comment above).
  /*
  if (bool) {
    if (verbose)
      std::cout << std::endl << "tests FAILED" << std::endl << std::endl;
  }
  else {*/
  if (verbose)
    std::cout << std::endl << "tests PASSED" << std::endl << std::endl;
  /* } */

#ifdef EPETRA_MPI
  MPI_Finalize();
#endif

  return ierr;
}
int main(int argc, char *argv[]) { // Initialize MPI #ifdef HAVE_MPI MPI_Init(&argc,&argv); #endif // Create a communicator for Epetra objects #ifdef HAVE_MPI Epetra_MpiComm Comm( MPI_COMM_WORLD ); #else Epetra_SerialComm Comm; #endif // Get the process ID and the total number of processors int MyPID = Comm.MyPID(); int NumProc = Comm.NumProc(); // Check verbosity level bool verbose = false; if (argc > 1) if (argv[1][0]=='-' && argv[1][1]=='v') verbose = true; // Get the number of elements from the command line int NumGlobalElements = 0; if ((argc > 2) && (verbose)) NumGlobalElements = atoi(argv[2]) + 1; else if ((argc > 1) && (!verbose)) NumGlobalElements = atoi(argv[1]) + 1; else NumGlobalElements = 101; // The number of unknowns must be at least equal to the // number of processors. if (NumGlobalElements < NumProc) { cout << "numGlobalBlocks = " << NumGlobalElements << " cannot be < number of processors = " << NumProc << endl; cout << "Test failed!" << endl; throw "NOX Error"; } // Create the interface between NOX and the application // This object is derived from NOX::Epetra::Interface Teuchos::RCP<Interface> interface = Teuchos::rcp(new Interface(NumGlobalElements, Comm)); // Set the PDE factor (for nonlinear forcing term). This could be specified // via user input. interface->setPDEfactor(1000.0); // Use a scaled vector space. The scaling must also be registered // with the linear solver so the linear system is consistent! 
Teuchos::RCP<Epetra_Vector> scaleVec = Teuchos::rcp(new Epetra_Vector( *(interface->getSolution()))); scaleVec->PutScalar(2.0); Teuchos::RCP<NOX::Epetra::Scaling> scaling = Teuchos::rcp(new NOX::Epetra::Scaling); scaling->addUserScaling(NOX::Epetra::Scaling::Left, scaleVec); // Use a weighted vector space for scaling all norms Teuchos::RCP<NOX::Epetra::VectorSpace> weightedVectorSpace = Teuchos::rcp(new NOX::Epetra::VectorSpaceScaledL2(scaling)); // Get the vector from the Problem Teuchos::RCP<Epetra_Vector> soln = interface->getSolution(); Teuchos::RCP<NOX::Epetra::Vector> noxSoln = Teuchos::rcp(new NOX::Epetra::Vector(soln, NOX::Epetra::Vector::CreateCopy, NOX::DeepCopy, weightedVectorSpace)); // Begin Nonlinear Solver ************************************ // Create the top level parameter list Teuchos::RCP<Teuchos::ParameterList> nlParamsPtr = Teuchos::rcp(new Teuchos::ParameterList); Teuchos::ParameterList& nlParams = *(nlParamsPtr.get()); // Set the nonlinear solver method nlParams.set("Nonlinear Solver", "Line Search Based"); // Set the printing parameters in the "Printing" sublist Teuchos::ParameterList& printParams = nlParams.sublist("Printing"); printParams.set("MyPID", MyPID); printParams.set("Output Precision", 3); printParams.set("Output Processor", 0); if (verbose) printParams.set("Output Information", NOX::Utils::OuterIteration + NOX::Utils::OuterIterationStatusTest + NOX::Utils::InnerIteration + NOX::Utils::LinearSolverDetails + NOX::Utils::Parameters + NOX::Utils::Details + NOX::Utils::Warning + NOX::Utils::Debug + NOX::Utils::TestDetails + NOX::Utils::Error); else printParams.set("Output Information", NOX::Utils::Error + NOX::Utils::TestDetails); // Create a print class for controlling output below NOX::Utils printing(printParams); // Sublist for line search Teuchos::ParameterList& searchParams = nlParams.sublist("Line Search"); searchParams.set("Method", "Full Step"); // Sublist for direction Teuchos::ParameterList& dirParams = 
nlParams.sublist("Direction"); dirParams.set("Method", "Newton"); Teuchos::ParameterList& newtonParams = dirParams.sublist("Newton"); newtonParams.set("Forcing Term Method", "Constant"); // Sublist for linear solver for the Newton method Teuchos::ParameterList& lsParams = newtonParams.sublist("Linear Solver"); lsParams.set("Aztec Solver", "GMRES"); lsParams.set("Max Iterations", 800); lsParams.set("Tolerance", 1e-4); // Various Preconditioner options //lsParams.set("Preconditioner", "AztecOO"); lsParams.set("Preconditioner", "Ifpack"); lsParams.set("Preconditioner Reuse Policy", "Reuse"); lsParams.set("Max Age Of Prec", 5); // Add a user defined pre/post operator object Teuchos::RCP<NOX::Abstract::PrePostOperator> ppo = Teuchos::rcp(new UserPrePostOperator(printing)); nlParams.sublist("Solver Options").set("User Defined Pre/Post Operator", ppo); // Let's force all status tests to do a full check nlParams.sublist("Solver Options").set("Status Test Check Type", "Complete"); // User supplied (Epetra_RowMatrix) Teuchos::RCP<Epetra_RowMatrix> Analytic = interface->getJacobian(); // Create the linear system Teuchos::RCP<NOX::Epetra::Interface::Required> iReq = interface; Teuchos::RCP<NOX::Epetra::Interface::Jacobian> iJac = interface; Teuchos::RCP<NOX::Epetra::LinearSystemAztecOO> linSys = Teuchos::rcp(new NOX::Epetra::LinearSystemAztecOO(printParams, lsParams, iReq, iJac, Analytic, *noxSoln, scaling)); // Create the Group Teuchos::RCP<NOX::Epetra::Group> grpPtr = Teuchos::rcp(new NOX::Epetra::Group(printParams, iReq, *noxSoln, linSys)); NOX::Epetra::Group& grp = *grpPtr; // uncomment the following for loca supergroups //MF->setGroupForComputeF(*grpPtr); //FD->setGroupForComputeF(*grpPtr); // Create the convergence tests Teuchos::RCP<NOX::StatusTest::NormF> absresid = Teuchos::rcp(new NOX::StatusTest::NormF(1.0e-8)); Teuchos::RCP<NOX::StatusTest::NormF> relresid = Teuchos::rcp(new NOX::StatusTest::NormF(grp, 1.0e-2)); Teuchos::RCP<NOX::StatusTest::NormUpdate> update = 
Teuchos::rcp(new NOX::StatusTest::NormUpdate(1.0e-5)); Teuchos::RCP<NOX::StatusTest::NormWRMS> wrms = Teuchos::rcp(new NOX::StatusTest::NormWRMS(1.0e-2, 1.0e-8)); Teuchos::RCP<NOX::StatusTest::Combo> converged = Teuchos::rcp(new NOX::StatusTest::Combo(NOX::StatusTest::Combo::AND)); converged->addStatusTest(absresid); converged->addStatusTest(relresid); converged->addStatusTest(wrms); converged->addStatusTest(update); Teuchos::RCP<NOX::StatusTest::MaxIters> maxiters = Teuchos::rcp(new NOX::StatusTest::MaxIters(20)); Teuchos::RCP<NOX::StatusTest::FiniteValue> fv = Teuchos::rcp(new NOX::StatusTest::FiniteValue); Teuchos::RCP<NOX::StatusTest::Combo> combo = Teuchos::rcp(new NOX::StatusTest::Combo(NOX::StatusTest::Combo::OR)); combo->addStatusTest(fv); combo->addStatusTest(converged); combo->addStatusTest(maxiters); // Create the solver Teuchos::RCP<NOX::Solver::Generic> solver = NOX::Solver::buildSolver(grpPtr, combo, nlParamsPtr); NOX::StatusTest::StatusType solvStatus = solver->solve(); // End Nonlinear Solver ************************************** // Get the Epetra_Vector with the final solution from the solver const NOX::Epetra::Group& finalGroup = dynamic_cast<const NOX::Epetra::Group&>(solver->getSolutionGroup()); const Epetra_Vector& finalSolution = (dynamic_cast<const NOX::Epetra::Vector&>(finalGroup.getX())). getEpetraVector(); // Output the parameter list if (verbose) { if (printing.isPrintType(NOX::Utils::Parameters)) { printing.out() << endl << "Final Parameters" << endl << "****************" << endl; solver->getList().print(printing.out()); printing.out() << endl; } } // Print solution char file_name[25]; FILE *ifp; int NumMyElements = soln->Map().NumMyElements(); (void) sprintf(file_name, "output.%d",MyPID); ifp = fopen(file_name, "w"); for (int i=0; i<NumMyElements; i++) fprintf(ifp, "%d %E\n", soln->Map().MinMyGID()+i, finalSolution[i]); fclose(ifp); // Tests int status = 0; // Converged // 1. 
Convergence if (solvStatus != NOX::StatusTest::Converged) { status = 1; if (printing.isPrintType(NOX::Utils::Error)) printing.out() << "Nonlinear solver failed to converge!" << endl; } #ifndef HAVE_MPI // 2. Linear solve iterations (53) - SERIAL TEST ONLY! // The number of linear iterations changes with # of procs. if (const_cast<Teuchos::ParameterList&>(solver->getList()).sublist("Direction").sublist("Newton").sublist("Linear Solver").sublist("Output").get("Total Number of Linear Iterations",0) != 53) { status = 2; } #endif // 3. Nonlinear solve iterations (10) if (const_cast<Teuchos::ParameterList&>(solver->getList()).sublist("Output").get("Nonlinear Iterations", 0) != 10) status = 3; // 4. Test the pre/post iterate options { UserPrePostOperator* ppoPtr = dynamic_cast<UserPrePostOperator*>(ppo.get()); if (ppoPtr->getNumRunPreIterate() != 10) status = 4; if (ppoPtr->getNumRunPostIterate() != 10) status = 4; if (ppoPtr->getNumRunPreSolve() != 1) status = 4; if (ppoPtr->getNumRunPostSolve() != 1) status = 4; } // Summarize test results if (status == 0) printing.out() << "Test passed!" << endl; else printing.out() << "Test failed!" << endl; #ifdef HAVE_MPI MPI_Finalize(); #endif // Final return value (0 = successfull, non-zero = failure) return status; }
int main(int argc, char *argv[]) { int ierr = 0; // scale factor to test arc-length scaling double scale = 1.0; // Initialize MPI #ifdef HAVE_MPI MPI_Init(&argc,&argv); #endif // Create a communicator for Epetra objects #ifdef HAVE_MPI Epetra_MpiComm Comm( MPI_COMM_WORLD ); #else Epetra_SerialComm Comm; #endif // Get the process ID and the total number of processors int MyPID = Comm.MyPID(); int NumProc = Comm.NumProc(); // Get the number of elements from the command line int NumGlobalElements = 100 + 1; // The number of unknowns must be at least equal to the // number of processors. if (NumGlobalElements < NumProc) { cout << "numGlobalBlocks = " << NumGlobalElements << " cannot be < number of processors = " << NumProc << endl; exit(1); } // Create the FiniteElementProblem class. This creates all required // Epetra objects for the problem and allows calls to the // function (RHS) and Jacobian evaluation routines. FiniteElementProblem Problem(NumGlobalElements, Comm, scale); // Get the vector from the Problem Epetra_Vector& soln = Problem.getSolution(); // Initialize Solution soln.PutScalar(1.0); // Begin LOCA Solver ************************************ // Create parameter list Teuchos::RCP<Teuchos::ParameterList> paramList = Teuchos::rcp(new Teuchos::ParameterList); // Create LOCA sublist Teuchos::ParameterList& locaParamsList = paramList->sublist("LOCA"); // Create the stepper sublist and set the stepper parameters Teuchos::ParameterList& locaStepperList = locaParamsList.sublist("Stepper"); locaStepperList.set("Continuation Method", "Arc Length"); locaStepperList.set("Bordered Solver Method", "Householder"); locaStepperList.set("Number of Continuation Parameters", 2); locaStepperList.set("Epsilon", 0.1); locaStepperList.set("Max Charts", 10000); locaStepperList.set("Verbosity", 1); locaStepperList.set("Page Charts", 1); locaStepperList.set("Dump Polyhedra", true); locaStepperList.set("Dump Centers", false); locaStepperList.set("Filename", "MFresults"); 
locaStepperList.set("Enable Arc Length Scaling", false); locaStepperList.set("Max Nonlinear Iterations", 15); locaStepperList.set("Aggressiveness", 0.0); locaStepperList.set("Max Solution Component", 6.0); // Create sublist for each continuation parameter Teuchos::ParameterList& paramList1 = locaStepperList.sublist("Continuation Parameter 1"); paramList1.set("Parameter Name", "Right BC"); paramList1.set("Initial Value", 0.1); paramList1.set("Max Value", 4.0); paramList1.set("Min Value", 0.0); paramList1.set("Initial Step Size", 0.1); paramList1.set("Max Step Size", 0.2); paramList1.set("Min Step Size", 1.0e-3); Teuchos::ParameterList& paramList2 = locaStepperList.sublist("Continuation Parameter 2"); paramList2.set("Parameter Name", "Nonlinear Factor"); paramList2.set("Initial Value", 1.0); paramList2.set("Max Value", 4.0); paramList2.set("Min Value", 0.0); paramList2.set("Initial Step Size", 0.1); paramList2.set("Max Step Size", 0.2); paramList2.set("Min Step Size", 1.0e-3); // Create predictor sublist Teuchos::ParameterList& predictorList = locaParamsList.sublist("Predictor"); predictorList.set("Method", "Tangent"); // Create the "Solver" parameters sublist to be used with NOX Solvers Teuchos::ParameterList& nlParams = paramList->sublist("NOX"); // Create the NOX printing parameter list Teuchos::ParameterList& nlPrintParams = nlParams.sublist("Printing"); nlPrintParams.set("MyPID", MyPID); nlPrintParams.set("Output Information", NOX::Utils::OuterIteration + NOX::Utils::OuterIterationStatusTest + NOX::Utils::InnerIteration + NOX::Utils::LinearSolverDetails + NOX::Utils::Details + NOX::Utils::Warning + NOX::Utils::StepperIteration + NOX::Utils::StepperDetails + NOX::Utils::StepperParameters); // Create the "Linear Solver" sublist for Newton's method Teuchos::ParameterList& dirParams = nlParams.sublist("Direction"); Teuchos::ParameterList& newParams = dirParams.sublist("Newton"); Teuchos::ParameterList& lsParams = newParams.sublist("Linear Solver"); 
lsParams.set("Aztec Solver", "GMRES"); lsParams.set("Max Iterations", 100); lsParams.set("Tolerance", 1e-4); lsParams.set("Output Frequency", 50); lsParams.set("Scaling", "None"); lsParams.set("Preconditioner", "Ifpack"); // Create and initialize the parameter vector LOCA::ParameterVector pVector; pVector.addParameter("Right BC", 0.1); pVector.addParameter("Nonlinear Factor",1.0); pVector.addParameter("Left BC", 0.0); // Create the interface between the test problem and the nonlinear solver // This is created by the user using inheritance of the abstract base class: Teuchos::RCP<Problem_Interface> interface = Teuchos::rcp(new Problem_Interface(Problem)); Teuchos::RCP<LOCA::Epetra::Interface::Required> iReq = interface; Teuchos::RCP<NOX::Epetra::Interface::Jacobian> iJac = interface; // Create the Epetra_RowMatrixfor the Jacobian/Preconditioner Teuchos::RCP<Epetra_RowMatrix> Amat = Teuchos::rcp(&Problem.getJacobian(),false); // Create the linear systems Teuchos::RCP<NOX::Epetra::LinearSystemAztecOO> linsys = Teuchos::rcp(new NOX::Epetra::LinearSystemAztecOO(nlPrintParams, lsParams, iReq, iJac, Amat, soln)); // Create the loca vector NOX::Epetra::Vector locaSoln(soln); // Create Epetra factory Teuchos::RCP<LOCA::Abstract::Factory> epetraFactory = Teuchos::rcp(new LOCA::Epetra::Factory); // Create global data object Teuchos::RCP<LOCA::GlobalData> globalData = LOCA::createGlobalData(paramList, epetraFactory); // Create the Group Teuchos::RCP<LOCA::Epetra::Group> grp = Teuchos::rcp(new LOCA::Epetra::Group(globalData, nlPrintParams, iReq, locaSoln, linsys, pVector)); grp->computeF(); // Create the Solver convergence test //NOX::StatusTest::NormWRMS wrms(1.0e-2, 1.0e-8); Teuchos::RCP<NOX::StatusTest::NormF> wrms = Teuchos::rcp(new NOX::StatusTest::NormF(1.0e-8)); Teuchos::RCP<NOX::StatusTest::MaxIters> maxiters = Teuchos::rcp(new NOX::StatusTest::MaxIters(locaStepperList.get("Max Nonlinear Iterations", 10))); Teuchos::RCP<NOX::StatusTest::Combo> combo = Teuchos::rcp(new 
NOX::StatusTest::Combo(NOX::StatusTest::Combo::OR)); combo->addStatusTest(wrms); combo->addStatusTest(maxiters); // Create the stepper LOCA::MultiStepper stepper(globalData, grp, combo, paramList); LOCA::Abstract::Iterator::IteratorStatus status = stepper.run(); if (status == LOCA::Abstract::Iterator::Finished) globalData->locaUtils->out() << "All tests passed" << endl; else { if (globalData->locaUtils->isPrintType(NOX::Utils::Error)) globalData->locaUtils->out() << "Stepper failed to converge!" << std::endl; } // Output the parameter list if (globalData->locaUtils->isPrintType(NOX::Utils::StepperParameters)) { globalData->locaUtils->out() << std::endl << "Final Parameters" << std::endl << "****************" << std::endl; stepper.getList()->print(globalData->locaUtils->out()); globalData->locaUtils->out() << std::endl; } LOCA::destroyGlobalData(globalData); #ifdef HAVE_MPI MPI_Finalize() ; #endif /* end main */ return ierr ; }
// Unit test: builds a 2x2 blocked Thyra operator from four Galeri-style
// Epetra matrices, wraps it in a Thyra::EpetraOperatorWrapper, and checks
// (a) the flattened Epetra range/domain maps, (b) the Thyra<->Epetra
// vector copy helpers entry-by-entry, and (c) the wrapper as a linear
// operator via Thyra::LinearOpTester.
TEUCHOS_UNIT_TEST( EpetraOperatorWrapper, basic )
{
#ifdef HAVE_MPI
  Epetra_MpiComm comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm comm;
#endif

  out << "\nRunning on " << comm.NumProc() << " processors\n";

  int nx = 39; // essentially random values
  int ny = 53;

  out << "Using Trilinos_Util to create test matrices\n";

  // create some big blocks to play with
  Trilinos_Util::CrsMatrixGallery FGallery("recirc_2d",comm);
  FGallery.Set("nx",nx);
  FGallery.Set("ny",ny);
  RCP<Epetra_CrsMatrix> F = rcp(FGallery.GetMatrix(),false); // non-owning

  Trilinos_Util::CrsMatrixGallery CGallery("laplace_2d",comm);
  CGallery.Set("nx",nx);
  CGallery.Set("ny",ny);
  RCP<Epetra_CrsMatrix> C = rcp(CGallery.GetMatrix(),false);

  Trilinos_Util::CrsMatrixGallery BGallery("diag",comm);
  BGallery.Set("nx",nx*ny);
  BGallery.Set("a",5.0);
  RCP<Epetra_CrsMatrix> B = rcp(BGallery.GetMatrix(),false);

  Trilinos_Util::CrsMatrixGallery BtGallery("diag",comm);
  BtGallery.Set("nx",nx*ny);
  BtGallery.Set("a",3.0);
  RCP<Epetra_CrsMatrix> Bt = rcp(BtGallery.GetMatrix(),false);

  // load'em up in a thyra operator
  out << "Building block2x2 Thyra matrix ... wrapping in EpetraOperatorWrapper\n";
  const RCP<const LinearOpBase<double> > A =
    Thyra::block2x2<double>(
      Thyra::epetraLinearOp(F),
      Thyra::epetraLinearOp(Bt),
      Thyra::epetraLinearOp(B),
      Thyra::epetraLinearOp(C),
      "A"
      );

  const RCP<Thyra::EpetraOperatorWrapper> epetra_A =
    rcp(new Thyra::EpetraOperatorWrapper(A));

  // begin the tests!
  const Epetra_Map & rangeMap  = epetra_A->OperatorRangeMap();
  const Epetra_Map & domainMap = epetra_A->OperatorDomainMap();

  // check to see that the number of global elements is correct
  // (two nx*ny blocks stacked)
  TEST_EQUALITY(rangeMap.NumGlobalElements(), 2*nx*ny);
  TEST_EQUALITY(domainMap.NumGlobalElements(), 2*nx*ny);

  // largest global ID should be one less then the # of elements
  TEST_EQUALITY(rangeMap.NumGlobalElements()-1, rangeMap.MaxAllGID());
  TEST_EQUALITY(domainMap.NumGlobalElements()-1, domainMap.MaxAllGID());

  // create a vector to test: copyThyraIntoEpetra
  {
    const RCP<VectorBase<double> > tv = Thyra::createMember(A->domain());
    Thyra::randomize(-100.0, 100.0, tv.ptr());
    const RCP<const VectorBase<double> > tv_0 =
      Thyra::productVectorBase<double>(tv)->getVectorBlock(0);
    const RCP<const VectorBase<double> > tv_1 =
      Thyra::productVectorBase<double>(tv)->getVectorBlock(1);
    const Thyra::ConstDetachedSpmdVectorView<double> vv_0(tv_0);
    const Thyra::ConstDetachedSpmdVectorView<double> vv_1(tv_1);

    // NOTE(review): this scope uses the view's globalOffset() while the
    // copyEpetraIntoThyra scope below uses the space's localOffset() —
    // confirm both index the detached views consistently on >1 proc.
    int off_0 = vv_0.globalOffset();
    int off_1 = vv_1.globalOffset();

    // create its Epetra counter part
    Epetra_Vector ev(epetra_A->OperatorDomainMap());
    epetra_A->copyThyraIntoEpetra(*tv, ev);

    // compare handle_tv to ev!
    TEST_EQUALITY(tv->space()->dim(), as<Ordinal>(ev.GlobalLength()));
    const int numMyElements = domainMap.NumMyElements();
    double tval = 0.0;
    for(int i=0; i < numMyElements; i++) {
      int gid = domainMap.GID(i);
      // GIDs below nx*ny belong to block 0, the rest to block 1
      if(gid<nx*ny)
        tval = vv_0[gid-off_0];
      else
        tval = vv_1[gid-off_1-nx*ny];
      TEST_EQUALITY(ev[i], tval);
    }
  }

  // create a vector to test: copyEpetraIntoThyra
  {
    // create an Epetra vector
    Epetra_Vector ev(epetra_A->OperatorDomainMap());
    ev.Random();

    // create its thyra counterpart
    const RCP<VectorBase<double> > tv = Thyra::createMember(A->domain());
    const RCP<const VectorBase<double> > tv_0 =
      Thyra::productVectorBase<double>(tv)->getVectorBlock(0);
    const RCP<const VectorBase<double> > tv_1 =
      Thyra::productVectorBase<double>(tv)->getVectorBlock(1);
    const Thyra::ConstDetachedSpmdVectorView<double> vv_0(tv_0);
    const Thyra::ConstDetachedSpmdVectorView<double> vv_1(tv_1);

    int off_0 = rcp_dynamic_cast<const Thyra::SpmdVectorSpaceBase<double> >(
      tv_0->space())->localOffset();
    int off_1 = rcp_dynamic_cast<const Thyra::SpmdVectorSpaceBase<double> >(
      tv_1->space())->localOffset();

    epetra_A->copyEpetraIntoThyra(ev, tv.ptr());

    // compare tv to ev!
    TEST_EQUALITY(tv->space()->dim(), as<Ordinal>(ev.GlobalLength()));
    int numMyElements = domainMap.NumMyElements();
    double tval = 0.0;
    for(int i=0;i<numMyElements;i++) {
      int gid = domainMap.GID(i);
      if(gid<nx*ny)
        tval = vv_0[gid-off_0];
      else
        tval = vv_1[gid-off_1-nx*ny];
      TEST_EQUALITY(ev[i], tval);
    }
  }

  // Test using Thyra::LinearOpTester
  const RCP<const LinearOpBase<double> > thyraEpetraOp = epetraLinearOp(epetra_A);
  LinearOpTester<double> linearOpTester;
  linearOpTester.show_all_tests(true);
  const bool checkResult = linearOpTester.check(*thyraEpetraOp, inOutArg(out));
  TEST_ASSERT(checkResult);
}
// Test driver: 2D Laplace problem (VBR format, 2 PDE equations per node)
// preconditioned with ML using Zoltan aggregation on selected levels,
// solved with AztecOO CG.  Verifies the true residual ||b - Ax||_2.
int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  // Optional first argument overrides the per-subdomain grid size.
  int nx;
  if (argc > 1)
    nx = (int) strtol(argv[1],NULL,10);
  else
    nx = 256;
  int ny = nx * Comm.NumProc(); // each subdomain is a square

  ParameterList GaleriList;
  GaleriList.set("nx", nx);
  GaleriList.set("ny", ny);
  GaleriList.set("mx", 1);
  GaleriList.set("my", Comm.NumProc());

  int NumNodes = nx*ny;
  int NumPDEEqns = 2;

  // Build the scalar Laplace matrix, then expand to VBR with
  // NumPDEEqns equations per node.
  Epetra_Map* Map = CreateMap("Cartesian2D", Comm, GaleriList);
  Epetra_CrsMatrix* CrsA = CreateCrsMatrix("Laplace2D", Map, GaleriList);
  Epetra_VbrMatrix* A = CreateVbrMatrix(CrsA, NumPDEEqns);

  Epetra_Vector LHS(A->DomainMap()); LHS.PutScalar(0);
  Epetra_Vector RHS(A->DomainMap()); RHS.Random();

  Epetra_LinearProblem Problem(A, &LHS, &RHS);
  AztecOO solver(Problem);

  double *x_coord = 0, *y_coord = 0, *z_coord = 0;

  // Node coordinates are needed by the Zoltan aggregation scheme.
  Epetra_MultiVector *coords =
    CreateCartesianCoordinates("2D", &(CrsA->Map()), GaleriList);

  // ExtractView() appears to follow the Epetra 0-on-success convention,
  // hence the negated test taking the success branch.
  double **ttt;
  if (!coords->ExtractView(&ttt)) {
    x_coord = ttt[0];
    y_coord = ttt[1];
  } else {
    printf("Error extracting coordinate vectors\n");
#   ifdef HAVE_MPI
    MPI_Finalize() ;
#   endif
    exit(EXIT_FAILURE);
  }

  ParameterList MLList;
  SetDefaults("SA",MLList);
  MLList.set("ML output",10);
  MLList.set("max levels",10);
  MLList.set("increasing or decreasing","increasing");
  MLList.set("smoother: type", "Chebyshev");
  MLList.set("smoother: sweeps", 3);

  // *) if a low number, it will use all the available processes
  // *) if a big number, it will use only processor 0 on the next level
  MLList.set("aggregation: next-level aggregates per process", 1);

  MLList.set("aggregation: type (level 0)", "Zoltan");
  MLList.set("aggregation: type (level 1)", "Uncoupled");
  MLList.set("aggregation: type (level 2)", "Zoltan");
  MLList.set("aggregation: smoothing sweeps", 2);

  MLList.set("x-coordinates", x_coord);
  MLList.set("y-coordinates", y_coord);
  MLList.set("z-coordinates", z_coord); // z stays NULL in 2D

  // specify the reduction with respect to the previous level
  // (very small values can break the code)
  int ratio = 16;
  MLList.set("aggregation: global aggregates (level 0)",
             NumNodes / ratio);
  MLList.set("aggregation: global aggregates (level 1)",
             NumNodes / (ratio * ratio));
  MLList.set("aggregation: global aggregates (level 2)",
             NumNodes / (ratio * ratio * ratio));

  MultiLevelPreconditioner* MLPrec =
    new MultiLevelPreconditioner(*A, MLList, true);

  solver.SetPrecOperator(MLPrec);
  solver.SetAztecOption(AZ_solver, AZ_cg_condnum);
  solver.SetAztecOption(AZ_output, 1);
  solver.Iterate(100, 1e-12);

  // compute the real residual
  Epetra_Vector Residual(A->DomainMap());
  //1.0 * RHS + 0.0 * RHS - 1.0 * (A * LHS)
  A->Apply(LHS,Residual);
  Residual.Update(1.0, RHS, 0.0, RHS, -1.0);
  double rn;
  Residual.Norm2(&rn);

  if (Comm.MyPID() == 0 )
    std::cout << "||b-Ax||_2 = " << rn << endl;

  if (Comm.MyPID() == 0 && rn > 1e-5) {
    std::cout << "TEST FAILED!!!!" << endl;
#   ifdef HAVE_MPI
    MPI_Finalize() ;
#   endif
    exit(EXIT_FAILURE);
  }

  // Release heap-allocated objects (preconditioner before the matrix).
  delete MLPrec;
  delete coords;
  delete Map;
  delete CrsA;
  delete A;

  if (Comm.MyPID() == 0)
    std::cout << "TEST PASSED" << endl;

#ifdef HAVE_MPI
  MPI_Finalize() ;
#endif

  exit(EXIT_SUCCESS);
}
// ====================================================================== int main(int argc, char *argv[]) { #ifdef HAVE_MPI MPI_Init(&argc,&argv); Epetra_MpiComm Comm( MPI_COMM_WORLD ); #else Epetra_SerialComm Comm; #endif verbose = (Comm.MyPID() == 0); for (int i = 1 ; i < argc ; ++i) { if (strcmp(argv[i],"-s") == 0) { SymmetricGallery = true; Solver = AZ_cg; } } // size of the global matrix. Teuchos::ParameterList GaleriList; int nx = 30; GaleriList.set("nx", nx); GaleriList.set("ny", nx * Comm.NumProc()); GaleriList.set("mx", 1); GaleriList.set("my", Comm.NumProc()); Teuchos::RefCountPtr<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap("Cartesian2D", Comm, GaleriList) ); Teuchos::RefCountPtr<Epetra_CrsMatrix> A; if (SymmetricGallery) A = Teuchos::rcp( Galeri::CreateCrsMatrix("Laplace2D", &*Map, GaleriList) ); else A = Teuchos::rcp( Galeri::CreateCrsMatrix("Recirc2D", &*Map, GaleriList) ); // test the preconditioner int TestPassed = true; // ======================================== // // first verify that we can get convergence // // with all point relaxation methods // // ======================================== // if(!BasicTest("Jacobi",A,false)) TestPassed = false; if(!BasicTest("symmetric Gauss-Seidel",A,false)) TestPassed = false; if(!BasicTest("symmetric Gauss-Seidel",A,false,true)) TestPassed = false; if (!SymmetricGallery) { if(!BasicTest("Gauss-Seidel",A,false)) TestPassed = false; if(!BasicTest("Gauss-Seidel",A,true)) TestPassed = false; if(!BasicTest("Gauss-Seidel",A,false,true)) TestPassed = false; if(!BasicTest("Gauss-Seidel",A,true,true)) TestPassed = false; } // ============================= // // check uses as preconditioners // // ============================= // if(!KrylovTest("symmetric Gauss-Seidel",A,false)) TestPassed = false; if(!KrylovTest("symmetric Gauss-Seidel",A,false,true)) TestPassed = false; if (!SymmetricGallery) { if(!KrylovTest("Gauss-Seidel",A,false)) TestPassed = false; if(!KrylovTest("Gauss-Seidel",A,true)) TestPassed = 
false; if(!KrylovTest("Gauss-Seidel",A,false,true)) TestPassed = false; if(!KrylovTest("Gauss-Seidel",A,true,true)) TestPassed = false; } // ================================== // // compare point and block relaxation // // ================================== // //TestPassed = TestPassed && // ComparePointAndBlock("Jacobi",A,1); TestPassed = TestPassed && ComparePointAndBlock("Jacobi",A,10); //TestPassed = TestPassed && //ComparePointAndBlock("symmetric Gauss-Seidel",A,1); TestPassed = TestPassed && ComparePointAndBlock("symmetric Gauss-Seidel",A,10); if (!SymmetricGallery) { //TestPassed = TestPassed && //ComparePointAndBlock("Gauss-Seidel",A,1); TestPassed = TestPassed && ComparePointAndBlock("Gauss-Seidel",A,10); } // ============================ // // verify effect of # of blocks // // ============================ // { int Iters4, Iters8, Iters16; Iters4 = CompareBlockSizes("Jacobi",A,4); Iters8 = CompareBlockSizes("Jacobi",A,8); Iters16 = CompareBlockSizes("Jacobi",A,16); if ((Iters16 > Iters8) && (Iters8 > Iters4)) { if (verbose) cout << "Test passed" << endl; } else { if (verbose) cout << "TEST FAILED!" << endl; TestPassed = TestPassed && false; } } // ================================== // // verify effect of overlap in Jacobi // // ================================== // { int Iters0, Iters2, Iters4; Iters0 = CompareBlockOverlap(A,0); Iters2 = CompareBlockOverlap(A,2); Iters4 = CompareBlockOverlap(A,4); if ((Iters4 < Iters2) && (Iters2 < Iters0)) { if (verbose) cout << "Test passed" << endl; } else { if (verbose) cout << "TEST FAILED!" << endl; TestPassed = TestPassed && false; } } // ============ // // final output // // ============ // if (!TestPassed) { cout << "Test `TestRelaxation.exe' failed!" << endl; exit(EXIT_FAILURE); } #ifdef HAVE_MPI MPI_Finalize(); #endif cout << endl; cout << "Test `TestRelaxation.exe' passed!" << endl; cout << endl; return(EXIT_SUCCESS); }
// Coupled two-region 1-D convection-diffusion driver (example_yeckel).
// Builds two ConvDiff_PDE regions joined at an interface, registers them
// with a Problem_Manager, solves the coupled system with the method chosen
// on the command line (matrix-free, Schur-based, or loose coupling), and
// compares each numerical solution against the analytic one.
int main(int argc, char *argv[]) {
  // Initialize MPI
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
#endif

  // Create a communicator for Epetra objects
#ifdef HAVE_MPI
  Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
  Epetra_SerialComm Comm;
#endif

  Teuchos::CommandLineProcessor clp( false );

  // Default run-time options that can be changed from the command line
  CouplingSolveMethod method = MATRIX_FREE ;
  bool verbose = true ;
  int NumGlobalNodes = 20 ;
  int probSizeRatio = 1 ;
  bool useMatlab = false ;
  bool doOffBlocks = false ;
  std::string solvType = "seidel" ;
  // Coupling parameters
  double alpha = 0.50 ;
  double beta = 0.40 ;
  // Physical parameters
  double radiation = 5.67 ;
  double initVal = 0.995 ;
  string outputDir = "." ;
  string goldDir = "." ;

  clp.setOption<CouplingSolveMethod>( "solvemethod", &method, 4, SolveMethodValues, SolveMethodNames, "Selects the coupling method to use");
  clp.setOption( "verbose", "no-verbose", &verbose, "Verbosity on or off." );
  clp.setOption( "n", &NumGlobalNodes, "Number of elements" );
  clp.setOption( "nratio", &probSizeRatio, "Ratio of size of problem 2 to problem 1" );
  clp.setOption( "offblocks", "no-offblocks", &doOffBlocks, "Include off-diagonal blocks in preconditioning matrix" );
  clp.setOption( "matlab", "no-matlab", &useMatlab, "Use Matlab debugging engine" );
  clp.setOption( "solvType", &solvType, "Solve Type. Valid choices are: jacobi, seidel" );
  clp.setOption( "alpha", &alpha, "Interfacial coupling coefficient, alpha" );
  clp.setOption( "beta", &beta, "Interfacial coupling coefficient, beta" );
  clp.setOption( "radiation", &radiation, "Radiation source term coefficient, R" );
  clp.setOption( "initialval", &initVal, "Initial guess for solution values" );
  clp.setOption( "outputdir", &outputDir, "Directory to output mesh and results into. Default is \"./\"" );
  clp.setOption( "golddir", &goldDir, "Directory to read gold test from. Default is \"./\"" );

  Teuchos::CommandLineProcessor::EParseCommandLineReturn parse_return = clp.parse(argc,argv);
  if( parse_return != Teuchos::CommandLineProcessor::PARSE_SUCCESSFUL )
    return parse_return;

  outputDir += "/";
  goldDir += "/";

  // Create and reset the Timer
  Epetra_Time myTimer(Comm);
  double startWallTime = myTimer.WallTime();

  // Get the process ID and the total number of processors
  int MyPID = Comm.MyPID();
  int NumProc = Comm.NumProc();

  NumGlobalNodes++; // convert #elements to #nodes

  // The number of unknowns must be at least equal to the number of processors.
  if (NumGlobalNodes < NumProc) {
    cout << "numGlobalNodes = " << NumGlobalNodes << " cannot be < number of processors = " << NumProc << endl;
    exit(1);
  }

  // Begin Nonlinear Solver ************************************

  // NOTE: For now these parameters apply to all problems handled by
  // Problem_Manager. Each problem could be made to have its own
  // parameter list as well as its own convergence test(s).

  // Create the top level parameter list
  Teuchos::RCP<Teuchos::ParameterList> nlParamsPtr = Teuchos::rcp(new Teuchos::ParameterList);
  Teuchos::ParameterList& nlParams = *(nlParamsPtr.get());

  // Set the nonlinear solver method
  nlParams.set("Nonlinear Solver", "Line Search Based");

  // Set the printing parameters in the "Printing" sublist
  Teuchos::ParameterList& printParams = nlParams.sublist("Printing");
  printParams.set("MyPID", MyPID);
  printParams.set("Output Precision", 3);
  printParams.set("Output Processor", 0);
  printParams.set("Output Information",
    NOX::Utils::Warning + NOX::Utils::OuterIteration + NOX::Utils::InnerIteration + NOX::Utils::Parameters + NOX::Utils::Details + NOX::Utils::OuterIterationStatusTest + NOX::Utils::LinearSolverDetails + NOX::Utils::TestDetails );

  NOX::Utils outputUtils(printParams);

  // Sublist for line search
  Teuchos::ParameterList& searchParams = nlParams.sublist("Line Search");
  searchParams.set("Method", "Full Step");

  // Sublist for direction
  Teuchos::ParameterList& dirParams = nlParams.sublist("Direction");
  dirParams.set("Method", "Newton");
  Teuchos::ParameterList& newtonParams = dirParams.sublist("Newton");
  newtonParams.set("Forcing Term Method", "Constant");

  // Sublist for linear solver for the Newton method
  Teuchos::ParameterList& lsParams = newtonParams.sublist("Linear Solver");
  lsParams.set("Aztec Solver", "GMRES");
  lsParams.set("Max Iterations", 800);
  lsParams.set("Tolerance", 1e-4);
  lsParams.set("Output Frequency", 50);
  lsParams.set("Preconditioner", "AztecOO");

  // Create the convergence tests
  // Note: as for the parameter list, both (all) problems use the same
  // convergence test(s) for now, but each could have its own.
  Teuchos::RCP<NOX::StatusTest::NormF> absresid = Teuchos::rcp(new NOX::StatusTest::NormF(1.0e-8));
  Teuchos::RCP<NOX::StatusTest::NormUpdate> update = Teuchos::rcp(new NOX::StatusTest::NormUpdate(1.0e-5));
  Teuchos::RCP<NOX::StatusTest::Combo> converged = Teuchos::rcp(new NOX::StatusTest::Combo(NOX::StatusTest::Combo::AND));
  converged->addStatusTest(absresid);
  //converged->addStatusTest(update);
  Teuchos::RCP<NOX::StatusTest::MaxIters> maxiters = Teuchos::rcp(new NOX::StatusTest::MaxIters(500));
  Teuchos::RCP<NOX::StatusTest::FiniteValue> finiteValue = Teuchos::rcp(new NOX::StatusTest::FiniteValue);
  Teuchos::RCP<NOX::StatusTest::Combo> combo = Teuchos::rcp(new NOX::StatusTest::Combo(NOX::StatusTest::Combo::OR));
  combo->addStatusTest(converged);
  combo->addStatusTest(maxiters);
  combo->addStatusTest(finiteValue);

  // Create the Problem Manager
  Problem_Manager problemManager(Comm, false, 0, useMatlab);

  // Note that each problem could contain its own nlParams list as well as
  // its own convergence test(s).
  problemManager.registerParameters(nlParamsPtr);
  problemManager.registerStatusTest(combo);

  // Domain boundary temperatures
  double Tleft = 0.98 ;
  double Tright = 1.0 ;

  // Distinguish certain parameters needed for T1_analytic
  double peclet_1 = 9.0 ;
  double peclet_2 = 0.0 ;
  double kappa_1 = 1.0 ;
  double kappa_2 = 0.1 ;

  double T1_analytic = ConvDiff_PDE::computeAnalyticInterfaceTemp( radiation, Tleft, Tright, kappa_2, peclet_1 );

  // Create Region 1 PDE
  string myName = "Region_1" ;
  double radiation_reg1 = 0.0 ;
  double xmin = 0.0 ;
  double xmax = 1.0 ;

  ConvDiff_PDE Reg1_PDE ( Comm, peclet_1, radiation_reg1, kappa_1, alpha, xmin, xmax, Tleft, T1_analytic, NumGlobalNodes, myName );

  // Override default initialization with values we want
  Reg1_PDE.initializeSolution(initVal);

  problemManager.addProblem(Reg1_PDE);

  // Create Region 2 PDE
  myName = "Region_2" ;
  xmin = 1.0 ;
  xmax = 2.0 ;

  ConvDiff_PDE Reg2_PDE ( Comm, peclet_2, radiation, kappa_2, beta, xmin, xmax, T1_analytic, Tright, probSizeRatio*NumGlobalNodes, myName );

  // For this problem involving interfacial coupling, the problems are given control
  // over whether or not and how to construct off-block contributions to the
  // Jacobian/Preconditioner matrix. We explicitly told the problem manager to omit
  // off-blocks via the OffBlock_Manager class. Here, we inform each problem.
  Reg1_PDE.setExpandJacobian( doOffBlocks );
  Reg2_PDE.setExpandJacobian( doOffBlocks );

  // Override default initialization with values we want
  Reg2_PDE.initializeSolution(initVal);

  problemManager.addProblem(Reg2_PDE);

  problemManager.createDependency(Reg1_PDE, Reg2_PDE);
  problemManager.createDependency(Reg2_PDE, Reg1_PDE);

  problemManager.registerComplete();

  // A consistency check associated with using BroydenOperator
  // (disabled: the `if( 0 )` guard keeps this block compiled but never run)
  if( 0 ) {
    Epetra_CrsGraph maskGraph(Copy, problemManager.getCompositeSoln()->Map(), 0);

    map<int, Teuchos::RCP<GenericEpetraProblem> >::iterator problemIter = problemManager.getProblems().begin();
    map<int, Teuchos::RCP<GenericEpetraProblem> >::iterator problemLast = problemManager.getProblems().end();

    // Loop over each problem being managed and ascertain its graph as well
    // as its graph from its dependencies
    for( ; problemIter != problemLast; ++problemIter ) {
      GenericEpetraProblem & problem = *(*problemIter).second;
      int probId = problem.getId();

      // Get the indices map for copying data from this problem into
      // the composite problem
      map<int, Teuchos::RCP<Epetra_IntVector> > & problemToCmpositeIndices = problemManager.getProblemToCompositeIndices();
      Epetra_IntVector & problemIndices = *(problemToCmpositeIndices[probId]);

      // Get known dependencies on the other problem
      for( unsigned int k = 0; k < problem.getDependentProblems().size(); ++k) {
        // Get the needed objects for the depend problem
        GenericEpetraProblem & dependProblem = *(problemManager.getProblems()[problem.getDependentProblems()[k]]);
        int dependId = dependProblem.getId();
        Epetra_IntVector & dependIndices = *(problemManager.getProblemToCompositeIndices()[dependId]);

        map<int, vector<int> > offBlockIndices;
        problem.getOffBlockIndices( offBlockIndices );

        map<int, vector<int> >::iterator indIter = offBlockIndices.begin(),
                                         indIter_end = offBlockIndices.end() ;

        for( ; indIter != indIter_end; ++indIter ) {
          int compositeRow = problemIndices[(*indIter).first];
          vector<int> & colIndices = (*indIter).second;

          // Convert column indices to composite values
          for( unsigned int cols = 0; cols < colIndices.size(); ++cols )
            colIndices[cols] = dependIndices[ colIndices[cols] ];

          maskGraph.InsertGlobalIndices( compositeRow, colIndices.size(), &colIndices[0] );
        }
      }
    }

    maskGraph.FillComplete();
    cout << maskGraph << endl;

    NOX::Epetra::BroydenOperator * broydenOp = dynamic_cast<NOX::Epetra::BroydenOperator*>( problemManager.getJacobianOperator().get() );
    broydenOp->removeEntriesFromBroydenUpdate( maskGraph );
#ifdef HAVE_NOX_DEBUG
    broydenOp->outputActiveEntries();
#endif
  }

  problemManager.outputStatus(std::cout);

  cout << "\n\tAnalytic solution, T_1 = " << T1_analytic << "\n" << endl;

  // Print initial solution
  if( verbose )
    problemManager.outputSolutions( outputDir, 0 );

  // Identify the test problem
  if( outputUtils.isPrintType(NOX::Utils::TestDetails) )
    outputUtils.out() << "Starting epetra/MultiPhysics/example_yeckel.exe" << endl;

  // Identify processor information
#ifdef HAVE_MPI
  outputUtils.out() << "This test is broken in parallel." << endl;
  outputUtils.out() << "Test failed!" << endl;
  MPI_Finalize();
  return -1;
#else
  if (outputUtils.isPrintType(NOX::Utils::TestDetails))
    outputUtils.out() << "Serial Run" << endl;
#endif

  // NOTE(review): the section below duplicates the "Identify the test
  // problem" / "Identify processor information" section above verbatim;
  // in a serial build it prints the banner a second time — looks like a
  // copy/paste leftover that could be removed. Left in place to preserve
  // behavior.

  // Identify the test problem
  if( outputUtils.isPrintType(NOX::Utils::TestDetails) )
    outputUtils.out() << "Starting epetra/MultiPhysics/example_yeckel.exe" << endl;

  // Identify processor information
#ifdef HAVE_MPI
  outputUtils.out() << "This test is broken in parallel." << endl;
  outputUtils.out() << "Test failed!" << endl;
  MPI_Finalize();
  return -1;
#else
  if (outputUtils.isPrintType(NOX::Utils::TestDetails))
    outputUtils.out() << "Serial Run" << endl;
#endif

  // Solve the coupled problem
  switch( method ) {
    case MATRIX_FREE :
      problemManager.solveMF(); // Need a status test check here ....
      break;

    case SCHUR_BASED :
      problemManager.solveSchurBased();
      break;

    case LOOSE_HARDCODED :
      problemManager.solve(); // Hard-coded loose coupling
      break;

    case LOOSE_LIBRARY :
    default :
    {
      // Create the loose coupling solver manager
      Teuchos::RCP<vector<Teuchos::RCP<NOX::Solver::Generic> > > solversVec = Teuchos::rcp( new vector<Teuchos::RCP<NOX::Solver::Generic> > );
      map<int, Teuchos::RCP<NOX::Solver::Generic> >::iterator iter = problemManager.getSolvers().begin(),
                                                              iter_end = problemManager.getSolvers().end() ;
      for( ; iter_end != iter; ++iter ) {
        cout << " ........ registered Solver::Manager # " << (*iter).first << endl;
        solversVec->push_back( (*iter).second );
      }

      // Package the Problem_Manager as the DataExchange::Interface
      Teuchos::RCP<NOX::Multiphysics::DataExchange::Interface> dataExInterface = Teuchos::rcp( &problemManager, false );

      Teuchos::RCP<NOX::StatusTest::MaxIters> fixedPt_maxiters = Teuchos::rcp(new NOX::StatusTest::MaxIters(20));

      if( "jacobi" == solvType )
        nlParamsPtr->sublist("Solver Options").set("Fixed Point Iteration Type", "Jacobi");

      NOX::Multiphysics::Solver::Manager cplSolv( solversVec, dataExInterface, fixedPt_maxiters, nlParamsPtr );

      cplSolv.solve();

      // Refresh all problems with solutions from solver
      problemManager.copyAllGroupXtoProblems();

      // Reset all solver groups to force recomputation of residuals
      problemManager.resetAllCurrentGroupX();
    }
  }

  // Output timing info
  if( 0 == MyPID )
    cout << "\nTimings :\n\tWallTime --> " << myTimer.WallTime() - startWallTime << " sec." << "\n\tElapsedTime --> " << myTimer.ElapsedTime() << " sec." << endl << endl;

  if( verbose )
    problemManager.outputSolutions( outputDir, 1 );

  // Create a TestCompare class
  int status = 0;
  NOX::TestCompare tester( outputUtils.out(), outputUtils );
  double abstol = 1.e-4;
  double reltol = 1.e-4 ;

  // Compare each region's numerical solution against its exact solution.
  map<int, Teuchos::RCP<GenericEpetraProblem> >::iterator iter = problemManager.getProblems().begin(),
                                                          iter_end = problemManager.getProblems().end() ;
  for( ; iter_end != iter; ++iter ) {
    ConvDiff_PDE & problem = dynamic_cast<ConvDiff_PDE &>( *(*iter).second );
    string msg = "Numerical-to-Exact Solution comparison for problem \"" + problem.getName() + "\"";

    // Need NOX::Epetra::Vectors for tests
    NOX::Epetra::Vector numerical ( problem.getSolution() , NOX::Epetra::Vector::CreateView );
    NOX::Epetra::Vector analytic ( problem.getExactSolution(), NOX::Epetra::Vector::CreateView );

    status += tester.testVector( numerical, analytic, reltol, abstol, msg );
  }

  // Summarize test results
  if( status == 0 )
    outputUtils.out() << "Test passed!" << endl;
  else
    outputUtils.out() << "Test failed!" << endl;

#ifdef HAVE_MPI
  MPI_Finalize() ;
#endif

  return 0 ;
}
// main driver int main(int argc, char *argv[]) { #ifdef HAVE_MPI MPI_Init(&argc, &argv); Epetra_MpiComm Comm(MPI_COMM_WORLD); #else Epetra_SerialComm Comm; #endif if (Comm.NumProc() != 2) { #ifdef HAVE_MPI MPI_Finalize(); #endif return(0); } int NumMyElements = 0; // NODES assigned to this processor int NumMyExternalElements = 0; // nodes used by this proc, but not hosted int NumMyTotalElements = 0; int FE_NumMyElements = 0; // TRIANGLES assigned to this processor int * MyGlobalElements = 0; // nodes assigned to this processor Epetra_IntSerialDenseMatrix T; // store the grid connectivity int MyPID=Comm.MyPID(); cout << MyPID << endl; switch( MyPID ) { case 0: NumMyElements = 3; NumMyExternalElements = 2; NumMyTotalElements = NumMyElements + NumMyExternalElements; FE_NumMyElements = 3; MyGlobalElements = new int[NumMyTotalElements]; MyGlobalElements[0] = 0; MyGlobalElements[1] = 4; MyGlobalElements[2] = 3; MyGlobalElements[3] = 1; MyGlobalElements[4] = 5; break; case 1: NumMyElements = 3; NumMyExternalElements = 2; NumMyTotalElements = NumMyElements + NumMyExternalElements; FE_NumMyElements = 3; MyGlobalElements = new int[NumMyTotalElements]; MyGlobalElements[0] = 1; MyGlobalElements[1] = 2; MyGlobalElements[2] = 5; MyGlobalElements[3] = 0; MyGlobalElements[4] = 4; break; } // build Map corresponding to update Epetra_Map Map(-1,NumMyElements,MyGlobalElements,0,Comm); // vector containing coordinates BEFORE exchanging external nodes Epetra_Vector CoordX_noExt(Map); Epetra_Vector CoordY_noExt(Map); switch( MyPID ) { case 0: T.Shape(3,FE_NumMyElements); // fill x-coordinates CoordX_noExt[0] = 0.0; CoordX_noExt[1] = 1.0; CoordX_noExt[2] = 0.0; // fill y-coordinates CoordY_noExt[0] = 0.0; CoordY_noExt[1] = 1.0; CoordY_noExt[2] = 1.0; // fill connectivity T(0,0) = 0; T(0,1) = 4; T(0,2) = 3; T(1,0) = 0; T(1,1) = 1; T(1,2) = 4; T(2,0) = 4; T(2,1) = 1; T(2,2) = 5; break; case 1: T.Shape(3,FE_NumMyElements); // fill x-coordinates CoordX_noExt[0] = 1.0; CoordX_noExt[1] = 2.0; 
CoordX_noExt[2] = 2.0; // fill y-coordinates CoordY_noExt[0] = 0.0; CoordY_noExt[1] = 0.0; CoordY_noExt[2] = 1.0; // fill connectivity T(0,0) = 0; T(0,1) = 1; T(0,2) = 4; T(1,0) = 1; T(1,1) = 5; T(1,2) = 4; T(2,0) = 1; T(2,1) = 2; T(2,2) = 5; break; } // - - - - - - - - - - - - - - - - - - - - // // E X T E R N A L N O D E S S E T U P // // - - - - - - - - - - - - - - - - - - - - // // build target map to exchange the valus of external nodes Epetra_Map TargetMap(-1,NumMyTotalElements, MyGlobalElements, 0, Comm); // !@# rename Map -> SourceMap ????? Epetra_Import Importer(TargetMap,Map); Epetra_Vector CoordX(TargetMap); Epetra_Vector CoordY(TargetMap); CoordX.Import(CoordX_noExt,Importer,Insert); CoordY.Import(CoordY_noExt,Importer,Insert); // now CoordX_noExt and CoordY_noExt are no longer required // NOTE: better to construct CoordX and CoordY as MultiVector // - - - - - - - - - - - - // // M A T R I X S E T U P // // - - - - - - - - - - - - // // build the CRS matrix corresponding to the grid // some vectors are allocated const int MaxNnzRow = 5; Epetra_CrsMatrix A(Copy,Map,MaxNnzRow); int Element, MyRow, GlobalRow, GlobalCol, i, j, k; Epetra_IntSerialDenseMatrix Struct; // temp to create the matrix connectivity Struct.Shape(NumMyElements,MaxNnzRow); for( i=0 ; i<NumMyElements ; ++i ) for( j=0 ; j<MaxNnzRow ; ++j ) Struct(i,j) = -1; // cycle over all the finite elements for( Element=0 ; Element<FE_NumMyElements ; ++Element ) { // cycle over each row for( i=0 ; i<3 ; ++i ) { // get the global and local number of this row GlobalRow = T(Element,i); MyRow = A.LRID(GlobalRow); if( MyRow != -1 ) { // only rows stored on this proc // cycle over the columns for( j=0 ; j<3 ; ++j ) { // get the global number only of this column GlobalCol = T(Element,j); // look if GlobalCol was already put in Struct for( k=0 ; k<MaxNnzRow ; ++k ) { if( Struct(MyRow,k) == GlobalCol || Struct(MyRow,k) == -1 ) break; } if( Struct(MyRow,k) == -1 ) { // new entry Struct(MyRow,k) = GlobalCol; } 
else if( Struct(MyRow,k) != GlobalCol ) { // maybe not enough space has beenn allocated cerr << "ERROR: not enough space for element " << GlobalRow << "," << GlobalCol << endl; return( 0 ); } } } } } int * Indices = new int [MaxNnzRow]; double * Values = new double [MaxNnzRow]; for( i=0 ; i<MaxNnzRow ; ++i ) Values[i] = 0.0; // now use Struct to fill build the matrix structure for( int Row=0 ; Row<NumMyElements ; ++Row ) { int Length = 0; for( int j=0 ; j<MaxNnzRow ; ++j ) { if( Struct(Row,j) == -1 ) break; Indices[Length] = Struct(Row,j); Length++; } GlobalRow = MyGlobalElements[Row]; A.InsertGlobalValues(GlobalRow, Length, Values, Indices); } // replace global numbering with local one in T for( int Element=0 ; Element<FE_NumMyElements ; ++Element ) { for( int i=0 ; i<3 ; ++i ) { int global = T(Element,i); int local = find(MyGlobalElements,NumMyTotalElements, global); if( global == -1 ) { cerr << "ERROR\n"; return( EXIT_FAILURE ); } T(Element,i) = local; } } // - - - - - - - - - - - - - - // // M A T R I X F I L L - I N // // - - - - - - - - - - - - - - // // room for the local matrix Epetra_SerialDenseMatrix Ke; Ke.Shape(3,3); // now fill the matrix for( int Element=0 ; Element<FE_NumMyElements ; ++Element ) { // variables used inside int GlobalRow; int MyRow; int GlobalCol; double x_triangle[3]; double y_triangle[3]; // get the spatial coordinate of each local node for( int i=0 ; i<3 ; ++i ) { MyRow = T(Element,i); y_triangle[i] = CoordX[MyRow]; x_triangle[i] = CoordY[MyRow]; } // compute the local matrix for Element compute_loc_matrix( x_triangle, y_triangle,Ke ); // insert it in the global one // cycle over each row for( int i=0 ; i<3 ; ++i ) { // get the global and local number of this row MyRow = T(Element,i); if( MyRow < NumMyElements ) { for( int j=0 ; j<3 ; ++j ) { // get global column number GlobalRow = MyGlobalElements[MyRow]; GlobalCol = MyGlobalElements[T(Element,j)]; A.SumIntoGlobalValues(GlobalRow,1,&(Ke(i,j)),&GlobalCol); } } } } A.FillComplete(); 
// - - - - - - - - - - - - - // // R H S & S O L U T I O N // // - - - - - - - - - - - - - // Epetra_Vector x(Map), b(Map); x.Random(); b.PutScalar(0.0); // Solution can be obtained using Aztecoo // free memory before leaving delete MyGlobalElements; delete Indices; delete Values; #ifdef HAVE_MPI MPI_Finalize(); #endif return( EXIT_SUCCESS ); } /* main */
// ------------------------------------------------------------------------
// --------------------------- Main Program -----------------------------
// ------------------------------------------------------------------------
// Transient 1-D NOX/Epetra test driver: builds a TransientInterface
// problem on [-20, 20], runs 10 implicit time steps (dt = 0.1), solving a
// Newton/GMRES nonlinear system at each step, and regression-checks the
// final linear- and nonlinear-iteration counts.
int main(int argc, char *argv[]) {
  // Initialize MPI
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
#endif

  // Create a communicator for Epetra objects
#ifdef HAVE_MPI
  Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
  Epetra_SerialComm Comm;
#endif

  // Get the process ID and the total number of processors
  int MyPID = Comm.MyPID();
  int NumProc = Comm.NumProc();

  bool verbose = false;
  // Check for verbose output ("-v" as the first argument)
  if (argc>1)
    if (argv[1][0]=='-' && argv[1][1]=='v')
      verbose = true;

  // Get the number of elements from the command line
  // (position depends on whether "-v" was supplied; default 101)
  int NumGlobalElements = 0;
  if ((argc > 2) && (verbose))
    NumGlobalElements = atoi(argv[2]) + 1;
  else if ((argc > 1) && (!verbose))
    NumGlobalElements = atoi(argv[1]) + 1;
  else
    NumGlobalElements = 101;

  bool success = false;
  try {
    // The number of unknowns must be at least equal to the
    // number of processors.
    if (NumGlobalElements < NumProc) {
      std::cout << "numGlobalBlocks = " << NumGlobalElements << " cannot be < number of processors = " << NumProc << std::endl;
      throw "NOX Error";
    }

    // Create the interface between NOX and the application
    // This object is derived from NOX::Epetra::Interface
    Teuchos::RCP<TransientInterface> interface = Teuchos::rcp(new TransientInterface(NumGlobalElements, Comm, -20.0, 20.0));
    double dt = 0.10;
    interface->setdt(dt);

    // Set the PDE nonlinear coefficient for this problem
    interface->setPDEfactor(1.0);

    // Get the vector from the Problem
    Teuchos::RCP<Epetra_Vector> soln = interface->getSolution();
    NOX::Epetra::Vector noxSoln(soln, NOX::Epetra::Vector::CreateView);

    // Begin Nonlinear Solver ************************************

    // Create the top level parameter list
    Teuchos::RCP<Teuchos::ParameterList> nlParamsPtr = Teuchos::rcp(new Teuchos::ParameterList);
    Teuchos::ParameterList& nlParams = *(nlParamsPtr.get());

    // Set the nonlinear solver method
    nlParams.set("Nonlinear Solver", "Line Search Based");

    // Set the printing parameters in the "Printing" sublist
    Teuchos::ParameterList& printParams = nlParams.sublist("Printing");
    printParams.set("MyPID", MyPID);
    printParams.set("Output Precision", 3);
    printParams.set("Output Processor", 0);
    if (verbose)
      printParams.set("Output Information",
        NOX::Utils::OuterIteration + NOX::Utils::OuterIterationStatusTest + NOX::Utils::InnerIteration + NOX::Utils::LinearSolverDetails + NOX::Utils::Parameters + NOX::Utils::Details + NOX::Utils::Warning + NOX::Utils::Debug + NOX::Utils::Error);
    else
      printParams.set("Output Information", NOX::Utils::Error);

    // Create a print class for controlling output below
    NOX::Utils utils(printParams);

    // Sublist for line search
    Teuchos::ParameterList& searchParams = nlParams.sublist("Line Search");
    searchParams.set("Method", "Full Step");

    // Sublist for direction
    Teuchos::ParameterList& dirParams = nlParams.sublist("Direction");
    dirParams.set("Method", "Newton");
    Teuchos::ParameterList& newtonParams = dirParams.sublist("Newton");
    newtonParams.set("Forcing Term Method", "Constant");

    // Sublist for linear solver for the Newton method
    Teuchos::ParameterList& lsParams = newtonParams.sublist("Linear Solver");
    lsParams.set("Aztec Solver", "GMRES");
    lsParams.set("Max Iterations", 800);
    lsParams.set("Tolerance", 1e-4);
    lsParams.set("Preconditioner", "AztecOO");

    // Create all possible Epetra_Operators.
    // 1. User supplied (Epetra_RowMatrix)
    Teuchos::RCP<Epetra_RowMatrix> Analytic = interface->getJacobian();
    // 2. Matrix-Free (Epetra_Operator)

    // Four constructors to create the Linear System
    Teuchos::RCP<NOX::Epetra::Interface::Required> iReq = interface;
    Teuchos::RCP<NOX::Epetra::Interface::Jacobian> iJac = interface;
    Teuchos::RCP<NOX::Epetra::LinearSystemAztecOO> linSys = Teuchos::rcp(new NOX::Epetra::LinearSystemAztecOO(printParams, lsParams, iReq, iJac, Analytic, noxSoln));

    // Create the Group
    Teuchos::RCP<NOX::Epetra::Group> grpPtr = Teuchos::rcp(new NOX::Epetra::Group(printParams, iReq, noxSoln, linSys));
    NOX::Epetra::Group& grp = *(grpPtr.get());

    // Create the convergence tests
    Teuchos::RCP<NOX::StatusTest::NormF> absresid = Teuchos::rcp(new NOX::StatusTest::NormF(1.0e-8));
    Teuchos::RCP<NOX::StatusTest::NormF> relresid = Teuchos::rcp(new NOX::StatusTest::NormF(grp, 1.0e-2));
    Teuchos::RCP<NOX::StatusTest::NormUpdate> update = Teuchos::rcp(new NOX::StatusTest::NormUpdate(1.0e-5));
    Teuchos::RCP<NOX::StatusTest::NormWRMS> wrms = Teuchos::rcp(new NOX::StatusTest::NormWRMS(1.0e-2, 1.0e-8));
    Teuchos::RCP<NOX::StatusTest::Combo> converged = Teuchos::rcp(new NOX::StatusTest::Combo(NOX::StatusTest::Combo::AND));
    converged->addStatusTest(absresid);
    converged->addStatusTest(relresid);
    converged->addStatusTest(wrms);
    converged->addStatusTest(update);
    Teuchos::RCP<NOX::StatusTest::MaxIters> maxiters = Teuchos::rcp(new NOX::StatusTest::MaxIters(20));
    Teuchos::RCP<NOX::StatusTest::FiniteValue> fv = Teuchos::rcp(new NOX::StatusTest::FiniteValue);
    Teuchos::RCP<NOX::StatusTest::Combo> combo = Teuchos::rcp(new NOX::StatusTest::Combo(NOX::StatusTest::Combo::OR));
    combo->addStatusTest(fv);
    combo->addStatusTest(converged);
    combo->addStatusTest(maxiters);

    // Initialize time integration parameters
    int maxTimeSteps = 10;
    int timeStep = 0;
    double time = 0.0;

#ifdef PRINT_RESULTS_TO_FILES
    // Print initial solution
    // NOTE(review): soln is a Teuchos::RCP, so `soln.Map()` / `soln[i]`
    // below would not compile if PRINT_RESULTS_TO_FILES were defined —
    // presumably should be soln->Map() and (*soln)[i]; confirm before
    // enabling this macro.
    char file_name[25];
    FILE *ifp;
    int NumMyElements = soln.Map().NumMyElements();
    (void) sprintf(file_name, "output.%d_%d",MyPID,timeStep);
    ifp = fopen(file_name, "w");
    for (int i=0; i<NumMyElements; i++)
      fprintf(ifp, "%d %E %E\n", soln.Map().MinMyGID()+i, interface->getMesh()[i], soln[i]);
    fclose(ifp);
#endif

    // Create the solver
    Teuchos::RCP<NOX::Solver::Generic> solver = NOX::Solver::buildSolver(grpPtr, combo, nlParamsPtr);

    // Overall status flag
    int ierr = 0;

    // Time integration loop
    while(timeStep < maxTimeSteps) {
      timeStep++;
      time += dt;

      utils.out() << "Time Step: " << timeStep << ",\tTime: " << time << std::endl;

      NOX::StatusTest::StatusType status = solver->solve();

      // Check for convergence
      if (status != NOX::StatusTest::Converged) {
        ierr++;
        if (utils.isPrintType(NOX::Utils::Error))
          utils.out() << "Nonlinear solver failed to converge!" << std::endl;
      }

      // Get the Epetra_Vector with the final solution from the solver
      const NOX::Epetra::Group& finalGroup = dynamic_cast<const NOX::Epetra::Group&>(solver->getSolutionGroup());
      const Epetra_Vector& finalSolution = (dynamic_cast<const NOX::Epetra::Vector&>(finalGroup.getX())).getEpetraVector();
      //Epetra_Vector& exactSolution = interface->getExactSoln(time);

      // End Nonlinear Solver **************************************

#ifdef PRINT_RESULTS_TO_FILES
      // Print solution
      // NOTE(review): `exactSolution` is declared only in the
      // commented-out line above, yet referenced here — this section
      // would not compile if the macro were defined; confirm intent.
      (void) sprintf(file_name, "output.%d_%d",MyPID,timeStep);
      ifp = fopen(file_name, "w");
      for (int i=0; i<NumMyElements; i++)
        fprintf(ifp, "%d %E %E %E\n", soln.Map().MinMyGID()+i, interface->getMesh()[i], finalSolution[i],exactSolution[i]);
      fclose(ifp);
#endif

      // Push the converged state back into the application and prime the
      // solver for the next time step.
      interface->reset(finalSolution);
      grp.setX(finalSolution);
      solver->reset(grp.getX(), combo);
      grp.computeF();
    } // end time step while loop

    // Output the parameter list
    if (utils.isPrintType(NOX::Utils::Parameters)) {
      utils.out() << std::endl << "Final Parameters" << std::endl << "****************" << std::endl;
      solver->getList().print(utils.out());
      utils.out() << std::endl;
    }

    // Test for convergence

#ifndef HAVE_MPI
    // 1. Linear solve iterations on final time step (30)- SERIAL TEST ONLY!
    //    The number of linear iterations changes with # of procs.
    if (const_cast<Teuchos::ParameterList&>(solver->getList()).sublist("Direction").sublist("Newton").sublist("Linear Solver").sublist("Output").get("Total Number of Linear Iterations",0) != 30) {
      ierr = 1;
    }
#endif
    // 2. Nonlinear solve iterations on final time step (3)
    if (const_cast<Teuchos::ParameterList&>(solver->getList()).sublist("Output").get("Nonlinear Iterations", 0) != 3)
      ierr = 2;

    success = ierr==0;

    // Summarize test results
    if (success)
      utils.out() << "Test passed!" << std::endl;
    else
      utils.out() << "Test failed!" << std::endl;
  }
  TEUCHOS_STANDARD_CATCH_STATEMENTS(verbose, std::cerr, success);

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return ( success ? EXIT_SUCCESS : EXIT_FAILURE );
}
int main(int argc, char *argv[]) { int ierr = 0; // Initialize MPI #ifdef HAVE_MPI MPI_Init(&argc,&argv); #endif // Create a communicator for Epetra objects #ifdef HAVE_MPI Epetra_MpiComm Comm( MPI_COMM_WORLD ); #else Epetra_SerialComm Comm; #endif // Get the process ID and the total number of processors int MyPID = Comm.MyPID(); int NumProc = Comm.NumProc(); // Check verbosity level bool verbose = false; if (argc > 1) if (argv[1][0]=='-' && argv[1][1]=='v') verbose = true; // Get the number of elements from the command line int NumGlobalElements = 0; if ((argc > 2) && (verbose)) NumGlobalElements = atoi(argv[2]) + 1; else if ((argc > 1) && (!verbose)) NumGlobalElements = atoi(argv[1]) + 1; else NumGlobalElements = 101; bool success = false; try { // The number of unknowns must be at least equal to the // number of processors. if (NumGlobalElements < NumProc) { std::cout << "Error: numGlobalBlocks = " << NumGlobalElements << " cannot be < number of processors = " << NumProc << std::endl; throw; } // Create the FiniteElementProblem class. This creates all required // Epetra objects for the problem and allows calls to the // function (RHS) and Jacobian evaluation routines. 
FiniteElementProblem Problem(NumGlobalElements, Comm); // Get the vector from the Problem Teuchos::RCP<Epetra_Vector> soln = Problem.getSolution(); NOX::Epetra::Vector noxSoln(soln, NOX::Epetra::Vector::CreateView); // Initialize Solution soln->PutScalar(1.0); // Begin Nonlinear Solver ************************************ // Create the top level parameter list Teuchos::RCP<Teuchos::ParameterList> nlParamsPtr = Teuchos::rcp(new Teuchos::ParameterList); Teuchos::ParameterList& nlParams = *(nlParamsPtr.get()); // Set the nonlinear solver method nlParams.set("Nonlinear Solver", "Line Search Based"); //nlParams.set("Nonlinear Solver", "Trust Region Based"); // Set the printing parameters in the "Printing" sublist Teuchos::ParameterList& printParams = nlParams.sublist("Printing"); printParams.set("MyPID", MyPID); printParams.set("Output Precision", 3); printParams.set("Output Processor", 0); if (verbose) printParams.set("Output Information", NOX::Utils::OuterIteration + NOX::Utils::OuterIterationStatusTest + NOX::Utils::LinearSolverDetails + NOX::Utils::InnerIteration + NOX::Utils::Parameters + NOX::Utils::Details + NOX::Utils::Warning); else printParams.set("Output Information", NOX::Utils::Error + NOX::Utils::TestDetails); // Sublist for line search Teuchos::ParameterList& searchParams = nlParams.sublist("Line Search"); searchParams.set("Method", "Full Step"); // Sublist for direction Teuchos::ParameterList& dirParams = nlParams.sublist("Direction"); dirParams.set("Method", "Newton"); Teuchos::ParameterList& newtonParams = dirParams.sublist("Newton"); newtonParams.set("Forcing Term Method", "Constant"); // Sublist for linear solver for the Newton method Teuchos::ParameterList& lsParams = newtonParams.sublist("Linear Solver"); lsParams.set("Aztec Solver", "GMRES"); lsParams.set("Max Iterations", 800); lsParams.set("Tolerance", 1e-4); lsParams.set("Output Frequency", 1); lsParams.set("Preconditioner", "Ifpack"); lsParams.set("Max Age Of Prec", 5); // Create the interface 
between the test problem and the nonlinear solver // This is created by the user using inheritance of the abstract base class: // NLS_PetraGroupInterface Teuchos::RCP<Problem_Interface> interface = Teuchos::rcp(new Problem_Interface(Problem)); const Epetra_CrsGraph & G=*Problem.getGraph(); // Create the Epetra_RowMatrix using Finite Difference with Coloring Teuchos::ParameterList isorParamList; // Teuchos::ParameterList& zoltanParamList = isorParamList.sublist("ZOLTAN"); // zoltanParamList.set("DISTANCE","2"); Isorropia::Epetra::Colorer isorColorer( (Teuchos::RCP<const Epetra_CrsGraph>) Problem.getGraph(), isorParamList, false); Teuchos::RCP<Epetra_MapColoring> colorMap = isorColorer.generateColMapColoring(); // Build the update coloring Teuchos::RCP<Epetra_CrsGraph> subgraph=ExtractSubGraph(Problem.getGraph(),Problem.NumLocalNonLinearUnknowns(),Problem.getNonLinearUnknowns()); Isorropia::Epetra::Colorer updateIsorColorer((Teuchos::RCP<const Epetra_CrsGraph>)subgraph, isorParamList, false); // Teuchos::RCP<Epetra_MapColoring> updateColorMap_limited=updateIsorColorer.generateColMapColoring(); Teuchos::RCP<Epetra_MapColoring> updateColorMap=updateIsorColorer.generateColMapColoring(); // Explictly recolor dummies to color zero int Nlid=Problem.NumLocalNonLinearUnknowns(); const int *lids=Problem.getNonLinearUnknowns(); Epetra_IntVector rIDX(G.RowMap()); for(int i=0;i<Nlid;i++) rIDX[lids[i]]=1; Epetra_IntVector *cIDX; if(G.RowMap().SameAs(G.ColMap())) cIDX=&rIDX; else{ cIDX=new Epetra_IntVector(G.ColMap(),true); cIDX->Import(rIDX,*G.Importer(),Insert); } for(int i=0;i<G.NumMyCols();i++) if((*cIDX)[i]==0) (*updateColorMap)[i]=0; if(!G.RowMap().SameAs(G.ColMap())) delete cIDX; int base_colors=colorMap->MaxNumColors(),update_colors=updateColorMap->MaxNumColors(); if(!MyPID) std::cout<<"First time colors = "<<base_colors<<" Update colors = "<<update_colors-1<<endl; // Use this constructor to create the graph numerically as a means of timing // the old way of looping 
without colors : // NOX::Epetra::FiniteDifferenceColoring A(interface, soln, // *colorMap, *columns); // Or use this as the standard way of using finite differencing with coloring // where the application is responsible for creating the matrix graph // beforehand, ie as is done in Problem. Teuchos::RCP<NOX::Epetra::Interface::Required> iReq = interface; NOX::Epetra::Vector noxSoln2(noxSoln); Teuchos::RCP<NOX::Epetra::LinearSystemAztecOO> linSys = coloring_using_fdc(printParams,lsParams,interface,noxSoln,Problem,colorMap); char name_fdc[] = "fdc"; ierr=solve_system(name_fdc,Comm,printParams,nlParamsPtr,iReq,noxSoln,Problem,linSys,verbose); Teuchos::RCP<NOX::Epetra::LinearSystemAztecOO> linSys2 = coloring_using_fdcwu(printParams,lsParams,interface,noxSoln2,Problem,colorMap,updateColorMap); char name_fdcwu[] = "fdcwu"; ierr=solve_system(name_fdcwu,Comm,printParams,nlParamsPtr,iReq,noxSoln2,Problem,linSys2,verbose); success = ierr==0; } TEUCHOS_STANDARD_CATCH_STATEMENTS(verbose, std::cerr, success); #ifdef HAVE_MPI MPI_Finalize() ; #endif // Final return value (0 = successfull, non-zero = failure) return ( success ? EXIT_SUCCESS : EXIT_FAILURE ); }
int main(int argc, char *argv[]) { int ierr = 0; double elapsed_time; double total_flops; double MFLOPs; #ifdef EPETRA_MPI // Initialize MPI MPI_Init(&argc,&argv); Epetra_MpiComm comm( MPI_COMM_WORLD ); #else Epetra_SerialComm comm; #endif bool verbose = false; bool summary = false; // Check if we should print verbose results to standard out if (argc>6) if (argv[6][0]=='-' && argv[6][1]=='v') verbose = true; // Check if we should print verbose results to standard out if (argc>6) if (argv[6][0]=='-' && argv[6][1]=='s') summary = true; if(argc < 6) { cerr << "Usage: " << argv[0] << " NumNodesX NumNodesY NumProcX NumProcY NumPoints [-v|-s]" << endl << "where:" << endl << "NumNodesX - Number of mesh nodes in X direction per processor" << endl << "NumNodesY - Number of mesh nodes in Y direction per processor" << endl << "NumProcX - Number of processors to use in X direction" << endl << "NumProcY - Number of processors to use in Y direction" << endl << "NumPoints - Number of points to use in stencil (5, 9 or 25 only)" << endl << "-v|-s - (Optional) Run in verbose mode if -v present or summary mode if -s present" << endl << " NOTES: NumProcX*NumProcY must equal the number of processors used to run the problem." << endl << endl << " Serial example:" << endl << argv[0] << " 16 12 1 1 25 -v" << endl << " Run this program in verbose mode on 1 processor using a 16 X 12 grid with a 25 point stencil."<< endl <<endl << " MPI example:" << endl << "mpirun -np 32 " << argv[0] << " 10 12 4 8 9 -v" << endl << " Run this program in verbose mode on 32 processors putting a 10 X 12 subgrid on each processor using 4 processors "<< endl << " in the X direction and 8 in the Y direction. 
Total grid size is 40 points in X and 96 in Y with a 9 point stencil."<< endl << endl; return(1); } //char tmp; //if (comm.MyPID()==0) cout << "Press any key to continue..."<< endl; //if (comm.MyPID()==0) cin >> tmp; //comm.Barrier(); comm.SetTracebackMode(0); // This should shut down any error traceback reporting if (verbose && comm.MyPID()==0) cout << Epetra_Version() << endl << endl; if (summary && comm.MyPID()==0) { if (comm.NumProc()==1) cout << Epetra_Version() << endl << endl; else cout << endl << endl; // Print two blank line to keep output columns lined up } if (verbose) cout << comm <<endl; // Redefine verbose to only print on PE 0 if (verbose && comm.MyPID()!=0) verbose = false; if (summary && comm.MyPID()!=0) summary = false; int numNodesX = atoi(argv[1]); int numNodesY = atoi(argv[2]); int numProcsX = atoi(argv[3]); int numProcsY = atoi(argv[4]); int numPoints = atoi(argv[5]); if (verbose || (summary && comm.NumProc()==1)) { cout << " Number of local nodes in X direction = " << numNodesX << endl << " Number of local nodes in Y direction = " << numNodesY << endl << " Number of global nodes in X direction = " << numNodesX*numProcsX << endl << " Number of global nodes in Y direction = " << numNodesY*numProcsY << endl << " Number of local nonzero entries = " << numNodesX*numNodesY*numPoints << endl << " Number of global nonzero entries = " << numNodesX*numNodesY*numPoints*numProcsX*numProcsY << endl << " Number of Processors in X direction = " << numProcsX << endl << " Number of Processors in Y direction = " << numProcsY << endl << " Number of Points in stencil = " << numPoints << endl << endl; } // Print blank line to keep output columns lined up if (summary && comm.NumProc()>1) cout << endl << endl << endl << endl << endl << endl << endl << endl<< endl << endl; if (numProcsX*numProcsY!=comm.NumProc()) { cerr << "Number of processors = " << comm.NumProc() << endl << " is not the product of " << numProcsX << " and " << numProcsY << endl << endl; return(1); 
} if (numPoints!=5 && numPoints!=9 && numPoints!=25) { cerr << "Number of points specified = " << numPoints << endl << " is not 5, 9, 25" << endl << endl; return(1); } if (numNodesX*numNodesY<=0) { cerr << "Product of number of nodes is <= zero" << endl << endl; return(1); } Epetra_IntSerialDenseVector Xoff, XLoff, XUoff; Epetra_IntSerialDenseVector Yoff, YLoff, YUoff; if (numPoints==5) { // Generate a 5-point 2D Finite Difference matrix Xoff.Size(5); Yoff.Size(5); Xoff[0] = -1; Xoff[1] = 1; Xoff[2] = 0; Xoff[3] = 0; Xoff[4] = 0; Yoff[0] = 0; Yoff[1] = 0; Yoff[2] = 0; Yoff[3] = -1; Yoff[4] = 1; // Generate a 2-point 2D Lower triangular Finite Difference matrix XLoff.Size(2); YLoff.Size(2); XLoff[0] = -1; XLoff[1] = 0; YLoff[0] = 0; YLoff[1] = -1; // Generate a 3-point 2D upper triangular Finite Difference matrix XUoff.Size(3); YUoff.Size(3); XUoff[0] = 0; XUoff[1] = 1; XUoff[2] = 0; YUoff[0] = 0; YUoff[1] = 0; YUoff[2] = 1; } else if (numPoints==9) { // Generate a 9-point 2D Finite Difference matrix Xoff.Size(9); Yoff.Size(9); Xoff[0] = -1; Xoff[1] = 0; Xoff[2] = 1; Yoff[0] = -1; Yoff[1] = -1; Yoff[2] = -1; Xoff[3] = -1; Xoff[4] = 0; Xoff[5] = 1; Yoff[3] = 0; Yoff[4] = 0; Yoff[5] = 0; Xoff[6] = -1; Xoff[7] = 0; Xoff[8] = 1; Yoff[6] = 1; Yoff[7] = 1; Yoff[8] = 1; // Generate a 5-point lower triangular 2D Finite Difference matrix XLoff.Size(5); YLoff.Size(5); XLoff[0] = -1; XLoff[1] = 0; Xoff[2] = 1; YLoff[0] = -1; YLoff[1] = -1; Yoff[2] = -1; XLoff[3] = -1; XLoff[4] = 0; YLoff[3] = 0; YLoff[4] = 0; // Generate a 4-point upper triangular 2D Finite Difference matrix XUoff.Size(4); YUoff.Size(4); XUoff[0] = 1; YUoff[0] = 0; XUoff[1] = -1; XUoff[2] = 0; XUoff[3] = 1; YUoff[1] = 1; YUoff[2] = 1; YUoff[3] = 1; } else { // Generate a 25-point 2D Finite Difference matrix Xoff.Size(25); Yoff.Size(25); int xi = 0, yi = 0; int xo = -2, yo = -2; Xoff[xi++] = xo++; Xoff[xi++] = xo++; Xoff[xi++] = xo++; Xoff[xi++] = xo++; Xoff[xi++] = xo++; Yoff[yi++] = yo ; Yoff[yi++] = yo ; 
Yoff[yi++] = yo ; Yoff[yi++] = yo ; Yoff[yi++] = yo ; xo = -2, yo++; Xoff[xi++] = xo++; Xoff[xi++] = xo++; Xoff[xi++] = xo++; Xoff[xi++] = xo++; Xoff[xi++] = xo++; Yoff[yi++] = yo ; Yoff[yi++] = yo ; Yoff[yi++] = yo ; Yoff[yi++] = yo ; Yoff[yi++] = yo ; xo = -2, yo++; Xoff[xi++] = xo++; Xoff[xi++] = xo++; Xoff[xi++] = xo++; Xoff[xi++] = xo++; Xoff[xi++] = xo++; Yoff[yi++] = yo ; Yoff[yi++] = yo ; Yoff[yi++] = yo ; Yoff[yi++] = yo ; Yoff[yi++] = yo ; xo = -2, yo++; Xoff[xi++] = xo++; Xoff[xi++] = xo++; Xoff[xi++] = xo++; Xoff[xi++] = xo++; Xoff[xi++] = xo++; Yoff[yi++] = yo ; Yoff[yi++] = yo ; Yoff[yi++] = yo ; Yoff[yi++] = yo ; Yoff[yi++] = yo ; xo = -2, yo++; Xoff[xi++] = xo++; Xoff[xi++] = xo++; Xoff[xi++] = xo++; Xoff[xi++] = xo++; Xoff[xi++] = xo++; Yoff[yi++] = yo ; Yoff[yi++] = yo ; Yoff[yi++] = yo ; Yoff[yi++] = yo ; Yoff[yi++] = yo ; // Generate a 13-point lower triangular 2D Finite Difference matrix XLoff.Size(13); YLoff.Size(13); xi = 0, yi = 0; xo = -2, yo = -2; XLoff[xi++] = xo++; XLoff[xi++] = xo++; XLoff[xi++] = xo++; XLoff[xi++] = xo++; XLoff[xi++] = xo++; YLoff[yi++] = yo ; YLoff[yi++] = yo ; YLoff[yi++] = yo ; YLoff[yi++] = yo ; YLoff[yi++] = yo ; xo = -2, yo++; XLoff[xi++] = xo++; XLoff[xi++] = xo++; XLoff[xi++] = xo++; XLoff[xi++] = xo++; XLoff[xi++] = xo++; YLoff[yi++] = yo ; YLoff[yi++] = yo ; YLoff[yi++] = yo ; YLoff[yi++] = yo ; YLoff[yi++] = yo ; xo = -2, yo++; XLoff[xi++] = xo++; XLoff[xi++] = xo++; XLoff[xi++] = xo++; YLoff[yi++] = yo ; YLoff[yi++] = yo ; YLoff[yi++] = yo ; // Generate a 13-point upper triangular 2D Finite Difference matrix XUoff.Size(13); YUoff.Size(13); xi = 0, yi = 0; xo = 0, yo = 0; XUoff[xi++] = xo++; XUoff[xi++] = xo++; XUoff[xi++] = xo++; YUoff[yi++] = yo ; YUoff[yi++] = yo ; YUoff[yi++] = yo ; xo = -2, yo++; XUoff[xi++] = xo++; XUoff[xi++] = xo++; XUoff[xi++] = xo++; XUoff[xi++] = xo++; XUoff[xi++] = xo++; YUoff[yi++] = yo ; YUoff[yi++] = yo ; YUoff[yi++] = yo ; YUoff[yi++] = yo ; YUoff[yi++] = yo ; xo = -2, yo++; 
XUoff[xi++] = xo++; XUoff[xi++] = xo++; XUoff[xi++] = xo++; XUoff[xi++] = xo++; XUoff[xi++] = xo++; YUoff[yi++] = yo ; YUoff[yi++] = yo ; YUoff[yi++] = yo ; YUoff[yi++] = yo ; YUoff[yi++] = yo ; } Epetra_Map * map; Epetra_Map * mapL; Epetra_Map * mapU; Epetra_CrsMatrix * A; Epetra_CrsMatrix * L; Epetra_CrsMatrix * U; Epetra_MultiVector * b; Epetra_MultiVector * bt; Epetra_MultiVector * xexact; Epetra_MultiVector * bL; Epetra_MultiVector * btL; Epetra_MultiVector * xexactL; Epetra_MultiVector * bU; Epetra_MultiVector * btU; Epetra_MultiVector * xexactU; Epetra_SerialDenseVector resvec(0); //Timings Epetra_Flops flopcounter; Epetra_Time timer(comm); #ifdef EPETRA_VERY_SHORT_PERFTEST int jstop = 1; #elif EPETRA_SHORT_PERFTEST int jstop = 1; #else int jstop = 2; #endif for (int j=0; j<jstop; j++) { for (int k=1; k<17; k++) { #ifdef EPETRA_VERY_SHORT_PERFTEST if (k<3 || (k%4==0 && k<9)) { #elif EPETRA_SHORT_PERFTEST if (k<6 || k%4==0) { #else if (k<7 || k%2==0) { #endif int nrhs=k; if (verbose) cout << "\n*************** Results for " << nrhs << " RHS with "; bool StaticProfile = (j!=0); if (verbose) { if (StaticProfile) cout << " static profile\n"; else cout << " dynamic profile\n"; } GenerateCrsProblem(numNodesX, numNodesY, numProcsX, numProcsY, numPoints, Xoff.Values(), Yoff.Values(), nrhs, comm, verbose, summary, map, A, b, bt, xexact, StaticProfile, false); #ifdef EPETRA_HAVE_JADMATRIX timer.ResetStartTime(); Epetra_JadMatrix JA(*A); elapsed_time = timer.ElapsedTime(); if (verbose) cout << "Time to create Jagged diagonal matrix = " << elapsed_time << endl; //cout << "A = " << *A << endl; //cout << "JA = " << JA << endl; runJadMatrixTests(&JA, b, bt, xexact, StaticProfile, verbose, summary); #endif runMatrixTests(A, b, bt, xexact, StaticProfile, verbose, summary); delete A; delete b; delete bt; delete xexact; GenerateCrsProblem(numNodesX, numNodesY, numProcsX, numProcsY, XLoff.Length(), XLoff.Values(), YLoff.Values(), nrhs, comm, verbose, summary, mapL, L, bL, btL, 
xexactL, StaticProfile, true); GenerateCrsProblem(numNodesX, numNodesY, numProcsX, numProcsY, XUoff.Length(), XUoff.Values(), YUoff.Values(), nrhs, comm, verbose, summary, mapU, U, bU, btU, xexactU, StaticProfile, true); runLUMatrixTests(L, bL, btL, xexactL, U, bU, btU, xexactU, StaticProfile, verbose, summary); delete L; delete bL; delete btL; delete xexactL; delete mapL; delete U; delete bU; delete btU; delete xexactU; delete mapU; Epetra_MultiVector q(*map, nrhs); Epetra_MultiVector z(q); Epetra_MultiVector r(q); delete map; q.SetFlopCounter(flopcounter); z.SetFlopCounter(q); r.SetFlopCounter(q); resvec.Resize(nrhs); flopcounter.ResetFlops(); timer.ResetStartTime(); //10 norms for( int i = 0; i < 10; ++i ) q.Norm2( resvec.Values() ); elapsed_time = timer.ElapsedTime(); total_flops = q.Flops(); MFLOPs = total_flops/elapsed_time/1000000.0; if (verbose) cout << "\nTotal MFLOPs for 10 Norm2's= " << MFLOPs << endl; if (summary) { if (comm.NumProc()==1) cout << "Norm2" << '\t'; cout << MFLOPs << endl; } flopcounter.ResetFlops(); timer.ResetStartTime(); //10 dot's for( int i = 0; i < 10; ++i ) q.Dot(z, resvec.Values()); elapsed_time = timer.ElapsedTime(); total_flops = q.Flops(); MFLOPs = total_flops/elapsed_time/1000000.0; if (verbose) cout << "Total MFLOPs for 10 Dot's = " << MFLOPs << endl; if (summary) { if (comm.NumProc()==1) cout << "DotProd" << '\t'; cout << MFLOPs << endl; } flopcounter.ResetFlops(); timer.ResetStartTime(); //10 dot's for( int i = 0; i < 10; ++i ) q.Update(1.0, z, 1.0, r, 0.0); elapsed_time = timer.ElapsedTime(); total_flops = q.Flops(); MFLOPs = total_flops/elapsed_time/1000000.0; if (verbose) cout << "Total MFLOPs for 10 Updates= " << MFLOPs << endl; if (summary) { if (comm.NumProc()==1) cout << "Update" << '\t'; cout << MFLOPs << endl; } } } } #ifdef EPETRA_MPI MPI_Finalize() ; #endif return ierr ; } // Constructs a 2D PDE finite difference matrix using the list of x and y offsets. 
//
// nx (In) - number of grid points in x direction
// ny (In) - number of grid points in y direction
//   The total number of equations will be nx*ny ordered such that the x direction changes
//   most rapidly:
//      First equation is at point (0,0)
//      Second at (1,0)
//      ...
//      nx equation at (nx-1,0)
//      nx+1st equation at (0,1)
// numPoints (In) - number of points in finite difference stencil
// xoff (In) - stencil offsets in x direction (of length numPoints)
// yoff (In) - stencil offsets in y direction (of length numPoints)
//   A standard 5-point finite difference stencil would be described as:
//     numPoints = 5
//     xoff = [-1, 1, 0, 0, 0]
//     yoff = [ 0, 0, 0, -1, 1]
// nrhs (In) - Number of rhs to generate. (The first interface produces vectors, so nrhs is not needed there.)
// comm (In) - an Epetra_Comm object describing the parallel machine (numProcs and my proc ID)
// map (Out) - Epetra_Map describing distribution of matrix and vectors/multivectors
// A (Out) - Epetra_CrsMatrix constructed for nx by ny grid using prescribed stencil
//   Off-diagonal values are random between 0 and 1. If diagonal is part of stencil,
//   diagonal will be slightly diag dominant.
// b (Out) - Generated RHS. Values satisfy b = A*xexact
// bt (Out) - Generated RHS. Values satisfy bt = A'*xexact
// xexact (Out) - Generated exact solution to A*xexact = b and A'*xexact = bt
// Note: Caller of this function is responsible for deleting all output objects.
// Single-RHS convenience overload: forwards to the multivector version with
// nrhs = 1 and downcasts the results to Epetra_Vector.  The nrhs<=1 branch of
// the multivector overload allocates Epetra_Vector objects, so the
// dynamic_casts below succeed.
void GenerateCrsProblem(int numNodesX, int numNodesY, int numProcsX, int numProcsY,
                        int numPoints, int * xoff, int * yoff,
                        const Epetra_Comm &comm, bool verbose, bool summary,
                        Epetra_Map *& map,
                        Epetra_CrsMatrix *& A,
                        Epetra_Vector *& b,
                        Epetra_Vector *& bt,
                        Epetra_Vector *&xexact, bool StaticProfile, bool MakeLocalOnly) {

  Epetra_MultiVector * b1, * bt1, * xexact1;

  GenerateCrsProblem(numNodesX, numNodesY, numProcsX, numProcsY, numPoints,
                     xoff, yoff, 1, comm, verbose, summary,
                     map, A, b1, bt1, xexact1, StaticProfile, MakeLocalOnly);

  // Safe downcasts: with nrhs==1 the multivector overload constructs
  // Epetra_Vector instances (see its nrhs<=1 branch).
  b = dynamic_cast<Epetra_Vector *>(b1);
  bt = dynamic_cast<Epetra_Vector *>(bt1);
  xexact = dynamic_cast<Epetra_Vector *>(xexact1);

  return;
}

// Builds the 2D finite-difference test problem described in the comment block
// above: creates the row map, fills A from the (xoff, yoff) stencil with
// random off-diagonal entries and a diagonally dominant diagonal, then
// manufactures xexact (random) and the matching right-hand sides
// b = A*xexact and bt = A'*xexact.  All outputs are heap-allocated and owned
// by the caller.
void GenerateCrsProblem(int numNodesX, int numNodesY, int numProcsX, int numProcsY,
                        int numPoints, int * xoff, int * yoff, int nrhs,
                        const Epetra_Comm &comm, bool verbose, bool summary,
                        Epetra_Map *& map,
                        Epetra_CrsMatrix *& A,
                        Epetra_MultiVector *& b,
                        Epetra_MultiVector *& bt,
                        Epetra_MultiVector *&xexact, bool StaticProfile, bool MakeLocalOnly) {

  Epetra_Time timer(comm);

  // Determine my global IDs
  long long * myGlobalElements;
  GenerateMyGlobalElements(numNodesX, numNodesY, numProcsX, numProcsY, comm.MyPID(), myGlobalElements);

  int numMyEquations = numNodesX*numNodesY;

  map = new Epetra_Map((long long)-1, numMyEquations, myGlobalElements, 0, comm); // Create map with 2D block partitioning.
  delete [] myGlobalElements;

  long long numGlobalEquations = map->NumGlobalElements64();

  // With a static profile, preallocate exactly numPoints entries per row.
  int profile = 0;
  if (StaticProfile) profile = numPoints;

#ifdef EPETRA_HAVE_STATICPROFILE
  if (MakeLocalOnly)
    A = new Epetra_CrsMatrix(Copy, *map, *map, profile, StaticProfile); // Construct matrix with rowmap=colmap
  else
    A = new Epetra_CrsMatrix(Copy, *map, profile, StaticProfile); // Construct matrix
#else
  if (MakeLocalOnly)
    A = new Epetra_CrsMatrix(Copy, *map, *map, profile); // Construct matrix with rowmap=colmap
  else
    A = new Epetra_CrsMatrix(Copy, *map, profile); // Construct matrix
#endif

  long long * indices = new long long[numPoints];
  double * values = new double[numPoints];

  double dnumPoints = (double) numPoints;
  int nx = numNodesX*numProcsX;  // global grid width: stencil y-offsets step by nx equations

  for (int i=0; i<numMyEquations; i++) {

    long long rowID = map->GID64(i);
    int numIndices = 0;

    for (int j=0; j<numPoints; j++) {
      long long colID = rowID + xoff[j] + nx*yoff[j]; // Compute column ID based on stencil offsets
      // Drop stencil entries that fall outside the global index range
      // (grid boundary handling).
      if (colID>-1 && colID<numGlobalEquations) {
        indices[numIndices] = colID;
        double value = - ((double) rand())/ ((double) RAND_MAX);
        if (colID==rowID)
          values[numIndices++] = dnumPoints - value; // Make diagonal dominant
        else
          values[numIndices++] = value;
      }
    }
    //cout << "Building row " << rowID << endl;
    A->InsertGlobalValues(rowID, numIndices, values, indices);
  }

  delete [] indices;
  delete [] values;
  double insertTime = timer.ElapsedTime();
  timer.ResetStartTime();
  A->FillComplete(false);
  double fillCompleteTime = timer.ElapsedTime();

  if (verbose)
    cout << "Time to insert matrix values = " << insertTime << endl
         << "Time to complete fill        = " << fillCompleteTime << endl;
  if (summary) {
    if (comm.NumProc()==1) cout << "InsertTime" << '\t';
    cout << insertTime << endl;
    if (comm.NumProc()==1) cout << "FillCompleteTime" << '\t';
    cout << fillCompleteTime << endl;
  }

  // nrhs<=1 allocates Epetra_Vector so the single-RHS overload above can
  // downcast; otherwise allocate nrhs-column multivectors.
  if (nrhs<=1) {
    b = new Epetra_Vector(*map);
    bt = new Epetra_Vector(*map);
    xexact = new Epetra_Vector(*map);
  }
  else {
    b = new Epetra_MultiVector(*map, nrhs);
    bt = new Epetra_MultiVector(*map, nrhs);
    xexact = new Epetra_MultiVector(*map, nrhs);
  }

  xexact->Random(); // Fill xexact with random values

  // Manufacture consistent right-hand sides for A and A'.
  A->Multiply(false, *xexact, *b);
  A->Multiply(true, *xexact, *bt);

  return;
}
int main(int argc, char *argv[]) { // Initialize MPI #ifdef HAVE_MPI MPI_Init(&argc,&argv); #endif // Create a communicator for Epetra objects #ifdef HAVE_MPI Epetra_MpiComm Comm( MPI_COMM_WORLD ); #else Epetra_SerialComm Comm; #endif bool verbose = false; if (argc > 1) if (argv[1][0]=='-' && argv[1][1]=='v') verbose = true; // Get the process ID and the total number of processors int MyPID = Comm.MyPID(); #ifdef HAVE_MPI int NumProc = Comm.NumProc(); #endif // Set up the printing utilities Teuchos::RCP<Teuchos::ParameterList> noxParamsPtr = Teuchos::rcp(new Teuchos::ParameterList); Teuchos::ParameterList& noxParams = *(noxParamsPtr.get()); // Only print output if the "-v" flag is set on the command line Teuchos::ParameterList& printParams = noxParams.sublist("Printing"); printParams.set("MyPID", MyPID); printParams.set("Output Precision", 5); printParams.set("Output Processor", 0); if( verbose ) printParams.set("Output Information", NOX::Utils::OuterIteration + NOX::Utils::OuterIterationStatusTest + NOX::Utils::InnerIteration + NOX::Utils::Parameters + NOX::Utils::Details + NOX::Utils::Warning + NOX::Utils::TestDetails); else printParams.set("Output Information", NOX::Utils::Error + NOX::Utils::TestDetails); NOX::Utils printing(printParams); // Identify the test problem if (printing.isPrintType(NOX::Utils::TestDetails)) printing.out() << "Starting epetra/NOX_Group/NOX_Group.exe" << endl; // Identify processor information #ifdef HAVE_MPI if (printing.isPrintType(NOX::Utils::TestDetails)) { printing.out() << "Parallel Run" << endl; printing.out() << "Number of processors = " << NumProc << endl; printing.out() << "Print Process = " << MyPID << endl; } Comm.Barrier(); if (printing.isPrintType(NOX::Utils::TestDetails)) printing.out() << "Process " << MyPID << " is alive!" << endl; Comm.Barrier(); #else if (printing.isPrintType(NOX::Utils::TestDetails)) printing.out() << "Serial Run" << endl; #endif // Return value int status = 0; // *** Insert Testing Here!!! 
*** if (status == 0) printing.out() << "Test passed!" << endl; else printing.out() << "Test failed!" << endl; #ifdef HAVE_MPI MPI_Finalize(); #endif // return 0 for a successful test return status; }
// ====================================================================== int main(int argc, char *argv[]) { #ifdef HAVE_MPI MPI_Init(&argc,&argv); Epetra_MpiComm Comm( MPI_COMM_WORLD ); #else Epetra_SerialComm Comm; #endif verbose = (Comm.MyPID() == 0); int nx = 60; for (int i = 1 ; i < argc ; ++i) { if (strcmp(argv[i],"-s") == 0) { SymmetricGallery = true; Solver = AZ_cg; } if(strcmp(argv[i],"-n") == 0 && i+1 < argc) { i++; nx = atoi(argv[i]); } } // size of the global matrix. Teuchos::ParameterList GaleriList; GaleriList.set("nx", nx); GaleriList.set("ny", nx * Comm.NumProc()); GaleriList.set("mx", 1); GaleriList.set("my", Comm.NumProc()); Teuchos::RefCountPtr<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap("Cartesian2D", Comm, GaleriList) ); Teuchos::RefCountPtr<Epetra_CrsMatrix> A; if (SymmetricGallery) A = Teuchos::rcp( Galeri::CreateCrsMatrix("Laplace2D", &*Map, GaleriList) ); else A = Teuchos::rcp( Galeri::CreateCrsMatrix("Recirc2D", &*Map, GaleriList) ); // coordinates Teuchos::RCP<Epetra_MultiVector> coord = Teuchos::rcp( Galeri::CreateCartesianCoordinates("2D",&*Map,GaleriList)); // test the preconditioner int TestPassed = true; int who = RUSAGE_SELF; struct rusage usage; //int ret; //ret = getrusage(who, &usage); struct timeval ru_utime; // struct timeval ru_stime; ru_utime = usage.ru_utime; // ================================== // // compare point and block relaxation // // ================================== // TestPassed = TestPassed && ComparePointAndBlock("Jacobi",A,10); if(verbose) printf(" Jacobi Finished \n"); //ret = getrusage(who, &usage); int sec = usage.ru_utime.tv_sec -ru_utime.tv_sec; int usec = usage.ru_utime.tv_usec -ru_utime.tv_usec; double tt = (double)sec + 1e-6*(double)usec; ru_utime = usage.ru_utime; if(verbose) printf(" Jacobi time %f \n",tt); TestPassed = TestPassed && ComparePointAndBlock("symmetric Gauss-Seidel",A,10); if(verbose) printf(" sGS finished \n"); //ret = getrusage(who, &usage); sec = usage.ru_utime.tv_sec 
-ru_utime.tv_sec; usec = usage.ru_utime.tv_usec -ru_utime.tv_usec; tt = (double)sec + 1e-6*(double)usec; ru_utime = usage.ru_utime; if(verbose) printf(" sGS time %f \n",tt); if (!SymmetricGallery) { TestPassed = TestPassed && ComparePointAndBlock("Gauss-Seidel",A,10); //ret = getrusage(who, &usage); sec = usage.ru_utime.tv_sec -ru_utime.tv_sec; usec = usage.ru_utime.tv_usec -ru_utime.tv_usec; tt = (double)sec + 1e-6*(double)usec; ru_utime = usage.ru_utime; if(verbose) printf(" GS time %f \n",tt); if(verbose) printf(" GS Finished \n"); } if (!TestPassed) { cout << "Test `Performance.exe' failed!" << endl; exit(EXIT_FAILURE); } #ifdef HAVE_MPI MPI_Finalize(); #endif cout << endl; cout << "Test `Performance.exe' passed!" << endl; cout << endl; return(EXIT_SUCCESS); }
#include "Thyra_VectorBase.hpp"

using namespace std;

// Unit test: exercises a Stratimikos/AztecOO linear solver configuration.
// NOTE(review): this fragment is truncated in this chunk -- the test body
// opened here never closes within the visible source.
TEUCHOS_UNIT_TEST(dimension, default)
{
  int status = 0;   // pass/fail accumulator (set by the truncated remainder)

  // Create a communicator for Epetra objects
#ifdef HAVE_MPI
  Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
  Epetra_SerialComm Comm;
#endif

  // This test only supports a single-process run
  TEST_ASSERT(Comm.NumProc() == 1);

  // Stratimikos selects the linear solver stack from the ParameterList p
  ::Stratimikos::DefaultLinearSolverBuilder builder;
  Teuchos::RCP<Teuchos::ParameterList> p = Teuchos::rcp(new Teuchos::ParameterList);
  {
    p->set("Linear Solver Type", "AztecOO");
    //p->set("Preconditioner Type", "Ifpack");
    p->set("Preconditioner Type", "None");
    Teuchos::ParameterList& az = p->sublist("Linear Solver Types").sublist("AztecOO");
    az.sublist("Forward Solve").sublist("AztecOO Settings").set("Output Frequency", 1);
    az.sublist("VerboseObject").set("Verbosity Level", "high");
    // Ifpack verbosity is configured even though "None" is selected above,
    // so re-enabling the commented Ifpack line needs no other edits.
    Teuchos::ParameterList& ip = p->sublist("Preconditioner Types").sublist("Ifpack");
    ip.sublist("VerboseObject").set("Verbosity Level", "high");
  }
// Test driver: solves a 1D finite-element problem with NOX's Anderson
// accelerated fixed-point solver, optionally preconditioned with ML, and
// checks both convergence and the exact nonlinear iteration count.
// Usage: [-v] [NumGlobalElements]; exits EXIT_SUCCESS only if the run
// converges in exactly 11 nonlinear iterations.
int main(int argc, char *argv[])
{
  // Initialize MPI
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
#endif

  int status = 0; // Converged

  // Create a communicator for Epetra objects
#ifdef HAVE_MPI
  Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
  Epetra_SerialComm Comm;
#endif

  // Get the process ID and the total number of processors
  int MyPID = Comm.MyPID();
  int NumProc = Comm.NumProc();

  // Check verbosity level
  bool verbose = false;
  if (argc > 1)
    if (argv[1][0]=='-' && argv[1][1]=='v')
      verbose = true;

  // Get the number of elements from the command line.  The "+ 1" accounts
  // for the extra node; default problem size is 200.
  int NumGlobalElements = 0;
  if ((argc > 2) && (verbose))
    NumGlobalElements = atoi(argv[2]) + 1;
  else if ((argc > 1) && (!verbose))
    NumGlobalElements = atoi(argv[1]) + 1;
  else
    NumGlobalElements = 200;

  bool success = false;
  try {
    // The number of unknowns must be at least equal to the
    // number of processors.
    if (NumGlobalElements < NumProc) {
      std::cout << "numGlobalBlocks = " << NumGlobalElements
                << " cannot be < number of processors = " << NumProc
                << std::endl;
      std::cout << "Test failed!" << std::endl;
      throw "NOX Error";
    }

    if (verbose)
      if (MyPID == 0)
        std::cout << "\n" << NOX::version() << std::endl;

    // Create the interface between NOX and the application
    // This object is derived from NOX::Epetra::Interface
    Teuchos::RCP<Interface> interface =
      Teuchos::rcp(new Interface(NumGlobalElements, Comm));

    // Get the vector from the Problem.  noxSoln wraps (views, not copies)
    // the same underlying Epetra_Vector.
    Teuchos::RCP<Epetra_Vector> soln = interface->getSolution();
    Teuchos::RCP<NOX::Epetra::Vector> noxSoln =
      Teuchos::rcp(new NOX::Epetra::Vector(soln,
                                           NOX::Epetra::Vector::CreateView));

    // Set the PDE factor (for nonlinear forcing term). This could be specified
    // via user input.
    interface->setPDEfactor(1000.0);

    // Set the initial guess
    soln->PutScalar(1.0);

    // Begin Nonlinear Solver ************************************

    // Create the top level parameter list
    Teuchos::RCP<Teuchos::ParameterList> nlParamsPtr =
      Teuchos::rcp(new Teuchos::ParameterList);
    Teuchos::ParameterList& nlParams = *(nlParamsPtr.get());

    // Set the nonlinear solver method: Anderson acceleration with a small
    // storage depth; mixing parameter -1.0 and Jacobian-based
    // preconditioning are applied each iteration.
    nlParams.set("Nonlinear Solver", "Anderson Accelerated Fixed-Point");
    nlParams.sublist("Anderson Parameters").set("Storage Depth", 2);
    nlParams.sublist("Anderson Parameters").set("Mixing Parameter", -1.0);
    nlParams.sublist("Anderson Parameters").sublist("Preconditioning").set("Precondition", true);
    nlParams.sublist("Anderson Parameters").sublist("Preconditioning").set("Recompute Jacobian", true);
    //nlParams.set("Nonlinear Solver", "Line Search Based");

    // Set the printing parameters in the "Printing" sublist
    Teuchos::ParameterList& printParams = nlParams.sublist("Printing");
    printParams.set("MyPID", MyPID);
    printParams.set("Output Precision", 3);
    printParams.set("Output Processor", 0);
    if (verbose)
      printParams.set("Output Information",
                      NOX::Utils::OuterIteration +
                      NOX::Utils::OuterIterationStatusTest +
                      NOX::Utils::InnerIteration +
                      NOX::Utils::LinearSolverDetails +
                      NOX::Utils::Parameters +
                      NOX::Utils::Details +
                      NOX::Utils::Warning +
                      NOX::Utils::Debug +
                      NOX::Utils::TestDetails +
                      NOX::Utils::Error);
    else
      printParams.set("Output Information", NOX::Utils::Error +
                      NOX::Utils::TestDetails);

    // Create a print class for controlling output below
    NOX::Utils printing(printParams);

    Teuchos::ParameterList& dirParams = nlParams.sublist("Direction");
    dirParams.set("Method", "Newton");
    Teuchos::ParameterList& newtonParams = dirParams.sublist("Newton");
    newtonParams.set("Forcing Term Method", "Constant");

    // Sublist for linear solver for the Newton method
    Teuchos::ParameterList& lsParams = newtonParams.sublist("Linear Solver");
    lsParams.set("Aztec Solver", "GMRES");
    lsParams.set("Max Iterations", 800);
    lsParams.set("Tolerance", 1e-4);

    // This is an ML Preconditioner test. If ML was not enabled,
    // just exit with success
#ifndef HAVE_NOX_ML_EPETRA
    printing.out() << "NOX not compiled with ML, exiting test!" << std::endl;
    printing.out() << "Test passed!" << std::endl;
#ifdef HAVE_MPI
    MPI_Finalize();
#endif
    return EXIT_SUCCESS;
#endif

    // Various Preconditioner options
#ifdef HAVE_NOX_ML_EPETRA
    // Comment out the previous Preconditioner spec and uncomment this line
    // to turn on ML
    lsParams.set("Preconditioner", "ML");
    lsParams.set("Preconditioner Operator", "Use Jacobian");

    // Create a parameter list for ML options
    Teuchos::ParameterList MLList;
#endif
#ifdef HAVE_NOX_ML_EPETRA
    if( lsParams.get("Preconditioner", "None") == "ML" ) {
      // This Teuchos parameter list is needed for ML
      // These specifications come straight from the example in
      // Trilinos/packages/ml/example/ml_example_epetra_preconditioner.cpp
      // set defaults for classic smoothed aggregation
      /*ML_Epetra::SetDefaults("SA",MLList);
      // maximum number of levels
      MLList.set("max levels",5);
      MLList.set("increasing or decreasing","decreasing");
      // use Uncoupled scheme to create the aggregate,
      // from level 3 use the better but more expensive MIS
      MLList.set("aggregation: type", "Uncoupled");
      MLList.set("aggregation: type (level 3)", "MIS");
      // smoother is Gauss-Seidel.  Example file
      // ml_example_epetra_preconditioner_2level.cpp shows how to use
      // AZTEC's preconditioners as smoothers
      MLList.set("smoother: type","Gauss-Seidel");
      // use both pre and post smoothing
      MLList.set("smoother: pre or post", "both");
      // solve with serial direct solver KLU
      MLList.set("coarse: type","Jacobi");
      // Set ML output verbosity
      if( verbose )
        MLList.set("output", 10);
      else
        MLList.set("output", 0);*/
      MLList.set("PDE equations", 1);
      MLList.set("coarse: max size", 2000);
      MLList.set("coarse: type", "Amesos-KLU");
      //MLList.set("ML output", 10);
      lsParams.set("ML", MLList);
    }
#endif

    // Add a user defined pre/post operator object
    Teuchos::RCP<NOX::Abstract::PrePostOperator> ppo =
      Teuchos::rcp(new UserPrePostOperator(printing));
    nlParams.sublist("Solver Options").set("User Defined Pre/Post Operator",
                                           ppo);

    // Let's force all status tests to do a full check
    nlParams.sublist("Solver Options").set("Status Test Check Type",
                                           "Complete");

    // Create all possible Epetra_Operators.
    // 1. User supplied (Epetra_RowMatrix)
    Teuchos::RCP<Epetra_RowMatrix> Analytic = interface->getJacobian();

    // Create the linear system.  The same interface object serves all three
    // callback roles (residual, Jacobian, preconditioner).
    Teuchos::RCP<NOX::Epetra::Interface::Required> iReq = interface;
    Teuchos::RCP<NOX::Epetra::Interface::Jacobian> iJac = interface;
    Teuchos::RCP<NOX::Epetra::Interface::Preconditioner> iPrec = interface;
    Teuchos::RCP<NOX::Epetra::LinearSystemAztecOO> linSys =
      Teuchos::rcp(new NOX::Epetra::LinearSystemAztecOO(printParams, lsParams,
                                                        iReq, iJac, Analytic,
                                                        //iPrec, Analytic,
                                                        *soln));

    // Create the Group
    NOX::Epetra::Vector initialGuess(soln, NOX::Epetra::Vector::CreateView);
    Teuchos::RCP<NOX::Epetra::Group> grpPtr =
      Teuchos::rcp(new NOX::Epetra::Group(printParams, iReq, initialGuess,
                                          linSys));
    NOX::Epetra::Group& grp = *grpPtr;

    // Create the convergence tests: converge when ALL of absolute residual,
    // relative residual, WRMS norm, and update norm pass.
    Teuchos::RCP<NOX::StatusTest::NormF> absresid =
      Teuchos::rcp(new NOX::StatusTest::NormF(1.0e-8));
    Teuchos::RCP<NOX::StatusTest::NormF> relresid =
      Teuchos::rcp(new NOX::StatusTest::NormF(grp, 1.0e-2));
    Teuchos::RCP<NOX::StatusTest::NormUpdate> update =
      Teuchos::rcp(new NOX::StatusTest::NormUpdate(1.0e-5));
    Teuchos::RCP<NOX::StatusTest::NormWRMS> wrms =
      Teuchos::rcp(new NOX::StatusTest::NormWRMS(1.0e-2, 1.0e-8));
    Teuchos::RCP<NOX::StatusTest::Combo> converged =
      Teuchos::rcp(new NOX::StatusTest::Combo(NOX::StatusTest::Combo::AND));
    converged->addStatusTest(absresid);
    converged->addStatusTest(relresid);
    converged->addStatusTest(wrms);
    converged->addStatusTest(update);
    // Overall test: converged OR iteration cap OR non-finite residual.
    Teuchos::RCP<NOX::StatusTest::MaxIters> maxiters =
      Teuchos::rcp(new NOX::StatusTest::MaxIters(100));
    Teuchos::RCP<NOX::StatusTest::FiniteValue> fv =
      Teuchos::rcp(new NOX::StatusTest::FiniteValue);
    Teuchos::RCP<NOX::StatusTest::Combo> combo =
      Teuchos::rcp(new NOX::StatusTest::Combo(NOX::StatusTest::Combo::OR));
    combo->addStatusTest(fv);
    combo->addStatusTest(converged);
    combo->addStatusTest(maxiters);

    // Create the solver
    Teuchos::RCP<NOX::Solver::Generic> solver =
      NOX::Solver::buildSolver(grpPtr, combo, nlParamsPtr);
    NOX::StatusTest::StatusType solvStatus = solver->solve();

    // End Nonlinear Solver **************************************

    // Get the Epetra_Vector with the final solution from the solver
    const NOX::Epetra::Group& finalGroup =
      dynamic_cast<const NOX::Epetra::Group&>(solver->getSolutionGroup());
    const Epetra_Vector& finalSolution =
      (dynamic_cast<const NOX::Epetra::Vector&>(finalGroup.getX())).
      getEpetraVector();

    // Output the parameter list
    if (verbose) {
      if (printing.isPrintType(NOX::Utils::Parameters)) {
        printing.out() << std::endl << "Final Parameters" << std::endl
                       << "****************" << std::endl;
        solver->getList().print(printing.out());
        printing.out() << std::endl;
      }
    }

    // Print solution: one "output.<pid>" file per process, one
    // "<global id> <value>" line per locally-owned element.
    char file_name[25];
    FILE *ifp;
    int NumMyElements = soln->Map().NumMyElements();
    (void) sprintf(file_name, "output.%d",MyPID);
    ifp = fopen(file_name, "w");
    for (int i=0; i<NumMyElements; i++)
      fprintf(ifp, "%d %E\n", soln->Map().MinMyGID()+i, finalSolution[i]);
    fclose(ifp);

    // Tests
    // 1. Convergence
    if (solvStatus != NOX::StatusTest::Converged) {
      status = 1;
      if (printing.isPrintType(NOX::Utils::Error))
        printing.out() << "Nonlinear solver failed to converge!" << std::endl;
    }
    // 2. Nonlinear solve iterations (10)
    // NOTE(review): the comment says 10 but the check requires exactly 11 —
    // presumably the expected count for this configuration; confirm against
    // the test baseline.
    if (const_cast<Teuchos::ParameterList&>(solver->getList()).sublist("Output").get("Nonlinear Iterations", 0) != 11)
      status = 2;

    success = status==0;

    // Summarize test results
    if (success)
      printing.out() << "Test passed!" << std::endl;
    else
      printing.out() << "Test failed!" << std::endl;
    printing.out() << "Status = " << status << std::endl;
  }
  TEUCHOS_STANDARD_CATCH_STATEMENTS(verbose, std::cerr, success);

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return ( success ? EXIT_SUCCESS : EXIT_FAILURE );
}
// Example driver: builds a 2D Laplacian with Galeri, wraps it in an
// Ifpack additive-Schwarz preconditioner with an ICT (incomplete
// Cholesky) local solve, and solves a random-RHS system with AztecOO CG.
int main(int argc, char *argv[])
{
  // initialize MPI and Epetra communicator
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
  Epetra_SerialComm Comm;
#endif

  Teuchos::ParameterList GaleriList;

  // The problem is defined on a 2D grid, global size is nx * nx.
  int nx = 30;
  GaleriList.set("nx", nx);
  GaleriList.set("ny", nx * Comm.NumProc());
  GaleriList.set("mx", 1);
  GaleriList.set("my", Comm.NumProc());
  Teuchos::RefCountPtr<Epetra_Map> Map =
    Teuchos::rcp( Galeri::CreateMap("Cartesian2D", Comm, GaleriList) );
  Teuchos::RefCountPtr<Epetra_RowMatrix> A =
    Teuchos::rcp( Galeri::CreateCrsMatrix("Laplace2D", &*Map, GaleriList) );

  // =============================================================== //
  // B E G I N N I N G   O F   I F P A C K   C O N S T R U C T I O N //
  // =============================================================== //

  Teuchos::ParameterList List;

  // builds an Ifpack_AdditiveSchwarz. This is templated with
  // the local solvers, in this case Ifpack_ICT. Note that any
  // other Ifpack_Preconditioner-derived class can be used
  // instead of Ifpack_ICT.

  // In this example the overlap is zero. Use
  // Prec(A,OverlapLevel) for the general case.
  Ifpack_AdditiveSchwarz<Ifpack_ICT> Prec(&*A);

  // `1.0' means that the factorization should approximatively
  // keep the same number of nonzeros per row of the original matrix.
  List.set("fact: ict level-of-fill", 1.0);
  // no modifications on the diagonal
  List.set("fact: absolute threshold", 0.0);
  List.set("fact: relative threshold", 1.0);
  List.set("fact: relaxation value", 0.0);
  // matrix `laplace_2d_bc' is not symmetric because of the way
  // boundary conditions are imposed. We can filter the singletons,
  // (that is, Dirichlet nodes) and end up with a symmetric
  // matrix (as ICT requires).
  List.set("schwarz: filter singletons", true);

  // sets the parameters
  IFPACK_CHK_ERR(Prec.SetParameters(List));

  // initialize the preconditioner. At this point the matrix must
  // have been FillComplete()'d, but actual values are ignored.
  IFPACK_CHK_ERR(Prec.Initialize());

  // Builds the preconditioners, by looking for the values of
  // the matrix.
  IFPACK_CHK_ERR(Prec.Compute());

  // =================================================== //
  // E N D   O F   I F P A C K   C O N S T R U C T I O N //
  // =================================================== //

  // At this point, we need some additional objects
  // to define and solve the linear system.

  // defines LHS and RHS
  // NOTE(review): both vectors use OperatorDomainMap(); for a rectangular
  // operator the RHS would belong to the range map — fine here only because
  // the Laplacian is square.  Confirm if the gallery matrix ever changes.
  Epetra_Vector LHS(A->OperatorDomainMap());
  Epetra_Vector RHS(A->OperatorDomainMap());

  LHS.PutScalar(0.0);
  RHS.Random();

  // need an Epetra_LinearProblem to define AztecOO solver
  Epetra_LinearProblem Problem(&*A,&LHS,&RHS);

  // now we can allocate the AztecOO solver
  AztecOO Solver(Problem);

  // specify solver (CG with condition-number estimation)
  Solver.SetAztecOption(AZ_solver,AZ_cg_condnum);
  Solver.SetAztecOption(AZ_output,32);

  // HERE WE SET THE IFPACK PRECONDITIONER
  Solver.SetPrecOperator(&Prec);

  // .. and here we solve
  // NOTE: with one process, the solver must converge in
  // one iteration.
  Solver.Iterate(1550,1e-5);

  // Prints out some information about the preconditioner
  std::cout << Prec;

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return (EXIT_SUCCESS);
}
// LOCA test driver: arc-length continuation in "Nonlinear Factor" of a 1D
// finite-element problem, tracking a turning point in "Right BC" with the
// minimally-augmented formulation and nested/Householder bordered solves.
// Returns `ierr` (0 on a normal run; the pass/fail message is printed by
// inspecting the stepper's iterator status).
int main(int argc, char *argv[])
{
  int ierr = 0;
  // Continuation/bifurcation parameter starting values.
  double nonlinear_factor = 1.0;
  double left_bc = 0.0;
  double right_bc = 2.07;

  // Initialize MPI
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
#endif

  // Create a communicator for Epetra objects
#ifdef HAVE_MPI
  Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
  Epetra_SerialComm Comm;
#endif

  // Get the process ID and the total number of processors
  int MyPID = Comm.MyPID();
  int NumProc = Comm.NumProc();

  // Get the number of elements from the command line
  int NumGlobalElements = 100 + 1;

  // The number of unknowns must be at least equal to the
  // number of processors.
  if (NumGlobalElements < NumProc) {
    std::cout << "numGlobalBlocks = " << NumGlobalElements
              << " cannot be < number of processors = " << NumProc
              << std::endl;
    exit(1);
  }

  // Create the FiniteElementProblem class.  This creates all required
  // Epetra objects for the problem and allows calls to the
  // function (RHS) and Jacobian evaluation routines.
  FiniteElementProblem Problem(NumGlobalElements, Comm);

  // Get the vector from the Problem
  Epetra_Vector& soln = Problem.getSolution();

  // Initialize Solution
  soln.PutScalar(1.0);

  // Create initial guess for the null vector of jacobian
  Teuchos::RCP<NOX::Abstract::Vector> nullVec =
    Teuchos::rcp(new NOX::Epetra::Vector(soln));
  nullVec->init(1.0);             // initial value 1.0

  // Begin LOCA Solver ************************************

  // Create parameter list
  Teuchos::RCP<Teuchos::ParameterList> paramList =
    Teuchos::rcp(new Teuchos::ParameterList);

  // Create LOCA sublist
  Teuchos::ParameterList& locaParamsList = paramList->sublist("LOCA");

  // Create the stepper sublist and set the stepper parameters
  Teuchos::ParameterList& locaStepperList = locaParamsList.sublist("Stepper");
  locaStepperList.set("Continuation Method", "Arc Length");
  locaStepperList.set("Bordered Solver Method", "Nested");
  //locaStepperList.set("Bordered Solver Method", "Householder");
  locaStepperList.set("Continuation Parameter", "Nonlinear Factor");
  locaStepperList.set("Initial Value", nonlinear_factor);
  locaStepperList.set("Max Value", 2.0);
  locaStepperList.set("Min Value", 0.05);
  locaStepperList.set("Max Steps", 20);
  locaStepperList.set("Max Nonlinear Iterations", 15);

  // Inner bordered solve used by the "Nested" method above.
  Teuchos::ParameterList& nestedList =
    locaStepperList.sublist("Nested Bordered Solver");
  nestedList.set("Bordered Solver Method", "Householder");
  nestedList.set("Include UV In Preconditioner", true);
  //nestedList.set("Use P For Preconditioner", true);
  nestedList.set("Preconditioner Method", "SMW");

  // Create bifurcation sublist: track a turning point in "Right BC" with
  // the minimally-augmented formulation; null vectors are refreshed every
  // continuation step (not every nonlinear iteration).
  Teuchos::ParameterList& bifurcationList =
    locaParamsList.sublist("Bifurcation");
  bifurcationList.set("Type", "Turning Point");
  bifurcationList.set("Bifurcation Parameter", "Right BC");
  bifurcationList.set("Formulation", "Minimally Augmented");
  bifurcationList.set("Symmetric Jacobian", false);
  bifurcationList.set("Update Null Vectors Every Continuation Step", true);
  bifurcationList.set("Update Null Vectors Every Nonlinear Iteration", false);
  //bifurcationList.set("Transpose Solver Method","Transpose Preconditioner");
  bifurcationList.set("Transpose Solver Method","Explicit Transpose");
  //bifurcationList.set("Transpose Solver Method","Left Preconditioning");
  //bifurcationList.set("Initial Null Vector Computation", "Solve df/dp");
  bifurcationList.set("Initial A Vector", nullVec);
  bifurcationList.set("Initial B Vector", nullVec);
  bifurcationList.set("Bordered Solver Method", "Householder");
  bifurcationList.set("Include UV In Preconditioner", true);
  //bifurcationList.set("Use P For Preconditioner", true);
  bifurcationList.set("Preconditioner Method", "SMW");
  //bifurcationList.set("Formulation", "Moore-Spence");
  //bifurcationList.set("Solver Method", "Phipps Bordering");
  //bifurcationList.set("Solver Method", "Salinger Bordering");
  //bifurcationList.set("Initial Null Vector", nullVec);
  //bifurcationList.set("Length Normalization Vector", nullVec);

  // Create predictor sublist
  Teuchos::ParameterList& predictorList = locaParamsList.sublist("Predictor");
  predictorList.set("Method", "Secant");

  // Create step size sublist
  Teuchos::ParameterList& stepSizeList = locaParamsList.sublist("Step Size");
  stepSizeList.set("Method", "Adaptive");
  stepSizeList.set("Initial Step Size", 0.1);
  stepSizeList.set("Min Step Size", 1.0e-3);
  stepSizeList.set("Max Step Size", 2000.0);
  stepSizeList.set("Aggressiveness", 0.1);

  // Create the "Solver" parameters sublist to be used with NOX Solvers
  Teuchos::ParameterList& nlParams = paramList->sublist("NOX");

  // Create the NOX printing parameter list
  Teuchos::ParameterList& nlPrintParams = nlParams.sublist("Printing");
  nlPrintParams.set("MyPID", MyPID);
  nlPrintParams.set("Output Precision", 6);
  nlPrintParams.set("Output Information",
                    NOX::Utils::OuterIteration +
                    NOX::Utils::OuterIterationStatusTest +
                    NOX::Utils::InnerIteration +
                    NOX::Utils::Details +
                    NOX::Utils::LinearSolverDetails +
                    NOX::Utils::Warning +
                    NOX::Utils::StepperIteration +
                    NOX::Utils::StepperDetails +
                    NOX::Utils::StepperParameters);

  // Create the "Linear Solver" sublist for Newton's method
  Teuchos::ParameterList& dirParams = nlParams.sublist("Direction");
  Teuchos::ParameterList& newParams = dirParams.sublist("Newton");
  Teuchos::ParameterList& lsParams = newParams.sublist("Linear Solver");
  lsParams.set("Aztec Solver", "GMRES");
  lsParams.set("Max Iterations", 200);
  lsParams.set("Tolerance", 1e-6);
  lsParams.set("Output Frequency", 50);
  //lsParams.set("Scaling", "None");
  //lsParams.set("Scaling", "Row Sum");
  lsParams.set("Compute Scaling Manually", false);
  lsParams.set("Preconditioner", "Ifpack");
  lsParams.set("Ifpack Preconditioner", "ILU");
  //lsParams.set("Preconditioner", "New Ifpack");
  //Teuchos::ParameterList& ifpackParams = lsParams.sublist("Ifpack");
  //ifpackParams.set("fact: level-of-fill", 1);

  // Create and initialize the parameter vector
  LOCA::ParameterVector pVector;
  pVector.addParameter("Nonlinear Factor",nonlinear_factor);
  pVector.addParameter("Left BC", left_bc);
  pVector.addParameter("Right BC", right_bc);

  // Create the interface between the test problem and the nonlinear solver
  // This is created by the user using inheritance of the abstract base
  // class:
  Teuchos::RCP<Problem_Interface> interface =
    Teuchos::rcp(new Problem_Interface(Problem));
  Teuchos::RCP<LOCA::Epetra::Interface::TimeDependent> iReq = interface;
  Teuchos::RCP<NOX::Epetra::Interface::Jacobian> iJac = interface;

  // Create the Epetra_RowMatrix for the Jacobian/Preconditioner.
  // Non-owning RCP: the matrix is owned by `Problem`.
  Teuchos::RCP<Epetra_RowMatrix> Amat =
    Teuchos::rcp(&Problem.getJacobian(),false);

  // Create scaling object (disabled; kept as a template for enabling it)
  Teuchos::RCP<NOX::Epetra::Scaling> scaling = Teuchos::null;
  // scaling = Teuchos::rcp(new NOX::Epetra::Scaling);
  // Teuchos::RCP<Epetra_Vector> scalingVector =
  //   Teuchos::rcp(new Epetra_Vector(soln.Map()));
  // //scaling->addRowSumScaling(NOX::Epetra::Scaling::Left, scalingVector);
  // scaling->addColSumScaling(NOX::Epetra::Scaling::Right, scalingVector);

  // Create transpose scaling object (disabled)
  Teuchos::RCP<NOX::Epetra::Scaling> trans_scaling = Teuchos::null;
  // trans_scaling = Teuchos::rcp(new NOX::Epetra::Scaling);
  // Teuchos::RCP<Epetra_Vector> transScalingVector =
  //   Teuchos::rcp(new Epetra_Vector(soln.Map()));
  // trans_scaling->addRowSumScaling(NOX::Epetra::Scaling::Right,
  //                                 transScalingVector);
  // trans_scaling->addColSumScaling(NOX::Epetra::Scaling::Left,
  //                                 transScalingVector);
  //bifurcationList.set("Transpose Scaling", trans_scaling);

  // Create the linear systems
  Teuchos::RCP<NOX::Epetra::LinearSystemAztecOO> linsys =
    Teuchos::rcp(new NOX::Epetra::LinearSystemAztecOO(nlPrintParams, lsParams,
                                                      iReq, iJac, Amat, soln,
                                                      scaling));

  // Create the loca vector
  NOX::Epetra::Vector locaSoln(soln);

  // Create Epetra factory
  Teuchos::RCP<LOCA::Abstract::Factory> epetraFactory =
    Teuchos::rcp(new LOCA::Epetra::Factory);

  // Create global data object
  Teuchos::RCP<LOCA::GlobalData> globalData =
    LOCA::createGlobalData(paramList, epetraFactory);

  // Create the Group (linsys is used for both the Jacobian and the
  // shifted/preconditioning system)
  Teuchos::RCP<LOCA::Epetra::Group> grp =
    Teuchos::rcp(new LOCA::Epetra::Group(globalData, nlPrintParams, iReq,
                                         locaSoln, linsys, linsys, pVector));

  grp->computeF();

  // Create the Solver convergence test
  //NOX::StatusTest::NormWRMS wrms(1.0e-2, 1.0e-8);
  Teuchos::RCP<NOX::StatusTest::NormF> wrms =
    Teuchos::rcp(new NOX::StatusTest::NormF(1.0e-12));
  Teuchos::RCP<NOX::StatusTest::MaxIters> maxiters =
    Teuchos::rcp(new NOX::StatusTest::MaxIters(locaStepperList.get("Max Nonlinear Iterations", 10)));
  Teuchos::RCP<NOX::StatusTest::Combo> combo =
    Teuchos::rcp(new NOX::StatusTest::Combo(NOX::StatusTest::Combo::OR));
  combo->addStatusTest(wrms);
  combo->addStatusTest(maxiters);

  // Create the stepper
  LOCA::Stepper stepper(globalData, grp, combo, paramList);
  LOCA::Abstract::Iterator::IteratorStatus status = stepper.run();

  if (status == LOCA::Abstract::Iterator::Finished)
    globalData->locaUtils->out() << "All tests passed" << std::endl;
  else {
    if (globalData->locaUtils->isPrintType(NOX::Utils::Error))
      globalData->locaUtils->out() << "Stepper failed to converge!"
                                   << std::endl;
  }

  // Output the parameter list
  if (globalData->locaUtils->isPrintType(NOX::Utils::StepperParameters)) {
    globalData->locaUtils->out() << std::endl << "Final Parameters"
                                 << std::endl << "****************"
                                 << std::endl;
    stepper.getList()->print(globalData->locaUtils->out());
    globalData->locaUtils->out() << std::endl;
  }

  LOCA::destroyGlobalData(globalData);

#ifdef HAVE_MPI
  MPI_Finalize() ;
#endif

  /* end main */
  return ierr ;
}
// Test driver for EpetraExt::CrsGraph_MapColoring: builds the sparsity
// graph of a 5-point (2D Laplacian-style) stencil on an nx x ny grid and
// runs every coloring algorithm (GREEDY and LUBY at verbosity levels
// 0/1/2, plus the two parallel algorithms under MPI), printing each
// resulting Epetra_MapColoring via printColoring().
// Usage: [-v [nx [ny]]]  (defaults nx = ny = 5)
int main(int argc, char *argv[])
{
#ifdef EPETRA_MPI
  // Initialize MPI
  MPI_Init( &argc, &argv );
  //int size, rank; // Number of MPI processes, My process ID
  //MPI_Comm_size(MPI_COMM_WORLD, &size);
  //MPI_Comm_rank(MPI_COMM_WORLD, &rank);
#else
  //int size = 1; // Serial case (not using MPI)
  //int rank = 0;
#endif

  bool verbose = false;
  int nx = 5;
  int ny = 5;

  if( argc > 1 ) {
    if( argc > 4 ) {
      cout << "Usage: " << argv[0] << " [-v [nx [ny]]]" << endl;
      exit(1);
    }
    int loc = 1;
    // Check if we should print results to standard out
    if(argv[loc][0]=='-' && argv[loc][1]=='v') {
      verbose = true;
      ++loc;
    }
    if (loc < argc) nx = atoi( argv[loc++] );
    if( loc < argc) ny = atoi( argv[loc] );
  }

#ifdef EPETRA_MPI
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  int MyPID = Comm.MyPID();
  int NumProc = Comm.NumProc();

  // verbose1: only the root process prints the banner
  bool verbose1 = false;
  if(verbose) verbose1 = (MyPID==0);

  if(verbose1) cout << EpetraExt::EpetraExt_Version() << endl << endl;

  Comm.Barrier();
  if(verbose) cout << Comm << endl << flush;
  Comm.Barrier();

  int NumGlobalElements = nx * ny;
  if( NumGlobalElements < NumProc ) {
    cout << "NumGlobalElements = " << NumGlobalElements
         << " cannot be < number of processors = " << NumProc;
    exit(1);
  }

  int IndexBase = 0;
  Epetra_Map Map( NumGlobalElements, IndexBase, Comm );

  // Extract the global indices of the elements local to this processor
  int NumMyElements = Map.NumMyElements();
  std::vector<int> MyGlobalElements( NumMyElements );
  Map.MyGlobalElements( &MyGlobalElements[0] );
  if( verbose ) cout << Map;

  // Create the number of non-zeros for a tridiagonal (1D problem) or banded
  // (2D problem) matrix: start from the full 5-point stencil and subtract
  // one per grid boundary the node touches.
  std::vector<int> NumNz( NumMyElements, 5 );
  int global_i;
  int global_j;
  for (int i = 0; i < NumMyElements; ++i) {
    // Recover 2D grid coordinates (global_i, global_j) from the global id.
    global_j = MyGlobalElements[i] / nx;
    global_i = MyGlobalElements[i] - global_j * nx;
    if (global_i == 0)    NumNz[i] -= 1; // By having separate statements,
    if (global_i == nx-1) NumNz[i] -= 1; // this works for 2D as well as 1D
    if (global_j == 0)    NumNz[i] -= 1; // systems (i.e. nx x 1 or 1 x ny)
    if (global_j == ny-1) NumNz[i] -= 1; // or even a 1 x 1 system
  }
  if(verbose) {
    cout << endl << "NumNz: ";
    for (int i = 0; i < NumMyElements; i++) cout << NumNz[i] << " ";
    cout << endl;
  } // end if

  // Create the Epetra Compressed Row Sparse Graph
  Epetra_CrsGraph A( Copy, Map, &NumNz[0] );

  std::vector<int> Indices(5);
  int NumEntries;

  for (int i = 0; i < NumMyElements; ++i ) {
    global_j = MyGlobalElements[i] / nx;
    global_i = MyGlobalElements[i] - global_j * nx;

    // Insert the 5-point stencil neighbors that exist for this node.
    NumEntries = 0;
    // (i,j-1) entry
    if (global_j > 0 && ny > 1)
      Indices[NumEntries++] = global_i + (global_j-1)*nx;
    // (i-1,j) entry
    if (global_i > 0)
      Indices[NumEntries++] = global_i-1 + global_j *nx;
    // (i,j) entry
    Indices[NumEntries++] = MyGlobalElements[i];
    // (i+1,j) entry
    if (global_i < nx-1)
      Indices[NumEntries++] = global_i+1 + global_j *nx;
    // (i,j+1) entry
    if (global_j < ny-1 && ny > 1)
      Indices[NumEntries++] = global_i + (global_j+1)*nx;

    // Insert the global indices
    A.InsertGlobalIndices( MyGlobalElements[i], NumEntries, &Indices[0] );
  } // end i loop

  // Finish up graph construction
  A.FillComplete();

  // Exercise each coloring algorithm.  The transforms return references
  // whose storage is owned by the transform objects, which must therefore
  // outlive the corresponding color maps.
  EpetraExt::CrsGraph_MapColoring
    Greedy0MapColoringTransform( EpetraExt::CrsGraph_MapColoring::GREEDY,
                                 0, false, verbose );
  Epetra_MapColoring & Greedy0ColorMap = Greedy0MapColoringTransform( A );
  printColoring(Greedy0ColorMap, &A,verbose);

  EpetraExt::CrsGraph_MapColoring
    Greedy1MapColoringTransform( EpetraExt::CrsGraph_MapColoring::GREEDY,
                                 1, false, verbose );
  Epetra_MapColoring & Greedy1ColorMap = Greedy1MapColoringTransform( A );
  printColoring(Greedy1ColorMap, &A,verbose);

  EpetraExt::CrsGraph_MapColoring
    Greedy2MapColoringTransform( EpetraExt::CrsGraph_MapColoring::GREEDY,
                                 2, false, verbose );
  Epetra_MapColoring & Greedy2ColorMap = Greedy2MapColoringTransform( A );
  printColoring(Greedy2ColorMap, &A,verbose);

  EpetraExt::CrsGraph_MapColoring
    Lubi0MapColoringTransform( EpetraExt::CrsGraph_MapColoring::LUBY,
                               0, false, verbose );
  Epetra_MapColoring & Lubi0ColorMap = Lubi0MapColoringTransform( A );
  printColoring(Lubi0ColorMap, &A,verbose);

  EpetraExt::CrsGraph_MapColoring
    Lubi1MapColoringTransform( EpetraExt::CrsGraph_MapColoring::LUBY,
                               1, false, verbose );
  Epetra_MapColoring & Lubi1ColorMap = Lubi1MapColoringTransform( A );
  printColoring(Lubi1ColorMap, &A,verbose);

  EpetraExt::CrsGraph_MapColoring
    Lubi2MapColoringTransform( EpetraExt::CrsGraph_MapColoring::LUBY,
                               2, false, verbose );
  Epetra_MapColoring & Lubi2ColorMap = Lubi2MapColoringTransform( A );
  printColoring(Lubi2ColorMap, &A,verbose);

#ifdef EPETRA_MPI
  if( verbose ) cout << "Parallel Map Coloring 1!\n";
  EpetraExt::CrsGraph_MapColoring
    Parallel1MapColoringTransform( EpetraExt::CrsGraph_MapColoring::PSEUDO_PARALLEL,
                                   0, false, verbose );
  Epetra_MapColoring & Parallel1ColorMap = Parallel1MapColoringTransform( A );
  printColoring(Parallel1ColorMap, &A,verbose);

  if( verbose ) cout << "Parallel Map Coloring 2!\n";
  EpetraExt::CrsGraph_MapColoring
    Parallel2MapColoringTransform( EpetraExt::CrsGraph_MapColoring::JONES_PLASSMAN,
                                   0, false, verbose );
  Epetra_MapColoring & Parallel2ColorMap = Parallel2MapColoringTransform( A );
  printColoring(Parallel2ColorMap, &A,verbose);
#endif

#ifdef EPETRA_MPI
  MPI_Finalize();
#endif

  return 0;
}
// Example driver: solves a Galeri 2D Laplacian with AztecOO GMRES
// preconditioned by ML smoothed aggregation.  The system has zero RHS so
// the exact solution is zero; starting from a random LHS, the final norm
// of LHS measures how well the solve converged.
// Usage: [nx]  (grid dimension, default 8; ny = nx * NumProc)
int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  // Creates the linear problem using the Galeri package.
  int nx;
  if (argc > 1)
    nx = (int) strtol(argv[1],NULL,10);
  else
    nx = 8;
  int ny = nx * Comm.NumProc();  // each subdomain is a square

  ParameterList GaleriList;
  GaleriList.set("nx", nx);
  GaleriList.set("ny", ny);
  GaleriList.set("mx", 1);
  GaleriList.set("my", Comm.NumProc());

  Epetra_Map* Map = CreateMap("Cartesian2D", Comm, GaleriList);
  Epetra_CrsMatrix* A = CreateCrsMatrix("Laplace2D", Map, GaleriList);

  // Build a linear system with trivial solution, using a random vector
  // as starting solution.
  Epetra_Vector LHS(*Map); LHS.Random();
  Epetra_Vector RHS(*Map); RHS.PutScalar(0.0);

  Epetra_LinearProblem Problem(A, &LHS, &RHS);

  // As we wish to use AztecOO, we need to construct a solver object
  // for this problem
  AztecOO solver(Problem);

  // =========================== begin of ML part ===========================

  // create a parameter list for ML options
  ParameterList MLList;

  // Sets default parameters for classic smoothed aggregation. After this
  // call, MLList contains the default values for the ML parameters,
  // as required by typical smoothed aggregation for symmetric systems.
  // Other sets of parameters are available for non-symmetric systems
  // ("DD" and "DD-ML"), and for the Maxwell equations ("maxwell").
  ML_Epetra::SetDefaults("SA",MLList);

  // overwrite some parameters. Please refer to the user's guide
  // for more information
  // some of the parameters do not differ from their default value,
  // and they are here reported for the sake of clarity

  // output level, 0 being silent and 10 verbose
  MLList.set("output", 10);
  // maximum number of levels
  MLList.set("max levels",5);
  // set finest level to 0
  MLList.set("increasing or decreasing","increasing");

  // use Uncoupled scheme to create the aggregate
  MLList.set("aggregation: type", "Uncoupled");

  // smoother is symmetric Gauss-Seidel. Example file
  // `ml/examples/TwoLevelDD/ml_2level_DD.cpp' shows how to use
  // AZTEC's preconditioners as smoothers
  MLList.set("smoother: type","symmetric Gauss-Seidel");

  // use both pre and post smoothing
  MLList.set("smoother: pre or post", "both");

  // solve with serial direct solver KLU
  MLList.set("coarse: type","Amesos-KLU");

  // Creates the preconditioning object. We suggest to use `new' and
  // `delete' because the destructor contains some calls to MPI (as
  // required by ML and possibly Amesos). This is an issue only if the
  // destructor is called **after** MPI_Finalize().
  ML_Epetra::MultiLevelPreconditioner* MLPrec =
    new ML_Epetra::MultiLevelPreconditioner(*A, MLList);

  // =========================== end of ML part =============================

  // tell AztecOO to use the ML preconditioner, specify the solver
  // and the output, then solve with 500 maximum iterations and 1e-12
  // of tolerance (see AztecOO's user guide for more details)
  solver.SetPrecOperator(MLPrec);
  solver.SetAztecOption(AZ_solver, AZ_gmres);
  solver.SetAztecOption(AZ_output, 32);
  solver.Iterate(500, 1e-12);

  // destroy the preconditioner (before MPI_Finalize — see note above)
  delete MLPrec;

  // compute the real residual
  // NOTE(review): this is ||LHS||_2, the solution-error norm.  Because
  // RHS == 0 the exact solution is 0, so this is a valid convergence
  // measure, but the printed label "||b-Ax||_2" is only proportional to it
  // through A — confirm whether a true residual (A*x) was intended.
  double residual;
  LHS.Norm2(&residual);

  if( Comm.MyPID()==0 ) {
    cout << "||b-Ax||_2 = " << residual << endl;
  }

  // for testing purposes
  if (residual > 1e-5)
    exit(EXIT_FAILURE);

  delete A;
  delete Map;

#ifdef HAVE_MPI
  MPI_Finalize();
#endif
  return(EXIT_SUCCESS);
}
int main(int argc, char *argv[]) { #ifdef HAVE_MPI MPI_Init(&argc, &argv); Epetra_MpiComm Comm(MPI_COMM_WORLD); #else Epetra_SerialComm Comm; #endif int NumGlobalRows = 10000; // must be a square for the // matrix generator. int NumVectors = 1; // number of rhs's. Amesos // supports single or // multiple RHS. // Initializes an Gallery object. // NOTE: this example uses the Trilinos package Galeri // to define in an easy way the linear system matrix. // The user can easily change the matrix type; consult the // Galeri documentation for mode details. // // Here the problem has size nx x ny, and the 2D Cartesian // grid is divided into mx x my subdomains. ParameterList GaleriList; GaleriList.set("nx", 100); GaleriList.set("ny", 100 * Comm.NumProc()); GaleriList.set("mx", 1); GaleriList.set("my", Comm.NumProc()); Epetra_Map* Map = CreateMap("Cartesian2D", Comm, GaleriList); Epetra_CrsMatrix* Matrix = CreateCrsMatrix("Laplace2D", Map, GaleriList); // Creates vectors for right-hand side and solution, and the // linear problem container. Epetra_Vector LHS(*Map); LHS.PutScalar(0.0); // zero solution Epetra_Vector RHS(*Map); RHS.Random(); // random rhs Epetra_LinearProblem Problem(Matrix, &LHS, &RHS); // ===================================================== // // B E G I N N I N G O F T H E AM E S O S P A R T // // ===================================================== // // Initializes the Amesos solver. This is the base class for // Amesos. It is a pure virtual class (hence objects of this // class cannot be allocated, and can exist only as pointers // or references). // Amesos_BaseSolver* Solver; // Initializes the Factory. Factory is a function class (a // class that contains methods only, no data). Factory // will be used to create Amesos_BaseSolver derived objects. // Amesos Factory; Solver = Factory.Create("Klu", Problem); // Parameters for all Amesos solvers are set through // a call to SetParameters(List). 
List is a Teuchos // parameter list (Amesos requires Teuchos to compile). // In most cases, users can proceed without calling // SetParameters(). Please refer to the Amesos guide // for more details. // NOTE: you can skip this call; then the solver will // use default parameters. // // Parameters in the list are set using // List.set("parameter-name", ParameterValue); // In this example, we specify that we want more output. // Teuchos::ParameterList List; List.set("PrintTiming", true); List.set("PrintStatus", true); Solver->SetParameters(List); // Now we are ready to solve. Generally, users will // call SymbolicFactorization(), then NumericFactorization(), // and finally Solve(). Note that: // - the numerical values of the linear system matrix // are *not* required before NumericFactorization(); // - solution and rhs are *not* required before calling // Solve(). if (Comm.MyPID() == 0) cout << "Starting symbolic factorization..." << endl; Solver->SymbolicFactorization(); // you can change the matrix values here if (Comm.MyPID() == 0) cout << "Starting numeric factorization..." << endl; Solver->NumericFactorization(); // you can change LHS and RHS here if (Comm.MyPID() == 0) cout << "Starting solution phase..." << endl; Solver->Solve(); // =========================================== // // E N D O F T H E A M E S O S P A R T // // =========================================== // // delete Solver. MPI calls can occur. delete Solver; // delete the objects created by Galeri delete Matrix; delete Map; #ifdef HAVE_MPI MPI_Finalize(); #endif return(EXIT_SUCCESS); } // end of main()
int main(int argc, char *argv[]) { // Initialize MPI #ifdef HAVE_MPI MPI_Init(&argc,&argv); #endif // Create a communicator for Epetra objects #ifdef HAVE_MPI Epetra_MpiComm Comm( MPI_COMM_WORLD ); #else Epetra_SerialComm Comm; #endif // Get the process ID and the total number of processors int MyPID = Comm.MyPID(); int NumProc = Comm.NumProc(); // Check verbosity level bool verbose = false; if (argc > 1) if (argv[1][0]=='-' && argv[1][1]=='v') verbose = true; // Get the number of elements from the command line int NumGlobalElements = 0; if ((argc > 2) && (verbose)) NumGlobalElements = atoi(argv[2]) + 1; else if ((argc > 1) && (!verbose)) NumGlobalElements = atoi(argv[1]) + 1; else NumGlobalElements = 101; // The number of unknowns must be at least equal to the number of processors. if (NumGlobalElements < NumProc) { cout << "numGlobalBlocks = " << NumGlobalElements << " cannot be < number of processors = " << NumProc << endl; cout << "Test failed!" << endl; throw "NOX Error"; } // Create the interface between NOX and the application // This object is derived from NOX::Epetra::Interface Teuchos::RCP<Interface> interface = Teuchos::rcp(new Interface(NumGlobalElements, Comm)); // Get the vector from the Problem Teuchos::RCP<Epetra_Vector> soln = interface->getSolution(); Teuchos::RCP<NOX::Epetra::Vector> noxSoln = Teuchos::rcp(new NOX::Epetra::Vector(soln, NOX::Epetra::Vector::CreateView)); // Set the PDE factor (for nonlinear forcing term). This could be specified // via user input. 
interface->setPDEfactor(1000.0); // Set the initial guess soln->PutScalar(1.0); // Begin Nonlinear Solver ************************************ // Create the top level parameter list Teuchos::RCP<Teuchos::ParameterList> nlParamsPtr = Teuchos::rcp(new Teuchos::ParameterList); Teuchos::ParameterList& nlParams = *(nlParamsPtr.get()); // Set the nonlinear solver method nlParams.set("Nonlinear Solver", "Line Search Based"); // Set the printing parameters in the "Printing" sublist Teuchos::ParameterList& printParams = nlParams.sublist("Printing"); printParams.set("MyPID", MyPID); printParams.set("Output Precision", 3); printParams.set("Output Processor", 0); if (verbose) printParams.set("Output Information", NOX::Utils::OuterIteration + NOX::Utils::OuterIterationStatusTest + NOX::Utils::InnerIteration + NOX::Utils::LinearSolverDetails + NOX::Utils::Parameters + NOX::Utils::Details + NOX::Utils::Warning + NOX::Utils::Debug + NOX::Utils::TestDetails + NOX::Utils::Error); else printParams.set("Output Information", NOX::Utils::Error + NOX::Utils::TestDetails); // Create a print class for controlling output below NOX::Utils printing(printParams); // Sublist for line search Teuchos::ParameterList& searchParams = nlParams.sublist("Line Search"); searchParams.set("Method", "NonlinearCG"); // "Full Step" can also work well sometimes // Sublist for direction Teuchos::ParameterList& dirParams = nlParams.sublist("Direction"); dirParams.set("Method", "NonlinearCG"); Teuchos::ParameterList& nonlinearcg = dirParams.sublist("Nonlinear CG"); nonlinearcg.set("Restart Frequency", 100); nonlinearcg.set("Precondition", "On"); nonlinearcg.set("Orthogonalize", "Fletcher-Reeves"); // or "Polak-Ribiere" // Sublist for linear solver for the Newton method Teuchos::ParameterList& lsParams = nonlinearcg.sublist("Linear Solver"); lsParams.set("Aztec Solver", "GMRES"); //lsParams.set("Preconditioner Operator", "Use Jacobian"); lsParams.set("Preconditioner", "AztecOO"); lsParams.set("AztecOO 
Preconditioner Iterations", 15); lsParams.set("Preconditioner Reuse Policy", "Recompute"); // Let's force all status tests to do a full check nlParams.sublist("Solver Options").set("Status Test Check Type", "Complete"); // 1. User supplied (Epetra_RowMatrix) Teuchos::RCP<Epetra_RowMatrix> Analytic = interface->getJacobian(); // Create the linear system Teuchos::RCP<NOX::Epetra::Interface::Required> iReq = interface; Teuchos::RCP<NOX::Epetra::Interface::Jacobian> iJac = interface; Teuchos::RCP<NOX::Epetra::LinearSystemAztecOO> linSys = Teuchos::rcp(new NOX::Epetra::LinearSystemAztecOO(printParams, lsParams, interface, iJac, Analytic, *soln)); // Create the Group NOX::Epetra::Vector initialGuess(soln, NOX::Epetra::Vector::CreateView); Teuchos::RCP<NOX::Epetra::Group> grpPtr = Teuchos::rcp(new NOX::Epetra::Group(printParams, iReq, initialGuess, linSys)); // Create the convergence tests Teuchos::RCP<NOX::StatusTest::NormF> absresid = Teuchos::rcp(new NOX::StatusTest::NormF(1.0e-8)); Teuchos::RCP<NOX::StatusTest::MaxIters> maxiters = Teuchos::rcp(new NOX::StatusTest::MaxIters(20)); Teuchos::RCP<NOX::StatusTest::FiniteValue> fv = Teuchos::rcp(new NOX::StatusTest::FiniteValue); Teuchos::RCP<NOX::StatusTest::Combo> combo = Teuchos::rcp(new NOX::StatusTest::Combo(NOX::StatusTest::Combo::OR)); combo->addStatusTest(fv); combo->addStatusTest(absresid); combo->addStatusTest(maxiters); // Create the solver Teuchos::RCP<NOX::Solver::Generic> solver = NOX::Solver::buildSolver(grpPtr, combo, nlParamsPtr); NOX::StatusTest::StatusType solvStatus = solver->solve(); // End Nonlinear Solver ************************************** // Get the Epetra_Vector with the final solution from the solver const NOX::Epetra::Group& finalGroup = dynamic_cast<const NOX::Epetra::Group&>(solver->getSolutionGroup()); const Epetra_Vector& finalSolution = (dynamic_cast<const NOX::Epetra::Vector&>(finalGroup.getX())). 
getEpetraVector(); // Output the parameter list if (verbose) { if (printing.isPrintType(NOX::Utils::Parameters)) { printing.out() << endl << "Final Parameters" << endl << "****************" << endl; solver->getList().print(printing.out()); printing.out() << endl; } } // Print solution char file_name[25]; FILE *ifp; int NumMyElements = soln->Map().NumMyElements(); (void) sprintf(file_name, "output.%d",MyPID); ifp = fopen(file_name, "w"); for (int i=0; i<NumMyElements; i++) fprintf(ifp, "%d %E\n", soln->Map().MinMyGID()+i, finalSolution[i]); fclose(ifp); // Tests int status = 0; // Converged // 1. Convergence if (solvStatus != NOX::StatusTest::Converged) { status = 1; if (printing.isPrintType(NOX::Utils::Error)) printing.out() << "Nonlinear solver failed to converge!" << endl; } // 2. Nonlinear solve iterations (10) if (const_cast<Teuchos::ParameterList&>(solver->getList()).sublist("Output").get("Nonlinear Iterations", 0) > 13) status = 2; // Summarize test results if (status == 0) printing.out() << "Test passed!" << endl; else printing.out() << "Test failed!" << endl; #ifdef HAVE_MPI MPI_Finalize(); #endif printing.out() << "Status = " << status << endl; // Final return value (0 = successfull, non-zero = failure) return status; }
int main(int argc, char *argv[]) { #ifdef EPETRA_MPI MPI_Init(&argc,&argv); Epetra_MpiComm Comm(MPI_COMM_WORLD); #else Epetra_SerialComm Comm; #endif // `Laplace2D' is a symmetric matrix; an example of non-symmetric // matrices is `Recirc2D' (advection-diffusion in a box, with // recirculating flow). The grid has nx x ny nodes, divided into // mx x my subdomains, each assigned to a different processor. int nx = 8; int ny = 8 * Comm.NumProc(); ParameterList GaleriList; GaleriList.set("nx", nx); GaleriList.set("ny", ny); GaleriList.set("mx", 1); GaleriList.set("my", Comm.NumProc()); Epetra_Map* Map = CreateMap("Cartesian2D", Comm, GaleriList); Epetra_CrsMatrix* A = CreateCrsMatrix("Laplace2D", Map, GaleriList); // use the following Galeri function to get the // coordinates for a Cartesian grid. Epetra_MultiVector* Coord = CreateCartesianCoordinates("2D", &(A->Map()), GaleriList); double* x_coord = (*Coord)[0]; double* y_coord = (*Coord)[1]; // Create the linear problem, with a zero solution Epetra_Vector LHS(*Map); LHS.Random(); Epetra_Vector RHS(*Map); RHS.PutScalar(0.0); Epetra_LinearProblem Problem(A, &LHS, &RHS); // As we wish to use AztecOO, we need to construct a solver object for this problem AztecOO solver(Problem); // =========================== begin of ML part =========================== // create a parameter list for ML options ParameterList MLList; // set defaults for classic smoothed aggregation. ML_Epetra::SetDefaults("SA",MLList); // use user's defined aggregation scheme to create the aggregates // 1.- set "user" as aggregation scheme (for all levels, or for // a specify level only) MLList.set("aggregation: type", "user"); // 2.- set the label (for output) ML_SetUserLabel(UserLabel); // 3.- set the aggregation scheme (see function above) ML_SetUserPartitions(UserPartitions); // 4.- set the coordinates. 
MLList.set("x-coordinates", x_coord); MLList.set("y-coordinates", y_coord); MLList.set("aggregation: dimensions", 2); // also setup some variables to visualize the aggregates // (more details are reported in example `ml_viz.cpp'. MLList.set("viz: enable", true); // now we create the preconditioner ML_Epetra::MultiLevelPreconditioner * MLPrec = new ML_Epetra::MultiLevelPreconditioner(*A, MLList); MLPrec->VisualizeAggregates(); // tell AztecOO to use this preconditioner, then solve solver.SetPrecOperator(MLPrec); // =========================== end of ML part ============================= solver.SetAztecOption(AZ_solver, AZ_cg_condnum); solver.SetAztecOption(AZ_output, 32); // solve with 500 iterations and 1e-12 tolerance solver.Iterate(500, 1e-12); delete MLPrec; // compute the real residual double residual; LHS.Norm2(&residual); if (Comm.MyPID() == 0) { cout << "||b-Ax||_2 = " << residual << endl; } delete Coord; delete A; delete Map; if (residual > 1e-3) exit(EXIT_FAILURE); #ifdef EPETRA_MPI MPI_Finalize(); #endif exit(EXIT_SUCCESS); }
int main(int argc, char *argv[]) { int i; #ifdef EPETRA_MPI // Initialize MPI MPI_Init(&argc,&argv); Epetra_MpiComm comm(MPI_COMM_WORLD); #else Epetra_SerialComm comm; #endif // Uncomment to debug in parallel int tmp; if (comm.MyPID()==0) cin >> tmp; comm.Barrier(); bool verbose = false; // Check if we should print results to standard out if (argc>1) if (argv[1][0]=='-' && argv[1][1]=='v') verbose = true; if (!verbose) comm.SetTracebackMode(0); // This should shut down any error traceback reporting if (verbose) cout << comm << endl << flush; if (verbose) verbose = (comm.MyPID()==0); if (verbose) cout << EpetraExt::EpetraExt_Version() << endl << endl; int nx = 128; int ny = comm.NumProc()*nx; // Scale y grid with number of processors // Create funky stencil to make sure the matrix is non-symmetric (transpose non-trivial): // (i-1,j-1) (i-1,j ) // (i ,j-1) (i ,j ) (i ,j+1) // (i+1,j-1) (i+1,j ) int npoints = 7; int xoff[] = {-1, 0, 1, -1, 0, 1, 0}; int yoff[] = {-1, -1, -1, 0, 0, 0, 1}; Epetra_Map * map; Epetra_CrsMatrix * A; Epetra_Vector * x, * b, * xexact; Trilinos_Util_GenerateCrsProblem(nx, ny, npoints, xoff, yoff, comm, map, A, x, b, xexact); if (nx<8) { cout << *A << endl; cout << "X exact = " << endl << *xexact << endl; cout << "B = " << endl << *b << endl; } // Construct transposer Epetra_Time timer(comm); double start = timer.ElapsedTime(); //bool IgnoreNonLocalCols = false; EpetraExt::RowMatrix_Transpose transposer; if (verbose) cout << "\nTime to construct transposer = " << timer.ElapsedTime() - start << endl; Epetra_CrsMatrix & transA = dynamic_cast<Epetra_CrsMatrix&>(transposer(*A)); start = timer.ElapsedTime(); if (verbose) cout << "\nTime to create transpose matrix = " << timer.ElapsedTime() - start << endl; // Now test output of transposer by performing matvecs int ierr = 0; ierr += checkResults(A, &transA, xexact, verbose); // Now change values in original matrix and test update facility of transposer // Add 2 to the diagonal of each row double 
Value = 2.0; for (i=0; i< A->NumMyRows(); i++) A->SumIntoMyValues(i, 1, &Value, &i); start = timer.ElapsedTime(); transposer.fwd(); if (verbose) cout << "\nTime to update transpose matrix = " << timer.ElapsedTime() - start << endl; ierr += checkResults(A, &transA, xexact, verbose); delete A; delete b; delete x; delete xexact; delete map; if (verbose) cout << endl << "Checking transposer for VbrMatrix objects" << endl<< endl; int nsizes = 4; int sizes[] = {4, 6, 5, 3}; Epetra_VbrMatrix * Avbr; Epetra_BlockMap * bmap; Trilinos_Util_GenerateVbrProblem(nx, ny, npoints, xoff, yoff, nsizes, sizes, comm, bmap, Avbr, x, b, xexact); if (nx<8) { cout << *Avbr << endl; cout << "X exact = " << endl << *xexact << endl; cout << "B = " << endl << *b << endl; } start = timer.ElapsedTime(); EpetraExt::RowMatrix_Transpose transposer1; Epetra_CrsMatrix & transA1 = dynamic_cast<Epetra_CrsMatrix&>(transposer1(*Avbr)); if (verbose) cout << "\nTime to create transpose matrix = " << timer.ElapsedTime() - start << endl; // Now test output of transposer by performing matvecs ; ierr += checkResults(Avbr, &transA1, xexact, verbose); // Now change values in original matrix and test update facility of transposer // Scale matrix on the left by rowsums Epetra_Vector invRowSums(Avbr->RowMap()); Avbr->InvRowSums(invRowSums); Avbr->LeftScale(invRowSums); start = timer.ElapsedTime(); transposer1.fwd(); if (verbose) cout << "\nTime to update transpose matrix = " << timer.ElapsedTime() - start << endl; ierr += checkResults(Avbr, &transA1, xexact, verbose); delete Avbr; delete b; delete x; delete xexact; delete bmap; #ifdef EPETRA_MPI MPI_Finalize(); #endif return ierr; }
int main(int argc, char *argv[]) #ifdef HAVE_MPI { int ierr = 0; int MyPID = 0; try { // scale factor to test arc-length scaling double scale = 1.0; // Initialize MPI #ifdef HAVE_MPI MPI_Init(&argc,&argv); #endif // Create a communicator for Epetra objects #ifdef HAVE_MPI int spatialProcs = 1; int numTimeSteps = 4; Teuchos::RCP<EpetraExt::MultiMpiComm> globalComm = Teuchos::rcp(new EpetraExt::MultiMpiComm(MPI_COMM_WORLD, spatialProcs, numTimeSteps)); Epetra_Comm& Comm = globalComm->SubDomainComm(); #else Epetra_SerialComm Comm; #endif // Get the process ID and the total number of processors MyPID = Comm.MyPID(); int NumProc = Comm.NumProc(); // Check for verbose output bool verbose = false; if (argc>1) if (argv[1][0]=='-' && argv[1][1]=='v') verbose = true; // Get the number of elements from the command line int NumGlobalElements = 0; if ((argc > 2) && (verbose)) NumGlobalElements = atoi(argv[2]) + 1; else if ((argc > 1) && (!verbose)) NumGlobalElements = atoi(argv[1]) + 1; else NumGlobalElements = 101; // The number of unknowns must be at least equal to the // number of processors. if (NumGlobalElements < NumProc) { cout << "numGlobalBlocks = " << NumGlobalElements << " cannot be < number of processors = " << NumProc << endl; exit(1); } // Create the FiniteElementProblem class. This creates all required // Epetra objects for the problem and allows calls to the // function (RHS) and Jacobian evaluation routines. 
Tcubed_FiniteElementProblem Problem(NumGlobalElements, Comm, scale); // Get the vector from the Problem Epetra_Vector& soln = Problem.getSolution(); soln.PutScalar(1.0); // Construct multipoint initial guess with same value Epetra_MultiVector initGuess(soln.Map(), globalComm->NumTimeStepsOnDomain()); for (int i=0; i<globalComm->NumTimeStepsOnDomain(); i++) *(initGuess(i)) = soln; // Begin LOCA Solver ************************************ // Create parameter list Teuchos::RCP<Teuchos::ParameterList> paramList = Teuchos::rcp(new Teuchos::ParameterList); // Create LOCA sublist Teuchos::ParameterList& locaParamsList = paramList->sublist("LOCA"); // Create the stepper sublist and set the stepper parameters Teuchos::ParameterList& locaStepperList = locaParamsList.sublist("Stepper"); locaStepperList.set("Continuation Method", "Natural"); locaStepperList.set("Bordered Solver Method", "Householder"); locaStepperList.set("Continuation Parameter", "Right BC"); locaStepperList.set("Initial Value", 0.1/scale); locaStepperList.set("Max Value", 100.0/scale); locaStepperList.set("Min Value", 0.05/scale); locaStepperList.set("Max Steps", 10); locaStepperList.set("Max Nonlinear Iterations", 15); #ifdef HAVE_LOCA_ANASAZI // Create Anasazi Eigensolver sublist (needs --with-loca-anasazi) locaStepperList.set("Compute Eigenvalues",true); Teuchos::ParameterList& aList = locaStepperList.sublist("Eigensolver"); aList.set("Method", "Anasazi"); if (!verbose) aList.set("Verbosity", Anasazi::Errors); #else locaStepperList.set("Compute Eigenvalues",false); #endif // Create predictor sublist Teuchos::ParameterList& predictorList = locaParamsList.sublist("Predictor"); predictorList.set("Method", "Tangent"); // Create step size sublist Teuchos::ParameterList& stepSizeList = locaParamsList.sublist("Step Size"); stepSizeList.set("Initial Step Size", 0.1/scale); stepSizeList.set("Min Step Size", 1.0e-3/scale); stepSizeList.set("Max Step Size", 2000.0/scale); stepSizeList.set("Aggressiveness", 0.0); // 
Create the "Solver" parameters sublist to be used with NOX Solvers Teuchos::ParameterList& nlParams = paramList->sublist("NOX"); // Create the NOX printing parameter list Teuchos::ParameterList& nlPrintParams = nlParams.sublist("Printing"); nlPrintParams.set("MyPID", MyPID); if (verbose) nlPrintParams.set("Output Information", NOX::Utils::OuterIteration + NOX::Utils::OuterIterationStatusTest + NOX::Utils::InnerIteration + //NOX::Utils::Details + NOX::Utils::Warning + NOX::Utils::TestDetails + NOX::Utils::Error + NOX::Utils::StepperIteration + NOX::Utils::StepperDetails + NOX::Utils::StepperParameters); else nlPrintParams.set("Output Information", NOX::Utils::Error); // Create the "Linear Solver" sublist Teuchos::ParameterList& dirParams = nlParams.sublist("Direction"); Teuchos::ParameterList& newParams = dirParams.sublist("Newton"); Teuchos::ParameterList& lsParams = newParams.sublist("Linear Solver"); lsParams.set("Aztec Solver", "GMRES"); lsParams.set("Max Iterations", 100); lsParams.set("Tolerance", 1e-4); if (verbose) lsParams.set("Output Frequency", 1); else lsParams.set("Output Frequency", 0); lsParams.set("Scaling", "None"); lsParams.set("Preconditioner", "Ifpack"); // Create and initialize the parameter vector LOCA::ParameterVector pVector; pVector.addParameter("Nonlinear Factor",1.0); pVector.addParameter("Left BC", 0.0); pVector.addParameter("Right BC", 0.1); // Create the interface between the test problem and the nonlinear solver // This is created by the user using inheritance of the abstract base // class: Teuchos::RCP<Problem_Interface_MP> interface = Teuchos::rcp(new Problem_Interface_MP(Problem)); Teuchos::RCP<LOCA::Epetra::Interface::Required> iReq = interface; Teuchos::RCP<NOX::Epetra::Interface::Jacobian> iJac = interface; // Create the Epetra_RowMatrixfor the Jacobian/Preconditioner Teuchos::RCP<Epetra_RowMatrix> Amat = Teuchos::rcp(&Problem.getJacobian(),false); // For MultiPoint, create super-interface 
Teuchos::RCP<LOCA::Epetra::Interface::MultiPoint> iMP = Teuchos::rcp(new LOCA::Epetra::Interface::MultiPoint(iReq, iJac, initGuess, Amat, globalComm)); // Get Block matrix and vector from this interface Teuchos::RCP<Epetra_RowMatrix> AMP = Teuchos::rcp(&(iMP->getJacobian()),false); Teuchos::RCP<Epetra_Vector> solnMP = Teuchos::rcp(&(iMP->getSolution()),false); iReq = iMP; iJac = iMP; // Create the linear system, now with MultiPoint system Teuchos::RCP<NOX::Epetra::LinearSystemAztecOO> linsys = Teuchos::rcp(new NOX::Epetra::LinearSystemAztecOO(nlPrintParams, lsParams, iReq, iJac, AMP, solnMP)); // Create the loca vector NOX::Epetra::Vector locaSoln(solnMP); // Create Epetra factory Teuchos::RCP<LOCA::Abstract::Factory> epetraFactory = Teuchos::rcp(new LOCA::Epetra::Factory); // Create global data object Teuchos::RCP<LOCA::GlobalData> globalData = LOCA::createGlobalData(paramList, epetraFactory); // Create the Group Teuchos::RCP<LOCA::Epetra::Group> grp = Teuchos::rcp(new LOCA::Epetra::Group(globalData, nlPrintParams, iMP, locaSoln, linsys, pVector)); grp->computeF(); // Create the Solver convergence test Teuchos::RCP<NOX::StatusTest::NormF> wrms = Teuchos::rcp(new NOX::StatusTest::NormF(1.0e-8)); Teuchos::RCP<NOX::StatusTest::MaxIters> maxiters = Teuchos::rcp(new NOX::StatusTest::MaxIters(15)); Teuchos::RCP<NOX::StatusTest::Combo> combo = Teuchos::rcp(new NOX::StatusTest::Combo(NOX::StatusTest::Combo::OR)); combo->addStatusTest(wrms); combo->addStatusTest(maxiters); // Create the stepper LOCA::Stepper stepper(globalData, grp, combo, paramList); LOCA::Abstract::Iterator::IteratorStatus status = stepper.run(); if (status != LOCA::Abstract::Iterator::Finished) { // ierr = 1; //Not an error in this case if (globalData->locaUtils->isPrintType(NOX::Utils::Warning)) globalData->locaUtils->out() << "Stepper failed to converge!" 
<< std::endl; } // Get the final solution from the stepper Teuchos::RCP<const LOCA::Epetra::Group> finalGroup = Teuchos::rcp_dynamic_cast<const LOCA::Epetra::Group>(stepper.getSolutionGroup()); const NOX::Epetra::Vector& finalSolution = dynamic_cast<const NOX::Epetra::Vector&>(finalGroup->getX()); // Output the parameter list if (globalData->locaUtils->isPrintType(NOX::Utils::StepperParameters)) { globalData->locaUtils->out() << std::endl << "Final Parameters" << std::endl << "****************" << std::endl; stepper.getList()->print(globalData->locaUtils->out()); globalData->locaUtils->out() << std::endl; } // Check some statistics on the solution NOX::TestCompare testCompare(globalData->locaUtils->out(), *(globalData->locaUtils)); if (globalData->locaUtils->isPrintType(NOX::Utils::TestDetails)) globalData->locaUtils->out() << std::endl << "***** Checking solution statistics *****" << std::endl; // Check number of steps int numSteps = stepper.getStepNumber(); int numSteps_expected = 11; ierr += testCompare.testValue(numSteps, numSteps_expected, 0.0, "number of continuation steps", NOX::TestCompare::Absolute); // Check number of failed steps int numFailedSteps = stepper.getNumFailedSteps(); int numFailedSteps_expected = 0; ierr += testCompare.testValue(numFailedSteps, numFailedSteps_expected, 0.0, "number of failed continuation steps", NOX::TestCompare::Absolute); // Check final value of continuation parameter double right_bc_final = finalGroup->getParam("Right BC"); double right_bc_expected = 1.1; ierr += testCompare.testValue(right_bc_final, right_bc_expected, 1.0e-14, "final value of continuation parameter", NOX::TestCompare::Relative); // Check norm of solution double norm_x = finalSolution.norm(); double norm_x_expected = 13.485934773212; ierr += testCompare.testValue(norm_x, norm_x_expected, 1.0e-7, "norm of final solution", NOX::TestCompare::Relative); LOCA::destroyGlobalData(globalData); } catch (std::exception& e) { cout << e.what() << endl; ierr = 1; } 
catch (const char *s) { cout << s << endl; ierr = 1; } catch (...) { cout << "Caught unknown exception!" << endl; ierr = 1; } if (MyPID == 0) { if (ierr == 0) cout << "All tests passed!" << endl; else cout << ierr << " test(s) failed!" << endl; } #ifdef HAVE_MPI MPI_Finalize() ; #endif /* end main */ return ierr ; }