Example #1
int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

    // this example is in serial only
    if (Comm.NumProc()>1) exit(0);

    FileGrid Grid(Comm, "Hex_3D.grid");
    
    // create a list of all nodes that are linked to a face
    // we have 4 interfaces here, each with 2 sides:
    // with tags 1/2, 11/12, 21/22, 31/32
    const int ninter = 4;
    vector<map<int,int> > nodes(ninter*2);
    for (int i=0; i<Grid.NumMyBoundaryFaces(); ++i)
    {
      int tag;
      int nodeids[4];
      Grid.FaceVertices(i,tag,nodeids);
      if (tag==1)
      {
        for (int j=0; j<4; ++j)
          nodes[0][nodeids[j]] = nodeids[j];
      }
      else if (tag==2)
      {
        for (int j=0; j<4; ++j)
          nodes[1][nodeids[j]] = nodeids[j];
      }
      else if (tag==11)
      {
        for (int j=0; j<4; ++j)
          nodes[2][nodeids[j]] = nodeids[j];
      }
      else if (tag==12)
      {
        for (int j=0; j<4; ++j)
          nodes[3][nodeids[j]] = nodeids[j];
      }
      else if (tag==21)
      {
        for (int j=0; j<4; ++j)
          nodes[4][nodeids[j]] = nodeids[j];
      }
      else if (tag==22)
      {
        for (int j=0; j<4; ++j)
          nodes[5][nodeids[j]] = nodeids[j];
      }
      else if (tag==31)
      {
        for (int j=0; j<4; ++j)
          nodes[6][nodeids[j]] = nodeids[j];
      }
      else if (tag==32)
      {
        for (int j=0; j<4; ++j)
          nodes[7][nodeids[j]] = nodeids[j];
      }
      else 
        continue;
    }

    // ------------------------------------------------------------- //
    // create 4 empty MOERTEL::Interface instances
    // ------------------------------------------------------------- //
    int printlevel = 3; // ( moertel takes values 0 - 10 )
    //int printlevel = 8; // ( moertel takes values 0 - 10 ) // GAH gives info about intersection root finding
    vector<RefCountPtr<MOERTEL::Interface> > interfaces(ninter);
    for (int i=0; i<ninter; ++i) 
      interfaces[i] = rcp(new MOERTEL::Interface(i,false,Comm,printlevel));

    // ------------------------------------------------------------- //
    // Add nodes on both sides of interface to interfaces
    // loop over all nodes in the maps and add them
    // to the interface with unique ids
    // ------------------------------------------------------------- //
    for (int i=0; i<ninter; ++i)
    {
      map<int,int>::iterator curr;
      for (int j=0; j<2; ++j)
        for (curr = nodes[i*2+j].begin(); curr != nodes[i*2+j].end(); ++curr)
        {
          // get unique node id
          int nodeid = curr->second;
          // get node coordinates
          double coord[3];
          Grid.VertexCoord(nodeid,coord);
          // create a moertel node
          MOERTEL::Node node(nodeid,coord,1,&nodeid,false,printlevel);
          // add node to interface i on side j
          interfaces[i]->AddNode(node,j);
        }
    } 

    // ------------------------------------------------------------- //
    // add segments on both sides of the interface to the interface
    // ------------------------------------------------------------- //
    for (int i=0; i<Grid.NumMyBoundaryFaces(); ++i)
    {
      int tag;
      int nodeids[4];
      Grid.FaceVertices(i,tag,nodeids);
      if (tag == 0)
        continue;
      // create a segment (galeri calls it a face)
      MOERTEL::Segment_BiLinearQuad segment(i,4,nodeids,printlevel);
      
      if (tag==1)
        interfaces[0]->AddSegment(segment,0);
      else if (tag==2)
        interfaces[0]->AddSegment(segment,1);
      else if (tag==11)
        interfaces[1]->AddSegment(segment,0);
      else if (tag==12)
        interfaces[1]->AddSegment(segment,1);
      else if (tag==21)
        interfaces[2]->AddSegment(segment,0);
      else if (tag==22)
        interfaces[2]->AddSegment(segment,1);
      else if (tag==31)
        interfaces[3]->AddSegment(segment,0);
      else if (tag==32)
        interfaces[3]->AddSegment(segment,1);
      else
      {
        cout << "Face with unknown tag " << tag << endl;
        exit(EXIT_FAILURE);
      }
    }

    // ------------------------------------------------------------- //
    // choose the mortar side of the interface (0 or 1)
    // choose the finer side here, which is 0
    // ------------------------------------------------------------- //
    for (int i=0; i<ninter; ++i)
      interfaces[i]->SetMortarSide(0);

    // ------------------------------------------------------------- //
    // As we do not know the mortar side yet (we decided to let the
    // package choose it), we cannot set a dual trace function (mortar space),
    // as we don't know which side to set it to.
    // So we just prescribe the function types.
    // ------------------------------------------------------------- //
    for (int i=0; i<ninter; ++i)
      interfaces[i]->SetFunctionTypes(MOERTEL::Function::func_BiLinearQuad,       // primal trace space
                                      MOERTEL::Function::func_DualBiLinearQuad);  // dual mortar space (recommended)
                                      //MOERTEL::Function::func_BiLinearQuad);    // mortar space (not recommended)

    // ------------------------------------------------------------- //
    // complete the interfaces
    // ------------------------------------------------------------- //
    for (int i=0; i<ninter; ++i)
      if (!interfaces[i]->Complete())
      {
         cout << "Interface " << i << " completion returned false\n";
         exit(EXIT_FAILURE);
      }

    // ------------------------------------------------------------- //
    // create an empty MOERTEL::Manager for 3D problems
    // It organizes everything from integration to solution
    // ------------------------------------------------------------- //
    MOERTEL::Manager manager(Comm,MOERTEL::Manager::manager_3D,printlevel);
    
    // ------------------------------------------------------------- //
    // Add the interfaces to the manager
    // ------------------------------------------------------------- //
    for (int i=0; i<ninter; ++i)
      manager.AddInterface(*(interfaces[i]));

    // ------------------------------------------------------------- //
    // for mortar integration, the mortar manager needs to know about
    // the rowmap of the original (uncoupled) problem because it will
    // create coupling matrices D and M matching that rowmap
    // ------------------------------------------------------------- //
    manager.SetProblemMap(&Grid.RowMap());
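    // (In the mortar formulation, D and M discretize the weak continuity
    // condition across each interface, roughly D*u_slave = M*u_master;
    // this note is background only, nothing extra is set up here.)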

    // ============================================================= //
    // choose integration parameters
    // ============================================================= //
    Teuchos::ParameterList& moertelparams = manager.Default_Parameters();
    // this parameter affects this 3D case only
    moertelparams.set("exact values at gauss points",true);
    // 1D interface possible values are 1,2,3,4,5,6,7,8,10 (2 recommended with linear shape functions)
    moertelparams.set("number gaussian points 1D",2);
    // 2D interface possible values are 3,6,12,13,16,19,27 (12 recommended with linear functions)
    moertelparams.set("number gaussian points 2D",12);

    // ============================================================= //
    // Here we are done with the construction phase of the interface
    // so we can integrate the mortar integrals
    // (Note we have not yet evaluated the PDE at all!)
    // ============================================================= //
    manager.Mortar_Integrate();
    
    // print interface information
    // (Manager, Interface, Segment, Node implement the << operator)
    if (printlevel) cout << manager;
        
    // ======================================================== //
    // Prepares the linear system. This requires the definition //
    // of a quadrature formula compatible with the grid, a      //
    // variational formulation, and a problem object which take //
    // care of filling matrix and right-hand side.              //
    // NOTE:
    // we are doing this AFTER we did all the mortar stuff to
    // show that the mortar integration is actually PDE-independent
    // ======================================================== //
    Epetra_CrsMatrix A(Copy, Grid.RowMap(), 0);
    Epetra_Vector    LHS(Grid.RowMap(),true);
    Epetra_Vector    RHS(Grid.RowMap());

    int NumQuadratureNodes = 8;

    GalerkinVariational<HexQuadrature>
      Laplace3D(NumQuadratureNodes, Diffusion, Source, Force, 
                BoundaryValue, BoundaryType);

    LinearProblem FiniteElementProblem(Grid, Laplace3D, A, LHS, RHS); 
    FiniteElementProblem.Compute();

    // ============================================================= //
    // this is Galeri's dense solve method if you'd like to see what
    // the uncoupled solution looks like
    // ============================================================= //
    //Solve(&A, &LHS, &RHS);

    // ============================================================= //
    // Since we now have all the pieces together, let's use the 
    // MOERTEL interface to other Trilinos packages to solve the
    // problem
    // ============================================================= //
    
    // ------------------------------------------------------------- //
    // Create a Teuchos::ParameterList to hold solver arguments and also
    // to hold arguments for connected packages AztecOO, ML and Amesos
    // ------------------------------------------------------------- //
    Teuchos::ParameterList list;
    
    // ------------------------------------------------------------- //
    // Choose which type of system of equations to generate
    // Note that an SPD system can be generated only when using
    // DUAL mortar spaces
    // ------------------------------------------------------------- //
    //list.set("System","SaddleSystem");
    list.set("System","SPDSystem");
    
    // ------------------------------------------------------------- //
    // choose solver, currently there is a choice of Amesos and ML/AztecOO
    // Note that if "SaddleSystem" was chosen as system of equations
    // ML/AztecOO doesn't work
    // ------------------------------------------------------------- //
    list.set("Solver","Amesos");
    //list.set("Solver","ML/Aztec"); // GAH Aztec not working FIX
    
    // ------------------------------------------------------------- //
    // create sublists for packages Amesos, ML, AztecOO. they will be
    // passed on to the individual package that is used
    // ------------------------------------------------------------- //

    // Amesos parameters:
    Teuchos::ParameterList& amesosparams = list.sublist("Amesos");
    amesosparams.set("Solver","Amesos_Klu");
    amesosparams.set("PrintTiming",true);
    amesosparams.set("PrintStatus",true);
    amesosparams.set("UseTranspose",true);
    
    // AztecOO parameters
    Teuchos::ParameterList& aztecparams = list.sublist("Aztec");
    aztecparams.set("AZ_solver","AZ_cg");
    // This will involve ML as preconditioner
    // See the AztecOO manual for other options
    aztecparams.set("AZ_precond","AZ_user_precond");
    aztecparams.set("AZ_max_iter",1200);
    aztecparams.set("AZ_output",100);
    aztecparams.set("AZ_tol",1.0e-7);
    aztecparams.set("AZ_scaling","AZ_none");
        
    // ML parameters
    // As Moertel comes with its own mortar-specific multigrid hierarchy
    // based on ML's smoothed aggregation, not all ML parameters are recognized.
    // It basically recognizes everything that is recognized by ML's MLAPI
    // (ML Application Programming Interface), see MLAPI documentation
    Teuchos::ParameterList& mlparams = list.sublist("ML");
    ML_Epetra::SetDefaults("SA",mlparams);
    mlparams.set("output",10);
    mlparams.set("print unused",1/*-2*/);
    mlparams.set("PDE equations",1);
    mlparams.set("max levels",10);
    mlparams.set("coarse: max size",500);
    mlparams.set("aggregation: type","Uncoupled");
    mlparams.set("aggregation: damping factor",1.33);

    // original   : The unmodified ML (smoothed) aggregation prolongator
    // mod_simple : ( R * (I-B*W^T) )^T
    // mod_middle : ( (I - R B*W^T*P) * R * (I-B*W^T) )^T
    // mod_full   : ( (I - R B*W^T*P) * R * (I-B*W^T) )^T + ( R B*W^T*P * R * B*W^T )^T
    mlparams.set("prolongator: type","mod_full"); 

    // solvers/smoothers currently recognized by the MLAPI_InverseOperator are
    // Ifpack:
    //         "Jacobi" "Gauss-Seidel" "symmetric Gauss-Seidel"
    //         "ILU" "ILUT" "IC" "ICT" "LU" "Amesos" "Amesos-KLU"
    //         and accompanying parameters as listed
    // ML:
    //         "MLS" "ML MLS" "ML symmetric Gauss-Seidel"
    //         "ML Gauss-Seidel" "ML Jacobi"
    //         and accompanying parameters as listed
    mlparams.set("coarse: type","Amesos-KLU"); 
    mlparams.set("smoother: type","symmetric Gauss-Seidel"); 
    mlparams.set("smoother: MLS polynomial order",3);
    mlparams.set("smoother: damping factor",0.67);
    mlparams.set("smoother: sweeps",1);
    mlparams.set("smoother: pre or post","both");
    // the null space for the Laplace operator is the constant vector
    int dimnullspace = 1;
    int nummyrows = manager.ProblemMap()->NumMyElements();
    int dimnsp    = dimnullspace*nummyrows;
    double* nsp   = new double[dimnsp];
    for (int i=0; i<dimnsp; ++i) nsp[i] = 1.;
    mlparams.set("null space: type","pre-computed");
    mlparams.set("null space: add default vectors",false);
    mlparams.set("null space: dimension",dimnullspace);
    mlparams.set("null space: vectors",nsp);
        
    // ------------------------------------------------------------- //
    // Pass input matrix to Moertel, 
    // Moertel does NOT take ownership of A!
    // ------------------------------------------------------------- //
    manager.SetInputMatrix(&A,false);
    
    // ============================================================= //
    // Solve
    // ============================================================= //
    manager.Solve(list,LHS,RHS);

    // ------------------------------------------------------------- //
    // One can reset the solver, change parameters and/or matrix (with the
    // same rowmap) and solve again if needed.
    // If no ResetSolver() is called, the same matrix and preconditioner
    // will be used to solve for multiple rhs
    // ------------------------------------------------------------- //
    //manager.ResetSolver();
    //LHS.PutScalar(0.0);
    //manager.SetInputMatrix(&A,false);
    //manager.Solve(list,LHS,RHS);
	
#ifdef MOERTEL_HAVE_EXODUS

    // ===================== //
    // Output using ExodusII //
    // ===================== //
    ExodusInterface exodus(Comm);
    exodus.Write(Grid, "hex_output", LHS);
#else
    // ================== //
    // Output using MEDIT //
    // ================== //
    MEDITInterface MEDIT(Comm);
    MEDIT.Write(Grid, "hex_output", LHS);
#endif
	

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return(0);
}
Example #2
int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
    Teuchos::GlobalMPISession mpiSession(&argc, &argv, 0);
    Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
    Epetra_SerialComm Comm;
#endif
    int nProcs, myPID ;
    Teuchos::ParameterList pLUList ;        // ParaLU parameters
    Teuchos::ParameterList isoList ;        // Isorropia parameters
    Teuchos::ParameterList shyLUList ;    // shyLU parameters
    Teuchos::ParameterList ifpackList ;    // Ifpack parameters
    string ipFileName = "ShyLU.xml";       // TODO : Accept as i/p

    nProcs = Comm.NumProc();   // works for both MPI and serial builds
    myPID = Comm.MyPID();

    if (myPID == 0)
    {
        cout <<"Parallel execution: nProcs="<< nProcs << endl;
    }

    // =================== Read input xml file =============================
    Teuchos::updateParametersFromXmlFile(ipFileName, &pLUList);
    isoList = pLUList.sublist("Isorropia Input");
    shyLUList = pLUList.sublist("ShyLU Input");
    shyLUList.set("Outer Solver Library", "AztecOO");
    // Get matrix market file name
    string MMFileName = Teuchos::getParameter<string>(pLUList, "mm_file");
    string prec_type = Teuchos::getParameter<string>(pLUList, "preconditioner");
    int maxiters = Teuchos::getParameter<int>(pLUList, "Outer Solver MaxIters");
    double tol = Teuchos::getParameter<double>(pLUList, "Outer Solver Tolerance");
    string rhsFileName = pLUList.get<string>("rhs_file", "");

    if (myPID == 0)
    {
        cout << "Input :" << endl;
        cout << "ParaLU params " << endl;
        pLUList.print(std::cout, 2, true, true);
        cout << "Matrix market file name: " << MMFileName << endl;
    }

    // ==================== Read input Matrix ==============================
    Epetra_CrsMatrix *A;
    Epetra_MultiVector *b1;

    int err = EpetraExt::MatrixMarketFileToCrsMatrix(MMFileName.c_str(), Comm,
                                                        A);
    //EpetraExt::MatlabFileToCrsMatrix(MMFileName.c_str(), Comm, A);
    //assert(err != 0);
    //cout <<"Done reading the matrix"<< endl;
    int n = A->NumGlobalRows();
    //cout <<"n="<< n << endl;

    // Create input vectors
    Epetra_Map vecMap(n, 0, Comm);
    if (rhsFileName != "")
    {
        err = EpetraExt::MatrixMarketFileToMultiVector(rhsFileName.c_str(),
                                         vecMap, b1);
    }
    else
    {
        b1 = new Epetra_MultiVector(vecMap, 1, false);
        b1->PutScalar(1.0);
    }

    Epetra_MultiVector x(vecMap, 1);
    //cout << "Created the vectors" << endl;

    // Partition the matrix with hypergraph partitioning and redistribute
    Isorropia::Epetra::Partitioner *partitioner = new
                            Isorropia::Epetra::Partitioner(A, isoList, false);
    partitioner->partition();
    Isorropia::Epetra::Redistributor rd(partitioner);
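    // (Isorropia drives Zoltan's hypergraph partitioner here; the intent is
    // to reduce communication volume in the matrix-vector products before
    // the preconditioner is built.)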

    Epetra_CrsMatrix *newA;
    Epetra_MultiVector *newX, *newB; 
    rd.redistribute(*A, newA);
    delete A;
    A = newA;

    rd.redistribute(x, newX);
    rd.redistribute(*b1, newB);

    Epetra_LinearProblem problem(A, newX, newB);

    AztecOO solver(problem);

    Ifpack_Preconditioner *prec;
    ML_Epetra::MultiLevelPreconditioner *MLprec;
    if (prec_type.compare("ShyLU") == 0)
    {
        prec = new Ifpack_ShyLU(A);
        prec->SetParameters(shyLUList);
        prec->Initialize();
        prec->Compute();
        //(dynamic_cast<Ifpack_ShyLU *>(prec))->JustTryIt();
        //cout << " Going to set it in solver" << endl ;
        solver.SetPrecOperator(prec);
        //cout << " Done setting the solver" << endl ;
    }
    else if (prec_type.compare("ILU") == 0)
    {
        ifpackList.set( "fact: level-of-fill", 1 );
        prec = new Ifpack_ILU(A);
        prec->SetParameters(ifpackList);
        prec->Initialize();
        prec->Compute();
        solver.SetPrecOperator(prec);
    }
    else if (prec_type.compare("ILUT") == 0)
    {
        ifpackList.set( "fact: ilut level-of-fill", 2 );
        ifpackList.set( "fact: drop tolerance", 1e-8);
        prec = new Ifpack_ILUT(A);
        prec->SetParameters(ifpackList);
        prec->Initialize();
        prec->Compute();
        solver.SetPrecOperator(prec);
    }
    else if (prec_type.compare("ML") == 0)
    {
        Teuchos::ParameterList mlList; // TODO : Take it from i/p
        MLprec = new ML_Epetra::MultiLevelPreconditioner(*A, mlList, true);
        solver.SetPrecOperator(MLprec);
    }

    solver.SetAztecOption(AZ_solver, AZ_gmres);
    solver.SetMatrixName(333);
    //solver.SetAztecOption(AZ_output, 1);
    //solver.SetAztecOption(AZ_conv, AZ_Anorm);
    //cout << "Going to iterate for the global problem" << endl;

    solver.Iterate(maxiters, tol);

    // compute ||Ax - b||
    double Norm;
    Epetra_MultiVector Ax(vecMap, 1);

    Epetra_MultiVector *newAx; 
    rd.redistribute(Ax, newAx);
    A->Multiply(false, *newX, *newAx);
    newAx->Update(1.0, *newB, -1.0);
    newAx->Norm2(&Norm);
    double ANorm = A->NormOne();

    cout << "|Ax-b |/|A| = " << Norm/ANorm << endl;

    delete newAx;
    if (prec_type.compare("ML") == 0)
    {
        delete MLprec;
    }
    else
    {
        delete prec;
    }

    delete b1;
    delete newX;
    delete newB;
    delete A;
    delete partitioner;
    return 0;
}
Example #3
// ------------------------------------------------------------------------
// ---------------------------   Main Program -----------------------------
// ------------------------------------------------------------------------
int main(int argc, char *argv[])
{
  // Initialize MPI
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
#endif

  // Create a communicator for Epetra objects
#ifdef HAVE_MPI
  Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
  Epetra_SerialComm Comm;
#endif

  // Get the process ID and the total number of processors
  int MyPID = Comm.MyPID();
  int NumProc = Comm.NumProc();

  bool verbose = false;
  // Check for verbose output
  if (argc>1)
    if (argv[1][0]=='-' && argv[1][1]=='v')
      verbose = true;

  // Get the number of elements from the command line
  int NumGlobalElements = 0;
  if ((argc > 2) && (verbose))
    NumGlobalElements = atoi(argv[2]) + 1;
  else if ((argc > 1) && (!verbose))
    NumGlobalElements = atoi(argv[1]) + 1;
  else
    NumGlobalElements = 101;

  bool success = false;
  try {
    // The number of unknowns must be at least equal to the
    // number of processors.
    if (NumGlobalElements < NumProc) {
      std::cout << "numGlobalBlocks = " << NumGlobalElements
        << " cannot be < number of processors = " << NumProc << std::endl;
      throw "NOX Error";
    }

    // Create the interface between NOX and the application
    // This object is derived from NOX::Epetra::Interface
    Teuchos::RCP<TransientInterface> interface =
      Teuchos::rcp(new TransientInterface(NumGlobalElements, Comm, -20.0, 20.0));
    double dt = 0.10;
    interface->setdt(dt);

    // Set the PDE nonlinear coefficient for this problem
    interface->setPDEfactor(1.0);

    // Get the vector from the Problem
    Teuchos::RCP<Epetra_Vector> soln = interface->getSolution();
    NOX::Epetra::Vector noxSoln(soln, NOX::Epetra::Vector::CreateView);
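    // (CreateView wraps the existing Epetra_Vector instead of copying it,
    // so the NOX vector and the application's solution share storage.)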

    // Begin Nonlinear Solver ************************************

    // Create the top level parameter list
    Teuchos::RCP<Teuchos::ParameterList> nlParamsPtr =
      Teuchos::rcp(new Teuchos::ParameterList);
    Teuchos::ParameterList& nlParams = *(nlParamsPtr.get());

    // Set the nonlinear solver method
    nlParams.set("Nonlinear Solver", "Line Search Based");

    // Set the printing parameters in the "Printing" sublist
    Teuchos::ParameterList& printParams = nlParams.sublist("Printing");
    printParams.set("MyPID", MyPID);
    printParams.set("Output Precision", 3);
    printParams.set("Output Processor", 0);
    if (verbose)
      printParams.set("Output Information",
          NOX::Utils::OuterIteration +
          NOX::Utils::OuterIterationStatusTest +
          NOX::Utils::InnerIteration +
          NOX::Utils::LinearSolverDetails +
          NOX::Utils::Parameters +
          NOX::Utils::Details +
          NOX::Utils::Warning +
          NOX::Utils::Debug +
          NOX::Utils::Error);
    else
      printParams.set("Output Information", NOX::Utils::Error);

    // Create a print class for controlling output below
    NOX::Utils utils(printParams);

    // Sublist for line search
    Teuchos::ParameterList& searchParams = nlParams.sublist("Line Search");
    searchParams.set("Method", "Full Step");

    // Sublist for direction
    Teuchos::ParameterList& dirParams = nlParams.sublist("Direction");
    dirParams.set("Method", "Newton");
    Teuchos::ParameterList& newtonParams = dirParams.sublist("Newton");
    newtonParams.set("Forcing Term Method", "Constant");

    // Sublist for linear solver for the Newton method
    Teuchos::ParameterList& lsParams = newtonParams.sublist("Linear Solver");
    lsParams.set("Aztec Solver", "GMRES");
    lsParams.set("Max Iterations", 800);
    lsParams.set("Tolerance", 1e-4);
    lsParams.set("Preconditioner", "AztecOO");

    // Create all possible Epetra_Operators.
    // 1. User supplied (Epetra_RowMatrix)
    Teuchos::RCP<Epetra_RowMatrix> Analytic = interface->getJacobian();
    // 2. Matrix-Free (Epetra_Operator)

    // Four constructors to create the Linear System
    Teuchos::RCP<NOX::Epetra::Interface::Required> iReq = interface;
    Teuchos::RCP<NOX::Epetra::Interface::Jacobian> iJac = interface;
    Teuchos::RCP<NOX::Epetra::LinearSystemAztecOO> linSys =
      Teuchos::rcp(new NOX::Epetra::LinearSystemAztecOO(printParams, lsParams,
            iReq, iJac, Analytic,
            noxSoln));

    // Create the Group
    Teuchos::RCP<NOX::Epetra::Group> grpPtr =
      Teuchos::rcp(new NOX::Epetra::Group(printParams,
            iReq,
            noxSoln,
            linSys));
    NOX::Epetra::Group& grp = *(grpPtr.get());

    // Create the convergence tests
    Teuchos::RCP<NOX::StatusTest::NormF> absresid =
      Teuchos::rcp(new NOX::StatusTest::NormF(1.0e-8));
    Teuchos::RCP<NOX::StatusTest::NormF> relresid =
      Teuchos::rcp(new NOX::StatusTest::NormF(grp, 1.0e-2));
    Teuchos::RCP<NOX::StatusTest::NormUpdate> update =
      Teuchos::rcp(new NOX::StatusTest::NormUpdate(1.0e-5));
    Teuchos::RCP<NOX::StatusTest::NormWRMS> wrms =
      Teuchos::rcp(new NOX::StatusTest::NormWRMS(1.0e-2, 1.0e-8));
    Teuchos::RCP<NOX::StatusTest::Combo> converged =
      Teuchos::rcp(new NOX::StatusTest::Combo(NOX::StatusTest::Combo::AND));
    converged->addStatusTest(absresid);
    converged->addStatusTest(relresid);
    converged->addStatusTest(wrms);
    converged->addStatusTest(update);
    Teuchos::RCP<NOX::StatusTest::MaxIters> maxiters =
      Teuchos::rcp(new NOX::StatusTest::MaxIters(20));
    Teuchos::RCP<NOX::StatusTest::FiniteValue> fv =
      Teuchos::rcp(new NOX::StatusTest::FiniteValue);
    Teuchos::RCP<NOX::StatusTest::Combo> combo =
      Teuchos::rcp(new NOX::StatusTest::Combo(NOX::StatusTest::Combo::OR));
    combo->addStatusTest(fv);
    combo->addStatusTest(converged);
    combo->addStatusTest(maxiters);
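    // (Net effect of the combos: "converged" requires ALL of the absolute
    // residual, relative residual, WRMS and update tests to pass, while the
    // outer OR combo stops the solve as soon as a NaN/Inf is detected,
    // convergence is reached, or 20 nonlinear iterations are exceeded.)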

    // Initialize time integration parameters
    int maxTimeSteps = 10;
    int timeStep = 0;
    double time = 0.0;

#ifdef PRINT_RESULTS_TO_FILES
    // Print initial solution
    char file_name[25];
    FILE *ifp;
    int NumMyElements = soln->Map().NumMyElements();
    (void) sprintf(file_name, "output.%d_%d",MyPID,timeStep);
    ifp = fopen(file_name, "w");
    for (int i=0; i<NumMyElements; i++)
      fprintf(ifp, "%d  %E  %E\n", soln.Map().MinMyGID()+i,
          interface->getMesh()[i], soln[i]);
    fclose(ifp);
#endif

    // Create the solver
    Teuchos::RCP<NOX::Solver::Generic> solver =
      NOX::Solver::buildSolver(grpPtr, combo, nlParamsPtr);

    // Overall status flag
    int ierr = 0;

    // Time integration loop
    while(timeStep < maxTimeSteps) {
      timeStep++;
      time += dt;

      utils.out() << "Time Step: " << timeStep << ",\tTime: " << time << std::endl;

      NOX::StatusTest::StatusType status = solver->solve();

      // Check for convergence
      if (status != NOX::StatusTest::Converged) {
        ierr++;
        if (utils.isPrintType(NOX::Utils::Error))
          utils.out() << "Nonlinear solver failed to converge!" << std::endl;
      }


      // Get the Epetra_Vector with the final solution from the solver
      const NOX::Epetra::Group& finalGroup = dynamic_cast<const NOX::Epetra::Group&>(solver->getSolutionGroup());
      const Epetra_Vector& finalSolution = (dynamic_cast<const NOX::Epetra::Vector&>(finalGroup.getX())).getEpetraVector();
      //Epetra_Vector& exactSolution = interface->getExactSoln(time); // uncomment if PRINT_RESULTS_TO_FILES is defined


      // End Nonlinear Solver **************************************

#ifdef PRINT_RESULTS_TO_FILES
      // Print solution
      (void) sprintf(file_name, "output.%d_%d",MyPID,timeStep);
      ifp = fopen(file_name, "w");
      for (int i=0; i<NumMyElements; i++)
        fprintf(ifp, "%d  %E  %E  %E\n", soln.Map().MinMyGID()+i,
            interface->getMesh()[i], finalSolution[i],exactSolution[i]);
      fclose(ifp);
#endif

      interface->reset(finalSolution);
      grp.setX(finalSolution);
      solver->reset(grp.getX(), combo);
      grp.computeF();

    } // end time step while loop

    // Output the parameter list
    if (utils.isPrintType(NOX::Utils::Parameters)) {
      utils.out() << std::endl << "Final Parameters" << std::endl
        << "****************" << std::endl;
      solver->getList().print(utils.out());
      utils.out() << std::endl;
    }

    // Test for convergence

#ifndef HAVE_MPI
    // 1. Linear solve iterations on final time step (30)- SERIAL TEST ONLY!
    //    The number of linear iterations changes with # of procs.
    if (const_cast<Teuchos::ParameterList&>(solver->getList()).sublist("Direction").sublist("Newton").sublist("Linear Solver").sublist("Output").get("Total Number of Linear Iterations",0) != 30) {
      ierr = 1;
    }
#endif
    // 2. Nonlinear solve iterations on final time step (3)
    if (const_cast<Teuchos::ParameterList&>(solver->getList()).sublist("Output").get("Nonlinear Iterations", 0) != 3)
      ierr = 2;

    success = ierr==0;
    // Summarize test results
    if (success)
      utils.out() << "Test passed!" << std::endl;
    else
      utils.out() << "Test failed!" << std::endl;
  }
  TEUCHOS_STANDARD_CATCH_STATEMENTS(verbose, std::cerr, success);

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return ( success ? EXIT_SUCCESS : EXIT_FAILURE );
}
Example #4
int main(int argc, char *argv[])
{
  int ierr = 0, i;

#ifdef EPETRA_MPI

  // Initialize MPI

  MPI_Init(&argc,&argv);

  Epetra_MpiComm Comm( MPI_COMM_WORLD );

#else

  Epetra_SerialComm Comm;

#endif

  int MyPID = Comm.MyPID();
  int NumProc = Comm.NumProc();
  bool verbose = (MyPID==0);

  if (verbose)
    cout << Epetra_Version() << endl << endl;

  cout << Comm << endl;

  // Get the number of local equations from the command line
  if (argc!=2)
   {
     if (verbose) 
       cout << "Usage: " << argv[0] << " number_of_equations" << endl;
    std::exit(1);
   }
  long long NumGlobalElements = std::atoll(argv[1]);

  if (NumGlobalElements < NumProc)
      {
     if (verbose)
       cout << "numGlobalBlocks = " << NumGlobalElements 
	    << " cannot be < number of processors = " << NumProc << endl;
     std::exit(1);
      }

  // Construct a Map that puts approximately the same number of 
  // equations on each processor.

  Epetra_Map Map(NumGlobalElements, 0LL, Comm);
  
  // Get update list and number of local equations from newly created Map.

  int NumMyElements = Map.NumMyElements();

  std::vector<long long> MyGlobalElements(NumMyElements);
    Map.MyGlobalElements(&MyGlobalElements[0]);

  // Create an integer vector NumNz that is used to build the Epetra matrix.
  // NumNz[i] is the number of nonzero entries in the ith global equation
  // on this processor

  std::vector<int> NumNz(NumMyElements);

  // We are building a tridiagonal matrix where each row has (-1 2 -1),
  // so each row has 3 entries, except the first and last rows, which have 2

  for (i=0; i<NumMyElements; i++)
    if (MyGlobalElements[i]==0 || MyGlobalElements[i] == NumGlobalElements-1)
      NumNz[i] = 2;
    else
      NumNz[i] = 3;

  // Create an Epetra_CrsMatrix

  Epetra_CrsMatrix A(Copy, Map, &NumNz[0]);
  
  // Add rows one at a time.
  // Need some vectors to help.
  // Off-diagonal values will always be -1.


  std::vector<double> Values(2);
  Values[0] = -1.0; Values[1] = -1.0;
  std::vector<long long> Indices(2);
  double two = 2.0;
  int NumEntries;
  
  for (i=0; i<NumMyElements; i++)
  {
    if (MyGlobalElements[i]==0)
    {
      Indices[0] = 1;
      NumEntries = 1;
    }
    else if (MyGlobalElements[i] == NumGlobalElements-1)
    {
      Indices[0] = NumGlobalElements-2;
      NumEntries = 1;
    }
    else
    {
      Indices[0] = MyGlobalElements[i]-1;
      Indices[1] = MyGlobalElements[i]+1;
      NumEntries = 2;
    }
    ierr = A.InsertGlobalValues(MyGlobalElements[i], NumEntries, &Values[0], &Indices[0]);
    assert(ierr==0);
    // Put in the diagonal entry
    ierr = A.InsertGlobalValues(MyGlobalElements[i], 1, &two, &MyGlobalElements[i]);
    assert(ierr==0);
  }
   
  // Finish up
  ierr = A.FillComplete();
  assert(ierr==0);

  // Create vectors for Power method


  // variable needed for iteration
  double lambda = 0.0;
  int niters = (int) (NumGlobalElements*10);
  double tolerance = 1.0e-2;
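  // (power_method() is defined elsewhere in this example; presumably it runs
  //  a standard power iteration, q <- A*q / ||A*q||, lambda <- q'*A*q, until
  //  the eigenvalue estimate changes by less than 'tolerance' or 'niters'
  //  iterations have been taken.)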

  // Iterate
  Epetra_Flops counter;
  A.SetFlopCounter(counter);
  Epetra_Time timer(Comm);
  ierr += power_method(A, lambda, niters, tolerance, verbose);
  double elapsed_time = timer.ElapsedTime();
  double total_flops =counter.Flops();
  double MFLOPs = total_flops/elapsed_time/1000000.0;

  if (verbose) 
    cout << "\n\nTotal MFLOPs for first solve = " << MFLOPs << endl<< endl;

  // Increase diagonal dominance
  if (verbose) 
    cout << "\nIncreasing magnitude of first diagonal term, solving again\n\n"
		    << endl;
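  // (Scaling A(0,0) by 10 below separates the dominant eigenvalue from the
  //  rest of the spectrum, so the power method is expected to converge in
  //  fewer iterations; the point is simply to demonstrate re-solving after
  //  a matrix modification.)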

  if (A.MyGlobalRow(0)) {
    int numvals = A.NumGlobalEntries(0);
    std::vector<double> Rowvals(numvals);
    std::vector<long long> Rowinds(numvals);
    A.ExtractGlobalRowCopy(0, numvals, numvals, &Rowvals[0], &Rowinds[0]); // Get A[0,0]
    for (i=0; i<numvals; i++) if (Rowinds[i] == 0) Rowvals[i] *= 10.0;

    A.ReplaceGlobalValues(0, numvals, &Rowvals[0], &Rowinds[0]);
  }
 
  // Iterate (again)
  lambda = 0.0;
  timer.ResetStartTime();
  counter.ResetFlops();
  ierr += power_method(A, lambda, niters, tolerance, verbose);
  elapsed_time = timer.ElapsedTime();
  total_flops = counter.Flops();
  MFLOPs = total_flops/elapsed_time/1000000.0;

  if (verbose) 
    cout << "\n\nTotal MFLOPs for second solve = " << MFLOPs << endl<< endl;


  // Release all objects
#ifdef EPETRA_MPI
  MPI_Finalize() ;
#endif

  return ierr;
} /* end main */
int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm comm;
#endif

  Galeri::core::Workspace::setNumDimensions(3);

  Galeri::grid::Loadable domain, boundary;

  int numGlobalElementsX = 2 * comm.NumProc();
  int numGlobalElementsY = 2;
  int numGlobalElementsZ = 2;

  int mx = comm.NumProc();
  int my = 1;
  int mz = 1;

  Galeri::grid::Generator::
  getCubeWithHexs(comm, numGlobalElementsX, numGlobalElementsY, numGlobalElementsZ,
                  mx, my, mz, domain, boundary);
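  // (mx * my * mz is the processor decomposition and is meant to match the
  //  communicator size: one slab of elements along x per process here.)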

  Epetra_Map matrixMap(domain.getNumGlobalVertices(), 0, comm);

  Epetra_FECrsMatrix A(Copy, matrixMap, 0);
  Epetra_FEVector    LHS(matrixMap);
  Epetra_FEVector    RHS(matrixMap);

  Galeri::problem::ScalarLaplacian<Laplacian> problem("Hex", 1, 8);

  problem.integrate(domain, A, RHS);

  LHS.PutScalar(0.0);

  problem.imposeDirichletBoundaryConditions(boundary, A, RHS, LHS);

  // ============================================================ //
  // Solving the linear system is the next step. This is done by //
  // using the IFPACK factory to create an IC preconditioner,    //
  // then setting a few of its parameters through a              //
  // Teuchos::ParameterList.                                     //
  // ============================================================ //
  
  Ifpack Factory;
  Ifpack_Preconditioner* Prec = Factory.Create("IC", &A, 0);

  Teuchos::ParameterList list;
  
  list.set("fact: level-of-fill", 1);
  IFPACK_CHK_ERR(Prec->SetParameters(list));
  IFPACK_CHK_ERR(Prec->Initialize());
  IFPACK_CHK_ERR(Prec->Compute());

  Epetra_LinearProblem linearProblem(&A, &LHS, &RHS);

  AztecOO solver(linearProblem);
  solver.SetAztecOption(AZ_solver, AZ_cg);
  solver.SetPrecOperator(Prec);
  solver.Iterate(1550, 1e-9);

  // visualization using MEDIT -- a VTK module is available as well
  Galeri::viz::MEDIT::write(domain, "sol", LHS);

  // now compute the norm of the solution
  problem.computeNorms(domain, LHS);

  // free the preconditioner created by the Ifpack factory
  delete Prec;

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return 0;
}
TEUCHOS_UNIT_TEST(Thyra_NonlinearSolver, WithResetModel)
{
  // Create a communicator for Epetra objects
#ifdef HAVE_MPI
  Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
  Epetra_SerialComm Comm;
#endif

  // Problem size only supports 1 mpi process
  TEST_EQUALITY(Comm.NumProc(), 1);
  
  // Create the model evaluator object
  double d = 10.0;
  double p0 = 2.0;
  double p1 = 0.0;
  double x00 = 0.0;
  double x01 = 1.0;
  Teuchos::RCP<ModelEvaluator2DSim<double> > thyraModel =
    Teuchos::rcp(new ModelEvaluator2DSim<double>(Teuchos::rcp(&Comm,false),
						 d,p0,p1,x00,x01));
  
  ::Stratimikos::DefaultLinearSolverBuilder builder;

  Teuchos::RCP<Teuchos::ParameterList> p = 
    Teuchos::rcp(new Teuchos::ParameterList);
  p->set("Linear Solver Type", "AztecOO");
  p->set("Preconditioner Type", "Ifpack");
  builder.setParameterList(p);

  Teuchos::RCP< ::Thyra::LinearOpWithSolveFactoryBase<double> >
    lowsFactory = builder.createLinearSolveStrategy("");

  thyraModel->set_W_factory(lowsFactory);
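  // (The Stratimikos-built linear solve strategy -- AztecOO preconditioned
  //  by Ifpack with the settings above -- is what the model evaluator uses
  //  to create and solve with W, the Jacobian operator, inside NOX.)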
  
  // Create nox parameter list
  Teuchos::RCP<Teuchos::ParameterList> nl_params =
    Teuchos::rcp(new Teuchos::ParameterList);
  nl_params->set("Nonlinear Solver", "Line Search Based");
  
  // Create a Thyra nonlinear solver
  Teuchos::RCP< ::Thyra::NonlinearSolverBase<double> > solver =
    Teuchos::rcp(new ::Thyra::NOXNonlinearSolver);

  solver->setParameterList(nl_params);
  solver->setModel(thyraModel);

  Teuchos::RCP< ::Thyra::VectorBase<double> >
    initial_guess = thyraModel->getNominalValues().get_x()->clone_v();
  
  ::Thyra::SolveCriteria<double> solve_criteria;
  ::Thyra::SolveStatus<double> solve_status;
  
  solve_status = solver->solve(initial_guess.get(), &solve_criteria);
  
  TEST_ASSERT(solve_status.extraParameters->isType<int>("Number of Iterations"));
  TEST_EQUALITY(solve_status.extraParameters->get<int>("Number of Iterations"), 7);  
  TEST_EQUALITY(solve_status.solveStatus, ::Thyra::SOLVE_STATUS_CONVERGED);

  // Test the reset capability for using a new model evaluator with a
  // different set of parameters
  p1 = 2.0;
  thyraModel =
    Teuchos::rcp(new ModelEvaluator2DSim<double>(Teuchos::rcp(&Comm,false),
						 d,p0,p1,x00,x01));
  thyraModel->set_W_factory(lowsFactory);
  
  solver->setModel(thyraModel);
  initial_guess = thyraModel->getNominalValues().get_x()->clone_v();
  solve_status = solver->solve(initial_guess.get(), &solve_criteria);
  TEST_ASSERT(solve_status.extraParameters->isType<int>("Number of Iterations"));
  TEST_EQUALITY(solve_status.extraParameters->get<int>("Number of Iterations"), 9);  
  TEST_EQUALITY(solve_status.solveStatus, ::Thyra::SOLVE_STATUS_CONVERGED);

  Teuchos::TimeMonitor::summarize();
}
int main(int argc, char *argv[])
{

  // Initialize MPI
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
#endif

  // Create a communicator for Epetra objects
#ifdef HAVE_MPI
  Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
  Epetra_SerialComm Comm;
#endif

  // Get the process ID and the total number of processors
  int MyPID = Comm.MyPID();
  int NumProc = Comm.NumProc();

  // Check verbosity level
  bool verbose = false;
  if (argc > 1)
    if (argv[1][0]=='-' && argv[1][1]=='v')
      verbose = true;

  // Get the number of elements from the command line
  int NumGlobalElements = 0;
  if ((argc > 2) && (verbose))
    NumGlobalElements = atoi(argv[2]) + 1;
  else if ((argc > 1) && (!verbose))
    NumGlobalElements = atoi(argv[1]) + 1;
  else
    NumGlobalElements = 101;

  bool success = false;
  try {
    // The number of unknowns must be at least equal to the
    // number of processors.
    if (NumGlobalElements < NumProc) {
      std::cout << "numGlobalBlocks = " << NumGlobalElements
        << " cannot be < number of processors = " << NumProc << std::endl;
      std::cout << "Test failed!" << std::endl;
      throw "NOX Error";
    }

    // Create the interface between NOX and the application
    // This object is derived from NOX::Epetra::Interface
    Teuchos::RCP<Interface> interface =
      Teuchos::rcp(new Interface(NumGlobalElements, Comm));

    // Get the vector from the Problem
    Teuchos::RCP<Epetra_Vector> soln = interface->getSolution();
    Teuchos::RCP<NOX::Epetra::Vector> noxSoln =
      Teuchos::rcp(new NOX::Epetra::Vector(soln,
            NOX::Epetra::Vector::CreateView));

    // Set the PDE factor (for nonlinear forcing term).  This could be specified
    // via user input.
    interface->setPDEfactor(1000.0);

    // Set the initial guess
    soln->PutScalar(1.0);

    // Begin Nonlinear Solver ************************************

    // Create the top level parameter list
    Teuchos::RCP<Teuchos::ParameterList> nlParamsPtr =
      Teuchos::rcp(new Teuchos::ParameterList);
    Teuchos::ParameterList& nlParams = *(nlParamsPtr.get());

    // Set the nonlinear solver method
    nlParams.set("Nonlinear Solver", "Line Search Based");

    // Set the printing parameters in the "Printing" sublist
    Teuchos::ParameterList& printParams = nlParams.sublist("Printing");
    printParams.set("MyPID", MyPID);
    printParams.set("Output Precision", 3);
    printParams.set("Output Processor", 0);
    if (verbose)
      printParams.set("Output Information",
          NOX::Utils::OuterIteration +
          NOX::Utils::OuterIterationStatusTest +
          NOX::Utils::InnerIteration +
          NOX::Utils::LinearSolverDetails +
          NOX::Utils::Parameters +
          NOX::Utils::Details +
          NOX::Utils::Warning +
          NOX::Utils::Debug +
          NOX::Utils::TestDetails +
          NOX::Utils::Error);
    else
      printParams.set("Output Information", NOX::Utils::Error +
          NOX::Utils::TestDetails);

    // Create a print class for controlling output below
    NOX::Utils printing(printParams);

    // Sublist for line search
    Teuchos::ParameterList& searchParams = nlParams.sublist("Line Search");
    searchParams.set("Method", "Full Step");

    // Sublist for direction
    Teuchos::ParameterList& dirParams = nlParams.sublist("Direction");
    dirParams.set("Method", "Newton");
    Teuchos::ParameterList& newtonParams = dirParams.sublist("Newton");
    newtonParams.set("Forcing Term Method", "Constant");
    //newtonParams.set("Forcing Term Method", "Type 1");

    // Sublist for linear solver for the Newton method
    Teuchos::ParameterList& lsParams = newtonParams.sublist("Linear Solver");
    lsParams.set("Aztec Solver", "GMRES");
    lsParams.set("Max Iterations", 800);
    lsParams.set("Tolerance", 1e-4);

    // Various Preconditioner options
    lsParams.set("Preconditioner", "None");
    //lsParams.set("Preconditioner", "AztecOO");
    //lsParams.set("Preconditioner", "New Ifpack");
    lsParams.set("Preconditioner Reuse Policy", "Reuse");
    //lsParams.set("Preconditioner Reuse Policy", "Recompute");
    //lsParams.set("Preconditioner Reuse Policy", "Rebuild");
    lsParams.set("Max Age Of Prec", 5);

    // Add a user defined pre/post operator object
    Teuchos::RCP<NOX::Abstract::PrePostOperator> ppo =
      Teuchos::rcp(new UserPrePostOperator(printing));
    nlParams.sublist("Solver Options").set("User Defined Pre/Post Operator",
        ppo);

    // Let's force all status tests to do a full check
    nlParams.sublist("Solver Options").set("Status Test Check Type", "Complete");

    // Create all possible Epetra_Operators.
    // 1. User supplied (Epetra_RowMatrix)
    Teuchos::RCP<Epetra_RowMatrix> Analytic = interface->getJacobian();
    // 2. Matrix-Free (Epetra_Operator)
    Teuchos::RCP<NOX::Epetra::MatrixFree> MF =
      Teuchos::rcp(new NOX::Epetra::MatrixFree(printParams, interface,
            *noxSoln));
    // 3. Finite Difference (Epetra_RowMatrix)
    Teuchos::RCP<NOX::Epetra::FiniteDifference> FD =
      Teuchos::rcp(new NOX::Epetra::FiniteDifference(printParams, interface,
            *soln));

    // Create the linear system
    Teuchos::RCP<NOX::Epetra::Interface::Required> iReq = interface;
    Teuchos::RCP<NOX::Epetra::Interface::Jacobian> iJac = MF;
    Teuchos::RCP<NOX::Epetra::Interface::Preconditioner> iPrec = FD;
    Teuchos::RCP<NOX::Epetra::LinearSystemAztecOO> linSys =
      Teuchos::rcp(new NOX::Epetra::LinearSystemAztecOO(printParams, lsParams,
            //interface,
            iJac, MF,
            iPrec, FD,
            *soln));

    // Create the Group
    NOX::Epetra::Vector initialGuess(soln, NOX::Epetra::Vector::CreateView);
    Teuchos::RCP<NOX::Epetra::Group> grpPtr =
      Teuchos::rcp(new NOX::Epetra::Group(printParams,
            iReq,
            initialGuess,
            linSys));
    NOX::Epetra::Group& grp = *grpPtr;

    // For LeanMatrixFree, disable linear resid checking
    grp.disableLinearResidualComputation(true);

    // uncomment the following for loca supergroups
    //MF->setGroupForComputeF(*grpPtr);
    //FD->setGroupForComputeF(*grpPtr);

    // Create the convergence tests
    Teuchos::RCP<NOX::StatusTest::NormF> absresid =
      Teuchos::rcp(new NOX::StatusTest::NormF(1.0e-8));
    Teuchos::RCP<NOX::StatusTest::NormF> relresid =
      Teuchos::rcp(new NOX::StatusTest::NormF(grp, 1.0e-2));
    Teuchos::RCP<NOX::StatusTest::NormUpdate> update =
      Teuchos::rcp(new NOX::StatusTest::NormUpdate(1.0e-5));
    Teuchos::RCP<NOX::StatusTest::NormWRMS> wrms =
      Teuchos::rcp(new NOX::StatusTest::NormWRMS(1.0e-2, 1.0e-8));
    Teuchos::RCP<NOX::StatusTest::Combo> converged =
      Teuchos::rcp(new NOX::StatusTest::Combo(NOX::StatusTest::Combo::AND));
    converged->addStatusTest(absresid);
    converged->addStatusTest(relresid);
    converged->addStatusTest(wrms);
    converged->addStatusTest(update);
    Teuchos::RCP<NOX::StatusTest::MaxIters> maxiters =
      Teuchos::rcp(new NOX::StatusTest::MaxIters(20));
    Teuchos::RCP<NOX::StatusTest::FiniteValue> fv =
      Teuchos::rcp(new NOX::StatusTest::FiniteValue);
    Teuchos::RCP<NOX::StatusTest::Combo> combo =
      Teuchos::rcp(new NOX::StatusTest::Combo(NOX::StatusTest::Combo::OR));
    combo->addStatusTest(fv);
    combo->addStatusTest(converged);
    combo->addStatusTest(maxiters);

    // Create the solver
    Teuchos::RCP<NOX::Solver::Generic> solver =
      NOX::Solver::buildSolver(grpPtr, combo, nlParamsPtr);

    // For LeanMatrixFree, get unperturbed F from solver
    MF->setSolverForComputeJacobian(solver);

    NOX::StatusTest::StatusType solvStatus = solver->solve();

    // End Nonlinear Solver **************************************

    // Get the Epetra_Vector with the final solution from the solver
    const NOX::Epetra::Group& finalGroup =
      dynamic_cast<const NOX::Epetra::Group&>(solver->getSolutionGroup());
    const Epetra_Vector& finalSolution =
      (dynamic_cast<const NOX::Epetra::Vector&>(finalGroup.getX())).
      getEpetraVector();

    // Output the parameter list
    if (verbose) {
      if (printing.isPrintType(NOX::Utils::Parameters)) {
        printing.out() << std::endl << "Final Parameters" << std::endl
          << "****************" << std::endl;
        solver->getList().print(printing.out());
        printing.out() << std::endl;
      }
    }

    // Print solution
    char file_name[25];
    FILE *ifp;
    int NumMyElements = soln->Map().NumMyElements();
    (void) sprintf(file_name, "output.%d",MyPID);
    ifp = fopen(file_name, "w");
    for (int i=0; i<NumMyElements; i++)
      fprintf(ifp, "%d  %E\n", soln->Map().MinMyGID()+i, finalSolution[i]);
    fclose(ifp);


    // Tests
    int status = 0; // Converged

    // 1. Convergence
    if (solvStatus != NOX::StatusTest::Converged) {
      status = 1;
      if (printing.isPrintType(NOX::Utils::Error))
        printing.out() << "Nonlinear solver failed to converge!" << std::endl;
    }
#ifndef HAVE_MPI
    // 2. Linear solve iterations (659) - SERIAL TEST ONLY!
    //    The number of linear iterations changes with # of procs.
    if (const_cast<Teuchos::ParameterList&>(solver->getList()).sublist("Direction").sublist("Newton").sublist("Linear Solver").sublist("Output").get("Total Number of Linear Iterations",0) != 659) {
      status = 2;
    }
#endif
    // 3. Nonlinear solve iterations (10)
    if (const_cast<Teuchos::ParameterList&>(solver->getList()).sublist("Output").get("Nonlinear Iterations", 0) != 10)
      status = 3;
    // 4. Test the pre/post iterate options
    {
      UserPrePostOperator & ppo2 =
        dynamic_cast<UserPrePostOperator&>(*ppo.get());
      if (ppo2.getNumRunPreIterate() != 10)
        status = 4;
      if (ppo2.getNumRunPostIterate() != 10)
        status = 4;
      if (ppo2.getNumRunPreSolve() != 1)
        status = 4;
      if (ppo2.getNumRunPostSolve() != 1)
        status = 4;
    }

    success = status==0;
    // Summarize test results
    if (success)
      printing.out() << "Test passed!" << std::endl;
    else
      printing.out() << "Test failed!" << std::endl;

    printing.out() << "Status = " << status << std::endl;
  }
  TEUCHOS_STANDARD_CATCH_STATEMENTS(verbose, std::cerr, success);

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return ( success ? EXIT_SUCCESS : EXIT_FAILURE );
}
Example #8
int main(int narg, char *arg[])
{
  using std::cout;

#ifdef EPETRA_MPI  
  // Initialize MPI  
  MPI_Init(&narg,&arg);
  Epetra_MpiComm comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm comm;
#endif

  int me = comm.MyPID();
  int np = comm.NumProc();

  ITYPE nGlobalRows = 10;
  if (narg > 1) 
    nGlobalRows = (ITYPE) atol(arg[1]);

  bool verbose = (nGlobalRows < 20);

  // Linear map similar to Trilinos default, 
  // but want to allow adding OFFSET_EPETRA64 to the indices.
  int nMyRows = (int) (nGlobalRows / np + (nGlobalRows % np > me));
  ITYPE myFirstRow = (ITYPE)(me * (nGlobalRows / np) + MIN(nGlobalRows%np, me));
  ITYPE *myGlobalRows = new ITYPE[nMyRows];
  for (int i = 0; i < nMyRows; i++)
    myGlobalRows[i] = (ITYPE)i + myFirstRow + OFFSET_EPETRA64;
  Epetra_Map *rowMap = new Epetra_Map(-1, nMyRows, &myGlobalRows[0], 0, comm);
  if (verbose) rowMap->Print(std::cout);
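  // (OFFSET_EPETRA64 is defined elsewhere in this test; the idea is to shift
  //  all global indices by an offset large enough that they only fit into
  //  64-bit (long long) IDs, which is the code path exercised here.)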

  // Create an integer vector nnzPerRow that is used to build the Epetra Matrix.
  // nnzPerRow[i] is the number of entries for the ith local equation
  std::vector<int> nnzPerRow(nMyRows+1, 0);

  // Also create lists of the nonzeros to be assigned to processors.
  // To save programming time and complexity, these vectors are allocated 
  // larger than they actually need to be.
  std::vector<ITYPE> iv(3*nMyRows+1);
  std::vector<ITYPE> jv(3*nMyRows+1);
  std::vector<double> vv(3*nMyRows+1);

  // Generate the nonzeros for the Laplacian matrix.
  ITYPE nMyNonzeros = 0;
  for (ITYPE i = 0, myrowcnt = 0; i < nGlobalRows; i++) {
    if (rowMap->MyGID(i+OFFSET_EPETRA64)) { 
      // This processor owns this row; add nonzeros.
      if (i > 0) {
        iv[nMyNonzeros] = i + OFFSET_EPETRA64;
        jv[nMyNonzeros] = i-1 + OFFSET_EPETRA64;
        vv[nMyNonzeros] = -1;
        if (verbose)
          std::cout << "(" << iv[nMyNonzeros] << "," << jv[nMyNonzeros] << ")="
               << vv[nMyNonzeros] << " on processor " << me
               << " in " << myrowcnt << std::endl;
        nMyNonzeros++;
        nnzPerRow[myrowcnt]++;
      }

      iv[nMyNonzeros] = i + OFFSET_EPETRA64;
      jv[nMyNonzeros] = i + OFFSET_EPETRA64;
      vv[nMyNonzeros] = ((i == 0 || i == nGlobalRows-1) ? 1. : 2.);
      if (verbose) 
        std::cout << "(" << iv[nMyNonzeros] << "," << jv[nMyNonzeros] << ")="
             << vv[nMyNonzeros] << " on processor " << me
             << " in " << myrowcnt << std::endl;
      nMyNonzeros++;
      nnzPerRow[myrowcnt]++;

      if (i < nGlobalRows - 1) {
        iv[nMyNonzeros] = i + OFFSET_EPETRA64;
        jv[nMyNonzeros] = i+1 + OFFSET_EPETRA64;
        vv[nMyNonzeros] = -1;
        if (verbose) 
          std::cout << "(" << iv[nMyNonzeros] << "," << jv[nMyNonzeros] << ")="
               << vv[nMyNonzeros] << " on processor " << me
               << " in " << myrowcnt << std::endl;
        nMyNonzeros++;
        nnzPerRow[myrowcnt]++;
      }
      myrowcnt++;
    }
  }

  // Create an Epetra_Matrix
  Epetra_CrsMatrix *A = new Epetra_CrsMatrix(Copy, *rowMap, &nnzPerRow[0], false);

  // Insert the nonzeros.
  int info;
  ITYPE sum = 0;
  for (int i=0; i < nMyRows; i++) {
    if (nnzPerRow[i]) {
      if (verbose) {
        std::cout << "InsertGlobalValus row " << iv[sum]
             << " count " << nnzPerRow[i] 
             << " cols " << jv[sum] << " " << jv[sum+1] << " ";
        if (nnzPerRow[i] == 3) std::cout << jv[sum+2];
        std::cout << std::endl;
      }
      info = A->InsertGlobalValues(iv[sum],nnzPerRow[i],&vv[sum],&jv[sum]);
      assert(info==0);
      sum += nnzPerRow[i];
    }
  }

  // Finish up
  info = A->FillComplete();
  assert(info==0);
  if (verbose) A->Print(std::cout);

  // Sanity test:  Product of matrix and vector of ones should have norm == 0
  // and max/min/mean values of 0
  // the input vector lives in the domain map, the product in the range map
  Epetra_Vector sanity(A->DomainMap());
  Epetra_Vector sanityres(A->RangeMap());
  sanity.PutScalar(1.);
  A->Multiply(false, sanity, sanityres);

  double jjone, jjtwo, jjmax;
  sanityres.Norm1(&jjone);
  sanityres.Norm2(&jjtwo);
  sanityres.NormInf(&jjmax);
  if (me == 0)
    std::cout << "SanityTest norms 1/2/inf: " << jjone << " "
                                         << jjtwo << " " << jjmax << std::endl;

  bool test_failed = (jjone != 0) || (jjtwo != 0) || (jjmax != 0);

  sanityres.MinValue(&jjone);
  sanityres.MeanValue(&jjtwo);
  sanityres.MaxValue(&jjmax);
  if (me == 0)
    std::cout << "SanityTest values min/max/avg: " << jjone << " "
                                              << jjmax << " " << jjtwo << std::endl;

  test_failed = test_failed || (jjone != 0) || (jjtwo != 0) || (jjmax != 0);

  if (me == 0) {
    if(test_failed)
      std::cout << "Bug_5794_IndexBase_LL tests FAILED" << std::endl;
  }

  delete A;
  delete rowMap;
  delete [] myGlobalRows;

  FINALIZE;
}
void testTwoPts()
{
  Epetra_SerialComm comm;

  // set up a hard-coded layout for two points
  int numCells = 2;
  int numBonds = 2;

  // set up overlap maps, which include ghosted nodes
  // in this case we're on a single processor, so these
  // maps are essentially the identity map
  int numGlobalElements = numCells;
  int numMyElements = numGlobalElements;
  int* myGlobalElements = new int[numMyElements];
  int elementSize = 1;
  for(int i=0; i<numMyElements ; ++i){
    myGlobalElements[i] = i;
  }
  int indexBase = 0;

  // oneDimensionalOverlapMap
  // used for cell volumes and scalar constitutive data
  Epetra_BlockMap oneDimensionalOverlapMap(numGlobalElements, numMyElements, myGlobalElements, elementSize, indexBase, comm); 
  // used for positions, displacements, velocities and vector constitutive data
  elementSize = 3;
  Epetra_BlockMap threeDimensionalOverlapMap(numGlobalElements, numMyElements, myGlobalElements, elementSize, indexBase, comm); 
  delete[] myGlobalElements;
  // bondMap
  // used for bond damage and bond constitutive data
  numGlobalElements = numBonds;
  numMyElements = numGlobalElements;
  myGlobalElements = new int[numMyElements];
  for(int i=0; i<numMyElements ; ++i){
    myGlobalElements[i] = i;
  }
  elementSize = 1;
  Epetra_BlockMap bondMap(numGlobalElements, numMyElements, myGlobalElements, elementSize, indexBase, comm); 
  delete[] myGlobalElements;

  // create a linear elastic isotropic peridynamic solid material model
  // could also use a MaterialFactory object to create the material here
  Teuchos::ParameterList params;
  params.set("Density", 7800.0);
  params.set("Bulk Modulus", 130.0e9);
  params.set("Shear Modulus", 78.0e9);
  PeridigmNS::LinearElasticIsotropicMaterial mat(params);

  // create the NeighborhoodData
  // both points are neighbors of each other
  PeridigmNS::NeighborhoodData neighborhoodData;
  neighborhoodData.SetNumOwned(2);
  neighborhoodData.SetNeighborhoodListSize(4);
  int* const ownedIDs = neighborhoodData.OwnedIDs();
  ownedIDs[0] = 0;
  ownedIDs[1] = 1;
  int* const neighborhoodList = neighborhoodData.NeighborhoodList();
  neighborhoodList[0] = 1;
  neighborhoodList[1] = 1;
  neighborhoodList[2] = 1;
  neighborhoodList[3] = 0;
  int* const neighborhoodPtr = neighborhoodData.NeighborhoodPtr();
  neighborhoodPtr[0] = 0;
  neighborhoodPtr[1] = 2;
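  // (Layout assumed here: for each owned point the neighborhood list stores
  //  the neighbor count followed by the neighbor IDs, and neighborhoodPtr
  //  holds each point's offset into that list.  So point 0 has one neighbor,
  //  point 1, and point 1 has one neighbor, point 0.)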

  // create a block and load the material model
  PeridigmNS::Block block("test", 1);
  block.setMaterialModel(Teuchos::rcp(&mat, false));

  // create the blockID vector
  Epetra_Vector blockIDs(oneDimensionalOverlapMap);
  blockIDs.PutScalar(1.0);

  // initialize the block
  // in serial, the overlap and non-overlap maps are the same
  block.initialize(Teuchos::rcp(&oneDimensionalOverlapMap, false),
                   Teuchos::rcp(&oneDimensionalOverlapMap, false),
                   Teuchos::rcp(&threeDimensionalOverlapMap, false),
                   Teuchos::rcp(&threeDimensionalOverlapMap, false),
                   Teuchos::rcp(&bondMap, false),
                   Teuchos::rcp(&blockIDs, false),
                   Teuchos::rcp(&neighborhoodData, false));

  // time step
  double dt = 1.0;

  // create a workset with rcps to the relevant data
  PHAL::Workset workset;
  workset.timeStep = Teuchos::RCP<double>(&dt, false);
  workset.jacobian = Teuchos::RCP<PeridigmNS::SerialMatrix>(); // null rcp, not used in this test
  workset.blocks = Teuchos::rcp(new std::vector<PeridigmNS::Block>() );
  workset.myPID = comm.MyPID();

  workset.blocks->push_back(block);

  // set the data for the two-point discretization
  Epetra_Vector& x = *block.getData(Field_NS::COORD3D, Field_ENUM::STEP_NONE);
  Epetra_Vector& y = *block.getData(Field_NS::CURCOORD3D, Field_ENUM::STEP_NP1);
  x[0] = 0.0; x[1] = 0.0; x[2] = 0.0;
  x[3] = 1.0; x[4] = 0.0; x[5] = 0.0;
  y[0] = 0.0; y[1] = 0.0; y[2] = 0.0;
  y[3] = 2.0; y[4] = 0.0; y[5] = 0.0;
  block.getData(Field_NS::VOLUME, Field_ENUM::STEP_NONE)->PutScalar(1.0);

  // fill in constitutive data directly

  // weighted volume
  Epetra_Vector& weightedVolume = *block.getData(Field_NS::WEIGHTED_VOLUME, Field_ENUM::STEP_NONE);
  weightedVolume[0] = 1.0;
  weightedVolume[1] = 1.0;

  // dilatation
  // \todo Investigate the effect of dilatation on this particular problem; add a new configuration if needed to test dilatation.
  Epetra_Vector& dilatation = *block.getData(Field_NS::DILATATION, Field_ENUM::STEP_NP1);
  dilatation[0] = 1.0;
  dilatation[1] = 1.0;

  // set up a parameter list that will be passed to the evaluator constructor
  Teuchos::RCP<Teuchos::ParameterList> p = rcp(new Teuchos::ParameterList);
  int type = FactoryTraits<PHAL::PeridigmTraits>::id_evaluate_force;
  p->set<int>("Type", type); 
  p->set<bool>("Verbose", false);
  Teuchos::RCP<PHX::DataLayout> dummy = Teuchos::rcp(new PHX::MDALayout<Dummy>(0));
  p->set< Teuchos::RCP<PHX::DataLayout> >("Dummy Data Layout", dummy);

  // instantiate the evaluator
  EvaluateForce<PHAL::PeridigmTraits::Residual, PHAL::PeridigmTraits> evaluator(*p);

  // make a call to the evaluateFields() function in the evaluator
  // this is the workhorse function that calls the material model,
  // evaluates the pairwise forces, and updates the force
  evaluator.evaluateFields(workset);

  // check the computed force values
  Epetra_Vector& force = *block.getData(Field_NS::FORCE_DENSITY3D, Field_ENUM::STEP_NP1);
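  // Rough arithmetic sketch of where the expected value comes from (added note,
  // assuming the standard linear peridynamic solid force state
  // t = (3K/m)*theta*omega*x + (15*mu/m)*omega*e_d with omega = 1): here m = 1,
  // theta = 1, bond extension e = |y| - |x| = 1, and e_d = e - theta*x/3 = 2/3,
  // so t = 3*130.0e9 + 15*78.0e9*(2/3) = 1.17e12; summing the two bond
  // contributions (unit volumes) gives the expected 2.34e12 checked below.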
  double node0ForceX = force[0];
  BOOST_CHECK_CLOSE(node0ForceX, 2.34e12, 1.0e-14);
  double node0ForceY = force[1];
  BOOST_CHECK_SMALL(node0ForceY, 1.0e-14);
  double node0ForceZ = force[2];
  BOOST_CHECK_SMALL(node0ForceZ, 1.0e-14);
  double node1ForceX = force[3];
  BOOST_CHECK_CLOSE(node1ForceX, -2.34e12, 1.0e-14);
  double node1ForceY = force[4];
  BOOST_CHECK_SMALL(node1ForceY, 1.0e-14);
  double node1ForceZ = force[5];
  BOOST_CHECK_SMALL(node1ForceZ, 1.0e-14);
}
int main( int argc, char* argv[] )
{

#ifdef EPETRA_MPI
  // Initialize MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
  Epetra_SerialComm Comm;
#endif

  // Create command line processor
  Teuchos::CommandLineProcessor RBGen_CLP;
  RBGen_CLP.recogniseAllOptions( false );
  RBGen_CLP.throwExceptions( false );

  // Generate list of acceptable command line options
  bool verbose = false;
  std::string xml_file = "";
  RBGen_CLP.setOption("verbose", "quiet", &verbose, "Print messages and results.");
  RBGen_CLP.setOption("xml-file", &xml_file, "XML Input File");

  // Process command line.
  Teuchos::CommandLineProcessor::EParseCommandLineReturn
    parseReturn= RBGen_CLP.parse( argc, argv );
  if( parseReturn == Teuchos::CommandLineProcessor::PARSE_HELP_PRINTED ) {
    return 0;
  }
  if( parseReturn != Teuchos::CommandLineProcessor::PARSE_SUCCESSFUL   ) {
#ifdef EPETRA_MPI
    MPI_Finalize();
#endif
    return -1; // Error!
  }

  // Check to make sure an XML input file was provided
  TEUCHOS_TEST_FOR_EXCEPTION(xml_file == "", std::invalid_argument, "ERROR:  An XML file was not provided; use --xml-file to provide an XML input file for this RBGen driver.");

  Teuchos::Array<Teuchos::RCP<Teuchos::Time> > timersRBGen;
  //
  // ---------------------------------------------------------------
  //  CREATE THE INITIAL PARAMETER LIST FROM THE INPUT XML FILE
  // ---------------------------------------------------------------
  //
  Teuchos::RCP<Teuchos::ParameterList> BasisParams = RBGen::createParams( xml_file );
  if (verbose && Comm.MyPID() == 0) 
  {
    std::cout<<"-------------------------------------------------------"<<std::endl;
    std::cout<<"Input Parameter List: "<<std::endl;
    std::cout<<"-------------------------------------------------------"<<std::endl;
    BasisParams->print();
  } 
  //
  // ---------------------------------------------------------------
  //  CREATE THE FILE I/O HANDLER
  // ---------------------------------------------------------------
  //
  //  - First create the abstract factory for the file i/o handler.
  //
  RBGen::EpetraMVFileIOFactory fio_factory;
  //
  //  - Then use the abstract factory to create the file i/o handler specified in the parameter list.
  //
  Teuchos::RCP<Teuchos::Time> timerFileIO = Teuchos::rcp( new Teuchos::Time("Create File I/O Handler") );
  timersRBGen.push_back( timerFileIO );
  //
  Teuchos::RCP< RBGen::FileIOHandler<Epetra_MultiVector> > mvFileIO;
  Teuchos::RCP< RBGen::FileIOHandler<Epetra_Operator> > opFileIO =
    Teuchos::rcp( new RBGen::EpetraCrsMatrixFileIOHandler() ); 
  {
    Teuchos::TimeMonitor lcltimer( *timerFileIO );
    mvFileIO = fio_factory.create( *BasisParams );
    //					    
    // Initialize file IO handlers
    //
    mvFileIO->Initialize( BasisParams );
    opFileIO->Initialize( BasisParams );
  }    
  if (verbose && Comm.MyPID() == 0) 
  {
    std::cout<<"-------------------------------------------------------"<<std::endl;
    std::cout<<"File I/O Handlers Generated"<<std::endl;
    std::cout<<"-------------------------------------------------------"<<std::endl;
  }
  //
  // ---------------------------------------------------------------
  //  READ IN THE DATA SET / SNAPSHOT SET & PREPROCESS
  //  ( this will be a separate abstract class type )
  // ---------------------------------------------------------------
  //
  Teuchos::RCP<std::vector<std::string> > filenames = RBGen::genFileList( *BasisParams );
  Teuchos::RCP<Teuchos::Time> timerSnapshotIn = Teuchos::rcp( new Teuchos::Time("Reading in Snapshot Set") );
  timersRBGen.push_back( timerSnapshotIn );
  //
  Teuchos::RCP<Epetra_MultiVector> testMV;
  {
    Teuchos::TimeMonitor lcltimer( *timerSnapshotIn );
    testMV = mvFileIO->Read( *filenames );
  } 

  RBGen::EpetraMVPreprocessorFactory preprocess_factory;

  Teuchos::RCP<Teuchos::Time> timerCreatePreprocessor = Teuchos::rcp( new Teuchos::Time("Create Preprocessor") );
  timersRBGen.push_back( timerCreatePreprocessor );
  Teuchos::RCP<RBGen::Preprocessor<Epetra_MultiVector> > prep;
  {
    Teuchos::TimeMonitor lcltimer( *timerCreatePreprocessor );
    prep = preprocess_factory.create( *BasisParams );
    //
    // Initialize preprocessor.
    //
    prep->Initialize( BasisParams, mvFileIO );
  }

  Teuchos::RCP<Teuchos::Time> timerPreprocess = Teuchos::rcp( new Teuchos::Time("Preprocess Snapshot Set") );  
  timersRBGen.push_back( timerPreprocess );
  {
    Teuchos::TimeMonitor lcltimer( *timerPreprocess );
    prep->Preprocess( testMV );
  }

  if (verbose && Comm.MyPID() == 0) 
  {
    std::cout<<"-------------------------------------------------------"<<std::endl;
    std::cout<<"Snapshot Set Imported and Preprocessed"<<std::endl;
    std::cout<<"-------------------------------------------------------"<<std::endl;
  }
  //
  // ---------------------------------------------------------------
  //  COMPUTE THE REDUCED BASIS
  // ---------------------------------------------------------------
  //
  //  - First create the abstract factory for the reduced basis methods.
  //
  RBGen::EpetraMVMethodFactory mthd_factory;
  //
  //  - Then use the abstract factory to create the method specified in the parameter list.
  //
  Teuchos::RCP<Teuchos::Time> timerCreateMethod = Teuchos::rcp( new Teuchos::Time("Create Reduced Basis Method") );
  timersRBGen.push_back( timerCreateMethod );
  Teuchos::RCP<RBGen::Method<Epetra_MultiVector,Epetra_Operator> > method;
  {
    Teuchos::TimeMonitor lcltimer( *timerCreateMethod );  
    method = mthd_factory.create( *BasisParams );
    //
    // Initialize reduced basis method.
    //
    method->Initialize( BasisParams, testMV, opFileIO );
  }
  //
  //  - Call the computeBasis method on the reduced basis method object.
  //
  Teuchos::RCP<Teuchos::Time> timerComputeBasis = Teuchos::rcp( new Teuchos::Time("Reduced Basis Computation") );
  timersRBGen.push_back( timerComputeBasis );
  {
    Teuchos::TimeMonitor lcltimer( *timerComputeBasis );  
    method->computeBasis();
  }
  //
  //  - Retrieve the computed basis from the method object.
  //
  Teuchos::RCP<const Epetra_MultiVector> basisMV = method->getBasis();
  //
  //  Since we're using a POD method, we can dynamic cast to get the singular values.
  //
  Teuchos::RCP<RBGen::PODMethod<double> > pod_method = Teuchos::rcp_dynamic_cast<RBGen::PODMethod<double> >( method );
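  //
  //  Defensive guard (a sketch added here, not part of the original driver): the
  //  cast above yields a null RCP if the configured method is not POD-based, so
  //  check before asking for the singular values.
  //
  TEUCHOS_TEST_FOR_EXCEPTION(pod_method.is_null(), std::invalid_argument,
    "ERROR:  The configured basis method does not provide singular values.");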
  const std::vector<double> sv = pod_method->getSingularValues();
  //
  if (verbose && Comm.MyPID() == 0) {
    std::cout<<"-------------------------------------------------------"<<std::endl;
    std::cout<<"Computed Singular Values : "<<std::endl;
    std::cout<<"-------------------------------------------------------"<<std::endl;
    for (unsigned int i=0; i<sv.size(); ++i) { std::cout << sv[i] << std::endl; }
  }      
  
  if (Comm.MyPID() == 0) {
    std::cout<<"-------------------------------------------------------"<<std::endl;
    std::cout<<"RBGen Computation Time Breakdown (seconds) : "<<std::endl;
    std::cout<<"-------------------------------------------------------"<<std::endl;
    for (unsigned int i=0; i<timersRBGen.size(); ++i)
      std::cout << std::left << std::setw(40) << timersRBGen[i]->name() << " : "
	   << std::setw(15) << timersRBGen[i]->totalElapsedTime() << std::endl;
    std::cout<<"-------------------------------------------------------"<<std::endl;
  }
  //
  // ---------------------------------------------------------------
  //  POSTPROCESS BASIS (not necessary right now)
  // ---------------------------------------------------------------
  //
  //
  // ---------------------------------------------------------------
  //  WRITE OUT THE REDUCED BASIS
  // ---------------------------------------------------------------
  //
  if ( BasisParams->isSublist( "File IO" ) ) {
    Teuchos::ParameterList fileio_params = BasisParams->sublist( "File IO" );
    if ( fileio_params.isParameter( "Reduced Basis Output File" ) ) {
      std::string outfile = Teuchos::getParameter<std::string>( fileio_params, "Reduced Basis Output File" );
      mvFileIO->Write( basisMV, outfile );
    }
  }
  //
#ifdef EPETRA_MPI
  // Finalize MPI
  MPI_Finalize();
#endif

  return 0;
}
Example #11
0
int main(int argc, char *argv[])
{

  // initialize MPI and Epetra communicator
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
  Epetra_SerialComm Comm;
#endif

  Teuchos::ParameterList GaleriList;

  // The problem is defined on a 2D grid, global size is nx * nx.
  int nx = 30; 
  GaleriList.set("nx", nx);
  GaleriList.set("ny", nx * Comm.NumProc());
  GaleriList.set("mx", 1);
  GaleriList.set("my", Comm.NumProc());
  Teuchos::RefCountPtr<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap64("Cartesian2D", Comm, GaleriList) );
  Teuchos::RefCountPtr<Epetra_RowMatrix> A = Teuchos::rcp( Galeri::CreateCrsMatrix("Laplace2D", &*Map, GaleriList) );

  // =============================================================== //
  // B E G I N N I N G   O F   I F P A C K   C O N S T R U C T I O N //
  // =============================================================== //

  Teuchos::ParameterList List;

  // builds an Ifpack_AdditiveSchwarz. This is templated with
  // the local solvers, in this case Ifpack_ICT. Note that any
  // other Ifpack_Preconditioner-derived class can be used
  // instead of Ifpack_ICT.

  // In this example the overlap is zero. Use
  // Prec(A,OverlapLevel) for the general case.
  Ifpack_AdditiveSchwarz<Ifpack_ICT> Prec(&*A);
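
  // A minimal sketch of that general case (not used below), assuming one layer
  // of overlap between subdomains is desired; OverlapLevel is an illustrative
  // local variable:
  //
  //   int OverlapLevel = 1;
  //   Ifpack_AdditiveSchwarz<Ifpack_ICT> PrecWithOverlap(&*A, OverlapLevel);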

  // `1.0' means that the factorization should approximately
  // keep the same number of nonzeros per row as the original matrix.
  List.set("fact: ict level-of-fill", 1.0);
  // no modifications on the diagonal
  List.set("fact: absolute threshold", 0.0);
  List.set("fact: relative threshold", 1.0);
  List.set("fact: relaxation value", 0.0);
  // matrix `laplace_2d_bc' is not symmetric because of the way
  // boundary conditions are imposed. We can filter out the singletons
  // (that is, Dirichlet nodes) and end up with a symmetric
  // matrix (as ICT requires).
  List.set("schwarz: filter singletons", true);

  // sets the parameters
  IFPACK_CHK_ERR(Prec.SetParameters(List));

  // initialize the preconditioner. At this point the matrix must
  // have been FillComplete()'d, but actual values are ignored.
  IFPACK_CHK_ERR(Prec.Initialize());

  // Build the preconditioner by looking at the values of
  // the matrix.
  IFPACK_CHK_ERR(Prec.Compute());

  // =================================================== //
  // E N D   O F   I F P A C K   C O N S T R U C T I O N //
  // =================================================== //

  // At this point, we need some additional objects
  // to define and solve the linear system.

  // defines LHS and RHS
  Epetra_Vector LHS(A->OperatorDomainMap());
  Epetra_Vector RHS(A->OperatorDomainMap());

  LHS.PutScalar(0.0);
  RHS.Random();

  // need an Epetra_LinearProblem to define AztecOO solver
  Epetra_LinearProblem Problem(&*A,&LHS,&RHS);

  // now we can allocate the AztecOO solver
  AztecOO Solver(Problem);

  // specify solver
  Solver.SetAztecOption(AZ_solver,AZ_cg_condnum);
  Solver.SetAztecOption(AZ_output,32);

  // HERE WE SET THE IFPACK PRECONDITIONER
  Solver.SetPrecOperator(&Prec);

  // .. and here we solve
  // NOTE: with one process, the solver must converge in
  // one iteration.
  Solver.Iterate(1550,1e-5);
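
  // A small sketch (added here, not part of the original example) of how one
  // might verify that claim: query AztecOO for the iteration count and the
  // true residual after Iterate().
  if (Comm.MyPID() == 0)
    cout << "Iterations performed: " << Solver.NumIters()
         << ", true residual: " << Solver.TrueResidual() << endl;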

  // Prints out some information about the preconditioner
  cout << Prec;

#ifdef HAVE_MPI
  MPI_Finalize(); 
#endif

  return (EXIT_SUCCESS);
}
Example #12
0
int main(int argc, char *argv[])
{

#ifdef HAVE_MPI
  MPI_Init(&argc, &argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  // set global dimension of the matrix to 5, could be any number
  int NumGlobalElements = 5;

  // create a map
  Epetra_Map Map(NumGlobalElements,0,Comm);

  // local number of rows
  int NumMyElements = Map.NumMyElements();

  // get update list
  int * MyGlobalElements = Map.MyGlobalElements( );

  // Create an integer vector NumNz that is used to build the Epetra matrix.
  // NumNz[i] is the number of nonzero terms in the ith global equation
  // on this processor (including the diagonal entry).

  int * NumNz = new int[NumMyElements];

  // We are building a tridiagonal matrix where each row has (-1 2 -1),
  // so interior rows have 3 nonzeros and the first and last rows have 2.

  for ( int i=0; i<NumMyElements; i++)
    if (MyGlobalElements[i]==0 || MyGlobalElements[i] == NumGlobalElements-1)
      NumNz[i] = 2;
    else
      NumNz[i] = 3;

  // Create a Epetra_Matrix
  Epetra_CrsMatrix A(Copy,Map,NumNz);
  // (NOTE: the constructor `Epetra_CrsMatrix A(Copy,Map,3);' would have worked too.)

  // Add rows one at a time.
  // We need some work arrays to help:
  // off-diagonal values are always -1, the diagonal term is 2.

  double *Values = new double[2];
  Values[0] = -1.0; Values[1] = -1.0;
  int *Indices = new int[2];
  double two = 2.0;
  int NumEntries;

  for( int i=0 ; i<NumMyElements; ++i ) {
    if (MyGlobalElements[i]==0) {
      Indices[0] = 1;
      NumEntries = 1;
    } else if (MyGlobalElements[i] == NumGlobalElements-1) {
      Indices[0] = NumGlobalElements-2;
      NumEntries = 1;
    } else {
      Indices[0] = MyGlobalElements[i]-1;
      Indices[1] = MyGlobalElements[i]+1;
      NumEntries = 2;
    }
    A.InsertGlobalValues(MyGlobalElements[i], NumEntries, Values, Indices);
    // Put in the diagonal entry
    A.InsertGlobalValues(MyGlobalElements[i], 1, &two, MyGlobalElements+i);
  }

  // Finish up, transforming the matrix entries into local numbering
  // to optimize data transfer during matrix-vector products
  A.FillComplete();

  // build up two distributed vectors q and z, and compute
  // q = A * z
  Epetra_Vector q(A.RowMap());
  Epetra_Vector z(A.RowMap());

  // Fill z with 1's
  z.PutScalar( 1.0 );

  A.Multiply(false, z, q); // Compute q = A*z

  double dotProduct;
  z.Dot( q, &dotProduct );
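
  // Sanity note (added, not in the original example): with z = 1 everywhere,
  // each interior row of the (-1 2 -1) stencil sums to 0 and the two boundary
  // rows sum to 1, so the dot product q.z should come out to 2.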

  if( Comm.MyPID() == 0 )
    cout << "q dot z = " << dotProduct << endl;

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  delete[] NumNz;

  return( EXIT_SUCCESS );

} /* main */
Example #13
0
int main(int argc, char *argv[]) {

  int i, returnierr=0;

#ifdef EPETRA_MPI
  // Initialize MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  // Uncomment to debug in parallel: int tmp; if (Comm.MyPID()==0) cin >> tmp; Comm.Barrier();

  bool verbose = false;
  bool veryVerbose = false;

  // Check if we should print results to standard out
  if (argc>1) if (argv[1][0]=='-' && argv[1][1]=='v') verbose = true;

  // Check if we should print lots of results to standard out
  if (argc>2) if (argv[2][0]=='-' && argv[2][1]=='v') veryVerbose = true;

  if (verbose && Comm.MyPID()==0)
    std::cout << Epetra_Version() << std::endl << std::endl;

  if (!verbose) Comm.SetTracebackMode(0); // This should shut down any error traceback reporting

  if (verbose) std::cout << Comm << std::endl << std::flush;

  bool verbose1 = verbose;
  if (verbose) verbose = (Comm.MyPID()==0);

  bool veryVerbose1 = veryVerbose;
  if (veryVerbose) veryVerbose = (Comm.MyPID()==0);

  int NumMyElements = 100;
  if (veryVerbose1) NumMyElements = 10;
  NumMyElements += Comm.MyPID();
  int MaxNumMyElements = NumMyElements+Comm.NumProc()-1;
  int * ElementSizeList = new int[NumMyElements];
  long long * MyGlobalElements = new long long[NumMyElements];

  for (i = 0; i<NumMyElements; i++) {
    MyGlobalElements[i] = (Comm.MyPID()*MaxNumMyElements+i)*2;
    ElementSizeList[i] = i%6 + 2; // elementsizes go from 2 to 7
  }

  Epetra_BlockMap Map(-1LL, NumMyElements, MyGlobalElements, ElementSizeList,
		      0, Comm);

  delete [] ElementSizeList;
  delete [] MyGlobalElements;

  Epetra_MapColoring C0(Map);

  int * elementColors = new int[NumMyElements];

  int maxcolor = 24;
  int * colorCount = new int[maxcolor];
  int ** colorLIDs = new int*[maxcolor];
  for (i=0; i<maxcolor; i++) colorCount[i] = 0;
  for (i=0; i<maxcolor; i++) colorLIDs[i] = 0;

  int defaultColor = C0.DefaultColor();
  for (i=0; i<Map.NumMyElements(); i++) {
    assert(C0[i]==defaultColor);
    assert(C0(Map.GID64(i))==defaultColor);
    if (i%2==0) C0[i] = i%6+5+i%14; // cycle through 5...23 on even elements
    else C0(Map.GID64(i)) = i%5+1; // cycle through 1...5 on odd elements
    elementColors[i] = C0[i]; // Record color of ith element for use below
    colorCount[C0[i]]++; // Count how many of each color for checking below
  }
  
  if (veryVerbose)
    std::cout << "Original Map Coloring using element-by-element definitions" << std::endl;
  if (veryVerbose1)
    std::cout <<  C0 << std::endl;

  int numColors = 0;
  for (i=0; i<maxcolor; i++) 
    if (colorCount[i]>0) {
      numColors++;
      colorLIDs[i] = new int[colorCount[i]];
    }
  for (i=0; i<maxcolor; i++) colorCount[i] = 0;
  for (i=0; i<Map.NumMyElements(); i++) colorLIDs[C0[i]][colorCount[C0[i]]++] = i;

  

  int newDefaultColor = -1;
  Epetra_MapColoring C1(Map, elementColors, newDefaultColor);
  if (veryVerbose)
    std::cout << "Same Map Coloring using one-time construction" << std::endl;
  if (veryVerbose1)
    std::cout <<  C1 << std::endl;
  assert(C1.DefaultColor()==newDefaultColor);
  for (i=0; i<Map.NumMyElements(); i++) assert(C1[i]==C0[i]);

  Epetra_MapColoring C2(C1);
  if (veryVerbose)
    std::cout << "Same Map Coloring using copy constructor" << std::endl;
  if (veryVerbose1)
    std::cout <<  C1 << std::endl;
  for (i=0; i<Map.NumMyElements(); i++) assert(C2[i]==C0[i]);
  assert(C2.DefaultColor()==newDefaultColor);

  assert(numColors==C2.NumColors());

  for (i=0; i<maxcolor; i++) {
    int curNumElementsWithColor = C2.NumElementsWithColor(i);
    assert(colorCount[i]==curNumElementsWithColor);
    int * curColorLIDList = C2.ColorLIDList(i);
    if (curNumElementsWithColor==0) {
      assert(curColorLIDList==0);
    }
    else
      for (int j=0; j<curNumElementsWithColor; j++) assert(curColorLIDList[j]==colorLIDs[i][j]);
  }
  int curColor = 1;
  Epetra_Map * Map1 = C2.GenerateMap(curColor);
  Epetra_BlockMap * Map2 = C2.GenerateBlockMap(curColor);

  assert(Map1->NumMyElements()==colorCount[curColor]);
  assert(Map2->NumMyElements()==colorCount[curColor]);

  for (i=0; i<Map1->NumMyElements(); i++) {
    assert(Map1->GID64(i)==Map.GID64(colorLIDs[curColor][i]));
    assert(Map2->GID64(i)==Map.GID64(colorLIDs[curColor][i]));
    assert(Map2->ElementSize(i)==Map.ElementSize(colorLIDs[curColor][i]));
  }

  // Now test data redistribution capabilities


  Epetra_Map ContiguousMap(-1LL, Map.NumMyElements(), Map.IndexBase64(), Comm);
  // This vector contains the element sizes for the original map.
  Epetra_IntVector elementSizes(Copy, ContiguousMap, Map.ElementSizeList());
  Epetra_LongLongVector elementIDs(Copy, ContiguousMap, Map.MyGlobalElements64());
  Epetra_IntVector elementColorValues(Copy, ContiguousMap, C2.ElementColors());


  long long NumMyElements0 = 0;
  if (Comm.MyPID()==0) NumMyElements0 = Map.NumGlobalElements64();
  Epetra_Map CMap0(-1LL, NumMyElements0, Map.IndexBase64(), Comm);
  Epetra_Import importer(CMap0, ContiguousMap);
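  // (Note added for clarity: CMap0 places all global elements on processor 0,
  //  so importing through this Epetra_Import gathers the element sizes, IDs,
  //  and colors onto PE 0 for the checks below.)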
  Epetra_IntVector elementSizes0(CMap0);
  Epetra_LongLongVector elementIDs0(CMap0);
  Epetra_IntVector elementColorValues0(CMap0);
  elementSizes0.Import(elementSizes, importer, Insert);
  elementIDs0.Import(elementIDs, importer, Insert);
  elementColorValues0.Import(elementColorValues, importer, Insert);

  Epetra_BlockMap MapOnPE0(-1LL,NumMyElements0, elementIDs0.Values(), 
			   elementSizes0.Values(), Map.IndexBase64(), Comm);

  Epetra_Import importer1(MapOnPE0, Map);
  Epetra_MapColoring ColoringOnPE0(MapOnPE0);
  ColoringOnPE0.Import(C2, importer1, Insert);

  for (i=0; i<MapOnPE0.NumMyElements(); i++)
    assert(ColoringOnPE0[i]==elementColorValues0[i]);

  if (veryVerbose)
    std::cout << "Same Map Coloring on PE 0 only" << std::endl;
  if (veryVerbose1)
    std::cout <<  ColoringOnPE0 << std::endl;
  Epetra_MapColoring C3(Map);
  C3.Export(ColoringOnPE0, importer1, Insert);
  for (i=0; i<Map.NumMyElements(); i++) assert(C3[i]==C2[i]);
  if (veryVerbose)
    std::cout << "Same Map Coloring after Import/Export exercise" << std::endl;
  if (veryVerbose1)
    std::cout <<  ColoringOnPE0 << std::endl;
   
  
  if (verbose) std::cout << "Checked OK\n\n" << std::endl;

  if (verbose1) {
    if (verbose) std::cout << "Test ostream << operator" << std::endl << std::flush;
    std::cout << C0 << std::endl;
  }
	

  delete [] elementColors;
  for (i=0; i<maxcolor; i++) if (colorLIDs[i]!=0) delete [] colorLIDs[i];
  delete [] colorLIDs;
  delete [] colorCount;

  delete Map1;
  delete Map2;


#ifdef EPETRA_MPI
  MPI_Finalize();
#endif

  return returnierr;
}
Example #14
0
int main(int argc, char *argv[]) {

#ifdef HAVE_MPI
  MPI_Init(&argc, &argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  // initialize a Gallery object
  CrsMatrixGallery Gallery("laplace_2d", Comm, false); // CJ TODO FIXME: change for Epetra64
  Gallery.Set("problem_size", 100); // must be a square number
 
  // get pointers to the linear problem, containing matrix, LHS and RHS.
  // if you need to access them, you can for example uncomment the following
  // code:
  // Epetra_CrsMatrix* Matrix = Gallery.GetMatrix();
  // Epetra_MultiVector* LHS = Gallery.GetStartingSolution();
  // Epetra_MultiVector* RHS = Gallery.GetRHS();
  //
  // NOTE: StartingSolution and RHS are pointers to Gallery's internally stored
  // vectors. Using StartingSolution and RHS, we can verify the residual
  // after the solution of the linear system. However, users may define as well
  // their own vectors for solution and RHS. 
  
  Epetra_LinearProblem* Problem = Gallery.GetLinearProblem();

  // initialize Amesos solver:
  // `Solver' is the pointer to the Amesos solver
  // (note the use of the base class Amesos_BaseSolver)
  Amesos_BaseSolver* Solver;
  // Amesos_Factory is the function class used to create the solver.
  // This class contains no data.
  Amesos Amesos_Factory;

  // empty parameter list
  Teuchos::ParameterList List;
  
  // may also try: "Amesos_Umfpack", "Amesos_Lapack", ...
  string SolverType = "Amesos_Klu";
  
  Solver = Amesos_Factory.Create(SolverType, *Problem);
  // Amesos_Factory returns 0 if the selected solver is not
  // available
  assert (Solver);

  // start solving
  Solver->SymbolicFactorization();
  Solver->NumericFactorization();
  Solver->Solve();

  // verify that the residual is really small
  double residual, diff;

  Gallery.ComputeResidual(&residual);
  Gallery.ComputeDiffBetweenStartingAndExactSolutions(&diff);

  if( Comm.MyPID() == 0 ) {
    cout << "||b-Ax||_2 = " << residual << endl;
    cout << "||x_exact - x||_2 = " << diff << endl;
  }

  // delete Solver
  delete Solver;
    
  if (residual > 1e-5)
    exit(EXIT_FAILURE);

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  exit(EXIT_SUCCESS);
}
TEUCHOS_UNIT_TEST(AndersonAcceleration, AA_Rosenbrock)
{
  int status = 0;

  // Create a communicator for Epetra objects
#ifdef HAVE_MPI
  Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
  Epetra_SerialComm Comm;
#endif

  TEST_ASSERT(Comm.NumProc() == 1);

  ::Stratimikos::DefaultLinearSolverBuilder builder;

  Teuchos::RCP<Teuchos::ParameterList> p =
    Teuchos::rcp(new Teuchos::ParameterList);
  {
    p->set("Linear Solver Type", "AztecOO");
    //p->set("Preconditioner Type", "Ifpack");
    p->set("Preconditioner Type", "None");
    Teuchos::ParameterList& az = p->sublist("Linear Solver Types").sublist("AztecOO");
    az.sublist("Forward Solve").sublist("AztecOO Settings").set("Output Frequency", 1);
    az.sublist("VerboseObject").set("Verbosity Level", "high");
    Teuchos::ParameterList& ip = p->sublist("Preconditioner Types").sublist("Ifpack");
    ip.sublist("VerboseObject").set("Verbosity Level", "high");
  }

  builder.setParameterList(p);

  Teuchos::RCP< ::Thyra::LinearOpWithSolveFactoryBase<double> >
    lowsFactory = builder.createLinearSolveStrategy("");

  Teuchos::RCP<RosenbrockModelEvaluator> thyraModel =
    Teuchos::rcp(new RosenbrockModelEvaluator(Teuchos::rcp(&Comm,false)));

  thyraModel->set_W_factory(lowsFactory);

  // Create nox parameter list
  Teuchos::RCP<Teuchos::ParameterList> nl_params =
    Teuchos::rcp(new Teuchos::ParameterList);
  nl_params->set("Nonlinear Solver", "Anderson Accelerated Fixed-Point");
  nl_params->sublist("Anderson Parameters").set("Storage Depth", 2);
  nl_params->sublist("Anderson Parameters").set("Mixing Parameter", 1.0);
  nl_params->sublist("Anderson Parameters").set("Acceleration Start Iteration", 1);
  nl_params->sublist("Anderson Parameters").set("Adjust Matrix for Condition Number", false);
  nl_params->sublist("Anderson Parameters").sublist("Preconditioning").set("Precondition", false);

  Teuchos::ParameterList& printParams = nl_params->sublist("Printing");
  printParams.set("Output Information",
          NOX::Utils::OuterIteration +
          NOX::Utils::OuterIterationStatusTest +
          NOX::Utils::InnerIteration +
          NOX::Utils::LinearSolverDetails +
          NOX::Utils::Parameters +
          NOX::Utils::Details +
          NOX::Utils::Warning +
          NOX::Utils::Debug +
          NOX::Utils::TestDetails +
          NOX::Utils::Error);

  nl_params->sublist("Solver Options").set("Status Test Check Type", "Complete");

  // Enable row sum scaling
  nl_params->sublist("Thyra Group Options").set("Function Scaling", "Row Sum");

  // Create Status Tests
  {
    Teuchos::ParameterList& st = nl_params->sublist("Status Tests");
    st.set("Test Type", "Combo");
    st.set("Combo Type", "OR");
    st.set("Number of Tests", 3);

    {
      Teuchos::ParameterList& conv = st.sublist("Test 0");
      conv.set("Test Type", "Combo");
      conv.set("Combo Type", "AND");
      conv.set("Number of Tests", 2);

      Teuchos::ParameterList& normF_rel = conv.sublist("Test 0");
      normF_rel.set("Test Type", "NormF");
      normF_rel.set("Tolerance", 1.0e-8);

      Teuchos::ParameterList& normWRMS = conv.sublist("Test 1");
      normWRMS.set("Test Type", "NormWRMS");
      normWRMS.set("Absolute Tolerance", 1.0e-8);
      normWRMS.set("Relative Tolerance", 1.0e-5);
      normWRMS.set("Tolerance", 1.0);
      normWRMS.set("BDF Multiplier", 1.0);
      normWRMS.set("Alpha", 1.0);
      normWRMS.set("Beta", 0.5);
      normWRMS.set("Disable Implicit Weighting", true);
    }

    {
      Teuchos::ParameterList& fv = st.sublist("Test 1");
      fv.set("Test Type", "FiniteValue");
      fv.set("Vector Type", "F Vector");
      fv.set("Norm Type", "Two Norm");
    }

    {
      Teuchos::ParameterList& maxiters = st.sublist("Test 2");
      maxiters.set("Test Type", "MaxIters");
      maxiters.set("Maximum Iterations", 100);
    }

  }

  // Create a Thyra nonlinear solver
  Teuchos::RCP< ::Thyra::NonlinearSolverBase<double> > solver =
    Teuchos::rcp(new ::Thyra::NOXNonlinearSolver);

  solver->setParameterList(nl_params);
  solver->setModel(thyraModel);

  Teuchos::RCP< ::Thyra::VectorBase<double> >
    initial_guess = thyraModel->getNominalValues().get_x()->clone_v();

  ::Thyra::SolveCriteria<double> solve_criteria;
  ::Thyra::SolveStatus<double> solve_status;

  solve_status = solver->solve(initial_guess.get(), &solve_criteria);


  Teuchos::RCP< ::Thyra::NOXNonlinearSolver> thyra_nox_solver =
    Teuchos::rcp_dynamic_cast< ::Thyra::NOXNonlinearSolver>(solver);
  TEST_EQUALITY(thyra_nox_solver->getNOXSolver()->getNumIterations(), 6);

  Teuchos::RCP<const Epetra_Vector> x_analytic = thyraModel->get_analytic_solution();

  Teuchos::RCP<const NOX::Abstract::Vector> x = thyra_nox_solver->getNOXSolver()->getSolutionGroup().getXPtr();

  Teuchos::RCP<const NOX::Thyra::Vector> nox_thyra_x =
    Teuchos::rcp_dynamic_cast<const NOX::Thyra::Vector>(x,true);

  Teuchos::RCP<const Thyra::SpmdVectorBase<double> > spmd_x =
    Teuchos::rcp_dynamic_cast<const Thyra::SpmdVectorBase<double> >(nox_thyra_x->getThyraRCPVector(),true);

  Teuchos::ArrayRCP<const double> local_values;
  spmd_x->getLocalData(outArg(local_values));

  double tol = 1.0e-7;
  TEST_FLOATING_EQUALITY((*x_analytic)[0],local_values[0],tol);
  TEST_FLOATING_EQUALITY((*x_analytic)[1],local_values[1],tol);

  if (solve_status.solveStatus == ::Thyra::SOLVE_STATUS_CONVERGED)
    std::cout << "Test passed!" << std::endl;

//   std::cout << *p << std::endl;

  Teuchos::TimeMonitor::summarize();

  // Final return value (0 = successful, non-zero = failure)
  TEST_ASSERT(status == 0);
}
int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
    Teuchos::GlobalMPISession mpiSession(&argc, &argv, 0);
    Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
    Epetra_SerialComm Comm;
#endif
    typedef double                            ST;
    typedef Teuchos::ScalarTraits<ST>        SCT;
    typedef SCT::magnitudeType                MT;
    typedef Epetra_MultiVector                MV;
    typedef Epetra_Operator                   OP;
    typedef Belos::MultiVecTraits<ST,MV>     MVT;
    typedef Belos::OperatorTraits<ST,MV,OP>  OPT;
    using Teuchos::RCP;
    using Teuchos::rcp;


    bool success = true;
    string pass = "******";
    string fail = "End Result: TEST FAILED";



    bool verbose = false, proc_verbose = true;
    bool leftprec = false;      // left preconditioning or right.
    int frequency = -1;        // frequency of status test output.
    int blocksize = 1;         // blocksize
    int numrhs = 1;            // number of right-hand sides to solve for
    int maxrestarts = 15;      // maximum number of restarts allowed
    int maxsubspace = 25;      // maximum number of blocks the solver can use
                               // for the subspace
    char file_name[100];

    int nProcs, myPID ;
    Teuchos::RCP <Teuchos::ParameterList> pLUList ;        // ParaLU parameters
    Teuchos::ParameterList isoList ;        // Isorropia parameters
    Teuchos::ParameterList shyLUList ;    // ShyLU parameters
    string ipFileName = "ShyLU.xml";       // TODO : Accept as i/p

#ifdef HAVE_MPI
    nProcs = mpiSession.getNProc();
    myPID = Comm.MyPID();
#else
    nProcs = 1;
    myPID = 0;
#endif

    if (myPID == 0)
    {
        cout <<"Parallel execution: nProcs="<< nProcs << endl;
    }

    // =================== Read input xml file =============================
    pLUList = Teuchos::getParametersFromXmlFile(ipFileName);
    isoList = pLUList->sublist("Isorropia Input");
    shyLUList = pLUList->sublist("ShyLU Input");
    shyLUList.set("Outer Solver Library", "Belos");
    // Get matrix market file name
    string MMFileName = Teuchos::getParameter<string>(*pLUList, "mm_file");
    string prec_type = Teuchos::getParameter<string>(*pLUList, "preconditioner");
    int maxiters = Teuchos::getParameter<int>(*pLUList, "Outer Solver MaxIters");
    MT tol = Teuchos::getParameter<double>(*pLUList, "Outer Solver Tolerance");
    string rhsFileName = pLUList->get<string>("rhs_file", "");


    int maxFiles = pLUList->get<int>("Maximum number of files to read in", 1);
    int startFile = pLUList->get<int>("Number of initial file", 1);
    int file_number = startFile;

    if (myPID == 0)
    {
        cout << "Input :" << endl;
        cout << "ParaLU params " << endl;
        pLUList->print(std::cout, 2, true, true);
        cout << "Matrix market file name: " << MMFileName << endl;
    }

    if (maxFiles > 1)
    {
        MMFileName += "%d.mm";
        sprintf( file_name, MMFileName.c_str(), file_number );
    }
    else
    {
        strcpy( file_name, MMFileName.c_str());
    }

    // ==================== Read input Matrix ==============================
    Epetra_CrsMatrix *A;
    Epetra_MultiVector *b1;

    int err = EpetraExt::MatrixMarketFileToCrsMatrix(file_name, Comm, A);
    if (err != 0 && myPID == 0)
      {
        cout << "Matrix file could not be read in!!!, info = "<< err << endl;
        success = false;
      }

    int n = A->NumGlobalRows();

    // ==================== Read input rhs  ==============================
    if (rhsFileName != "" && maxFiles > 1)
    {
        rhsFileName += "%d.mm";
        sprintf( file_name, rhsFileName.c_str(), file_number );
    }
    else
    {
        strcpy( file_name, rhsFileName.c_str());
    }

    Epetra_Map vecMap(n, 0, Comm);
    bool allOneRHS = false;
    if (rhsFileName != "")
    {
        err = EpetraExt::MatrixMarketFileToMultiVector(file_name, vecMap, b1);
    }
    else
    {
        b1 = new Epetra_MultiVector(vecMap, 1, false);
        b1->Random();
        allOneRHS = true;
    }

    Epetra_MultiVector x(vecMap, 1);

    // Partition the matrix with hypergraph partitioning and redistribute
    Isorropia::Epetra::Partitioner *partitioner = new
                            Isorropia::Epetra::Partitioner(A, isoList, false);
    partitioner->partition();
    Isorropia::Epetra::Redistributor rd(partitioner);

    Epetra_CrsMatrix *newA;
    Epetra_MultiVector *newX, *newB;
    rd.redistribute(*A, newA);
    delete A;
    A = newA;
    RCP<Epetra_CrsMatrix> rcpA(A, false);

    rd.redistribute(x, newX);
    rd.redistribute(*b1, newB);
    delete b1;
    RCP<Epetra_MultiVector> rcpx (newX, false);
    RCP<Epetra_MultiVector> rcpb (newB, false);
    //OPT::Apply(*rcpA, *rcpx, *rcpb );


    Epetra_CrsMatrix *iterA = 0;
    Epetra_CrsMatrix *redistA = 0;
    Epetra_MultiVector *iterb1 = 0;
    // initialize to NULL so the cleanup below is safe even if no
    // preconditioner is ever constructed
    Ifpack_Preconditioner *prec = NULL;
    ML_Epetra::MultiLevelPreconditioner *MLprec = NULL;
//#ifdef TIMING_OUTPUT
        Teuchos::Time ftime("solve time");
//#endif
    while(file_number < maxFiles+startFile)
    {

        if (prec_type.compare("ShyLU") == 0)
        {
            if (file_number == startFile)
            {
//#ifdef TIMING_OUTPUT
        ftime.start();
//#endif
                prec = new Ifpack_ShyLU(A);
#ifdef HAVE_IFPACK_DYNAMIC_FACTORY
                Teuchos::ParameterList shyluParameters;
                shyluParameters.set<Teuchos::ParameterList>("ShyLU list", shyLUList);
                prec->SetParameters(shyluParameters);
#else
                prec->SetParameters(shyLUList);
#endif
                prec->Initialize();
//#ifdef TIMING_OUTPUT
        ftime.stop();
//#endif
            }
//#ifdef TIMING_OUTPUT
        ftime.start();
//#endif
            prec->Compute();
//#ifdef TIMING_OUTPUT
        ftime.stop();
//#endif
            //cout << " Going to set it in solver" << endl ;
            //solver.SetPrecOperator(prec);
            //cout << " Done setting the solver" << endl ;
        }
        else if (prec_type.compare("ILU") == 0)
        {
            prec = new Ifpack_ILU(A);
            prec->Initialize();
            prec->Compute();
            //solver.SetPrecOperator(prec);
        }
        else if (prec_type.compare("ILUT") == 0)
        {
            prec = new Ifpack_ILUT(A);
            prec->Initialize();
            prec->Compute();
            //solver.SetPrecOperator(prec);
        }
        else if (prec_type.compare("ML") == 0)
        {
            Teuchos::ParameterList mlList; // TODO : Take it from i/p
            MLprec = new ML_Epetra::MultiLevelPreconditioner(*A, mlList, true);
            //solver.SetPrecOperator(MLprec);
        }

        RCP<Ifpack_Preconditioner> rcpPrec(prec, false);
        RCP<Belos::EpetraPrecOp> belosPrec = rcp(new Belos::EpetraPrecOp(rcpPrec));

        const int NumGlobalElements = rcpb->GlobalLength();
        Teuchos::ParameterList belosList;
         //belosList.set( "Flexible Gmres", true );
        belosList.set( "Num Blocks", maxsubspace );// Maximum number of blocks in Krylov factorization
        belosList.set( "Block Size", blocksize );  // Blocksize to be used by iterative solver
        belosList.set( "Maximum Iterations", maxiters ); // Maximum number of iterations allowed
        belosList.set( "Maximum Restarts", maxrestarts );// Maximum number of restarts allowed
        belosList.set( "Convergence Tolerance", tol );   // Relative convergence tolerance requested
        if (numrhs > 1) {
        belosList.set( "Show Maximum Residual Norm Only", true );  // Show only the maximum residual norm
        }
        if (verbose) {
        belosList.set( "Verbosity", Belos::Errors + Belos::Warnings +
               Belos::TimingDetails + Belos::StatusTestDetails );
        if (frequency > 0)
          belosList.set( "Output Frequency", frequency );
        }
        else
        belosList.set( "Verbosity", Belos::Errors + Belos::Warnings );
        //
        // *******Construct a preconditioned linear problem********
        //

        rcpx->PutScalar(0.0);
        RCP<Belos::LinearProblem<double,MV,OP> > problem
        = rcp( new Belos::LinearProblem<double,MV,OP>( rcpA, rcpx, rcpb ) );
        if (leftprec) {
        problem->setLeftPrec( belosPrec );
        }
        else {
        problem->setRightPrec( belosPrec );
        }
        bool set = problem->setProblem();
        if (set == false) {
        if (proc_verbose)
          {
          cout << endl << "ERROR:  Belos::LinearProblem failed to set up correctly!" << endl;
          }
          cout << fail << endl;
          success = false;
          return -1;
        }

        // Create an iterative solver manager.
        RCP< Belos::SolverManager<double,MV,OP> > solver
        = rcp( new Belos::BlockGmresSolMgr<double,MV,OP>(problem,
                rcp(&belosList,false)));

        //
        // *******************************************************************
        // *************Start the block Gmres iteration*************************
        // *******************************************************************
        //
        if (proc_verbose)
        {
            cout << std::endl << std::endl;
            cout << "Dimension of matrix: " << NumGlobalElements << endl;
            cout << "Number of right-hand sides: " << numrhs << endl;
            cout << "Block size used by solver: " << blocksize << endl;
            cout << "Number of restarts allowed: " << maxrestarts << endl;
            cout << "Max number of Gmres iterations per restart cycle: " <<
                        maxiters << endl;
            cout << "Relative residual tolerance: " << tol << endl;
            cout << endl;
        }

        if(tol > 1e-5)
          {
            success = false;
          }



        //
        // Perform solve
        //
//#ifdef TIMING_OUTPUT
        ftime.start();
//#endif
        // mfh 26 Mar 2015: Don't introduce a variable (like 'ret')
        // unless you plan to use it.  The commented-out code causes a
        // build warning.
        //
        //Belos::ReturnType ret = solver->solve();
        solver->solve ();
//#ifdef TIMING_OUTPUT
        ftime.stop();
//#endif
        //
        // Get the number of iterations for this solve.
        //
        int numIters = solver->getNumIters();
        if (proc_verbose)
        {
            cout << "Number of iterations performed for this solve: " <<
                     numIters << endl;
        }
        //
        // Compute actual residuals.
        //
        //bool badRes = false; // unused
        std::vector<double> actual_resids( numrhs );
        std::vector<double> rhs_norm( numrhs );
        Epetra_MultiVector resid((*rcpA).RowMap(), numrhs);
        OPT::Apply( *rcpA, *rcpx, resid );
        MVT::MvAddMv( -1.0, resid, 1.0, *rcpb, resid );
        MVT::MvNorm( resid, actual_resids );
        MVT::MvNorm( *rcpb, rhs_norm );
        if (proc_verbose)
        {
            cout<< "------ Actual Residuals (normalized) -------"<<endl;
            for ( int i=0; i<numrhs; i++)
            {
                double actRes = actual_resids[i]/rhs_norm[i];
                std::cout<<"Problem "<<i<<" : \t"<< actRes <<std::endl;
                if (actRes > tol) {
                  //badRes = true; // unused
                  success = false;
                }
            }
        }

        file_number++;
        if (file_number >= maxFiles+startFile)
        {
          break;
        }
        else
        {
            sprintf(file_name, MMFileName.c_str(), file_number);

            if (redistA != NULL) delete redistA;
            // Load the new matrix
            err = EpetraExt::MatrixMarketFileToCrsMatrix(file_name,
                            Comm, iterA);
            if (err != 0)
            {
                if (myPID == 0)
                  {
                    cout << "Could not open file: "<< file_name << endl;

                  }
                success = false;
            }
            else
            {
                rd.redistribute(*iterA, redistA);
                delete iterA;
                InitMatValues(*redistA, A);
            }

            // Load the new rhs
            if (!allOneRHS)
            {
                sprintf(file_name, rhsFileName.c_str(), file_number);

                if (iterb1 != NULL) delete iterb1;
                err = EpetraExt::MatrixMarketFileToMultiVector(file_name,
                        vecMap, b1);
                if (err != 0)
                {
                    if (myPID==0)
                      {
                        cout << "Could not open file: "<< file_name << endl;
                        success = false;
                      }
                }
                else
                {
                    rd.redistribute(*b1, iterb1);
                    delete b1;
                    InitMVValues( *iterb1, newB );
                }
            }
        }
    }
//#ifdef TIMING_OUTPUT
        cout << "Time to solve: " << ftime.totalElapsedTime() << endl;
        if(success)
          {
            cout << pass << endl;
          }
        else
          {
            cout << fail << endl;
          }

//#endif
    if (redistA != NULL) delete redistA;
    if (iterb1 != NULL) delete iterb1;


    if (prec_type.compare("ML") == 0)
    {
        delete MLprec;
    }
    else
    {
        delete prec;
    }
    delete newX;
    delete newB;
    delete A;
    delete partitioner;
}
Example #17
0
int main(int argc, char *argv[]) {

  int ierr=0, returnierr=0;

#ifdef EPETRA_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  bool verbose = false;

  // Check if we should print results to standard out
  if (argc>1) if (argv[1][0]=='-' && argv[1][1]=='v') verbose = true;


  if (!verbose) {
    Comm.SetTracebackMode(0); // This should shut down any error traceback reporting
  }
  int MyPID = Comm.MyPID();
  int NumProc = Comm.NumProc();

  if (verbose && MyPID==0)
    cout << Epetra_Version() << endl << endl;

  if (verbose) cout << Comm << endl;

  bool verbose1 = verbose;
  if (verbose) verbose = (MyPID==0);

  int NumMyElements = 10000;
  int NumMyElements1 = NumMyElements; // Used for local map
  long long NumGlobalElements = NumMyElements*NumProc+EPETRA_MIN(NumProc,3);
  if (MyPID < 3) NumMyElements++;
  int IndexBase = 0;
  bool DistributedGlobal = (NumGlobalElements>NumMyElements);
  
  Epetra_Map* Map;

  // Test exceptions

  if (verbose)
    cout << "*******************************************************************************************" << endl
	 << "        Testing Exceptions (Expect error messages if EPETRA_NO_ERROR_REPORTS is not defined" << endl
	 << "*******************************************************************************************" << endl
	 << endl << endl;

  try {
    if (verbose) cout << "Checking Epetra_Map(-2, IndexBase, Comm)" << endl;
    Map = new Epetra_Map((long long)-2, IndexBase, Comm);
  }
  catch (int Error) {
    if (Error!=-1) {
      if (Error!=0) {
	EPETRA_TEST_ERR(Error,returnierr);
	if (verbose) cout << "Error code should be -1" << endl;
      }
      else {
	cout << "Error code = " << Error << "Should be -1" << endl;
	returnierr+=1;
      }
    }
    else if (verbose) cout << "Checked OK\n\n" << endl;
  }

  try {
    if (verbose) cout << "Checking Epetra_Map(2, 3, IndexBase, Comm)" << endl;
    Map = new Epetra_Map((long long)2, 3, IndexBase, Comm);
  }
  catch (int Error) {
    if (Error!=-4) {
      if (Error!=0) {
	EPETRA_TEST_ERR(Error,returnierr);
	if (verbose) cout << "Error code should be -4" << endl;
      }
      else {
	cout << "Error code = " << Error << "Should be -4" << endl;
	returnierr+=1;
      }
    }
    else if (verbose) cout << "Checked OK\n\n" << endl;
  }

  if (verbose) cerr << flush;
  if (verbose) cout << flush;
  Comm.Barrier();
  if (verbose)
    cout << endl << endl
      << "*******************************************************************************************" << endl
      << "        Testing valid constructor now......................................................" << endl
      << "*******************************************************************************************" << endl
      << endl << endl;

  // Test Epetra-defined uniform linear distribution constructor
  Map = new Epetra_Map(NumGlobalElements, IndexBase, Comm);
  if (verbose) cout << "Checking Epetra_Map(NumGlobalElements, IndexBase, Comm)" << endl;
  ierr = checkmap(*Map, NumGlobalElements, NumMyElements, 0, 
		  IndexBase, Comm, DistributedGlobal);

  EPETRA_TEST_ERR(ierr,returnierr);
  if (verbose && ierr==0) cout << "Checked OK\n\n" <<endl;

  delete Map;

  // Test User-defined linear distribution constructor
  Map = new Epetra_Map(NumGlobalElements, NumMyElements, IndexBase, Comm);

  if (verbose) cout << "Checking Epetra_Map(NumGlobalElements, NumMyElements, IndexBase, Comm)" << endl;
  ierr = checkmap(*Map, NumGlobalElements, NumMyElements, 0, 
		  IndexBase, Comm, DistributedGlobal);

  EPETRA_TEST_ERR(ierr,returnierr);
  if (verbose && ierr==0) cout << "Checked OK\n\n" <<endl;
  delete Map;

  // Test User-defined arbitrary distribution constructor
  // Generate Global Element List.  Do in reverse for fun!

  long long * MyGlobalElements = new long long[NumMyElements];
  int MaxMyGID = (Comm.MyPID()+1)*NumMyElements-1+IndexBase;
  if (Comm.MyPID()>2) MaxMyGID+=3;
  for (int i = 0; i<NumMyElements; i++) MyGlobalElements[i] = MaxMyGID-i;

  Map = new Epetra_Map(NumGlobalElements, NumMyElements, MyGlobalElements, 
											 IndexBase, Comm);
  if (verbose) cout << "Checking Epetra_Map(NumGlobalElements, NumMyElements, MyGlobalElements,  IndexBase, Comm)" << endl;
  ierr = checkmap(*Map, NumGlobalElements, NumMyElements, MyGlobalElements, 
									IndexBase, Comm, DistributedGlobal);

  EPETRA_TEST_ERR(ierr,returnierr);
  if (verbose && ierr==0) cout << "Checked OK\n\n" <<endl;
  // Test Copy constructor
  Epetra_Map* Map1 = new Epetra_Map(*Map);

  // Test SameAs() method
  bool same = Map1->SameAs(*Map);
  EPETRA_TEST_ERR(!(same==true),ierr);// should return true since Map1 is a copy of Map

  Epetra_BlockMap* Map2 = new Epetra_Map(NumGlobalElements, NumMyElements, MyGlobalElements,  IndexBase, Comm);
  same = Map2->SameAs(*Map);
  EPETRA_TEST_ERR(!(same==true),ierr); // Map and Map2 were created with the same sets of parameters
  delete Map2;

  // now test SameAs() on a map that is different

  Map2 =  new Epetra_Map(NumGlobalElements, NumMyElements, MyGlobalElements, IndexBase-1, Comm);
  same = Map2->SameAs(*Map);
  EPETRA_TEST_ERR(!(same==false),ierr); // IndexBases are different
  delete Map2;

  // Back to testing copy constructor
  if (verbose) cout << "Checking Epetra_Map(*Map)" << endl;
  ierr = checkmap(*Map1, NumGlobalElements, NumMyElements, MyGlobalElements, 
		  IndexBase, Comm, DistributedGlobal);

  EPETRA_TEST_ERR(ierr,returnierr);
  if (verbose && ierr==0) cout << "Checked OK\n\n" <<endl;
  Epetra_Map* SmallMap = 0;
  if (verbose1) {
    // Build a small map for test cout.  Use 10 elements from current map
    long long* MyEls = Map->MyGlobalElements64();
    int IndBase = Map->IndexBase();
    int MyLen = EPETRA_MIN(10+Comm.MyPID(),Map->NumMyElements());
    SmallMap = new Epetra_Map((long long)-1, MyLen, MyEls, IndBase, Comm);
  }

  delete [] MyGlobalElements;
  delete Map;
  delete Map1;

	// Test reference-counting in Epetra_Map
	if (verbose) cout << "Checking Epetra_Map reference counting" << endl;
	ierr = checkMapDataClass(Comm, verbose);
	EPETRA_TEST_ERR(ierr,returnierr);
  if (verbose && ierr==0) cout << "Checked OK\n\n" <<endl;

  // Test LocalMap constructor
  Epetra_LocalMap* LocalMap = new Epetra_LocalMap((long long)NumMyElements1, IndexBase, Comm);
  if (verbose) cout << "Checking Epetra_LocalMap(NumMyElements1, IndexBase, Comm)" << endl;
  ierr = checkmap(*LocalMap, NumMyElements1, NumMyElements1, 0, IndexBase, Comm, false);

  EPETRA_TEST_ERR(ierr,returnierr);
  if (verbose && ierr==0) cout << "Checked OK\n\n" <<endl;
  // Test Copy constructor
  Epetra_LocalMap* LocalMap1 = new Epetra_LocalMap(*LocalMap);
  if (verbose) cout << "Checking Epetra_LocalMap(*LocalMap)" << endl;
  ierr = checkmap(*LocalMap1, NumMyElements1, NumMyElements1, 0, IndexBase, Comm, false);

  EPETRA_TEST_ERR(ierr,returnierr);
  if (verbose && ierr==0) cout << "Checked OK\n\n" <<endl;
  delete LocalMap1;
  delete LocalMap;

	// Test reference-counting in Epetra_LocalMap
	if (verbose) cout << "Checking Epetra_LocalMap reference counting" << endl;
	ierr = checkLocalMapDataClass(Comm, verbose);
	EPETRA_TEST_ERR(ierr,returnierr);
  if (verbose && ierr==0) cout << "Checked OK\n\n" <<endl;

	// Test output
  if (verbose1) {
    if (verbose) cout << "Test ostream << operator" << endl << flush;
    cout << *SmallMap;
    delete SmallMap;
  }

#ifdef EPETRA_MPI
  MPI_Finalize();
#endif

  return returnierr;
}
Example #18
0
int main(int argc, char *argv[])
{
  int ierr = 0;

  double nonlinear_factor = 1.0;
  double left_bc = 0.0;
  double right_bc = 0.40;

  // Initialize MPI
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
#endif

  // Create a communicator for Epetra objects
#ifdef HAVE_MPI
  Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
  Epetra_SerialComm Comm;
#endif

  // Get the process ID and the total number of processors
  int MyPID = Comm.MyPID();
  int NumProc = Comm.NumProc();

  // Get the number of elements from the command line
  int NumGlobalElements = 100 + 1;

  // The number of unknowns must be at least equal to the 
  // number of processors.
  if (NumGlobalElements < NumProc) {
    cout << "numGlobalBlocks = " << NumGlobalElements 
	 << " cannot be < number of processors = " << NumProc << endl;
    exit(1);
  }

  // Create the FiniteElementProblem class.  This creates all required
  // Epetra objects for the problem and allows calls to the 
  // function (RHS) and Jacobian evaluation routines.
  FiniteElementProblem Problem(NumGlobalElements, Comm);

  // Get the vector from the Problem
  Epetra_Vector& soln = Problem.getSolution();

  // Initialize Solution
  soln.PutScalar(0.1);

  // Create initial guess for the null vector of jacobian
  Teuchos::RCP<NOX::Abstract::Vector> solnTwo = 
    Teuchos::rcp(new NOX::Epetra::Vector(soln));  
  solnTwo->init(2.5);             // initial value 2.5
  
  // Begin LOCA Solver ************************************

  // Create parameter list
  Teuchos::RCP<Teuchos::ParameterList> paramList = 
    Teuchos::rcp(new Teuchos::ParameterList);

  // Create LOCA sublist
  Teuchos::ParameterList& locaParamsList = paramList->sublist("LOCA");

  // Create the stepper sublist and set the stepper parameters
  Teuchos::ParameterList& locaStepperList = locaParamsList.sublist("Stepper");
  locaStepperList.set("Continuation Method", "Natural");
  //locaStepperList.set("Bordered Solver Method", "Nested");
  //locaStepperList.set("Bordered Solver Method", "Householder");
  locaStepperList.set("Continuation Parameter", "Nonlinear Factor");
  locaStepperList.set("Initial Value", nonlinear_factor);
  locaStepperList.set("Max Value", 1.6);
  locaStepperList.set("Min Value", 0.00);
  locaStepperList.set("Max Steps", 20);
  locaStepperList.set("Max Nonlinear Iterations", 15);

  // Create bifurcation sublist
  Teuchos::ParameterList& bifurcationList = 
    locaParamsList.sublist("Bifurcation");
  bifurcationList.set("Type", "Phase Transition");
  bifurcationList.set("Bifurcation Parameter", "Right BC");

  bifurcationList.set("Second Solution Vector", solnTwo);
  
  // Create predictor sublist
  Teuchos::ParameterList& predictorList = locaParamsList.sublist("Predictor");
  predictorList.set("Method", "Secant");

  // Create step size sublist
  Teuchos::ParameterList& stepSizeList = locaParamsList.sublist("Step Size");
  stepSizeList.set("Method", "Constant");
  stepSizeList.set("Initial Step Size", 0.1);
  stepSizeList.set("Min Step Size", 1.0e-3);
  stepSizeList.set("Max Step Size", 2000.0);
  stepSizeList.set("Aggressiveness", 0.1);

  // Create the "Solver" parameters sublist to be used with NOX Solvers
  Teuchos::ParameterList& nlParams = paramList->sublist("NOX");

  // Create the NOX printing parameter list
  Teuchos::ParameterList& nlPrintParams = nlParams.sublist("Printing");
  nlPrintParams.set("MyPID", MyPID); 
  nlPrintParams.set("Output Precision", 6); 
  nlPrintParams.set("Output Information", 
		    NOX::Utils::OuterIteration + 
		    NOX::Utils::OuterIterationStatusTest + 
		    NOX::Utils::InnerIteration +
		    NOX::Utils::Details + 
		    NOX::Utils::LinearSolverDetails +
		    NOX::Utils::Warning + 
		    NOX::Utils::StepperIteration +
		    NOX::Utils::StepperDetails +
		    NOX::Utils::StepperParameters);

  // Create the "Linear Solver" sublist for Newton's method
  Teuchos::ParameterList& dirParams = nlParams.sublist("Direction");
  Teuchos::ParameterList& newParams = dirParams.sublist("Newton");
  Teuchos::ParameterList& lsParams = newParams.sublist("Linear Solver");
  lsParams.set("Aztec Solver", "GMRES");  
  lsParams.set("Max Iterations", 200);  
  lsParams.set("Tolerance", 1e-6);
  lsParams.set("Output Frequency", 50);    
  //lsParams.set("Scaling", "None");             
  //lsParams.set("Scaling", "Row Sum");  
  lsParams.set("Compute Scaling Manually", false);
  lsParams.set("Preconditioner", "Ifpack");
  lsParams.set("Ifpack Preconditioner", "ILU");

  //lsParams.set("Preconditioner", "New Ifpack");
  //Teuchos::ParameterList& ifpackParams = lsParams.sublist("Ifpack");
  //ifpackParams.set("fact: level-of-fill", 1);

  // Create and initialize the parameter vector
  LOCA::ParameterVector pVector;
  pVector.addParameter("Nonlinear Factor",nonlinear_factor);
  pVector.addParameter("Left BC", left_bc);
  pVector.addParameter("Right BC", right_bc);

  // Create the interface between the test problem and the nonlinear solver
  // This is created by the user using inheritance of the abstract base class:
  Teuchos::RCP<Problem_Interface> interface = 
    Teuchos::rcp(new Problem_Interface(Problem));
  Teuchos::RCP<LOCA::Epetra::Interface::TimeDependent> iReq = interface;
  Teuchos::RCP<NOX::Epetra::Interface::Jacobian> iJac = interface;
  
  // Create the Epetra_RowMatrix for the Jacobian/Preconditioner
  Teuchos::RCP<Epetra_RowMatrix> Amat = 
    Teuchos::rcp(&Problem.getJacobian(),false);

  // Create scaling object
  Teuchos::RCP<NOX::Epetra::Scaling> scaling = Teuchos::null;
//   scaling = Teuchos::rcp(new NOX::Epetra::Scaling);
//   Teuchos::RCP<Epetra_Vector> scalingVector = 
//     Teuchos::rcp(new Epetra_Vector(soln.Map()));
//   //scaling->addRowSumScaling(NOX::Epetra::Scaling::Left, scalingVector);
//   scaling->addColSumScaling(NOX::Epetra::Scaling::Right, scalingVector);

  // Create transpose scaling object
  Teuchos::RCP<NOX::Epetra::Scaling> trans_scaling = Teuchos::null;
//   trans_scaling = Teuchos::rcp(new NOX::Epetra::Scaling);
//   Teuchos::RCP<Epetra_Vector> transScalingVector = 
//     Teuchos::rcp(new Epetra_Vector(soln.Map()));
//   trans_scaling->addRowSumScaling(NOX::Epetra::Scaling::Right, 
// 				  transScalingVector);
//   trans_scaling->addColSumScaling(NOX::Epetra::Scaling::Left, 
// 				  transScalingVector);
  //bifurcationList.set("Transpose Scaling", trans_scaling);

  // Create the linear systems
  Teuchos::RCP<NOX::Epetra::LinearSystemAztecOO> linsys = 
    Teuchos::rcp(new NOX::Epetra::LinearSystemAztecOO(nlPrintParams, lsParams,
						      iReq, iJac, Amat, soln,
						      scaling));

  // Create the loca vector
  NOX::Epetra::Vector locaSoln(soln);

  // Create Epetra factory
  Teuchos::RCP<LOCA::Abstract::Factory> epetraFactory =
    Teuchos::rcp(new LOCA::Epetra::Factory);

  // Create global data object
  Teuchos::RCP<LOCA::GlobalData> globalData = 
    LOCA::createGlobalData(paramList, epetraFactory);

  // Create the Group
  Teuchos::RCP<LOCA::Epetra::Group> grp = 
    Teuchos::rcp(new LOCA::Epetra::Group(globalData, nlPrintParams, iReq, 
					 locaSoln, linsys, linsys,
					 pVector));

  // Inject FreeEnergy interface into the group
  Teuchos::RCP<LOCA::Epetra::Interface::FreeEnergy> iFE = interface;
  grp->setFreeEnergyInterface(iFE);
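  // The phase transition tracker compares the free energies of the two
  // solution branches (obtained through this interface) to locate the
  // parameter value at which they are equal.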

  grp->computeF();

  // Create the Solver convergence test
  //NOX::StatusTest::NormWRMS wrms(1.0e-2, 1.0e-8);
  Teuchos::RCP<NOX::StatusTest::NormF> wrms = 
    Teuchos::rcp(new NOX::StatusTest::NormF(1.0e-12));
  Teuchos::RCP<NOX::StatusTest::MaxIters> maxiters = 
    Teuchos::rcp(new NOX::StatusTest::MaxIters(locaStepperList.get("Max Nonlinear Iterations", 10)));
  Teuchos::RCP<NOX::StatusTest::Combo> combo = 
    Teuchos::rcp(new NOX::StatusTest::Combo(NOX::StatusTest::Combo::OR));
  combo->addStatusTest(wrms);
  combo->addStatusTest(maxiters);
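  // OR combination: the nonlinear solve stops as soon as either the residual
  // norm falls below 1.0e-12 or the iteration count reaches the
  // "Max Nonlinear Iterations" value taken from the stepper list.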
  
  // Create the stepper  
  LOCA::Stepper stepper(globalData, grp, combo, paramList);
  LOCA::Abstract::Iterator::IteratorStatus status = stepper.run();
  
  if (status == LOCA::Abstract::Iterator::Finished) 
    globalData->locaUtils->out() << "All tests passed" << endl;
  else {
    if (globalData->locaUtils->isPrintType(NOX::Utils::Error))
      globalData->locaUtils->out() 
	<< "Stepper failed to converge!" << std::endl;
  }

  // Output the parameter list
  if (globalData->locaUtils->isPrintType(NOX::Utils::StepperParameters)) {
    globalData->locaUtils->out() 
      << std::endl << "Final Parameters" << std::endl
      << "****************" << std::endl;
    stepper.getList()->print(globalData->locaUtils->out());
    globalData->locaUtils->out() << std::endl;
  }

  LOCA::destroyGlobalData(globalData);

#ifdef HAVE_MPI
  MPI_Finalize() ;
#endif

  return ierr;
} // end main
// ====================================================================== 
int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
  Epetra_SerialComm Comm;
#endif

  verbose = (Comm.MyPID() == 0);

  for (int i = 1 ; i < argc ; ++i) {
    if (strcmp(argv[i],"-s") == 0) {
      SymmetricGallery = true;
      Solver = AZ_cg;
    }
  }

  // define the size and processor layout of the global matrix
  Teuchos::ParameterList GaleriList;
  int nx = 30; 
  GaleriList.set("nx", nx);
  GaleriList.set("ny", nx * Comm.NumProc());
  GaleriList.set("mx", 1);
  GaleriList.set("my", Comm.NumProc());
  Teuchos::RefCountPtr<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap("Cartesian2D", Comm, GaleriList) );
  Teuchos::RefCountPtr<Epetra_CrsMatrix> A;
  if (SymmetricGallery)
    A = Teuchos::rcp( Galeri::CreateCrsMatrix("Laplace2D", &*Map, GaleriList) );
  else
    A = Teuchos::rcp( Galeri::CreateCrsMatrix("Recirc2D", &*Map, GaleriList) );

  // coordinates
  Teuchos::RCP<Epetra_MultiVector> coord = Teuchos::rcp( Galeri::CreateCartesianCoordinates("2D",&*Map,GaleriList));

  // test the preconditioner
  bool TestPassed = true;

  // ======================================== //
  // first verify that we can get convergence //
  // with all point relaxation methods        //
  // ======================================== //

  if(!BasicTest("Jacobi",A,false))
    TestPassed = false;

  if(!BasicTest("symmetric Gauss-Seidel",A,false))
    TestPassed = false;

  if(!BasicTest("symmetric Gauss-Seidel",A,false,true))
    TestPassed = false;

  if (!SymmetricGallery) {
    if(!BasicTest("Gauss-Seidel",A,false))
      TestPassed = false;
    if(!BasicTest("Gauss-Seidel",A,true))
      TestPassed = false;  

    if(!BasicTest("Gauss-Seidel",A,false,true))
      TestPassed = false;
    if(!BasicTest("Gauss-Seidel",A,true,true))
      TestPassed = false;  

  }

  // ============================= //
  // check uses as preconditioners //
  // ============================= //
  
  if(!KrylovTest("symmetric Gauss-Seidel",A,false))
    TestPassed = false;

  if(!KrylovTest("symmetric Gauss-Seidel",A,false,true))
    TestPassed = false;


  if (!SymmetricGallery) {
    if(!KrylovTest("Gauss-Seidel",A,false))
      TestPassed = false;
    if(!KrylovTest("Gauss-Seidel",A,true))
      TestPassed = false;

    if(!KrylovTest("Gauss-Seidel",A,false,true))
      TestPassed = false;
    if(!KrylovTest("Gauss-Seidel",A,true,true))
      TestPassed = false;

  }

  // ================================== //
  // compare point and block relaxation //
  // ================================== //

  //TestPassed = TestPassed && 
   // ComparePointAndBlock("Jacobi",A,1);

  TestPassed = TestPassed && 
    ComparePointAndBlock("Jacobi",A,10);

  //TestPassed = TestPassed && 
    //ComparePointAndBlock("symmetric Gauss-Seidel",A,1);

  TestPassed = TestPassed && 
    ComparePointAndBlock("symmetric Gauss-Seidel",A,10);

  if (!SymmetricGallery) {
    //TestPassed = TestPassed && 
      //ComparePointAndBlock("Gauss-Seidel",A,1);

    TestPassed = TestPassed && 
      ComparePointAndBlock("Gauss-Seidel",A,10);
  }

  // ============================ //
  // verify effect of # of blocks //
  // ============================ //
  
  {
    int Iters4, Iters8, Iters16;
    Iters4 = CompareBlockSizes("Jacobi",A,4);
    Iters8 = CompareBlockSizes("Jacobi",A,8);
    Iters16 = CompareBlockSizes("Jacobi",A,16);
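    // With block Jacobi, splitting the domain into more (hence smaller)
    // blocks discards more coupling, so the iteration count is expected to
    // grow with the number of blocks; the check below verifies that.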

    if ((Iters16 > Iters8) && (Iters8 > Iters4)) {
      if (verbose)
        cout << "CompareBlockSizes Test passed" << endl;
    }
    else {
      if (verbose) 
        cout << "CompareBlockSizes TEST FAILED!" << endl;
      TestPassed = TestPassed && false;
    }
  }

  // ================================== //
  // verify effect of overlap in Jacobi //
  // ================================== //

  {
    int Iters0, Iters2, Iters4;
    Iters0 = CompareBlockOverlap(A,0);
    Iters2 = CompareBlockOverlap(A,2);
    Iters4 = CompareBlockOverlap(A,4);
    if ((Iters4 < Iters2) && (Iters2 < Iters0)) {
      if (verbose)
        cout << "CompareBlockOverlap Test passed" << endl;
    }
    else {
      if (verbose) 
        cout << "CompareBlockOverlap TEST FAILED!" << endl;
      TestPassed = TestPassed && false;
    }
  }

  // ================================== //
  // check if line smoothing works      //
  // ================================== //
  {
    int Iters1 = CompareLineSmoother(A, coord);
    printf(" comparelinesmoother iters %d \n", Iters1);
  }
  // ================================== //
  // check the all-singleton version of CompareLineSmoother //
  // ================================== //
  {

    AllSingle(A,coord);    

  }				

  // ================================== //
  // test variable blocking             //
  // ================================== //
  {
    TestPassed = TestPassed && TestVariableBlocking(A->Comm());
  }

  // ================================== //
  // test tridiagonal variable blocking //
  // ================================== //
  {
    TestPassed = TestPassed && TestTriDiVariableBlocking(A->Comm());
  }


  // ============ //
  // final output //
  // ============ //

  if (!TestPassed) {
    cout << "Test `TestRelaxation.exe' failed!" << endl;
    exit(EXIT_FAILURE);
  }
  
#ifdef HAVE_MPI
  MPI_Finalize(); 
#endif

  cout << endl;
  cout << "Test `TestRelaxation.exe' passed!" << endl;
  cout << endl;
  return(EXIT_SUCCESS);
}
int main(int argc, char *argv[])
{

#ifdef HAVE_MPI
  Teuchos::GlobalMPISession mpiSession(&argc, &argv,0);

  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  int commRank = Teuchos::GlobalMPISession::getRank();

  Comm.Barrier(); // set breakpoint here to allow debugger attachment to other MPI processes than the one you automatically attached to.

  Teuchos::CommandLineProcessor cmdp(false,true); // false: don't throw exceptions; true: do return errors for unrecognized options

  // problem parameters:
  int spaceDim = 2;
  double Re = 40;
  bool steady = false;
  string problemChoice = "TaylorGreen";
  int numRefs = 1;
  int p = 2, delta_p = 2;
  int numXElems = 1;
  int numTElems = 1;
  int numSlabs = 1;
  bool useConformingTraces = false;
  string solverChoice = "KLU";
  string multigridStrategyString = "W-cycle";
  bool useCondensedSolve = false;
  bool useConjugateGradient = true;
  bool logFineOperator = false;
  // double solverTolerance = 1e-8;
  double nonlinearTolerance = 1e-5;
  // int maxLinearIterations = 10000;
  int maxNonlinearIterations = 20;
  int cgMaxIterations = 10000;
  double cgTol = 1e-8;
  bool computeL2Error = false;
  bool exportSolution = false;
  bool saveSolution = false;
  bool loadSolution = false;
  int loadRef = 0;
  int loadDirRef = 0;
  string norm = "Graph";
  string rootDir = ".";
  string tag="";
  cmdp.setOption("spaceDim", &spaceDim, "spatial dimension");
  cmdp.setOption("Re", &Re, "Re");
  cmdp.setOption("steady", "transient", &steady, "use steady incompressible Navier-Stokes");
  cmdp.setOption("problem", &problemChoice, "Kovasznay, TaylorGreen");
  cmdp.setOption("polyOrder",&p,"polynomial order for field variable u");
  cmdp.setOption("delta_p", &delta_p, "test space polynomial order enrichment");
  cmdp.setOption("numRefs",&numRefs,"number of refinements");
  cmdp.setOption("numXElems",&numXElems,"number of elements in x direction");
  cmdp.setOption("numTElems",&numTElems,"number of elements in t direction");
  cmdp.setOption("numSlabs",&numSlabs,"number of time slabs to use");
  cmdp.setOption("norm", &norm, "norm");
  cmdp.setOption("conformingTraces", "nonconformingTraces", &useConformingTraces, "use conforming traces");
  cmdp.setOption("solver", &solverChoice, "KLU, SuperLU, MUMPS, GMG-Direct, GMG-ILU, GMG-IC");
  cmdp.setOption("multigridStrategy", &multigridStrategyString, "Multigrid strategy: V-cycle, W-cycle, Full, or Two-level");
  cmdp.setOption("useCondensedSolve", "useStandardSolve", &useCondensedSolve);
  cmdp.setOption("CG", "GMRES", &useConjugateGradient);
  cmdp.setOption("logFineOperator", "dontLogFineOperator", &logFineOperator);
  // cmdp.setOption("solverTolerance", &solverTolerance, "iterative solver tolerance");
  cmdp.setOption("nonlinearTolerance", &nonlinearTolerance, "nonlinear solver tolerance");
  // cmdp.setOption("maxLinearIterations", &maxLinearIterations, "maximum number of iterations for linear solver");
  cmdp.setOption("maxNonlinearIterations", &maxNonlinearIterations, "maximum number of iterations for Newton solver");
  cmdp.setOption("exportDir", &rootDir, "export directory");
  cmdp.setOption("computeL2Error", "skipL2Error", &computeL2Error, "compute L2 error");
  cmdp.setOption("exportSolution", "skipExport", &exportSolution, "export solution to HDF5");
  cmdp.setOption("saveSolution", "skipSave", &saveSolution, "save mesh and solution to HDF5");
  cmdp.setOption("loadSolution", "skipLoad", &loadSolution, "load mesh and solution from HDF5");
  cmdp.setOption("loadRef", &loadRef, "load refinement number");
  cmdp.setOption("loadDirRef", &loadDirRef, "which refinement directory to load from");
  cmdp.setOption("tag", &tag, "output tag");

  if (cmdp.parse(argc,argv) != Teuchos::CommandLineProcessor::PARSE_SUCCESSFUL)
  {
#ifdef HAVE_MPI
    MPI_Finalize();
#endif
    return -1;
  }

  map<string, Teuchos::RCP<IncompressibleProblem>> problems;
  problems["ManufacturedSolution"] = Teuchos::rcp(new IncompressibleManufacturedSolution(steady, Re, numXElems));
  problems["Kovasznay"] = Teuchos::rcp(new KovasznayProblem(steady, Re));
  problems["TaylorGreen"] = Teuchos::rcp(new TaylorGreenProblem(steady, Re, numXElems, numSlabs));
  problems["Cylinder"] = Teuchos::rcp(new CylinderProblem(steady, Re, numSlabs));
  problems["SquareCylinder"] = Teuchos::rcp(new SquareCylinderProblem(steady, Re, numSlabs));
  Teuchos::RCP<IncompressibleProblem> problem = problems.at(problemChoice);

  // if (commRank == 0)
  // {
  //   Solver::printAvailableSolversReport();
  //   cout << endl;
  // }
  Teuchos::RCP<Time> totalTimer = Teuchos::TimeMonitor::getNewCounter("Total Time");
  totalTimer->start(true);

  for (; problem->currentStep() < problem->numSlabs(); problem->advanceStep())
  {
    if (problem->numSlabs() > 1 && commRank == 0 && !steady)
      cout << "Solving time slab [" << problem->currentT0() << ", " << problem->currentT1() << "]" << endl;

    ostringstream problemName;
    string isSteady = "Steady";
    if (!steady)
      isSteady = "Transient";
    problemName << isSteady << problemChoice << spaceDim << "D_slab" << problem->currentStep() << "_" << norm << "_" << Re << "_p" << p << "_" << solverChoice;
    if (tag != "")
      problemName << "_" << tag;
    ostringstream saveDir;
    saveDir << problemName.str() << "_ref" << loadRef;

    int success = mkdir((rootDir+"/"+saveDir.str()).c_str(), S_IRWXU | S_IRWXG);

    string dataFileLocation = rootDir + "/" + saveDir.str() + "/" + saveDir.str() + ".data";
    string exportName = saveDir.str();

    ostringstream loadDir;
    loadDir << problemName.str() << "_ref" << loadDirRef;
    string loadFilePrefix = "";
    if (loadSolution)
    {
      loadFilePrefix = rootDir + "/" + loadDir.str() + "/" + saveDir.str();
      if (commRank == 0) cout << "Loading previous solution " << loadFilePrefix << endl;
    }
    // ostringstream saveDir;
    // saveDir << problemName.str() << "_ref" << loadRef;
    string saveFilePrefix = rootDir + "/" + saveDir.str() + "/" + problemName.str();
    if (saveSolution && commRank == 0) cout << "Saving to " << saveFilePrefix << endl;

    Teuchos::ParameterList parameters;
    parameters.set("spaceDim", spaceDim);
    parameters.set("steady", steady);
    parameters.set("mu", 1./Re);
    parameters.set("useConformingTraces", useConformingTraces);
    parameters.set("fieldPolyOrder", p);
    parameters.set("delta_p", delta_p);
    parameters.set("numTElems", numTElems);
    parameters.set("norm", norm);
    parameters.set("savedSolutionAndMeshPrefix", loadFilePrefix);
    SpaceTimeIncompressibleFormulationPtr form = Teuchos::rcp(new SpaceTimeIncompressibleFormulation(problem, parameters));

    MeshPtr mesh = form->solutionUpdate()->mesh();
    vector<MeshPtr> meshesCoarseToFine;
    MeshPtr k0Mesh = Teuchos::rcp( new Mesh (mesh->getTopology()->deepCopy(), form->bf(), 1, delta_p) );
    meshesCoarseToFine.push_back(k0Mesh);
    meshesCoarseToFine.push_back(mesh);
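    // meshesCoarseToFine holds the multigrid hierarchy: k0Mesh is a low-order
    // copy of the same topology used as the coarse level, with the full-order
    // mesh as the fine level.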
    // mesh->registerObserver(k0Mesh);

    // Set up boundary conditions
    problem->setBCs(form);

    // Set up solution
    SolutionPtr solutionUpdate = form->solutionUpdate();
    SolutionPtr solutionBackground = form->solutionBackground();
    // dynamic_cast<AnalyticalIncompressibleProblem*>(problem.get())->projectExactSolution(solutionBackground);

    RefinementStrategyPtr refStrategy = form->getRefinementStrategy();
    Teuchos::RCP<HDF5Exporter> exporter;
    if (exportSolution)
      exporter = Teuchos::rcp(new HDF5Exporter(mesh,exportName, rootDir));

    Teuchos::RCP<Time> solverTime = Teuchos::TimeMonitor::getNewCounter("Solve Time");
    map<string, SolverPtr> solvers;
    solvers["KLU"] = Solver::getSolver(Solver::KLU, true);
#if defined(HAVE_AMESOS_SUPERLUDIST) || defined(HAVE_AMESOS2_SUPERLUDIST)
    solvers["SuperLUDist"] = Solver::getSolver(Solver::SuperLUDist, true);
#endif
#ifdef HAVE_AMESOS_MUMPS
    solvers["MUMPS"] = Solver::getSolver(Solver::MUMPS, true);
#endif
    bool useStaticCondensation = false;

    GMGOperator::MultigridStrategy multigridStrategy;
    if (multigridStrategyString == "Two-level")
    {
      multigridStrategy = GMGOperator::TWO_LEVEL;
    }
    else if (multigridStrategyString == "W-cycle")
    {
      multigridStrategy = GMGOperator::W_CYCLE;
    }
    else if (multigridStrategyString == "V-cycle")
    {
      multigridStrategy = GMGOperator::V_CYCLE;
    }
    else if (multigridStrategyString == "Full-V")
    {
      multigridStrategy = GMGOperator::FULL_MULTIGRID_V;
    }
    else if (multigridStrategyString == "Full-W")
    {
      multigridStrategy = GMGOperator::FULL_MULTIGRID_W;
    }
    else
    {
      TEUCHOS_TEST_FOR_EXCEPTION(true, std::invalid_argument, "unrecognized multigrid strategy");
    }

    ofstream dataFile(dataFileLocation);
    dataFile << "ref\t " << "elements\t " << "dofs\t " << "energy\t " << "l2\t " << "solvetime\t" << "iterations\t " << endl;

    // {
    //   // ostringstream saveFile;
    //   // saveFile << saveFilePrefix << "_ref" << -1;
    //   // form->save(saveFile.str());
    //   exporter->exportSolution(solutionBackground, -1);
    //   if (commRank == 0)
    //     cout << "Done exporting" << endl;
    // }

    for (int refIndex=loadRef; refIndex <= numRefs; refIndex++)
    {
      double l2Update = 1e10;
      int iterCount = 0;
      solverTime->start(true);
      Teuchos::RCP<GMGSolver> gmgSolver;
      if (solverChoice[0] == 'G')
      {
        // gmgSolver = Teuchos::rcp( new GMGSolver(solutionUpdate, k0Mesh, maxLinearIterations, solverTolerance, Solver::getDirectSolver(true), useStaticCondensation));
        bool reuseFactorization = true;
        SolverPtr coarseSolver = Solver::getDirectSolver(reuseFactorization);
        gmgSolver = Teuchos::rcp(new GMGSolver(solutionUpdate, meshesCoarseToFine, cgMaxIterations, cgTol, multigridStrategy, coarseSolver, useCondensedSolve));
        gmgSolver->setUseConjugateGradient(useConjugateGradient);
        int azOutput = 20; // print residual every 20 CG iterations
        gmgSolver->setAztecOutput(azOutput);
        gmgSolver->gmgOperator()->setNarrateOnRankZero(logFineOperator,"finest GMGOperator");
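        // The GMG solver applies a geometric multigrid cycle over the
        // coarse-to-fine mesh hierarchy as a preconditioner for CG (or GMRES,
        // depending on the --CG/--GMRES option).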

        // gmgSolver->setAztecOutput(azOutput);
        // if (solverChoice == "GMG-Direct")
        //   gmgSolver->gmgOperator()->setSchwarzFactorizationType(GMGOperator::Direct);
        // if (solverChoice == "GMG-ILU")
        //   gmgSolver->gmgOperator()->setSchwarzFactorizationType(GMGOperator::ILU);
        // if (solverChoice == "GMG-IC")
        //   gmgSolver->gmgOperator()->setSchwarzFactorizationType(GMGOperator::IC);
      }
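      // Newton iteration: each pass solves the linearized system for an
      // update, measures the update's L2 norm, and folds it into the
      // accumulated background solution until the norm drops below
      // nonlinearTolerance or maxNonlinearIterations is reached.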
      while (l2Update > nonlinearTolerance && iterCount < maxNonlinearIterations)
      {
        if (solverChoice[0] == 'G')
          solutionUpdate->solve(gmgSolver);
        else
          solutionUpdate->condensedSolve(solvers[solverChoice]);

        // Compute L2 norm of update
        double u1L2Update = solutionUpdate->L2NormOfSolutionGlobal(form->u(1)->ID());
        double u2L2Update = solutionUpdate->L2NormOfSolutionGlobal(form->u(2)->ID());
        l2Update = sqrt(u1L2Update*u1L2Update + u2L2Update*u2L2Update);
        if (commRank == 0)
          cout << "Nonlinear Update:\t " << l2Update << endl;

        form->updateSolution();
        iterCount++;
      }
      double solveTime = solverTime->stop();

      double energyError = solutionUpdate->energyErrorTotal();
      double l2Error = 0;
      if (computeL2Error)
      {
        l2Error = problem->computeL2Error(form, solutionBackground);
      }
      if (commRank == 0)
      {
        cout << "Refinement: " << refIndex
          << " \tElements: " << mesh->numActiveElements()
          << " \tDOFs: " << mesh->numGlobalDofs()
          << " \tEnergy Error: " << energyError
          << " \tL2 Error: " << l2Error
          << " \tSolve Time: " << solveTime
          << " \tTotal Time: " << totalTimer->totalElapsedTime(true)
          // << " \tIteration Count: " << iterationCount
          << endl;
        dataFile << refIndex
          << " " << mesh->numActiveElements()
          << " " << mesh->numGlobalDofs()
          << " " << energyError
          << " " << l2Error
          << " " << solveTime
          << " " << totalTimer->totalElapsedTime(true)
          // << " " << iterationCount
          << endl;
      }

      if (exportSolution)
        exporter->exportSolution(solutionBackground, refIndex);

      if (saveSolution)
      {
        ostringstream saveFile;
        saveFile << saveFilePrefix << "_ref" << refIndex;
        form->save(saveFile.str());
      }

      if (refIndex != numRefs)
      {
        // k0Mesh = Teuchos::rcp( new Mesh (mesh->getTopology()->deepCopy(), form->bf(), 1, delta_p) );
        // meshesCoarseToFine.push_back(k0Mesh);
        refStrategy->refine();
        meshesCoarseToFine.push_back(mesh);
      }
    }
    dataFile.close();
  }
  double totalTime = totalTimer->stop();
  if (commRank == 0)
    cout << "Total time = " << totalTime << endl;

  return 0;
}
int main(int argc, char *argv[])
{
 
  // Initialize MPI
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
#endif

  // Create a communicator for Epetra objects
#ifdef HAVE_MPI
  Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
  Epetra_SerialComm Comm;
#endif

  // Get the process ID and the total number of processors
  int MyPID = Comm.MyPID();
  int NumProc = Comm.NumProc();

  // Check verbosity level
  bool verbose = false;
  if (argc > 1)
    if (argv[1][0]=='-' && argv[1][1]=='v')
      verbose = true;

  // Get the number of elements from the command line
  int NumGlobalElements = 0;
  if ((argc > 2) && (verbose))
    NumGlobalElements = atoi(argv[2]) + 1;
  else if ((argc > 1) && (!verbose))
    NumGlobalElements = atoi(argv[1]) + 1;
  else 
    NumGlobalElements = 101;

  // The number of unknowns must be at least equal to the 
  // number of processors.
  if (NumGlobalElements < NumProc) {
    std::cout << "numGlobalBlocks = " << NumGlobalElements 
	 << " cannot be < number of processors = " << NumProc << std::endl;
    std::cout << "Test failed!" << std::endl;
    throw "NOX Error";
  }

  // Create the interface between NOX and the application
  // This object is derived from NOX::Epetra::Interface
  Teuchos::RCP<Interface> interface = 
    Teuchos::rcp(new Interface(NumGlobalElements, Comm));

  // Set the PDE factor (for nonlinear forcing term).  This could be specified
  // via user input.
  interface->setPDEfactor(100000.0);

  // Use a scaled vector space.  The scaling must also be registered
  // with the linear solver so the linear system is consistent!
  Teuchos::RCP<Epetra_Vector> scaleVec = 
    Teuchos::rcp(new Epetra_Vector( *(interface->getSolution())));
  scaleVec->PutScalar(2.0);
  Teuchos::RCP<NOX::Epetra::Scaling> scaling = 
    Teuchos::rcp(new NOX::Epetra::Scaling);
  scaling->addUserScaling(NOX::Epetra::Scaling::Left, scaleVec);
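  // Left scaling here scales the rows of the Jacobian and the residual by the
  // user-supplied vector; the same scaling object is passed to the linear
  // system further below so the scaled and unscaled views stay consistent.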

  // Use a weighted vector space for scaling all norms
  Teuchos::RCP<NOX::Epetra::VectorSpace> weightedVectorSpace = 
    Teuchos::rcp(new NOX::Epetra::VectorSpaceScaledL2(scaling));

  // Get the vector from the Problem
  Teuchos::RCP<Epetra_Vector> soln = interface->getSolution();
  Teuchos::RCP<NOX::Epetra::Vector> noxSoln = 
    Teuchos::rcp(new NOX::Epetra::Vector(soln, 
					 NOX::Epetra::Vector::CreateCopy,
					 NOX::DeepCopy,
					 weightedVectorSpace));

  // Initial Guess 
  noxSoln->init(2.0);

  // Begin Nonlinear Solver ************************************

  // Create the top level parameter list
  Teuchos::RCP<Teuchos::ParameterList> nlParamsPtr =
    Teuchos::rcp(new Teuchos::ParameterList);
  Teuchos::ParameterList& nlParams = *(nlParamsPtr.get());

  // Set the nonlinear solver method
  nlParams.set("Nonlinear Solver", "Inexact Trust Region Based");
  nlParams.sublist("Trust Region").
    set("Inner Iteration Method", "Inexact Trust Region");

  // Set the printing parameters in the "Printing" sublist
  Teuchos::ParameterList& printParams = nlParams.sublist("Printing");
  
  // RPP: Commenting this line out.  There is now a default for MPI
  // specific builds.  We are testing that it works here.
  // //printParams.set("MyPID", MyPID);

  printParams.set("Output Precision", 3);
  printParams.set("Output Processor", 0);
  if (verbose)
    printParams.set("Output Information", 
			     NOX::Utils::OuterIteration + 
			     NOX::Utils::OuterIterationStatusTest + 
			     NOX::Utils::InnerIteration +
			     NOX::Utils::LinearSolverDetails +
			     NOX::Utils::Parameters + 
			     NOX::Utils::Details + 
			     NOX::Utils::Warning +
                             NOX::Utils::Debug +
			     NOX::Utils::TestDetails +
			     NOX::Utils::Error);
  else
    printParams.set("Output Information", NOX::Utils::Error +
			     NOX::Utils::TestDetails);

  // Create a print class for controlling output below
  NOX::Utils printing(printParams);

  // Sublist for direction
  Teuchos::ParameterList& dirParams = nlParams.sublist("Direction");
  dirParams.set("Method", "Newton");
  Teuchos::ParameterList& newtonParams = dirParams.sublist("Newton");
  newtonParams.set("Forcing Term Method", "Type 1");

  // Sublist for linear solver for the Newton method
  Teuchos::ParameterList& lsParams = newtonParams.sublist("Linear Solver");
  lsParams.set("Aztec Solver", "GMRES");  
  lsParams.set("Max Iterations", 800);  
  lsParams.set("Tolerance", 1e-4);

  // Various Preconditioner options
  //lsParams.set("Preconditioner", "AztecOO");
  lsParams.set("Preconditioner", "Ifpack");
  lsParams.set("Preconditioner Reuse Policy", "Rebuild");

  // Sublist for Cauchy direction
  Teuchos::ParameterList& cauchyDirParams = nlParams.sublist("Cauchy Direction");
  cauchyDirParams.set("Method", "Steepest Descent");
  Teuchos::ParameterList& sdParams = cauchyDirParams.sublist("Steepest Descent");
  sdParams.set("Scaling Type", "Quadratic Model Min");

  // Add a user defined pre/post operator object
  Teuchos::RCP<NOX::Abstract::PrePostOperator> ppo =
    Teuchos::rcp(new UserPrePostOperator(printing));
  nlParams.sublist("Solver Options").set("User Defined Pre/Post Operator", 
					 ppo);

  // Let's force all status tests to do a full check
  nlParams.sublist("Solver Options").set("Status Test Check Type", "Complete");

  // User supplied (Epetra_RowMatrix)
  Teuchos::RCP<Epetra_RowMatrix> Analytic = interface->getJacobian();

  // Create the linear system
  Teuchos::RCP<NOX::Epetra::Interface::Required> iReq = interface;
  Teuchos::RCP<NOX::Epetra::Interface::Jacobian> iJac = interface;
  Teuchos::RCP<NOX::Epetra::LinearSystemAztecOO> linSys = 
    Teuchos::rcp(new NOX::Epetra::LinearSystemAztecOO(printParams, lsParams,
						      iReq,
						      iJac, Analytic, 
						      *noxSoln,
						      scaling));
  
  // Create the Group
  Teuchos::RCP<NOX::Epetra::Group> grpPtr = 
    Teuchos::rcp(new NOX::Epetra::Group(printParams, 
					iReq, 
					*noxSoln, 
					linSys));  
  NOX::Epetra::Group& grp = *grpPtr;

  // uncomment the following for loca supergroups
  //MF->setGroupForComputeF(*grpPtr);
  //FD->setGroupForComputeF(*grpPtr);

  // Create the convergence tests
  Teuchos::RCP<NOX::StatusTest::NormF> absresid = 
    Teuchos::rcp(new NOX::StatusTest::NormF(1.0e-8));
  Teuchos::RCP<NOX::StatusTest::NormF> relresid = 
    Teuchos::rcp(new NOX::StatusTest::NormF(grp, 1.0e-2));
  Teuchos::RCP<NOX::StatusTest::NormUpdate> update =
    Teuchos::rcp(new NOX::StatusTest::NormUpdate(1.0e-5));
  Teuchos::RCP<NOX::StatusTest::NormWRMS> wrms =
    Teuchos::rcp(new NOX::StatusTest::NormWRMS(1.0e-2, 1.0e-8));
  Teuchos::RCP<NOX::StatusTest::Combo> converged =
    Teuchos::rcp(new NOX::StatusTest::Combo(NOX::StatusTest::Combo::AND));
  converged->addStatusTest(absresid);
  converged->addStatusTest(relresid);
  converged->addStatusTest(wrms);
  converged->addStatusTest(update);
  Teuchos::RCP<NOX::StatusTest::MaxIters> maxiters = 
    Teuchos::rcp(new NOX::StatusTest::MaxIters(200));
  Teuchos::RCP<NOX::StatusTest::FiniteValue> fv =
    Teuchos::rcp(new NOX::StatusTest::FiniteValue);
  Teuchos::RCP<NOX::StatusTest::Combo> combo = 
    Teuchos::rcp(new NOX::StatusTest::Combo(NOX::StatusTest::Combo::OR));
  combo->addStatusTest(fv);
  combo->addStatusTest(converged);
  combo->addStatusTest(maxiters);
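  // Nested status tests: "converged" requires all four of its members (AND),
  // while the outer OR combo stops the solver when a non-finite value is
  // detected, convergence is reached, or 200 iterations elapse.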

  // Create the solver
  Teuchos::RCP<NOX::Solver::Generic> solver = 
    NOX::Solver::buildSolver(grpPtr, combo, nlParamsPtr);
  NOX::StatusTest::StatusType solvStatus = solver->solve();

  // End Nonlinear Solver **************************************

  // Get the Epetra_Vector with the final solution from the solver
  const NOX::Epetra::Group& finalGroup = 
    dynamic_cast<const NOX::Epetra::Group&>(solver->getSolutionGroup());
  const Epetra_Vector& finalSolution = 
    (dynamic_cast<const NOX::Epetra::Vector&>(finalGroup.getX())).
    getEpetraVector();

  // Output the parameter list
  if (verbose) {
    if (printing.isPrintType(NOX::Utils::Parameters)) {
      printing.out() << std::endl << "Final Parameters" << std::endl
	   << "****************" << std::endl;
      solver->getList().print(printing.out());
      printing.out() << std::endl;
    }
  }

  // Print solution
  char file_name[25];
  FILE *ifp;
  int NumMyElements = soln->Map().NumMyElements();
  (void) sprintf(file_name, "output.%d",MyPID);
  ifp = fopen(file_name, "w");
  for (int i=0; i<NumMyElements; i++)
    fprintf(ifp, "%d  %E\n", soln->Map().MinMyGID()+i, finalSolution[i]);
  fclose(ifp);


  // Tests
  int status = 0; // Converged
  
  // 1. Convergence
  if (solvStatus != NOX::StatusTest::Converged) {
      status = 1;
      if (printing.isPrintType(NOX::Utils::Error))
	printing.out() << "Nonlinear solver failed to converge!" << std::endl;
  }
#ifndef HAVE_MPI 
  // 2. Linear solve iterations (14) - SERIAL TEST ONLY!
  //    The number of linear iterations changes with # of procs.
  if (const_cast<Teuchos::ParameterList&>(solver->getList()).sublist("Direction").sublist("Newton").sublist("Linear Solver").sublist("Output").get("Total Number of Linear Iterations",0) != 14) {
    status = 2;
  }
#endif
  // 3. Nonlinear solve iterations (17)
  if (const_cast<Teuchos::ParameterList&>(solver->getList()).sublist("Output").get("Nonlinear Iterations", 0) != 17)
    status = 3;
  // 4. Test the pre/post iterate options
  {
  UserPrePostOperator* ppoPtr = dynamic_cast<UserPrePostOperator*>(ppo.get());
  if (ppoPtr->getNumRunPreIterate() != 17)
    status = 4;
  if (ppoPtr->getNumRunPostIterate() != 17)
    status = 4;
  if (ppoPtr->getNumRunPreSolve() != 1)
    status = 4;
  if (ppoPtr->getNumRunPostSolve() != 1)
    status = 4;
  }
  // 5. Number of Cauchy steps (3)
  if (const_cast<Teuchos::ParameterList&>(solver->getList()).sublist("Trust Region").sublist("Output").get("Number of Cauchy Steps", 0) != 3)
    status = 5;
  // 6. Number of Newton steps (14)
  if (const_cast<Teuchos::ParameterList&>(solver->getList()).sublist("Trust Region").sublist("Output").get("Number of Newton Steps", 0) != 14)
    status = 6;

  // Summarize test results 
  if (status == 0)
    printing.out() << "Test passed!" << std::endl;
  else 
    printing.out() << "Test failed!" << std::endl;
  
#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  // Final return value (0 = successful, non-zero = failure)
  return status;
}
Example #22
0
int main(int argc, char *argv[])
{
  int ierr = 0;
  int MyPID = 0;

  int nRHS = 7;
  double reltol = 1.0e-8;
  double abstol = 1.0e-8;
  double lstol = 1.0e-11;
  int ls_its = 100;

  try {

    // Initialize MPI
#ifdef HAVE_MPI
    MPI_Init(&argc,&argv);
#endif

    // Create a communicator for Epetra objects
#ifdef HAVE_MPI
    Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
    Epetra_SerialComm Comm;
#endif

    // Get the total number of processors
    MyPID = Comm.MyPID();
    int NumProc = Comm.NumProc();

    bool verbose = false;
    // Check for verbose output
    if (argc>1)
      if (argv[1][0]=='-' && argv[1][1]=='v')
        verbose = true;

    // Get the number of elements from the command line
    int NumGlobalElements = 0;
    if ((argc > 2) && (verbose))
      NumGlobalElements = atoi(argv[2]) + 1;
    else if ((argc > 1) && (!verbose))
      NumGlobalElements = atoi(argv[1]) + 1;
    else
      NumGlobalElements = 101;

    // The number of unknowns must be at least equal to the
    // number of processors.
    if (NumGlobalElements < NumProc) {
      std::cout << "numGlobalBlocks = " << NumGlobalElements
       << " cannot be < number of processors = " << NumProc << std::endl;
      exit(1);
    }

    // Create parameter list
    Teuchos::RCP<Teuchos::ParameterList> paramList =
      Teuchos::rcp(new Teuchos::ParameterList);

    // Create the "Solver" parameters sublist to be used with NOX Solvers
    Teuchos::ParameterList& nlParams = paramList->sublist("NOX");

    Teuchos::ParameterList& nlPrintParams = nlParams.sublist("Printing");
    nlPrintParams.set("MyPID", MyPID);
    if (verbose)
       nlPrintParams.set("Output Information",
                  NOX::Utils::Error +
                  NOX::Utils::Details +
                  NOX::Utils::OuterIteration +
                  NOX::Utils::InnerIteration +
                  NOX::Utils::Warning +
                  NOX::Utils::TestDetails +
                  NOX::Utils::StepperIteration +
                  NOX::Utils::StepperDetails);
    else
       nlPrintParams.set("Output Information", NOX::Utils::Error);

    // Create the "Direction" sublist for the "Line Search Based" solver
    Teuchos::ParameterList& dirParams = nlParams.sublist("Direction");
    Teuchos::ParameterList& newParams = dirParams.sublist("Newton");
    Teuchos::ParameterList& lsParams = newParams.sublist("Linear Solver");
    lsParams.set("Aztec Solver", "GMRES");
    lsParams.set("Max Iterations", ls_its);
    lsParams.set("Tolerance", lstol);
    if (verbose)
      lsParams.set("Output Frequency", 1);
    else
      lsParams.set("Output Frequency", 0);
    lsParams.set("Scaling", "None");

    //lsParams.set("Overlap", 2);
    //lsParams.set("Fill Factor", 2.0);
    //lsParams.set("Drop Tolerance", 1.0e-12);

    // Create Epetra factory
    Teuchos::RCP<LOCA::Abstract::Factory> epetraFactory =
      Teuchos::rcp(new LOCA::Epetra::Factory);

    // Create global data object
    Teuchos::RCP<LOCA::GlobalData> globalData =
      LOCA::createGlobalData(paramList, epetraFactory);

    // Test transpose solves with Ifpack preconditioner
    if (globalData->locaUtils->isPrintType(NOX::Utils::TestDetails))
      globalData->locaUtils->out() << std::endl <<
    "********** " <<
    "Testing Transpose Solves With Ifpack Preconditioner" <<
    " **********" << std::endl;
    lsParams.set("Preconditioner", "Ifpack");
    ierr += testTransposeSolve(NumGlobalElements, nRHS, reltol, abstol,
                   Comm, globalData, paramList);

    // Test transpose solves with no preconditioner
    if (globalData->locaUtils->isPrintType(NOX::Utils::TestDetails))
      globalData->locaUtils->out() << std::endl <<
    "********** " <<
    "Testing Transpose Solves With No Preconditioner" <<
    " **********" << std::endl;
    lsParams.set("Preconditioner", "None");
    lsParams.set("Max Iterations", NumGlobalElements+1);
    ierr += testTransposeSolve(NumGlobalElements, nRHS, reltol, abstol,
                   Comm, globalData, paramList);

    LOCA::destroyGlobalData(globalData);
  }

  catch (std::exception& e) {
    std::cout << e.what() << std::endl;
    ierr = 1;
  }
  catch (const char *s) {
    std::cout << s << std::endl;
    ierr = 1;
  }
  catch (...) {
    std::cout << "Caught unknown exception!" << std::endl;
    ierr = 1;
  }

  if (MyPID == 0) {
    if (ierr == 0)
      std::cout << "All tests passed!" << std::endl;
    else
      std::cout << ierr << " test(s) failed!" << std::endl;
  }

#ifdef HAVE_MPI
  MPI_Finalize() ;
#endif

  return ierr;
}
int main(int argc, char *argv[]) {

#ifdef HAVE_MPI
  MPI_Init(&argc, &argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  // initialize the random number generator

  int ml_one = 1;
  ML_srandom1(&ml_one);
  // ===================== //
  // create linear problem //
  // ===================== //

  ParameterList GaleriList;
  int base=10;
  GaleriList.set("nx", base);
  GaleriList.set("ny", base);
  GaleriList.set("nz", base * Comm.NumProc());
  GaleriList.set("mx", 1);
  GaleriList.set("my", 1);
  GaleriList.set("mz", Comm.NumProc());

  Epetra_Map* Map = CreateMap("Cartesian3D", Comm, GaleriList);
  Epetra_CrsMatrix* Matrix = CreateCrsMatrix("Laplace3D", Map, GaleriList);

  Epetra_Vector LHS(*Map);
  Epetra_Vector RHS(*Map);
  
  Epetra_LinearProblem Problem(Matrix, &LHS, &RHS);

  Teuchos::ParameterList MLList;
  double TotalErrorResidual = 0.0, TotalErrorExactSol = 0.0;

  // ====================== //
  // default options for SA //
  // ====================== //

  if (Comm.MyPID() == 0) PrintLine();

  ML_Epetra::SetDefaults("SA",MLList);
  MLList.set("smoother: type", "Gauss-Seidel");
  char mystring[80];
  strcpy(mystring,"SA");
  TestMultiLevelPreconditioner(mystring, MLList, Problem, 
                               TotalErrorResidual, TotalErrorExactSol);

  // ============================== //
  // default options for SA, Jacobi //
  // ============================== //

  if (Comm.MyPID() == 0) PrintLine();

  ML_Epetra::SetDefaults("SA",MLList);
  MLList.set("smoother: type", "Jacobi");

  TestMultiLevelPreconditioner(mystring, MLList, Problem, TotalErrorResidual,
                               TotalErrorExactSol);

  // ============================= //
  // default options for SA, Cheby //
  // ============================= //

  if (Comm.MyPID() == 0) PrintLine();

  ML_Epetra::SetDefaults("SA",MLList);
  MLList.set("smoother: type", "Chebyshev");

  TestMultiLevelPreconditioner(mystring, MLList, Problem, 
                               TotalErrorResidual, TotalErrorExactSol);
  // ===================== //
  // print out total error //
  // ===================== //

  
  if (Comm.MyPID() == 0) {
    cout << endl;
    cout << "......Total error for residual        = " << TotalErrorResidual << endl;
    cout << "......Total error for exact solution  = " << TotalErrorExactSol << endl;
    cout << endl;
  }

  
  delete Matrix;
  delete Map;
  
  if (TotalErrorResidual > 1e-8) {
    cerr << "Error: `MultiLevelPrecoditioner_Sym.exe' failed!" << endl;
    exit(EXIT_FAILURE);
  }

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  if (Comm.MyPID() == 0)
    cerr << "`MultiLevelPrecoditioner_Sym.exe' passed!" << endl;

  return (EXIT_SUCCESS);
}
Example #24
0
int main(int argc, char *argv[]) {

  int ierr = 0, i;

#ifdef EPETRA_MPI

  // Initialize MPI

  MPI_Init(&argc,&argv);
  int rank; // My process ID

  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);

#else

  int rank = 0;
  Epetra_SerialComm Comm;

#endif

#ifdef HAVE_EPETRA_TEUCHOS
  Teuchos::RCP<Teuchos::FancyOStream>
    fancyOut = Teuchos::VerboseObjectBase::getDefaultOStream();
  if (Comm.NumProc() > 1 ) {
    fancyOut->setShowProcRank(true);
    fancyOut->setOutputToRootOnly(-1);
  }
  std::ostream &out = *fancyOut;
#else
  std::ostream &out = std::cout;
#endif

  Comm.SetTracebackMode(0); // This should shut down any error tracing
  bool verbose = false;

  // Check if we should print results to standard out
  if (argc>1) if (argv[1][0]=='-' && argv[1][1]=='v') verbose = true;

  //  char tmp;
  //  if (rank==0) out << "Press any key to continue..."<< endl;
  //  if (rank==0) cin >> tmp;
  //  Comm.Barrier();

  int MyPID = Comm.MyPID();
  int NumProc = Comm.NumProc();

  if (verbose && MyPID==0)
    out << Epetra_Version() << endl << endl;

  if (verbose) out << Comm <<endl;

  bool verbose1 = verbose;

  // Redefine verbose to only print on PE 0
  if (verbose && rank!=0) verbose = false;

  int NumMyElements = 10000;
  int NumMyElements1 = NumMyElements; // Needed for localmap
  int NumGlobalElements = NumMyElements*NumProc+EPETRA_MIN(NumProc,3);
  if (MyPID < 3) NumMyElements++;
  int IndexBase = 0;
  int ElementSize = 7;

  // Test LocalMap constructor
  // and Petra-defined uniform linear distribution constructor

  if (verbose) out << "\n*********************************************************" << endl;
  if (verbose) out << "Checking Epetra_LocalMap(NumMyElements1, IndexBase, Comm)" << endl;
  if (verbose) out << "     and Epetra_BlockMap(NumGlobalElements, ElementSize, IndexBase, Comm)" << endl;
  if (verbose) out << "*********************************************************" << endl;

  Epetra_LocalMap *LocalMap = new Epetra_LocalMap(NumMyElements1, IndexBase,
                              Comm);
  Epetra_BlockMap * BlockMap = new Epetra_BlockMap(NumGlobalElements, ElementSize, IndexBase, Comm);
  EPETRA_TEST_ERR(VectorTests(*BlockMap, verbose),ierr);

  EPETRA_TEST_ERR(MatrixTests(*BlockMap, *LocalMap, verbose),ierr);

  delete BlockMap;

  // Test User-defined linear distribution constructor

  if (verbose) out << "\n*********************************************************" << endl;
  if (verbose) out << "Checking Epetra_BlockMap(NumGlobalElements, NumMyElements, ElementSize, IndexBase, Comm)" << endl;
  if (verbose) out << "*********************************************************" << endl;

  BlockMap = new Epetra_BlockMap(NumGlobalElements, NumMyElements, ElementSize, IndexBase, Comm);

  EPETRA_TEST_ERR(VectorTests(*BlockMap, verbose),ierr);

  EPETRA_TEST_ERR(MatrixTests(*BlockMap, *LocalMap, verbose),ierr);

  delete BlockMap;

  // Test User-defined arbitrary distribution constructor
  // Generate Global Element List.  Do in reverse for fun!

  int * MyGlobalElements = new int[NumMyElements];
  int MaxMyGID = (Comm.MyPID()+1)*NumMyElements-1+IndexBase;
  if (Comm.MyPID()>2) MaxMyGID+=3;
  for (i = 0; i<NumMyElements; i++) MyGlobalElements[i] = MaxMyGID-i;

  if (verbose) out << "\n*********************************************************" << endl;
  if (verbose) out << "Checking Epetra_BlockMap(NumGlobalElements, NumMyElements, MyGlobalElements,  ElementSize, IndexBase, Comm)" << endl;
  if (verbose) out << "*********************************************************" << endl;

  BlockMap = new Epetra_BlockMap(NumGlobalElements, NumMyElements, MyGlobalElements, ElementSize,
		      IndexBase, Comm);
  EPETRA_TEST_ERR(VectorTests(*BlockMap, verbose),ierr);

  EPETRA_TEST_ERR(MatrixTests(*BlockMap, *LocalMap, verbose),ierr);

  delete BlockMap;

  int * ElementSizeList = new int[NumMyElements];
  int NumMyEquations = 0;
  int NumGlobalEquations = 0;
  for (i = 0; i<NumMyElements; i++)
    {
      ElementSizeList[i] = i%6+2; // blocksizes go from 2 to 7
      NumMyEquations += ElementSizeList[i];
    }
  ElementSize = 7; // Set to maximum for use in checkmap
  NumGlobalEquations = Comm.NumProc()*NumMyEquations;

  // Adjust NumGlobalEquations based on processor ID
  if (Comm.NumProc() > 3)
    {
      if (Comm.MyPID()>2)
	NumGlobalEquations += 3*((NumMyElements)%6+2);
      else
	NumGlobalEquations -= (Comm.NumProc()-3)*((NumMyElements-1)%6+2);
    }

  if (verbose) out << "\n*********************************************************" << endl;
  if (verbose) out << "Checking Epetra_BlockMap(NumGlobalElements, NumMyElements, MyGlobalElements,  ElementSizeList, IndexBase, Comm)" << endl;
  if (verbose) out << "*********************************************************" << endl;

  BlockMap = new Epetra_BlockMap(NumGlobalElements, NumMyElements, MyGlobalElements, ElementSizeList,
		      IndexBase, Comm);
  EPETRA_TEST_ERR(VectorTests(*BlockMap, verbose),ierr);

  EPETRA_TEST_ERR(MatrixTests(*BlockMap, *LocalMap, verbose),ierr);

  // Test Copy constructor

  if (verbose) out << "\n*********************************************************" << endl;
  if (verbose) out << "Checking Epetra_BlockMap(*BlockMap)" << endl;
  if (verbose) out << "*********************************************************" << endl;

  Epetra_BlockMap * BlockMap1 = new Epetra_BlockMap(*BlockMap);

  EPETRA_TEST_ERR(VectorTests(*BlockMap, verbose),ierr);

  EPETRA_TEST_ERR(MatrixTests(*BlockMap, *LocalMap, verbose),ierr);

  delete [] ElementSizeList;
  delete [] MyGlobalElements;
  delete BlockMap;
  delete BlockMap1;


  // Test Petra-defined uniform linear distribution constructor

  if (verbose) out << "\n*********************************************************" << endl;
  if (verbose) out << "Checking Epetra_Map(NumGlobalElements, IndexBase, Comm)" << endl;
  if (verbose) out << "*********************************************************" << endl;

  Epetra_Map * Map = new Epetra_Map(NumGlobalElements, IndexBase, Comm);
  EPETRA_TEST_ERR(VectorTests(*Map, verbose),ierr);

  EPETRA_TEST_ERR(MatrixTests(*Map, *LocalMap, verbose),ierr);

  delete Map;

  // Test User-defined linear distribution constructor

  if (verbose) out << "\n*********************************************************" << endl;
  if (verbose) out << "Checking Epetra_Map(NumGlobalElements, NumMyElements, IndexBase, Comm)" << endl;
  if (verbose) out << "*********************************************************" << endl;

  Map = new Epetra_Map(NumGlobalElements, NumMyElements, IndexBase, Comm);

  EPETRA_TEST_ERR(VectorTests(*Map, verbose),ierr);

  EPETRA_TEST_ERR(MatrixTests(*Map, *LocalMap, verbose),ierr);

  delete Map;

  // Test User-defined arbitrary distribution constructor
  // Generate Global Element List.  Do in reverse for fun!

  MyGlobalElements = new int[NumMyElements];
  MaxMyGID = (Comm.MyPID()+1)*NumMyElements-1+IndexBase;
  if (Comm.MyPID()>2) MaxMyGID+=3;
  for (i = 0; i<NumMyElements; i++) MyGlobalElements[i] = MaxMyGID-i;

  if (verbose) out << "\n*********************************************************" << endl;
  if (verbose) out << "Checking Epetra_Map(NumGlobalElements, NumMyElements, MyGlobalElements,  IndexBase, Comm)" << endl;
  if (verbose) out << "*********************************************************" << endl;

  Map = new Epetra_Map(NumGlobalElements, NumMyElements, MyGlobalElements,
		      IndexBase, Comm);
  EPETRA_TEST_ERR(VectorTests(*Map, verbose),ierr);

  EPETRA_TEST_ERR(MatrixTests(*Map, *LocalMap, verbose),ierr);

  // Test Copy constructor

  if (verbose) out << "\n*********************************************************" << endl;
  if (verbose) out << "Checking Epetra_Map(*Map)" << endl;
  if (verbose) out << "*********************************************************" << endl;

  Epetra_Map Map1(*Map);

  EPETRA_TEST_ERR(VectorTests(*Map, verbose),ierr);

  EPETRA_TEST_ERR(MatrixTests(*Map, *LocalMap, verbose),ierr);

  delete [] MyGlobalElements;
  delete Map;

  if (verbose1)
    {
      // Test Vector MFLOPS for 2D Dot Product
      int M = 1;
      int K = 1000000;
      Epetra_Map Map2(-1, K, IndexBase, Comm);
      Epetra_LocalMap Map3(M, IndexBase, Comm);

      Epetra_Vector A(Map2);A.Random();
      Epetra_Vector B(Map2);B.Random();
      Epetra_Vector C(Map3);C.Random();

      // Test Epetra_Vector label
      const char* VecLabel = A.Label();
      const char* VecLabel1 = "Epetra::Vector";
      if (verbose) out << endl << endl <<"This should say " << VecLabel1 << ": " << VecLabel << endl << endl << endl;
      EPETRA_TEST_ERR(strcmp(VecLabel1,VecLabel),ierr);
      if (verbose) out << "Testing Assignment operator" << endl;

      double tmp1 = 1.00001* (double) (MyPID+1);
      double tmp2 = tmp1;
      A[1] = tmp1;
      tmp2 = A[1];
      out << "On PE "<< MyPID << "  A[1] should equal = " << tmp1;
      if (tmp1==tmp2) out << " and it does!" << endl;
      else out << " but it equals " << tmp2 << endl;

      Comm.Barrier();
	
      if (verbose) out << endl << endl << "Testing MFLOPs" << endl;
      Epetra_Flops counter;
      C.SetFlopCounter(counter);
      Epetra_Time mytimer(Comm);
      C.Multiply('T', 'N', 0.5, A, B, 0.0);
      double Multiply_time = mytimer.ElapsedTime();
      double Multiply_flops = C.Flops();
      if (verbose) out << "\n\nTotal FLOPs = " << Multiply_flops << endl;
      if (verbose) out << "Total Time  = " << Multiply_time << endl;
      if (verbose) out << "MFLOPs      = " << Multiply_flops/Multiply_time/1000000.0 << endl;

      Comm.Barrier();
	
      // Test Vector ostream operator with Petra-defined uniform linear distribution constructor
      // and a small vector

      Epetra_Map Map4(100, IndexBase, Comm);
      double * Dp = new double[100];
      for (i=0; i<100; i++)
	Dp[i] = i;
      Epetra_Vector D(View, Map4,Dp);
	
      if (verbose) out << "\n\nTesting ostream operator:  Vector should be length 100 and print i,j indices"
	   << endl << endl;
      out << D << endl;

      if (verbose) out << "Traceback Mode value = " << D.GetTracebackMode() << endl;
      delete [] Dp;
    }

#ifdef EPETRA_MPI
  MPI_Finalize();
#endif

  return ierr;

}
Example #25
0
int main(int argc, char *argv[])
{
  int ierr = 0, i, forierr = 0;
#ifdef EPETRA_MPI

  // Initialize MPI

  MPI_Init(&argc,&argv);
  int rank; // My process ID

  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  Epetra_MpiComm Comm( MPI_COMM_WORLD );

#else

  int rank = 0;
  Epetra_SerialComm Comm;

#endif

  bool verbose = false;

  // Check if we should print results to standard out
  if (argc>1) if (argv[1][0]=='-' && argv[1][1]=='v') verbose = true;

  int verbose_int = verbose ? 1 : 0;
  Comm.Broadcast(&verbose_int, 1, 0);
  verbose = verbose_int==1 ? true : false;


  //  char tmp;
  //  if (rank==0) cout << "Press any key to continue..."<< endl;
  //  if (rank==0) cin >> tmp;
  //  Comm.Barrier();

  Comm.SetTracebackMode(0); // This should shut down any error traceback reporting
  int MyPID = Comm.MyPID();
  int NumProc = Comm.NumProc();

  if(verbose && MyPID==0)
    cout << Epetra_Version() << endl << endl;

  if (verbose) cout << "Processor "<<MyPID<<" of "<< NumProc
		    << " is alive."<<endl;

  // Redefine verbose to only print on PE 0
  if(verbose && rank!=0)
    verbose = false;

  int NumMyEquations = 10000;
  long long NumGlobalEquations = (NumMyEquations * NumProc) + EPETRA_MIN(NumProc,3);
  if(MyPID < 3)
    NumMyEquations++;

  // Construct a Map that puts approximately the same Number of equations on each processor

  Epetra_Map Map(NumGlobalEquations, NumMyEquations, 0LL, Comm);

  // Get update list and number of local equations from newly created Map
  vector<long long> MyGlobalElements(Map.NumMyElements());
  Map.MyGlobalElements(&MyGlobalElements[0]);

  // Create an integer vector NumNz that is used to build the Petra Matrix.
  // NumNz[i] is the Number of OFF-DIAGONAL term for the ith global equation on this processor

  vector<int> NumNz(NumMyEquations);

  // We are building a tridiagonal matrix where each row has (-1 2 -1)
  // So we need 2 off-diagonal terms (except for the first and last equation)

  for(i = 0; i < NumMyEquations; i++)
    if((MyGlobalElements[i] == 0) || (MyGlobalElements[i] == NumGlobalEquations - 1))
      NumNz[i] = 1;
    else
      NumNz[i] = 2;

  // Create a Epetra_Matrix

  Epetra_CrsMatrix A(Copy, Map, &NumNz[0]);
  EPETRA_TEST_ERR(A.IndicesAreGlobal(),ierr);
  EPETRA_TEST_ERR(A.IndicesAreLocal(),ierr);

  // Add  rows one-at-a-time
  // Need some vectors to help
  // Off diagonal Values will always be -1


  vector<double> Values(2);
  Values[0] = -1.0;
  Values[1] = -1.0;
  vector<long long> Indices(2);
  double two = 2.0;
  int NumEntries;

  forierr = 0;
  for(i = 0; i < NumMyEquations; i++) {
    if(MyGlobalElements[i] == 0) {
      Indices[0] = 1;
      NumEntries = 1;
    }
    else if (MyGlobalElements[i] == NumGlobalEquations-1) {
      Indices[0] = NumGlobalEquations-2;
      NumEntries = 1;
    }
    else {
      Indices[0] = MyGlobalElements[i]-1;
      Indices[1] = MyGlobalElements[i]+1;
      NumEntries = 2;
    }
    forierr += !(A.InsertGlobalValues(MyGlobalElements[i], NumEntries, &Values[0], &Indices[0])==0);
    forierr += !(A.InsertGlobalValues(MyGlobalElements[i], 1, &two, &MyGlobalElements[i])>0); // Put in the diagonal entry
  }
  EPETRA_TEST_ERR(forierr,ierr);

  // Finish up
  A.FillComplete();
  A.OptimizeStorage();
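  // Epetra_JadMatrix stores a jagged-diagonal copy of A; the power-method
  // tests below check that its matrix-vector products reproduce the
  // CrsMatrix results.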

  Epetra_JadMatrix JadA(A);
  Epetra_JadMatrix JadA1(A);
  Epetra_JadMatrix JadA2(A);

  // Create vectors for Power method

  Epetra_Vector q(Map);
  Epetra_Vector z(Map); z.Random();
  Epetra_Vector resid(Map);

  Epetra_Flops flopcounter;
  A.SetFlopCounter(flopcounter);
  q.SetFlopCounter(A);
  z.SetFlopCounter(A);
  resid.SetFlopCounter(A);
  JadA.SetFlopCounter(A);
  JadA1.SetFlopCounter(A);
  JadA2.SetFlopCounter(A);


  if (verbose) cout << "=======================================" << endl
		    << "Testing Jad using CrsMatrix as input..." << endl
		    << "=======================================" << endl;

  A.ResetFlops();
  powerMethodTests(A, JadA, Map, q, z, resid, verbose);

  // Increase diagonal dominance

  if (verbose) cout << "\n\nIncreasing the magnitude of first diagonal term and solving again\n\n"
		    << endl;


  if (A.MyGlobalRow(0)) {
    int numvals = A.NumGlobalEntries(0);
    vector<double> Rowvals(numvals);
    vector<long long> Rowinds(numvals);
    A.ExtractGlobalRowCopy(0, numvals, numvals, &Rowvals[0], &Rowinds[0]); // Get A[0,0]

    for (i=0; i<numvals; i++) if (Rowinds[i] == 0) Rowvals[i] *= 10.0;

    A.ReplaceGlobalValues(0, numvals, &Rowvals[0], &Rowinds[0]);
  }
  JadA.UpdateValues(A);
  A.ResetFlops();
  powerMethodTests(A, JadA, Map, q, z, resid, verbose);

  if (verbose) cout << "================================================================" << endl
		          << "Testing Jad using Jad matrix as input matrix for construction..." << endl
		          << "================================================================" << endl;
  JadA1.ResetFlops();
  powerMethodTests(JadA1, JadA2, Map, q, z, resid, verbose);

#ifdef EPETRA_MPI
  MPI_Finalize() ;
#endif

return ierr ;
}
Example #26
0
int main(int argc, char *argv[])
{
  int ierr = 0;

  // Initialize MPI
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
#endif

  // Create a communicator for Epetra objects
#ifdef HAVE_MPI
  Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
  Epetra_SerialComm Comm;
#endif

  // Get the process ID and the total number of processors
  int MyPID = Comm.MyPID();
  int NumProc = Comm.NumProc();
  
  // Check for verbose output
  bool verbose = false;
  if (argc>1) 
    if (argv[1][0]=='-' && argv[1][1]=='v') 
      verbose = true;

  // Get the number of elements from the command line
  int NumGlobalElements = 0;
  if ((argc > 2) && (verbose))
    NumGlobalElements = atoi(argv[2]) + 1;
  else if ((argc > 1) && (!verbose))
    NumGlobalElements = atoi(argv[1]) + 1;
  else 
    NumGlobalElements = 101;
  
  // The number of unknowns must be at least equal to the 
  // number of processors.
  if (NumGlobalElements < NumProc) {
    std::cout << "numGlobalBlocks = " << NumGlobalElements 
	 << " cannot be < number of processors = " << NumProc << std::endl;
    exit(1);
  }

  // Test includeUV = true, useP = true, prec = "Transpose Preconditioner"
  ierr += tcubed_test(NumGlobalElements, verbose, Comm,
		      true, true, "Transpose Preconditioner",
		      "Jacobian");

  // Test includeUV = true, useP = false, prec = "Transpose Preconditioner"
  ierr += tcubed_test(NumGlobalElements, verbose, Comm,
		      true, false, "Transpose Preconditioner",
		      "Jacobian");

  // Test includeUV = true, useP = true, prec = "Transpose Preconditioner", formulation = "SMW"
  ierr += tcubed_test(NumGlobalElements, verbose, Comm,
   		      true, true, "Transpose Preconditioner",
   		      "SMW");

  // Test includeUV = true, useP = false, prec = "Transpose Preconditioner", formulation = "SMW"
  ierr += tcubed_test(NumGlobalElements, verbose, Comm,
   		      true, false, "Transpose Preconditioner",
  		      "SMW");

  if (NumProc > 1) {
    // Test includeUV = false, useP = true, prec = "Transpose Preconditioner"
    ierr += tcubed_test(NumGlobalElements, verbose, Comm,
			false, true, "Transpose Preconditioner",
			"Jacobian");

    // Test includeUV = false, useP = false, prec = "Transpose Preconditioner"
    ierr += tcubed_test(NumGlobalElements, verbose, Comm,
			false, false, "Transpose Preconditioner",
			"Jacobian");

    // Test includeUV = false, useP = true, prec = "Transpose Preconditioner"
    ierr += tcubed_test(NumGlobalElements, verbose, Comm,
			false, true, "Transpose Preconditioner",
			"SMW");

    // Test includeUV = false, useP = false, prec = "Transpose Preconditioner"
    ierr += tcubed_test(NumGlobalElements, verbose, Comm,
			false, false, "Transpose Preconditioner",
			"SMW");
  }

  // Test includeUV = true, useP = true, prec = "Left Preconditioning"
  ierr += tcubed_test(NumGlobalElements, verbose, Comm,
		      true, true, "Left Preconditioning",
		      "Jacobian");

  // Test includeUV = true, useP = false, prec = "Left Preconditioning"
  ierr += tcubed_test(NumGlobalElements, verbose, Comm,
		      true, false, "Left Preconditioning",
		      "Jacobian");

  // Test includeUV = true, useP = true, prec = "Left Preconditioning", formulation = "SMW"
  ierr += tcubed_test(NumGlobalElements, verbose, Comm,
		      true, true, "Left Preconditioning",
		      "SMW");

  // Test includeUV = true, useP = false, prec = "Left Preconditioning", formulation = "SMW"
  ierr += tcubed_test(NumGlobalElements, verbose, Comm,
		      true, false, "Left Preconditioning",
		      "SMW");

  if (NumProc > 1) {
    // Test includeUV = false, useP = true, prec = "Left Preconditioning"
    ierr += tcubed_test(NumGlobalElements, verbose, Comm,
			false, true, "Left Preconditioning",
			"Jacobian");

    // Test includeUV = false, useP = false, prec = "Left Preconditioning"
    ierr += tcubed_test(NumGlobalElements, verbose, Comm,
			false, false, "Left Preconditioning",
			"Jacobian");

    // Test includeUV = false, useP = true, prec = "Left Preconditioning"
    ierr += tcubed_test(NumGlobalElements, verbose, Comm,
			false, true, "Left Preconditioning",
			"SMW");

    // Test includeUV = false, useP = false, prec = "Left Preconditioning"
    ierr += tcubed_test(NumGlobalElements, verbose, Comm,
			false, false, "Left Preconditioning",
			"SMW");
  }

#ifdef HAVE_NOX_EPETRAEXT
  // Test includeUV = true, useP = true, prec = "Explicit Transpose"
  ierr += tcubed_test(NumGlobalElements, verbose, Comm,
		      true, true, "Explicit Transpose",
		      "Jacobian");

  // Test includeUV = true, useP = false, prec = "Explicit Transpose"
  ierr += tcubed_test(NumGlobalElements, verbose, Comm,
		      true, false, "Explicit Transpose",
		      "Jacobian");

  if (NumProc > 1) {
    // Test includeUV = false, useP = true, prec = "Explicit Transpose"
    ierr += tcubed_test(NumGlobalElements, verbose, Comm,
			false, true, "Explicit Transpose",
			"Jacobian");

    // Test includeUV = false, useP = false, prec = "Explicit Transpose"
    ierr += tcubed_test(NumGlobalElements, verbose, Comm,
			false, false, "Explicit Transpose",
			"Jacobian");

    // Test includeUV = false, useP = true, prec = "Explicit Transpose"
    ierr += tcubed_test(NumGlobalElements, verbose, Comm,
			false, true, "Explicit Transpose",
			"SMW");

    // Test includeUV = false, useP = false, prec = "Explicit Transpose"
    ierr += tcubed_test(NumGlobalElements, verbose, Comm,
			false, false, "Explicit Transpose",
			"SMW");
  }

  // Test includeUV = true, useP = true, prec = "Explicit Transpose", formulation = "SMW"
  ierr += tcubed_test(NumGlobalElements, verbose, Comm,
		      true, true, "Explicit Transpose",
		      "SMW");

  // Test includeUV = true, useP = false, prec = "Explicit Transpose", formulation = "SMW"
  ierr += tcubed_test(NumGlobalElements, verbose, Comm,
		      true, false, "Explicit Transpose",
		      "SMW");
#endif

  if (MyPID == 0) {
    if (ierr == 0)
      std::cout << "All tests passed!" << std::endl;
    else
      std::cout << ierr << " test(s) failed!" << std::endl;
  }

#ifdef HAVE_MPI
    MPI_Finalize() ;
#endif

/* end main */
    return ierr ;
}
Example #27
0
// =======================================================================
// GOAL: test that the names in the factory do not change. This test
//       will not solve any linear system.
//
int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
  Epetra_SerialComm Comm;
#endif

  Teuchos::ParameterList GaleriList;
  const int n = 9; 
  GaleriList.set("n", n);
  Teuchos::RefCountPtr<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap("Linear", Comm, GaleriList) );
  Teuchos::RefCountPtr<Epetra_CrsMatrix> A = Teuchos::rcp( Galeri::CreateCrsMatrix("Minij", &*Map, GaleriList) );
  
  Ifpack Factory;
  Teuchos::RefCountPtr<Ifpack_Preconditioner> Prec;
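
  // The same Create / Initialize / Compute / print pattern is repeated
  // below once for every factory name. A compact equivalent (sketch only,
  // not part of the original test) would be:
  //
  //   const char* names[] = {"point relaxation", "block relaxation",
  //                          "IC", "ICT", "ILU", "ILUT"};
  //   for (int k = 0; k < 6; ++k) {
  //     Prec = Teuchos::rcp(Factory.Create(names[k], &*A));
  //     assert (Prec != Teuchos::null);
  //     IFPACK_CHK_ERR(Prec->Initialize());
  //     IFPACK_CHK_ERR(Prec->Compute());
  //     cout << *Prec;
  //   }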

  Prec = Teuchos::rcp( Factory.Create("point relaxation", &*A) );
  assert (Prec != Teuchos::null);
  IFPACK_CHK_ERR(Prec->Initialize());
  IFPACK_CHK_ERR(Prec->Compute());
  cout << *Prec;

  Prec = Teuchos::rcp( Factory.Create("point relaxation stand-alone", &*A) );
  assert (Prec != Teuchos::null);
  IFPACK_CHK_ERR(Prec->Initialize());
  IFPACK_CHK_ERR(Prec->Compute());
  cout << *Prec;

  Prec = Teuchos::rcp( Factory.Create("block relaxation", &*A) );
  assert (Prec != Teuchos::null);
  IFPACK_CHK_ERR(Prec->Initialize());
  IFPACK_CHK_ERR(Prec->Compute());
  cout << *Prec;

  Prec = Teuchos::rcp( Factory.Create("block relaxation stand-alone", &*A) );
  assert (Prec != Teuchos::null);
  IFPACK_CHK_ERR(Prec->Initialize());
  IFPACK_CHK_ERR(Prec->Compute());
  cout << *Prec;

  Prec = Teuchos::rcp( Factory.Create("IC", &*A) );
  assert (Prec != Teuchos::null);
  IFPACK_CHK_ERR(Prec->Initialize());
  IFPACK_CHK_ERR(Prec->Compute());
  cout << *Prec;

  Prec = Teuchos::rcp( Factory.Create("ICT", &*A) );
  assert (Prec != Teuchos::null);
  IFPACK_CHK_ERR(Prec->Initialize());
  IFPACK_CHK_ERR(Prec->Compute());
  cout << *Prec;

  Prec = Teuchos::rcp( Factory.Create("ILU", &*A) );
  assert (Prec != Teuchos::null);
  IFPACK_CHK_ERR(Prec->Initialize());
  IFPACK_CHK_ERR(Prec->Compute());
  cout << *Prec;

  Prec = Teuchos::rcp( Factory.Create("ILUT", &*A) );
  assert (Prec != Teuchos::null);
  IFPACK_CHK_ERR(Prec->Initialize());
  IFPACK_CHK_ERR(Prec->Compute());
  cout << *Prec;

  Prec = Teuchos::rcp( Factory.Create("IC stand-alone", &*A) );
  assert (Prec != Teuchos::null);
  IFPACK_CHK_ERR(Prec->Initialize());
  IFPACK_CHK_ERR(Prec->Compute());
  cout << *Prec;

  Prec = Teuchos::rcp( Factory.Create("ICT stand-alone", &*A) );
  assert (Prec != Teuchos::null);
  IFPACK_CHK_ERR(Prec->Initialize());
  IFPACK_CHK_ERR(Prec->Compute());
  cout << *Prec;

  Prec = Teuchos::rcp( Factory.Create("ILU stand-alone", &*A) );
  assert (Prec != Teuchos::null);
  IFPACK_CHK_ERR(Prec->Initialize());
  IFPACK_CHK_ERR(Prec->Compute());
  cout << *Prec;

  Prec = Teuchos::rcp( Factory.Create("ILUT stand-alone", &*A) );
  assert (Prec != Teuchos::null);
  IFPACK_CHK_ERR(Prec->Initialize());
  IFPACK_CHK_ERR(Prec->Compute());
  cout << *Prec;

#ifdef HAVE_IFPACK_AMESOS
  Prec = Teuchos::rcp( Factory.Create("Amesos", &*A) );
  assert (Prec != Teuchos::null);
  IFPACK_CHK_ERR(Prec->Initialize());
  IFPACK_CHK_ERR(Prec->Compute());
  cout << *Prec;

  Prec = Teuchos::rcp( Factory.Create("Amesos stand-alone", &*A) );
  assert (Prec != Teuchos::null);
  IFPACK_CHK_ERR(Prec->Initialize());
  IFPACK_CHK_ERR(Prec->Compute());
  cout << *Prec;
#endif
  
  Prec = Teuchos::rcp( Factory.Create("Chebyshev", &*A) );
  assert (Prec != Teuchos::null);
  IFPACK_CHK_ERR(Prec->Initialize());
  IFPACK_CHK_ERR(Prec->Compute());
  cout << *Prec;

  Prec = Teuchos::rcp( Factory.Create("Polynomial", &*A) );
  assert (Prec != Teuchos::null);
  IFPACK_CHK_ERR(Prec->Initialize());
  IFPACK_CHK_ERR(Prec->Compute());
  cout << *Prec;

  Prec = Teuchos::rcp( Factory.Create("Krylov", &*A) );
  assert (Prec != Teuchos::null);
  IFPACK_CHK_ERR(Prec->Initialize());
  IFPACK_CHK_ERR(Prec->Compute());
  cout << *Prec;

  if (Comm.MyPID() == 0)
    cout << "Test `PrecondititonerFactory.exe' passed!" << endl;

#ifdef HAVE_MPI
  MPI_Finalize() ; 
#endif

  return(EXIT_SUCCESS);
}
int main(int argc, char *argv[])
{

  // initialize MPI and Epetra communicator
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
  Epetra_SerialComm Comm;
#endif

  Teuchos::ParameterList GaleriList;

  // The problem is defined on a 2D grid; the global size is nx * ny,
  // with ny = nx * (number of processes).
  int nx = 30; 
  GaleriList.set("nx", nx);
  GaleriList.set("ny", nx * Comm.NumProc());
  GaleriList.set("mx", 1);
  GaleriList.set("my", Comm.NumProc());
  Teuchos::RefCountPtr<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap("Cartesian2D", Comm, GaleriList) );
  Teuchos::RefCountPtr<Epetra_RowMatrix> A = Teuchos::rcp( Galeri::CreateCrsMatrix("Laplace2D", &*Map, GaleriList) );

  // =============================================================== //
  // B E G I N N I N G   O F   I F P A C K   C O N S T R U C T I O N //
  // =============================================================== //

  Teuchos::ParameterList List;

  // allocates an IFPACK factory. No data is associated
  // with this object (only the Create() method).
  Ifpack Factory;

  // create the preconditioner. For valid PrecType values,
  // please check the documentation
  std::string PrecType = "Amesos";
  int OverlapLevel = 2; // must be >= 0. If Comm.NumProc() == 1,
                        // it is ignored.
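
  // Other names accepted by Factory.Create() (all exercised in the
  // factory test above) include "point relaxation", "block relaxation",
  // "IC", "ICT", "ILU", "ILUT" and "Chebyshev".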

  Teuchos::RefCountPtr<Ifpack_Preconditioner> Prec = Teuchos::rcp( Factory.Create(PrecType, &*A, OverlapLevel) );
  assert(Prec != Teuchos::null);

  // specify the Amesos solver to be used.
  // If the selected solver is not available,
  // IFPACK will try to use Amesos' KLU (which is almost
  // always compiled). Amesos' serial solvers are:
  // "Amesos_Klu", "Amesos_Umfpack", "Amesos_Superlu"
  List.set("amesos: solver type", "Amesos_Klu");

  // sets the parameters
  IFPACK_CHK_ERR(Prec->SetParameters(List));

  // initialize the preconditioner. At this point the matrix must
  // have been FillComplete()'d, but actual values are ignored.
  // At this call, Amesos will perform the symbolic factorization.
  IFPACK_CHK_ERR(Prec->Initialize());

  // Build the preconditioner using the actual values of
  // the matrix. At this call, Amesos will perform the
  // numeric factorization.
  IFPACK_CHK_ERR(Prec->Compute());

  // =================================================== //
  // E N D   O F   I F P A C K   C O N S T R U C T I O N //
  // =================================================== //

  // At this point, we need some additional objects
  // to define and solve the linear system.

  // defines LHS and RHS
  Epetra_Vector LHS(A->OperatorDomainMap());
  Epetra_Vector RHS(A->OperatorDomainMap());

  // set an exact solution of all ones ...
  LHS.PutScalar(1.0);
  // ... and build the corresponding RHS by applying A
  A->Apply(LHS,RHS);

  // then overwrite the RHS with random values, so the vector of ones
  // above is no longer the exact solution of the system being solved
  RHS.Random();

  // need an Epetra_LinearProblem to define AztecOO solver
  Epetra_LinearProblem Problem(&*A,&LHS,&RHS);

  // now we can allocate the AztecOO solver
  AztecOO Solver(Problem);

  // specify solver
  Solver.SetAztecOption(AZ_solver,AZ_gmres);
  Solver.SetAztecOption(AZ_output,32);

  // HERE WE SET THE IFPACK PRECONDITIONER
  Solver.SetPrecOperator(&*Prec);

  // .. and here we solve
  // NOTE: with one process, the solver must converge in
  // one iteration.
  Solver.Iterate(1550,1e-8);
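
  // The iteration count and true residual reached by AztecOO could be
  // inspected afterwards, for example (illustrative sketch, not part of
  // the original example):
  //
  //   if (Comm.MyPID() == 0)
  //     cout << "iterations = " << Solver.NumIters()
  //          << ", true residual = " << Solver.TrueResidual() << endl;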

#ifdef HAVE_MPI
  MPI_Finalize() ; 
#endif

    return(EXIT_SUCCESS);
}
int main(int argc, char *argv[]) {

#ifdef HAVE_MPI
  MPI_Init(&argc, &argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  // initialize the random number generator

  int ml_one = 1;
  ML_srandom1(&ml_one);
  // ===================== //
  // create linear problem //
  // ===================== //

  ParameterList GaleriList;
  GaleriList.set("nx", 10);
  GaleriList.set("ny", 10);
  GaleriList.set("nz", 10 * Comm.NumProc());
  GaleriList.set("mx", 1);
  GaleriList.set("my", 1);
  GaleriList.set("mz", Comm.NumProc());

  Epetra_Map* Map = CreateMap("Cartesian3D", Comm, GaleriList);
  Epetra_CrsMatrix* Matrix = CreateCrsMatrix("Laplace3D", Map, GaleriList);
  Epetra_MultiVector* Coords = CreateCartesianCoordinates("3D",Map,GaleriList);

  Epetra_Vector LHS(*Map);
  Epetra_Vector RHS(*Map);

  Epetra_LinearProblem Problem(Matrix, &LHS, &RHS);

  Teuchos::ParameterList MLList;
  double TotalErrorResidual = 0.0, TotalErrorExactSol = 0.0;

  // ====================== //
  // default options for SA //
  // ====================== //

  if (Comm.MyPID() == 0) PrintLine();

  ML_Epetra::SetDefaults("SA",MLList);
  MLList.set("smoother: type", "Gauss-Seidel");
  char mystring[80];
  strcpy(mystring,"SA");
  TestMultiLevelPreconditioner(mystring, MLList, Problem,
                               TotalErrorResidual, TotalErrorExactSol);
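
  // TestMultiLevelPreconditioner() is defined elsewhere in this test; its
  // core presumably looks roughly like the following (sketch only, under
  // the assumption that it builds an ML preconditioner and solves the
  // problem with AztecOO):
  //
  //   ML_Epetra::MultiLevelPreconditioner MLPrec(*Matrix, MLList);
  //   AztecOO solver(Problem);
  //   solver.SetPrecOperator(&MLPrec);
  //   solver.SetAztecOption(AZ_solver, AZ_cg);
  //   solver.Iterate(500, 1e-12);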


  // ============================================== //
  // default options for SA, efficient symmetric GS //
  // ============================================== //

  if (Comm.MyPID() == 0) PrintLine();

  ML_Epetra::SetDefaults("SA",MLList);
  MLList.set("smoother: type", "Gauss-Seidel");
  MLList.set("smoother: Gauss-Seidel efficient symmetric",true);

  TestMultiLevelPreconditioner(mystring, MLList, Problem,
                               TotalErrorResidual, TotalErrorExactSol,true);

  // ============================== //
  // default options for SA, Jacobi //
  // ============================== //

  if (Comm.MyPID() == 0) PrintLine();

  ML_Epetra::SetDefaults("SA",MLList);
  MLList.set("smoother: type", "Jacobi");

  TestMultiLevelPreconditioner(mystring, MLList, Problem, TotalErrorResidual,
                               TotalErrorExactSol,true);

  // ================================= //
  // default options for SA, Chebyshev //
  // ================================= //

  if (Comm.MyPID() == 0) PrintLine();

  ML_Epetra::SetDefaults("SA",MLList);
  MLList.set("smoother: type", "Chebyshev");

  TestMultiLevelPreconditioner(mystring, MLList, Problem,
                               TotalErrorResidual, TotalErrorExactSol);



  // =========================== //
  // Specifying Ifpack coarse lists correctly
  // =========================== //
#ifdef HAVE_ML_IFPACK
  if (Comm.MyPID() == 0) PrintLine();
  ML_Epetra::SetDefaults("SA",MLList);

  if(!Comm.MyPID()) {
    MLList.set("ML print initial list",1);
    MLList.set("ML print final list",1);
  }

  MLList.set("smoother: type","ILU");
  MLList.set("coarse: type","ILUT");
  ParameterList &fList = MLList.sublist("smoother: ifpack list");
  fList.set("fact: level-of-fill",1);
  ParameterList &cList = MLList.sublist("coarse: ifpack list");
  cList.set("fact: ilut level-of-fill",1e-2);
  TestMultiLevelPreconditioner(mystring, MLList, Problem,
                               TotalErrorResidual, TotalErrorExactSol);
#endif


  // =========================== //
  // Specifying level sublists
  // =========================== //
  if (Comm.MyPID() == 0) PrintLine();
  ParameterList LevelList;
  ML_Epetra::SetDefaults("SA",LevelList);
  ParameterList &smList = LevelList.sublist("smoother: list (level 0)");
  smList.set("smoother: type","Jacobi");
  smList.set("smoother: sweeps",5);
  ParameterList &smList2 = LevelList.sublist("smoother: list (level 1)");
  smList2.set("smoother: type","symmetric Gauss-Seidel");
  smList2.set("smoother: sweeps",3);
  ParameterList &coarseList = LevelList.sublist("coarse: list");
  coarseList.set("smoother: type","symmetric Gauss-Seidel");
  TestMultiLevelPreconditioner(mystring, LevelList, Problem,
                               TotalErrorResidual, TotalErrorExactSol);


  // =========================== //
  // Ifpack G-S w/ L1
  // =========================== //
#ifdef HAVE_ML_IFPACK
  if (Comm.MyPID() == 0) PrintLine();
  ML_Epetra::SetDefaults("SA",MLList);
  MLList.set("smoother: use l1 Gauss-Seidel",true);
  MLList.set("smoother: type", "Gauss-Seidel");
  TestMultiLevelPreconditioner(mystring, MLList, Problem,
                               TotalErrorResidual, TotalErrorExactSol);
#endif

  // =========================== //
  // Ifpack SGS w/ L1
  // =========================== //
#ifdef HAVE_ML_IFPACK
  if (Comm.MyPID() == 0) PrintLine();
  ML_Epetra::SetDefaults("SA",MLList);
  MLList.set("smoother: use l1 Gauss-Seidel",true);
  MLList.set("smoother: type", "symmetric Gauss-Seidel");
  TestMultiLevelPreconditioner(mystring, MLList, Problem,
                               TotalErrorResidual, TotalErrorExactSol);
#endif


  // =========================== //
  // Autodetected Line SGS (trivial lines) 
  // =========================== //
  if (Comm.MyPID() == 0) PrintLine();
  ML_Epetra::SetDefaults("SA",MLList);
  MLList.set("smoother: type", "line Gauss-Seidel");
  MLList.set("smoother: line detection threshold",0.1);
  MLList.set("x-coordinates",(*Coords)[0]);
  MLList.set("y-coordinates",(*Coords)[1]);
  MLList.set("z-coordinates",(*Coords)[2]);
  TestMultiLevelPreconditioner(mystring, MLList, Problem,
                               TotalErrorResidual, TotalErrorExactSol);
  

  // ===================== //
  // print out total error //
  // ===================== //

  if (Comm.MyPID() == 0) {
    cout << endl;
    cout << "......Total error for residual        = " << TotalErrorResidual << endl;
    cout << "......Total error for exact solution  = " << TotalErrorExactSol << endl;
    cout << endl;
  }

  delete Matrix;
  delete Coords;
  delete Map;


  if (TotalErrorResidual > 1e-8) {
    cerr << "Error: `MultiLevelPrecoditioner_Sym.exe' failed!" << endl;
    exit(EXIT_FAILURE);
  }

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  if (Comm.MyPID() == 0)
    cerr << "`MultiLevelPrecoditioner_Sym.exe' passed!" << endl;

  return (EXIT_SUCCESS);
}
Example #30
0
int main(int argc, char *argv[])
{
  
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  try {

    // ================================================== //
    // Defines the grid for this problem, a rectangle,    //
    // with the number of nodes along the X-axis (nx) and //
    // Y-axis (ny), the length of the rectangle along the //
    // axes, and the number of processors on each axis.   //
    // ================================================== //

    int nx = 40 * Comm.NumProc();
    int ny = 40;
    int mx = Comm.NumProc();
    int my = 1;

    //TriangleRectangleGrid Grid(Comm, nx, ny, mx, my);
    FileGrid Grid(Comm, "Square.grid");

    // ======================================================== //
    // Prepares the linear system. This requires the definition //
    // of a quadrature formula compatible with the grid, a      //
    // variational formulation, and a problem object that takes //
    // care of filling the matrix and right-hand side.          //
    // ======================================================== //
    
    Epetra_CrsMatrix A(Copy, Grid.RowMap(), 0);
    Epetra_Vector    LHS(Grid.RowMap());
    Epetra_Vector    RHS(Grid.RowMap());

    int NumQuadratureNodes = 3;

    SUPGVariational<TriangleQuadrature>
      AdvDiff(NumQuadratureNodes, Diffusion, ConvX, ConvY, ConvZ, 
              Source, Force, BoundaryValue, BoundaryType);

    LinearProblem FiniteElementProblem(Grid, AdvDiff, A, LHS, RHS); 
    FiniteElementProblem.Compute();

    // =================================================== //
    // The solution must be computed here by solving the   //
    // linear system A * LHS = RHS.                        //
    //
    // NOTE: Solve() IS A SIMPLE FUNCTION BASED ON LAPACK, //
    // THEREFORE THE MATRIX IS CONVERTED TO DENSE FORMAT.  //
    // IT WORKS IN SERIAL ONLY.                            //
    // EVEN MEDIUM-SIZED MATRICES MAY REQUIRE A LOT OF     //
    // MEMORY AND CPU-TIME! USERS SHOULD CONSIDER INSTEAD  //
    // AZTECOO, ML, IFPACK OR OTHER SOLVERS.               //
    // =================================================== //

    Solve(&A, &LHS, &RHS);
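
    // A sparse iterative alternative to the dense Solve() call above
    // would be along these lines (sketch only; AztecOO is not used in
    // this example):
    //
    //   Epetra_LinearProblem Problem(&A, &LHS, &RHS);
    //   AztecOO Solver(Problem);
    //   Solver.SetAztecOption(AZ_solver, AZ_gmres);
    //   Solver.Iterate(1550, 1e-8);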

    // ================== //
    // Output using MEDIT //
    // ================== //
    
    MEDITInterface MEDIT(Comm);
    MEDIT.Write(Grid, "AdvDiff2D", LHS);

  }
  catch (int e) {
    cerr << "Caught exception, value = " << e << endl;
  }
  catch (...) {
    cerr << "Caught generic exception" << endl;
  }

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return(0);
}