Example #1
// ===================================================
// Methods
// ===================================================
Int
PreconditionerML::buildPreconditioner ( operator_type& matrix )
{

    // The ML_Epetra::MultiLevelPreconditioner accesses memory co-owned by M_operator in an unsafe way.
    // To avoid the risk of dangling pointers, always deallocate M_preconditioner first, then M_operator.
    M_preconditioner.reset();
    M_operator = matrix;

    M_precType = M_list.get ( "prec type", "undefined??" );
    M_precType += "_ML";

    // <one-level-postsmoothing> / <two-level-additive>
    // <two-level-hybrid> / <two-level-hybrid2>

    M_preconditioner.reset ( new prec_raw_type ( * (M_operator->matrixPtr() ), this->parametersList(), true ) );

    if ( M_analyze )
    {
        ML_Epetra::MultiLevelPreconditioner* prec;
        prec = dynamic_cast<ML_Epetra::MultiLevelPreconditioner*> ( M_preconditioner.get() );
        Int NumPreCycles = 5;
        Int NumPostCycles = 1;
        Int NumMLCycles = 10;
        prec->AnalyzeHierarchy ( true, NumPreCycles, NumPostCycles, NumMLCycles );
    }

    this->M_preconditionerCreated = true;

    return ( EXIT_SUCCESS );
}
Example #2
void
TrilinosPreconditioner<T>::compute()
{
  Ifpack_Preconditioner * ifpack = NULL;
#ifdef LIBMESH_HAVE_ML
  ML_Epetra::MultiLevelPreconditioner * ml = NULL;
#endif

  switch (this->_preconditioner_type)
    {
      // IFPACK preconditioners
    case ILU_PRECOND:
    case SOR_PRECOND:
      ifpack = dynamic_cast<Ifpack_Preconditioner *>(_prec);
      ifpack->Compute();
      break;

#ifdef LIBMESH_HAVE_ML
      // ML preconditioners
    case AMG_PRECOND:
      ml = dynamic_cast<ML_Epetra::MultiLevelPreconditioner *>(_prec);
      ml->ComputePreconditioner();
      break;
#endif

    default:
      // do nothing here
      break;
    }
}
Example #3
void
TrilinosPreconditioner<T>::compute()
{
#ifdef LIBMESH_TRILINOS_HAVE_IFPACK
  Ifpack_Preconditioner * ifpack = libmesh_nullptr;
#endif

#ifdef LIBMESH_TRILINOS_HAVE_ML
  ML_Epetra::MultiLevelPreconditioner * ml = libmesh_nullptr;
#endif

  switch (this->_preconditioner_type)
    {
#ifdef LIBMESH_TRILINOS_HAVE_IFPACK
      // IFPACK preconditioners
    case ILU_PRECOND:
    case SOR_PRECOND:
      ifpack = dynamic_cast<Ifpack_Preconditioner *>(_prec);
      ifpack->Compute();
      break;
#endif

#ifdef LIBMESH_TRILINOS_HAVE_ML
      // ML preconditioners
    case AMG_PRECOND:
      ml = dynamic_cast<ML_Epetra::MultiLevelPreconditioner *>(_prec);
      ml->ComputePreconditioner();
      break;
#endif

    default:
      // If we made it here, there were no TrilinosPreconditioners
      // active, so that's probably an error.
      libmesh_error_msg("ERROR: No valid TrilinosPreconditioners available!");
      break;
    }
}
Example #4
PetscErrorCode ShellApplyML(PC pc,Vec x,Vec y)
{
  PetscErrorCode  ierr;
  ML_Epetra::MultiLevelPreconditioner *mlp = 0;
  void* ctx;

  ierr = PCShellGetContext(pc,&ctx); CHKERRQ(ierr);  
  mlp = (ML_Epetra::MultiLevelPreconditioner*)ctx;
  /* Wrap x and y as Epetra_Vectors. */
  PetscScalar *xvals,*yvals;
  ierr = VecGetArray(x,&xvals);CHKERRQ(ierr);
  Epetra_Vector epx(View,mlp->OperatorDomainMap(),xvals);
  ierr = VecGetArray(y,&yvals);CHKERRQ(ierr);
  Epetra_Vector epy(View,mlp->OperatorRangeMap(),yvals);

  /* Apply ML. */
  mlp->ApplyInverse(epx,epy);
  
  /* Clean up and return. */
  ierr = VecRestoreArray(x,&xvals);CHKERRQ(ierr);
  ierr = VecRestoreArray(y,&yvals);CHKERRQ(ierr);
  return 0;
} /*ShellApplyML*/
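The example above shows only the apply callback. A minimal sketch of how such a shell preconditioner is typically registered with PETSc follows; the KSP object `ksp' and an already constructed ML_Epetra::MultiLevelPreconditioner `mlp' are assumptions here, supplied by the calling code.

  PC pc;
  PetscErrorCode ierr;
  ierr = KSPGetPC(ksp, &pc); CHKERRQ(ierr);
  ierr = PCSetType(pc, PCSHELL); CHKERRQ(ierr);
  /* hand the ML preconditioner to the shell as its context ... */
  ierr = PCShellSetContext(pc, (void*) mlp); CHKERRQ(ierr);
  /* ... and install ShellApplyML as the apply routine */
  ierr = PCShellSetApply(pc, ShellApplyML); CHKERRQ(ierr);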
Example #5
int main(int argc, char *argv[])
{
  
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  // Creates the linear problem using the Galeri package.
  // Several matrix examples are supported; please refer to the
  // Galeri documentation for more details.
  // Most of the examples using the ML_Epetra::MultiLevelPreconditioner
  // class are based on Epetra_CrsMatrix. Example
  // `ml_EpetraVbr.cpp' shows how to define a Epetra_VbrMatrix.
  // `Laplace2D' is a symmetric matrix; an example of non-symmetric
  // matrices is `Recirc2D' (advection-diffusion in a box, with
  // recirculating flow). The grid has nx x ny nodes, divided into
  // mx x my subdomains, each assigned to a different processor.

  int nx;
  if (argc > 1)
    nx = (int) strtol(argv[1],NULL,10);
  else
    nx = 8;
  int ny = nx * Comm.NumProc(); // each subdomain is a square

  ParameterList GaleriList;
  GaleriList.set("nx", nx);
  GaleriList.set("ny", ny);
  GaleriList.set("mx", 1);
  GaleriList.set("my", Comm.NumProc());

  Epetra_Map* Map = CreateMap("Cartesian2D", Comm, GaleriList);

  Epetra_CrsMatrix* A = CreateCrsMatrix("Laplace2D", Map, GaleriList);
    
  // Build a linear system with trivial solution, using a random vector
  // as starting solution.
  Epetra_Vector LHS(*Map); LHS.Random();
  Epetra_Vector RHS(*Map); RHS.PutScalar(0.0);

  Epetra_LinearProblem Problem(A, &LHS, &RHS);

  // As we wish to use AztecOO, we need to construct a solver object 
  // for this problem
  AztecOO solver(Problem);

  // =========================== begin of ML part ===========================
  
  // create a parameter list for ML options
  ParameterList MLList;

  // Sets default parameters for classic smoothed aggregation. After this
  // call, MLList contains the default values for the ML parameters,
  // as required by typical smoothed aggregation for symmetric systems.
  // Other sets of parameters are available for non-symmetric systems
  // ("DD" and "DD-ML"), and for the Maxwell equations ("maxwell").
  ML_Epetra::SetDefaults("SA",MLList);
  
  // overwrite some parameters. Please refer to the user's guide
  // for more information. Some of the parameters do not differ from
  // their default values and are reported here for the sake of clarity.
  
  // output level, 0 being silent and 10 verbose
  MLList.set("ML output", 10);
  // maximum number of levels
  MLList.set("max levels",5);
  // set finest level to 0
  MLList.set("increasing or decreasing","increasing");

  // use Uncoupled scheme to create the aggregate
  MLList.set("aggregation: type", "Uncoupled");

  // smoother is symmetric Gauss-Seidel. Example file 
  // `ml/examples/TwoLevelDD/ml_2level_DD.cpp' shows how to use
  // AZTEC's preconditioners as smoothers
  //MLList.set("smoother: type","symmetric Gauss-Seidel");

  // use both pre and post smoothing
  MLList.set("smoother: pre or post", "both");

  MLList.set("smoother: type", "user-defined");
  //The function pointer to the user-defined smoother.
  MLList.set("smoother: user-defined function",userSmoother);
  //(optional) The label for the user-defined smoother.
  MLList.set("smoother: user-defined name","myJacobi");
  //Use this smoother on the finest level, 0.
  MLList.set("smoother: type (level 0)", "user-defined");
  MLList.set("smoother: sweeps (level 0)", 1);
  //Use symmetric Gauss-Seidel on all subsequent levels.
  MLList.set("smoother: type (level 1)", "symmetric Gauss-Seidel");

  //user-defined smoothing for a 1-level method only
  //to use this, uncomment the next 3 lines and comment out the direct-solver
  //settings further below
  //MLList.set("coarse: type", "user-defined");
  //MLList.set("coarse: user-defined function",userSmoother);
  //MLList.set("coarse: user-defined name","myJacobi");


#ifdef HAVE_ML_AMESOS
  // solve with serial direct solver KLU
  MLList.set("coarse: type","Amesos-KLU");
#else
  // this is for testing purposes only, you should have 
  // a direct solver for the coarse problem (either Amesos, or the SuperLU/
  // SuperLU_DIST interface of ML)
  MLList.set("aggregation: type", "MIS");
  MLList.set("smoother: type","Jacobi");
  MLList.set("coarse: type","Jacobi");
#endif

  MLList.set("read XML", false); // skip XML file in this directory

  // Creates the preconditioning object. We suggest using `new' and
  // `delete' because the destructor contains some calls to MPI (as
  // required by ML and possibly Amesos). This is an issue only if the
  // destructor is called **after** MPI_Finalize().
  ML_Epetra::MultiLevelPreconditioner* MLPrec = 
    new ML_Epetra::MultiLevelPreconditioner(*A, MLList);

  // verify unused parameters on process 0 (put -1 to print on all
  // processes)
  MLPrec->PrintUnused(0);

  // =========================== end of ML part =============================
  
  // tell AztecOO to use the ML preconditioner, specify the solver 
  // and the output, then solve with 500 maximum iterations and 1e-12 
  // of tolerance (see AztecOO's user guide for more details)
  
  solver.SetPrecOperator(MLPrec);
  solver.SetAztecOption(AZ_solver, AZ_gmres);
  solver.SetAztecOption(AZ_output, 32);
  solver.Iterate(500, 1e-12);

  // destroy the preconditioner
  delete MLPrec;
  
  // compute the real residual

  double residual;
  LHS.Norm2(&residual);
  
  if( Comm.MyPID()==0 ) {
    cout << "||b-Ax||_2 = " << residual << endl;
  }

  // for testing purposes
  if (residual > 1e-5)
    exit(EXIT_FAILURE);

  delete A;
  delete Map;

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return(EXIT_SUCCESS);
} //main
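The example above installs a function pointer named userSmoother but does not show its definition. A minimal damped-Jacobi sketch of what such a callback might look like follows; the signature and the ML_Get_MySmootherData / ML_Operator_Apply / ML_Operator_Get_Diag calls follow ML's user-defined smoother interface, and the damping factor 0.5 is an arbitrary illustrative choice.

  // Sketch of a user-defined smoother: one sweep of damped Jacobi.
  int userSmoother(ML_Smoother *data, int x_length, double x[],
                   int rhs_length, double rhs[])
  {
    double omega = 0.5;  // damping factor (illustrative choice)
    ML_Operator *Amat = (ML_Operator *) ML_Get_MySmootherData((ML_Smoother *) data);
    double *ap   = (double *) ML_allocate(Amat->outvec_leng * sizeof(double));
    double *diag = NULL;

    ML_Operator_Apply(Amat, x_length, x, rhs_length, ap);  // ap = A*x
    ML_Operator_Get_Diag(Amat, x_length, &diag);           // pointer to diag(A)

    for (int i = 0; i < x_length; i++)                     // x += omega * D^{-1} (rhs - A*x)
      x[i] += omega * (rhs[i] - ap[i]) / diag[i];

    ML_free(ap);
    return 0;
  }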
Example #6
int main(int argc, char *argv[])
{

#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

#define ML_SCALING
#ifdef ML_SCALING
   const int ntimers=4;
   enum {total, probBuild, precBuild, solve};
   ml_DblLoc timeVec[ntimers], maxTime[ntimers], minTime[ntimers];

  for (int i=0; i<ntimers; i++) timeVec[i].rank = Comm.MyPID();
  timeVec[total].value = MPI_Wtime();
#endif

  int nx;
  if (argc > 1) nx = (int) strtol(argv[1],NULL,10);
  else          nx = 256;

  if (nx < 1) nx = 256; // input a nonpositive integer if you want to specify
                        // the XML input file name.
  nx = nx*(int)sqrt((double)Comm.NumProc());
  int ny = nx;

  printf("nx = %d\nny = %d\n",nx,ny);
  fflush(stdout);

  char xmlFile[80];
  bool readXML=false;
  if (argc > 2) {strcpy(xmlFile,argv[2]); readXML = true;}
  else sprintf(xmlFile,"%s","params.xml");

  ParameterList GaleriList;
  GaleriList.set("nx", nx);
  GaleriList.set("ny", ny);

#ifdef ML_SCALING
  timeVec[probBuild].value = MPI_Wtime();
#endif
  Epetra_Map* Map = CreateMap("Cartesian2D", Comm, GaleriList);
  Epetra_CrsMatrix* A = CreateCrsMatrix("Laplace2D", Map, GaleriList);

  if (!Comm.MyPID()) printf("nx = %d, ny = %d, mx = %d, my = %d\n",nx,ny,GaleriList.get("mx",-1),GaleriList.get("my",-1));
  fflush(stdout);
  //avoid potential overflow
  double numMyRows = A->NumMyRows();
  double numGlobalRows;
  Comm.SumAll(&numMyRows,&numGlobalRows,1);
  if (!Comm.MyPID()) printf("# global rows = %1.0f\n",numGlobalRows);
  //printf("pid %d: #rows = %d\n",Comm.MyPID(),A->NumMyRows());
  fflush(stdout);

  Epetra_MultiVector *coords = CreateCartesianCoordinates("2D", Map,GaleriList);
  double *x_coord=0,*y_coord=0,*z_coord=0;
  double **ttt;
  if (!coords->ExtractView(&ttt)) {
    x_coord = ttt[0];
    y_coord = ttt[1];
  } else {
    if (!Comm.MyPID()) printf("Error extracting coordinate vectors\n");
    MPI_Finalize();
    exit(EXIT_FAILURE);
  }

  Epetra_Vector LHS(*Map); LHS.Random();
  Epetra_Vector RHS(*Map); RHS.PutScalar(0.0);
  Epetra_LinearProblem Problem(A, &LHS, &RHS);
  AztecOO solver(Problem);

#ifdef ML_SCALING
  timeVec[probBuild].value = MPI_Wtime() - timeVec[probBuild].value;
#endif

  // =========================== begin of ML part ===========================

#ifdef ML_SCALING
  timeVec[precBuild].value = MPI_Wtime();
#endif
  ParameterList MLList;

  if (readXML) {
    MLList.set("read XML",true);
    MLList.set("XML input file",xmlFile);
  }
  else {
    cout << "here" << endl;
    ML_Epetra::SetDefaults("SA",MLList);
    MLList.set("smoother: type","Chebyshev");
    MLList.set("smoother: sweeps",3);
    MLList.set("coarse: max size",1);
  }

  MLList.set("x-coordinates", x_coord);
  MLList.set("y-coordinates", y_coord);
  MLList.set("z-coordinates", z_coord);

/*
RCP<std::vector<int> >
   m_smootherAztecOptions = rcp(new std::vector<int>(AZ_OPTIONS_SIZE));
RCP<std::vector<double> >
   m_smootherAztecParams = rcp(new std::vector<double>(AZ_PARAMS_SIZE));
//int             m_smootherAztecOptions[AZ_OPTIONS_SIZE];
//double          m_smootherAztecParams[AZ_PARAMS_SIZE];

std::string smootherType("Aztec");
AZ_defaults(&(*m_smootherAztecOptions)[0],&(*m_smootherAztecParams)[0]);
(*m_smootherAztecOptions)[AZ_precond]         = AZ_dom_decomp;
(*m_smootherAztecOptions)[AZ_subdomain_solve] = AZ_icc;
bool smootherAztecAsSolver = true;
*/
  MLList.set("ML output",10);

  MLList.set("repartition: enable",1);
  MLList.set("repartition: max min ratio",1.3);
  MLList.set("repartition: min per proc",200);
  MLList.set("repartition: partitioner","Zoltan");
  MLList.set("repartition: Zoltan dimensions",2);
  MLList.set("repartition: put on single proc",1);
  MLList.set("repartition: Zoltan type","hypergraph");
  MLList.set("repartition: estimated iterations",13);

/*
MLList.set("smoother: Aztec options",m_smootherAztecOptions);
MLList.set("smoother: Aztec params",m_smootherAztecParams);
MLList.set("smoother: type",smootherType.c_str());
MLList.set("smoother: Aztec as solver", smootherAztecAsSolver);

MLList.set("ML print initial list", 0);
MLList.set("ML print final list", 0);
*/

  ML_Epetra::MultiLevelPreconditioner* MLPrec =
    new ML_Epetra::MultiLevelPreconditioner(*A, MLList);

  // verify unused parameters on process 0 (put -1 to print on all
  // processes)
  MLPrec->PrintUnused(0);
#ifdef ML_SCALING
  timeVec[precBuild].value = MPI_Wtime() - timeVec[precBuild].value;
#endif

  // =========================== end of ML part =============================

#ifdef ML_SCALING
  timeVec[solve].value = MPI_Wtime();
#endif
  solver.SetPrecOperator(MLPrec);
  solver.SetAztecOption(AZ_solver, AZ_cg);
  solver.SetAztecOption(AZ_output, 1);
  solver.Iterate(500, 1e-12);
#ifdef ML_SCALING
  timeVec[solve].value = MPI_Wtime() - timeVec[solve].value;
#endif

  // destroy the preconditioner
  delete MLPrec;

  // compute the real residual

  double residual;
  LHS.Norm2(&residual);

  if( Comm.MyPID()==0 ) {
    cout << "||b-Ax||_2 = " << residual << endl;
  }

  // for testing purposes
  if (residual > 1e-5)
    exit(EXIT_FAILURE);

  delete A;
  delete Map;
  delete coords;

#ifdef ML_SCALING
  timeVec[total].value = MPI_Wtime() - timeVec[total].value;

  //avg
  double dupTime[ntimers],avgTime[ntimers];
  for (int i=0; i<ntimers; i++) dupTime[i] = timeVec[i].value;
  MPI_Reduce(dupTime,avgTime,ntimers,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
  for (int i=0; i<ntimers; i++) avgTime[i] = avgTime[i]/Comm.NumProc();
  //min
  MPI_Reduce(timeVec,minTime,ntimers,MPI_DOUBLE_INT,MPI_MINLOC,0,MPI_COMM_WORLD);
  //max
  MPI_Reduce(timeVec,maxTime,ntimers,MPI_DOUBLE_INT,MPI_MAXLOC,0,MPI_COMM_WORLD);

  if (Comm.MyPID() == 0) {
    printf("timing :  max (pid)  min (pid)  avg\n");
    printf("Problem build         :   %2.3e (%d)  %2.3e (%d)  %2.3e \n",
             maxTime[probBuild].value,maxTime[probBuild].rank,
             minTime[probBuild].value,minTime[probBuild].rank,
             avgTime[probBuild]);
    printf("Preconditioner build  :   %2.3e (%d)  %2.3e (%d)  %2.3e \n",
             maxTime[precBuild].value,maxTime[precBuild].rank,
             minTime[precBuild].value,minTime[precBuild].rank,
             avgTime[precBuild]);
    printf("Solve                 :   %2.3e (%d)  %2.3e (%d)  %2.3e \n",
             maxTime[solve].value,maxTime[solve].rank,
             minTime[solve].value,minTime[solve].rank,
             avgTime[solve]);
    printf("Total                 :   %2.3e (%d)  %2.3e (%d)  %2.3e \n",
             maxTime[total].value,maxTime[total].rank,
             minTime[total].value,minTime[total].rank,
             avgTime[total]);
  }
#endif

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return(EXIT_SUCCESS);
}
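This example (and Example #7 below) relies on an ml_DblLoc type that is not shown. Given the .value/.rank fields and the MPI_DOUBLE_INT reductions with MPI_MINLOC/MPI_MAXLOC, it is presumably laid out like the following sketch:

  /* Presumed layout of ml_DblLoc: a (value, rank) pair compatible with
     MPI_DOUBLE_INT and the MPI_MINLOC / MPI_MAXLOC reductions above. */
  typedef struct {
    double value;  /* timing value in seconds */
    int    rank;   /* MPI rank that owns the value */
  } ml_DblLoc;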
Example #7
int main(int argc, char *argv[])
{

#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  Teuchos::CommandLineProcessor clp(false);

  clp.setDocString("This is the canonical ML scaling example");

  //Problem
  std::string optMatrixType = "Laplace2D"; clp.setOption("matrixType",       &optMatrixType,           "matrix type ('Laplace2D', 'Laplace3D')");
  int optNx = 100;                         clp.setOption("nx",               &optNx,                   "mesh size in x direction");
  int optNy = -1;                          clp.setOption("ny",               &optNy,                   "mesh size in y direction");
  int optNz = -1;                          clp.setOption("nz",               &optNz,                   "mesh size in z direction");

  //Smoothers
  //std::string optSmooType = "Chebyshev";  clp.setOption("smooType",       &optSmooType,           "smoother type ('l1-sgs', 'sgs' or 'cheby')");
  int optSweeps = 3;                     clp.setOption("sweeps",         &optSweeps,             "Chebyshev degree (or SGS sweeps)");
  double optAlpha = 7;                   clp.setOption("alpha",          &optAlpha,              "Chebyshev eigenvalue ratio (recommend 7 in 2D, 20 in 3D)");

  //Coarsening
  int optMaxCoarseSize = 500;                     clp.setOption("maxcoarse",         &optMaxCoarseSize,  "Size of coarsest grid when coarsening should stop");
  int optMaxLevels = 10;                     clp.setOption("maxlevels",         &optMaxLevels,  "Maximum number of levels");

  //Krylov solver
  double optTol      = 1e-12;              clp.setOption("tol",            &optTol,                "stopping tolerance for Krylov method");
  int optMaxIts      = 500;              clp.setOption("maxits",            &optMaxIts,                "maximum iterations for Krylov method");

  //XML file with additional options
  std::string xmlFile = ""; clp.setOption("xml", &xmlFile, "XML file containing ML options. [OPTIONAL]");


  //Debugging
  int  optWriteMatrices = -2;                  clp.setOption("write",                  &optWriteMatrices, "write matrices to file (-1 means all; i>=0 means level i)");

  switch (clp.parse(argc, argv)) {
  case Teuchos::CommandLineProcessor::PARSE_HELP_PRINTED:        return EXIT_SUCCESS; break;
  case Teuchos::CommandLineProcessor::PARSE_ERROR:
  case Teuchos::CommandLineProcessor::PARSE_UNRECOGNIZED_OPTION: return EXIT_FAILURE; break;
  case Teuchos::CommandLineProcessor::PARSE_SUCCESSFUL:                               break;
  }

#ifdef ML_SCALING
   const int ntimers=4;
   enum {total, probBuild, precBuild, solve};
   ml_DblLoc timeVec[ntimers], maxTime[ntimers], minTime[ntimers];

  for (int i=0; i<ntimers; i++) timeVec[i].rank = Comm.MyPID();
  timeVec[total].value = MPI_Wtime();
#endif

  // Creates the linear problem using the Galeri package.
  // Several matrix examples are supported; please refer to the
  // Galeri documentation for more details.
  // Most of the examples using the ML_Epetra::MultiLevelPreconditioner
  // class are based on Epetra_CrsMatrix. Example
  // `ml_EpetraVbr.cpp' shows how to define a Epetra_VbrMatrix.
  // `Laplace2D' is a symmetric matrix; an example of non-symmetric
  // matrices is `Recirc2D' (advection-diffusion in a box, with
  // recirculating flow). The grid has optNx x optNy nodes, divided into
  // mx x my subdomains, each assigned to a different processor.
  if (optNy == -1) optNy = optNx;
  if (optNz == -1) optNz = optNx;

  ParameterList GaleriList;
  GaleriList.set("nx", optNx);
  GaleriList.set("ny", optNy);
  GaleriList.set("nz", optNz);
  //GaleriList.set("mx", 1);
  //GaleriList.set("my", Comm.NumProc());

#ifdef ML_SCALING
  timeVec[probBuild].value = MPI_Wtime();
#endif
  Epetra_Map* Map;
  Epetra_CrsMatrix* A;
  Epetra_MultiVector* Coord;

  if (optMatrixType == "Laplace2D") {
    Map = CreateMap("Cartesian2D", Comm, GaleriList);
    A = CreateCrsMatrix("Laplace2D", Map, GaleriList);
    Coord = CreateCartesianCoordinates("2D", &(A->Map()), GaleriList);
  } else if (optMatrixType == "Laplace3D") {
    Map = CreateMap("Cartesian3D", Comm, GaleriList);
    A = CreateCrsMatrix("Laplace3D", Map, GaleriList);
    Coord = CreateCartesianCoordinates("3D", &(A->Map()), GaleriList);
  } else {
    throw(std::runtime_error("Bad matrix type"));
  }

  //EpetraExt::RowMatrixToMatlabFile("A.m",*A);

  double *x_coord = (*Coord)[0];
  double *y_coord = (*Coord)[1];
  double* z_coord=NULL;
  if (optMatrixType == "Laplace3D") z_coord = (*Coord)[2];

  //EpetraExt::MultiVectorToMatrixMarketFile("mlcoords.m",*Coord);

  if( Comm.MyPID()==0 ) {
    std::cout << "========================================================" << std::endl;
    std::cout << " Matrix type: " << optMatrixType << std::endl;
    if (optMatrixType == "Laplace2D")
      std::cout << " Problem size: " << optNx*optNy << " (" << optNx << "x" << optNy << ")" << std::endl;
    else if (optMatrixType == "Laplace3D")
      std::cout << " Problem size: " << optNx*optNy*optNz << " (" << optNx << "x" << optNy << "x" << optNz << ")" << std::endl;

    int mx = GaleriList.get("mx", -1);
    int my = GaleriList.get("my", -1);
    int mz = GaleriList.get("mz", -1);
    std::cout << " Processor subdomains in x direction: " << mx << std::endl
              << " Processor subdomains in y direction: " << my << std::endl;
    if (optMatrixType == "Laplace3D")
      std::cout << " Processor subdomains in z direction: " << mz << std::endl;
    std::cout << "========================================================" << std::endl;
  }

  // Build a linear system with trivial solution, using a random vector
  // as starting solution.
  Epetra_Vector LHS(*Map); LHS.Random();
  Epetra_Vector RHS(*Map); RHS.PutScalar(0.0);

  Epetra_LinearProblem Problem(A, &LHS, &RHS);

  // As we wish to use AztecOO, we need to construct a solver object 
  // for this problem
  AztecOO solver(Problem);
#ifdef ML_SCALING
  timeVec[probBuild].value = MPI_Wtime() - timeVec[probBuild].value;
#endif

  // =========================== begin of ML part ===========================
  
#ifdef ML_SCALING
  timeVec[precBuild].value = MPI_Wtime();
#endif
  // create a parameter list for ML options
  ParameterList MLList;

  // Sets default parameters for classic smoothed aggregation. After this
  // call, MLList contains the default values for the ML parameters,
  // as required by typical smoothed aggregation for symmetric systems.
  // Other sets of parameters are available for non-symmetric systems
  // ("DD" and "DD-ML"), and for the Maxwell equations ("maxwell").
  ML_Epetra::SetDefaults("SA",MLList);
  
  // overwrite some parameters. Please refer to the user's guide
  // for more information. Some of the parameters do not differ from
  // their default values and are reported here for the sake of clarity.
  
  // output level, 0 being silent and 10 verbose
  MLList.set("ML output", 10);
  // maximum number of levels
  MLList.set("max levels",optMaxLevels);
  // set finest level to 0
  MLList.set("increasing or decreasing","increasing");
  MLList.set("coarse: max size",optMaxCoarseSize);

  // use Uncoupled scheme to create the aggregate
  MLList.set("aggregation: type", "Uncoupled");

  // smoother is Chebyshev. Example file 
  // `ml/examples/TwoLevelDD/ml_2level_DD.cpp' shows how to use
  // AZTEC's preconditioners as smoothers

  MLList.set("smoother: type","Chebyshev");
  MLList.set("smoother: Chebyshev alpha",optAlpha);
  MLList.set("smoother: sweeps",optSweeps);

  // use both pre and post smoothing
  MLList.set("smoother: pre or post", "both");

#ifdef HAVE_ML_AMESOS
  // solve with serial direct solver KLU
  MLList.set("coarse: type","Amesos-KLU");
#else
  // this is for testing purposes only, you should have 
  // a direct solver for the coarse problem (either Amesos, or the SuperLU/
  // SuperLU_DIST interface of ML)
  MLList.set("coarse: type","Jacobi");
#endif

  MLList.set("repartition: enable",1);
  MLList.set("repartition: start level",1);
  MLList.set("repartition: max min ratio",1.1);
  MLList.set("repartition: min per proc",800);
  MLList.set("repartition: partitioner","Zoltan");
  MLList.set("repartition: put on single proc",1);
  MLList.set("x-coordinates", x_coord);
  MLList.set("y-coordinates", y_coord);
  if (optMatrixType == "Laplace2D") {
    MLList.set("repartition: Zoltan dimensions",2);
  } else if (optMatrixType == "Laplace3D") {
    MLList.set("repartition: Zoltan dimensions",3);
    MLList.set("z-coordinates", z_coord);
  }

  MLList.set("print hierarchy",optWriteMatrices);
  //MLList.set("aggregation: damping factor",0.);

  // Read in XML options
  if (xmlFile != "")
    ML_Epetra::ReadXML(xmlFile,MLList,Comm);

  // Creates the preconditioning object. We suggest using `new' and
  // `delete' because the destructor contains some calls to MPI (as
  // required by ML and possibly Amesos). This is an issue only if the
  // destructor is called **after** MPI_Finalize().
  ML_Epetra::MultiLevelPreconditioner* MLPrec = 
    new ML_Epetra::MultiLevelPreconditioner(*A, MLList);

  // verify unused parameters on process 0 (put -1 to print on all
  // processes)
  MLPrec->PrintUnused(0);
#ifdef ML_SCALING
  timeVec[precBuild].value = MPI_Wtime() - timeVec[precBuild].value;
#endif

  // ML allows the user to cheaply recompute the preconditioner. You can
  // simply uncomment the following line:
  // 
  // MLPrec->ReComputePreconditioner();
  //
  // It is assumed that the linear system matrix has different values, but
  // **exactly** the same structure and layout. The code rebuilds the
  // hierarchy and re-sets up the smoothers and the coarse solver using
  // information already available on the hierarchy. Particular care is
  // required when using ReComputePreconditioner() with a nonzero
  // threshold.

  // =========================== end of ML part =============================
  
  // tell AztecOO to use the ML preconditioner, specify the solver
  // and the output, then solve with at most optMaxIts iterations and
  // a tolerance of optTol (see AztecOO's user guide for more details)
  
#ifdef ML_SCALING
  timeVec[solve].value = MPI_Wtime();
#endif
  solver.SetPrecOperator(MLPrec);
  solver.SetAztecOption(AZ_solver, AZ_cg);
  solver.SetAztecOption(AZ_output, 32);
  solver.Iterate(optMaxIts, optTol);
#ifdef ML_SCALING
  timeVec[solve].value = MPI_Wtime() - timeVec[solve].value;
#endif

  // destroy the preconditioner
  delete MLPrec;
  
  // compute the real residual

  double residual;
  LHS.Norm2(&residual);
  
  if( Comm.MyPID()==0 ) {
    cout << "||b-Ax||_2 = " << residual << endl;
  }

  // for testing purposes
  if (residual > 1e-5)
    exit(EXIT_FAILURE);

  delete A;
  delete Map;
  delete Coord;

#ifdef ML_SCALING
  timeVec[total].value = MPI_Wtime() - timeVec[total].value;

  //avg
  double dupTime[ntimers],avgTime[ntimers];
  for (int i=0; i<ntimers; i++) dupTime[i] = timeVec[i].value;
  MPI_Reduce(dupTime,avgTime,ntimers,MPI_DOUBLE,MPI_SUM,0,MPI_COMM_WORLD);
  for (int i=0; i<ntimers; i++) avgTime[i] = avgTime[i]/Comm.NumProc();
  //min
  MPI_Reduce(timeVec,minTime,ntimers,MPI_DOUBLE_INT,MPI_MINLOC,0,MPI_COMM_WORLD);
  //max
  MPI_Reduce(timeVec,maxTime,ntimers,MPI_DOUBLE_INT,MPI_MAXLOC,0,MPI_COMM_WORLD);

  if (Comm.MyPID() == 0) {
    printf("timing :  max (pid)  min (pid)  avg\n");
    printf("Problem build         :   %2.3e (%d)  %2.3e (%d)  %2.3e \n",
             maxTime[probBuild].value,maxTime[probBuild].rank,
             minTime[probBuild].value,minTime[probBuild].rank,
             avgTime[probBuild]);
    printf("Preconditioner build  :   %2.3e (%d)  %2.3e (%d)  %2.3e \n",
             maxTime[precBuild].value,maxTime[precBuild].rank,
             minTime[precBuild].value,minTime[precBuild].rank,
             avgTime[precBuild]);
    printf("Solve                 :   %2.3e (%d)  %2.3e (%d)  %2.3e \n",
             maxTime[solve].value,maxTime[solve].rank,
             minTime[solve].value,minTime[solve].rank,
             avgTime[solve]);
    printf("Total                 :   %2.3e (%d)  %2.3e (%d)  %2.3e \n",
             maxTime[total].value,maxTime[total].rank,
             minTime[total].value,minTime[total].rank,
             avgTime[total]);
  }
#endif

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return(EXIT_SUCCESS);
}
Example #8
int main(int argc, char *argv[])
{
  
#ifdef EPETRA_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif
  
  Epetra_Time Time(Comm);

  // initialize a Gallery object
  CrsMatrixGallery Gallery("laplace_3d", Comm, false); // CJ TODO FIXME: change for Epetra64
  Gallery.Set("problem_size", 1000);

  // retrieve pointers to the matrix and linear problem
  Epetra_RowMatrix * A = Gallery.GetMatrix();
  Epetra_LinearProblem * Problem = Gallery.GetLinearProblem();
  
  // Construct a solver object for this problem
  AztecOO solver(*Problem);
  
  // create the preconditioner object and compute hierarchy
  ML_Epetra::MultiLevelPreconditioner * MLPrec = 
    new ML_Epetra::MultiLevelPreconditioner(*A, true);

  // tell AztecOO to use this preconditioner, then solve
  solver.SetPrecOperator(MLPrec);
 
  solver.SetAztecOption(AZ_solver, AZ_gmres_condnum);
  solver.SetAztecOption(AZ_output, 32);
  solver.SetAztecOption(AZ_kspace, 160);
   
  int Niters = 500;
  solver.Iterate(Niters, 1e-12);

  // print out some information about the preconditioner
  if( Comm.MyPID() == 0 ) cout << MLPrec->GetOutputList();
  
  delete MLPrec;
  
  // compute the real residual

  double residual, diff;
  Gallery.ComputeResidual(&residual);
  Gallery.ComputeDiffBetweenStartingAndExactSolutions(&diff);
  
  if( Comm.MyPID()==0 ) {
    cout << "||b-Ax||_2 = " << residual << endl;
    cout << "||x_exact - x||_2 = " << diff << endl;
    cout << "Total Time = " << Time.ElapsedTime() << endl;
  }

  if (residual > 1e-5)
    exit(EXIT_FAILURE);
#ifdef EPETRA_MPI
  MPI_Finalize() ;
#endif
  return(EXIT_SUCCESS);
}
Example #9
int main(int argc, char *argv[])
{
  
#ifdef EPETRA_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  Epetra_Time Time(Comm);

  // Create the linear problem by reading the matrix from file in Aztec
  // MSR format; please refer to the Trilinos tutorial for more details.
  
  // create Aztec stuff
  int    proc_config[AZ_PROC_SIZE], options[AZ_OPTIONS_SIZE];
#ifdef ML_MPI
  /* get number of processors and the name of this processor */
  AZ_set_proc_config(proc_config, MPI_COMM_WORLD);
  int proc   = proc_config[AZ_node];
  int nprocs = proc_config[AZ_N_procs];
#else
  AZ_set_proc_config(proc_config, AZ_NOT_MPI);
  int proc   = 0;
  int nprocs = 1;
#endif
  // read in the matrix size
  FILE *fp = fopen("ExampleMatrices/cantilever2D/data_matrix.txt","r");
  int leng;
  fscanf(fp,"%d",&leng);
  int num_PDE_eqns=2;
  int N_grid_pts = leng/num_PDE_eqns;

  // make a linear distribution of the matrix respecting the blocks size
  int leng1 = leng/nprocs;
  int leng2 = leng-leng1*nprocs;
  if (proc >= leng2)
  {
     leng2 += (proc*leng1);
  }
  else
  {
     leng1++;
     leng2 = proc*leng1;
  }
  int     N_update = leng1;
  int*    update  = new int[N_update+1];
  int     i;
  double *val=NULL;
  int    *bindx=NULL;
  for (i=0; i<N_update; i++) update[i] = i+leng2;
  
  // create the Epetra_CrSMatrix
  Epetra_Map*        StandardMap = new Epetra_Map(leng,N_update,update,0,Comm);
  Epetra_CrsMatrix*  A           = new Epetra_CrsMatrix(Copy,*StandardMap,1);
  
  AZ_input_msr_matrix("ExampleMatrices/cantilever2D/data_matrix.txt",
                      update, &val, &bindx, N_update, proc_config);
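  // The matrix is returned in Aztec MSR format: val[i] holds the diagonal of
  // local row i, val[bindx[i]]..val[bindx[i+1]-1] hold its off-diagonal entries,
  // and bindx[bindx[i]]..bindx[bindx[i+1]-1] the corresponding global columns.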

  
  for (i=0; i<leng; i++)
  {
    int row = update[i];
    A->SumIntoGlobalValues(row,1,&(val[i]),&row);
    A->SumIntoGlobalValues(row,bindx[i+1]-bindx[i],&(val[bindx[i]]),&(bindx[bindx[i]]));
  }
  A->TransformToLocal();
  
  // create solution and right-hand side (MultiVectors are fine as well)
  Epetra_Vector* LHS = new Epetra_Vector(A->OperatorDomainMap());
  Epetra_Vector* RHS = new Epetra_Vector(A->OperatorRangeMap());
  LHS->Random();
  RHS->Random();

  // build the epetra linear problem
  Epetra_LinearProblem Problem(A, LHS, RHS);
  
  // Construct a solver object for this problem
  AztecOO solver(Problem);

  // =========================== begin of ML part ===========================
  
  // create a parameter list for ML options
  ParameterList MLList;

  // set defaults for classic smoothed aggregation
  ML_Epetra::SetDefaults("SA",MLList);
  MLList.set("aggregation: damping factor", 0.0);

  // number of relaxation sweeps
  MLList.set("adaptive: max sweeps", 10);
  // number of additional null space vectors to compute
  MLList.set("adaptive: num vectors",2);

#if 1
  ML_Epetra::MultiLevelPreconditioner* MLPrec = 
    new ML_Epetra::MultiLevelPreconditioner(dynamic_cast<Epetra_RowMatrix&>(*A), MLList, false);

  // need to allocate and fill the null space (also the
  // default one, as in this case). This vector is no longer
  // needed after a call to ComputeAdaptivePreconditioner().
  int NullSpaceSize = 2;
  vector<double> NullSpace((NullSpaceSize*A->NumMyRows()));
  for (i = 0 ; i < A->NumMyRows() ; ++i)
  {
    NullSpace[i] = 1.0;
    ++i;
    NullSpace[i] = 0.0;
  }
  for (i = A->NumMyRows() ; i < 2*A->NumMyRows() ; ++i)
  {
    NullSpace[i] = 0.0;
    ++i;
    NullSpace[i] = 1.0;
  }

  MLPrec->ComputeAdaptivePreconditioner(NullSpaceSize,&NullSpace[0]);
#else
  ML_Epetra::MultiLevelPreconditioner* MLPrec = 
    new ML_Epetra::MultiLevelPreconditioner(dynamic_cast<Epetra_RowMatrix&>(*A), MLList);
#endif

  // tell AztecOO to use this preconditioner, then solve
  solver.SetPrecOperator(MLPrec);

  // =========================== end of ML part =============================
  
  solver.SetAztecOption(AZ_solver, AZ_gmres);
  solver.SetAztecOption(AZ_output, 32);

  // solve with at most 1550 iterations and 1e-5 tolerance
  solver.Iterate(1550, 1e-5);

  delete MLPrec;
  
  // compute the real residual ||b - Ax||_2

  Epetra_Vector Ax(RHS->Map());
  A->Multiply(false, *LHS, Ax);
  Ax.Update(1.0, *RHS, -1.0);

  double residual;
  Ax.Norm2(&residual);

  if( Comm.MyPID()==0 ) {
    cout << "||b-Ax||_2 = " << residual << endl;
    cout << "Total Time = " << Time.ElapsedTime() << endl;
  }

#ifdef EPETRA_MPI
  MPI_Finalize();
#endif

  return(0);
  
}
Example #10
int main(int argc, char *argv[])
{

#ifdef ML_MPI
  MPI_Init(&argc,&argv);
  // This next bit of code drops a middle rank out of the calculation. This tests
  // that the Hiptmair smoother does not hang in its apply.  Hiptmair creates
  // two separate ML objects, one for edge and one for nodes.  By default, the ML
  // objects use MPI_COMM_WORLD, whereas the matrix that Hiptmair is being applied to
  // may have an MPI subcommunicator.
  int commWorldSize;
  MPI_Comm_size(MPI_COMM_WORLD,&commWorldSize);
  std::vector<int> splitKey;
  int rankToDrop = 1;
  for (int i=0; i<commWorldSize; ++i)
    splitKey.push_back(0);
  splitKey[rankToDrop] = MPI_UNDEFINED; //drop this rank from the subcommunicator
  int myrank;
  MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
  MPI_Comm subcomm;
  MPI_Comm_split(MPI_COMM_WORLD, splitKey[myrank], myrank, &subcomm);
  if (myrank == rankToDrop) goto droppedRankLabel;
#endif

{ //scoping to avoid compiler error about goto jumping over initialization
#ifdef ML_MPI
  Epetra_MpiComm Comm(subcomm);
#else
  Epetra_SerialComm Comm;
#endif

  ML_Comm_Create(&mlcomm);

  char *datafile;

#ifdef CurlCurlAndMassAreSeparate
  if (argc != 5 && argc != 7) {
    if (Comm.MyPID() == 0) {
      std::cout << "usage: ml_maxwell.exe <S> <M> <T> <Kn> [edge map] [node map]"
           << std::endl;
      std::cout << "        S = edge stiffness matrix file" << std::endl;
      std::cout << "        M = edge mass matrix file" << std::endl;
      std::cout << "        T = discrete gradient file" << std::endl;
      std::cout << "       Kn = auxiliary nodal FE matrix file" << std::endl;
      std::cout << " edge map = edge distribution over processors" << std::endl;
      std::cout << " node map = node distribution over processors" << std::endl;
      std::cout << argc << std::endl;
    }
#else //ifdef CurlCurlAndMassAreSeparate
  if (argc != 4 && argc != 6) {
    if (Comm.MyPID() == 0) {
      std::cout << "usage: ml_maxwell.exe <A> <T> <Kn> [edge map] [node map]" <<std::endl;
      std::cout << "        A = edge element matrix file" << std::endl;
      std::cout << "        T = discrete gradient file" << std::endl;
      std::cout << "       Kn = auxiliary nodal FE matrix file" << std::endl;
      std::cout << " edge map = edge distribution over processors" << std::endl;
      std::cout << " node map = node distribution over processors" << std::endl;
      std::cout << argc << std::endl;
    }
#endif //ifdef CurlCurlAndMassAreSeparate
#ifdef ML_MPI
    MPI_Finalize();
#endif
    exit(1);
  }

  Epetra_Map *edgeMap, *nodeMap;
  Epetra_CrsMatrix *CCplusM=NULL, *CurlCurl=NULL, *Mass=NULL, *T=NULL, *Kn=NULL;

  // ================================================= //
  // READ IN MAPS FROM FILE                            //
  // ================================================= //
  // every processor reads this in
#ifdef CurlCurlAndMassAreSeparate
  if (argc > 5)
#else
  if (argc > 4)
#endif
  {
    datafile = argv[5];
    if (Comm.MyPID() == 0) {
      printf("Reading in edge map from %s ...\n",datafile);
      fflush(stdout);
    }
    EpetraExt::MatrixMarketFileToMap(datafile, Comm, edgeMap);
    datafile = argv[6];
    if (Comm.MyPID() == 0) {
      printf("Reading in node map from %s ...\n",datafile);
      fflush(stdout);
    }
    EpetraExt::MatrixMarketFileToMap(datafile, Comm, nodeMap);
  }
  else { // linear maps
         // Read the T matrix to determine the map sizes
         // and then construct linear maps

    if (Comm.MyPID() == 0)
      printf("Using linear edge and node maps ...\n");

    const int lineLength = 1025;
    char line[lineLength];
    FILE *handle;
    int M,N,NZ;
#ifdef CurlCurlAndMassAreSeparate
     handle = fopen(argv[3],"r");
#else
     handle = fopen(argv[2],"r");
#endif
    if (handle == 0) EPETRA_CHK_ERR(-1); // file not found
    // Strip off header lines (which start with "%")
    do {
       if(fgets(line, lineLength, handle)==0) {if (handle!=0) fclose(handle);}
    } while (line[0] == '%');
    // Get problem dimensions: M, N, NZ
    if(sscanf(line,"%d %d %d", &M, &N, &NZ)==0) {if (handle!=0) fclose(handle);}
    fclose(handle);
    edgeMap = new Epetra_Map(M,0,Comm);
    nodeMap = new Epetra_Map(N,0,Comm);
  }

  // ===================================================== //
  // READ IN MATRICES FROM FILE                            //
  // ===================================================== //
#ifdef CurlCurlAndMassAreSeparate
  for (int i = 1; i <5; i++) {
    datafile = argv[i];
    if (Comm.MyPID() == 0) {
      printf("reading %s ....\n",datafile); fflush(stdout);
    }
    switch (i) {
    case 1: //Curl
      MatrixMarketFileToCrsMatrix(datafile,*edgeMap,*edgeMap,*edgeMap,CurlCurl);
      break;
    case 2: //Mass
      MatrixMarketFileToCrsMatrix(datafile, *edgeMap, *edgeMap, *edgeMap, Mass);
      break;
    case 3: //Gradient
      MatrixMarketFileToCrsMatrix(datafile, *edgeMap, *edgeMap,*nodeMap, T);
      break;
    case 4: //Auxiliary nodal matrix
      MatrixMarketFileToCrsMatrix(datafile, *nodeMap,*nodeMap, *nodeMap, Kn);
      break;
    } //switch
  } //for (int i = 1; i <5; i++)

#else
  for (int i = 1; i <4; i++) {
    datafile = argv[i];
    if (Comm.MyPID() == 0) {
      printf("reading %s ....\n",datafile); fflush(stdout);
    }
    switch (i) {
    case 1: //Edge element matrix
      MatrixMarketFileToCrsMatrix(datafile,*edgeMap,*edgeMap,*edgeMap,CCplusM);
      break;
    case 2: //Gradient
      MatrixMarketFileToCrsMatrix(datafile, *edgeMap, *edgeMap, *nodeMap, T);
      break;
    case 3: //Auxiliary nodal matrix
      MatrixMarketFileToCrsMatrix(datafile, *nodeMap, *nodeMap, *nodeMap, Kn);
      break;
    } //switch
  } //for (int i = 1; i <4; i++)
#endif //ifdef CurlCurlAndMassAreSeparate

  // ==================================================== //
  // S E T U P   O F    M L   P R E C O N D I T I O N E R //
  // ==================================================== //

  Teuchos::ParameterList MLList;
  int *options    = new int[AZ_OPTIONS_SIZE];
  double *params  = new double[AZ_PARAMS_SIZE];
  ML_Epetra::SetDefaults("maxwell", MLList, options, params);

  MLList.set("ML output", 10);
  MLList.set("aggregation: type", "Uncoupled");
  MLList.set("coarse: max size", 15);
  MLList.set("aggregation: threshold", 0.0);
  //MLList.set("negative conductivity",true);
  //MLList.set("smoother: type", "Jacobi");
  MLList.set("subsmoother: type", "symmetric Gauss-Seidel");
  //MLList.set("max levels", 2);

  // coarse level solve
  MLList.set("coarse: type", "Amesos-KLU");
  //MLList.set("coarse: type", "Hiptmair");
  //MLList.set("coarse: type", "Jacobi");

  //MLList.set("dump matrix: enable", true);

#ifdef CurlCurlAndMassAreSeparate
  //Create the matrix of interest.
  CCplusM = Epetra_MatrixAdd(CurlCurl,Mass,1.0);
#endif

#ifdef CurlCurlAndMassAreSeparate
  ML_Epetra::MultiLevelPreconditioner * MLPrec =
    new ML_Epetra::MultiLevelPreconditioner(*CurlCurl, *Mass, *T, *Kn, MLList);
  // Comment out the line above and uncomment the next one if you have
  // mass and curl separately but want to precondition as if they are added
  // together.
/*
  ML_Epetra::MultiLevelPreconditioner * MLPrec =
    new ML_Epetra::MultiLevelPreconditioner(*CCplusM, *T, *Kn, MLList);
*/
#else
  ML_Epetra::MultiLevelPreconditioner * MLPrec =
    new ML_Epetra::MultiLevelPreconditioner(*CCplusM, *T, *Kn, MLList);
#endif //ifdef CurlCurlAndMassAreSeparate

  MLPrec->PrintUnused(0);

  // ========================================================= //
  // D E F I N I T I O N   O F   A Z T E C O O   P R O B L E M //
  // ========================================================= //

  // create left-hand side and right-hand side, and populate them with
  // data from file. Both vectors are defined on the domain map of the
  // edge matrix.
  // Epetra_Vectors can be created in View mode, to accept pointers to
  // double vectors.

  if (Comm.MyPID() == 0)
    std::cout << "Putting in a zero initial guess and random rhs (in the range of S+M)" << std::endl;
  Epetra_Vector x(CCplusM->DomainMap());
  x.Random();
  Epetra_Vector rhs(CCplusM->DomainMap());
  CCplusM->Multiply(false,x,rhs);
  x.PutScalar(0.0);

  double vecnorm;
  rhs.Norm2(&vecnorm);
  if (Comm.MyPID() == 0) std::cout << "||rhs|| = " << vecnorm << std::endl;
  x.Norm2(&vecnorm);
  if (Comm.MyPID() == 0) std::cout << "||x|| = " << vecnorm << std::endl;

  // for AztecOO, we need an Epetra_LinearProblem
  Epetra_LinearProblem Problem(CCplusM,&x,&rhs);
  // AztecOO Linear problem
  AztecOO solver(Problem);
  // set MLPrec as the preconditioning operator for the AztecOO linear problem
  //std::cout << "no ml preconditioner!!!" << std::endl;
  solver.SetPrecOperator(MLPrec);
  //solver.SetAztecOption(AZ_precond, AZ_Jacobi);

  // a few options for AztecOO
  solver.SetAztecOption(AZ_solver, AZ_cg);
  solver.SetAztecOption(AZ_output, 1);

  solver.Iterate(15, 1e-3);

  // =============== //
  // C L E A N   U P //
  // =============== //

  delete MLPrec;    // destroy phase prints out some information
  delete CurlCurl;
  delete CCplusM;
  delete Mass;
  delete T;
  delete Kn;
  delete nodeMap;
  delete edgeMap;
  delete [] params;
  delete [] options;

  ML_Comm_Destroy(&mlcomm);
} //avoids compiler error about jumping over initialization
  droppedRankLabel:
#ifdef ML_MPI
  MPI_Finalize();
#endif

  return 0;

} //main

#else

#include <stdlib.h>
#include <stdio.h>
#ifdef HAVE_MPI
#include "mpi.h"
#endif

int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
#endif

  puts("Please configure ML with:");
#if !defined(HAVE_ML_EPETRA)
  puts("--enable-epetra");
#endif
#if !defined(HAVE_ML_TEUCHOS)
  puts("--enable-teuchos");
#endif
#if !defined(HAVE_ML_EPETRAEXT)
  puts("--enable-epetraext");
#endif
#if !defined(HAVE_ML_AZTECOO)
  puts("--enable-aztecoo");
#endif

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return 0;
}
Example #11
int main(int argc, char *argv[])
{
  
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  // Create the linear problem using the Galeri package.
  
  int NumPDEEqns = 5;

  Teuchos::ParameterList GaleriList;
  int nx = 32;
  GaleriList.set("nx", nx);
  GaleriList.set("ny", nx * Comm.NumProc());
  GaleriList.set("mx", 1);
  GaleriList.set("my", Comm.NumProc());

  Epetra_Map* Map = CreateMap("Cartesian2D", Comm, GaleriList);
  Epetra_CrsMatrix* CrsA = CreateCrsMatrix("Laplace2D", Map, GaleriList);
  Epetra_VbrMatrix* A = CreateVbrMatrix(CrsA, NumPDEEqns);

  Epetra_Vector LHS(A->Map()); LHS.Random();
  Epetra_Vector RHS(A->Map()); RHS.PutScalar(0.0);

  Epetra_LinearProblem Problem(A, &LHS, &RHS);

  AztecOO solver(Problem);

  // =========================== definition of coordinates =================
  
  // use the following Galeri function to get the
  // coordinates for a Cartesian grid. Note however that the
  // visualization capabilities of Trilinos accept unstructured grids as
  // well. Visualization and statistics occur just after the ML
  // preconditioner has been built.

  Epetra_MultiVector* Coord = CreateCartesianCoordinates("2D", &(A->Map()),
                                                         GaleriList);
  double* x_coord = (*Coord)[0];
  double* y_coord = (*Coord)[1];
  
  // =========================== begin of ML part ===========================
  
  // create a parameter list for ML options
  ParameterList MLList;
  int *options    = new int[AZ_OPTIONS_SIZE];
  double *params  = new double[AZ_PARAMS_SIZE];

  // set defaults
  ML_Epetra::SetDefaults("SA",MLList, options, params);
  
  // overwrite some parameters. Please refer to the user's guide
  // for more information. Some of the parameters do not differ from
  // their default values and are reported here for the sake of clarity.
  
  // maximum number of levels
  MLList.set("max levels",3);
  MLList.set("increasing or decreasing","increasing");
  MLList.set("smoother: type", "symmetric Gauss-Seidel");

  // aggregation scheme set to Uncoupled. Note that the aggregates
  // created by MIS can be visualized for serial runs only, while those
  // created by Uncoupled and METIS can be visualized for both serial
  // and parallel runs.
  MLList.set("aggregation: type", "Uncoupled");

  // ======================== //
  // visualization parameters //
  // ======================== //
  // 
  // - set "viz: enable" to `false' to disable visualization and
  //   statistics.
  // - set "x-coordinates" to the pointer of x-coor
  // - set "viz: equation to plot" to the number of equation to 
  //   be plotted (for vector problems only). Default is -1 (that is,
  //   plot all the equations)
  // - set "viz: print starting solution" to print on file 
  //   the starting solution vector, that was used for pre-
  //   and post-smoothing, and for the cycle. This may help to
  //   understand whether the smoothed solution is "smooth" 
  //   or not.
  //
  // NOTE: visualization occurs *after* the creation of the ML preconditioner,
  // by calling VisualizeAggregates(), VisualizeSmoothers(), and
  // VisualizeCycle(). However, the user *must* enable visualization 
  // *before* creating the ML object. This is because ML must store some
  // additional information about the aggregates.
  // 
  // NOTE: the options above work only for "viz: output format" == "xyz"
  // (default value) or "viz: output format" == "vtk".
  // If "viz: output format" == "dx", the user
  // can only plot the aggregates. 

  MLList.set("viz: output format", "vtk");
  MLList.set("viz: enable", true);
  MLList.set("x-coordinates", x_coord);
  MLList.set("y-coordinates", y_coord);
  MLList.set("z-coordinates", (double *)0);
  MLList.set("viz: print starting solution", true);

  // =============================== //
  // end of visualization parameters //
  // =============================== //

  // create the preconditioner object and compute hierarchy

  ML_Epetra::MultiLevelPreconditioner * MLPrec = 
    new ML_Epetra::MultiLevelPreconditioner(*A, MLList);

  // ============= //
  // visualization //
  // ============= //

  // 1.- print out the shape of the aggregates, plus some
  //     statistics
  // 2.- print out the effect of the pre-smoother and post-smoother
  //     on a random vector. The input integers represent
  //     the number of applications of the pre-smoother and post-smoother,
  //     respectively
  // 3.- print out the effect of the ML cycle on a random vector.
  //     The integer parameter represents the number of cycles.
  // Below, `5' and `1' refer to the number of pre-smoother and
  // post-smoother applications. `10' refers to the number of ML
  // cycle applications. In both cases, smoothers and ML cycle are
  // applied to a random vector.

  MLPrec->VisualizeAggregates();
  MLPrec->VisualizeSmoothers(5,1);
  MLPrec->VisualizeCycle(10);

  // ==================== //
  // end of visualization //
  // ==================== //

  // destroy the preconditioner
  delete MLPrec;
  
  delete [] options;
  delete [] params;
  
  delete A;
  delete Coord;
  delete Map;

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return(EXIT_SUCCESS);
}
Example #12
int main(int argc, char *argv[])
{

#ifdef EPETRA_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  // `Laplace2D' is a symmetric matrix; an example of non-symmetric
  // matrices is `Recirc2D' (advection-diffusion in a box, with
  // recirculating flow). The grid has nx x ny nodes, divided into
  // mx x my subdomains, each assigned to a different processor.
  int nx = 8;
  int ny = 8 * Comm.NumProc();

  ParameterList GaleriList;
  GaleriList.set("nx", nx);
  GaleriList.set("ny", ny);
  GaleriList.set("mx", 1);
  GaleriList.set("my", Comm.NumProc());

  Epetra_Map* Map = CreateMap("Cartesian2D", Comm, GaleriList);
  Epetra_CrsMatrix* A = CreateCrsMatrix("Laplace2D", Map, GaleriList);

  // use the following Galeri function to get the
  // coordinates for a Cartesian grid.

  Epetra_MultiVector* Coord = CreateCartesianCoordinates("2D", &(A->Map()),
                                                         GaleriList);
  double* x_coord = (*Coord)[0];
  double* y_coord = (*Coord)[1];

  // Create a linear problem with trivial (zero) solution, using a random starting vector
  Epetra_Vector LHS(*Map); LHS.Random();
  Epetra_Vector RHS(*Map); RHS.PutScalar(0.0);

  Epetra_LinearProblem Problem(A, &LHS, &RHS);

  // As we wish to use AztecOO, we need to construct a solver object for this problem
  AztecOO solver(Problem);

  // =========================== begin of ML part ===========================

  // create a parameter list for ML options
  ParameterList MLList;

  // set defaults for classic smoothed aggregation.
  ML_Epetra::SetDefaults("SA",MLList);

  // use a user-defined aggregation scheme to create the aggregates
  // 1.- set "user" as the aggregation scheme (for all levels, or for
  //     a specific level only)
  MLList.set("aggregation: type", "user");
  // 2.- set the label (for output)
  ML_SetUserLabel(UserLabel);
  // 3.- set the aggregation scheme (see function above)
  ML_SetUserPartitions(UserPartitions);
  // 4.- set the coordinates.
  MLList.set("x-coordinates", x_coord);
  MLList.set("y-coordinates", y_coord);
  MLList.set("aggregation: dimensions", 2);

  // also set up some variables to visualize the aggregates
  // (more details are reported in example `ml_viz.cpp').
  MLList.set("viz: enable", true);

  // now we create the preconditioner
  ML_Epetra::MultiLevelPreconditioner * MLPrec =
    new ML_Epetra::MultiLevelPreconditioner(*A, MLList);

  MLPrec->VisualizeAggregates();

  // tell AztecOO to use this preconditioner, then solve
  solver.SetPrecOperator(MLPrec);

  // =========================== end of ML part =============================

  solver.SetAztecOption(AZ_solver, AZ_cg_condnum);
  solver.SetAztecOption(AZ_output, 32);

  // solve with 500 iterations and 1e-12 tolerance
  solver.Iterate(500, 1e-12);

  delete MLPrec;

  // compute the real residual

  double residual;
  LHS.Norm2(&residual);

  if (Comm.MyPID() == 0)
  {
    cout << "||b-Ax||_2 = " << residual << endl;
  }

  delete Coord;
  delete A;
  delete Map;

  if (residual > 1e-3)
    exit(EXIT_FAILURE);

#ifdef EPETRA_MPI
  MPI_Finalize();
#endif

  exit(EXIT_SUCCESS);

}