Example #1
int main(int argc, char *argv[])
{

#ifdef EPETRA_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  // Creates the linear problem using the Galeri package.
  // Various matrix examples are supported; please refer to the
  // Galeri documentation for more details.
  // This matrix is a simple VBR matrix, constructed by replicating
  // a point-matrix on each unknown. This example is
  // useful to test the vector capabilities of ML, or to debug
  // code for vector problems. The number of equations is here
  // hardwired as 5, but any positive number (including 1) can be
  // specified.
  //
  // NOTE: The Epetra interface of ML has only limited capabilities
  // for matrices with variable block size (that is, it works best
  // when the number of equations in each block row is constant).
  // If you are interested in variable block capabilities, please
  // contact the ML developers.

  int NumPDEEqns = 5;

  // build up a 9-point Laplacian in 2D. This stencil will lead to
  // "perfect" aggregates, of square shape, using almost all the ML
  // aggregation schemes.
  // The number of nodes in the x-direction (nx) can be given on the
  // command line; ny is scaled with the number of processes so that each
  // subdomain is a square. The user can also specify the number of points
  // in the x- and y-direction and the length of the x- and y-side of the
  // computational domain (a hedged sketch follows the GaleriList settings
  // below). Please refer to the Trilinos tutorial for more details.
  //
  // Note also that this gallery matrix has no boundary nodes.

  int nx;
  if (argc > 1)
    nx = (int) strtol(argv[1],NULL,10);
  else
    nx = 16;

  Teuchos::ParameterList GaleriList;
  GaleriList.set("nx", nx);
  GaleriList.set("ny", nx * Comm.NumProc());
  GaleriList.set("mx", 1);
  GaleriList.set("my", Comm.NumProc());

  Epetra_Map* Map = CreateMap("Cartesian2D", Comm, GaleriList);
  Epetra_CrsMatrix* CrsA = CreateCrsMatrix("Star2D", Map, GaleriList);
  Epetra_VbrMatrix* A = CreateVbrMatrix(CrsA, NumPDEEqns);

  Epetra_Vector LHS(A->Map()); LHS.Random();
  Epetra_Vector RHS(A->Map()); RHS.PutScalar(0.0);

  Epetra_LinearProblem Problem(A, &LHS, &RHS);

  // Construct a solver object for this problem
  AztecOO solver(Problem);

  // =========================== begin of ML part ===========================

  // create a parameter list for ML options
  ParameterList MLList;

  // set defaults for classic smoothed aggregation
  ML_Epetra::SetDefaults("SA",MLList);

  // overwrite some parameters. Please refer to the user's guide
  // for more information.
  // Some of the parameters below do not differ from their default
  // values; they are reported here only for the sake of clarity.

  // maximum number of levels
  MLList.set("max levels",5);
  MLList.set("increasing or decreasing","increasing");

  // set different aggregation schemes for each level. Depending on the
  // size of your problem, the hierarchy will contain a different number
  // of levels. As `Uncoupled' and `METIS' are local aggregation
  // schemes, they should be used only for the finest level. `MIS' and
  // `ParMETIS' are global aggregation schemes (meaning that the
  // aggregates can span across processes), and should be reserved for
  // the coarsest levels.
  // Note also that `Uncoupled' and `MIS' will always try to create
  // aggregates of diameter 3 (in the graph sense), while `METIS' and
  // `ParMETIS' can generate aggregates of any size.

  MLList.set("aggregation: type (level 0)", "Uncoupled");
  MLList.set("aggregation: type (level 1)", "MIS");

  // this is recognized by `METIS' and `ParMETIS' only
  MLList.set("aggregation: nodes per aggregate", 9);

  // smoother is Gauss-Seidel. Example file
  // ml_2level_DD.cpp shows how to use
  // AZTEC's preconditioners as smoothers
  MLList.set("smoother: type","Gauss-Seidel");

  // use both pre and post smoothing. Non-symmetric problems may take
  // advantage of pre-smoothing or post-smoothing only.
  MLList.set("smoother: pre or post", "both");

  // solve with serial direct solver KLU
  MLList.set("coarse: type","Amesos-KLU");

  // create the preconditioner object and compute hierarchy
  ML_Epetra::MultiLevelPreconditioner * MLPrec =
    new ML_Epetra::MultiLevelPreconditioner(*A, MLList);

  // tell AztecOO to use this preconditioner, then solve
  solver.SetPrecOperator(MLPrec);
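
  // (Hedged note, not in the original example.) If the matrix values change
  // later while the sparsity pattern stays the same, recent ML versions allow
  // the hierarchy to be recomputed on the existing object, e.g. via
  // MLPrec->ReComputePreconditioner(); treat the exact call as an assumption
  // and check the ML user's guide of the installed version.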

  // =========================== end of ML part =============================

  solver.SetAztecOption(AZ_solver, AZ_cg_condnum);
  solver.SetAztecOption(AZ_output, 32);

  // solve with at most 500 iterations and a tolerance of 1e-7
  solver.Iterate(500, 1e-7);

  delete MLPrec;

  // Check the quality of the computed solution. Since the right-hand side
  // is zero, the exact solution is the zero vector, so the 2-norm of the
  // computed LHS is used as the convergence check.

  double residual;
  LHS.Norm2(&residual);

  if (Comm.MyPID() == 0)
  {
    cout << "||x||_2 (exact solution is zero) = " << residual << endl;
  }
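
  // (Hedged sketch, not in the original example.) The check above relies on
  // the norm of the computed solution; the true residual b - A*x could be
  // formed explicitly, as Example #5 does:
  //
  //   Epetra_Vector Res(A->Map());
  //   A->Apply(LHS, Res);           // Res = A * LHS
  //   Res.Update(1.0, RHS, -1.0);   // Res = RHS - A * LHS
  //   Res.Norm2(&residual);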

  if (residual > 1e-3)
    exit(EXIT_FAILURE);

  delete A;
  delete CrsA;
  delete Map;

#ifdef EPETRA_MPI
  MPI_Finalize() ;
#endif

  return(EXIT_SUCCESS);
}
Example #2
int Aztec2Petra(int * proc_config,
		AZ_MATRIX * Amat, double * az_x, double * az_b,
		Epetra_Comm * & comm,
		Epetra_BlockMap * & map,
		Epetra_RowMatrix * &A,
		Epetra_Vector * & x,
		Epetra_Vector * & b,
		int ** global_indices) {

  bool do_throw = false;

#ifdef EPETRA_NO_32BIT_GLOBAL_INDICES
  do_throw = true;
#else
  do_throw =
    map->GlobalIndicesLongLong() ||
    A->RowMatrixRowMap().GlobalIndicesLongLong();
#endif

  if(do_throw) {
    // We throw rather than let the compiler error out so that the
    // rest of the library is available and all possible tests can run.

    const char* error = "Aztec2Petra: Not available for 64-bit Maps.";
    std::cerr << error << std::endl;
    throw error;
  }

#ifndef EPETRA_NO_32BIT_GLOBAL_INDICES // REMOVE BEGIN
  // If 32-bit global indices are not available, remove the code below
  // with the preprocessor; otherwise the VbrMatrix functions cause
  // linker issues.

  // Build Epetra_Comm object

#ifdef AZTEC_MPI
    MPI_Comm * mpicomm = (MPI_Comm * ) AZ_get_comm(proc_config);
    comm = (Epetra_Comm *) new Epetra_MpiComm(*mpicomm);
#else
    comm = (Epetra_Comm *) new Epetra_SerialComm();
#endif  

  int * MyGlobalElements, *global_bindx, *update;
  
  if (!Amat->has_global_indices) {
    //create a global bindx
    AZ_revert_to_global(proc_config, Amat, &global_bindx, &update);
    MyGlobalElements = update;
  }
  else // Already have global ordering
    {
      global_bindx = Amat->bindx;
      MyGlobalElements = Amat->update;
      if (MyGlobalElements==0) EPETRA_CHK_ERR(-1);
    }

  // Get matrix information
  int NumMyElements = 0;
  if (Amat->data_org[AZ_matrix_type] == AZ_VBR_MATRIX)
    NumMyElements = Amat->data_org[AZ_N_int_blk] + Amat->data_org[AZ_N_bord_blk];
  else
    NumMyElements = Amat->data_org[AZ_N_internal] + Amat->data_org[AZ_N_border];
  // int NumMyElements = Amat->N_update; // Note: This "official" way does not always work
  int * bpntr = Amat->bpntr;
  int * rpntr = Amat->rpntr;
  int * indx = Amat->indx;
  double * val = Amat->val;

  int NumGlobalElements;
  comm->SumAll(&NumMyElements, &NumGlobalElements, 1);


  // Make ElementSizeList (if VBR) - the size (number of point equations) of each block row

  int * ElementSizeList = 0;

  if (Amat->data_org[AZ_matrix_type] == AZ_VBR_MATRIX) {
  
    ElementSizeList = new int[NumMyElements];
    if (ElementSizeList==0) EPETRA_CHK_ERR(-1); // Ran out of memory
    
    for (int i=0; i<NumMyElements; i++) ElementSizeList[i] = rpntr[i+1] - rpntr[i];

#ifdef EPETRA_NO_32BIT_GLOBAL_INDICES
    map = 0;
#else
    map = new Epetra_BlockMap(NumGlobalElements, NumMyElements, MyGlobalElements, 
			     ElementSizeList, 0, *comm);
#endif

    if (map==0) EPETRA_CHK_ERR(-2); // Ran out of memory

    delete [] ElementSizeList;
 
    Epetra_VbrMatrix * AA = new Epetra_VbrMatrix(View, *map, 0);
  
    if (AA==0) EPETRA_CHK_ERR(-3); // Ran out of memory

    /* Add block rows one-at-a-time */
    {for (int i=0; i<NumMyElements; i++) {
      int BlockRow = MyGlobalElements[i];
      int NumBlockEntries = bpntr[i+1] - bpntr[i];
      int *BlockIndices = global_bindx + bpntr[i];
      int ierr = AA->BeginInsertGlobalValues(BlockRow, NumBlockEntries, BlockIndices);
      if (ierr!=0) {
	cerr << "Error in BeginInsertGlobalValues(GlobalBlockRow = " << BlockRow 
	     << ") = " << ierr << endl; 
	EPETRA_CHK_ERR(ierr);
      }
      int LDA = rpntr[i+1] - rpntr[i];
      int NumRows = LDA;
      for (int j=bpntr[i]; j<bpntr[i+1]; j++) {
	int NumCols = (indx[j+1] - indx[j])/LDA;
	double * Values = val + indx[j];
	ierr = AA->SubmitBlockEntry(Values, LDA, NumRows, NumCols);
	if (ierr!=0) {
	  cerr << "Error in SubmitBlockEntry, GlobalBlockRow = " << BlockRow 
	       << "GlobalBlockCol = " << BlockIndices[j] << "Error = " << ierr << endl; 
	  EPETRA_CHK_ERR(ierr);
	}
      }
      ierr = AA->EndSubmitEntries();
      if (ierr!=0) {
	cerr << "Error in EndSubmitEntries(GlobalBlockRow = " << BlockRow 
	     << ") = " << ierr << endl; 
	EPETRA_CHK_ERR(ierr);
      }
    }}  
    int ierr=AA->FillComplete();    
    if (ierr!=0) {
      cerr <<"Error in Epetra_VbrMatrix FillComplete" << ierr << endl;
      EPETRA_CHK_ERR(ierr);
    }
    
    A = dynamic_cast<Epetra_RowMatrix *> (AA); // cast VBR pointer to RowMatrix pointer
  }
  else if  (Amat->data_org[AZ_matrix_type] == AZ_MSR_MATRIX) {
  
    /* Make numNz - number of nonzero entries in each row (off-diagonals plus the diagonal) */

    int * numNz = new int[NumMyElements];
    for (int i=0; i<NumMyElements; i++) numNz[i] = global_bindx[i+1] - global_bindx[i] + 1;

#ifdef EPETRA_NO_32BIT_GLOBAL_INDICES
    Epetra_Map * map1 = 0;
#else
    Epetra_Map * map1 = new Epetra_Map(NumGlobalElements, NumMyElements,
				     MyGlobalElements, 0, *comm);
#endif

    Epetra_CrsMatrix * AA = new Epetra_CrsMatrix(Copy, *map1, numNz);

    map = (Epetra_BlockMap *) map1; // cast Epetra_Map to Epetra_BlockMap

    /* Add  rows one-at-a-time */

    for (int row=0; row<NumMyElements; row++) {
      double * row_vals = val + global_bindx[row];
      int * col_inds = global_bindx + global_bindx[row];
      int numEntries = global_bindx[row+1] - global_bindx[row];
#ifdef EPETRA_NO_32BIT_GLOBAL_INDICES
      int ierr = 1;
#else
      int ierr = AA->InsertGlobalValues(MyGlobalElements[row], numEntries, row_vals, col_inds);
#endif
      if (ierr!=0) {
	cerr << "Error puting row " << MyGlobalElements[row] << endl;
	EPETRA_CHK_ERR(ierr);
      }
#ifdef EPETRA_NO_32BIT_GLOBAL_INDICES
      ierr = 1;
#else
      ierr = AA->InsertGlobalValues(MyGlobalElements[row], 1, val+row, MyGlobalElements+row);
#endif
      if (ierr!=0) {
	cerr << "Error putting  diagonal" << endl;
	EPETRA_CHK_ERR(ierr);
      }
    }

    int ierr=AA->FillComplete();
    if (ierr!=0) {
      cerr << "Error in Epetra_CrsMatrix_FillComplete" << endl;
      EPETRA_CHK_ERR(ierr);
    }
    A = dynamic_cast<Epetra_RowMatrix *> (AA); // cast CRS pointer to RowMatrix pointer
  }
  else cerr << "Not a supported AZ_MATRIX data type" << endl;


  // Create x vector
  x = new Epetra_Vector(View, *map, az_x);

  
  // RPP: Can not use the OperatorRangeMap in the ctor of the "b" vector 
  // below.  In MPSalsa, we delete the VbrMatrix yet still use the vector "b".
  // Deleting the matrix deletes the OperatorRangeMap that the b vector is 
  // based on.  Losing the map means "b" and all vectors that are created 
  // with the copy constructor of "b" break.  Mike has suggested 
  // using reference counting (Boost smart pointers) so the map is not 
  // deleted.  For now we will use the "map" variable as the base map for "b". 
  //b = new Epetra_Vector (View, A->OperatorRangeMap(), az_b);
  b = new Epetra_Vector (View, *map, az_b);

  *global_indices = 0; // Assume return array will be empty
  if (!Amat->has_global_indices) {
    AZ_free((void *) update);
    if (Amat->data_org[AZ_matrix_type] != AZ_VBR_MATRIX)
      AZ_free((void *) global_bindx);
    else
      *global_indices = global_bindx; // hand the global bindx array back to the caller
  }
#endif // EPETRA_NO_32BIT_GLOBAL_INDICES REMOVE END

  return 0;
}
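The function above allocates the Epetra_Comm, the map, the matrix, both vectors, and (in the global-index VBR case) the array returned through global_indices. Below is a minimal caller-side cleanup sketch, assuming the caller owns all of these objects and that the raw az_x/az_b arrays remain managed by Aztec (the vectors only view them); the helper name is hypothetical.

// Hypothetical helper, not part of Aztec2Petra; ownership assumptions as above.
static void FreeAztec2PetraObjects(Epetra_Comm* comm, Epetra_BlockMap* map,
                                   Epetra_RowMatrix* A, Epetra_Vector* x,
                                   Epetra_Vector* b, int* global_indices)
{
  delete x;      // views of az_x / az_b: deleting them does not free the raw arrays
  delete b;
  delete A;      // the Epetra_VbrMatrix or Epetra_CrsMatrix built above
  delete map;
  delete comm;
  if (global_indices != 0)
    AZ_free((void*) global_indices);  // only non-null in the VBR/global-bindx case
}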
Example #3
int main(int argc, char *argv[])
{
  
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  // Create the linear problem using the Galeri package.
  
  int NumPDEEqns = 5;

  Teuchos::ParameterList GaleriList;
  int nx = 32;
  GaleriList.set("nx", nx);
  GaleriList.set("ny", nx * Comm.NumProc());
  GaleriList.set("mx", 1);
  GaleriList.set("my", Comm.NumProc());

  Epetra_Map* Map = CreateMap("Cartesian2D", Comm, GaleriList);
  Epetra_CrsMatrix* CrsA = CreateCrsMatrix("Laplace2D", Map, GaleriList);
  Epetra_VbrMatrix* A = CreateVbrMatrix(CrsA, NumPDEEqns);

  Epetra_Vector LHS(A->Map()); LHS.Random();
  Epetra_Vector RHS(A->Map()); RHS.PutScalar(0.0);

  Epetra_LinearProblem Problem(A, &LHS, &RHS);

  AztecOO solver(Problem);

  // =========================== definition of coordinates =================
  
  // use the following Galeri function to get the
  // coordinates for a Cartesian grid. Note however that the
  // visualization capabilities of Trilinos accept unstructured grids as
  // well. Visualization and statistics occur just after the ML
  // preconditioner has been built.

  Epetra_MultiVector* Coord = CreateCartesianCoordinates("2D", &(A->Map()),
                                                         GaleriList);
  double* x_coord = (*Coord)[0];
  double* y_coord = (*Coord)[1];
  
  // =========================== begin of ML part ===========================
  
  // create a parameter list for ML options
  ParameterList MLList;
  int *options    = new int[AZ_OPTIONS_SIZE];
  double *params  = new double[AZ_PARAMS_SIZE];

  // set defaults
  ML_Epetra::SetDefaults("SA",MLList, options, params);
  
  // overwrite some parameters. Please refer to the user's guide
  // for more information.
  // Some of the parameters below do not differ from their default
  // values; they are reported here only for the sake of clarity.
  
  // maximum number of levels
  MLList.set("max levels",3);
  MLList.set("increasing or decreasing","increasing");
  MLList.set("smoother: type", "symmetric Gauss-Seidel");

  // aggregation scheme set to Uncoupled. Note that aggregates
  // created by MIS can be visualized for serial runs only, while
  // aggregates created by Uncoupled and METIS can be visualized
  // for both serial and parallel runs.
  MLList.set("aggregation: type", "Uncoupled");

  // ======================== //
  // visualization parameters //
  // ======================== //
  // 
  // - set "viz: enable" to `false' to disable visualization and
  //   statistics.
  // - set "x-coordinates" to the pointer of x-coor
  // - set "viz: equation to plot" to the number of equation to 
  //   be plotted (for vector problems only). Default is -1 (that is,
  //   plot all the equations)
  // - set "viz: print starting solution" to print on file 
  //   the starting solution vector, that was used for pre-
  //   and post-smoothing, and for the cycle. This may help to
  //   understand whether the smoothed solution is "smooth" 
  //   or not.
  //
  // NOTE: visualization occurs *after* the creation of the ML preconditioner,
  // by calling VisualizeAggregates(), VisualizeSmoothers(), and
  // VisualizeCycle(). However, the user *must* enable visualization 
  // *before* creating the ML object. This is because ML must store some
  // additional information about the aggregates.
  // 
  // NOTE: the options above work only for "viz: output format" == "xyz"
  // (default value) or "viz: output format" == "vtk".
  // If "viz: output format" == "dx", the user
  // can only plot the aggregates. 

  MLList.set("viz: output format", "vtk");
  MLList.set("viz: enable", true);
  MLList.set("x-coordinates", x_coord);
  MLList.set("y-coordinates", y_coord);
  MLList.set("z-coordinates", (double *)0);
  MLList.set("viz: print starting solution", true);

  // =============================== //
  // end of visualization parameters //
  // =============================== //

  // create the preconditioner object and compute hierarchy

  ML_Epetra::MultiLevelPreconditioner * MLPrec = 
    new ML_Epetra::MultiLevelPreconditioner(*A, MLList);

  // ============= //
  // visualization //
  // ============= //

  // 1.- print out the shape of the aggregates, plus some
  //     statistics
  // 2.- print out the effect of the pre-smoother and post-smoother
  //     on a random vector. The input integers represent
  //     the number of applications of the pre-smoother and
  //     post-smoother, respectively
  // 3.- print out the effect of the ML cycle on a random vector.
  //     The integer parameter represents the number of cycles.
  // Below, `5' and `1' refer to the number of pre-smoother and
  // post-smoother applications, and `10' to the number of ML
  // cycle applications. In both cases, the smoothers and the ML cycle
  // are applied to a random vector.

  MLPrec->VisualizeAggregates();
  MLPrec->VisualizeSmoothers(5,1);
  MLPrec->VisualizeCycle(10);

  // ==================== //
  // end of visualization //
  // ==================== //

  // destroy the preconditioner
  delete MLPrec;
  
  delete [] options;
  delete [] params;
  
  delete A;
  delete Coord;
  delete Map;

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return(EXIT_SUCCESS);
}
Example #4
int main(int argc, char *argv[])
{
  int i;

#ifdef EPETRA_MPI
  // Initialize MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm comm;
#endif

  // Uncomment to debug in parallel:
  // int tmp; if (comm.MyPID()==0) cin >> tmp; comm.Barrier();

  bool verbose = false;

  // Check if we should print results to standard out
  if (argc>1) if (argv[1][0]=='-' && argv[1][1]=='v') verbose = true;

  if (!verbose) comm.SetTracebackMode(0); // This should shut down any error traceback reporting

  if (verbose) cout << comm << endl << flush;

  if (verbose) verbose = (comm.MyPID()==0);

  if (verbose)
    cout << EpetraExt::EpetraExt_Version() << endl << endl;

  int nx = 128;
  int ny = comm.NumProc()*nx; // Scale y grid with number of processors

  // Create funky stencil to make sure the matrix is non-symmetric (transpose non-trivial):

  // (i-1,j-1) (i-1,j  )
  // (i  ,j-1) (i  ,j  ) (i  ,j+1)
  // (i+1,j-1) (i+1,j  )

  int npoints = 7;

  int xoff[] = {-1,  0,  1, -1,  0,  1,  0};
  int yoff[] = {-1, -1, -1,  0,  0,  0,  1};

  Epetra_Map * map;
  Epetra_CrsMatrix * A;
  Epetra_Vector * x, * b, * xexact;
	
  Trilinos_Util_GenerateCrsProblem(nx, ny, npoints, xoff, yoff, comm, map, A, x, b, xexact);

  if (nx<8)
  {
    cout << *A << endl;
    cout << "X exact = " << endl << *xexact << endl;
    cout << "B       = " << endl << *b << endl;
  }

  // Construct transposer 
  Epetra_Time timer(comm);

  double start = timer.ElapsedTime();

  //bool IgnoreNonLocalCols = false;
  bool MakeDataContiguous = true;
  EpetraExt::RowMatrix_Transpose transposer( MakeDataContiguous );

  if (verbose) cout << "\nTime to construct transposer  = " << timer.ElapsedTime() - start << endl;
  
  start = timer.ElapsedTime();
  Epetra_CrsMatrix & transA = dynamic_cast<Epetra_CrsMatrix&>(transposer(*A));

  if (verbose) cout << "\nTime to create transpose matrix  = " << timer.ElapsedTime() - start << endl;
 	
  // Now test output of transposer by performing matvecs
  int ierr = 0;
  ierr += checkResults(A, &transA, xexact, verbose);


  // Now change values in original matrix and test update facility of transposer
  // Add 2 to the diagonal of each row
  double Value = 2.0;
  for (i=0; i<A->NumMyRows(); i++)
    A->SumIntoMyValues(i, 1, &Value, &i);

  start = timer.ElapsedTime();
  transposer.fwd();

  if (verbose) cout << "\nTime to update transpose matrix  = " << timer.ElapsedTime() - start << endl;
 	
  ierr += checkResults(A, &transA, xexact, verbose);

  delete A;
  delete b;
  delete x;
  delete xexact;
  delete map;

  if (verbose) cout << endl << "Checking transposer for VbrMatrix objects" << endl<< endl;

  int nsizes = 4;
  int sizes[] = {4, 6, 5, 3};

  Epetra_VbrMatrix * Avbr;
  Epetra_BlockMap * bmap;

  Trilinos_Util_GenerateVbrProblem(nx, ny, npoints, xoff, yoff, nsizes, sizes,
                                   comm, bmap, Avbr, x, b, xexact);

  if (nx<8)
  {
    cout << *Avbr << endl;
    cout << "X exact = " << endl << *xexact << endl;
    cout << "B       = " << endl << *b << endl;
  }

  start = timer.ElapsedTime();
  EpetraExt::RowMatrix_Transpose transposer1( MakeDataContiguous );

  Epetra_CrsMatrix & transA1 = dynamic_cast<Epetra_CrsMatrix&>(transposer1(*Avbr));
  if (verbose) cout << "\nTime to create transpose matrix  = " << timer.ElapsedTime() - start << endl;
 	
  // Now test output of transposer by performing matvecs
  ierr += checkResults(Avbr, &transA1, xexact, verbose);

  // Now change values in original matrix and test update facility of transposer
  // Scale matrix on the left by rowsums

  Epetra_Vector invRowSums(Avbr->RowMap());

  Avbr->InvRowSums(invRowSums);
  Avbr->LeftScale(invRowSums);

  start = timer.ElapsedTime();
  transposer1.fwd();
  if (verbose) cout << "\nTime to update transpose matrix  = " << timer.ElapsedTime() - start << endl;
 	
  ierr += checkResults(Avbr, &transA1, xexact, verbose);

  delete Avbr;
  delete b;
  delete x;
  delete xexact;
  delete bmap;

#ifdef EPETRA_MPI
  MPI_Finalize();
#endif

  return ierr;
}
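checkResults() is not shown above. The following is a minimal sketch of a consistency check in the same spirit, assuming the usual Epetra headers are included and that the matrices share compatible maps (as the generated test problems do): it compares A^T x with the explicitly transposed matrix applied to x. It is not the actual checkResults() used by the test.

// Hypothetical sketch; tolerance and signature are assumptions, not the real checkResults().
int checkTransposeSketch(Epetra_RowMatrix* A, Epetra_CrsMatrix* transA,
                         Epetra_Vector* xexact, bool verbose)
{
  Epetra_Vector y1(A->OperatorDomainMap());       // holds A^T * x
  Epetra_Vector y2(transA->OperatorRangeMap());   // holds transA * x

  A->Multiply(true, *xexact, y1);       // transpose apply of the original matrix
  transA->Multiply(false, *xexact, y2); // ordinary apply of the transposed matrix

  y1.Update(-1.0, y2, 1.0);             // y1 = y1 - y2
  double diff;
  y1.Norm2(&diff);
  if (verbose) cout << "||A^T x - transA x||_2 = " << diff << endl;
  return (diff < 1.0e-12) ? 0 : 1;      // tolerance chosen arbitrarily for the sketch
}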
Example #5
int main(int argc, char *argv[])
{

#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  int nx;
  if (argc > 1)
    nx = (int) strtol(argv[1],NULL,10);
  else
    nx = 256;
  int ny = nx * Comm.NumProc(); // each subdomain is a square

  ParameterList GaleriList;
  GaleriList.set("nx", nx);
  GaleriList.set("ny", ny);
  GaleriList.set("mx", 1);
  GaleriList.set("my", Comm.NumProc());

  int NumNodes = nx*ny;
  int NumPDEEqns = 2;

  Epetra_Map* Map = CreateMap("Cartesian2D", Comm, GaleriList);
  Epetra_CrsMatrix* CrsA = CreateCrsMatrix("Laplace2D", Map, GaleriList);
  Epetra_VbrMatrix* A = CreateVbrMatrix(CrsA, NumPDEEqns);

  Epetra_Vector LHS(A->DomainMap()); LHS.PutScalar(0);
  Epetra_Vector RHS(A->DomainMap()); RHS.Random();
  Epetra_LinearProblem Problem(A, &LHS, &RHS);
  AztecOO solver(Problem);
  double *x_coord = 0, *y_coord = 0, *z_coord = 0;

  Epetra_MultiVector *coords = CreateCartesianCoordinates("2D", &(CrsA->Map()),
                                                          GaleriList);

  double **ttt;
  if (!coords->ExtractView(&ttt)) {
    x_coord = ttt[0];
    y_coord = ttt[1];
  } else {
    printf("Error extracting coordinate vectors\n");
#   ifdef HAVE_MPI
    MPI_Finalize() ;
#   endif
    exit(EXIT_FAILURE);
  }

  ParameterList MLList;
  SetDefaults("SA",MLList);
  MLList.set("ML output",10);
  MLList.set("max levels",10);
  MLList.set("increasing or decreasing","increasing");
  MLList.set("smoother: type", "Chebyshev");
  MLList.set("smoother: sweeps", 3);

  // number of aggregates per process on the next level:
  // *) if a low number, all the available processes will be used
  // *) if a big number, only processor 0 will be used on the next level
  MLList.set("aggregation: next-level aggregates per process", 1);

  MLList.set("aggregation: type (level 0)", "Zoltan");
  MLList.set("aggregation: type (level 1)", "Uncoupled");
  MLList.set("aggregation: type (level 2)", "Zoltan");
  MLList.set("aggregation: smoothing sweeps", 2);

  MLList.set("x-coordinates", x_coord);
  MLList.set("y-coordinates", y_coord);
  MLList.set("z-coordinates", z_coord);

  // specify the reduction with respect to the previous level
  // (very small values can break the code)
  int ratio = 16;
  MLList.set("aggregation: global aggregates (level 0)",
             NumNodes / ratio);
  MLList.set("aggregation: global aggregates (level 1)",
             NumNodes / (ratio * ratio));
  MLList.set("aggregation: global aggregates (level 2)",
             NumNodes / (ratio * ratio * ratio));
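
  // For the default serial run (nx = 256, ny = 256), NumNodes = 65536, so the
  // requested aggregate counts are 65536/16 = 4096 on level 0, 65536/256 = 256
  // on level 1, and 65536/4096 = 16 on level 2.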

  MultiLevelPreconditioner* MLPrec =
    new MultiLevelPreconditioner(*A, MLList, true);

  solver.SetPrecOperator(MLPrec);
  solver.SetAztecOption(AZ_solver, AZ_cg_condnum);
  solver.SetAztecOption(AZ_output, 1);
  solver.Iterate(100, 1e-12);

  // compute the real residual
  Epetra_Vector Residual(A->DomainMap());
  A->Apply(LHS, Residual);                     // Residual = A * LHS
  // Residual = 1.0 * RHS + 0.0 * RHS - 1.0 * Residual = b - A * x
  Residual.Update(1.0, RHS, 0.0, RHS, -1.0);
  double rn;
  Residual.Norm2(&rn);

  if (Comm.MyPID() == 0 )
    std::cout << "||b-Ax||_2 = " << rn << endl;

  if (Comm.MyPID() == 0 && rn > 1e-5) {
    std::cout << "TEST FAILED!!!!" << endl;
#   ifdef HAVE_MPI
    MPI_Finalize() ;
#   endif
    exit(EXIT_FAILURE);
  }

  delete MLPrec;
  delete coords;
  delete Map;
  delete CrsA;
  delete A;

  if (Comm.MyPID() == 0)
    std::cout << "TEST PASSED" << endl;

#ifdef HAVE_MPI
  MPI_Finalize() ;
#endif

  exit(EXIT_SUCCESS);

}