int main(int argc, char* argv[])
{
  using Teuchos::RCP;
  using Teuchos::rcp;

#ifdef HAVE_MPI
  MPI_Init (&argc, &argv);
  Epetra_MpiComm Comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  // Create a parameter list
  Teuchos::ParameterList GaleriList;

  // Set the number of discretization points in the x and y direction.
  GaleriList.set ("nx", 10 * Comm.NumProc ());
  GaleriList.set ("ny", 10);

  // Create the map and matrix using the parameter list for a 2D Laplacian.
  RCP<Epetra_Map> Map = rcp (CreateMap ("Cartesian2D", Comm, GaleriList));
  RCP<Epetra_CrsMatrix> Matrix = rcp (CreateCrsMatrix ("Laplace2D", &*Map, GaleriList));

  // Print out the map and matrix.
  Map->Print (std::cout);
  Matrix->Print (std::cout);

#ifdef HAVE_MPI
  MPI_Finalize ();
#endif
  return 0;
}
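Building the map and matrix is only half of a working example. Here is a minimal sketch of solving the resulting Laplace2D system, modeled on the AztecOO usage in the later examples; the LHS/RHS names, iteration count, and tolerance are illustrative, not part of the original:

// A hedged sketch; insert before MPI_Finalize() in the main() above.
Epetra_Vector LHS (*Map);   // initial guess
Epetra_Vector RHS (*Map);   // right-hand side
LHS.PutScalar (0.0);
RHS.Random ();

AztecOO solver;
solver.SetUserMatrix (&*Matrix);
solver.SetLHS (&LHS);
solver.SetRHS (&RHS);
solver.SetAztecOption (AZ_solver, AZ_cg);  // Laplace2D is SPD, so CG applies
solver.Iterate (100, 1.0e-8);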
Example #2
// This constructor is for just one subdomain, so it only adds the
// information for multiple time steps on that domain.  No two-level
// parallelism.
MultiMpiComm::MultiMpiComm(const Epetra_MpiComm& EpetraMpiComm_, int numTimeSteps_,
                           const Teuchos::EVerbosityLevel verbLevel) :
        Epetra_MpiComm(EpetraMpiComm_),
        Teuchos::VerboseObject<MultiMpiComm>(verbLevel),
        myComm(Teuchos::rcp(new Epetra_MpiComm(EpetraMpiComm_))),
        subComm(0)
{

  numSubDomains = 1;
  subDomainRank = 0;
  numTimeSteps = numTimeSteps_;
  numTimeStepsOnDomain = numTimeSteps_;
  firstTimeStepOnDomain = 0;

  subComm = new Epetra_MpiComm(EpetraMpiComm_);

  // Create split communicators for time domain
  MPI_Comm time_split_MPI_Comm;
  int rank = EpetraMpiComm_.MyPID();
  (void) MPI_Comm_split(EpetraMpiComm_.Comm(), rank, rank,
                        &time_split_MPI_Comm);
  timeComm = new Epetra_MpiComm(time_split_MPI_Comm);
  numTimeDomains = EpetraMpiComm_.NumProc();
  timeDomainRank = rank;
}
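A minimal usage sketch for this constructor; the number of time steps (4) and the explicit verbosity level are illustrative assumptions:

// A hedged sketch: constructing the single-subdomain variant shown above.
Epetra_MpiComm world (MPI_COMM_WORLD);
MultiMpiComm multiComm (world, 4, Teuchos::VERB_DEFAULT);
// Every process keeps the full spatial communicator (subComm) and gets
// its own single-process time communicator (timeComm).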
Example #3
//
// The same main() driver routine as in the first Epetra lesson.
//
int
main (int argc, char *argv[])
{
    using std::cout;
    using std::endl;

#ifdef HAVE_MPI
    MPI_Init (&argc, &argv);
    Epetra_MpiComm comm (MPI_COMM_WORLD);
#else
    Epetra_SerialComm comm;
#endif // HAVE_MPI

    if (comm.MyPID () == 0) {
        cout << "Total number of processes: " << comm.NumProc () << endl;
    }

    // Do something with the new Epetra communicator.
    exampleRoutine (comm, cout);

    // This tells the Trilinos test framework that the test passed.
    if (comm.MyPID () == 0) {
        cout << "End Result: TEST PASSED" << endl;
    }

#ifdef HAVE_MPI
    // Since you called MPI_Init, you are responsible for calling
    // MPI_Finalize after you are done using MPI.
    (void) MPI_Finalize ();
#endif // HAVE_MPI

    return 0;
}
Example #4
int
main (int argc, char *argv[])
{
  using std::cout;
  using std::endl;

#ifdef HAVE_MPI
  MPI_Init (&argc, &argv);
  Epetra_MpiComm comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm comm;
#endif // HAVE_MPI

  const int myRank = comm.MyPID ();
  const int numProcs = comm.NumProc ();

  if (myRank == 0) {
    // Print out the Epetra software version.
    cout << Epetra_Version () << endl << endl
         << "Total number of processes: " << numProcs << endl;
  }

  example (comm); // Run the whole example.

  // This tells the Trilinos test framework that the test passed.
  if (myRank == 0) {
    cout << "End Result: TEST PASSED" << endl;
  }

#ifdef HAVE_MPI
  (void) MPI_Finalize ();
#endif // HAVE_MPI

  return 0;
}
Example #5
int main(int argc, char *argv[]) 
{
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  if (Comm.NumProc() != 1)
  {
    cerr << "To be run with one processor only" << endl;
#ifdef HAVE_MPI
    MPI_Finalize();
#endif
    exit(EXIT_SUCCESS);
  }

  Epetra_Map Map(8, 0, Comm);

  Epetra_CrsMatrix A(Copy, Map, 0);

  // For this matrix the incomplete factorization is the exact one,
  // so ILU and ILUT must behave as exact solvers.
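  // (The loop below builds a lower bidiagonal, hence lower triangular,
  // matrix.  For a triangular matrix an incomplete LU factorization
  // reproduces the exact factors, which is why the preconditioned
  // solves should converge essentially immediately.)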
  for (int row = 0; row < 8; ++row)
  {
    double value = 2.0 + row;
    A.InsertGlobalValues(row, 1, &value, &row);
    if (row)
    {
      int col = row - 1;
      value = 1.0 + row;
      A.InsertGlobalValues(row, 1, &value, &col);
    }
#if 0
    if (row != Map.NumGlobalElements() - 1)
    {
      int col = row + 1;
      value = 0.0;
      A.InsertGlobalValues(row, 1, &value, &col);
    }
#endif
  }

  A.FillComplete();

  Test<Ifpack_ILU>("Ifpack_ILU", A);
  Test<Ifpack_ILUT>("Ifpack_ILUT", A);
  Test<Ifpack_AdditiveSchwarz<Ifpack_ILU> >("AS, Ifpack_ILU", A);
  Test<Ifpack_AdditiveSchwarz<Ifpack_ILUT> >("AS, Ifpack_ILUT", A);

#ifdef HAVE_MPI
  MPI_Finalize() ;
#endif

  return(EXIT_SUCCESS);
}
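The Test<> helper invoked above is defined elsewhere in this test program. As a rough sketch of what such a driver does, assuming it builds the preconditioner and runs a preconditioned AztecOO solve (TestSketch and its body are illustrative, not the original code):

// A hedged sketch of a Test<>-style driver (illustrative, not the original).
template <class PrecType>
void TestSketch (const std::string& label, Epetra_CrsMatrix& A)
{
  PrecType Prec (&A);            // e.g. Ifpack_ILU, Ifpack_ILUT, ...
  Teuchos::ParameterList List;   // default parameters
  Prec.SetParameters (List);
  Prec.Initialize ();
  Prec.Compute ();

  Epetra_Vector LHS (A.RowMap ());
  Epetra_Vector RHS (A.RowMap ());
  LHS.PutScalar (0.0);
  RHS.Random ();

  AztecOO solver;
  solver.SetUserMatrix (&A);
  solver.SetLHS (&LHS);
  solver.SetRHS (&RHS);
  solver.SetAztecOption (AZ_solver, AZ_gmres); // the matrix is nonsymmetric
  solver.SetPrecOperator (&Prec);
  solver.Iterate (100, 1.0e-8);
  std::cout << label << ": " << solver.NumIters () << " iterations" << std::endl;
}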
Example #6
int
main (int argc, char *argv[])
{
  // These "using" declarations make the code more concise, in that
  // you don't have to write the namespace along with the class or
  // object name.  This is especially helpful with commonly used
  // things like std::endl.
  using std::cout;
  using std::endl;

  // We assume that your code calls MPI_Init.  It's bad form
  // to ignore the error codes returned by MPI functions, but
  // we do so here for brevity.
  (void) MPI_Init (&argc, &argv);

  // This code takes the place of whatever you do to get an MPI_Comm.
  MPI_Comm yourComm = MPI_COMM_WORLD;

  // If your code plans to use MPI on its own, as well as through
  // Trilinos, you should strongly consider giving Trilinos a copy
  // of your MPI_Comm (created via MPI_Comm_dup).  Trilinos may in
  // the future duplicate the MPI_Comm automatically, but it does
  // not currently do this.

  // Wrap the MPI_Comm.  You are responsible for calling MPI_Comm_free
  // on your MPI_Comm after use, if necessary.  (It's not necessary or
  // legal to do this for built-in communicators like MPI_COMM_WORLD
  // or MPI_COMM_SELF.)
  Epetra_MpiComm comm (yourComm);
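  // A hedged sketch of the MPI_Comm_dup variant suggested above (the
  // trilinosComm name is illustrative):
  //
  //   MPI_Comm trilinosComm;
  //   (void) MPI_Comm_dup (yourComm, &trilinosComm);
  //   Epetra_MpiComm comm (trilinosComm);
  //   // ... use comm ...
  //   (void) MPI_Comm_free (&trilinosComm); // after use, before MPI_Finalize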

  // Epetra_Comm has methods that wrap basic MPI functionality.
  // MyPID() is equivalent to MPI_Comm_rank; it returns my process'
  // rank.  NumProc() is equivalent to MPI_Comm_size; it returns the
  // total number of processes in the communicator.
  const int myRank = comm.MyPID ();
  const int numProcs = comm.NumProc ();
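  // For reference, the equivalent raw MPI calls would be:
  //
  //   int myRank, numProcs;
  //   (void) MPI_Comm_rank (yourComm, &myRank);
  //   (void) MPI_Comm_size (yourComm, &numProcs);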

  if (myRank == 0) {
    cout << "Total number of processes: " << numProcs << endl;
  }

  // Do something with the new Epetra communicator.
  exampleRoutine (comm, cout);

  // This tells the Trilinos test framework that the test passed.
  if (myRank == 0) {
    cout << "End Result: TEST PASSED" << endl;
  }

  // If you need to call MPI_Comm_free on your MPI_Comm, now would be
  // the time to do so, before calling MPI_Finalize.

  // Since you called MPI_Init, you are responsible for calling
  // MPI_Finalize after you are done using MPI.
  (void) MPI_Finalize ();
  return 0;
}
Example #7
void reportAverageTimes(Epetra_MpiComm &myEpetraComm){
  double myTime(0.0), globalSumTime(0.0);
  for(auto it: timeNames){
    myTime = accumulatedTimes[it.first];
    globalSumTime = 0.0;
    myEpetraComm.SumAll(&myTime, &globalSumTime, 1);
    if(myEpetraComm.MyPID() == 0)
      std::cout << "Average " << timeNames[it.first]
                << " time per iteration, averaged over all processors, was: "
                << (globalSumTime/myEpetraComm.NumProc())/NUM_ITERATIONS << std::endl;
  }
}
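reportAverageTimes relies on a few globals defined elsewhere in its program; this is a sketch of the shapes it assumes (illustrative, not the original definitions):

// A hedged sketch of the globals that reportAverageTimes assumes:
#include <map>
#include <string>

std::map<int, std::string> timeNames;        // timer id -> human-readable label
std::map<int, double>      accumulatedTimes; // timer id -> total elapsed seconds
const int NUM_ITERATIONS = 100;              // illustrative iteration count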
Example #8
int main(int argc, char *argv[]) {

#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  int MyPID = Comm.MyPID();
  bool verbose = false; 
  if (MyPID==0) verbose = true;

  // The problem is defined on a 2D grid; the global size is nx * (nx * number of processes).
  int nx = 30;
  Teuchos::ParameterList GaleriList;
  GaleriList.set("nx", nx);
  GaleriList.set("ny", nx * Comm.NumProc());
  GaleriList.set("mx", 1);
  GaleriList.set("my", Comm.NumProc());
  Teuchos::RefCountPtr<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap("Cartesian2D", Comm, GaleriList) );
  Teuchos::RefCountPtr<Epetra_CrsMatrix> A = Teuchos::rcp( Galeri::CreateCrsMatrix("Laplace2D", &*Map, GaleriList) );
  Teuchos::RefCountPtr<Epetra_MultiVector> LHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) );
  Teuchos::RefCountPtr<Epetra_MultiVector> RHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) );
  LHS->PutScalar(0.0); RHS->Random();

  // ============================ //
  // Construct ILU preconditioner //
  // ---------------------------- //

  // Use deliberately nontrivial ("funky") values to check that they
  // have the same influence on the old and the new algorithms alike.
  int    LevelFill = 2;
  double DropTol = 0.3333;
  double Condest;
  
  Teuchos::RefCountPtr<Ifpack_CrsIct> ICT;
  ICT = Teuchos::rcp( new Ifpack_CrsIct(*A,DropTol,LevelFill) );
  ICT->SetAbsoluteThreshold(0.00123);
  ICT->SetRelativeThreshold(0.9876);
  // Init values from A
  ICT->InitValues(*A);
  // compute the factors
  ICT->Factor();
  // and now estimate the condition number
  ICT->Condest(false,Condest);
  
  if( Comm.MyPID() == 0 ) {
    cout << "Condition number estimate (level-of-fill = "
	 << LevelFill <<  ") = " << Condest << endl;
  }

  // Define label for printing out during the solve phase
  string label = "Ifpack_CrsIct Preconditioner: LevelFill = " + toString(LevelFill) + 
                                                 " Overlap = 0"; 
  ICT->SetLabel(label.c_str());
  
  // Here we create an AztecOO object
  LHS->PutScalar(0.0);

  int Niters = 1200;

  AztecOO solver;
  solver.SetUserMatrix(&*A);
  solver.SetLHS(&*LHS);
  solver.SetRHS(&*RHS);
  solver.SetAztecOption(AZ_solver,AZ_cg);
  solver.SetPrecOperator(&*ICT);
  solver.SetAztecOption(AZ_output, 16); 
  solver.Iterate(Niters, 5.0e-5);

  int OldIters = solver.NumIters();

  // Now rebuild the same preconditioner using the Ifpack factory; we
  // expect the same number of iterations.

  Ifpack Factory;
  Teuchos::RefCountPtr<Ifpack_Preconditioner> Prec = Teuchos::rcp( Factory.Create("IC", &*A) );

  Teuchos::ParameterList List;
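  // Note: ParameterList::get(name, defaultValue) inserts defaultValue
  // into the list when the parameter is absent, so these calls populate
  // the list much as set() would.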
  List.get("fact: level-of-fill", 2);
  List.get("fact: drop tolerance", 0.3333);
  List.get("fact: absolute threshold", 0.00123);
  List.get("fact: relative threshold", 0.9876);
  List.get("fact: relaxation value", 0.0);

  IFPACK_CHK_ERR(Prec->SetParameters(List));
  IFPACK_CHK_ERR(Prec->Compute());

  // Reset the initial guess and reuse the AztecOO object.
  LHS->PutScalar(0.0);

  solver.SetUserMatrix(&*A);
  solver.SetLHS(&*LHS);
  solver.SetRHS(&*RHS);
  solver.SetAztecOption(AZ_solver,AZ_cg);
  solver.SetPrecOperator(&*Prec);
  solver.SetAztecOption(AZ_output, 16); 
  solver.Iterate(Niters, 5.0e-5);

  int NewIters = solver.NumIters();

  if (OldIters != NewIters)
    IFPACK_CHK_ERR(-1);

#ifdef HAVE_MPI
  MPI_Finalize() ;
#endif

  return(EXIT_SUCCESS);
}
Example #9
int 
main (int argc, char *argv[])
{
  // These "using" statements make the code a bit more concise.
  using std::cout;
  using std::endl;

  int ierr = 0;

  // If Trilinos was built with MPI, initialize MPI, otherwise
  // initialize the serial "communicator" that stands in for MPI.
#ifdef EPETRA_MPI
  MPI_Init (&argc,&argv);
  Epetra_MpiComm Comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  const int MyPID = Comm.MyPID();
  const int NumProc = Comm.NumProc();
  // We only allow (MPI) Process 0 to write to stdout.
  const bool verbose = (MyPID == 0);
  const int NumGlobalElements = 100;

  if (verbose)
    cout << Epetra_Version() << endl << endl;

  // Asking the Epetra_Comm to print itself is a good test for whether
  // you are running in an MPI environment.  However, it will print
  // something on all MPI processes, so you should remove it for a
  // large-scale parallel run.
  cout << Comm << endl;

  if (NumGlobalElements < NumProc)
    {
      if (verbose)
        cout << "numGlobalBlocks = " << NumGlobalElements 
             << " cannot be < number of processors = " << NumProc << endl;
      std::exit (EXIT_FAILURE);
    }

  // Construct a Map that puts approximately the same number of rows
  // of the matrix A on each processor.
  Epetra_Map Map (NumGlobalElements, 0, Comm);

  // Get update list and number of local equations from newly created Map.
  int NumMyElements = Map.NumMyElements();

  std::vector<int> MyGlobalElements(NumMyElements);
  Map.MyGlobalElements(&MyGlobalElements[0]);

  // NumNz[i] is the number of nonzero elements in row i of the sparse
  // matrix on this MPI process.  Epetra_CrsMatrix uses this to figure
  // out how much space to allocate.
  std::vector<int> NumNz (NumMyElements);

  // We are building a tridiagonal matrix where each row contains the
  // nonzero elements (-1 2 -1).  Thus, we need 2 off-diagonal terms,
  // except for the first and last row of the matrix.
  for (int i = 0; i < NumMyElements; ++i)
    if (MyGlobalElements[i] == 0 || MyGlobalElements[i] == NumGlobalElements-1)
      NumNz[i] = 2; // First or last row
    else
      NumNz[i] = 3; // Not the first or last row

  // Create the Epetra_CrsMatrix.
  Epetra_CrsMatrix A (Copy, Map, &NumNz[0]);

  //
  // Add rows to the sparse matrix one at a time.
  //
  std::vector<double> Values(2);
  Values[0] = -1.0; Values[1] = -1.0;
  std::vector<int> Indices(2);
  const double two = 2.0;
  int NumEntries;

  for (int i = 0; i < NumMyElements; ++i)
    {
      if (MyGlobalElements[i] == 0)
        { // The first row of the matrix.
          Indices[0] = 1;
          NumEntries = 1;
        }
      else if (MyGlobalElements[i] == NumGlobalElements - 1)
        { // The last row of the matrix.
          Indices[0] = NumGlobalElements-2;
          NumEntries = 1;
        }
      else
        { // Any row of the matrix other than the first or last.
          Indices[0] = MyGlobalElements[i]-1;
          Indices[1] = MyGlobalElements[i]+1;
          NumEntries = 2;
        }
      ierr = A.InsertGlobalValues(MyGlobalElements[i], NumEntries, &Values[0], &Indices[0]);
      assert (ierr==0);
      // Insert the diagonal entry.
      ierr = A.InsertGlobalValues(MyGlobalElements[i], 1, &two, &MyGlobalElements[i]);
      assert(ierr==0);
    }

  // Finish up.  We can call FillComplete() with no arguments, because
  // the matrix is square.
  ierr = A.FillComplete ();
  assert (ierr==0);

  // Parameters for the power method.
  const int niters = NumGlobalElements*10;
  const double tolerance = 1.0e-2;

  //
  // Run the power method.  Keep track of the flop count and the total
  // elapsed time.
  //
  Epetra_Flops counter;
  A.SetFlopCounter(counter);
  Epetra_Time timer(Comm);
  double lambda = 0.0;
  ierr += powerMethod (lambda, A, niters, tolerance, verbose);
  double elapsedTime = timer.ElapsedTime ();
  double totalFlops = counter.Flops ();
  // Mflop/s: Million floating-point arithmetic operations per second.
  double Mflop_per_s = totalFlops / elapsedTime / 1000000.0;

  if (verbose) 
    cout << endl << endl << "Total Mflop/s for first solve = " 
         << Mflop_per_s << endl<< endl;

  // Increase the first (0,0) diagonal entry of the matrix.
  if (verbose) 
    cout << endl << "Increasing magnitude of first diagonal term, solving again"
         << endl << endl << endl;

  if (A.MyGlobalRow (0)) {
    int numvals = A.NumGlobalEntries (0);
    std::vector<double> Rowvals (numvals);
    std::vector<int> Rowinds (numvals);
    A.ExtractGlobalRowCopy (0, numvals, numvals, &Rowvals[0], &Rowinds[0]); // Get a copy of row 0
    for (int i = 0; i < numvals; ++i) 
      if (Rowinds[i] == 0) 
        Rowvals[i] *= 10.0;

    A.ReplaceGlobalValues (0, numvals, &Rowvals[0], &Rowinds[0]);
  }

  //
  // Run the power method again.  Keep track of the flop count and the
  // total elapsed time.
  //
  lambda = 0.0;
  timer.ResetStartTime();
  counter.ResetFlops();
  ierr += powerMethod (lambda, A, niters, tolerance, verbose);
  elapsedTime = timer.ElapsedTime();
  totalFlops = counter.Flops();
  Mflop_per_s = totalFlops / elapsedTime / 1000000.0;

  if (verbose) 
    cout << endl << endl << "Total Mflop/s for second solve = " 
         << Mflop_per_s << endl << endl;

#ifdef EPETRA_MPI
  MPI_Finalize() ;
#endif

  return ierr;
}
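The powerMethod routine called above is defined elsewhere in this lesson. Here is a minimal sketch of such a routine, under the assumption that it runs plain power iteration and reports the Rayleigh quotient; the name powerMethodSketch marks it as illustrative, not the original code:

// A hedged sketch of a power-method routine.
int
powerMethodSketch (double& lambda, Epetra_CrsMatrix& A,
                   const int niters, const double tolerance,
                   const bool verbose)
{
  Epetra_Vector q (A.RowMap ());
  Epetra_Vector z (A.RowMap ());
  Epetra_Vector resid (A.RowMap ());

  z.Random (); // Start with a random vector.
  double normz, residual;
  for (int iter = 0; iter < niters; ++iter) {
    z.Norm2 (&normz);
    q.Scale (1.0 / normz, z);   // q := z / ||z||
    A.Multiply (false, q, z);   // z := A * q
    q.Dot (z, &lambda);         // lambda := q' * z  (Rayleigh quotient)
    if (iter % 100 == 0 || iter + 1 == niters) {
      resid.Update (1.0, z, -lambda, q, 0.0); // resid := A*q - lambda*q
      resid.Norm2 (&residual);
      if (verbose)
        std::cout << "Iter = " << iter << "  Lambda = " << lambda
                  << "  Residual of A*q - lambda*q = " << residual << std::endl;
      if (residual < tolerance)
        return 0; // Converged.
    }
  }
  return 1; // Did not converge.
}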
Example #10
int main(int argc, char *argv[]) {

#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  // The problem is defined on a 2D grid; the global size is nx * (nx * number of processes).
  int nx = 30;
  Teuchos::ParameterList GaleriList;
  GaleriList.set("nx", nx);
  GaleriList.set("ny", nx * Comm.NumProc());
  GaleriList.set("mx", 1);
  GaleriList.set("my", Comm.NumProc());
  Teuchos::RefCountPtr<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap("Cartesian2D", Comm, GaleriList) );
  Teuchos::RefCountPtr<Epetra_CrsMatrix> A = Teuchos::rcp( Galeri::CreateCrsMatrix("Laplace2D", &*Map, GaleriList) );
  Teuchos::RefCountPtr<Epetra_MultiVector> LHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) );
  Teuchos::RefCountPtr<Epetra_MultiVector> RHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) );
  LHS->PutScalar(0.0); RHS->Random();

  // ========================================= //
  // Compare IC preconditioners to no precond. //
  // ----------------------------------------- //

  const double tol = 1e-5;
  const int maxIter = 500;

  // Baseline: No preconditioning
  // Compute number of iterations, to compare to IC later.

  // Here we create an AztecOO object
  LHS->PutScalar(0.0);

  AztecOO solver;
  solver.SetUserMatrix(&*A);
  solver.SetLHS(&*LHS);
  solver.SetRHS(&*RHS);
  solver.SetAztecOption(AZ_solver,AZ_cg);
  //solver.SetPrecOperator(&*PrecDiag);
  solver.SetAztecOption(AZ_output, 16); 
  solver.Iterate(maxIter, tol);

  int Iters = solver.NumIters();
  //cout << "No preconditioner iterations: " << Iters << endl;

#if 0 
  // Not sure how to use Ifpack_CrsRick - leave out for now.
  //
  // Use deliberately nontrivial ("funky") values to check that they
  // have the same influence on the old and the new algorithms alike.
  int    LevelFill = 2;
  double DropTol = 0.3333;
  double Condest;
  
  Teuchos::RefCountPtr<Ifpack_CrsRick> IC;
  Ifpack_IlukGraph mygraph (A->Graph(), 0, 0);
  IC = Teuchos::rcp( new Ifpack_CrsRick(*A, mygraph) );
  IC->SetAbsoluteThreshold(0.00123);
  IC->SetRelativeThreshold(0.9876);
  // Init values from A
  IC->InitValues(*A);
  // compute the factors
  IC->Factor();
  // and now estimate the condition number
  IC->Condest(false,Condest);
  
  if( Comm.MyPID() == 0 ) {
    cout << "Condition number estimate (level-of-fill = "
	 << LevelFill <<  ") = " << Condest << endl;
  }

  // Define label for printing out during the solve phase
  std::string label = "Ifpack_CrsRick Preconditioner: LevelFill = " + toString(LevelFill) + 
                                                 " Overlap = 0"; 
  IC->SetLabel(label.c_str());
  
  // Here we create an AztecOO object
  LHS->PutScalar(0.0);

  AztecOO solver;
  solver.SetUserMatrix(&*A);
  solver.SetLHS(&*LHS);
  solver.SetRHS(&*RHS);
  solver.SetAztecOption(AZ_solver,AZ_cg);
  solver.SetPrecOperator(&*IC);
  solver.SetAztecOption(AZ_output, 16); 
  solver.Iterate(maxIter, tol);

  int RickIters = solver.NumIters();
  //cout << "Ifpack_Rick iterations: " << RickIters << endl;

  // Compare to no preconditioning
  if (RickIters > Iters/2)
    IFPACK_CHK_ERR(-1);

#endif

  //////////////////////////////////////////////////////
  // Same test with Ifpack_IC
  // This is Crout threshold Cholesky, so different from IC(0)

  Ifpack Factory;
  Teuchos::RefCountPtr<Ifpack_Preconditioner> PrecIC = Teuchos::rcp( Factory.Create("IC", &*A) );

  Teuchos::ParameterList List;
  //List.get("fact: ict level-of-fill", 2.);
  //List.get("fact: drop tolerance", 0.3333);
  //List.get("fact: absolute threshold", 0.00123);
  //List.get("fact: relative threshold", 0.9876);
  //List.get("fact: relaxation value", 0.0);

  IFPACK_CHK_ERR(PrecIC->SetParameters(List));
  IFPACK_CHK_ERR(PrecIC->Compute());

  // Reset the initial guess and reuse the AztecOO object.
  LHS->PutScalar(0.0);

  //AztecOO solver;
  solver.SetUserMatrix(&*A);
  solver.SetLHS(&*LHS);
  solver.SetRHS(&*RHS);
  solver.SetAztecOption(AZ_solver,AZ_cg);
  solver.SetPrecOperator(&*PrecIC);
  solver.SetAztecOption(AZ_output, 16); 
  solver.Iterate(maxIter, tol);

  int ICIters = solver.NumIters();
  //cout << "Ifpack_IC iterations: " << ICIters << endl;

  // Compare to no preconditioning
  if (ICIters > Iters/2)
    IFPACK_CHK_ERR(-1);

#if 0
  //////////////////////////////////////////////////////
  // Same test with Ifpack_ICT 
  // This is another threshold Cholesky

  Teuchos::RefCountPtr<Ifpack_Preconditioner> PrecICT = Teuchos::rcp( Factory.Create("ICT", &*A) );

  //Teuchos::ParameterList List;
  //List.get("fact: level-of-fill", 2);
  //List.get("fact: drop tolerance", 0.3333);
  //List.get("fact: absolute threshold", 0.00123);
  //List.get("fact: relative threshold", 0.9876);
  //List.get("fact: relaxation value", 0.0);

  IFPACK_CHK_ERR(PrecICT->SetParameters(List));
  IFPACK_CHK_ERR(PrecICT->Compute());

  // Reset the initial guess and reuse the AztecOO object.
  LHS->PutScalar(0.0);

  solver.SetUserMatrix(&*A);
  solver.SetLHS(&*LHS);
  solver.SetRHS(&*RHS);
  solver.SetAztecOption(AZ_solver,AZ_cg);
  solver.SetPrecOperator(&*PrecICT);
  solver.SetAztecOption(AZ_output, 16); 
  solver.Iterate(maxIter, tol);

  int ICTIters = solver.NumIters();
  //cout << "Ifpack_ICT iterations: " << ICTIters << endl;

  // Compare to no preconditioning
  if (ICTIters > Iters/2)
    IFPACK_CHK_ERR(-1);
#endif

#ifdef HAVE_MPI
  MPI_Finalize() ;
#endif

  return(EXIT_SUCCESS);
}
Example #11
int main(int argc, char *argv[]) {

// standard Epetra MPI/Serial Comm startup	
#ifdef EPETRA_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm comm;
#endif
  
  int MyPID = comm.MyPID();
  int ierr = 0;
  bool verbose = (0 == MyPID);
  bool reportErrors = (0 == MyPID);
  // setup MatlabEngine
  if (verbose) cout << "going to startup a matlab process...\n";
  // Allocate the engine with new and bind a reference, so that the
  // explicit "delete &engine" at the end of main() is well defined.
  EpetraExt::EpetraExt_MatlabEngine& engine = *new EpetraExt::EpetraExt_MatlabEngine (comm);
  if (verbose) cout << "matlab started\n";
  
  // setup an array of doubles to be used for the examples
  int M = 20;
  int numGlobalElements = M * comm.NumProc();
  int N = 3;
  int numMyEntries = M * N;
  double* A = new double[numMyEntries];
  double* Aptr = A;
  int startValue = numMyEntries * MyPID;

  for(int col=0; col < N; col++) {
    for(int row=0; row < M; row++) {
      *Aptr++ = startValue++;
    }
  }

  // setup an array of ints to be used for the examples
  int* intA = new int[numMyEntries];
  int* intAptr = intA;
  int intStartValue = numMyEntries * MyPID;
  for(int i=0; i < M*N; i++) {
      *intAptr++ = intStartValue++;
  }
  
  // construct a map to be used by distributed objects
  Epetra_Map map (numGlobalElements, 0, comm);
  
  // CrsMatrix example
  // constructs a globally distributed CrsMatrix and then puts it into Matlab
  if (verbose) cout << " constructing CrsMatrix...\n";
  Epetra_CrsMatrix crsMatrix (Copy, map, N);
  int* indices = new int[N];
  for (int col=0; col < N; col++) {
    indices[col] = col;	  
  }
  
  double value = startValue;
  double* values = new double[numMyEntries];
  int minMyGID = map.MinMyGID();
  for (int row=0; row < M; row++) {
    for (int col=0; col < N; col++) {
      values[col] = value++;
    }
      
    crsMatrix.InsertGlobalValues(minMyGID + row, N, values, indices);
  }
  
  crsMatrix.FillComplete();
  if (verbose) cout << " CrsMatrix constructed\n";
  if (verbose) cout << " putting CrsMatrix into Matlab as CRSM\n";
  ierr = engine.PutRowMatrix(crsMatrix, "CRSM", false);
  if (ierr) {
    if (reportErrors) cout << "There was an error in engine.PutRowMatrix(crsMatrix, \"CRSM\", false): " << ierr << endl;
  }
  
  // BlockMap example
  // puts a map into Matlab
  if (verbose) cout << " putting Map into Matlab as MAP\n";
  ierr = engine.PutBlockMap(map, "MAP", false);
  if (ierr) {
    if (reportErrors) cout << "There was an error in engine.PutBlockMap(map, \"MAP\", false);: " << ierr << endl;
  }
  
  // MultiVector example
  // constructs a globally distributed MultiVector and then puts it into Matlab
  if (verbose) cout << " constructing MultiVector...\n";
  Epetra_MultiVector multiVector (Copy, map, A, M, N);
  if (verbose) cout << " MultiVector constructed\n";
  if (verbose) cout << " putting MultiVector into Matlab as MV\n";
  ierr = engine.PutMultiVector(multiVector, "MV");
  if (ierr) {
    if (reportErrors) cout << "There was an error in engine.PutMultiVector(multiVector, \"MV\"): " << ierr << endl;
  }
  
  // SerialDenseMatrix example
  // constructs a SerialDenseMatrix on every PE
  if (verbose) cout << " constructing a SerialDenseMatrix...\n";
  Epetra_SerialDenseMatrix sdMatrix (Copy, A, M, M, N);
  if (verbose) cout << " SerialDenseMatrix constructed\n";
  if (verbose) cout << " putting SerialDenseMatrix from PE0 into Matlab as SDM_PE0\n";
  // since the third parameter is left out, the SerialDenseMatrix from PE0 is used by default
  ierr = engine.PutSerialDenseMatrix(sdMatrix, "SDM_PE0");
  if (ierr) {
    if (reportErrors) cout << "There was an error in engine.PutSerialDenseMatrix(sdMatrix, \"SDM_PE0\"): " << ierr << endl;
  }
  if (comm.NumProc() > 1) {
    if (verbose) cout << " putting SerialDenseMatrix from PE1 into Matlab as SDM_PE1\n";
    // specifying 1 as the third parameter will put the SerialDenseMatrix from PE1 into Matlab
    ierr = engine.PutSerialDenseMatrix(sdMatrix, "SDM_PE1", 1);
    if (ierr) {
      if (reportErrors) cout << "There was an error in engine.PutSerialDenseMatrix(sdMatrix, \"SDM_PE1\", 1): " << ierr << endl;
    }
  }
  

  // SerialDenseVector example
  // constructs a SerialDenseVector on every PE
  if (verbose) cout << " constructing a SerialDenseVector...\n";
  Epetra_SerialDenseVector sdVector (Copy, A, M);
  if (verbose) cout << " SerialDenseVector constructed\n";
  // since the third parameter is left out, the SerialDenseMatrix from PE0 is used by default
  if (verbose) cout << " putting SerialDenseVector from PE0 into Matlab as SDV_PE0\n";
  ierr = engine.PutSerialDenseMatrix(sdVector, "SDV_PE0");
  if (ierr) {
    if (reportErrors) cout << "There was an error in engine.PutSerialDenseMatrix(sdVector, \"SDV_PE0\"): " << ierr << endl;
  }
  if (comm.NumProc() > 1) {
    if (verbose) cout << " putting SerialDenseVector from PE1 into Matlab as SDV_PE1\n";
    // specifying 1 as the third parameter will put the SerialDenseVector from PE1 into Matlab
    ierr = engine.PutSerialDenseMatrix(sdVector, "SDV_PE1", 1);
    if (ierr) {
      if (reportErrors) cout << "There was an error in engine.PutSerialDenseMatrix(sdMatrix, \"SDV_PE1\", 1): " << ierr << endl;
    }
  }

  // IntSerialDenseMatrix example
  // constructs an IntSerialDenseMatrix on every PE
  if (verbose) cout << " constructing an IntSerialDenseMatrix...\n";
  Epetra_IntSerialDenseMatrix isdMatrix (Copy, intA, M, M, N);
  if (verbose) cout << " IntSerialDenseMatrix constructed\n";
  // since the third parameter is left out, the IntSerialDenseMatrix from PE0 is used by default
  if (verbose) cout << " putting IntSerialDenseMatrix from PE0 into Matlab as ISDM_PE0\n";
  ierr = engine.PutIntSerialDenseMatrix(isdMatrix, "ISDM_PE0");
  if (ierr) {
    if (reportErrors) cout << "There was an error in engine.PutIntSerialDenseMatrix(isdMatrix, \"ISDM_PE0\"): " << ierr << endl;
  }
  if (comm.NumProc() > 1) {
    if (verbose) cout << " putting IntSerialDenseMatrix from PE1 into Matlab as ISDM_PE1\n";
    // specifying 1 as the third parameter will put the IntSerialDenseMatrix from PE1 into Matlab
    ierr = engine.PutIntSerialDenseMatrix(isdMatrix, "ISDM_PE1", 1);
    if (ierr) {
      if (reportErrors) cout << "There was an error in engine.PutSerialDenseMatrix(isdMatrix, \"ISDM_PE1\", 1): " << ierr << endl;
    }
  }


  // IntSerialDenseVector example
  // constructs an IntSerialDenseVector on every PE
  if (verbose) cout << " constructing an IntSerialDenseVector...\n";
  Epetra_IntSerialDenseVector isdVector (Copy, intA, M);
  if (verbose) cout << " IntSerialDenseVector constructed\n";
  // since the third parameter is left out, the IntSerialDenseVector from PE0 is used by default
  if (verbose) cout << " putting IntSerialDenseVector from PE0 into Matlab as ISDV_PE0\n";
  ierr = engine.PutIntSerialDenseMatrix(isdVector, "ISDV_PE0");
  if (ierr) {
    if (reportErrors) cout << "There was an error in engine.PutIntSerialDenseMatrix(isdVector, \"ISDV_PE0\"): " << ierr << endl;
  }
  if (comm.NumProc() > 1) {
    if (verbose) cout << " putting IntSerialDenseVector from PE1 into Matlab as ISDV_PE1\n";
    // specifying 1 as the third parameter will put the IntSerialDenseVector from PE1 into Matlab
    ierr = engine.PutIntSerialDenseMatrix(isdVector, "ISDV_PE1", 1);
    if (ierr) {
      if (reportErrors) cout << "There was an error in engine.PutSerialDenseMatrix(isdVector, \"ISDV_PE1\", 1): " << ierr << endl;
    }
  }
  
  // entering a while loop on PE0 will keep the Matlab workspace alive
  /*
  if (MyPID == 0)
  while(1) {
    // do nothing
  }
  */

  const int bufSize = 200;
  char s [bufSize];
  const int matlabBufferSize = 1024 * 16;
  char matlabBuffer [matlabBufferSize];
  
  // send some commands to Matlab and output the result to stdout
  engine.EvalString("whos", matlabBuffer, matlabBufferSize);
  if (verbose) cout << matlabBuffer << endl;
  engine.EvalString("SDV_PE0", matlabBuffer, matlabBufferSize);
  if (verbose) cout << matlabBuffer << endl;
  if (comm.NumProc() > 1) {
    engine.EvalString("SDV_PE1", matlabBuffer, matlabBufferSize);
    if (verbose) cout << matlabBuffer << endl;
  }
  
  // the following allows user interaction with Matlab
  if (MyPID == 0)
  while(1) {
      // Prompt the user and get a string
      printf(">> ");
      if (fgets(s, bufSize, stdin) == NULL) {
          printf("Bye\n");
          break ;
      }
      printf ("command :%s:\n", s) ;
      
      // send the command to MATLAB
      // output goes to stdout
      ierr = engine.EvalString(s, matlabBuffer, matlabBufferSize);
      if (ierr != 0) {
          printf("there was an error: %d", ierr);
          ierr = 0;
      }
      else {
      	  printf("Matlab Output:\n%s", matlabBuffer);
      }
  }
  
  if (verbose) cout << endl << " all done\n";

// standard finalizer for Epetra MPI Comms
#ifdef EPETRA_MPI
  MPI_Finalize();
#endif

  // Clean up the arrays allocated with new[].
  delete [] A;
  delete [] intA;
  delete [] indices;
  delete [] values;

  // Deleting the engine runs the MatlabEngine destructor, which shuts
  // down the Matlab process associated with this example; without it the
  // example application will not shut down properly.
  delete &engine;
  return(0);
}
Example #12
int main(int argc, char *argv[]) {

#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  int ierr=HIPS_Initialize(1);
  HIPS_ExitOnError(ierr);

  int MyPID = Comm.MyPID();
  bool verbose = false; 
  if (MyPID==0) verbose = true;

  Teuchos::ParameterList GaleriList;
  int nx = 100; 
  GaleriList.set("nx", nx);
  GaleriList.set("ny", nx * Comm.NumProc());
  //  GaleriList.set("ny", nx);
  GaleriList.set("mx", 1);
  GaleriList.set("my", Comm.NumProc());
                 
  Teuchos::RefCountPtr<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap("Cartesian2D", Comm, GaleriList) );
  Teuchos::RefCountPtr<Epetra_CrsMatrix> A = Teuchos::rcp( Galeri::CreateCrsMatrix("Laplace2D", &*Map, GaleriList) );
  Teuchos::RefCountPtr<Epetra_MultiVector> LHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) );
  Teuchos::RefCountPtr<Epetra_MultiVector> RHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) );
  LHS->PutScalar(0.0); RHS->Random();

  // ============================ //
  // Construct ILU preconditioner //
  // ---------------------------- //

  Teuchos::RefCountPtr<Ifpack_HIPS> RILU;

  RILU = Teuchos::rcp( new Ifpack_HIPS(&*A) );

  Teuchos::ParameterList List;
  List.set("hips: id",0);
  List.set("hips: setup output",2);
  List.set("hips: iteration output",0);
  List.set("hips: drop tolerance",5e-3);
  List.set("hips: graph symmetric",1);
  RILU->SetParameters(List);

  
  RILU->Initialize();
  RILU->Compute();

  // Here we create an AztecOO object
  LHS->PutScalar(0.0);

  int Niters = 50;

  AztecOO solver;
  solver.SetUserMatrix(&*A);
  solver.SetLHS(&*LHS);
  solver.SetRHS(&*RHS);
  solver.SetAztecOption(AZ_solver,AZ_gmres);
  solver.SetPrecOperator(&*RILU);
  solver.SetAztecOption(AZ_output, 1); 
  solver.Iterate(Niters, 1.0e-8);

  int OldIters = solver.NumIters();


  HIPS_Finalize();
  
#ifdef HAVE_MPI
  MPI_Finalize() ;
#endif

  return(EXIT_SUCCESS);
}
Example #13
void build_test_matrix(Epetra_MpiComm & Comm, int test_number, Epetra_CrsMatrix *&A){
  int NumProc = Comm.NumProc();
  int MyPID   = Comm.MyPID();

  if(test_number==1){
    // Case 1: Tridiagonal
    int NumMyEquations = 100;

    int NumGlobalEquations = (NumMyEquations * NumProc) + EPETRA_MIN(NumProc,3);
    if(MyPID < 3)  NumMyEquations++;

    // Construct a Map that puts approximately the same Number of equations on each processor
    Epetra_Map Map(NumGlobalEquations, NumMyEquations, 0, Comm);

    // Get update list and number of local equations from newly created Map
    int* MyGlobalElements = new int[Map.NumMyElements()];
    Map.MyGlobalElements(MyGlobalElements);

    // Create an integer vector NumNz that is used to build the Epetra matrix.
    // NumNz[i] is the number of OFF-DIAGONAL terms for the ith global equation on this processor

    int* NumNz = new int[NumMyEquations];

    // We are building a tridiagonal matrix where each row has (-1 2 -1)
    // So we need 2 off-diagonal terms (except for the first and last equation)

    for (int i = 0; i < NumMyEquations; i++)
      if((MyGlobalElements[i] == 0) || (MyGlobalElements[i] == NumGlobalEquations - 1))
        NumNz[i] = 1;
      else
        NumNz[i] = 2;

    // Create an Epetra_CrsMatrix
    A=new Epetra_CrsMatrix(Copy, Map, NumNz);

    // Add  rows one-at-a-time
    // Need some vectors to help
    // Off diagonal Values will always be -1

    double* Values = new double[2];
    Values[0] = -1.0;
    Values[1] = -1.0;
    int* Indices = new int[2];
    double two = 2.0;
    int NumEntries;

    for (int i = 0; i < NumMyEquations; i++) {
      if(MyGlobalElements[i] == 0) {
        Indices[0] = 1;
        NumEntries = 1;
      }
      else if (MyGlobalElements[i] == NumGlobalEquations-1) {
        Indices[0] = NumGlobalEquations-2;
        NumEntries = 1;
      }
      else {
        Indices[0] = MyGlobalElements[i]-1;
        Indices[1] = MyGlobalElements[i]+1;
        NumEntries = 2;
      }
      A->InsertGlobalValues(MyGlobalElements[i], NumEntries, Values, Indices);
      A->InsertGlobalValues(MyGlobalElements[i], 1, &two, MyGlobalElements+i);
    }

    A->FillComplete();

    // Cleanup
    delete [] MyGlobalElements;
    delete [] NumNz;
    delete [] Values;
    delete [] Indices;

  }
}
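A minimal usage sketch for build_test_matrix; since the routine allocates the matrix with new, the caller owns it. The surrounding code is an assumption, not part of the original test:

// A hedged usage sketch:
Epetra_MpiComm Comm (MPI_COMM_WORLD);
Epetra_CrsMatrix* A = NULL;
build_test_matrix (Comm, 1, A);  // test 1: tridiagonal (-1 2 -1) matrix
// ... exercise *A ...
delete A;                        // the caller owns the matrix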
Example #14
int main(int argc, char *argv[]) {

#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif
  
  int MyPID = Comm.MyPID();
  bool verbose = false; 
  if (MyPID==0) verbose = true;

  
  /*int npRows = -1;
  int npCols = -1;
  bool useTwoD = false;
  int randomize = 1;
  std::string matrix = "Laplacian";
  
  Epetra_CrsMatrix *AK = NULL;
    std::string filename = "email.mtx";
  read_matrixmarket_file((char*) filename.c_str(), Comm, AK,
			 useTwoD, npRows, npCols,
			 randomize, false,
			 (matrix.find("Laplacian")!=std::string::npos));
  Teuchos::RCP<Epetra_CrsMatrix> A(AK);
  const Epetra_Map *AMap = &(AK->DomainMap());
  Teuchos::RCP<const Epetra_Map> Map(AMap, false);*/

  int nx = 30;
  Teuchos::ParameterList GaleriList;
  GaleriList.set("nx", nx);
  GaleriList.set("ny", nx * Comm.NumProc());
  GaleriList.set("mx", 1);
  GaleriList.set("my", Comm.NumProc());
  Teuchos::RefCountPtr<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap("Cartesian2D", Comm, GaleriList) );
  Teuchos::RefCountPtr<Epetra_CrsMatrix> A = Teuchos::rcp( Galeri::CreateCrsMatrix("Laplace2D", &*Map, GaleriList) );

  Teuchos::RefCountPtr<Epetra_MultiVector> LHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) );
  Teuchos::RefCountPtr<Epetra_MultiVector> RHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) );


  LHS->PutScalar(0.0); RHS->Random();

  // ==================================================== //
  // Compare support graph preconditioners to no precond. //
  // ---------------------------------------------------- //

  const double tol = 1e-5;
  const int maxIter = 500;

  // Baseline: No preconditioning
  // Compute number of iterations, to compare to the support-graph preconditioners later.

  // Here we create an AztecOO object
  LHS->PutScalar(0.0);

  AztecOO solver;
  solver.SetUserMatrix(&*A);
  solver.SetLHS(&*LHS);
  solver.SetRHS(&*RHS);
  solver.SetAztecOption(AZ_solver,AZ_cg);
  solver.SetAztecOption(AZ_output, 16); 
  solver.Iterate(maxIter, tol);

  int Iters = solver.NumIters();


  int SupportIters;
  Ifpack Factory;
  Teuchos::ParameterList List;

#ifdef HAVE_IFPACK_AMESOS
  //////////////////////////////////////////////////////
  // Same test with Ifpack_SupportGraph
  // Factored with Amesos

  
  Teuchos::RefCountPtr<Ifpack_Preconditioner> PrecSupportAmesos = Teuchos::rcp( Factory.Create("MSF Amesos", &*A) );
  List.set("amesos: solver type","Klu");
  List.set("MST: keep diagonal", 1.0);
  List.set("MST: randomize", 1);
  //List.set("fact: absolute threshold", 3.0);
  
  IFPACK_CHK_ERR(PrecSupportAmesos->SetParameters(List));
  IFPACK_CHK_ERR(PrecSupportAmesos->Initialize());
  IFPACK_CHK_ERR(PrecSupportAmesos->Compute());


  // Reset the initial guess and reuse the AztecOO object.
  LHS->PutScalar(0.0);

  //AztecOO solver;
  solver.SetUserMatrix(&*A);
  solver.SetLHS(&*LHS);
  solver.SetRHS(&*RHS);
  solver.SetAztecOption(AZ_solver,AZ_cg);
  solver.SetPrecOperator(&*PrecSupportAmesos);
  solver.SetAztecOption(AZ_output, 16); 
  solver.Iterate(maxIter, tol);

  SupportIters = solver.NumIters();

  // Compare to no preconditioning
  if (SupportIters > 2*Iters)
    IFPACK_CHK_ERR(-1);

#endif

  //////////////////////////////////////////////////////
  // Same test with Ifpack_SupportGraph
  // Factored with IC
  

  
  Teuchos::RefCountPtr<Ifpack_Preconditioner> PrecSupportIC = Teuchos::rcp( Factory.Create("MSF IC", &*A) );

  

  IFPACK_CHK_ERR(PrecSupportIC->SetParameters(List));
  IFPACK_CHK_ERR(PrecSupportIC->Compute());


  // Reset the initial guess and reuse the AztecOO object.
  LHS->PutScalar(0.0);

  //AztecOO solver;
  solver.SetUserMatrix(&*A);
  solver.SetLHS(&*LHS);
  solver.SetRHS(&*RHS);
  solver.SetAztecOption(AZ_solver,AZ_cg);
  solver.SetPrecOperator(&*PrecSupportIC);
  solver.SetAztecOption(AZ_output, 16);
  solver.Iterate(maxIter, tol);

  SupportIters = solver.NumIters();

  // Compare to no preconditioning
  if (SupportIters > 2*Iters)
    IFPACK_CHK_ERR(-1);

#ifdef HAVE_MPI
  MPI_Finalize() ;
#endif

  return(EXIT_SUCCESS);
}
Example #15
int main(int argc, char *argv[]) {

#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  Teuchos::ParameterList GaleriList;
  int nx = 30; 

  GaleriList.set("nx", nx);
  //  GaleriList.set("ny", nx * Comm.NumProc());
  GaleriList.set("ny", nx);
  GaleriList.set("mx", 1);
  GaleriList.set("my", Comm.NumProc());
  GaleriList.set("alpha", .0);
  GaleriList.set("diff", 1.0);
  GaleriList.set("conv", 100.0);

  Teuchos::RefCountPtr<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap64("Cartesian2D", Comm, GaleriList) );
  Teuchos::RefCountPtr<Epetra_CrsMatrix> A = Teuchos::rcp( Galeri::CreateCrsMatrix("UniFlow2D", &*Map, GaleriList) );
  Teuchos::RefCountPtr<Epetra_MultiVector> LHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) );
  Teuchos::RefCountPtr<Epetra_MultiVector> RHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) );
  LHS->PutScalar(0.0); RHS->Random();
  Ifpack Factory;  
  int Niters = 100;

  // ============================= //
  // Construct IHSS preconditioner //
  // ============================= //
  Teuchos::RefCountPtr<Ifpack_Preconditioner> Prec = Teuchos::rcp( Factory.Create("IHSS", &*A,0) );
  Teuchos::ParameterList List;
  List.set("ihss: hermetian type","ILU");
  List.set("ihss: skew hermetian type","ILU");
  List.set("ihss: ratio eigenvalue",100.0);
  // Could set sublist values here to better control the ILU, but this isn't needed for this example.
  IFPACK_CHK_ERR(Prec->SetParameters(List));
  IFPACK_CHK_ERR(Prec->Compute());

  // ============================= //
  // Create solver Object          //
  // ============================= //

  AztecOO solver;
  solver.SetUserMatrix(&*A);
  solver.SetLHS(&*LHS);
  solver.SetRHS(&*RHS);
  solver.SetAztecOption(AZ_solver,AZ_gmres);
  solver.SetPrecOperator(&*Prec);
  solver.SetAztecOption(AZ_output, 1); 
  solver.Iterate(Niters, 1e-8);

  // ============================= //
  // Construct SORa preconditioner //
  // ============================= //
  Teuchos::RefCountPtr<Ifpack_Preconditioner> Prec2 = Teuchos::rcp( Factory.Create("SORa", &*A,0) );
  Teuchos::ParameterList List2;
  List2.set("sora: sweeps",1);
  // Could set sublist values here to better control the ILU, but this isn't needed for this example.
  IFPACK_CHK_ERR(Prec2->SetParameters(List2));
  IFPACK_CHK_ERR(Prec2->Compute());

  // ============================= //
  // Create solver Object          //
  // ============================= //
  AztecOO solver2;
  LHS->PutScalar(0.0);
  solver2.SetUserMatrix(&*A);
  solver2.SetLHS(&*LHS);
  solver2.SetRHS(&*RHS);
  solver2.SetAztecOption(AZ_solver,AZ_gmres);
  solver2.SetPrecOperator(&*Prec2);
  solver2.SetAztecOption(AZ_output, 1); 
  solver2.Iterate(Niters, 1e-8);

#ifdef HAVE_MPI
  MPI_Finalize() ;
#endif

  return(EXIT_SUCCESS);
}
Example #16
//=============================================================================
Epetra_Map * Epetra_Map::RemoveEmptyProcesses() const
{
#ifdef HAVE_MPI
  const Epetra_MpiComm * MpiComm = dynamic_cast<const Epetra_MpiComm*>(&Comm());

  // If the Comm isn't MPI, just treat this as a copy constructor
  if(!MpiComm) return new Epetra_Map(*this);

  MPI_Comm NewComm,MyMPIComm = MpiComm->Comm();

  // Create the new communicator.  On processes where color ==
  // MPI_UNDEFINED, MPI_Comm_split returns MPI_COMM_NULL, and we ignore
  // the result there.  Passing key == 0 tells MPI to order the
  // processes in the new communicator by their rank in the old
  // communicator.
  const int color = (NumMyElements() == 0) ? MPI_UNDEFINED : 1;

  // MPI_Comm_split must be called collectively over the original
  // communicator.  We cannot call it only on the processes with
  // nonzero color, even though we ignore its result on the processes
  // whose color is MPI_UNDEFINED.
  int rv = MPI_Comm_split(MyMPIComm,color,0,&NewComm);
  if(rv!=MPI_SUCCESS) throw ReportError("Epetra_Map::RemoveEmptyProcesses: MPI_Comm_split failed.",-1);

  if(color == MPI_UNDEFINED)
    return 0; // We're not in the new map
  else {
    Epetra_MpiComm * NewEpetraComm = new Epetra_MpiComm(NewComm);

    // Create the new map with the copy constructor; it only serves as a
    // shell, since we rebuild its BlockMapData from scratch below.
    Epetra_Map * NewMap = new Epetra_Map(*this);

    // Get rid of the old BlockMapData, now make a new one from scratch...
    NewMap->CleanupData();
    if(GlobalIndicesInt()) {
#ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
      NewMap->BlockMapData_ = new Epetra_BlockMapData(NumGlobalElements(),0,IndexBase(),*NewEpetraComm,false);
#endif
    }
    else {
#ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
      NewMap->BlockMapData_ = new Epetra_BlockMapData(NumGlobalElements64(),0,IndexBase64(),*NewEpetraComm,true);
#endif
    }

    // Now copy all of the relevant bits of BlockMapData...
    //    NewMap->BlockMapData_->Comm_                    = NewEpetraComm;
    NewMap->BlockMapData_->LID_                     = BlockMapData_->LID_;
#ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
    NewMap->BlockMapData_->MyGlobalElements_int_    = BlockMapData_->MyGlobalElements_int_;
#endif
#ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
    NewMap->BlockMapData_->MyGlobalElements_LL_     = BlockMapData_->MyGlobalElements_LL_;
#endif
    NewMap->BlockMapData_->FirstPointInElementList_ = BlockMapData_->FirstPointInElementList_;
    NewMap->BlockMapData_->ElementSizeList_         = BlockMapData_->ElementSizeList_;
    NewMap->BlockMapData_->PointToElementList_      = BlockMapData_->PointToElementList_;

    NewMap->BlockMapData_->NumGlobalElements_       = BlockMapData_->NumGlobalElements_;
    NewMap->BlockMapData_->NumMyElements_           = BlockMapData_->NumMyElements_;
    NewMap->BlockMapData_->IndexBase_               = BlockMapData_->IndexBase_;
    NewMap->BlockMapData_->ElementSize_             = BlockMapData_->ElementSize_;
    NewMap->BlockMapData_->MinMyElementSize_        = BlockMapData_->MinMyElementSize_;
    NewMap->BlockMapData_->MaxMyElementSize_        = BlockMapData_->MaxMyElementSize_;
    NewMap->BlockMapData_->MinElementSize_          = BlockMapData_->MinElementSize_;
    NewMap->BlockMapData_->MaxElementSize_          = BlockMapData_->MaxElementSize_;
    NewMap->BlockMapData_->MinAllGID_               = BlockMapData_->MinAllGID_;
    NewMap->BlockMapData_->MaxAllGID_               = BlockMapData_->MaxAllGID_;
    NewMap->BlockMapData_->MinMyGID_                = BlockMapData_->MinMyGID_;
    NewMap->BlockMapData_->MaxMyGID_                = BlockMapData_->MaxMyGID_;
    NewMap->BlockMapData_->MinLID_                  = BlockMapData_->MinLID_;
    NewMap->BlockMapData_->MaxLID_                  = BlockMapData_->MaxLID_;
    NewMap->BlockMapData_->NumGlobalPoints_         = BlockMapData_->NumGlobalPoints_;
    NewMap->BlockMapData_->NumMyPoints_             = BlockMapData_->NumMyPoints_;
    NewMap->BlockMapData_->ConstantElementSize_     = BlockMapData_->ConstantElementSize_;
    NewMap->BlockMapData_->LinearMap_               = BlockMapData_->LinearMap_;
    NewMap->BlockMapData_->DistributedGlobal_       = NewEpetraComm->NumProc()==1 ? false : BlockMapData_->DistributedGlobal_;
    NewMap->BlockMapData_->OneToOneIsDetermined_    = BlockMapData_->OneToOneIsDetermined_;
    NewMap->BlockMapData_->OneToOne_                = BlockMapData_->OneToOne_;
    NewMap->BlockMapData_->GlobalIndicesInt_        = BlockMapData_->GlobalIndicesInt_;
    NewMap->BlockMapData_->GlobalIndicesLongLong_   = BlockMapData_->GlobalIndicesLongLong_;
    NewMap->BlockMapData_->LastContiguousGID_       = BlockMapData_->LastContiguousGID_;
    NewMap->BlockMapData_->LastContiguousGIDLoc_    = BlockMapData_->LastContiguousGIDLoc_;
    NewMap->BlockMapData_->LIDHash_                 = BlockMapData_->LIDHash_ ? new Epetra_HashTable<int>(*BlockMapData_->LIDHash_) : 0;

    // Delay directory construction
    NewMap->BlockMapData_->Directory_               = 0;

    // Cleanup
    delete NewEpetraComm;

    return NewMap;
  }
#else
    // MPI isn't compiled, so just treat this as a copy constructor
    return new Epetra_Map(*this);
#endif
}
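A usage sketch for RemoveEmptyProcesses; the OriginalMap name is illustrative. Note that processes with no local elements receive a null pointer and must not use the result:

// A hedged usage sketch (OriginalMap is illustrative):
Epetra_Map* Reduced = OriginalMap.RemoveEmptyProcesses ();
if (Reduced != 0) {
  // This process kept elements; build objects over the smaller
  // communicator held by *Reduced.
  delete Reduced;  // the caller owns the returned map
}
// Processes that received 0 take no further part in the reduced map.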
Example #17
int main(int argc, char *argv[]) {

#ifdef EPETRA_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm comm;
#endif

  int MyPID = comm.MyPID();

  bool verbose = false;
  bool verbose1 = false; 
  // Check if we should print results to standard out
  if (argc > 1) {
    if ((argv[1][0] == '-') && (argv[1][1] == 'v')) {
      verbose1 = true;
      if (MyPID==0) verbose = true;
    }
  }
  if (verbose)
    std::cout << EpetraExt::EpetraExt_Version() << std::endl << std::endl;

  if (verbose1) std::cout << comm << std::endl;


  // Uncomment the next three lines to debug in mpi mode
  //int tmp;
  //if (MyPID==0) cin >> tmp;
  //comm.Barrier();

  Epetra_Map * map;
  Epetra_CrsMatrix * A; 
  Epetra_Vector * x; 
  Epetra_Vector * b;
  Epetra_Vector * xexact;

  int nx = 20*comm.NumProc();
  int ny = 30;
  int npoints = 7;
  int xoff[] = {-1,  0,  1, -1,  0,  1,  0};
  int yoff[] = {-1, -1, -1,  0,  0,  0,  1};

   
  int ierr = 0;
  // Call routine to generate a CRS problem with 0-based indexing
  Trilinos_Util_GenerateCrsProblem(nx, ny, npoints, xoff, yoff, comm, map, A, x, b, xexact);

  ierr += runTests(*map, *A, *x, *b, *xexact, verbose);

  delete A;
  delete x;
  delete b;
  delete xexact;
  delete map;

  // Call routine to generate a CRS problem with 1-based indexing
  Trilinos_Util_GenerateCrsProblem(nx, ny, npoints, xoff, yoff, comm, map, A, x, b, xexact, 1);

  ierr += runTests(*map, *A, *x, *b, *xexact, verbose);

  delete A;
  delete x;
  delete b;
  delete xexact;
  delete map;

  // Call routine to generate a CRS problem with -1-based indexing
  Trilinos_Util_GenerateCrsProblem(nx, ny, npoints, xoff, yoff, comm, map, A, x, b, xexact, -1);

  ierr += runTests(*map, *A, *x, *b, *xexact, verbose);

  delete A;
  delete x;
  delete b;
  delete xexact;
  delete map;

  int nx1 = 5;
  int ny1 = 4;
  Poisson2dOperator Op(nx1, ny1, comm);
  ierr += runOperatorTests(Op, verbose);

  generateHyprePrintOut("MyMatrixFile", comm);

  EPETRA_CHK_ERR(EpetraExt::HypreFileToCrsMatrix("MyMatrixFile", comm, A));
  
  runHypreTest(*A);
  delete A;

#ifdef EPETRA_MPI
  MPI_Finalize() ;
#endif

  return(ierr);
}
Example #18
int main(int argc, char *argv[]) {

#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  int MyPID = Comm.MyPID();
  bool verbose = false; 
  if (MyPID==0) verbose = true;

  Teuchos::ParameterList GaleriList;
  int nx = 30; 

  GaleriList.set("nx", nx);
  GaleriList.set("ny", nx * Comm.NumProc());
  GaleriList.set("mx", 1);
  GaleriList.set("my", Comm.NumProc());
  Teuchos::RefCountPtr<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap("Cartesian2D", Comm, GaleriList) );
  Teuchos::RefCountPtr<Epetra_CrsMatrix> A = Teuchos::rcp( Galeri::CreateCrsMatrix("Laplace2D", &*Map, GaleriList) );
  Teuchos::RefCountPtr<Epetra_MultiVector> LHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) );
  Teuchos::RefCountPtr<Epetra_MultiVector> RHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) );
  LHS->PutScalar(0.0); RHS->Random();

  // ============================ //
  // Construct ILU preconditioner //
  // ---------------------------- //

  // Use deliberately nontrivial ("funky") values to check that they
  // have the same influence on the old and the new algorithms alike.
  int    LevelFill = 2;
  double DropTol = 0.3333;
  double Athresh = 0.0123;
  double Rthresh = 0.9876;
  double Relax   = 0.1;
  int    Overlap = 2;
  
  Teuchos::RefCountPtr<Ifpack_IlukGraph> Graph;
  Teuchos::RefCountPtr<Ifpack_CrsRiluk> RILU;

  Graph = Teuchos::rcp( new Ifpack_IlukGraph(A->Graph(), LevelFill, Overlap) );
  int ierr;
  ierr = Graph->ConstructFilledGraph();
  IFPACK_CHK_ERR(ierr);

  RILU = Teuchos::rcp( new Ifpack_CrsRiluk(*Graph) );
  RILU->SetAbsoluteThreshold(Athresh);
  RILU->SetRelativeThreshold(Rthresh);
  RILU->SetRelaxValue(Relax);
  int initerr = RILU->InitValues(*A);
  if (initerr!=0) cout << Comm << "*ERR* InitValues = " << initerr << endl;

  RILU->Factor();

  // Define label for printing out during the solve phase
  string label = "Ifpack_CrsRiluk Preconditioner: LevelFill = " + toString(LevelFill) +
                                                 " Overlap = " + toString(Overlap) +
                                                 " Athresh = " + toString(Athresh) +
                                                 " Rthresh = " + toString(Rthresh);
  // Here we create an AztecOO object
  LHS->PutScalar(0.0);

  int Niters = 1200;

  AztecOO solver;
  solver.SetUserMatrix(&*A);
  solver.SetLHS(&*LHS);
  solver.SetRHS(&*RHS);
  solver.SetAztecOption(AZ_solver,AZ_gmres);
  solver.SetPrecOperator(&*RILU);
  solver.SetAztecOption(AZ_output, 16); 
  solver.Iterate(Niters, 5.0e-5);

  int OldIters = solver.NumIters();

  // Now rebuild the same preconditioner using the Ifpack factory; we
  // expect the same number of iterations.

  Ifpack Factory;
  Teuchos::RefCountPtr<Ifpack_Preconditioner> Prec = Teuchos::rcp( Factory.Create("ILU", &*A, Overlap) );

  Teuchos::ParameterList List;
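  // Note: ParameterList::get(name, defaultValue) inserts defaultValue
  // into the list when the parameter is absent, so these calls populate
  // the list much as set() would.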
  List.get("fact: level-of-fill", LevelFill);
  List.get("fact: drop tolerance", DropTol);
  List.get("fact: absolute threshold", Athresh);
  List.get("fact: relative threshold", Rthresh);
  List.get("fact: relax value", Relax);

  IFPACK_CHK_ERR(Prec->SetParameters(List));
  IFPACK_CHK_ERR(Prec->Compute());

  // Here we create an AztecOO object
  LHS->PutScalar(0.0);

  solver.SetUserMatrix(&*A);
  solver.SetLHS(&*LHS);
  solver.SetRHS(&*RHS);
  solver.SetAztecOption(AZ_solver,AZ_gmres);
  solver.SetPrecOperator(&*Prec);
  solver.SetAztecOption(AZ_output, 16); 
  solver.Iterate(Niters, 5.0e-5);

  int NewIters = solver.NumIters();

  if (OldIters != NewIters)
    IFPACK_CHK_ERR(-1);


#ifdef HAVE_IFPACK_SUPERLU
  // Now test with SuperLU's ILU, if we've got it
  Teuchos::RefCountPtr<Ifpack_Preconditioner> Prec2 = Teuchos::rcp( Factory.Create("SILU", &*A,0) ); 
  Teuchos::ParameterList SList;
  SList.set("fact: drop tolerance",1e-4);
  SList.set("fact: zero pivot threshold",.1);
  SList.set("fact: maximum fill factor",10.0);
  // NOTE: There is a bug in SuperLU 4.0 which will crash the code if the maximum fill factor is set too low.
  // This bug was reported to Sherry Li on 4/8/10.
  SList.set("fact: silu drop rule",9);

  IFPACK_CHK_ERR(Prec2->SetParameters(SList));
  IFPACK_CHK_ERR(Prec2->Compute());

  LHS->PutScalar(0.0);
  solver.SetPrecOperator(&*Prec2);
  solver.Iterate(Niters, 5.0e-5);
  Prec2->Print(cout);

#endif


#ifdef HAVE_MPI
  MPI_Finalize() ;
#endif

  return(EXIT_SUCCESS);
}