//
// The same main() driver routine as in the first Epetra lesson.
//
int
main (int argc, char *argv[])
{
    using std::cout;
    using std::endl;

#ifdef HAVE_MPI
    MPI_Init (&argc, &argv);
    Epetra_MpiComm comm (MPI_COMM_WORLD);
#else
    Epetra_SerialComm comm;
#endif // HAVE_MPI

    if (comm.MyPID () == 0) {
        cout << "Total number of processes: " << comm.NumProc () << endl;
    }

    // Do something with the new Epetra communicator.
    exampleRoutine (comm, cout);

    // This tells the Trilinos test framework that the test passed.
    if (comm.MyPID () == 0) {
        cout << "End Result: TEST PASSED" << endl;
    }

#ifdef HAVE_MPI
    // Since you called MPI_Init, you are responsible for calling
    // MPI_Finalize after you are done using MPI.
    (void) MPI_Finalize ();
#endif // HAVE_MPI

    return 0;
}
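The driver above delegates to an exampleRoutine defined earlier in the lesson. A minimal sketch of what such a routine might look like (hypothetical; the actual lesson supplies its own definition, declared before main()):

// Print a greeting from Process 0 only, using the given communicator
// and output stream.
void
exampleRoutine (const Epetra_Comm& comm, std::ostream& out)
{
  if (comm.MyPID () == 0) {
    // On (MPI) Process 0, print out a greeting.
    out << "Hello, World!" << std::endl;
  }
}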
// This constructor is for just one subdomain, so it only adds the
// information for multiple time steps on the domain.  There is no
// two-level parallelism.
MultiMpiComm::MultiMpiComm(const Epetra_MpiComm& EpetraMpiComm_, int numTimeSteps_,
                           const Teuchos::EVerbosityLevel verbLevel) :
        Epetra_MpiComm(EpetraMpiComm_),
        Teuchos::VerboseObject<MultiMpiComm>(verbLevel),
        myComm(Teuchos::rcp(new Epetra_MpiComm(EpetraMpiComm_))),
        subComm(0)
{

  numSubDomains = 1;
  subDomainRank = 0;
  numTimeSteps = numTimeSteps_;
  numTimeStepsOnDomain = numTimeSteps_;
  firstTimeStepOnDomain = 0;

  subComm = new Epetra_MpiComm(EpetraMpiComm_);

  // Create split communicators for time domain
  MPI_Comm time_split_MPI_Comm;
  int rank = EpetraMpiComm_.MyPID();
  (void) MPI_Comm_split(EpetraMpiComm_.Comm(), rank, rank,
                        &time_split_MPI_Comm);
  timeComm = new Epetra_MpiComm(time_split_MPI_Comm);
  numTimeDomains = EpetraMpiComm_.NumProc();
  timeDomainRank = rank;
}
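The MPI_Comm_split call above uses each process's rank as the "color," so every process lands in its own single-process time communicator. For reference, a standalone sketch of the same primitive that instead groups processes by even/odd rank (assumes MPI is already initialized):

// Split MPI_COMM_WORLD into two subcommunicators: one holding the
// even-ranked processes, one holding the odd-ranked processes.
int rank;
MPI_Comm_rank (MPI_COMM_WORLD, &rank);
MPI_Comm halfComm;
MPI_Comm_split (MPI_COMM_WORLD, rank % 2 /* color */, rank /* key */, &halfComm);
// ... use halfComm, e.g., wrap it in an Epetra_MpiComm ...
MPI_Comm_free (&halfComm);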
Example #3
int
main ( int argc, char** argv )
{

#ifdef HAVE_MPI
    MPI_Init (&argc, &argv);
    Epetra_MpiComm Comm (MPI_COMM_WORLD);
    if ( Comm.MyPID() == 0 )
    {
        std::cout << "% using MPI" << std::endl;
    }
#else
    Epetra_SerialComm Comm;
    cout << "% using serial Version" << endl;
#endif


    EnsightToHdf5 es ( argc, argv );
    es.run();

#ifdef HAVE_MPI
    MPI_Finalize();
#endif
    return ( EXIT_SUCCESS );
}
int
main (int argc, char *argv[])
{
  using std::cout;
  using std::endl;

#ifdef HAVE_MPI
  MPI_Init (&argc, &argv);
  Epetra_MpiComm comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm comm;
#endif // HAVE_MPI

  const int myRank = comm.MyPID ();
  const int numProcs = comm.NumProc ();

  if (myRank == 0) {
    // Print out the Epetra software version.
    cout << Epetra_Version () << endl << endl
         << "Total number of processes: " << numProcs << endl;
  }

  example (comm); // Run the whole example.

  // This tells the Trilinos test framework that the test passed.
  if (myRank == 0) {
    cout << "End Result: TEST PASSED" << endl;
  }

#ifdef HAVE_MPI
  (void) MPI_Finalize ();
#endif // HAVE_MPI

  return 0;
}
int
main (int argc, char *argv[])
{
  // These "using" declarations make the code more concise, in that
  // you don't have to write the namespace along with the class or
  // object name.  This is especially helpful with commonly used
  // things like std::endl.
  using std::cout;
  using std::endl;

  // We assume that your code calls MPI_Init.  It's bad form
  // to ignore the error codes returned by MPI functions, but
  // we do so here for brevity.
  (void) MPI_Init (&argc, &argv);

  // This code takes the place of whatever you do to get an MPI_Comm.
  MPI_Comm yourComm = MPI_COMM_WORLD;

  // If your code plans to use MPI on its own, as well as through
  // Trilinos, you should strongly consider giving Trilinos a copy
  // of your MPI_Comm (created via MPI_Comm_dup).  Trilinos may in
  // the future duplicate the MPI_Comm automatically, but it does
  // not currently do this.

  // Wrap the MPI_Comm.  You are responsible for calling MPI_Comm_free
  // on your MPI_Comm after use, if necessary.  (It's not necessary or
  // legal to do this for built-in communicators like MPI_COMM_WORLD
  // or MPI_COMM_SELF.)
  Epetra_MpiComm comm (yourComm);

  // Epetra_Comm has methods that wrap basic MPI functionality.
  // MyPID() is equivalent to MPI_Comm_rank; it returns my process'
  // rank.  NumProc() is equivalent to MPI_Comm_size; it returns the
  // total number of processes in the communicator.
  const int myRank = comm.MyPID ();
  const int numProcs = comm.NumProc ();

  if (myRank == 0) {
    cout << "Total number of processes: " << numProcs << endl;
  }

  // Do something with the new Epetra communicator.
  exampleRoutine (comm, cout);

  // This tells the Trilinos test framework that the test passed.
  if (myRank == 0) {
    cout << "End Result: TEST PASSED" << endl;
  }

  // If you need to call MPI_Comm_free on your MPI_Comm, now would be
  // the time to do so, before calling MPI_Finalize.

  // Since you called MPI_Init, you are responsible for calling
  // MPI_Finalize after you are done using MPI.
  (void) MPI_Finalize ();
  return 0;
}
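The comments above recommend handing Trilinos a duplicate of your communicator rather than the original. A minimal sketch of that pattern, using only standard MPI calls (error codes ignored for brevity, as above):

// Duplicate the communicator so Trilinos' messages cannot collide
// with your application's own MPI traffic.
MPI_Comm trilinosComm;
(void) MPI_Comm_dup (yourComm, &trilinosComm);
Epetra_MpiComm comm (trilinosComm);
// ... use comm with Trilinos ...
// Free the duplicate after use, before MPI_Finalize.  (Never call
// MPI_Comm_free on built-ins like MPI_COMM_WORLD.)
(void) MPI_Comm_free (&trilinosComm);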
void reportAverageTimes (Epetra_MpiComm& myEpetraComm) {
  double myTime (0.0), globalSumTime (0.0);
  for (auto it : timeNames) {
    myTime = accumulatedTimes[it.first];
    globalSumTime = 0.0;
    myEpetraComm.SumAll (&myTime, &globalSumTime, 1);
    if (myEpetraComm.MyPID () == 0) {
      std::cout << "Average " << timeNames[it.first]
                << " time per iteration, averaged over all processors, was: "
                << (globalSumTime / myEpetraComm.NumProc ()) / NUM_ITERATIONS
                << std::endl;
    }
  }
}
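reportAverageTimes assumes a few file-scope names: timeNames, accumulatedTimes, and NUM_ITERATIONS. A plausible set of declarations (hypothetical; the original file defines its own):

#include <map>
#include <string>

// Labels for each timed phase, keyed by an integer timer ID.
static std::map<int, std::string> timeNames = {
  {0, "assembly"},
  {1, "solve"}
};
// Per-process accumulated wall-clock time for each timer ID,
// filled in elsewhere during the iteration loop.
static std::map<int, double> accumulatedTimes;
static const int NUM_ITERATIONS = 100; // hypothetical iteration count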
Example #7
int main ( int argc, char** argv )
{
    //! Initializing Epetra communicator
    MPI_Init (&argc, &argv);
    Epetra_MpiComm Comm (MPI_COMM_WORLD);
    if ( Comm.MyPID() == 0 )
    {
        cout << "% using MPI" << endl;
    }
    Heart heart ( argc, argv );
    heart.run();

    //! Finalizing Epetra communicator
    MPI_Finalize();
    return ( EXIT_SUCCESS );
}
Example #8
int main(int argc, char **argv)
{
  //
  //  If --enable-mpi, an MPI communicator is used, otherwise a serial
  //  stub communicator is used.  
  //
#ifdef EPETRA_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  //
  //  Print out a summary line followed by a "Hello" line from each process
  //

  if (Comm.MyPID()==0)
    cout << New_Package_Version() << endl << endl;

  Newp_Hello Hello( Comm ) ; 
  Hello.Print( cout );


  //
  //  If --enable-newp_swahili is set, HAVE_NEWP_SWAHILI is set in 
  //    New_Package_config.h which is included by Newp_Hello.h and hence:
  //      Print out a summary line followed by a "Jambo" line from each process
  //
#ifdef HAVE_NEWP_SWAHILI
  Newp_Jambo Jambo( Comm ) ; 
  Jambo.Print( cout );
#endif

#ifdef EPETRA_MPI
  MPI_Finalize();
#endif
  return 0;
}
Example #9
int
main ( int argc, char** argv )
{

    bool verbose (false);
#ifdef HAVE_MPI
    MPI_Init (&argc, &argv);
    Epetra_MpiComm Comm (MPI_COMM_WORLD);
    if ( Comm.MyPID() == 0 )
    {
        verbose = true;
    }
#else
    Epetra_SerialComm Comm;
    verbose = true;
#endif

    NavierStokes<RegionMesh<LinearTriangle>, KimMoin >
    ns ( argc, argv, "dataKimMoin", "kimMoin" );

    ns.run();

    if (verbose)
    {
        std::cout << "End Result: TEST PASSED" << std::endl;
    }

#ifdef HAVE_MPI
    if (verbose)
    {
        std::cout << "MPI Finalization" << std::endl;
    }
    MPI_Finalize();
#endif
    return ( EXIT_SUCCESS );
}
int 
main (int argc, char *argv[])
{
  // These "using" statements make the code a bit more concise.
  using std::cout;
  using std::endl;

  int ierr = 0;

  // If Trilinos was built with MPI, initialize MPI, otherwise
  // initialize the serial "communicator" that stands in for MPI.
#ifdef EPETRA_MPI
  MPI_Init (&argc,&argv);
  Epetra_MpiComm Comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  const int MyPID = Comm.MyPID();
  const int NumProc = Comm.NumProc();
  // We only allow (MPI) Process 0 to write to stdout.
  const bool verbose = (MyPID == 0);
  const int NumGlobalElements = 100;

  if (verbose)
    cout << Epetra_Version() << endl << endl;

  // Asking the Epetra_Comm to print itself is a good test for whether
  // you are running in an MPI environment.  However, it will print
  // something on all MPI processes, so you should remove it for a
  // large-scale parallel run.
  cout << Comm << endl;

  if (NumGlobalElements < NumProc)
    {
      if (verbose)
        cout << "numGlobalBlocks = " << NumGlobalElements 
             << " cannot be < number of processors = " << NumProc << endl;
      std::exit (EXIT_FAILURE);
    }

  // Construct a Map that puts approximately the same number of rows
  // of the matrix A on each processor.
  Epetra_Map Map (NumGlobalElements, 0, Comm);

  // Get update list and number of local equations from newly created Map.
  int NumMyElements = Map.NumMyElements();

  std::vector<int> MyGlobalElements(NumMyElements);
  Map.MyGlobalElements(&MyGlobalElements[0]);

  // NumNz[i] is the number of nonzero elements in row i of the sparse
  // matrix on this MPI process.  Epetra_CrsMatrix uses this to figure
  // out how much space to allocate.
  std::vector<int> NumNz (NumMyElements);

  // We are building a tridiagonal matrix where each row contains the
  // nonzero elements (-1 2 -1).  Thus, we need 2 off-diagonal terms,
  // except for the first and last row of the matrix.
  for (int i = 0; i < NumMyElements; ++i)
    if (MyGlobalElements[i] == 0 || MyGlobalElements[i] == NumGlobalElements-1)
      NumNz[i] = 2; // First or last row
    else
      NumNz[i] = 3; // Neither the first nor the last row

  // Create the Epetra_CrsMatrix.
  Epetra_CrsMatrix A (Copy, Map, &NumNz[0]);

  //
  // Add rows to the sparse matrix one at a time.
  //
  std::vector<double> Values(2);
  Values[0] = -1.0; Values[1] = -1.0;
  std::vector<int> Indices(2);
  const double two = 2.0;
  int NumEntries;

  for (int i = 0; i < NumMyElements; ++i)
    {
      if (MyGlobalElements[i] == 0)
        { // The first row of the matrix.
          Indices[0] = 1;
          NumEntries = 1;
        }
      else if (MyGlobalElements[i] == NumGlobalElements - 1)
        { // The last row of the matrix.
          Indices[0] = NumGlobalElements-2;
          NumEntries = 1;
        }
      else
        { // Any row of the matrix other than the first or last.
          Indices[0] = MyGlobalElements[i]-1;
          Indices[1] = MyGlobalElements[i]+1;
          NumEntries = 2;
        }
      ierr = A.InsertGlobalValues(MyGlobalElements[i], NumEntries, &Values[0], &Indices[0]);
      assert (ierr==0);
      // Insert the diagonal entry.
      ierr = A.InsertGlobalValues(MyGlobalElements[i], 1, &two, &MyGlobalElements[i]);
      assert(ierr==0);
    }

  // Finish up.  We can call FillComplete() with no arguments, because
  // the matrix is square.
  ierr = A.FillComplete ();
  assert (ierr==0);

  // Parameters for the power method.
  const int niters = NumGlobalElements*10;
  const double tolerance = 1.0e-2;

  //
  // Run the power method.  Keep track of the flop count and the total
  // elapsed time.
  //
  Epetra_Flops counter;
  A.SetFlopCounter(counter);
  Epetra_Time timer(Comm);
  double lambda = 0.0;
  ierr += powerMethod (lambda, A, niters, tolerance, verbose);
  double elapsedTime = timer.ElapsedTime ();
  double totalFlops = counter.Flops ();
  // Mflop/s: Million floating-point arithmetic operations per second.
  double Mflop_per_s = totalFlops / elapsedTime / 1000000.0;

  if (verbose) 
    cout << endl << endl << "Total Mflop/s for first solve = " 
         << Mflop_per_s << endl<< endl;

  // Increase the first (0,0) diagonal entry of the matrix.
  if (verbose) 
    cout << endl << "Increasing magnitude of first diagonal term, solving again"
         << endl << endl << endl;

  if (A.MyGlobalRow (0)) {
    int numvals = A.NumGlobalEntries (0);
    std::vector<double> Rowvals (numvals);
    std::vector<int> Rowinds (numvals);
    A.ExtractGlobalRowCopy (0, numvals, numvals, &Rowvals[0], &Rowinds[0]); // Get A(0,0)
    for (int i = 0; i < numvals; ++i) 
      if (Rowinds[i] == 0) 
        Rowvals[i] *= 10.0;

    A.ReplaceGlobalValues (0, numvals, &Rowvals[0], &Rowinds[0]);
  }

  //
  // Run the power method again.  Keep track of the flop count and the
  // total elapsed time.
  //
  lambda = 0.0;
  timer.ResetStartTime();
  counter.ResetFlops();
  ierr += powerMethod (lambda, A, niters, tolerance, verbose);
  elapsedTime = timer.ElapsedTime();
  totalFlops = counter.Flops();
  Mflop_per_s = totalFlops / elapsedTime / 1000000.0;

  if (verbose) 
    cout << endl << endl << "Total Mflop/s for second solve = " 
         << Mflop_per_s << endl << endl;

#ifdef EPETRA_MPI
  MPI_Finalize();
#endif

  return ierr;
}
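The powerMethod routine called twice above is defined elsewhere in this example. A sketch of a conventional implementation matching the signature used above (assumptions: returns 0 on convergence, nonzero otherwise, and checks the residual every 100 iterations):

int
powerMethod (double& lambda, Epetra_CrsMatrix& A, const int niters,
             const double tolerance, const bool verbose)
{
  Epetra_Vector q (A.RowMap ());
  Epetra_Vector z (A.RowMap ());
  Epetra_Vector resid (A.RowMap ());

  z.Random (); // Random initial guess for the eigenvector.
  double normz = 0.0;
  double residual = 0.0;
  int ierr = 1; // Not yet converged.
  for (int iter = 0; iter < niters; ++iter) {
    z.Norm2 (&normz);         // normz  := ||z||_2
    q.Scale (1.0 / normz, z); // q      := z / normz
    A.Multiply (false, q, z); // z      := A * q
    q.Dot (z, &lambda);       // lambda := q' * z  (Rayleigh quotient)
    if (iter % 100 == 0 || iter + 1 == niters) {
      resid.Update (1.0, z, -lambda, q, 0.0); // resid := A*q - lambda*q
      resid.Norm2 (&residual);
      if (verbose)
        std::cout << "Iter = " << iter << "  Lambda = " << lambda
                  << "  ||A*q - lambda*q||_2 = " << residual << std::endl;
      if (residual < tolerance) {
        ierr = 0; // Converged.
        break;
      }
    }
  }
  return ierr;
}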
Example #11
// *************************************************************
// main program - This benchmark code reads a Harwell-Boeing data
//                set and finds the minimal eigenvalue of the matrix
//                using inverse iteration.
// *************************************************************
int main(int argc, char *argv[]) {

#ifdef EPETRA_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  cout << Comm << endl;

  int MyPID = Comm.MyPID();

  bool verbose = false;
  if (MyPID==0) verbose = true; // Print out detailed results (turn off for best performance)

  if(argc != 2) {
    if (verbose) cerr << "Usage: " << argv[0] << " HB_data_file" << endl;
    exit(1); // Error
  }

  // Define pointers that will be set by HB read function

  Epetra_Map * readMap;
  Epetra_CrsMatrix * readA;
  Epetra_Vector * readx;
  Epetra_Vector * readb;
  Epetra_Vector * readxexact;

  // Call function to read in HB problem
  Trilinos_Util_ReadHb2Epetra(argv[1], Comm, readMap, readA, readx, readb, readxexact);

  // Not interested in x, b or xexact for an eigenvalue problem
  delete readx;
  delete readb;
  delete readxexact;

#ifdef EPETRA_MPI // If running in parallel, we need to distribute matrix across all PEs.

  // Create uniform distributed map
  Epetra_Map map(readMap->NumGlobalElements(), 0, Comm);

  // Create Exporter to distribute read-in matrix and vectors

  Epetra_Export exporter(*readMap, map);
  Epetra_CrsMatrix A(Copy, map, 0);

  A.Export(*readA, exporter, Add);
  assert(A.FillComplete()==0);

  delete readA;
  delete readMap;

#else // If not running in parallel, we do not need to distribute the matrix
  Epetra_CrsMatrix & A = *readA;
#endif

  // Create flop counter to collect all FLOPS
  Epetra_Flops counter;
  A.SetFlopCounter(counter);

  double lambda = 0; // Minimal eigenvalue returned here
  // Call inverse iteration solver
  Epetra_Time timer(Comm);
  invIteration(A, lambda, verbose);
  double elapsedTime = timer.ElapsedTime();
  double totalFlops = counter.Flops();
  double MFLOPS = totalFlops/elapsedTime/1000000.0;


  cout << endl
       << "*************************************************" << endl
       << " Approximate smallest eigenvalue = " << lambda << endl
       << "    Total Time    = " << elapsedTime << endl
       << "    Total FLOPS   = " << totalFlops << endl
       << "    Total MFLOPS  = " << MFLOPS << endl
       << "*************************************************" << endl;

  // All done
#ifdef EPETRA_MPI
  MPI_Finalize();
#else
  delete readA;
  delete readMap;
#endif

  return (0);
}
Example #12
int main(int argc, char *argv[]) {

#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  // The problem is defined on a 2D grid, global size is nx * nx.
  int nx = 30;
  Teuchos::ParameterList GaleriList;
  GaleriList.set("nx", nx);
  GaleriList.set("ny", nx * Comm.NumProc());
  GaleriList.set("mx", 1);
  GaleriList.set("my", Comm.NumProc());
  Teuchos::RefCountPtr<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap("Cartesian2D", Comm, GaleriList) );
  Teuchos::RefCountPtr<Epetra_CrsMatrix> A = Teuchos::rcp( Galeri::CreateCrsMatrix("Laplace2D", &*Map, GaleriList) );
  Teuchos::RefCountPtr<Epetra_MultiVector> LHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) );
  Teuchos::RefCountPtr<Epetra_MultiVector> RHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) );
  LHS->PutScalar(0.0); RHS->Random();

  // ========================================= //
  // Compare IC preconditioners to no precond. //
  // ----------------------------------------- //

  const double tol = 1e-5;
  const int maxIter = 500;

  // Baseline: No preconditioning
  // Compute number of iterations, to compare to IC later.

  // Here we create an AztecOO object
  LHS->PutScalar(0.0);

  AztecOO solver;
  solver.SetUserMatrix(&*A);
  solver.SetLHS(&*LHS);
  solver.SetRHS(&*RHS);
  solver.SetAztecOption(AZ_solver,AZ_cg);
  //solver.SetPrecOperator(&*PrecDiag);
  solver.SetAztecOption(AZ_output, 16); 
  solver.Iterate(maxIter, tol);

  int Iters = solver.NumIters();
  //cout << "No preconditioner iterations: " << Iters << endl;

#if 0 
  // Not sure how to use Ifpack_CrsRick - leave out for now.
  //
  // I want to test unusual values to be sure that they have the same
  // influence on the algorithms, both old and new.
  int    LevelFill = 2;
  double DropTol = 0.3333;
  double Condest;
  
  Teuchos::RefCountPtr<Ifpack_CrsRick> IC;
  Ifpack_IlukGraph mygraph (A->Graph(), 0, 0);
  IC = Teuchos::rcp( new Ifpack_CrsRick(*A, mygraph) );
  IC->SetAbsoluteThreshold(0.00123);
  IC->SetRelativeThreshold(0.9876);
  // Init values from A
  IC->InitValues(*A);
  // compute the factors
  IC->Factor();
  // and now estimate the condition number
  IC->Condest(false,Condest);
  
  if( Comm.MyPID() == 0 ) {
    cout << "Condition number estimate (level-of-fill = "
	 << LevelFill <<  ") = " << Condest << endl;
  }

  // Define label for printing out during the solve phase
  std::string label = "Ifpack_CrsRick Preconditioner: LevelFill = " + toString(LevelFill) + 
                                                 " Overlap = 0"; 
  IC->SetLabel(label.c_str());
  
  // Here we create an AztecOO object
  LHS->PutScalar(0.0);

  AztecOO solver;
  solver.SetUserMatrix(&*A);
  solver.SetLHS(&*LHS);
  solver.SetRHS(&*RHS);
  solver.SetAztecOption(AZ_solver,AZ_cg);
  solver.SetPrecOperator(&*IC);
  solver.SetAztecOption(AZ_output, 16); 
  solver.Iterate(maxIter, tol);

  int RickIters = solver.NumIters();
  //cout << "Ifpack_Rick iterations: " << RickIters << endl;

  // Compare to no preconditioning
  if (RickIters > Iters/2)
    IFPACK_CHK_ERR(-1);

#endif

  //////////////////////////////////////////////////////
  // Same test with Ifpack_IC
  // This is Crout threshold Cholesky, so different than IC(0)

  Ifpack Factory;
  Teuchos::RefCountPtr<Ifpack_Preconditioner> PrecIC = Teuchos::rcp( Factory.Create("IC", &*A) );

  Teuchos::ParameterList List;
  //List.get("fact: ict level-of-fill", 2.);
  //List.get("fact: drop tolerance", 0.3333);
  //List.get("fact: absolute threshold", 0.00123);
  //List.get("fact: relative threshold", 0.9876);
  //List.get("fact: relaxation value", 0.0);

  IFPACK_CHK_ERR(PrecIC->SetParameters(List));
  IFPACK_CHK_ERR(PrecIC->Compute());

  // Here we create an AztecOO object
  LHS->PutScalar(0.0);

  //AztecOO solver;
  solver.SetUserMatrix(&*A);
  solver.SetLHS(&*LHS);
  solver.SetRHS(&*RHS);
  solver.SetAztecOption(AZ_solver,AZ_cg);
  solver.SetPrecOperator(&*PrecIC);
  solver.SetAztecOption(AZ_output, 16); 
  solver.Iterate(maxIter, tol);

  int ICIters = solver.NumIters();
  //cout << "Ifpack_IC iterations: " << ICIters << endl;

  // Compare to no preconditioning
  if (ICIters > Iters/2)
    IFPACK_CHK_ERR(-1);

#if 0
  //////////////////////////////////////////////////////
  // Same test with Ifpack_ICT 
  // This is another threshold Cholesky

  Teuchos::RefCountPtr<Ifpack_Preconditioner> PrecICT = Teuchos::rcp( Factory.Create("ICT", &*A) );

  //Teuchos::ParameterList List;
  //List.get("fact: level-of-fill", 2);
  //List.get("fact: drop tolerance", 0.3333);
  //List.get("fact: absolute threshold", 0.00123);
  //List.get("fact: relative threshold", 0.9876);
  //List.get("fact: relaxation value", 0.0);

  IFPACK_CHK_ERR(PrecICT->SetParameters(List));
  IFPACK_CHK_ERR(PrecICT->Compute());

  // Here we create an AztecOO object
  LHS->PutScalar(0.0);

  solver.SetUserMatrix(&*A);
  solver.SetLHS(&*LHS);
  solver.SetRHS(&*RHS);
  solver.SetAztecOption(AZ_solver,AZ_cg);
  solver.SetPrecOperator(&*PrecICT);
  solver.SetAztecOption(AZ_output, 16); 
  solver.Iterate(maxIter, tol);

  int ICTIters = solver.NumIters();
  //cout << "Ifpack_ICT iterations: " << ICTIters << endl;

  // Compare to no preconditioning
  if (ICTIters > Iters/2)
    IFPACK_CHK_ERR(-1);
#endif

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return(EXIT_SUCCESS);
}
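The commented-out List.get calls above show the tunable factorization parameters. A short sketch of setting them explicitly before recomputing the preconditioner, assuming the standard Ifpack parameter names from those comments:

  Teuchos::ParameterList ICList;
  ICList.set ("fact: ict level-of-fill", 2.0);    // more fill, stronger precond.
  ICList.set ("fact: drop tolerance", 0.3333);    // drop small entries
  ICList.set ("fact: absolute threshold", 0.00123);
  ICList.set ("fact: relative threshold", 0.9876);
  IFPACK_CHK_ERR (PrecIC->SetParameters (ICList));
  IFPACK_CHK_ERR (PrecIC->Compute ());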
Example #13
int main(int argc, char *argv[]) {

#ifdef EPETRA_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  int MyPID = Comm.MyPID();

  bool verbose = false;
  if (MyPID==0) verbose = true;

  if (verbose)
    cout << EpetraExt::EpetraExt_Version() << endl << endl;

  cout << Comm << endl;

  if(argc < 2 && verbose) {
    cerr << "Usage: " << argv[0] << " HB_filename" << endl;
    return(1);
  }

  // Uncomment the next three lines to debug in mpi mode
  //int tmp;
  //if (MyPID==0) cin >> tmp;
  //Comm.Barrier();

  Epetra_Map * readMap;
  Epetra_CrsMatrix * readA; 
  Epetra_Vector * readx; 
  Epetra_Vector * readb;
  Epetra_Vector * readxexact;
   
  // Call routine to read in HB problem
  Trilinos_Util_ReadHb2Epetra(argv[1], Comm, readMap, readA, readx, readb, readxexact);

  // Create uniform distributed map
  Epetra_Map map(readMap->NumGlobalElements(), 0, Comm);

  // Create Exporter to distribute read-in matrix and vectors

  Epetra_Export exporter(*readMap, map);
  Epetra_CrsMatrix A(Copy, map, 0);
  Epetra_Vector x(map);
  Epetra_Vector b(map);
  Epetra_Vector xexact(map);

  Epetra_Time FillTimer(Comm);
  x.Export(*readx, exporter, Add);
  b.Export(*readb, exporter, Add);
  xexact.Export(*readxexact, exporter, Add);
  Comm.Barrier();
  double vectorRedistributeTime = FillTimer.ElapsedTime();
  A.Export(*readA, exporter, Add);
  Comm.Barrier();
  double matrixRedistributeTime = FillTimer.ElapsedTime() - vectorRedistributeTime;
  assert(A.FillComplete()==0);    
  Comm.Barrier();
  double fillCompleteTime = FillTimer.ElapsedTime() - matrixRedistributeTime;
  if (Comm.MyPID()==0)	{
    cout << "\n\n****************************************************" << endl;
    cout << "\n Vector redistribute  time (sec) = " << vectorRedistributeTime<< endl;
    cout << "    Matrix redistribute time (sec) = " << matrixRedistributeTime << endl;
    cout << "    Transform to Local  time (sec) = " << fillCompleteTime << endl<< endl;
  }
  Epetra_Vector tmp1(*readMap);
  Epetra_Vector tmp2(map);
  readA->Multiply(false, *readxexact, tmp1);

  A.Multiply(false, xexact, tmp2);
  double residual;
  tmp1.Norm2(&residual);
  if (verbose) cout << "Norm of Ax from file            = " << residual << endl;
  tmp2.Norm2(&residual);
  if (verbose) cout << "Norm of Ax after redistribution = " << residual << endl << endl << endl;

  //cout << "A from file = " << *readA << endl << endl << endl;

  //cout << "A after dist = " << A << endl << endl << endl;

  delete readA;
  delete readx;
  delete readb;
  delete readxexact;
  delete readMap;

  Comm.Barrier();

  EpetraExt::RowMatrixToMatrixMarketFile("test.mm", A, "test matrix", "This is a test matrix");
#ifdef EPETRA_MPI
  MPI_Finalize();
#endif

  return 0;
}
Example #14
int main(int argc, char *argv[]) {

#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm comm;
#endif

  int MyPID = comm.MyPID();

  bool verbose = false;
  bool verbose1 = false; 
  // Check if we should print results to standard out
  if (argc > 1) {
    if ((argv[1][0] == '-') && (argv[1][1] == 'v')) {
      verbose1 = true;
      if (MyPID==0) verbose = true;
    }
  }

  if (verbose1) cout << comm << endl;


  // Uncomment the next three lines to debug in mpi mode
  //int tmp;
  //if (MyPID==0) cin >> tmp;
  //comm.Barrier();

  Epetra_CrsMatrix * A; 
  EPETRA_CHK_ERR(EpetraExt::MatlabFileToCrsMatrix("A.dat", comm, A));

  Epetra_Vector  x(A->OperatorDomainMap()); 
  Epetra_Vector  b(A->OperatorRangeMap());
  x.Random();
  A->Apply(x,b); // Generate RHS from x
  Epetra_Vector xx(x); // Copy x to xx for later use

  Epetra_LinearProblem problem(A, &x, &b);
  // Construct a solver object for this problem

  AztecOO solver(problem);
  solver.SetAztecOption(AZ_precond, AZ_none);
  if (!verbose1) solver.SetAztecOption(AZ_output, AZ_none);
  solver.SetAztecOption(AZ_kspace, A->NumGlobalRows());
  AztecOO_Operator AOpInv(&solver, A->NumGlobalRows());
  Epetra_InvOperator AInvOp(&AOpInv);

  EPETRA_CHK_ERR(EpetraExt::OperatorToMatlabFile("Ainv.dat", AInvOp));

  comm.Barrier();

  Epetra_CrsMatrix * AInv; 
  EPETRA_CHK_ERR(EpetraExt::MatlabFileToCrsMatrix("Ainv.dat", comm, AInv));

  EPETRA_CHK_ERR(AInv->Apply(b,x));

  EPETRA_CHK_ERR(x.Update(1.0, xx, -1.0));
  double residual = 0.0;
  EPETRA_CHK_ERR(x.Norm2(&residual));
  if (verbose) cout << "Norm of difference between computed x and exact x = " << residual << endl;
  int ierr = checkValues(residual,0.0,"Norm of difference between computed A1x1 and A1x1 from file", verbose);

  
  delete A;
  delete AInv;


#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return(ierr);
}
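checkValues is a small test helper defined elsewhere in this suite. A plausible sketch of such a helper (hypothetical; the tolerance here is chosen only for illustration):

#include <cmath>
#include <iostream>

// Return 0 if computed agrees with expected to within a small
// tolerance, 1 otherwise; optionally report the outcome.
int checkValues (double computed, double expected,
                 const char* message, bool verbose)
{
  const double tol = 1.0e-5; // hypothetical tolerance
  const bool ok = std::fabs (computed - expected) < tol;
  if (verbose)
    std::cout << message << (ok ? ": OK" : ": FAILED") << std::endl;
  return ok ? 0 : 1;
}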
Example #15
int main(int argc, char *argv[]) {
cout << "going to setup MPI...\n";

#ifdef EPETRA_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm comm;
#endif
  cout << "mpit setup complete\n";

  int MyPID = comm.MyPID();

  char s [BUFSIZE] ;
  char matlabBuffer [MATLABBUF];
  cout << "going to init matlab\n";
  EpetraExt::EpetraExt_MatlabEngine * enginePtr = new EpetraExt::EpetraExt_MatlabEngine(comm);
  EpetraExt::EpetraExt_MatlabEngine & engine = *enginePtr;
  cout << "matlab started\n";
  
  /* GetCrsMatrix test
  engine.EvalString("CRSM=sparse(eye(8,10))", matlabBuffer, MATLABBUF);
  cout << matlabBuffer << endl;
  int myM=4;
  int M = myM * comm.NumProc();
  int N = 10;  
  Epetra_Map getMap (M, 0, comm);
  Epetra_Map colMap(N, N, 0, comm);
  Epetra_CrsMatrix getCRSM (Copy, getMap, colMap, N);
  double colValue = 0;
  for(int row=myM*MyPID; row < myM*(MyPID+1); row++) {
    getCRSM.InsertGlobalValues(row, 1, &colValue, &row);
  }
  getCRSM.FillComplete(colMap, getMap);
  //getCRSM.FillComplete();
  int ierr = engine.GetCrsMatrix("CRSM", getCRSM, false);
  if (ierr) {
    cout << "engine.GetCrsMatrix(\"CRSM\", getCRSM, false) failed" << endl;
  }
  
  cout << getCRSM << endl;
  
  engine.EvalString("whos", matlabBuffer, MATLABBUF);
  cout << matlabBuffer << endl;
  */
  
  /* GetIntSerialDenseMatrix test
  engine.EvalString("ISDM=rand(8,2)*100", matlabBuffer, MATLABBUF);
  cout << matlabBuffer << endl;
  int procToGet = 1;
  int M = 8;
  int N = 2;  
  int* A = new int[M*N];
  Epetra_IntSerialDenseMatrix getISDM (View, A, M, M, N);
  int ierr = engine.GetIntSerialDenseMatrix("ISDM", getISDM, procToGet);
  if (ierr) {
    cout << "engine.GetIntSerialDenseMatrix(\"ISDM\", getISDM, procToGet) failed" << endl;
  }
  
  if (MyPID == 1) cout << getISDM << endl;
  */
  
  /* GetSerialDenseMatrix test
  engine.EvalString("SDM=rand(8,2)", matlabBuffer, MATLABBUF);
  cout << matlabBuffer << endl;
  int procToGet = 1;
  int M = 8;
  int N = 2;  
  double* A = new double[M*N];
  Epetra_SerialDenseMatrix getSDM (View, A, M, M, N);
  int ierr = engine.GetSerialDenseMatrix("SDM", getSDM, procToGet);
  if (ierr) {
    cout << "engine.GetSerialDenseMatrix(\"SDM\", getSDM, procToGet) failed" << endl;
  }
  
  if (MyPID == 1) cout << getSDM << endl;
  */
  
  /* GetMultiVector test
  if (comm.NumProc() != 2) {
    if (MyPID == 0) cout << "Error: this test must be run with exactly two PE." << endl;
    delete &engine;
    #ifdef EPETRA_MPI
    MPI_Finalize();
    #endif
    return(-1);
  }
  engine.EvalString("MV=rand(8,2)", matlabBuffer, MATLABBUF);
  cout << matlabBuffer << endl;
  int myM = 4;
  int M = myM * comm.NumProc();
  int N = 2;
  Epetra_Map getMap (M, 0, comm);
  double* A = new double[myM*N];
  Epetra_MultiVector getMV (View, getMap, A, myM, N);
  cout << "MultiVector created" << endl;
  int ierr = engine.GetMultiVector("MV", getMV);
  if (ierr) {
    cout << "engine.GetMultiVector(\"MV\", getMV) failed" << endl;
  }
  
  cout << getMV << endl;
  */
  
  /* CrsMatrix test
  int numGlobalElements = 8;
  int numMyElements = 8/comm.NumProc();
  int M=numGlobalElements/comm.NumProc();
  int N=10;
  int* myGlobalElements = new int[M];
  
  int minGID = 0;
  int startIndex = minGID + M*comm.MyPID();
  for(int i=0; i < M; i++) {
      myGlobalElements[i] = startIndex++;
  }
  

  int* colMapGIDs = new int[N];
  for (int i=0; i < N; i++) {
    colMapGIDs[i] = i;
  }
  //Epetra_Map map (numGlobalElements, numMyElements, myGlobalElements, minGID, comm);
  Epetra_Map map (numGlobalElements, minGID, comm);
  Epetra_Map colMap (N, N, colMapGIDs, 0, comm);		   

  Epetra_CrsMatrix crsMatrix (Copy, map, colMap, N);
  
  cout << "crs matrix created\n";
  
  //int indices[8] = {-4,-3,-2,-1,0,1,2,3};
  int indices[10] = {0,1,2,3,4,5,6,7,8,9};
  double* values = new double[N];
  double value = M * N * comm.MyPID();
  for (int i=0; i < M; i++) {
    if (i % 2 == 0) {
      for (int j=0; j < N; j++) {
      	values[j] = value++;
      }
      
      crsMatrix.InsertGlobalValues(myGlobalElements[i], N, values, indices);
    }
  }
  
  cout << "done putting values\n";
  crsMatrix.FillComplete(colMap, map);
  cout << "done filling crsMatrix and calling crsMatrix.FillComplete()\n";
  
  cout << crsMatrix;
  
  cout << "done printing crsMatrix\n";
  
  //cout << map;
  //cout << "done printing map\n";
  
  int ierr = engine.PutRowMatrix(crsMatrix, "TEST", true);
  //int ierr = engine.PutBlockMap(map, "TEST", true);
  //cout << "done calling engine.PutRowMatrix(crsMatrix, \"TEST\", false)\n";
  if (ierr != 0) {
    cout << "engine.PutRowMatrix(crsMatrix, \"TEST\") returned nonzero result: " << ierr << "\n";
    return(-1);
  }
  
  */

  /* MultiVector test
  cout << MyPID << " going to do multivector test...\n";
  int numGlobalElements = 100;
  int M = numGlobalElements/comm.NumProc();
  int N = 3;
  int numMyElements = M * N;
  double* A = new double[numMyElements];
  double* Aptr = A;
  int startValue = 0;

  cout << MyPID << " allocated space for A, now filling A\n";
  for(int col=0; col < N; col++) {
	startValue = (col * numGlobalElements) + (M * MyPID);
	for(int row=0; row < M; row++) {
          *Aptr++ = row+startValue;
      }
  }
  cout << MyPID << " A filled\n";

  Epetra_Map map (numGlobalElements, 0, comm);
  Epetra_MultiVector multiVector (Copy, map, A, M, N);
  //cout << multiVector;
  engine.PutMultiVector(multiVector, "TEST");
  */
  
  /*SerialDenseMatrix test
  cout << MyPID << " going to do SerialDenseMatrix test...\n";
  double* A = new double[30];
  cout << MyPID << " allocated space for A, now filling A\n";
  double* Aptr = A;
  int M = 5;
  int N = 6;
  int startValue = M*N*comm.MyPID();
  for(int i=0; i < M*N; i++) {
      *Aptr++ = i + startValue;
  }
  cout << MyPID << " A filled\n";

  Epetra_SerialDenseMatrix sdMatrix (View, A, M, M, N);
  engine.PutSerialDenseMatrix(sdMatrix, "TEST", 0);
  cout << sdMatrix;
  */

  /* SerialDenseVector test
  double* A = new double[30];
  double* Aptr = A;
  int length = 30;
  for(int i=0; i < length; i++) {
      *Aptr++ = i;
  }

  Epetra_SerialDenseVector sdVector (Copy, A, length);
  engine.PutSerialDenseMatrix(sdVector, "SDVECTOR");
  cout << sdVector;
  */

  /*IntSerialDenseMatrix test
  cout << MyPID << " going to do IntSerialDenseMatrix test...\n";
  int* A = new int[30];
  cout << MyPID << " allocated space for A, now filling A\n";
  int* Aptr = A;
  int M = 5;
  int N = 6;
  int startValue = M*N*comm.MyPID();
  for(int i=0; i < M*N; i++) {
      *Aptr++ = i + startValue;
  }
  cout << MyPID << " A filled\n";
  Epetra_IntSerialDenseMatrix isdMatrix (Copy, A, M, M, N);
  cout << isdMatrix;
  engine.PutIntSerialDenseMatrix(isdMatrix, "TEST", 0);
  */


  /* SerialDenseVector test
  int* A = new int[30];
  int* Aptr = A;
  int length = 30;
  for(int i=0; i < length; i++) {
      *Aptr++ = i;
  }

  Epetra_IntSerialDenseVector isdVector (Copy, A, length);
  engine.PutIntSerialDenseMatrix(isdVector, "ISDVECTOR");
  cout << isdVector;
  */

  /*while(1) {

	// do nothing
	}*/

  /*if (comm.NumProc() == 1) {
  int err;
  while(1) {
      // Prompt the user and get a string
      printf(">> ");
      if (fgets(s, BUFSIZE, stdin) == NULL) {
          printf("Bye\n");
          break ;
      }
      printf ("command :%s:\n", s) ;
      
      // Send the command to MATLAB
      // output goes to stdout
      err = engine.EvalString(s, matlabBuffer, MATLABBUF);
      if (err != 0) {
          printf("there was an error: %d", err);
		  err = 0;
      }
      else {
      	  printf("Matlab Output:\n%s", matlabBuffer);
      }
  }
  }*/

  //delete engine ;

  /*
  engine.EvalString("size(TEST)", matlabBuffer, MATLABBUF);
  cout << matlabBuffer << "\n";
  engine.EvalString("TEST", matlabBuffer, MATLABBUF);
  cout << matlabBuffer << "\n";
  */
  
  cout << "\n" << comm.MyPID() << " all done\n";
  
#ifdef EPETRA_MPI
  MPI_Finalize();
#endif

  delete enginePtr;

  return(0);
}
Example #16
int main(int argc, char *argv[]) {

#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  int MyPID = Comm.MyPID();

  // matrix downloaded from MatrixMarket
  char FileName[] = "../HBMatrices/fidap005.rua";

  Epetra_Map * readMap; // Pointers because of Trilinos_Util_ReadHb2Epetra
  Epetra_CrsMatrix * readA; 
  Epetra_Vector * readx; 
  Epetra_Vector * readb;
  Epetra_Vector * readxexact;
   
  // Call routine to read in HB problem
  Trilinos_Util_ReadHb2Epetra(FileName, Comm, readMap, readA, readx, 
			      readb, readxexact);

  int NumGlobalElements = readMap->NumGlobalElements();

  // Create uniform distributed map
  Epetra_Map map(NumGlobalElements, 0, Comm);

  // Create Exporter to distribute read-in matrix and vectors

  Epetra_Export exporter(*readMap, map);
  Epetra_CrsMatrix A(Copy, map, 0);
  Epetra_Vector x(map);
  Epetra_Vector b(map);
  Epetra_Vector xexact(map);

  Epetra_Time FillTimer(Comm);
  x.Export(*readx, exporter, Add);
  b.Export(*readb, exporter, Add);
  xexact.Export(*readxexact, exporter, Add);
  Comm.Barrier();
  double vectorRedistributeTime = FillTimer.ElapsedTime();
  A.Export(*readA, exporter, Add);
  Comm.Barrier();
  double matrixRedistributeTime = FillTimer.ElapsedTime() - vectorRedistributeTime;
  A.FillComplete();
  Comm.Barrier();
  double fillCompleteTime = FillTimer.ElapsedTime() - matrixRedistributeTime;

  if( MyPID==0 ) {
    cout << "Vector redistribute  time (sec) = "
	 << vectorRedistributeTime<< endl;
    cout << "Matrix redistribute time (sec) = "
	 << matrixRedistributeTime << endl;
    cout << "Transform to Local  time (sec) = "
	 << fillCompleteTime << endl<< endl;
  }

  delete readA;
  delete readx;
  delete readb;
  delete readxexact;
  delete readMap;

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return(EXIT_SUCCESS);
}
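The Export-with-Add pattern used above moves data from the distribution it was read in with (typically all on Process 0) to a uniform distribution, summing any overlapping contributions. The same pattern in miniature, for a single vector (standalone sketch, assuming Comm is the Epetra communicator from the example above):

  const int n = 100;
  // Source map: all n entries owned by Process 0.
  Epetra_Map rootMap (n, (Comm.MyPID () == 0 ? n : 0), 0, Comm);
  // Target map: entries spread uniformly across all processes.
  Epetra_Map uniformMap (n, 0, Comm);

  Epetra_Vector vRoot (rootMap);
  vRoot.PutScalar (1.0);

  // The Export moves data from the source distribution to the target one.
  Epetra_Export exporter (rootMap, uniformMap);
  Epetra_Vector vUniform (uniformMap);
  vUniform.Export (vRoot, exporter, Add);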
Example #17
int main(int argc, char *argv[]) {

#ifdef EPETRA_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  cout << Comm << endl;

  int MyPID = Comm.MyPID();

  bool verbose = false; 
  if (MyPID==0) verbose = true;

  if(argc < 2 && verbose) {
    cerr << "Usage: " << argv[0] 
	 << " HB_filename [level_fill [level_overlap [absolute_threshold [ relative_threshold]]]]" << endl
	 << "where:" << endl
	 << "HB_filename        - filename and path of a Harwell-Boeing data set" << endl
	 << "level_fill         - The amount of fill to use for ILU(k) preconditioner (default 0)" << endl
	 << "level_overlap      - The amount of overlap used for overlapping Schwarz subdomains (default 0)" << endl
	 << "absolute_threshold - The minimum value to place on the diagonal prior to factorization (default 0.0)" << endl
	 << "relative_threshold - The relative amount to perturb the diagonal prior to factorization (default 1.0)" << endl << endl
	 << "To specify a non-default value for one of these parameters, you must specify all" << endl
	 << " preceding values but not any subsequent parameters. Example:" << endl
	 << "ifpackHbSerialMsr.exe mymatrix.hb 1  - loads mymatrix.hb, uses level fill of one, all other values are defaults" << endl
	 << endl;
    return(1);
  }

  // Uncomment the next three lines to debug in mpi mode
  //int tmp;
  //if (MyPID==0) cin >> tmp;
  //Comm.Barrier();

  Epetra_Map * readMap;
  Epetra_CrsMatrix * readA; 
  Epetra_Vector * readx; 
  Epetra_Vector * readb;
  Epetra_Vector * readxexact;
   
  // Call routine to read in HB problem
  Trilinos_Util_ReadHb2Epetra(argv[1], Comm, readMap, readA, readx, readb, readxexact);

  // Create uniform distributed map
  Epetra_Map map(readMap->NumGlobalElements(), 0, Comm);

  // Create Exporter to distribute read-in matrix and vectors

  Epetra_Export exporter(*readMap, map);
  Epetra_CrsMatrix A(Copy, map, 0);
  Epetra_Vector x(map);
  Epetra_Vector b(map);
  Epetra_Vector xexact(map);

  Epetra_Time FillTimer(Comm);
  x.Export(*readx, exporter, Add);
  b.Export(*readb, exporter, Add);
  xexact.Export(*readxexact, exporter, Add);
  Comm.Barrier();
  double vectorRedistributeTime = FillTimer.ElapsedTime();
  A.Export(*readA, exporter, Add);
  Comm.Barrier();
  double matrixRedistributeTime = FillTimer.ElapsedTime() - vectorRedistributeTime;
  assert(A.FillComplete()==0);    
  Comm.Barrier();
  double fillCompleteTime = FillTimer.ElapsedTime() - matrixRedistributeTime;
  if (Comm.MyPID()==0)	{
    cout << "\n\n****************************************************" << endl;
    cout << "\n Vector redistribute  time (sec) = " << vectorRedistributeTime<< endl;
    cout << "    Matrix redistribute time (sec) = " << matrixRedistributeTime << endl;
    cout << "    Transform to Local  time (sec) = " << fillCompleteTime << endl<< endl;
  }
  Epetra_Vector tmp1(*readMap);
  Epetra_Vector tmp2(map);
  readA->Multiply(false, *readxexact, tmp1);

  A.Multiply(false, xexact, tmp2);
  double residual;
  tmp1.Norm2(&residual);
  if (verbose) cout << "Norm of Ax from file            = " << residual << endl;
  tmp2.Norm2(&residual);
  if (verbose) cout << "Norm of Ax after redistribution = " << residual << endl << endl << endl;

  //cout << "A from file = " << *readA << endl << endl << endl;

  //cout << "A after dist = " << A << endl << endl << endl;

  delete readA;
  delete readx;
  delete readb;
  delete readxexact;
  delete readMap;

  Comm.Barrier();

  // Construct ILU preconditioner

  double elapsed_time, total_flops, MFLOPs;
  Epetra_Time timer(Comm);

  int LevelFill = 0;
  if (argc > 2)  LevelFill = atoi(argv[2]);
  if (verbose) cout << "Using Level Fill = " << LevelFill << endl;
  int Overlap = 0;
  if (argc > 3) Overlap = atoi(argv[3]);
  if (verbose) cout << "Using Level Overlap = " << Overlap << endl;
  double Athresh = 0.0;
  if (argc > 4) Athresh = atof(argv[4]);
  if (verbose) cout << "Using Absolute Threshold Value of = " << Athresh << endl;

  double Rthresh = 1.0;
  if (argc > 5) Rthresh = atof(argv[5]);
  if (verbose) cout << "Using Relative Threshold Value of = " << Rthresh << endl;

  Ifpack_IlukGraph * IlukGraph = 0;
  Ifpack_CrsRiluk * ILUK = 0;

  if (LevelFill>-1) {
    elapsed_time = timer.ElapsedTime();
    IlukGraph = new Ifpack_IlukGraph(A.Graph(), LevelFill, Overlap);
    assert(IlukGraph->ConstructFilledGraph()==0);
    elapsed_time = timer.ElapsedTime() - elapsed_time;
    if (verbose) cout << "Time to construct ILUK graph = " << elapsed_time << endl;


    Epetra_Flops fact_counter;
  
    elapsed_time = timer.ElapsedTime();
    ILUK = new Ifpack_CrsRiluk(*IlukGraph);
    ILUK->SetFlopCounter(fact_counter);
    ILUK->SetAbsoluteThreshold(Athresh);
    ILUK->SetRelativeThreshold(Rthresh);
    //assert(ILUK->InitValues()==0);
    int initerr = ILUK->InitValues(A);
    if (initerr!=0) cout << Comm << "InitValues error = " << initerr;
    assert(ILUK->Factor()==0);
    elapsed_time = timer.ElapsedTime() - elapsed_time;
    total_flops = ILUK->Flops();
    MFLOPs = total_flops/elapsed_time/1000000.0;
    if (verbose) cout << "Time to compute preconditioner values = " 
		    << elapsed_time << endl
		    << "MFLOPS for Factorization = " << MFLOPs << endl;
    //cout << *ILUK << endl;
  }
  double Condest;
  if (ILUK!=0) {
    ILUK->Condest(false, Condest);
    if (verbose) cout << "Condition number estimate for this preconditioner = " << Condest << endl;
  }
  int Maxiter = 500;
  double Tolerance = 1.0E-14;

  Epetra_Vector xcomp(map);
  Epetra_Vector resid(map);

  Epetra_Flops counter;
  A.SetFlopCounter(counter);
  xcomp.SetFlopCounter(A);
  b.SetFlopCounter(A);
  resid.SetFlopCounter(A);
  ILUK->SetFlopCounter(A);

  elapsed_time = timer.ElapsedTime();

  BiCGSTAB(A, xcomp, b, ILUK, Maxiter, Tolerance, &residual, verbose);

  elapsed_time = timer.ElapsedTime() - elapsed_time;
  total_flops = counter.Flops();
  MFLOPs = total_flops/elapsed_time/1000000.0;
  if (verbose) cout << "Time to compute solution = " 
		    << elapsed_time << endl
		    << "Number of operations in solve = " << total_flops << endl
		    << "MFLOPS for Solve = " << MFLOPs<< endl << endl;

  resid.Update(1.0, xcomp, -1.0, xexact, 0.0); // resid = xcomp - xexact

  resid.Norm2(&residual);

  if (verbose) cout << "Norm of the difference between exact and computed solutions = " << residual << endl;

  


  if (ILUK!=0) delete ILUK;
  if (IlukGraph!=0) delete IlukGraph;

#ifdef EPETRA_MPI
  MPI_Finalize();
#endif

  return 0;
}
Example #18
int main(int argc, char *argv[]) {

#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  int ierr=HIPS_Initialize(1);
  HIPS_ExitOnError(ierr);

  int MyPID = Comm.MyPID();
  bool verbose = false; 
  if (MyPID==0) verbose = true;

  Teuchos::ParameterList GaleriList;
  int nx = 100; 
  GaleriList.set("nx", nx);
  GaleriList.set("ny", nx * Comm.NumProc());
  //  GaleriList.set("ny", nx);
  GaleriList.set("mx", 1);
  GaleriList.set("my", Comm.NumProc());
                 
  Teuchos::RefCountPtr<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap("Cartesian2D", Comm, GaleriList) );
  Teuchos::RefCountPtr<Epetra_CrsMatrix> A = Teuchos::rcp( Galeri::CreateCrsMatrix("Laplace2D", &*Map, GaleriList) );
  Teuchos::RefCountPtr<Epetra_MultiVector> LHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) );
  Teuchos::RefCountPtr<Epetra_MultiVector> RHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) );
  LHS->PutScalar(0.0); RHS->Random();

  // ============================ //
  // Construct ILU preconditioner //
  // ---------------------------- //

  Teuchos::RefCountPtr<Ifpack_HIPS> RILU;

  RILU = Teuchos::rcp( new Ifpack_HIPS(&*A) );

  Teuchos::ParameterList List;
  List.set("hips: id",0);
  List.set("hips: setup output",2);
  List.set("hips: iteration output",0);
  List.set("hips: drop tolerance",5e-3);
  List.set("hips: graph symmetric",1);
  RILU->SetParameters(List);

  
  RILU->Initialize();
  RILU->Compute();

  // Here we create an AztecOO object
  LHS->PutScalar(0.0);

  int Niters = 50;

  AztecOO solver;
  solver.SetUserMatrix(&*A);
  solver.SetLHS(&*LHS);
  solver.SetRHS(&*RHS);
  solver.SetAztecOption(AZ_solver,AZ_gmres);
  solver.SetPrecOperator(&*RILU);
  solver.SetAztecOption(AZ_output, 1); 
  solver.Iterate(Niters, 1.0e-8);

  int OldIters = solver.NumIters();


  HIPS_Finalize();
  
#ifdef HAVE_MPI
  MPI_Finalize() ;
#endif

  return(EXIT_SUCCESS);
}
Example #19
void build_test_matrix(Epetra_MpiComm & Comm, int test_number, Epetra_CrsMatrix *&A){
  int NumProc = Comm.NumProc();
  int MyPID   = Comm.MyPID();

  if(test_number==1){
    // Case 1: Tridiagonal
    int NumMyEquations = 100;

    int NumGlobalEquations = (NumMyEquations * NumProc) + EPETRA_MIN(NumProc,3);
    if(MyPID < 3)  NumMyEquations++;

    // Construct a Map that puts approximately the same Number of equations on each processor
    Epetra_Map Map(NumGlobalEquations, NumMyEquations, 0, Comm);

    // Get update list and number of local equations from newly created Map
    int* MyGlobalElements = new int[Map.NumMyElements()];
    Map.MyGlobalElements(MyGlobalElements);

    // Create an integer vector NumNz that is used to build the Petra Matrix.
    // NumNz[i] is the Number of OFF-DIAGONAL term for the ith global equation on this processor

    int* NumNz = new int[NumMyEquations];

    // We are building a tridiagonal matrix where each row has (-1 2 -1)
    // So we need 2 off-diagonal terms (except for the first and last equation)

    for (int i = 0; i < NumMyEquations; i++)
      if((MyGlobalElements[i] == 0) || (MyGlobalElements[i] == NumGlobalEquations - 1))
	NumNz[i] = 1;
      else
	NumNz[i] = 2;

    // Create a Epetra_Matrix
    A=new Epetra_CrsMatrix(Copy, Map, NumNz);

    // Add  rows one-at-a-time
    // Need some vectors to help
    // Off diagonal Values will always be -1

    double* Values = new double[2];
    Values[0] = -1.0;
    Values[1] = -1.0;
    int* Indices = new int[2];
    double two = 2.0;
    int NumEntries;

    for (int i = 0; i < NumMyEquations; i++) {
      if(MyGlobalElements[i] == 0) {
	Indices[0] = 1;
	NumEntries = 1;
      }
      else if (MyGlobalElements[i] == NumGlobalEquations-1) {
	Indices[0] = NumGlobalEquations-2;
	NumEntries = 1;
      }
      else {
	Indices[0] = MyGlobalElements[i]-1;
	Indices[1] = MyGlobalElements[i]+1;
	NumEntries = 2;
      }
      A->InsertGlobalValues(MyGlobalElements[i], NumEntries, Values, Indices);
      A->InsertGlobalValues(MyGlobalElements[i], 1, &two, MyGlobalElements+i);
    }

    A->FillComplete();

    // Cleanup
    delete [] MyGlobalElements;
    delete [] NumNz;
    delete [] Values;
    delete [] Indices;

  }
}
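build_test_matrix hands the matrix back through a reference-to-pointer, so the caller owns it. A short usage sketch (assuming MPI has already been initialized):

  Epetra_MpiComm Comm (MPI_COMM_WORLD);
  Epetra_CrsMatrix* A = NULL;
  build_test_matrix (Comm, 1, A); // test_number 1: tridiagonal (-1 2 -1)
  // ... use *A ...
  delete A; // The caller is responsible for deleting the matrix.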
Example #20
int main(int argc, char *argv[]) {

#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif
  
  int MyPID = Comm.MyPID();
  bool verbose = false; 
  if (MyPID==0) verbose = true;

  
  /*int npRows = -1;
  int npCols = -1;
  bool useTwoD = false;
  int randomize = 1;
  std::string matrix = "Laplacian";
  
  Epetra_CrsMatrix *AK = NULL;
    std::string filename = "email.mtx";
  read_matrixmarket_file((char*) filename.c_str(), Comm, AK,
			 useTwoD, npRows, npCols,
			 randomize, false,
			 (matrix.find("Laplacian")!=std::string::npos));
  Teuchos::RCP<Epetra_CrsMatrix> A(AK);
  const Epetra_Map *AMap = &(AK->DomainMap());
  Teuchos::RCP<const Epetra_Map> Map(AMap, false);*/

  int nx = 30;
  Teuchos::ParameterList GaleriList;
  GaleriList.set("nx", nx);
  GaleriList.set("ny", nx * Comm.NumProc());
  GaleriList.set("mx", 1);
  GaleriList.set("my", Comm.NumProc());
  Teuchos::RefCountPtr<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap("Cartesian2D", Comm, GaleriList) );
  Teuchos::RefCountPtr<Epetra_CrsMatrix> A = Teuchos::rcp( Galeri::CreateCrsMatrix("Laplace2D", &*Map, GaleriList) );

  Teuchos::RefCountPtr<Epetra_MultiVector> LHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) );
  Teuchos::RefCountPtr<Epetra_MultiVector> RHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) );


  LHS->PutScalar(0.0); RHS->Random();

  // ==================================================== //
  // Compare support graph preconditioners to no precond. //
  // ---------------------------------------------------- //

  const double tol = 1e-5;
  const int maxIter = 500;

  // Baseline: No preconditioning
  // Compute number of iterations, to compare to IC later.

  // Here we create an AztecOO object
  LHS->PutScalar(0.0);

  AztecOO solver;
  solver.SetUserMatrix(&*A);
  solver.SetLHS(&*LHS);
  solver.SetRHS(&*RHS);
  solver.SetAztecOption(AZ_solver,AZ_cg);
  solver.SetAztecOption(AZ_output, 16); 
  solver.Iterate(maxIter, tol);

  int Iters = solver.NumIters();

  int SupportIters;
  Ifpack Factory;
  Teuchos::ParameterList List;

#ifdef HAVE_IFPACK_AMESOS
  //////////////////////////////////////////////////////
  // Same test with Ifpack_SupportGraph
  // Factored with Amesos

  Teuchos::RefCountPtr<Ifpack_Preconditioner> PrecSupportAmesos = Teuchos::rcp( Factory.Create("MSF Amesos", &*A) );
  List.set("amesos: solver type","Klu");
  List.set("MST: keep diagonal", 1.0);
  List.set("MST: randomize", 1);
  //List.set("fact: absolute threshold", 3.0);

  IFPACK_CHK_ERR(PrecSupportAmesos->SetParameters(List));
  IFPACK_CHK_ERR(PrecSupportAmesos->Initialize());
  IFPACK_CHK_ERR(PrecSupportAmesos->Compute());

  // Here we create an AztecOO object
  LHS->PutScalar(0.0);

  //AztecOO solver;
  solver.SetUserMatrix(&*A);
  solver.SetLHS(&*LHS);
  solver.SetRHS(&*RHS);
  solver.SetAztecOption(AZ_solver,AZ_cg);
  solver.SetPrecOperator(&*PrecSupportAmesos);
  solver.SetAztecOption(AZ_output, 16);
  solver.Iterate(maxIter, tol);

  SupportIters = solver.NumIters();

  // Compare to no preconditioning
  if (SupportIters > 2*Iters)
    IFPACK_CHK_ERR(-1);

#endif

  //////////////////////////////////////////////////////
  // Same test with Ifpack_SupportGraph
  // Factored with IC

  Teuchos::RefCountPtr<Ifpack_Preconditioner> PrecSupportIC = Teuchos::rcp( Factory.Create("MSF IC", &*A) );

  IFPACK_CHK_ERR(PrecSupportIC->SetParameters(List));
  IFPACK_CHK_ERR(PrecSupportIC->Compute());

  // Here we create an AztecOO object
  LHS->PutScalar(0.0);

  //AztecOO solver;
  solver.SetUserMatrix(&*A);
  solver.SetLHS(&*LHS);
  solver.SetRHS(&*RHS);
  solver.SetAztecOption(AZ_solver,AZ_cg);
  solver.SetPrecOperator(&*PrecSupportIC);
  solver.SetAztecOption(AZ_output, 16);
  solver.Iterate(maxIter, tol);

  SupportIters = solver.NumIters();

  // Compare to no preconditioning
  if (SupportIters > 2*Iters)
    IFPACK_CHK_ERR(-1);

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return(EXIT_SUCCESS);
}
Example #21
int main(int argc, char *argv[]) {

#ifdef EPETRA_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  cout << Comm << endl;

  int MyPID = Comm.MyPID();

  bool verbose = false;
  bool verbose1 = true;
  if (MyPID==0) verbose = true;

  if(argc < 2 && verbose) {
    cerr << "Usage: " << argv[0] 
	 << " HB_filename [level_fill [level_overlap [absolute_threshold [ relative_threshold]]]]" << endl
	 << "where:" << endl
	 << "HB_filename        - filename and path of a Harwell-Boeing data set" << endl
	 << "level_fill         - The amount of fill to use for ILU(k) preconditioner (default 0)" << endl
	 << "level_overlap      - The amount of overlap used for overlapping Schwarz subdomains (default 0)" << endl
	 << "absolute_threshold - The minimum value to place on the diagonal prior to factorization (default 0.0)" << endl
	 << "relative_threshold - The relative amount to perturb the diagonal prior to factorization (default 1.0)" << endl << endl
	 << "To specify a non-default value for one of these parameters, you must specify all" << endl
	 << " preceding values but not any subsequent parameters. Example:" << endl
	 << "ifpackHpcSerialMsr.exe mymatrix.hpc 1  - loads mymatrix.hpc, uses level fill of one, all other values are defaults" << endl
	 << endl;
    return(1);
  }

  // Uncomment the next three lines to debug in mpi mode
  //int tmp;
  //if (MyPID==0) cin >> tmp;
  //Comm.Barrier();

  Epetra_Map * readMap;
  Epetra_CrsMatrix * readA; 
  Epetra_Vector * readx; 
  Epetra_Vector * readb;
  Epetra_Vector * readxexact;
   
  // Call routine to read in HB problem
  Trilinos_Util_ReadHb2Epetra(argv[1], Comm, readMap, readA, readx, readb, readxexact);

  // Create uniform distributed map
  Epetra_Map map(readMap->NumGlobalElements(), 0, Comm);

  // Create Exporter to distribute read-in matrix and vectors

  Epetra_Export exporter(*readMap, map);
  Epetra_CrsMatrix A(Copy, map, 0);
  Epetra_Vector x(map);
  Epetra_Vector b(map);
  Epetra_Vector xexact(map);

  Epetra_Time FillTimer(Comm);
  x.Export(*readx, exporter, Add);
  b.Export(*readb, exporter, Add);
  xexact.Export(*readxexact, exporter, Add);
  Comm.Barrier();
  double vectorRedistributeTime = FillTimer.ElapsedTime();
  A.Export(*readA, exporter, Add);
  Comm.Barrier();
  double matrixRedistributeTime = FillTimer.ElapsedTime() - vectorRedistributeTime;
  assert(A.FillComplete()==0);    
  Comm.Barrier();
  double fillCompleteTime = FillTimer.ElapsedTime() - matrixRedistributeTime;
  if (Comm.MyPID()==0)	{
    cout << "\n\n****************************************************" << endl;
    cout << "\n Vector redistribute  time (sec) = " << vectorRedistributeTime<< endl;
    cout << "    Matrix redistribute time (sec) = " << matrixRedistributeTime << endl;
    cout << "    Transform to Local  time (sec) = " << fillCompleteTime << endl<< endl;
  }
  Epetra_Vector tmp1(*readMap);
  Epetra_Vector tmp2(map);
  readA->Multiply(false, *readxexact, tmp1);

  A.Multiply(false, xexact, tmp2);
  double residual;
  tmp1.Norm2(&residual);
  if (verbose) cout << "Norm of Ax from file            = " << residual << endl;
  tmp2.Norm2(&residual);
  if (verbose) cout << "Norm of Ax after redistribution = " << residual << endl << endl << endl;

  //cout << "A from file = " << *readA << endl << endl << endl;

  //cout << "A after dist = " << A << endl << endl << endl;

  delete readA;
  delete readx;
  delete readb;
  delete readxexact;
  delete readMap;

  Comm.Barrier();

  bool smallProblem = false;
  if (A.RowMap().NumGlobalElements()<100) smallProblem = true;

  if (smallProblem)
    cout << "Original Matrix = " << endl << A   << endl;

  x.PutScalar(0.0);

  Epetra_LinearProblem FullProblem(&A, &x, &b);
  double normb, norma;
  b.NormInf(&normb);
  norma = A.NormInf();
  if (verbose)
    cout << "Inf norm of Original Matrix = " << norma << endl
	 << "Inf norm of Original RHS    = " << normb << endl;
  
  Epetra_Time ReductionTimer(Comm);
  Epetra_CrsSingletonFilter SingletonFilter;
  Comm.Barrier();
  double reduceInitTime = ReductionTimer.ElapsedTime();
  SingletonFilter.Analyze(&A);
  Comm.Barrier();
  double reduceAnalyzeTime = ReductionTimer.ElapsedTime() - reduceInitTime;

  if (SingletonFilter.SingletonsDetected())
    cout << "Singletons found" << endl;
  else {
    cout << "Singletons not found" << endl;
    exit(1);
  }
  SingletonFilter.ConstructReducedProblem(&FullProblem);
  Comm.Barrier();
  double reduceConstructTime = ReductionTimer.ElapsedTime() - reduceInitTime;

  double totalReduceTime = ReductionTimer.ElapsedTime();

  if (verbose)
    cout << "\n\n****************************************************" << endl
	 << "    Reduction init  time (sec)           = " << reduceInitTime<< endl
	 << "    Reduction Analyze time (sec)         = " << reduceAnalyzeTime << endl
	 << "    Construct Reduced Problem time (sec) = " << reduceConstructTime << endl
	 << "    Reduction Total time (sec)           = " << totalReduceTime << endl<< endl;

  Statistics(SingletonFilter);

  Epetra_LinearProblem * ReducedProblem = SingletonFilter.ReducedProblem();

  Epetra_CrsMatrix * Ap = dynamic_cast<Epetra_CrsMatrix *>(ReducedProblem->GetMatrix());
  Epetra_Vector * bp = (*ReducedProblem->GetRHS())(0);
  Epetra_Vector * xp = (*ReducedProblem->GetLHS())(0);
  

  if (smallProblem)
    cout << " Reduced Matrix = " << endl << *Ap << endl
	 << " LHS before sol = " << endl << *xp << endl
	 << " RHS            = " << endl << *bp << endl;

  // Construct ILU preconditioner

  double elapsed_time, total_flops, MFLOPs;
  Epetra_Time timer(Comm);

  int LevelFill = 0;
  if (argc > 2)  LevelFill = atoi(argv[2]);
  if (verbose) cout << "Using Level Fill = " << LevelFill << endl;
  int Overlap = 0;
  if (argc > 3) Overlap = atoi(argv[3]);
  if (verbose) cout << "Using Level Overlap = " << Overlap << endl;
  double Athresh = 0.0;
  if (argc > 4) Athresh = atof(argv[4]);
  if (verbose) cout << "Using Absolute Threshold Value of = " << Athresh << endl;

  double Rthresh = 1.0;
  if (argc > 5) Rthresh = atof(argv[5]);
  if (verbose) cout << "Using Relative Threshold Value of = " << Rthresh << endl;

  Ifpack_IlukGraph * IlukGraph = 0;
  Ifpack_CrsRiluk * ILUK = 0;

  if (LevelFill>-1) {
    elapsed_time = timer.ElapsedTime();
    IlukGraph = new Ifpack_IlukGraph(Ap->Graph(), LevelFill, Overlap);
    assert(IlukGraph->ConstructFilledGraph()==0);
    elapsed_time = timer.ElapsedTime() - elapsed_time;
    if (verbose) cout << "Time to construct ILUK graph = " << elapsed_time << endl;


    Epetra_Flops fact_counter;
  
    elapsed_time = timer.ElapsedTime();
    ILUK = new Ifpack_CrsRiluk(*IlukGraph);
    ILUK->SetFlopCounter(fact_counter);
    ILUK->SetAbsoluteThreshold(Athresh);
    ILUK->SetRelativeThreshold(Rthresh);
    //assert(ILUK->InitValues()==0);
    int initerr = ILUK->InitValues(*Ap);
    if (initerr!=0) {
      cout << endl << Comm << endl << "  InitValues error = " << initerr;
      if (initerr==1) cout << "  Zero diagonal found, warning error only";
      cout << endl << endl;
    }
    assert(ILUK->Factor()==0);
    elapsed_time = timer.ElapsedTime() - elapsed_time;
    total_flops = ILUK->Flops();
    MFLOPs = total_flops/elapsed_time/1000000.0;
    if (verbose) cout << "Time to compute preconditioner values = " 
		    << elapsed_time << endl
		    << "MFLOPS for Factorization = " << MFLOPs << endl;
    //cout << *ILUK << endl;
    double Condest;
    ILUK->Condest(false, Condest);

    if (verbose) cout << "Condition number estimate for this preconditioner = " << Condest << endl;
  }
  int Maxiter = 100;
  double Tolerance = 1.0E-8;

  Epetra_Flops counter;
  Ap->SetFlopCounter(counter);
  xp->SetFlopCounter(*Ap);
  bp->SetFlopCounter(*Ap);
  if (ILUK!=0) ILUK->SetFlopCounter(*Ap);

  elapsed_time = timer.ElapsedTime();

  double normreducedb, normreduceda;
  bp->NormInf(&normreducedb);
  normreduceda = Ap->NormInf();
  if (verbose) 
    cout << "Inf norm of Reduced Matrix = " << normreduceda << endl
	 << "Inf norm of Reduced RHS    = " << normreducedb << endl;

  // Solve the reduced problem (BiCGSTAB helper; a sketch follows this example).
  BiCGSTAB(*Ap, *xp, *bp, ILUK, Maxiter, Tolerance, &residual, verbose);

  elapsed_time = timer.ElapsedTime() - elapsed_time;
  total_flops = counter.Flops();
  MFLOPs = total_flops/elapsed_time/1000000.0;
  if (verbose) cout << "Time to compute solution = " 
		    << elapsed_time << endl
		    << "Number of operations in solve = " << total_flops << endl
		    << "MFLOPS for Solve = " << MFLOPs<< endl << endl;

  SingletonFilter.ComputeFullSolution();

  if (smallProblem)
    cout << " Reduced LHS after sol = " << endl << *xp << endl
         << " Full    LHS after sol = " << endl << x << endl
         << " Full  Exact LHS       = " << endl << xexact << endl;

  Epetra_Vector resid(x);

  resid.Update(1.0, x, -1.0, xexact, 0.0); // resid = xcomp - xexact

  resid.Norm2(&residual);
  double normx, normxexact;
  x.Norm2(&normx);
  xexact.Norm2(&normxexact);

  if (verbose) 
    cout << "2-norm of computed solution                               = " << normx << endl
	 << "2-norm of exact solution                                  = " << normxexact << endl
	 << "2-norm of difference between computed and exact solution  = " << residual << endl;
    
  if (verbose1 && residual>1.0e-5) {
    if (verbose)
      cout << "Difference between computed and exact solution appears large..." << endl
	   << "Computing norm of A times this difference.  If this norm is small, then matrix is singular"
	   << endl;
    Epetra_Vector bdiff(b);
    assert(A.Multiply(false, resid, bdiff)==0);
    assert(bdiff.Norm2(&residual)==0);
    if (verbose) 
      cout << "2-norm of A times difference between computed and exact solution  = " << residual << endl;
    
  }
  if (verbose) 
    cout << "********************************************************" << endl
	 << "              Solving again with 2*Ax=2*b" << endl
	 << "********************************************************" << endl;

  A.Scale(2.0); // A = 2*A
  b.Scale(2.0); // b = 2*b
  x.PutScalar(0.0);
  b.NormInf(&normb);
  norma = A.NormInf();
  if (verbose)
    cout << "Inf norm of Original Matrix = " << norma << endl
	 << "Inf norm of Original RHS    = " << normb << endl;
  double updateReducedProblemTime = ReductionTimer.ElapsedTime();
  SingletonFilter.UpdateReducedProblem(&FullProblem);
  Comm.Barrier();
  updateReducedProblemTime = ReductionTimer.ElapsedTime() - updateReducedProblemTime;
  if (verbose)
    cout << "\n\n****************************************************" << endl
	 << "    Update Reduced Problem time (sec)           = " << updateReducedProblemTime<< endl
	 << "****************************************************" << endl;
  Statistics(SingletonFilter);

  if (LevelFill>-1) {

    Epetra_Flops fact_counter;
  
    elapsed_time = timer.ElapsedTime();

    int initerr = ILUK->InitValues(*Ap);
    if (initerr!=0) {
      cout << endl << Comm << endl << "  InitValues error = " << initerr;
      if (initerr==1) cout << "  Zero diagonal found, warning error only";
      cout << endl << endl;
    }
    assert(ILUK->Factor()==0);
    elapsed_time = timer.ElapsedTime() - elapsed_time;
    total_flops = ILUK->Flops();
    MFLOPs = total_flops/elapsed_time/1000000.0;
    if (verbose) cout << "Time to compute preconditioner values = " 
		    << elapsed_time << endl
		    << "MFLOPS for Factorization = " << MFLOPs << endl;
    double Condest;
    ILUK->Condest(false, Condest);
    
    if (verbose) cout << "Condition number estimate for this preconditioner = " << Condest << endl;
  }
  bp->NormInf(&normreducedb);
  normreduceda = Ap->NormInf();
  if (verbose) 
    cout << "Inf norm of Reduced Matrix = " << normreduceda << endl
	 << "Inf norm of Reduced RHS    = " << normreducedb << endl;

  elapsed_time = timer.ElapsedTime();

  BiCGSTAB(*Ap, *xp, *bp, ILUK, Maxiter, Tolerance, &residual, verbose);

  elapsed_time = timer.ElapsedTime() - elapsed_time;
  total_flops = counter.Flops();
  MFLOPs = total_flops/elapsed_time/1000000.0;
  if (verbose) cout << "Time to compute solution = " 
		    << elapsed_time << endl
		    << "Number of operations in solve = " << total_flops << endl
		    << "MFLOPS for Solve = " << MFLOPs<< endl << endl;

  SingletonFilter.ComputeFullSolution();

  if (smallProblem)
    cout << " Reduced LHS after sol = " << endl << *xp << endl
         << " Full    LHS after sol = " << endl << x << endl
         << " Full  Exact LHS       = " << endl << xexact << endl;

  resid.Update(1.0, x, -1.0, xexact, 0.0); // resid = xcomp - xexact

  resid.Norm2(&residual);
  x.Norm2(&normx);
  xexact.Norm2(&normxexact);

  if (verbose) 
    cout << "2-norm of computed solution                               = " << normx << endl
	 << "2-norm of exact solution                                  = " << normxexact << endl
	 << "2-norm of difference between computed and exact solution  = " << residual << endl;
    
  if (verbose1 && residual>1.0e-5) {
    if (verbose)
      cout << "Difference between computed and exact solution appears large..." << endl
	   << "Computing norm of A times this difference.  If this norm is small, then matrix is singular"
	   << endl;
    Epetra_Vector bdiff(b);
    assert(A.Multiply(false, resid, bdiff)==0);
    assert(bdiff.Norm2(&residual)==0);
    if (verbose) 
      cout << "2-norm of A times difference between computed and exact solution  = " << residual << endl;
    
  }
 


  if (ILUK!=0) delete ILUK;
  if (IlukGraph!=0) delete IlukGraph;

#ifdef EPETRA_MPI
  MPI_Finalize();
#endif

  return 0;
}
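// The BiCGSTAB and Statistics helpers called above are defined elsewhere in
// the original test.  Below is a minimal sketch of a right-preconditioned
// BiCGSTAB in Epetra, assuming the preconditioner is any Epetra_Operator
// whose ApplyInverse applies M^{-1} (Ifpack_CrsRiluk qualifies); the name
// and argument list mirror the call sites above, not the Trilinos original.
#include "Epetra_CrsMatrix.h"
#include "Epetra_Vector.h"
#include "Epetra_Operator.h"
#include <iostream>

void BiCGSTAB(Epetra_CrsMatrix& A, Epetra_Vector& x, Epetra_Vector& b,
              Epetra_Operator* M, int Maxiter, double Tolerance,
              double* residual, bool verbose)
{
  Epetra_Vector r(b.Map()), rhat(b.Map()), p(b.Map()), v(b.Map());
  Epetra_Vector phat(b.Map()), s(b.Map()), shat(b.Map()), t(b.Map());

  A.Multiply(false, x, r);          // r = A*x
  r.Update(1.0, b, -1.0);           // r = b - A*x
  rhat = r;                         // fixed shadow residual
  double rho = 1.0, alpha = 1.0, omega = 1.0, normb;
  b.Norm2(&normb);
  if (normb == 0.0) normb = 1.0;

  for (int iter = 0; iter < Maxiter; ++iter) {
    double rho_new;
    rhat.Dot(r, &rho_new);
    double beta = (rho_new / rho) * (alpha / omega);
    p.Update(-omega, v, 1.0);       // p = p - omega*v
    p.Update(1.0, r, beta);         // p = r + beta*(p - omega*v)
    if (M != 0) M->ApplyInverse(p, phat); else phat = p;
    A.Multiply(false, phat, v);     // v = A*M^{-1}*p
    double rhat_dot_v;
    rhat.Dot(v, &rhat_dot_v);
    alpha = rho_new / rhat_dot_v;
    s = r;
    s.Update(-alpha, v, 1.0);       // s = r - alpha*v
    if (M != 0) M->ApplyInverse(s, shat); else shat = s;
    A.Multiply(false, shat, t);     // t = A*M^{-1}*s
    double t_dot_s, t_dot_t;
    t.Dot(s, &t_dot_s);
    t.Dot(t, &t_dot_t);
    omega = t_dot_s / t_dot_t;
    x.Update(alpha, phat, omega, shat, 1.0); // x += alpha*phat + omega*shat
    r = s;
    r.Update(-omega, t, 1.0);       // r = s - omega*t
    r.Norm2(residual);
    if (verbose) std::cout << "BiCGSTAB iteration " << iter
                           << ", ||r||/||b|| = " << *residual / normb << std::endl;
    if (*residual / normb < Tolerance) return;
    rho = rho_new;
  }
}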
Example #22
int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  // Print the Epetra version once, on the first process only.
  if (Comm.MyPID() == 0)
    std::cout << Epetra_Version() << std::endl << std::endl;

  Teuchos::RCP<Teuchos::FancyOStream> fos = getFancyOStream(Teuchos::rcpFromRef(std::cout));

  // Construct a Map with NumElements and index base of 0
  Teuchos::RCP<Epetra_Map> rgMap = Teuchos::rcp(new Epetra_Map(63, 0, Comm));
  Teuchos::RCP<Epetra_Map> doMap = Teuchos::rcp(new Epetra_Map(20, 0, Comm));

  Epetra_CrsMatrix* Pin = NULL;
  EpetraExt::MatrixMarketFileToCrsMatrix("Test.mat",
      *rgMap, *rgMap, *doMap,
      Pin, false,
      true);
  Teuchos::RCP<Epetra_CrsMatrix> P = Teuchos::rcp(Pin);

  Epetra_CrsMatrix* A = NULL;
  A = TridiagMatrix(rgMap.get(), 1.0, -2.0, 1.0); // tridiagonal helper; sketched after this example
  Teuchos::RCP<Epetra_CrsMatrix> rcpA = Teuchos::rcp(A);

  ////////////////////////////////////////////
  // plain Epetra
  ////////////////////////////////////////////

  // Create x and b vectors
  Teuchos::RCP<Epetra_Vector> x = Teuchos::rcp(new Epetra_Vector(*doMap));
  Teuchos::RCP<Epetra_Vector> x2 = Teuchos::rcp(new Epetra_Vector(*rgMap));
  Teuchos::RCP<Epetra_Vector> b = Teuchos::rcp(new Epetra_Vector(*rgMap));
  Teuchos::RCP<Epetra_Vector> b2 = Teuchos::rcp(new Epetra_Vector(*rgMap));

  x->PutScalar(1.0);
  x2->PutScalar(1.0);
  double normx = 0.0;
  x->Norm1(&normx);
  if (Comm.MyPID() == 0) std::cout << "||x|| = " << normx << std::endl;
  x2->Norm1(&normx);
  if (Comm.MyPID() == 0) std::cout << "||x2|| = " << normx << std::endl;

  /*P->Apply(*x,*b);
  normx = 0.0;
  b->Norm1(&normx);*/
  //if (Comm.MyPID() == 0) std::cout << "||Px||_1 = " << normx << std::endl;

  /*Epetra_RowMatrixTransposer et(&*P);
        Epetra_CrsMatrix* PT;
        int rv = et.CreateTranspose(true,PT);
        if (rv != 0) {
          std::ostringstream buf;
          buf << rv;
          std::string msg = "Utils::Transpose: Epetra::RowMatrixTransposer returned value of " + buf.str();
          std::cout << msg << std::endl;
        }

  Teuchos::RCP<Epetra_CrsMatrix> rcpPT(PT);

  rcpPT->Apply(*x2,*b2);
  normx = 0.0;
  b2->Norm1(&normx);*/
  //if (Comm.MyPID() == 0) std::cout << "||P^T x||_1 = " << normx << std::endl;

  // matrix-matrix multiply
  Teuchos::RCP<Epetra_CrsMatrix> AP = Teuchos::rcp(new Epetra_CrsMatrix(Copy,rcpA->RangeMap(),1));
  EpetraExt::MatrixMatrix::Multiply(*rcpA,false,*P,false,*AP, *fos);
  //  AP->FillComplete(P->DomainMap(),rcpA->RangeMap());
  //std::cout << *AP << std::endl;
  AP->Apply(*x,*b2);
  normx = 0.0;
  b2->Norm1(&normx);
  if (Comm.MyPID() == 0) std::cout << "Epetra: ||AP x||_1 = " << normx << std::endl;

  // build A^T explicitly
  Epetra_RowMatrixTransposer etA(&*rcpA);
  Epetra_CrsMatrix* AT;
  int rv3 = etA.CreateTranspose(true,AT);
  if (rv3 != 0) {
    std::ostringstream buf;
    buf << rv3;
    std::string msg = "Utils::Transpose: Epetra::RowMatrixTransposer returned value of " + buf.str();
    std::cout << msg << std::endl;
  }
  Teuchos::RCP<Epetra_CrsMatrix> rcpAT(AT);

  // calculate A^T P x using the explicit transpose
  Teuchos::RCP<Epetra_CrsMatrix> APexpl = Teuchos::rcp(new Epetra_CrsMatrix(Copy,rcpA->DomainMap(),20));
  EpetraExt::MatrixMatrix::Multiply(*rcpAT,false,*P,false,*APexpl, *fos);
  //  APexpl->FillComplete(P->DomainMap(),rcpA->DomainMap()); // check me
  APexpl->Apply(*x,*b2);
  normx = 0.0;
  b2->Norm1(&normx);
  if (Comm.MyPID() == 0) std::cout << "Epetra: ||A^T_expl P x||_1 = " << normx << std::endl;

  // calculate A^T P x using the implicit transpose (transpose flag in Multiply)
  Teuchos::RCP<Epetra_CrsMatrix> APimpl = Teuchos::rcp(new Epetra_CrsMatrix(Copy,rcpA->RangeMap(),20));
  EpetraExt::MatrixMatrix::Multiply(*rcpA,true,*P,false,*APimpl, *fos);
  //  APimpl->FillComplete(P->DomainMap(),APimpl->RangeMap()); // check me
  APimpl->Apply(*x,*b2);
  normx = 0.0;
  b2->Norm1(&normx);
  if (Comm.MyPID() == 0) std::cout << "Epetra: ||A^T_impl P x||_1 = " << normx << std::endl;


  ///////////////////////////////////////
  // Xpetra
  ///////////////////////////////////////


   // wrap the original Epetra maps and matrices as Xpetra objects
   Teuchos::RCP<Xpetra::EpetraMap> xrgMap = Teuchos::rcp(new Xpetra::EpetraMap(rgMap));
   Teuchos::RCP<Xpetra::EpetraMap> xdoMap = Teuchos::rcp(new Xpetra::EpetraMap(doMap));

   Teuchos::RCP<Xpetra::EpetraCrsMatrix> Pop = Teuchos::rcp(new Xpetra::EpetraCrsMatrix(P) );
   Teuchos::RCP<Xpetra::CrsMatrix<double> > crsP = Teuchos::rcp_implicit_cast<Xpetra::CrsMatrix<double> >(Pop);
   Teuchos::RCP<Xpetra::CrsMatrixWrap<double> > crsPOp = Teuchos::rcp( new Xpetra::CrsMatrixWrap<double>(crsP) );
   crsPOp->fillComplete(xdoMap,xrgMap);

   Teuchos::RCP<Xpetra::EpetraCrsMatrix> Aop = Teuchos::rcp(new Xpetra::EpetraCrsMatrix(rcpA) );
   Teuchos::RCP<Xpetra::CrsMatrix<double> > crsA = Teuchos::rcp_implicit_cast<Xpetra::CrsMatrix<double> >(Aop);
   Teuchos::RCP<Xpetra::CrsMatrixWrap<double> > crsAOp = Teuchos::rcp( new Xpetra::CrsMatrixWrap<double>(crsA) );
   crsAOp->fillComplete(xrgMap,xrgMap);

   // wrap vectors
   Teuchos::RCP<Xpetra::EpetraVector> xx = Teuchos::rcp(new Xpetra::EpetraVector(x));
   Teuchos::RCP<Xpetra::EpetraVector> xx2 = Teuchos::rcp(new Xpetra::EpetraVector(x2));
   Teuchos::RCP<Xpetra::EpetraVector> bb1 = Teuchos::rcp(new Xpetra::EpetraVector(b));
   Teuchos::RCP<Xpetra::EpetraVector> bb2 = Teuchos::rcp(new Xpetra::EpetraVector(b2));

   bb1->putScalar(0.0);
   bb2->putScalar(0.0);

   //crsPOp->apply(*xx,*bb1);

   // if (Comm.MyPID() == 0) std::cout << "||Pop x||_1 = " << bb1->norm1() << std::endl;

   //Teuchos::RCP<Xpetra::Matrix<double> > crsPOpT = MueLu::Utils2<double>::Transpose(crsPOp,false);

   //crsPOpT->apply(*xx2,*bb2);
   //if (Comm.MyPID() == 0) std::cout << "||Pop^T x||_1 = " << bb2->norm1() << std::endl;

   // calculate APx
   Teuchos::RCP<Xpetra::Matrix<double> > crsAP = MueLu::Utils<double>::Multiply(*crsAOp,false,*crsPOp,false, *fos);
   //crsAP->describe(*fos,Teuchos::VERB_EXTREME);
   bb1->putScalar(0.0);
   crsAP->apply(*xx,*bb1);
   normx = bb1->norm1();
   if (Comm.MyPID() == 0) std::cout << "Xpetra: ||A Pop x||_1 = " << normx << std::endl;

   // calculate A^T P x explicitly
   Teuchos::RCP<Xpetra::Matrix<double> > crsAOpT = MueLu::Utils2<double>::Transpose(*crsAOp,  false);
   Teuchos::RCP<Xpetra::Matrix<double> > AtPexpl = MueLu::Utils<double> ::Multiply (*crsAOpT, false, *crsPOp, false, *fos);
   bb1->putScalar(0.0);
   AtPexpl->apply(*xx,*bb1);
   normx = bb1->norm1();
   if (Comm.MyPID() == 0)
       std::cout << "Xpetra: ||A^T_expl Pop x||_1 = " << normx << std::endl;

   // calculate A^T P x implicitly (transpose flag in Multiply)
   Teuchos::RCP<Xpetra::Matrix<double> > AtPimpl = MueLu::Utils<double>::Multiply(*crsAOp,true,*crsPOp,false, *fos);
   bb1->putScalar(0.0);
   AtPimpl->apply(*xx,*bb1);
   normx = bb1->norm1();
   if (Comm.MyPID() == 0)
       std::cout << "Xpetra: ||A^T_impl Pop x||_1 = " << normx << std::endl;


#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return 0;
}
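// The TridiagMatrix helper used above is not shown in this listing.  A
// minimal sketch, assuming it builds the usual one-dimensional
// [sub, diag, super] tridiagonal stencil on the given map; the name and
// argument order mirror the call site, not a library routine.
#include "Epetra_CrsMatrix.h"
#include "Epetra_Map.h"

Epetra_CrsMatrix* TridiagMatrix(const Epetra_Map* map,
                                double sub, double diag, double super)
{
  Epetra_CrsMatrix* A = new Epetra_CrsMatrix(Copy, *map, 3);
  const int numGlobal = map->NumGlobalElements();
  const int numMy = map->NumMyElements();
  for (int i = 0; i < numMy; ++i) {
    const int row = map->GID(i);   // global row owned by this process
    double values[3];
    int indices[3];
    int n = 0;
    if (row > 0) { values[n] = sub; indices[n] = row - 1; ++n; }
    values[n] = diag; indices[n] = row; ++n;
    if (row < numGlobal - 1) { values[n] = super; indices[n] = row + 1; ++n; }
    A->InsertGlobalValues(row, n, values, indices);
  }
  A->FillComplete();
  return A;
}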
Example #23
int main(int argc, char *argv[]) {

#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  int MyPID = Comm.MyPID();
  bool verbose = false; 
  if (MyPID==0) verbose = true;

  // The problem is defined on a 2D grid, global size is nx * nx.
  int nx = 30;
  Teuchos::ParameterList GaleriList;
  GaleriList.set("nx", nx);
  GaleriList.set("ny", nx * Comm.NumProc());
  GaleriList.set("mx", 1);
  GaleriList.set("my", Comm.NumProc());
  Teuchos::RefCountPtr<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap("Cartesian2D", Comm, GaleriList) );
  Teuchos::RefCountPtr<Epetra_CrsMatrix> A = Teuchos::rcp( Galeri::CreateCrsMatrix("Laplace2D", &*Map, GaleriList) );
  Teuchos::RefCountPtr<Epetra_MultiVector> LHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) );
  Teuchos::RefCountPtr<Epetra_MultiVector> RHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) );
  LHS->PutScalar(0.0); RHS->Random();

  // ============================ //
  // Construct ICT preconditioner //
  // ---------------------------- //

  // Use nontrivial parameter values to verify that the old and new
  // interfaces respond identically.
  int    LevelFill = 2;
  double DropTol = 0.3333;
  double Condest;
  
  Teuchos::RefCountPtr<Ifpack_CrsIct> ICT;
  ICT = Teuchos::rcp( new Ifpack_CrsIct(*A,DropTol,LevelFill) );
  ICT->SetAbsoluteThreshold(0.00123);
  ICT->SetRelativeThreshold(0.9876);
  // Init values from A
  ICT->InitValues(*A);
  // compute the factors
  ICT->Factor();
  // and now estimate the condition number
  ICT->Condest(false,Condest);
  
  if( Comm.MyPID() == 0 ) {
    cout << "Condition number estimate (level-of-fill = "
	 << LevelFill <<  ") = " << Condest << endl;
  }

  // Define label for printing out during the solve phase
  string label = "Ifpack_CrsIct Preconditioner: LevelFill = " + toString(LevelFill) + 
                                                 " Overlap = 0"; 
  ICT->SetLabel(label.c_str());
  
  // Here we create an AztecOO object
  LHS->PutScalar(0.0);

  int Niters = 1200;

  AztecOO solver;
  solver.SetUserMatrix(&*A);
  solver.SetLHS(&*LHS);
  solver.SetRHS(&*RHS);
  solver.SetAztecOption(AZ_solver,AZ_cg);
  solver.SetPrecOperator(&*ICT);
  solver.SetAztecOption(AZ_output, 16); 
  solver.Iterate(Niters, 5.0e-5);

  int OldIters = solver.NumIters();

  // now rebuild the same preconditioner via the Ifpack factory ("IC");
  // we expect the same number of iterations

  Ifpack Factory;
  Teuchos::RefCountPtr<Ifpack_Preconditioner> Prec = Teuchos::rcp( Factory.Create("IC", &*A) );

  Teuchos::ParameterList List;
  // Note: ParameterList::get(name, default) inserts the default into the
  // list if the entry is absent, so these calls also set the parameters.
  List.get("fact: level-of-fill", 2);
  List.get("fact: drop tolerance", 0.3333);
  List.get("fact: absolute threshold", 0.00123);
  List.get("fact: relative threshold", 0.9876);
  List.get("fact: relaxation value", 0.0);

  IFPACK_CHK_ERR(Prec->SetParameters(List));
  IFPACK_CHK_ERR(Prec->Compute());

  // Here we create an AztecOO object
  LHS->PutScalar(0.0);

  solver.SetUserMatrix(&*A);
  solver.SetLHS(&*LHS);
  solver.SetRHS(&*RHS);
  solver.SetAztecOption(AZ_solver,AZ_cg);
  solver.SetPrecOperator(&*Prec);
  solver.SetAztecOption(AZ_output, 16); 
  solver.Iterate(Niters, 5.0e-5);

  int NewIters = solver.NumIters();

  if (OldIters != NewIters)
    IFPACK_CHK_ERR(-1);

#ifdef HAVE_MPI
  MPI_Finalize() ;
#endif

  return(EXIT_SUCCESS);
}
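// The toString helper used in this example (and again in a later one) is not
// shown in the listing.  A minimal sketch using std::ostringstream; the name
// mirrors the call sites, not a particular Trilinos utility.
#include <sstream>
#include <string>

template <typename T>
std::string toString(const T& value)
{
  std::ostringstream os;
  os << value;          // format the value with operator<<
  return os.str();
}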
Example #24
int main(int argc, char *argv[]) {

// standard Epetra MPI/Serial Comm startup	
#ifdef EPETRA_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm comm;
#endif
  
  int MyPID = comm.MyPID();
  int ierr = 0;
  bool verbose = (0 == MyPID);
  bool reportErrors = (0 == MyPID);
  // setup MatlabEngine
  if (verbose) cout << "going to startup a matlab process...\n";
  EpetraExt::EpetraExt_MatlabEngine engine (comm);
  if (verbose) cout << "matlab started\n";
  
  // setup an array of doubles to be used for the examples
  int M = 20;
  int numGlobalElements = M * comm.NumProc();
  int N = 3;
  int numMyEntries = M * N;
  double* A = new double[numMyEntries];
  double* Aptr = A;
  int startValue = numMyEntries * MyPID;

  for(int col=0; col < N; col++) {
    for(int row=0; row < M; row++) {
      *Aptr++ = startValue++;
    }
  }

  // setup an array of ints to be used for the examples
  int* intA = new int[numMyEntries];
  int* intAptr = intA;
  int intStartValue = numMyEntries * MyPID;
  for(int i=0; i < M*N; i++) {
      *intAptr++ = intStartValue++;
  }
  
  // construct a map to be used by distributed objects
  Epetra_Map map (numGlobalElements, 0, comm);
  
  // CrsMatrix example
  // constructs a globally distributed CrsMatrix and then puts it into Matlab
  if (verbose) cout << " constructing CrsMatrix...\n";
  Epetra_CrsMatrix crsMatrix (Copy, map, N);
  int* indices = new int[N];
  for (int col=0; col < N; col++) {
    indices[col] = col;	  
  }
  
  double value = startValue;
  double* values = new double[numMyEntries];
  int minMyGID = map.MinMyGID();
  for (int row=0; row < M; row++) {
    for (int col=0; col < N; col++) {
      values[col] = value++;
    }
      
    crsMatrix.InsertGlobalValues(minMyGID + row, N, values, indices);
  }
  
  crsMatrix.FillComplete();
  if (verbose) cout << " CrsMatrix constructed\n";
  if (verbose) cout << " putting CrsMatrix into Matlab as CRSM\n";
  ierr = engine.PutRowMatrix(crsMatrix, "CRSM", false);
  if (ierr) {
    if (reportErrors) cout << "There was an error in engine.PutRowMatrix(crsMatrix, \"CRSM\", false): " << ierr << endl;
  }
  
  // BlockMap example
  // puts a map into Matlab
  if (verbose) cout << " putting Map into Matlab as MAP\n";
  ierr = engine.PutBlockMap(map, "MAP", false);
  if (ierr) {
    if (reportErrors) cout << "There was an error in engine.PutBlockMap(map, \"MAP\", false);: " << ierr << endl;
  }
  
  // MultiVector example
  // constructs a globally distributed MultiVector and then puts it into Matlab
  if (verbose) cout << " constructing MultiVector...\n";
  Epetra_MultiVector multiVector (Copy, map, A, M, N);
  if (verbose) cout << " MultiVector constructed\n";
  if (verbose) cout << " putting MultiVector into Matlab as MV\n";
  ierr = engine.PutMultiVector(multiVector, "MV");
  if (ierr) {
    if (reportErrors) cout << "There was an error in engine.PutMultiVector(multiVector, \"MV\"): " << ierr << endl;
  }
  
  // SerialDenseMatrix example
  // constructs a SerialDenseMatrix on every PE
  if (verbose) cout << " constructing a SerialDenseMatrix...\n";
  Epetra_SerialDenseMatrix sdMatrix (Copy, A, M, M, N);
  if (verbose) cout << " SerialDenseMatrix constructed\n";
  if (verbose) cout << " putting SerialDenseMatrix from PE0 into Matlab as SDM_PE0\n";
  // since the third parameter is left out, the SerialDenseMatrix from PE0 is used by default
  ierr = engine.PutSerialDenseMatrix(sdMatrix, "SDM_PE0");
  if (ierr) {
    if (reportErrors) cout << "There was an error in engine.PutSerialDenseMatrix(sdMatrix, \"SDM_PE0\"): " << ierr << endl;
  }
  if (comm.NumProc() > 1) {
    if (verbose) cout << " putting SerialDenseMatrix from PE1 into Matlab as SDM_PE1\n";
    // specifying 1 as the third parameter will put the SerialDenseMatrix from PE1 into Matlab
    ierr = engine.PutSerialDenseMatrix(sdMatrix, "SDM_PE1", 1);
    if (ierr) {
      if (reportErrors) cout << "There was an error in engine.PutSerialDenseMatrix(sdMatrix, \"SDM_PE1\", 1): " << ierr << endl;
    }
  }
  

  // SerialDenseVector example
  // constructs a SerialDenseVector on every PE
  if (verbose) cout << " constructing a SerialDenseVector...\n";
  Epetra_SerialDenseVector sdVector (Copy, A, M);
  if (verbose) cout << " SerialDenseVector constructed\n";
  // since the third parameter is left out, the SerialDenseVector from PE0 is used by default
  if (verbose) cout << " putting SerialDenseVector from PE0 into Matlab as SDV_PE0\n";
  ierr = engine.PutSerialDenseMatrix(sdVector, "SDV_PE0");
  if (ierr) {
    if (reportErrors) cout << "There was an error in engine.PutSerialDenseMatrix(sdVector, \"SDV_PE0\"): " << ierr << endl;
  }
  if (comm.NumProc() > 1) {
    if (verbose) cout << " putting SerialDenseVector from PE1 into Matlab as SDV_PE1\n";
    // specifying 1 as the third parameter will put the SerialDenseVector from PE1 into Matlab
    ierr = engine.PutSerialDenseMatrix(sdVector, "SDV_PE1", 1);
    if (ierr) {
      if (reportErrors) cout << "There was an error in engine.PutSerialDenseMatrix(sdVector, \"SDV_PE1\", 1): " << ierr << endl;
    }
  }

  // IntSerialDenseMatrix example
  // constructs an IntSerialDenseMatrix on every PE
  if (verbose) cout << " constructing a IntSerialDenseMatrix...\n";
  Epetra_IntSerialDenseMatrix isdMatrix (Copy, intA, M, M, N);
  if (verbose) cout << " IntSerialDenseMatrix constructed\n";
  // since the third parameter is left out, the IntSerialDenseMatrix from PE0 is used by default
  if (verbose) cout << " putting IntSerialDenseMatrix from PE0 into Matlab as ISDM_PE0\n";
  ierr = engine.PutIntSerialDenseMatrix(isdMatrix, "ISDM_PE0");
  if (ierr) {
    if (reportErrors) cout << "There was an error in engine.PutIntSerialDenseMatrix(isdMatrix, \"ISDM_PE0\"): " << ierr << endl;
  }
  if (comm.NumProc() > 1) {
    if (verbose) cout << " putting IntSerialDenseMatrix from PE1 into Matlab as ISDM_PE1\n";
    // specifying 1 as the third parameter will put the IntSerialDenseMatrix from PE1 into Matlab
    ierr = engine.PutIntSerialDenseMatrix(isdMatrix, "ISDM_PE1", 1);
    if (ierr) {
      if (reportErrors) cout << "There was an error in engine.PutIntSerialDenseMatrix(isdMatrix, \"ISDM_PE1\", 1): " << ierr << endl;
    }
  }


  // IntSerialDenseVector example
  // constructs an IntSerialDenseVector on every PE
  if (verbose) cout << " constructing a IntSerialDenseVector...\n";
  Epetra_IntSerialDenseVector isdVector (Copy, intA, M);
  if (verbose) cout << " IntSerialDenseVector constructed\n";
  // since the third parameter is left out, the IntSerialDenseVector from PE0 is used by default
  if (verbose) cout << " putting IntSerialDenseVector from PE0 into Matlab as ISDV_PE0\n";
  ierr = engine.PutIntSerialDenseMatrix(isdVector, "ISDV_PE0");
  if (ierr) {
    if (reportErrors) cout << "There was an error in engine.PutIntSerialDenseMatrix(isdVector, \"ISDV_PE0\"): " << ierr << endl;
  }
  if (comm.NumProc() > 1) {
    if (verbose) cout << " putting IntSerialDenseVector from PE1 into Matlab as ISDV_PE1\n";
    // specifying 1 as the third parameter will put the IntSerialDenseVector from PE1 into Matlab
    ierr = engine.PutIntSerialDenseMatrix(isdVector, "ISDV_PE1", 1);
    if (ierr) {
      if (reportErrors) cout << "There was an error in engine.PutIntSerialDenseMatrix(isdVector, \"ISDV_PE1\", 1): " << ierr << endl;
    }
  }
  
  // entering a while loop on PE0 will keep the Matlab workspace alive
  /*
  if (MyPID == 0)
  while(1) {
    // do nothing
  }
  */

  const int bufSize = 200;
  char s [bufSize];
  const int matlabBufferSize = 1024 * 16;
  char matlabBuffer [matlabBufferSize];
  
  // send some commands to Matlab and output the result to stdout
  engine.EvalString("whos", matlabBuffer, matlabBufferSize);
  if (verbose) cout << matlabBuffer << endl;
  engine.EvalString("SDV_PE0", matlabBuffer, matlabBufferSize);
  if (verbose) cout << matlabBuffer << endl;
  if (comm.NumProc() > 1) {
    engine.EvalString("SDV_PE1", matlabBuffer, matlabBufferSize);
    if (verbose) cout << matlabBuffer << endl;
  }
  
  // the following allows user interaction with Matlab
  if (MyPID == 0)
  while(1) {
      // Prompt the user and get a string
      printf(">> ");
      if (fgets(s, bufSize, stdin) == NULL) {
          printf("Bye\n");
          break ;
      }
      printf ("command :%s:\n", s) ;
      
      // send the command to MATLAB
      // output goes to stdout
      ierr = engine.EvalString(s, matlabBuffer, matlabBufferSize);
      if (ierr != 0) {
          printf("there was an error: %d", ierr);
          ierr = 0;
      }
      else {
          printf("Matlab Output:\n%s", matlabBuffer);
      }
  }
  
  if (verbose) cout << endl << " all done\n";

// standard finalizer for Epetra MPI Comms
#ifdef EPETRA_MPI
  MPI_Finalize();
#endif

  // The engine's destructor runs automatically when it goes out of scope at
  // the end of main; it shuts down the Matlab process associated with this
  // example so the application exits cleanly.
  return(0);
}
Example #25
int main(int argc, char *argv[]) {

#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  int MyPID = Comm.MyPID();
  bool verbose = false; 
  if (MyPID==0) verbose = true;

  // matrix downloaded from MatrixMarket
  char FileName[] = "../HBMatrices/fidap005.rua";

  Epetra_Map * readMap; // Pointers because of Trilinos_Util_ReadHb2Epetra
  Epetra_CrsMatrix * readA; 
  Epetra_Vector * readx; 
  Epetra_Vector * readb;
  Epetra_Vector * readxexact;
   
  // Call routine to read in HB problem
  Trilinos_Util_ReadHb2Epetra(FileName, Comm, readMap, readA, readx, 
			      readb, readxexact);

  int NumGlobalElements = readMap->NumGlobalElements();

  // Create uniform distributed map
  Epetra_Map map(NumGlobalElements, 0, Comm);

  // Create Exporter to distribute read-in matrix and vectors

  Epetra_Export exporter(*readMap, map);
  Epetra_CrsMatrix A(Copy, map, 0);
  Epetra_Vector x(map);
  Epetra_Vector b(map);
  Epetra_Vector xexact(map);

  Epetra_Time FillTimer(Comm);
  A.Export(*readA, exporter, Add);
  x.Export(*readx, exporter, Add);
  b.Export(*readb, exporter, Add);
  xexact.Export(*readxexact, exporter, Add);

  A.FillComplete();
  
  delete readA;
  delete readx;
  delete readb;
  delete readxexact;
  delete readMap;
  
  // ============================ //
  // Construct ICT preconditioner //
  // ---------------------------- //

  // Preconditioner parameters; adjust as needed.
  int    LevelFill = 1;
  double DropTol = 0.0;
  double Condest;
  
  Ifpack_CrsIct * ICT = NULL;
  ICT = new Ifpack_CrsIct(A,DropTol,LevelFill);
  // Init values from A
  ICT->InitValues(A);
  // compute the factors
  ICT->Factor();
  // and now estimate the condition number
  ICT->Condest(false,Condest);
  
  if( Comm.MyPID() == 0 ) {
    cout << "Condition number estimate (level-of-fill = "
	 << LevelFill <<  ") = " << Condest << endl;
  }

  // Define label for printing out during the solve phase
  string label = "Ifpack_CrsIct Preconditioner: LevelFill = " + toString(LevelFill) + 
                                                 " Overlap = 0"; 
  ICT->SetLabel(label.c_str());
  
  // Here we create an AztecOO object
  AztecOO solver;
  solver.SetUserMatrix(&A);
  solver.SetLHS(&x);
  solver.SetRHS(&b);
  solver.SetAztecOption(AZ_solver,AZ_cg);
  
  // Here we set the IFPACK preconditioner and specify a few parameters
  
  solver.SetPrecOperator(ICT);

  int Niters = 1200;
  solver.SetAztecOption(AZ_kspace, Niters);
  solver.SetAztecOption(AZ_output, 20); 
  solver.Iterate(Niters, 5.0e-5);

  if (ICT!=0) delete ICT;

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return 0;
}
Example #26
int
main (int argc, char *argv[])
{
  using Teuchos::RCP;
  using Teuchos::rcp;
  using std::cerr;
  using std::cout;
  using std::endl;
  // Anasazi solvers have the following template parameters:
  //
  //   - Scalar: The type of dot product results.
  //   - MV: The type of (multi)vectors.
  //   - OP: The type of operators (functions from multivector to
  //     multivector).  A matrix (like Epetra_CrsMatrix) is an example
  //     of an operator; an Ifpack preconditioner is another example.
  //
  // Here, Scalar is double, MV is Epetra_MultiVector, and OP is
  // Epetra_Operator.
  typedef Epetra_MultiVector MV;
  typedef Epetra_Operator OP;
  typedef Anasazi::MultiVecTraits<double, MV> MVT;

#ifdef EPETRA_MPI
  // Initialize MPI
  MPI_Init (&argc, &argv);
  Epetra_MpiComm Comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif // EPETRA_MPI

  const int MyPID = Comm.MyPID ();

  //
  // Set up the test problem
  //

  // Dimensionality of the spatial domain to discretize
  const int space_dim = 2;

  // Size of each of the dimensions of the (discrete) domain
  std::vector<double> brick_dim (space_dim);
  brick_dim[0] = 1.0;
  brick_dim[1] = 1.0;

  // Number of elements in each of the dimensions of the domain
  std::vector<int> elements (space_dim);
  elements[0] = 10;
  elements[1] = 10;

  // Create the test problem.
  RCP<ModalProblem> testCase =
    rcp (new ModeLaplace2DQ2 (Comm, brick_dim[0], elements[0],
                              brick_dim[1], elements[1]));

  // Get the stiffness and mass matrices.
  //
  // rcp (T*, false) returns a nonowning (doesn't deallocate) RCP.
  RCP<Epetra_CrsMatrix> K =
    rcp (const_cast<Epetra_CrsMatrix* > (testCase->getStiffness ()), false);
  RCP<Epetra_CrsMatrix> M =
    rcp (const_cast<Epetra_CrsMatrix* > (testCase->getMass ()), false);

  //
  // Create linear solver for linear systems with K
  //
  // Anasazi uses shift and invert, with a "shift" of zero, to find
  // the eigenvalues of least magnitude.  In this example, we
  // implement the "invert" part of shift and invert by using an
  // Amesos direct solver.
  //

  // Create Epetra linear problem class for solving linear systems
  // with K.  This implements the inverse operator for shift and
  // invert.
  Epetra_LinearProblem AmesosProblem;
  // Tell the linear problem about the matrix K.  Epetra_LinearProblem
  // doesn't know about RCP, so we have to give it a raw pointer.
  AmesosProblem.SetOperator (K.get ());

  // Create Amesos factory and solver for solving linear systems with
  // K.  The solver uses the KLU library to do a sparse LU
  // factorization.
  //
  // Note that the AmesosProblem object "absorbs" K.  Anasazi doesn't
  // see K, just the operator that implements K^{-1} M.
  Amesos amesosFactory;
  RCP<Amesos_BaseSolver> AmesosSolver;

  // Amesos can interface to many different solvers.  The following
  // loop picks a solver that Amesos supports.  The loop order
  // reflects solver preference, only in the sense that using LAPACK
  // here is a suboptimal fall-back.  (With the LAPACK option, Amesos
  // makes a dense version of the sparse matrix and uses LAPACK to
  // compute the factorization.  The other options are true sparse
  // direct factorizations.)
  const int numSolverNames = 9;
  const char* solverNames[9] = {
    "Klu", "Umfpack", "Superlu", "Superludist", "Mumps",
    "Paradiso", "Taucs", "CSparse", "Lapack"
  };
  for (int k = 0; k < numSolverNames; ++k) {
    const char* const solverName = solverNames[k];
    if (amesosFactory.Query (solverName)) {
      AmesosSolver = rcp (amesosFactory.Create (solverName, AmesosProblem));
      if (MyPID == 0) {
        cout << "Amesos solver: \"" << solverName << "\"" << endl;
      }
      break; // take the first (most preferred) solver that is available
    }
  }
  if (AmesosSolver.is_null ()) {
    throw std::runtime_error ("Amesos appears not to have any solvers enabled.");
  }

  // The AmesosGenOp class assumes that the symbolic and numeric
  // factorizations have already been performed on the linear problem.
  AmesosSolver->SymbolicFactorization ();
  AmesosSolver->NumericFactorization ();

  //
  // Set parameters for the block Krylov-Schur eigensolver
  //

  double tol = 1.0e-8; // convergence tolerance
  int nev = 10; // number of eigenvalues for which to solve
  int blockSize = 3; // block size (number of eigenvectors processed at once)
  int numBlocks = 3 * nev / blockSize; // restart length
  int maxRestarts = 5; // maximum number of restart cycles

  // We're looking for the largest-magnitude eigenvalues of the
  // _inverse_ operator, thus, the smallest-magnitude eigenvalues of
  // the original operator.
  std::string which = "LM";
  int verbosity = Anasazi::Errors + Anasazi::Warnings + Anasazi::FinalSummary;

  // Create ParameterList to pass into eigensolver
  Teuchos::ParameterList MyPL;
  MyPL.set ("Verbosity", verbosity);
  MyPL.set ("Which", which);
  MyPL.set ("Block Size", blockSize);
  MyPL.set ("Num Blocks", numBlocks);
  MyPL.set ("Maximum Restarts", maxRestarts);
  MyPL.set ("Convergence Tolerance", tol);

  // Create an initial set of vectors to start the eigensolver.  Note:
  // This needs to have the same number of columns as the block size.
  RCP<MV> ivec = rcp (new MV (K->Map (), blockSize));
  ivec->Random ();

  // Create the Epetra_Operator for the spectral transformation using
  // the Amesos direct solver.
  //
  // The AmesosGenOp object is the operator we give to Anasazi.  Thus,
  // Anasazi just sees an operator that computes y = K^{-1} M x.  The
  // matrix K got absorbed into AmesosProblem (the
  // Epetra_LinearProblem object).  Later, when we set up the Anasazi
  // eigensolver, we will need to tell it about M, so that it can
  // orthogonalize basis vectors with respect to the inner product
  // defined by M (since it is symmetric positive definite).  A sketch of
  // such an operator appears after this example.
  RCP<AmesosGenOp> Aop = rcp (new AmesosGenOp (AmesosSolver, M));

  // Create the eigenproblem.  This object holds all the stuff about
  // your problem that Anasazi will see.
  //
  // Anasazi only needs M so that it can orthogonalize basis vectors
  // with respect to the M inner product.  Wouldn't it be nice if
  // Anasazi didn't require M in two different places?  Alas, this is
  // not currently the case.
  RCP<Anasazi::BasicEigenproblem<double,MV,OP> > MyProblem =
    rcp (new Anasazi::BasicEigenproblem<double,MV,OP> (Aop, M, ivec));

  // Tell the eigenproblem that the matrix pencil (K,M) is symmetric.
  MyProblem->setHermitian (true);

  // Set the number of eigenvalues requested
  MyProblem->setNEV (nev);

  // Tell the eigenproblem that you are finished passing it information.
  const bool boolret = MyProblem->setProblem ();
  if (boolret != true) {
    if (MyPID == 0) {
      cerr << "Anasazi::BasicEigenproblem::setProblem() returned with error." << endl;
    }
#ifdef EPETRA_MPI
    MPI_Finalize ();
#endif // EPETRA_MPI
    return -1;
  }

  // Create the Block Krylov-Schur eigensolver.
  Anasazi::BlockKrylovSchurSolMgr<double, MV, OP> MySolverMgr (MyProblem, MyPL);

  // Solve the eigenvalue problem.
  //
  // Note that creating the eigensolver is separate from solving it.
  // After creating the eigensolver, you may call solve() multiple
  // times with different parameters or initial vectors.  This lets
  // you reuse intermediate state, like allocated basis vectors.
  Anasazi::ReturnType returnCode = MySolverMgr.solve ();
  if (returnCode != Anasazi::Converged && MyPID == 0) {
    cout << "Anasazi eigensolver did not converge." << endl;
  }

  // Get the eigenvalues and eigenvectors from the eigenproblem.
  Anasazi::Eigensolution<double,MV> sol = MyProblem->getSolution ();
  // Anasazi returns eigenvalues as Anasazi::Value, so that if
  // Anasazi's Scalar type is real-valued (as it is in this case), but
  // some eigenvalues are complex, you can still access the
  // eigenvalues correctly.  In this case, there are no complex
  // eigenvalues, since the matrix pencil is symmetric.
  std::vector<Anasazi::Value<double> > evals = sol.Evals;
  RCP<MV> evecs = sol.Evecs;
  int numev = sol.numVecs;

  if (numev > 0) {
    // Reconstruct the eigenvalues.  The ones that Anasazi gave back
    // are the inverses of the original eigenvalues.  Reconstruct the
    // eigenvectors too.
    MV tempvec (K->Map (), MVT::GetNumberVecs (*evecs));
    K->Apply (*evecs, tempvec);
    Teuchos::SerialDenseMatrix<int,double> dmatr (numev, numev);
    MVT::MvTransMv (1.0, tempvec, *evecs, dmatr);

    if (MyPID == 0) {
      double compeval = 0.0;
      cout.setf (std::ios_base::right, std::ios_base::adjustfield);
      cout << "Actual Eigenvalues (obtained by Rayleigh quotient) : " << endl;
      cout << "------------------------------------------------------" << endl;
      cout << std::setw(16) << "Real Part"
           << std::setw(16) << "Rayleigh Error" << endl;
      cout << "------------------------------------------------------" << endl;
      for (int i = 0; i < numev; ++i) {
        compeval = dmatr(i,i);
        cout << std::setw(16) << compeval
             << std::setw(16)
             << std::fabs (compeval - 1.0/evals[i].realpart)
             << endl;
      }
      cout << "------------------------------------------------------" << endl;
    }
  }

#ifdef EPETRA_MPI
  MPI_Finalize ();
#endif // EPETRA_MPI

  return 0;
}
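// The AmesosGenOp class used in the example above is defined elsewhere in
// the original source.  The sketch below shows the essential idea under
// stated assumptions: Apply() computes y = K^{-1} M x by multiplying with M
// and then solving with the already-factored Amesos solver.  The class name
// carries a "Sketch" suffix because the exact original interface is not
// reproduced here.
#include "Epetra_Operator.h"
#include "Epetra_MultiVector.h"
#include "Epetra_LinearProblem.h"
#include "Amesos_BaseSolver.h"
#include "Teuchos_RCP.hpp"

class AmesosGenOpSketch : public Epetra_Operator {
public:
  AmesosGenOpSketch (const Teuchos::RCP<Amesos_BaseSolver>& solver,
                     const Teuchos::RCP<Epetra_Operator>& massMtx)
    : solver_ (solver), massMtx_ (massMtx) {}

  // y = K^{-1} M x: multiply by M, then solve with the factored K.
  int Apply (const Epetra_MultiVector& X, Epetra_MultiVector& Y) const {
    Epetra_MultiVector MX (X.Map (), X.NumVectors ());
    massMtx_->Apply (X, MX);   // MX = M * X
    Epetra_LinearProblem* problem =
      const_cast<Epetra_LinearProblem*> (solver_->GetProblem ());
    problem->SetRHS (&MX);
    problem->SetLHS (&Y);
    return solver_->Solve ();  // Y = K^{-1} * MX
  }

  // The remaining Epetra_Operator methods are stubbed or forwarded to M.
  int SetUseTranspose (bool) { return -1; }  // transpose not supported here
  int ApplyInverse (const Epetra_MultiVector&, Epetra_MultiVector&) const { return -1; }
  double NormInf () const { return 0.0; }
  const char* Label () const { return "y = K^{-1} M x (sketch)"; }
  bool UseTranspose () const { return false; }
  bool HasNormInf () const { return false; }
  const Epetra_Comm& Comm () const { return massMtx_->Comm (); }
  const Epetra_Map& OperatorDomainMap () const { return massMtx_->OperatorDomainMap (); }
  const Epetra_Map& OperatorRangeMap () const { return massMtx_->OperatorRangeMap (); }

private:
  Teuchos::RCP<Amesos_BaseSolver> solver_;
  Teuchos::RCP<Epetra_Operator> massMtx_;
};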
Example #27
int main(int argc, char *argv[]) {

#ifdef EPETRA_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm comm;
#endif

  int MyPID = comm.MyPID();

  bool verbose = false;
  bool verbose1 = false; 
  // Check if we should print results to standard out
  if (argc > 1) {
    if ((argv[1][0] == '-') && (argv[1][1] == 'v')) {
      verbose1 = true;
      if (MyPID==0) verbose = true;
    }
  }
  if (verbose)
    std::cout << EpetraExt::EpetraExt_Version() << std::endl << std::endl;

  if (verbose1) std::cout << comm << std::endl;


  // Uncomment the next three lines to debug in mpi mode
  //int tmp;
  //if (MyPID==0) cin >> tmp;
  //comm.Barrier();

  Epetra_Map * map;
  Epetra_CrsMatrix * A; 
  Epetra_Vector * x; 
  Epetra_Vector * b;
  Epetra_Vector * xexact;

  int nx = 20*comm.NumProc();
  int ny = 30;
  int npoints = 7;
  int xoff[] = {-1,  0,  1, -1,  0,  1,  0};
  int yoff[] = {-1, -1, -1,  0,  0,  0,  1};

   
  int ierr = 0;
  // Generate a 2D 7-point Crs problem (0-based indexing)
  Trilinos_Util_GenerateCrsProblem(nx, ny, npoints, xoff, yoff, comm, map, A, x, b, xexact);

  ierr += runTests(*map, *A, *x, *b, *xexact, verbose);

  delete A;
  delete x;
  delete b;
  delete xexact;
  delete map;

  // Generate the same problem with 1-based indexing
  Trilinos_Util_GenerateCrsProblem(nx, ny, npoints, xoff, yoff, comm, map, A, x, b, xexact, 1);

  ierr += runTests(*map, *A, *x, *b, *xexact, verbose);

  delete A;
  delete x;
  delete b;
  delete xexact;
  delete map;

  // Generate the same problem with -1-based indexing
  Trilinos_Util_GenerateCrsProblem(nx, ny, npoints, xoff, yoff, comm, map, A, x, b, xexact, -1);

  ierr += runTests(*map, *A, *x, *b, *xexact, verbose);

  delete A;
  delete x;
  delete b;
  delete xexact;
  delete map;

  int nx1 = 5;
  int ny1 = 4;
  Poisson2dOperator Op(nx1, ny1, comm);
  ierr += runOperatorTests(Op, verbose);

  generateHyprePrintOut("MyMatrixFile", comm);

  EPETRA_CHK_ERR(EpetraExt::HypreFileToCrsMatrix("MyMatrixFile", comm, A));
  
  runHypreTest(*A);
  delete A;

#ifdef EPETRA_MPI
  MPI_Finalize();
#endif

  return(ierr);
}
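// runHypreTest (like runTests, runOperatorTests, and generateHyprePrintOut
// above) is defined elsewhere in the original test.  A minimal sketch,
// assuming the helper only verifies that the matrix read back from the
// Hypre file can drive a solve; the body is illustrative, not the Trilinos
// original.
#include "AztecOO.h"
#include "Epetra_CrsMatrix.h"
#include "Epetra_Vector.h"

int runHypreTest(Epetra_CrsMatrix& A)
{
  Epetra_Vector x(A.RowMap());
  Epetra_Vector b(A.RowMap());
  b.PutScalar(1.0);                          // simple right-hand side

  AztecOO solver;
  solver.SetUserMatrix(&A);
  solver.SetLHS(&x);
  solver.SetRHS(&b);
  solver.SetAztecOption(AZ_solver, AZ_cg);   // assumes an SPD test matrix
  solver.SetAztecOption(AZ_output, AZ_none);
  solver.Iterate(100, 1.0e-8);
  return 0;
}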
Example #28
int main(int argc, char *argv[]) {

#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  int MyPID = Comm.MyPID();
  bool verbose = false; 
  if (MyPID==0) verbose = true;

  Teuchos::ParameterList GaleriList;
  int nx = 30; 

  GaleriList.set("nx", nx);
  GaleriList.set("ny", nx * Comm.NumProc());
  GaleriList.set("mx", 1);
  GaleriList.set("my", Comm.NumProc());
  Teuchos::RefCountPtr<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap("Cartesian2D", Comm, GaleriList) );
  Teuchos::RefCountPtr<Epetra_CrsMatrix> A = Teuchos::rcp( Galeri::CreateCrsMatrix("Laplace2D", &*Map, GaleriList) );
  Teuchos::RefCountPtr<Epetra_MultiVector> LHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) );
  Teuchos::RefCountPtr<Epetra_MultiVector> RHS = Teuchos::rcp( new Epetra_MultiVector(*Map, 1) );
  LHS->PutScalar(0.0); RHS->Random();

  // ============================ //
  // Construct ILU preconditioner //
  // ---------------------------- //

  // Use nontrivial parameter values to verify that the old and new
  // interfaces respond identically.
  int    LevelFill = 2;
  double DropTol = 0.3333;
  double Athresh = 0.0123;
  double Rthresh = 0.9876;
  double Relax   = 0.1;
  int    Overlap = 2;
  
  Teuchos::RefCountPtr<Ifpack_IlukGraph> Graph;
  Teuchos::RefCountPtr<Ifpack_CrsRiluk> RILU;

  Graph = Teuchos::rcp( new Ifpack_IlukGraph(A->Graph(), LevelFill, Overlap) );
  int ierr;
  ierr = Graph->ConstructFilledGraph();
  IFPACK_CHK_ERR(ierr);

  RILU = Teuchos::rcp( new Ifpack_CrsRiluk(*Graph) );
  RILU->SetAbsoluteThreshold(Athresh);
  RILU->SetRelativeThreshold(Rthresh);
  RILU->SetRelaxValue(Relax);
  int initerr = RILU->InitValues(*A);
  if (initerr!=0) cout << Comm << "*ERR* InitValues error = " << initerr << endl;

  RILU->Factor();

  // Define label for printing out during the solve phase
  string label = "Ifpack_CrsRiluk Preconditioner: LevelFill = " + toString(LevelFill) +
                                                 " Overlap = " + toString(Overlap) +
                                                 " Athresh = " + toString(Athresh) +
                                                 " Rthresh = " + toString(Rthresh);
  // Here we create an AztecOO object
  LHS->PutScalar(0.0);

  int Niters = 1200;

  AztecOO solver;
  solver.SetUserMatrix(&*A);
  solver.SetLHS(&*LHS);
  solver.SetRHS(&*RHS);
  solver.SetAztecOption(AZ_solver,AZ_gmres);
  solver.SetPrecOperator(&*RILU);
  solver.SetAztecOption(AZ_output, 16); 
  solver.Iterate(Niters, 5.0e-5);

  int OldIters = solver.NumIters();

  // now rebuild the same preconditioner via the Ifpack factory ("ILU");
  // we expect the same number of iterations

  Ifpack Factory;
  Teuchos::RefCountPtr<Ifpack_Preconditioner> Prec = Teuchos::rcp( Factory.Create("ILU", &*A, Overlap) );

  Teuchos::ParameterList List;
  // Note: ParameterList::get(name, default) inserts the default into the
  // list if the entry is absent, so these calls also set the parameters.
  List.get("fact: level-of-fill", LevelFill);
  List.get("fact: drop tolerance", DropTol);
  List.get("fact: absolute threshold", Athresh);
  List.get("fact: relative threshold", Rthresh);
  List.get("fact: relax value", Relax);

  IFPACK_CHK_ERR(Prec->SetParameters(List));
  IFPACK_CHK_ERR(Prec->Compute());

  // Here we create an AztecOO object
  LHS->PutScalar(0.0);

  solver.SetUserMatrix(&*A);
  solver.SetLHS(&*LHS);
  solver.SetRHS(&*RHS);
  solver.SetAztecOption(AZ_solver,AZ_gmres);
  solver.SetPrecOperator(&*Prec);
  solver.SetAztecOption(AZ_output, 16); 
  solver.Iterate(Niters, 5.0e-5);

  int NewIters = solver.NumIters();

  if (OldIters != NewIters)
    IFPACK_CHK_ERR(-1);


#ifdef HAVE_IFPACK_SUPERLU
  // Now test with SuperLU's ILU, if it is available
  Teuchos::RefCountPtr<Ifpack_Preconditioner> Prec2 = Teuchos::rcp( Factory.Create("SILU", &*A,0) ); 
  Teuchos::ParameterList SList;
  SList.set("fact: drop tolerance",1e-4);
  SList.set("fact: zero pivot threshold",.1);
  SList.set("fact: maximum fill factor",10.0);
  // NOTE: There is a bug in SuperLU 4.0 which will crash the code if the maximum fill factor is set too low.
  // This bug was reported to Sherry Li on 4/8/10.
  SList.set("fact: silu drop rule",9);

  IFPACK_CHK_ERR(Prec2->SetParameters(SList));
  IFPACK_CHK_ERR(Prec2->Compute());

  LHS->PutScalar(0.0);
  solver.SetPrecOperator(&*Prec2);
  solver.Iterate(Niters, 5.0e-5);
  Prec2->Print(cout);

#endif


#ifdef HAVE_MPI
  MPI_Finalize() ;
#endif

  return(EXIT_SUCCESS);
}