Code Example #1
File: ex1.cpp  Project: cakeisalie/oomphlib_003
#include "BelosConfigDefs.hpp"
#include "BelosLinearProblem.hpp"
#include "BelosEpetraAdapter.hpp"
#include "BelosBlockCGSolMgr.hpp"
#include "Trilinos_Util_CrsMatrixGallery.h"
#include "Epetra_CrsMatrix.h"
#include "Epetra_Map.h"
#ifdef HAVE_MPI
#include <mpi.h>
#include "Epetra_MpiComm.h"
#else
#include "Epetra_SerialComm.h"
#endif
#include <cassert>
#include <cstdlib>
#include <iomanip>
#include <iostream>
#include <vector>

using namespace std;

int main(int argc, char *argv[]) {
    
#ifdef HAVE_MPI
  // Initialize MPI and set up an Epetra communicator
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  // If we aren't using MPI, then set up a serial communicator.
  Epetra_SerialComm Comm;
#endif

  // Some typedefs for oft-used data types
  typedef Epetra_MultiVector MV;
  typedef Epetra_Operator OP;
  typedef Belos::MultiVecTraits<double, Epetra_MultiVector> MVT;

  bool ierr;

  // Get our processor ID (0 if running serial)
  int MyPID = Comm.MyPID();

  // Verbose flag: only processor 0 should print
  bool verbose = (MyPID==0);

  // Initialize a Gallery object, from which we will select a matrix.
  // Select a 2-D Laplacian of order 100.
  Trilinos_Util::CrsMatrixGallery Gallery("laplace_2d", Comm);
  Gallery.Set("problem_size", 100);

  // Say hello and print some information about the gallery matrix
  if (verbose) {
    cout << "Belos Example: Block CG" << endl;
    cout << "Problem info:" << endl;
    cout << Gallery;
    cout << endl;
  }

  // Set up some more problem/solver parameters:

  // Block size
  int blocksize = 4;

  // Get a pointer to the system matrix, inside of a Teuchos::RCP.
  // Teuchos::RCP is a reference-counted smart pointer that deletes the
  // object automatically once the last reference to it goes away, so we
  // never have to free this memory manually.
  Teuchos::RCP<Epetra_CrsMatrix> A = Teuchos::rcp( Gallery.GetMatrix(), false );
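  // (The second argument, false, tells the RCP not to take ownership:
  // the Gallery object still owns, and will eventually delete, the matrix.)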

  // Create Epetra_MultiVectors for the right-hand side B and the solution X
  const Epetra_Map * Map = &(A->RowMap());
  Teuchos::RCP<Epetra_MultiVector> B = 
    Teuchos::rcp( new Epetra_MultiVector(*Map,blocksize) );
  Teuchos::RCP<Epetra_MultiVector> X = 
    Teuchos::rcp( new Epetra_MultiVector(*Map,blocksize) );

  // Initialize the solution to zero and the right-hand side with random entries
  X->PutScalar( 0.0 );
  B->Random();

  // Set up the linear problem, with the matrix A and the vectors X and B
  Teuchos::RCP< Belos::LinearProblem<double,MV,OP> > myProblem = 
    Teuchos::rcp( new Belos::LinearProblem<double,MV,OP>(A, X, B) );

  // The 2-D laplacian is symmetric. Specify this in the linear problem.
  myProblem->setHermitian();

  // Signal that we are done setting up the linear problem
  ierr = myProblem->setProblem();

  // Check the return from setProblem(). It returns true if the problem was
  // set up correctly; false probably means we did not specify enough
  // information for the linear problem.
  assert(ierr == true);

  // Specify the verbosity level. Options include:
  // Belos::Errors 
  //   This option is always set
  // Belos::Warnings 
  //   Warnings (less severe than errors)
  // Belos::IterationDetails 
  //   Details at each iteration, such as the current residual norms
  // Belos::OrthoDetails 
  //   Details about orthogonality
  // Belos::TimingDetails
  //   A summary of the timing info for the solve() routine
  // Belos::FinalSummary 
  //   A final summary 
  // Belos::Debug 
  //   Debugging information
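  // The options are bit flags, so several can be combined by summing them: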
  int verbosity = Belos::Warnings + Belos::Errors + Belos::FinalSummary + Belos::TimingDetails;

  // Create the parameter list for the linear solver
  Teuchos::RCP<Teuchos::ParameterList> myPL = Teuchos::rcp( new Teuchos::ParameterList() );
  myPL->set( "Verbosity", verbosity );
  myPL->set( "Block Size", blocksize );
  myPL->set( "Maximum Iterations", 100 );
  myPL->set( "Convergence Tolerance", 1.0e-8 );

  // Create the Block CG solver
  // This takes as inputs the linear problem and the solver parameters
  Belos::BlockCGSolMgr<double,MV,OP> mySolver(myProblem, myPL);

  // Solve the linear problem, and save the return code
  Belos::ReturnType solverRet = mySolver.solve();

  // Check the return code of the solver: Converged or Unconverged
  switch (solverRet) {

  // UNCONVERGED
  case Belos::Unconverged:
    if (verbose) 
      cout << "Belos::BlockCGSolMgr::solve() did not converge!" << endl;
#ifdef HAVE_MPI
    MPI_Finalize();
#endif
    return 0;

  // CONVERGED
  case Belos::Converged:
    if (verbose) 
      cout << "Belos::BlockCGSolMgr::solve() converged!" << endl;
    break;
  }

  // Test residuals
  Epetra_MultiVector R( B->Map(), blocksize );

  // R = A*X
  A->Apply( *X, R );

  // R -= B 
  MVT::MvAddMv( -1.0, *B, 1.0, R, R );
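  // (MvAddMv computes R = -1.0*B + 1.0*R, so R now holds the residual A*X - B.)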

  // Compute the 2-norm of each vector in the MultiVector
  // and store them to a std::vector<double>
  std::vector<double> normR(blocksize), normB(blocksize);
  MVT::MvNorm( R, normR );
  MVT::MvNorm( *B, normB );

  // Output results to screen
  if(verbose) {
    cout << scientific << setprecision(6) << showpoint;
    cout << "******************************************************\n"
         << "           Results (outside of linear solver)           \n" 
         << "------------------------------------------------------\n"
         << "  Linear System\t\tRelative Residual\n"
         << "------------------------------------------------------\n";
    for( int i=0 ; i<blocksize ; ++i ) {
      cout << "  " << i+1 << "\t\t\t" << normR[i]/normB[i] << endl;
    }
    cout << "******************************************************\n" << endl;
  }


#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return(EXIT_SUCCESS);
}
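A natural extension of Example #1 is to precondition the Block CG iteration. The fragment below is a minimal sketch, assuming the Ifpack package is available; the preconditioner type "ICT" (incomplete Cholesky, suited to the symmetric Laplacian) and the names prec/belosPrec are illustrative choices, not part of the original example. It reuses A and myProblem from above and belongs before the call to myProblem->setProblem().

#include "Ifpack.h"

  // Use the Ifpack factory to build an incomplete Cholesky factorization
  // of A (overlap level 0 between processors).
  Ifpack factory;
  Teuchos::RCP<Ifpack_Preconditioner> prec =
    Teuchos::rcp( factory.Create("ICT", &*A, 0) );
  prec->Initialize();
  prec->Compute();

  // Belos applies a preconditioner through Epetra_Operator::Apply(), so wrap
  // the Ifpack object in Belos::EpetraPrecOp, whose Apply() calls the
  // underlying ApplyInverse().
  Teuchos::RCP<Belos::EpetraPrecOp> belosPrec =
    Teuchos::rcp( new Belos::EpetraPrecOp(prec) );

  // Register the preconditioner, then call myProblem->setProblem() as before.
  myProblem->setLeftPrec( belosPrec );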
Code Example #2
#include "AnasaziConfigDefs.hpp"
#include "AnasaziBasicEigenproblem.hpp"
#include "AnasaziLOBPCGSolMgr.hpp"
#include "AnasaziEpetraAdapter.hpp"
#include "Epetra_CrsMatrix.h"
#include "Epetra_Map.h"
#include "Teuchos_ParameterList.hpp"
#include "Teuchos_SerialDenseMatrix.hpp"
#include "Galeri_Maps.h"
#include "Galeri_CrsMatrices.h"
#ifdef HAVE_MPI
#include <mpi.h>
#include "Epetra_MpiComm.h"
#else
#include "Epetra_SerialComm.h"
#endif
#include <iomanip>
#include <iostream>
#include <vector>

int main(int argc, char *argv[])
{

#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
  Epetra_SerialComm Comm;
#endif

  Teuchos::ParameterList GaleriList;

  // The problem is defined on a 2D grid, global size is nx * nx.
  int nx = 30; 
  GaleriList.set("n", nx * nx);
  GaleriList.set("nx", nx);
  GaleriList.set("ny", nx);
  Teuchos::RCP<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap("Linear", Comm, GaleriList) );
  Teuchos::RCP<Epetra_RowMatrix> A = Teuchos::rcp( Galeri::CreateCrsMatrix("Laplace2D", &*Map, GaleriList) );
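  // Galeri's "Linear" map distributes the nx*nx unknowns contiguously across
  // processes; "Laplace2D" assembles the standard 5-point stencil on that map.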

  //  Variables used for the LOBPCG method
  const int    nev       = 4;
  const int    blockSize = 5;
  const double tol       = 1.0e-8;

  typedef Epetra_MultiVector MV;
  typedef Epetra_Operator OP;
  typedef Anasazi::MultiVecTraits<double, Epetra_MultiVector> MVT;

  // Create an Epetra_MultiVector for an initial vector to start the solver.
  // Note:  This needs to have the same number of columns as the blocksize.
  //
  Teuchos::RCP<Epetra_MultiVector> ivec = Teuchos::rcp( new Epetra_MultiVector(*Map, blockSize) );
  ivec->Random();

  // Create the eigenproblem.
  Teuchos::RCP<Anasazi::BasicEigenproblem<double, MV, OP> > problem =
    Teuchos::rcp( new Anasazi::BasicEigenproblem<double, MV, OP>(A, ivec) );

  // Inform the eigenproblem that the operator A is symmetric
  problem->setHermitian(true);

  // Set the number of eigenvalues requested
  problem->setNEV( nev );

  // Inform the eigenproblem that you are finished passing it information
  bool boolret = problem->setProblem();
  if (boolret != true) {
    std::cout<<"Anasazi::BasicEigenproblem::setProblem() returned an error." << std::endl;
#ifdef HAVE_MPI
    MPI_Finalize();
#endif
    return -1;
  }

  // Create parameter list to pass into the solver manager
  Teuchos::ParameterList anasaziPL;
  anasaziPL.set( "Which", "LM" );   // "LM" = eigenvalues of largest magnitude
  anasaziPL.set( "Block Size", blockSize );
  anasaziPL.set( "Maximum Iterations", 500 );
  anasaziPL.set( "Convergence Tolerance", tol );
  anasaziPL.set( "Verbosity", Anasazi::Errors+Anasazi::Warnings+Anasazi::TimingDetails+Anasazi::FinalSummary );

  // Create the solver manager
  Anasazi::LOBPCGSolMgr<double, MV, OP> anasaziSolver(problem, anasaziPL);

  // Solve the problem
  Anasazi::ReturnType returnCode = anasaziSolver.solve();

  // Get the eigenvalues and eigenvectors from the eigenproblem
  Anasazi::Eigensolution<double,MV> sol = problem->getSolution();
  std::vector<Anasazi::Value<double> > evals = sol.Evals;
  Teuchos::RCP<MV> evecs = sol.Evecs;

  // Compute residuals.
  std::vector<double> normR(sol.numVecs);
  if (sol.numVecs > 0) {
    Teuchos::SerialDenseMatrix<int,double> T(sol.numVecs, sol.numVecs);
    Epetra_MultiVector tempAevec( *Map, sol.numVecs );
    T.putScalar(0.0); 
    for (int i=0; i<sol.numVecs; i++) {
      T(i,i) = evals[i].realpart;
    }
    A->Apply( *evecs, tempAevec );
    MVT::MvTimesMatAddMv( -1.0, *evecs, T, 1.0, tempAevec );
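    // tempAevec now holds A*evecs - evecs*T; the column norms computed next
    // are the direct residuals ||A*x_i - lambda_i*x_i||.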
    MVT::MvNorm( tempAevec, normR );
  }

  // Print the results
  std::cout<<"Solver manager returned " << (returnCode == Anasazi::Converged ? "converged." : "unconverged.") << std::endl;
  std::cout<<std::endl;
  std::cout<<"------------------------------------------------------"<<std::endl;
  std::cout<<std::setw(16)<<"Eigenvalue"
           <<std::setw(18)<<"Direct Residual"
           <<std::endl;
  std::cout<<"------------------------------------------------------"<<std::endl;
  for (int i=0; i<sol.numVecs; i++) {
    std::cout<<std::setw(16)<<evals[i].realpart
             <<std::setw(18)<<normR[i]/evals[i].realpart
             <<std::endl;
  }
  std::cout<<"------------------------------------------------------"<<std::endl;

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return 0;
}
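Example #2 uses the LOBPCG solver manager, but the same Anasazi::BasicEigenproblem can be handed to other managers. The fragment below is a minimal sketch, not part of the original example: it swaps in Anasazi::BlockDavidsonSolMgr, which keeps a search subspace of numBlocks * blockSize vectors and takes two extra parameters controlling the subspace size and the restart limit. It reuses problem, blockSize, and tol from above.

#include "AnasaziBlockDavidsonSolMgr.hpp"

  const int numBlocks   = 8;    // the subspace holds numBlocks*blockSize vectors
  const int maxRestarts = 100;  // restart limit once the subspace is full

  Teuchos::ParameterList davidsonPL;
  davidsonPL.set( "Which", "LM" );
  davidsonPL.set( "Block Size", blockSize );
  davidsonPL.set( "Num Blocks", numBlocks );
  davidsonPL.set( "Maximum Restarts", maxRestarts );
  davidsonPL.set( "Convergence Tolerance", tol );

  // Construct the solver manager exactly as before and solve.
  Anasazi::BlockDavidsonSolMgr<double, MV, OP> davidsonSolver(problem, davidsonPL);
  Anasazi::ReturnType davidsonRet = davidsonSolver.solve();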