int
AmesosGenOp::Apply (const Epetra_MultiVector& X, Epetra_MultiVector& Y) const
{
  if (problem_ == NULL) {
    throw std::logic_error ("AmesosGenOp::Apply: problem_ is NULL");
  }
  if (massMtx_.is_null ()) {
    throw std::logic_error ("AmesosGenOp::Apply: massMtx_ is null");
  }
  if (solver_.is_null ()) {
    throw std::logic_error ("AmesosGenOp::Apply: solver_ is null");
  }

  if (! useTranspose_) {
    // Storage for M*X
    Epetra_MultiVector MX (X.Map (), X.NumVectors ());

    // Apply M*X
    massMtx_->Apply (X, MX);
    Y.PutScalar (0.0);

    // Set the LHS and RHS
    problem_->SetRHS (&MX);
    problem_->SetLHS (&Y);

    // Solve the linear system A*Y = MX
    solver_->Solve ();
  }
  else { // apply the transposed operator
    // Storage for A^{-T}*X
    Epetra_MultiVector ATX (X.Map (), X.NumVectors ());
    // SetRHS() needs a nonconst pointer, so make a (deep) copy of X.
    Epetra_MultiVector tmpX = const_cast<Epetra_MultiVector&> (X);

    // Set the LHS and RHS
    problem_->SetRHS (&tmpX);
    problem_->SetLHS (&ATX);

    // Solve the linear system A^T*Y = X
    solver_->Solve ();

    // Apply M*ATX
    massMtx_->Apply (ATX, Y);
  }

  return 0; // the method completed correctly
}
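// A minimal usage sketch (not part of the original example): wiring an
// operator like AmesosGenOp into an Anasazi eigenproblem for the generalized
// problem A*x = lambda*M*x. The AmesosGenOp constructor used below is
// hypothetical, inferred from the members referenced in Apply().
Teuchos::RCP<Anasazi::BasicEigenproblem<double, Epetra_MultiVector, Epetra_Operator> >
exampleGenOpSetup (const Teuchos::RCP<Epetra_CrsMatrix>& A,
                   const Teuchos::RCP<Epetra_CrsMatrix>& M,
                   const Teuchos::RCP<Epetra_MultiVector>& ivec,
                   Epetra_LinearProblem& linProblem)
{
  // Factor A once; the factorization is reused by every AmesosGenOp::Apply().
  // linProblem is passed in by reference because Amesos keeps a pointer to it,
  // so it must outlive the solver.
  linProblem.SetOperator (A.get ());
  Amesos factory;
  Teuchos::RCP<Amesos_BaseSolver> solver =
    Teuchos::rcp (factory.Create ("Amesos_Klu", linProblem));
  solver->SymbolicFactorization ();
  solver->NumericFactorization ();

  // genOp applies A^{-1}*M; its largest-magnitude eigenvalues correspond to
  // the smallest-magnitude eigenvalues of the original pencil (A, M).
  Teuchos::RCP<AmesosGenOp> genOp =
    Teuchos::rcp (new AmesosGenOp (solver, M, &linProblem)); // hypothetical ctor

  return Teuchos::rcp (new Anasazi::BasicEigenproblem<double, Epetra_MultiVector,
    Epetra_Operator> (genOp, M, ivec));
}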
  void apply_block(const int i, const int j, Teuchos::RCP<Epetra_Vector>& output)
  {
    const Teuchos::RCP<const Epetra_Operator> block = blocked_op->GetBlock(i, j);
    Epetra_Vector testvec(block->OperatorDomainMap());
    const int num_entries = block->OperatorDomainMap().NumMyElements();
    std::vector<int> indices(num_entries);
    for (int k = 0; k != num_entries; ++k)  // 'k' avoids shadowing the block index 'i'
      indices[k] = k;
    if (j == 0)
      testvec.ReplaceMyValues(num_entries, &u_test_vec[0], &indices[0]);
    else
      testvec.ReplaceMyValues(num_entries, &p_test_vec[0], &indices[0]);

    output = Teuchos::rcp(new Epetra_Vector(block->OperatorRangeMap()));
    block->Apply(testvec, *output);
  }
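  // A hypothetical driver fragment (not in the original test): probe every
  // block of a 2x2 blocked operator with apply_block() and report the 2-norm
  // of each result. Assumes it runs inside the same fixture that owns
  // blocked_op, u_test_vec, and p_test_vec.
  void probe_all_blocks()
  {
    for (int row = 0; row < 2; ++row) {
      for (int col = 0; col < 2; ++col) {
        Teuchos::RCP<Epetra_Vector> result;
        apply_block(row, col, result);
        double norm = 0.0;
        result->Norm2(&norm);
        std::cout << "||A(" << row << "," << col << ") * v||_2 = "
                  << norm << std::endl;
      }
    }
  }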
int AmesosBucklingOp::Apply(const Epetra_MultiVector& X, Epetra_MultiVector& Y ) const
{
  // Storage for A*X
  Epetra_MultiVector AX(X.Map(),X.NumVectors());

  // Apply A*X
  stiffMtx_->Apply(X, AX);
  Y.PutScalar(0.0);

  // Set the LHS and RHS
  problem_->SetRHS(&AX);
  problem_->SetLHS(&Y);

  // Solve the linear system (A-sigma*M)*Y = AX
  solver_->Solve();

  return 0;
}
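// A small post-processing sketch (not in the original example): the operator
// above applies (A - sigma*M)^{-1} * A, so if nu is one of its eigenvalues,
// the corresponding eigenvalue of A*x = lambda*M*x satisfies
// nu = lambda / (lambda - sigma), i.e. lambda = sigma*nu / (nu - 1).
// 'sigma' and 'nu' are hypothetical names for the shift and the eigenvalues
// returned by the eigensolver.
std::vector<double>
bucklingEigenvalues (const double sigma, const std::vector<double>& nu)
{
  std::vector<double> lambda (nu.size ());
  for (std::size_t k = 0; k < nu.size (); ++k) {
    lambda[k] = sigma * nu[k] / (nu[k] - 1.0);  // undo the spectral transform
  }
  return lambda;
}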
void 
QCAD::GenEigensolver::evalModel(const InArgs& inArgs,
			const OutArgs& outArgs ) const
{  
  // type definitions
  typedef Epetra_MultiVector MV;
  typedef Epetra_Operator OP;
  typedef Anasazi::MultiVecTraits<double, Epetra_MultiVector> MVT;
  
  // Get the stiffness and mass matrices
  InArgs model_inArgs = model->createInArgs();
  OutArgs model_outArgs = model->createOutArgs();

  //input args
  model_inArgs.set_t(0.0);

  Teuchos::RCP<const Epetra_Vector> x = model->get_x_init();
  Teuchos::RCP<const Epetra_Vector> x_dot = model->get_x_dot_init();
  model_inArgs.set_x(x);
  model_inArgs.set_x_dot(x_dot);

  model_inArgs.set_alpha(0.0);
  model_inArgs.set_beta(1.0);

  for(int i=0; i<model_num_p; i++)
    model_inArgs.set_p(i, inArgs.get_p(i));
  
  //output args
  Teuchos::RCP<Epetra_CrsMatrix> K = 
    Teuchos::rcp_dynamic_cast<Epetra_CrsMatrix>(model->create_W(), true);
  model_outArgs.set_W(K); 

  model->evalModel(model_inArgs, model_outArgs); //compute K matrix

  // reset alpha and beta to compute the mass matrix
  model_inArgs.set_alpha(1.0);
  model_inArgs.set_beta(0.0);
  Teuchos::RCP<Epetra_CrsMatrix> M = 
    Teuchos::rcp_dynamic_cast<Epetra_CrsMatrix>(model->create_W(), true);
  model_outArgs.set_W(M); 

  model->evalModel(model_inArgs, model_outArgs); //compute M matrix

  Teuchos::RCP<Epetra_MultiVector> ivec = Teuchos::rcp( new Epetra_MultiVector(K->OperatorDomainMap(), blockSize) );
  ivec->Random();

  // Create the eigenproblem.
  Teuchos::RCP<Anasazi::BasicEigenproblem<double, MV, OP> > eigenProblem =
    Teuchos::rcp( new Anasazi::BasicEigenproblem<double, MV, OP>(K, M, ivec) );

  // Inform the eigenproblem that the operator A is symmetric
  eigenProblem->setHermitian(bHermitian);

  // Set the number of eigenvalues requested
  eigenProblem->setNEV( nev );

  // Inform the eigenproblem that you are finished passing it information
  bool bSuccess = eigenProblem->setProblem();
  TEUCHOS_TEST_FOR_EXCEPTION(!bSuccess, Teuchos::Exceptions::InvalidParameter,
     "Anasazi::BasicEigenproblem::setProblem() returned an error.\n" << std::endl);

  // Create parameter list to pass into the solver manager
  //
  Teuchos::ParameterList eigenPL;
  eigenPL.set( "Which", which );
  eigenPL.set( "Block Size", blockSize );
  eigenPL.set( "Maximum Iterations", maxIters );
  eigenPL.set( "Convergence Tolerance", conv_tol );
  eigenPL.set( "Full Ortho", true );
  eigenPL.set( "Use Locking", true );
  eigenPL.set( "Verbosity", Anasazi::IterationDetails );

  // Create the solver manager
  Anasazi::LOBPCGSolMgr<double, MV, OP> eigenSolverMan(eigenProblem, eigenPL);

  // Solve the problem
  Anasazi::ReturnType returnCode = eigenSolverMan.solve();

  // Get the eigenvalues and eigenvectors from the eigenproblem
  Anasazi::Eigensolution<double,MV> sol = eigenProblem->getSolution();
  std::vector<Anasazi::Value<double> > evals = sol.Evals;
  Teuchos::RCP<MV> evecs = sol.Evecs;

  std::vector<double> evals_real(sol.numVecs);
  for(int i=0; i<sol.numVecs; i++) evals_real[i] = evals[i].realpart;

  // Compute residuals.
  std::vector<double> normR(sol.numVecs);
  if (sol.numVecs > 0) {
    Teuchos::SerialDenseMatrix<int,double> T(sol.numVecs, sol.numVecs);
    Epetra_MultiVector Kvec( K->OperatorDomainMap(), evecs->NumVectors() );
    Epetra_MultiVector Mvec( M->OperatorDomainMap(), evecs->NumVectors() );
    T.putScalar(0.0); 
    for (int i=0; i<sol.numVecs; i++) {
      T(i,i) = evals_real[i];
    }
    K->Apply( *evecs, Kvec );  
    M->Apply( *evecs, Mvec );  
    MVT::MvTimesMatAddMv( -1.0, Mvec, T, 1.0, Kvec );
    MVT::MvNorm( Kvec, normR );
  }

  // Print the results
  std::ostringstream os;
  os.setf(std::ios_base::right, std::ios_base::adjustfield);
  os<<"Solver manager returned " << (returnCode == Anasazi::Converged ? "converged." : "unconverged.") << std::endl;
  os<<std::endl;
  os<<"------------------------------------------------------"<<std::endl;
  os<<std::setw(16)<<"Eigenvalue"
    <<std::setw(18)<<"Direct Residual"
    <<std::endl;
  os<<"------------------------------------------------------"<<std::endl;
  for (int i=0; i<sol.numVecs; i++) {
    os<<std::setw(16)<<evals_real[i]
      <<std::setw(18)<<normR[i]/evals_real[i]
      <<std::endl;
  }
  os<<"------------------------------------------------------"<<std::endl;

  std::cout << Anasazi::Anasazi_Version() << std::endl << std::endl;
  std::cout << os.str();



  // Package the results in an eigendata structure and "observe" them 
  //   (put them into the user-supplied StateManager object)  (see Albany_SaveEigenData.cpp)
  Teuchos::RCP<Albany::EigendataStruct> eigenData = Teuchos::rcp( new Albany::EigendataStruct );
  eigenData->eigenvalueIm = Teuchos::null;  // eigenvalues are real
  eigenData->eigenvectorIm = Teuchos::null; // eigenvectors are real

  Teuchos::RCP<Albany::AbstractDiscretization> disc = 
    observer->getDiscretization();

  eigenData->eigenvalueRe = Teuchos::rcp( new std::vector<double>(evals_real) );
  for(int i=0; i<sol.numVecs; i++) (*(eigenData->eigenvalueRe))[i] *= -1; 
      //make eigenvals --> neg_eigenvals to mimic historic LOCA eigensolver (TODO: remove this and switch convention)

  if (sol.numVecs > 0) {
    // Store *overlapped* eigenvectors in EigendataStruct
    eigenData->eigenvectorRe = 
      Teuchos::rcp(new Epetra_MultiVector(*(disc->getOverlapMap()), sol.numVecs));

    // Importer for overlapped data
    Teuchos::RCP<Epetra_Import> importer =
      Teuchos::rcp(new Epetra_Import(*(disc->getOverlapMap()), *(disc->getMap())));

    // Overlapped eigenstate vectors
    for(int i=0; i<sol.numVecs; i++)
      (*(eigenData->eigenvectorRe))(i)->Import( *((*evecs)(i)), *importer, Insert );
  }

  observer->setEigenData(eigenData);
  
}
Example #6
int main(int argc, char *argv[]) {
    
#ifdef HAVE_MPI
  // Initialize MPI and setup an Epetra communicator
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  // If we aren't using MPI, then setup a serial communicator.
  Epetra_SerialComm Comm;
#endif

  // Some typedefs for oft-used data types
  typedef Epetra_MultiVector MV;
  typedef Epetra_Operator OP;
  typedef Belos::MultiVecTraits<double, Epetra_MultiVector> MVT;

  bool ierr;

  // Get our processor ID (0 if running serial)
  int MyPID = Comm.MyPID();

  // Verbose flag: only processor 0 should print
  bool verbose = (MyPID==0);

  // Initialize a Gallery object, from which we will select a matrix
  // Select a 2-D laplacian, of order 100
  Trilinos_Util::CrsMatrixGallery Gallery("laplace_2d", Comm);
  Gallery.Set("problem_size", 100);

  // Say hello and print some information about the gallery matrix
  if (verbose) {
    cout << "Belos Example: Block CG" << endl;
    cout << "Problem info:" << endl;
    cout << Gallery;
    cout << endl;
  }

  // Setup some more problem/solver parameters:

  // Block size
  int blocksize = 4;

  // Get a pointer to the system matrix, inside of a Teuchos::RCP
  // Teuchos::RCP is a reference-counting pointer that handles
  // deallocation for us, so that we can allocate memory without
  // having to worry about freeing it manually.
  Teuchos::RCP<Epetra_CrsMatrix> A = Teuchos::rcp( Gallery.GetMatrix(), false );

  // Create Belos-compatible multivectors, based on Epetra_MultiVector
  const Epetra_Map * Map = &(A->RowMap());
  Teuchos::RCP<Epetra_MultiVector> B = 
    Teuchos::rcp( new Epetra_MultiVector(*Map,blocksize) );
  Teuchos::RCP<Epetra_MultiVector> X = 
    Teuchos::rcp( new Epetra_MultiVector(*Map,blocksize) );

  // Initialize the solution with zero and right-hand side with random entries
  X->PutScalar( 0.0 );
  B->Random();

  // Setup the linear problem, with the matrix A and the vectors X and B
  Teuchos::RCP< Belos::LinearProblem<double,MV,OP> > myProblem = 
    Teuchos::rcp( new Belos::LinearProblem<double,MV,OP>(A, X, B) );

  // The 2-D laplacian is symmetric. Specify this in the linear problem.
  myProblem->setHermitian();

  // Signal that we are done setting up the linear problem
  ierr = myProblem->setProblem();

  // Check the return from setProblem(). A return of false indicates an
  // error; this probably means we did not specify enough information for
  // the linear problem.
  assert(ierr == true);

  // Specify the verbosity level. Options include:
  // Belos::Errors 
  //   This option is always set
  // Belos::Warnings 
  //   Warnings (less severe than errors)
  // Belos::IterationDetails 
  //   Details at each iteration, such as the current residual norms
  // Belos::OrthoDetails 
  //   Details about orthogonality
  // Belos::TimingDetails
  //   A summary of the timing info for the solve() routine
  // Belos::FinalSummary 
  //   A final summary 
  // Belos::Debug 
  //   Debugging information
  int verbosity = Belos::Warnings + Belos::Errors + Belos::FinalSummary + Belos::TimingDetails;

  // Create the parameter list for the linear solver
  Teuchos::RCP<Teuchos::ParameterList> myPL = Teuchos::rcp( new Teuchos::ParameterList() );
  myPL->set( "Verbosity", verbosity );
  myPL->set( "Block Size", blocksize );
  myPL->set( "Maximum Iterations", 100 );
  myPL->set( "Convergence Tolerance", 1.0e-8 );

  // Create the Block CG solver
  // This takes as inputs the linear problem and the solver parameters
  Belos::BlockCGSolMgr<double,MV,OP> mySolver(myProblem, myPL);

  // Solve the linear problem, and save the return code
  Belos::ReturnType solverRet = mySolver.solve();

  // Check return code of the solver: Unconverged, Failed, or OK
  switch (solverRet) {

  // UNCONVERGED
  case Belos::Unconverged:
    if (verbose) 
      cout << "Belos::BlockCGSolMgr::solve() did not converge!" << endl;
#ifdef HAVE_MPI
    MPI_Finalize();
#endif
    return 0;
    break;

  // CONVERGED
  case Belos::Converged:
    if (verbose) 
      cout << "Belos::BlockCGSolMgr::solve() converged!" << endl;
    break;
  }

  // Test residuals
  Epetra_MultiVector R( B->Map(), blocksize );

  // R = A*X
  A->Apply( *X, R );

  // R -= B 
  MVT::MvAddMv( -1.0, *B, 1.0, R, R );

  // Compute the 2-norm of each vector in the MultiVector
  // and store them to a std::vector<double>
  std::vector<double> normR(blocksize), normB(blocksize);
  MVT::MvNorm( R, normR );
  MVT::MvNorm( *B, normB );

  // Output results to screen
  if(verbose) {
    cout << scientific << setprecision(6) << showpoint;
    cout << "******************************************************\n"
         << "           Results (outside of linear solver)           \n" 
         << "------------------------------------------------------\n"
         << "  Linear System\t\tRelative Residual\n"
         << "------------------------------------------------------\n";
    for( int i=0 ; i<blocksize ; ++i ) {
      cout << "  " << i+1 << "\t\t\t" << normR[i]/normB[i] << endl;
    }
    cout << "******************************************************\n" << endl;
  }


#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return(EXIT_SUCCESS);
}
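// A hedged follow-on sketch (not in the original example): Belos lets you
// reuse one LinearProblem and solver manager for a new right-hand side via
// setProblem(X, B), avoiding a second solver construction. This helper is
// hypothetical and would be called with the objects built in main() above.
Belos::ReturnType
solveAgain (Belos::LinearProblem<double, Epetra_MultiVector, Epetra_Operator>& problem,
            Belos::SolverManager<double, Epetra_MultiVector, Epetra_Operator>& solver,
            const Teuchos::RCP<Epetra_MultiVector>& X,
            const Teuchos::RCP<Epetra_MultiVector>& B)
{
  B->Random ();          // new right-hand side
  X->PutScalar (0.0);    // reset the initial guess
  bool ok = problem.setProblem (X, B);
  assert (ok == true);
  return solver.solve ();
}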
Example #7
int main(int argc, char *argv[]) 
{
  // Initialize MPI
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
#endif

  // Create a communicator for Epetra objects
#ifdef HAVE_MPI
  Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
  Epetra_SerialComm Comm;
#endif
 
  bool verbose = false;

  if (argc > 1)
    if (argv[1][0]=='-' && argv[1][1]=='v')
      verbose = true;

  // Get the process ID and the total number of processors
  int MyPID = Comm.MyPID();
#ifdef HAVE_MPI
  int NumProc = Comm.NumProc();
#endif

  // define the parameters of the nonlinear PDE problem
  int nx = 5;
  int ny = 6;  
  double lambda = 1.0;

  PDEProblem Problem(nx,ny,lambda,&Comm);
 
  // starting solution, here a zero vector
  Epetra_Vector InitialGuess(Problem.GetMatrix()->Map());
  InitialGuess.PutScalar(0.0);

  // random vector upon which to apply each operator being tested
  Epetra_Vector directionVec(Problem.GetMatrix()->Map());
  directionVec.Random();

  // Set up the problem interface
  Teuchos::RCP<SimpleProblemInterface> interface = 
    Teuchos::rcp(new SimpleProblemInterface(&Problem) );
  
  // Set up the solver options parameter list
  Teuchos::RCP<Teuchos::ParameterList> noxParamsPtr = Teuchos::rcp(new Teuchos::ParameterList);
  Teuchos::ParameterList & noxParams = *(noxParamsPtr.get());

  // Set the nonlinear solver method
  noxParams.set("Nonlinear Solver", "Line Search Based");

  // Set up the printing utilities
  // Only print output if the "-v" flag is set on the command line
  Teuchos::ParameterList& printParams = noxParams.sublist("Printing");
  printParams.set("MyPID", MyPID); 
  printParams.set("Output Precision", 5);
  printParams.set("Output Processor", 0);
  if( verbose )
    printParams.set("Output Information", 
		NOX::Utils::OuterIteration + 
		NOX::Utils::OuterIterationStatusTest + 
		NOX::Utils::InnerIteration +
		NOX::Utils::Parameters + 
		NOX::Utils::Details + 
		NOX::Utils::Warning +
		NOX::Utils::TestDetails);
  else
    printParams.set("Output Information", NOX::Utils::Error +
		NOX::Utils::TestDetails);

  NOX::Utils printing(printParams);


  // Identify the test problem
  if (printing.isPrintType(NOX::Utils::TestDetails))
    printing.out() << "Starting epetra/NOX_Operators/NOX_Operators.exe" << std::endl;

  // Identify processor information
#ifdef HAVE_MPI
  if (printing.isPrintType(NOX::Utils::TestDetails)) {
    printing.out() << "Parallel Run" << std::endl;
    printing.out() << "Number of processors = " << NumProc << std::endl;
    printing.out() << "Print Process = " << MyPID << std::endl;
  }
  Comm.Barrier();
  if (printing.isPrintType(NOX::Utils::TestDetails))
    printing.out() << "Process " << MyPID << " is alive!" << std::endl;
  Comm.Barrier();
#else
  if (printing.isPrintType(NOX::Utils::TestDetails))
    printing.out() << "Serial Run" << std::endl;
#endif

  int status = 0;

  Teuchos::RCP<NOX::Epetra::Interface::Required> iReq = interface;

  // Need a NOX::Epetra::Vector for constructor
  NOX::Epetra::Vector noxInitGuess(InitialGuess, NOX::DeepCopy);

  // Analytic matrix
  Teuchos::RCP<Epetra_CrsMatrix> A = Teuchos::rcp( Problem.GetMatrix(), false );

  Epetra_Vector A_resultVec(Problem.GetMatrix()->Map());
  interface->computeJacobian( InitialGuess, *A );
  A->Apply( directionVec, A_resultVec );

  // FD operator
  Teuchos::RCP<Epetra_CrsGraph> graph = Teuchos::rcp( const_cast<Epetra_CrsGraph*>(&A->Graph()), false );
  Teuchos::RCP<NOX::Epetra::FiniteDifference> FD = Teuchos::rcp(
    new NOX::Epetra::FiniteDifference(printParams, iReq, noxInitGuess, graph) );
  
  Epetra_Vector FD_resultVec(Problem.GetMatrix()->Map());
  FD->computeJacobian(InitialGuess, *FD);
  FD->Apply( directionVec, FD_resultVec );

  // Matrix-Free operator
  Teuchos::RCP<NOX::Epetra::MatrixFree> MF = Teuchos::rcp(
    new NOX::Epetra::MatrixFree(printParams, iReq, noxInitGuess) );

  Epetra_Vector MF_resultVec(Problem.GetMatrix()->Map());
  MF->computeJacobian(InitialGuess, *MF);
  MF->Apply( directionVec, MF_resultVec );

  // Need NOX::Epetra::Vectors for tests
  NOX::Epetra::Vector noxAvec ( A_resultVec , NOX::DeepCopy );
  NOX::Epetra::Vector noxFDvec( FD_resultVec, NOX::DeepCopy );
  NOX::Epetra::Vector noxMFvec( MF_resultVec, NOX::DeepCopy );

  // Create a TestCompare class
  NOX::Epetra::TestCompare tester( printing.out(), printing);
  double abstol = 1.e-4;
  double reltol = 1.e-4 ;
  //NOX::TestCompare::CompareType aComp = NOX::TestCompare::Absolute;

  status += tester.testVector( noxFDvec, noxAvec, reltol, abstol,
                              "Finite-Difference Operator Apply Test" );
  status += tester.testVector( noxMFvec, noxAvec, reltol, abstol,
                              "Matrix-Free Operator Apply Test" );

  // Summarize test results  
  if( status == 0 )
    printing.out() << "Test passed!" << std::endl;
  else 
    printing.out() << "Test failed!" << std::endl;

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  // Final return value (0 = successful, non-zero = failure)
  return status;

}
int main(int argc, char *argv[]) {
  int i, j, info;
  const double one = 1.0;
  const double zero = 0.0;
  Teuchos::LAPACK<int,double> lapack;

#ifdef EPETRA_MPI
  // Initialize MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  int MyPID = Comm.MyPID();

  //  Dimension of the matrix
  int m = 500;
  int n = 100;

  // Construct a Map that puts approximately the same number of
  // equations on each processor.

  Epetra_Map RowMap(m, 0, Comm);
  Epetra_Map ColMap(n, 0, Comm);

  // Get update list and number of local equations from newly created Map.

  int NumMyRowElements = RowMap.NumMyElements();
  
  std::vector<int> MyGlobalRowElements(NumMyRowElements);
  RowMap.MyGlobalElements(&MyGlobalRowElements[0]);

  /* We are building an m by n matrix with entries

       A(i,j) = k*(si)*(tj - 1)  if i <= j
              = k*(tj)*(si - 1)  if i  > j

     where si = i/(m+1), tj = j/(n+1), and k = 1/(n+1).
  */

  // Create an Epetra_Matrix
  Teuchos::RCP<Epetra_CrsMatrix> A = Teuchos::rcp( new Epetra_CrsMatrix(Copy, RowMap, n) );

  // Compute coefficients for discrete integral operator
  std::vector<double> Values(n);
  std::vector<int> Indices(n);
  double inv_mp1 = one/(m+1);
  double inv_np1 = one/(n+1);
  for (i=0; i<n; i++) { Indices[i] = i; }
  
  for (i=0; i<NumMyRowElements; i++) {
    //
    for (j=0; j<n; j++) {
      //
      if ( MyGlobalRowElements[i] <= j ) {
        Values[j] = inv_np1 * ( (MyGlobalRowElements[i]+one)*inv_mp1 ) * ( (j+one)*inv_np1 - one );  // k*(si)*(tj-1)
      }
      else {
        Values[j] = inv_np1 * ( (j+one)*inv_np1 ) * ( (MyGlobalRowElements[i]+one)*inv_mp1 - one );  // k*(tj)*(si-1)
      }
    }
    info = A->InsertGlobalValues(MyGlobalRowElements[i], n, &Values[0], &Indices[0]);
    assert( info==0 );
  }

  // Finish up
  info = A->FillComplete(ColMap, RowMap);
  assert( info==0 );
  info = A->OptimizeStorage();
  assert( info==0 );
  A->SetTracebackMode(1); // Shutdown Epetra Warning tracebacks

  //************************************
  // Start the block Arnoldi iteration
  //***********************************
  //
  //  Variables used for the Block Arnoldi Method
  //
  int nev = 4;
  int blockSize = 1;
  int numBlocks = 10;
  int maxRestarts = 20;
  int verbosity = Anasazi::Errors + Anasazi::Warnings + Anasazi::FinalSummary;
  double tol = lapack.LAMCH('E');
  std::string which = "LM";
  //
  // Create parameter list to pass into solver
  //
  Teuchos::ParameterList MyPL;
  MyPL.set( "Verbosity", verbosity );
  MyPL.set( "Which", which );
  MyPL.set( "Block Size", blockSize );
  MyPL.set( "Num Blocks", numBlocks );
  MyPL.set( "Maximum Restarts", maxRestarts );
  MyPL.set( "Convergence Tolerance", tol );

  typedef Anasazi::MultiVec<double> MV;
  typedef Anasazi::Operator<double> OP;

  // Create an Anasazi::EpetraMultiVec for an initial vector to start the solver. 
  // Note:  This needs to have the same number of columns as the blocksize.
  Teuchos::RCP<Anasazi::EpetraMultiVec> ivec = Teuchos::rcp( new Anasazi::EpetraMultiVec(ColMap, blockSize) );
  ivec->MvRandom();

  // Call the constructor for the (A^T*A) operator
  Teuchos::RCP<Anasazi::EpetraSymOp>  Amat = Teuchos::rcp( new Anasazi::EpetraSymOp(A) );  
  Teuchos::RCP<Anasazi::BasicEigenproblem<double, MV, OP> > MyProblem =
    Teuchos::rcp( new Anasazi::BasicEigenproblem<double, MV, OP>(Amat, ivec) );

  // Inform the eigenproblem that the matrix A is symmetric
  MyProblem->setHermitian(true);

  // Set the number of eigenvalues requested (the block size is set in the parameter list)
  MyProblem->setNEV( nev );

  // Inform the eigenproblem that you are finished passing it information
  bool boolret = MyProblem->setProblem();
  if (boolret != true) {
    if (MyPID == 0) {
      cout << "Anasazi::BasicEigenproblem::setProblem() returned with error." << endl;
    }
#ifdef HAVE_MPI
    MPI_Finalize() ;
#endif
    return -1;
  }

  // Initialize the Block Arnoldi solver
  Anasazi::BlockKrylovSchurSolMgr<double, MV, OP> MySolverMgr(MyProblem, MyPL);
  
  // Solve the problem to the specified tolerances or length
  Anasazi::ReturnType returnCode = MySolverMgr.solve();
  if (returnCode != Anasazi::Converged && MyPID==0) {
    cout << "Anasazi::EigensolverMgr::solve() returned unconverged." << endl;
  }

  // Get the eigenvalues and eigenvectors from the eigenproblem
  Anasazi::Eigensolution<double,MV> sol = MyProblem->getSolution();
  std::vector<Anasazi::Value<double> > evals = sol.Evals;
  int numev = sol.numVecs;

  if (numev > 0) {
    
    // Compute singular values/vectors and direct residuals.
    //
    // Compute singular values which are the square root of the eigenvalues
    if (MyPID==0) {
      cout<<"------------------------------------------------------"<<endl;
      cout<<"Computed Singular Values: "<<endl;
      cout<<"------------------------------------------------------"<<endl;
    }
    for (i=0; i<numev; i++) { evals[i].realpart = Teuchos::ScalarTraits<double>::squareroot( evals[i].realpart ); }
    //
    // Compute left singular vectors :  u = Av/sigma
    //
    std::vector<double> tempnrm(numev), directnrm(numev);
    std::vector<int> index(numev);
    for (i=0; i<numev; i++) { index[i] = i; }
    Anasazi::EpetraMultiVec Av(RowMap,numev), u(RowMap,numev);
    Anasazi::EpetraMultiVec* evecs = dynamic_cast<Anasazi::EpetraMultiVec* >(sol.Evecs->CloneViewNonConst( index ));
    Teuchos::SerialDenseMatrix<int,double> S(numev,numev);
    A->Apply( *evecs, Av );
    Av.MvNorm( tempnrm );
    for (i=0; i<numev; i++) { S(i,i) = one/tempnrm[i]; }
    u.MvTimesMatAddMv( one, Av, S, zero );
    //
    // Compute direct residuals : || Av - sigma*u ||
    //
    for (i=0; i<numev; i++) { S(i,i) = evals[i].realpart; }
    Av.MvTimesMatAddMv( -one, u, S, one );
    Av.MvNorm( directnrm );
    if (MyPID==0) {
      cout.setf(std::ios_base::right, std::ios_base::adjustfield);
      cout<<std::setw(16)<<"Singular Value"
        <<std::setw(20)<<"Direct Residual"
        <<endl;
      cout<<"------------------------------------------------------"<<endl;
      for (i=0; i<numev; i++) {
        cout<<std::setw(16)<<evals[i].realpart
          <<std::setw(20)<<directnrm[i] 
          <<endl;
      }  
      cout<<"------------------------------------------------------"<<endl;
    }
  }
  
#ifdef EPETRA_MPI
    MPI_Finalize() ;
#endif
  
  return 0;
}
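// A hedged verification sketch (not in the original example): the recovered
// left singular vectors should be orthonormal, which can be checked by
// forming U^T * U and comparing it with the identity. This helper is
// hypothetical; it would be called inside the "if (numev > 0)" block above.
void checkLeftSingularVectors (const Anasazi::EpetraMultiVec& u,
                               const int numev, const int MyPID)
{
  Teuchos::SerialDenseMatrix<int,double> UtU (numev, numev);
  u.MvTransMv (1.0, u, UtU);  // UtU = U^T * U
  if (MyPID == 0) {
    std::cout << "U^T U (should be close to the identity):" << std::endl;
    UtU.print (std::cout);
  }
}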
Example #9
int main(int argc, char *argv[])
{

  std::cout << Epetra_Version() << std::endl << std::endl;

#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif


  Teuchos::RCP<Teuchos::FancyOStream> fos = getFancyOStream(Teuchos::rcpFromRef(std::cout));

  // Construct a Map with NumElements and index base of 0
  Teuchos::RCP<Epetra_Map> rgMap = Teuchos::rcp(new Epetra_Map(63, 0, Comm));
  Teuchos::RCP<Epetra_Map> doMap = Teuchos::rcp(new Epetra_Map(20, 0, Comm));

  Epetra_CrsMatrix* Pin = NULL;
  EpetraExt::MatrixMarketFileToCrsMatrix("Test.mat",
      *rgMap, *rgMap, *doMap,
      Pin, false,
      true);
  Teuchos::RCP<Epetra_CrsMatrix> P = Teuchos::rcp(Pin);

  Epetra_CrsMatrix* A = NULL;
  A = TridiagMatrix(rgMap.get(), 1.0, -2.0, 1.0);
  Teuchos::RCP<Epetra_CrsMatrix> rcpA = Teuchos::rcp(A);

  ////////////////////////////////////////////
  // plain Epetra
  ////////////////////////////////////////////

  // Create x and b vectors
  Teuchos::RCP<Epetra_Vector> x = Teuchos::rcp(new Epetra_Vector(*doMap));
  Teuchos::RCP<Epetra_Vector> x2 = Teuchos::rcp(new Epetra_Vector(*rgMap));
  Teuchos::RCP<Epetra_Vector> b = Teuchos::rcp(new Epetra_Vector(*rgMap));
  Teuchos::RCP<Epetra_Vector> b2 = Teuchos::rcp(new Epetra_Vector(*rgMap));

  x->PutScalar(1.0);
  x2->PutScalar(1.0);
  double normx = 0.0;
  x->Norm1(&normx);
  if (Comm.MyPID() == 0) std::cout << "||x|| = " << normx << std::endl;
  x2->Norm1(&normx);
  if (Comm.MyPID() == 0) std::cout << "||x2|| = " << normx << std::endl;

  /*P->Apply(*x,*b);
  normx = 0.0;
  b->Norm1(&normx);*/
  //if (Comm.MyPID() == 0) std::cout << "||Px||_1 = " << normx << std::endl;

  /*Epetra_RowMatrixTransposer et(&*P);
        Epetra_CrsMatrix* PT;
        int rv = et.CreateTranspose(true,PT);
        if (rv != 0) {
          std::ostringstream buf;
          buf << rv;
          std::string msg = "Utils::Transpose: Epetra::RowMatrixTransposer returned value of " + buf.str();
          std::cout << msg << std::endl;
        }

  Teuchos::RCP<Epetra_CrsMatrix> rcpPT(PT);

  rcpPT->Apply(*x2,*b2);
  normx = 0.0;
  b2->Norm1(&normx);*/
  //if (Comm.MyPID() == 0) std::cout << "||P^T x||_1 = " << normx << std::endl;

  // matrix-matrix multiply
  Teuchos::RCP<Epetra_CrsMatrix> AP = Teuchos::rcp(new Epetra_CrsMatrix(Copy,rcpA->RangeMap(),1));
  EpetraExt::MatrixMatrix::Multiply(*rcpA,false,*P,false,*AP, *fos);
  //  AP->FillComplete(P->DomainMap(),rcpA->RangeMap());
  //std::cout << *AP << std::endl;
  AP->Apply(*x,*b2);
  normx = 0.0;
  b2->Norm1(&normx);
  if (Comm.MyPID() == 0) std::cout << "Epetra: ||AP x||_1 = " << normx << std::endl;

  // build A^T explicitly
  Epetra_RowMatrixTransposer etA(&*rcpA);
  Epetra_CrsMatrix* AT;
  int rv3 = etA.CreateTranspose(true, AT);
  if (rv3 != 0) {
    std::ostringstream buf;
    buf << rv3;
    std::string msg = "Utils::Transpose: Epetra::RowMatrixTransposer returned value of " + buf.str();
    std::cout << msg << std::endl;
  }
  Teuchos::RCP<Epetra_CrsMatrix> rcpAT(AT);

  // calculate A^T P x using the explicitly formed transpose
  Teuchos::RCP<Epetra_CrsMatrix> APexpl = Teuchos::rcp(new Epetra_CrsMatrix(Copy,rcpA->DomainMap(),20));
  EpetraExt::MatrixMatrix::Multiply(*rcpAT,false,*P,false,*APexpl, *fos);
  //  APexpl->FillComplete(P->DomainMap(),rcpA->DomainMap()); // check me
  APexpl->Apply(*x,*b2);
  normx = 0.0;
  b2->Norm1(&normx);
  if (Comm.MyPID() == 0) std::cout << "Epetra: ||A^T_expl P x||_1 = " << normx << std::endl;

  // calculate A^T P x using the implicit transpose (Multiply with transA = true)
  Teuchos::RCP<Epetra_CrsMatrix> APimpl = Teuchos::rcp(new Epetra_CrsMatrix(Copy,rcpA->RangeMap(),20));
  EpetraExt::MatrixMatrix::Multiply(*rcpA,true,*P,false,*APimpl, *fos);
  //  APimpl->FillComplete(P->DomainMap(),APimpl->RangeMap()); // check me
  APimpl->Apply(*x,*b2);
  normx = 0.0;
  b2->Norm1(&normx);
  if (Comm.MyPID() == 0) std::cout << "Epetra: ||A^T_impl P x||_1 = " << normx << std::endl;


  ///////////////////////////////////////
  // Xpetra
  ///////////////////////////////////////


   // wrap original matrix to Matrix
   Teuchos::RCP<Xpetra::EpetraMap> xrgMap = Teuchos::rcp(new Xpetra::EpetraMap(rgMap));
   Teuchos::RCP<Xpetra::EpetraMap> xdoMap = Teuchos::rcp(new Xpetra::EpetraMap(doMap));

   Teuchos::RCP<Xpetra::EpetraCrsMatrix> Pop = Teuchos::rcp(new Xpetra::EpetraCrsMatrix(P) );
   Teuchos::RCP<Xpetra::CrsMatrix<double> > crsP = Teuchos::rcp_implicit_cast<Xpetra::CrsMatrix<double> >(Pop);
   Teuchos::RCP<Xpetra::CrsMatrixWrap<double> > crsPOp = Teuchos::rcp( new Xpetra::CrsMatrixWrap<double>(crsP) );
   crsPOp->fillComplete(xdoMap,xrgMap);

   Teuchos::RCP<Xpetra::EpetraCrsMatrix> Aop = Teuchos::rcp(new Xpetra::EpetraCrsMatrix(rcpA) );
   Teuchos::RCP<Xpetra::CrsMatrix<double> > crsA = Teuchos::rcp_implicit_cast<Xpetra::CrsMatrix<double> >(Aop);
   Teuchos::RCP<Xpetra::CrsMatrixWrap<double> > crsAOp = Teuchos::rcp( new Xpetra::CrsMatrixWrap<double>(crsA) );
   crsAOp->fillComplete(xrgMap,xrgMap);

   // wrap vectors
   Teuchos::RCP<Xpetra::EpetraVector> xx = Teuchos::rcp(new Xpetra::EpetraVector(x));
   Teuchos::RCP<Xpetra::EpetraVector> xx2 = Teuchos::rcp(new Xpetra::EpetraVector(x2));
   Teuchos::RCP<Xpetra::EpetraVector> bb1 = Teuchos::rcp(new Xpetra::EpetraVector(b));
   Teuchos::RCP<Xpetra::EpetraVector> bb2 = Teuchos::rcp(new Xpetra::EpetraVector(b2));

   bb1->putScalar(0.0);
   bb2->putScalar(0.0);

   //crsPOp->apply(*xx,*bb1);

   // if (Comm.MyPID() == 0) std::cout << "||Pop x||_1 = " << bb1->norm1() << std::endl;

   //Teuchos::RCP<Xpetra::Matrix<double> > crsPOpT = MueLu::Utils2<double>::Transpose(crsPOp,false);

   //crsPOpT->apply(*xx2,*bb2);
   //if (Comm.MyPID() == 0) std::cout << "||Pop^T x||_1 = " << bb2->norm1() << std::endl;

   // calculate APx
   Teuchos::RCP<Xpetra::Matrix<double> > crsAP = MueLu::Utils<double>::Multiply(*crsAOp,false,*crsPOp,false, *fos);
   //crsAP->describe(*fos,Teuchos::VERB_EXTREME);
   bb1->putScalar(0.0);
   crsAP->apply(*xx,*bb1);
   normx = bb1->norm1();
   if (Comm.MyPID() == 0) std::cout << "Xpetra: ||A Pop x||_1 = " << normx << std::endl;

   // calculate A^T P x explicitly
   Teuchos::RCP<Xpetra::Matrix<double> > crsAOpT = MueLu::Utils2<double>::Transpose(*crsAOp,  false);
   Teuchos::RCP<Xpetra::Matrix<double> > AtPexpl = MueLu::Utils<double> ::Multiply (*crsAOpT, false, *crsPOp, false, *fos);
   bb1->putScalar(0.0);
   AtPexpl->apply(*xx,*bb1);
   normx = bb1->norm1();
   if (Comm.MyPID() == 0)
       std::cout << "Xpetra: ||A^T_expl Pop x||_1 = " << normx << std::endl;

   // calculate A^T P x
   Teuchos::RCP<Xpetra::Matrix<double> > AtPimpl = MueLu::Utils<double>::Multiply(*crsAOp,true,*crsPOp,false, *fos);
   bb1->putScalar(0.0);
   AtPimpl->apply(*xx,*bb1);
   normx = bb1->norm1();
   if (Comm.MyPID() == 0)
       std::cout << "Xpetra: ||A^T_impl Pop x||_1 = " << normx << std::endl;


#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return 0;
}
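// A hedged cross-check sketch (not in the original example): since the
// Epetra and Xpetra paths form the same product A*P, their norms can be
// compared programmatically instead of by inspecting the printed output.
// This helper is hypothetical and would be called with the objects built
// in main() above.
bool productsAgree (const Teuchos::RCP<Epetra_CrsMatrix>& AP,
                    const Teuchos::RCP<Xpetra::Matrix<double> >& crsAP,
                    const Teuchos::RCP<Epetra_Vector>& x,
                    const Teuchos::RCP<Epetra_Vector>& b,
                    const Teuchos::RCP<Xpetra::EpetraVector>& xx,
                    const Teuchos::RCP<Xpetra::EpetraVector>& bb)
{
  double epetraNorm = 0.0;
  AP->Apply (*x, *b);          // Epetra: b = AP * x
  b->Norm1 (&epetraNorm);

  bb->putScalar (0.0);
  crsAP->apply (*xx, *bb);     // Xpetra: bb = AP * xx
  const double xpetraNorm = bb->norm1 ();

  return std::abs (epetraNorm - xpetraNorm) <= 1.0e-10 * std::abs (epetraNorm);
}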
int main(int argc, char *argv[])
{

#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
  Epetra_SerialComm Comm;
#endif

  Teuchos::ParameterList GaleriList;

  // The problem is defined on a 2D grid, global size is nx * nx.
  int nx = 30; 
  GaleriList.set("n", nx * nx);
  GaleriList.set("nx", nx);
  GaleriList.set("ny", nx);
  Teuchos::RCP<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap("Linear", Comm, GaleriList) );
  Teuchos::RCP<Epetra_RowMatrix> A = Teuchos::rcp( Galeri::CreateCrsMatrix("Laplace2D", &*Map, GaleriList) );

  //  Variables used for the Block Davidson Method
  const int    nev         = 4;
  const int    blockSize   = 5;
  const int    numBlocks   = 8;
  const int    maxRestarts = 100;
  const double tol         = 1.0e-8;

  typedef Epetra_MultiVector MV;
  typedef Epetra_Operator OP;
  typedef Anasazi::MultiVecTraits<double, Epetra_MultiVector> MVT;

  // Create an Epetra_MultiVector for an initial vector to start the solver.
  // Note:  This needs to have the same number of columns as the blocksize.
  //
  Teuchos::RCP<Epetra_MultiVector> ivec = Teuchos::rcp( new Epetra_MultiVector(*Map, blockSize) );
  ivec->Random();

  // Create the eigenproblem.
  Teuchos::RCP<Anasazi::BasicEigenproblem<double, MV, OP> > problem =
    Teuchos::rcp( new Anasazi::BasicEigenproblem<double, MV, OP>(A, ivec) );

  // Inform the eigenproblem that the operator A is symmetric
  problem->setHermitian(true);

  // Set the number of eigenvalues requested
  problem->setNEV( nev );

  // Inform the eigenproblem that you are finished passing it information
  bool boolret = problem->setProblem();
  if (boolret != true) {
    std::cout<<"Anasazi::BasicEigenproblem::setProblem() returned an error." << std::endl;
#ifdef HAVE_MPI
    MPI_Finalize();
#endif
    return -1;
  }

  // Create parameter list to pass into the solver manager
  Teuchos::ParameterList anasaziPL;
  anasaziPL.set( "Which", "LM" );
  anasaziPL.set( "Block Size", blockSize );
  anasaziPL.set( "Maximum Iterations", 500 );
  anasaziPL.set( "Convergence Tolerance", tol );
  anasaziPL.set( "Verbosity", Anasazi::Errors+Anasazi::Warnings+Anasazi::TimingDetails+Anasazi::FinalSummary );

  // Create the solver manager
  Anasazi::LOBPCGSolMgr<double, MV, OP> anasaziSolver(problem, anasaziPL);

  // Solve the problem
  Anasazi::ReturnType returnCode = anasaziSolver.solve();

  // Get the eigenvalues and eigenvectors from the eigenproblem
  Anasazi::Eigensolution<double,MV> sol = problem->getSolution();
  std::vector<Anasazi::Value<double> > evals = sol.Evals;
  Teuchos::RCP<MV> evecs = sol.Evecs;

  // Compute residuals.
  std::vector<double> normR(sol.numVecs);
  if (sol.numVecs > 0) {
    Teuchos::SerialDenseMatrix<int,double> T(sol.numVecs, sol.numVecs);
    Epetra_MultiVector tempAevec( *Map, sol.numVecs );
    T.putScalar(0.0); 
    for (int i=0; i<sol.numVecs; i++) {
      T(i,i) = evals[i].realpart;
    }
    A->Apply( *evecs, tempAevec );
    MVT::MvTimesMatAddMv( -1.0, *evecs, T, 1.0, tempAevec );
    MVT::MvNorm( tempAevec, normR );
  }

  // Print the results
  std::cout<<"Solver manager returned " << (returnCode == Anasazi::Converged ? "converged." : "unconverged.") << std::endl;
  std::cout<<std::endl;
  std::cout<<"------------------------------------------------------"<<std::endl;
  std::cout<<std::setw(16)<<"Eigenvalue"
           <<std::setw(18)<<"Direct Residual"
           <<std::endl;
  std::cout<<"------------------------------------------------------"<<std::endl;
  for (int i=0; i<sol.numVecs; i++) {
    std::cout<<std::setw(16)<<evals[i].realpart
             <<std::setw(18)<<normR[i]/evals[i].realpart
             <<std::endl;
  }
  std::cout<<"------------------------------------------------------"<<std::endl;

#ifdef HAVE_MPI
  MPI_Finalize() ; 
#endif

  return 0;
}
int main(int argc, char *argv[]) {

#ifdef HAVE_MPI
  // Initialize MPI
  //
  MPI_Init(&argc,&argv);
#endif

  bool success = false;
  try {
    // Create an Epetra communicator
    //
#ifdef HAVE_MPI
    Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
    Epetra_SerialComm Comm;
#endif

    // Create an Anasazi output manager
    //
    BasicOutputManager<double> printer;
    printer.stream(Errors) << Anasazi_Version() << std::endl << std::endl;

    // Get the sorting string from the command line
    //
    std::string which("LM");
    Teuchos::CommandLineProcessor cmdp(false,true);
    cmdp.setOption("sort",&which,"Targeted eigenvalues (SM or LM).");
    if (cmdp.parse(argc,argv) != Teuchos::CommandLineProcessor::PARSE_SUCCESSFUL) {
      throw -1;
    }


    typedef Epetra_MultiVector MV;
    typedef Epetra_Operator OP;
    typedef MultiVecTraits<double, Epetra_MultiVector> MVT;

    // Number of dimension of the domain
    const int space_dim = 2;

    // Size of each of the dimensions of the domain
    std::vector<double> brick_dim( space_dim );
    brick_dim[0] = 1.0;
    brick_dim[1] = 1.0;

    // Number of elements in each of the dimensions of the domain
    std::vector<int> elements( space_dim );
    elements[0] = 10;
    elements[1] = 10;

    // Create problem
    Teuchos::RCP<ModalProblem> testCase =
      Teuchos::rcp( new ModeLaplace2DQ2(Comm, brick_dim[0], elements[0], brick_dim[1], elements[1]) );

    // Get the stiffness and mass matrices
    Teuchos::RCP<Epetra_CrsMatrix> K = Teuchos::rcp( const_cast<Epetra_CrsMatrix *>(testCase->getStiffness()), false );
    Teuchos::RCP<Epetra_CrsMatrix> M = Teuchos::rcp( const_cast<Epetra_CrsMatrix *>(testCase->getMass()), false );

    // Eigensolver parameters
    int nev = 10;
    int blockSize = 5;
    int maxIters = 500;
    double tol = 1.0e-8;

    Teuchos::RCP<Epetra_MultiVector> ivec = Teuchos::rcp( new Epetra_MultiVector(K->OperatorDomainMap(), blockSize) );
    ivec->Random();

    // Create the eigenproblem.
    Teuchos::RCP<BasicEigenproblem<double, MV, OP> > MyProblem =
      Teuchos::rcp( new BasicEigenproblem<double, MV, OP>(K, M, ivec) );

    // Inform the eigenproblem that the operator A is symmetric
    MyProblem->setHermitian(true);

    // Set the number of eigenvalues requested
    MyProblem->setNEV( nev );

    // Inform the eigenproblem that you are finishing passing it information
    bool boolret = MyProblem->setProblem();
    if (boolret != true) {
      printer.print(Errors,"Anasazi::BasicEigenproblem::setProblem() returned an error.\n");
      throw -1;
    }

    // Create parameter list to pass into the solver manager
    //
    Teuchos::ParameterList MyPL;
    MyPL.set( "Which", which );
    MyPL.set( "Block Size", blockSize );
    MyPL.set( "Maximum Iterations", maxIters );
    MyPL.set( "Convergence Tolerance", tol );
    MyPL.set( "Full Ortho", true );
    MyPL.set( "Use Locking", true );
    //
    // Create the solver manager
    LOBPCGSolMgr<double, MV, OP> MySolverMan(MyProblem, MyPL);

    // Solve the problem
    //
    ReturnType returnCode = MySolverMan.solve();

    // Get the eigenvalues and eigenvectors from the eigenproblem
    //
    Eigensolution<double,MV> sol = MyProblem->getSolution();
    std::vector<Value<double> > evals = sol.Evals;
    Teuchos::RCP<MV> evecs = sol.Evecs;

    // Compute residuals.
    //
    std::vector<double> normR(sol.numVecs);
    if (sol.numVecs > 0) {
      Teuchos::SerialDenseMatrix<int,double> T(sol.numVecs, sol.numVecs);
      Epetra_MultiVector Kvec( K->OperatorDomainMap(), evecs->NumVectors() );
      Epetra_MultiVector Mvec( M->OperatorDomainMap(), evecs->NumVectors() );
      T.putScalar(0.0);
      for (int i=0; i<sol.numVecs; i++) {
        T(i,i) = evals[i].realpart;
      }
      K->Apply( *evecs, Kvec );
      M->Apply( *evecs, Mvec );
      MVT::MvTimesMatAddMv( -1.0, Mvec, T, 1.0, Kvec );
      MVT::MvNorm( Kvec, normR );
    }

    // Print the results
    //
    std::ostringstream os;
    os.setf(std::ios_base::right, std::ios_base::adjustfield);
    os<<"Solver manager returned " << (returnCode == Converged ? "converged." : "unconverged.") << std::endl;
    os<<std::endl;
    os<<"------------------------------------------------------"<<std::endl;
    os<<std::setw(16)<<"Eigenvalue"
      <<std::setw(18)<<"Direct Residual"
      <<std::endl;
    os<<"------------------------------------------------------"<<std::endl;
    for (int i=0; i<sol.numVecs; i++) {
      os<<std::setw(16)<<evals[i].realpart
        <<std::setw(18)<<normR[i]/evals[i].realpart
        <<std::endl;
    }
    os<<"------------------------------------------------------"<<std::endl;
    printer.print(Errors,os.str());

    success = true;
  }
  TEUCHOS_STANDARD_CATCH_STATEMENTS(true, std::cerr, success);

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return ( success ? EXIT_SUCCESS : EXIT_FAILURE );
}