int main(int argc, char *argv[]) {
  //
  bool haveM = false;

#ifdef EPETRA_MPI  
  // Initialize MPI  
  MPI_Init(&argc,&argv);   
  Epetra_MpiComm Comm( MPI_COMM_WORLD );  
#else  
  Epetra_SerialComm Comm;  
#endif
  
  int MyPID = Comm.MyPID();

  int nev = 5;
  int blockSize = 5;
  int maxIterations = 1000;
  double tol = 1.0e-8;
  bool verbose=false, locking=false, fullOrtho=true;
  std::string k_filename = "";
  std::string m_filename = "";
  std::string which = "SM";
  Teuchos::CommandLineProcessor cmdp(false,true);
  cmdp.setOption("nev",&nev,"Number of eigenvalues to compute.");
  cmdp.setOption("blocksize",&blockSize,"Block size used in LOBPCG.");
  cmdp.setOption("maxiters",&maxIterations,"Maximum number of iterations used in LOBPCG.");
  cmdp.setOption("tol",&tol,"Convergence tolerance requested for computed eigenvalues.");
  cmdp.setOption("verbose","quiet",&verbose,"Print messages and results.");
  cmdp.setOption("locking","nolocking",&locking,"Use locking of converged eigenvalues.");
  cmdp.setOption("fullortho","nofullortho",&fullOrtho,"Use full orthogonalization.");
  cmdp.setOption("sort",&which,"Targetted eigenvalues (SM,LM,SR,or LR).");
  cmdp.setOption("K-filename",&k_filename,"Filename and path of the stiffness matrix.");
  cmdp.setOption("M-filename",&m_filename,"Filename and path of the mass matrix.");
  if (cmdp.parse(argc,argv) != Teuchos::CommandLineProcessor::PARSE_SUCCESSFUL) {
#ifdef HAVE_MPI
    MPI_Finalize();
#endif
    return -1;
  }
  if (k_filename=="") {
    cout << "The matrix K must be supplied through an input file!!!" << endl;
#ifdef HAVE_MPI
    MPI_Finalize();
#endif
    return -1;
  }
  if (m_filename!="") {
    haveM = true;
  }
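  // If no mass matrix file is supplied, the example solves the standard problem
  // K x = lambda x; otherwise it solves the generalized problem K x = lambda M x
  // (haveM selects the corresponding BasicEigenproblem constructor below).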
  //
  //**********************************************************************
  //******************Set up the problem to be solved*********************
  //**********************************************************************
  //
  // *****Read in matrix from file******
  //
  Teuchos::RCP<Epetra_Map> Map;
  Teuchos::RCP<Epetra_CrsMatrix> K, M;
  EpetraExt::readEpetraLinearSystem( k_filename, Comm, &K, &Map );

  if (haveM) {
    EpetraExt::readEpetraLinearSystem( m_filename, Comm, &M, &Map );
  }
  //
  //************************************
  // Start the LOBPCG iteration
  //************************************
  //
  //  Variables used for the LOBPCG Method
  // 
  // Set verbosity level
  int verbosity = Anasazi::Errors + Anasazi::Warnings;
  if (verbose) {
    verbosity += Anasazi::FinalSummary + Anasazi::TimingDetails;
  }
  //
  // Create parameter list to pass into solver
  //
  Teuchos::ParameterList MyPL;
  MyPL.set( "Verbosity", verbosity );
  MyPL.set( "Which", which );
  MyPL.set( "Block Size", blockSize );
  MyPL.set( "Maximum Iterations", maxIterations );
  MyPL.set( "Convergence Tolerance", tol );
  MyPL.set( "Use Locking", locking );
  MyPL.set( "Locking Tolerance", tol/10 );
  MyPL.set( "Full Ortho", fullOrtho );

  typedef Epetra_MultiVector MV;
  typedef Epetra_Operator OP;
  typedef Anasazi::MultiVecTraits<double, MV> MVT;
  typedef Anasazi::OperatorTraits<double, MV, OP> OPT;
  //
  // Create the eigenproblem to be solved.
  //
  Teuchos::RCP<Epetra_MultiVector> ivec = Teuchos::rcp( new Epetra_MultiVector(*Map, blockSize) );
  ivec->Random();
  
  Teuchos::RCP<Anasazi::BasicEigenproblem<double, MV, OP> > MyProblem;
  if (haveM) {
    MyProblem = Teuchos::rcp( new Anasazi::BasicEigenproblem<double, MV, OP>( K, M, ivec ) );
  } 
  else {
    MyProblem = Teuchos::rcp( new Anasazi::BasicEigenproblem<double, MV, OP>( K, ivec ) );
  } 
 
  // Inform the eigenproblem that (K,M) is Hermitian
  MyProblem->setHermitian(true);

  // Set the number of eigenvalues requested 
  MyProblem->setNEV( nev );

  // Inform the eigenproblem that you are finished passing it information
  bool boolret = MyProblem->setProblem();
  if (boolret != true) {
    if (verbose && MyPID == 0) {
      cout << "Anasazi::BasicEigenproblem::setProblem() returned with error." << endl;
    }
#ifdef HAVE_MPI
    MPI_Finalize() ;
#endif
    return -1;
  }

  // Initialize the LOBPCG solver
  Anasazi::LOBPCGSolMgr<double, MV, OP> MySolverMgr(MyProblem, MyPL);
    
  // Solve the problem to the specified tolerances or length
  Anasazi::ReturnType returnCode = MySolverMgr.solve();
  if (returnCode != Anasazi::Converged && MyPID==0 && verbose) {
    cout << "Anasazi::EigensolverMgr::solve() returned unconverged." << endl;
  }
  
  // Get the eigenvalues and eigenvectors from the eigenproblem
  Anasazi::Eigensolution<double,MV> sol = MyProblem->getSolution();
  std::vector<Anasazi::Value<double> > evals = sol.Evals;
  Teuchos::RCP<MV> evecs = sol.Evecs;
  std::vector<int> index = sol.index;
  int numev = sol.numVecs;

  if (numev > 0) {
    // Compute residuals.
    Teuchos::LAPACK<int,double> lapack;
    std::vector<double> normEV(numev);
    
    // Get storage
    Teuchos::RCP<Epetra_MultiVector> Kevecs, Mevecs;
    Teuchos::SerialDenseMatrix<int,double> B(numev,numev);
    B.putScalar(0.0); 
    for (int i=0; i<numev; i++) {B(i,i) = evals[i].realpart;}
    
    // Compute K*evecs
    Kevecs = Teuchos::rcp(new Epetra_MultiVector(*Map,numev) );
    OPT::Apply( *K, *evecs, *Kevecs );

    // Compute M*evecs
    if (haveM) {
      Mevecs = Teuchos::rcp(new Epetra_MultiVector(*Map,numev) );
      OPT::Apply( *M, *evecs, *Mevecs );
    }
    else {
      Mevecs = evecs;
    }

    // Compute K*evecs - lambda*M*evecs and its norm
    MVT::MvTimesMatAddMv( -1.0, *Mevecs, B, 1.0, *Kevecs );
    MVT::MvNorm( *Kevecs, normEV );
    
    // Scale the norms by the eigenvalue
    for (int i=0; i<numev; i++) {
      normEV[i] /= Teuchos::ScalarTraits<double>::magnitude( evals[i].realpart );
    }
    
    // Output computed eigenvalues and their direct residuals
    if (verbose && MyPID==0) {
      cout.setf(std::ios_base::right, std::ios_base::adjustfield);
      cout<<endl<< "Actual Residuals"<<endl;
      cout<< std::setw(16) << "Real Part"
        << std::setw(20) << "Direct Residual"<< endl;
      cout<<"-----------------------------------------------------------"<<endl;
      for (int i=0; i<numev; i++) {
        cout<< std::setw(16) << evals[i].realpart 
          << std::setw(20) << normEV[i] << endl;
      }  
      cout<<"-----------------------------------------------------------"<<endl;
    }
  }
  
#ifdef EPETRA_MPI
  MPI_Finalize() ;
#endif
  return 0;
  
} // end LOBPCGEpetraExFile.cpp
Example #2
int main(int argc, char *argv[]) {

#ifdef EPETRA_MPI
  // Initialize MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  bool testFailed;
  bool boolret;
  int MyPID = Comm.MyPID();

  bool verbose = true;
  bool debug = false;
  std::string which("SM");

  Teuchos::CommandLineProcessor cmdp(false,true);
  cmdp.setOption("verbose","quiet",&verbose,"Print messages and results.");
  cmdp.setOption("debug","nodebug",&debug,"Print debugging information.");
  cmdp.setOption("sort",&which,"Targetted eigenvalues (SM,LM,SR,LR,SI,or LI).");
  if (cmdp.parse(argc,argv) != Teuchos::CommandLineProcessor::PARSE_SUCCESSFUL) {
#ifdef HAVE_MPI
    MPI_Finalize();
#endif
    return -1;
  }

  typedef double ScalarType;
  typedef Teuchos::ScalarTraits<ScalarType>          ScalarTypeTraits;
  typedef ScalarTypeTraits::magnitudeType            MagnitudeType;
  typedef Epetra_MultiVector                         MV;
  typedef Epetra_Operator                            OP;
  typedef Anasazi::MultiVecTraits<ScalarType,MV>     MVTraits;
  typedef Anasazi::OperatorTraits<ScalarType,MV,OP>  OpTraits;

  //  Dimension of the matrix
  int nx = 10;        // Discretization points in any one direction.
  int NumGlobalElements = nx*nx;  // Size of matrix nx*nx

  // Construct a Map that puts approximately the same number of
  // equations on each processor.

  Epetra_Map Map(NumGlobalElements, 0, Comm);

  // Get update list and number of local equations from newly created Map.

  int NumMyElements = Map.NumMyElements();

  std::vector<int> MyGlobalElements(NumMyElements);
  Map.MyGlobalElements(&MyGlobalElements[0]);

  // Create an integer vector NumNz that is used to build the Epetra matrix.
  // NumNz[i] is the number of nonzero entries (including the diagonal) for the
  // ith global equation on this processor.
  std::vector<int> NumNz(NumMyElements);

  /* We are building a matrix of block structure:

      | T -I          |
      |-I  T -I       |
      |   -I  T       |
      |        ...  -I|
      |           -I T|

   where each block is dimension nx by nx and the matrix is on the order of
   nx*nx.  The block T is a tridiagonal matrix.
  */
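  // The assembly below therefore produces a 5-point finite-difference stencil on an
  // nx-by-nx grid: the four corner rows get 3 nonzeros, other boundary rows get 4,
  // and interior rows get 5 (diagonal included).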

  for (int i=0; i<NumMyElements; i++) {
    if (MyGlobalElements[i] == 0 || MyGlobalElements[i] == NumGlobalElements-1 ||
        MyGlobalElements[i] == nx-1 || MyGlobalElements[i] == nx*(nx-1) ) {
      NumNz[i] = 3;
    }
    else if (MyGlobalElements[i] < nx || MyGlobalElements[i] > nx*(nx-1) ||
             MyGlobalElements[i]%nx == 0 || (MyGlobalElements[i]+1)%nx == 0) {
      NumNz[i] = 4;
    }
    else {
      NumNz[i] = 5;
    }
  }

  // Create an Epetra_Matrix

  Teuchos::RCP<Epetra_CrsMatrix> A = Teuchos::rcp( new Epetra_CrsMatrix(Copy, Map, &NumNz[0]) );

  // Convection coefficient; can be set by the user.
  // When rho*h/2 <= 1, the discrete convection-diffusion operator has real eigenvalues.
  // When rho*h/2 > 1, the operator has complex eigenvalues.
  double rho = 2*(nx+1);

  // Compute coefficients for the discrete convection-diffusion operator
  const double one = 1.0;
  std::vector<double> Values(4);
  std::vector<int> Indices(4);
  double h = one /(nx+1);
  double h2 = h*h;
  double c = 5.0e-01*rho/ h;
  Values[0] = -one/h2 - c; Values[1] = -one/h2 + c; Values[2] = -one/h2; Values[3]= -one/h2;
  double diag = 4.0 / h2;
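  // Values[0] and Values[1] are the x-direction (west/east) couplings and carry the
  // convection contribution -c / +c; Values[2] and Values[3] are the y-direction
  // couplings; diag is the center of the 5-point stencil.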
  int NumEntries, info;

  for (int i=0; i<NumMyElements; i++)
  {
    if (MyGlobalElements[i]==0)
    {
      Indices[0] = 1;
      Indices[1] = nx;
      NumEntries = 2;
      info = A->InsertGlobalValues(MyGlobalElements[i], NumEntries, &Values[1], &Indices[0]);
      assert( info==0 );
    }
    else if (MyGlobalElements[i] == nx*(nx-1))
    {
      Indices[0] = nx*(nx-1)+1;
      Indices[1] = nx*(nx-2);
      NumEntries = 2;
      info = A->InsertGlobalValues(MyGlobalElements[i], NumEntries, &Values[1], &Indices[0]);
      assert( info==0 );
    }
    else if (MyGlobalElements[i] == nx-1)
    {
      Indices[0] = nx-2;
      NumEntries = 1;
      info = A->InsertGlobalValues(MyGlobalElements[i], NumEntries, &Values[0], &Indices[0]);
      assert( info==0 );
      Indices[0] = 2*nx-1;
      info = A->InsertGlobalValues(MyGlobalElements[i], NumEntries, &Values[2], &Indices[0]);
      assert( info==0 );
    }
    else if (MyGlobalElements[i] == NumGlobalElements-1)
    {
      Indices[0] = NumGlobalElements-2;
      NumEntries = 1;
      info = A->InsertGlobalValues(MyGlobalElements[i], NumEntries, &Values[0], &Indices[0]);
      assert( info==0 );
      Indices[0] = nx*(nx-1)-1;
      info = A->InsertGlobalValues(MyGlobalElements[i], NumEntries, &Values[2], &Indices[0]);
      assert( info==0 );
    }
    else if (MyGlobalElements[i] < nx)
    {
      Indices[0] = MyGlobalElements[i]-1;
      Indices[1] = MyGlobalElements[i]+1;
      Indices[2] = MyGlobalElements[i]+nx;
      NumEntries = 3;
      info = A->InsertGlobalValues(MyGlobalElements[i], NumEntries, &Values[0], &Indices[0]);
      assert( info==0 );
    }
    else if (MyGlobalElements[i] > nx*(nx-1))
    {
      Indices[0] = MyGlobalElements[i]-1;
      Indices[1] = MyGlobalElements[i]+1;
      Indices[2] = MyGlobalElements[i]-nx;
      NumEntries = 3;
      info = A->InsertGlobalValues(MyGlobalElements[i], NumEntries, &Values[0], &Indices[0]);
      assert( info==0 );
    }
    else if (MyGlobalElements[i]%nx == 0)
    {
      Indices[0] = MyGlobalElements[i]+1;
      Indices[1] = MyGlobalElements[i]-nx;
      Indices[2] = MyGlobalElements[i]+nx;
      NumEntries = 3;
      info = A->InsertGlobalValues(MyGlobalElements[i], NumEntries, &Values[1], &Indices[0]);
      assert( info==0 );
    }
    else if ((MyGlobalElements[i]+1)%nx == 0)
    {
      Indices[0] = MyGlobalElements[i]-nx;
      Indices[1] = MyGlobalElements[i]+nx;
      NumEntries = 2;
      info = A->InsertGlobalValues(MyGlobalElements[i], NumEntries, &Values[2], &Indices[0]);
      assert( info==0 );
      Indices[0] = MyGlobalElements[i]-1;
      NumEntries = 1;
      info = A->InsertGlobalValues(MyGlobalElements[i], NumEntries, &Values[0], &Indices[0]);
      assert( info==0 );
    }
    else
    {
      Indices[0] = MyGlobalElements[i]-1;
      Indices[1] = MyGlobalElements[i]+1;
      Indices[2] = MyGlobalElements[i]-nx;
      Indices[3] = MyGlobalElements[i]+nx;
      NumEntries = 4;
      info = A->InsertGlobalValues(MyGlobalElements[i], NumEntries, &Values[0], &Indices[0]);
      assert( info==0 );
    }
    // Put in the diagonal entry
    info = A->InsertGlobalValues(MyGlobalElements[i], 1, &diag, &MyGlobalElements[i]);
    assert( info==0 );
  }

  // Finish up
  info = A->FillComplete();
  assert( info==0 );
  A->SetTracebackMode(1); // Shutdown Epetra Warning tracebacks

  //************************************
  // Start the Generalized Davidson iteration
  //************************************
  //
  //  Variables used for the Generalized Davidson Method
  //
  int nev = 4;
  int blockSize = 1;
  int maxDim = 50;
  int restartDim = 10;
  int maxRestarts = 500;
  double tol = 1e-10;
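  // Roughly: the search space is allowed to grow to maxDim basis vectors, is collapsed
  // back to restartDim vectors at each restart, and at most maxRestarts restarts are
  // performed (these names mirror the parameter-list entries set below).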

  // Set verbosity level
  int verbosity = Anasazi::Errors + Anasazi::Warnings;
  if (verbose) {
    verbosity += Anasazi::FinalSummary + Anasazi::TimingDetails;
  }
  if (debug) {
    verbosity += Anasazi::Debug;
  }
  //
  // Create parameter list to pass into solver manager
  //
  Teuchos::ParameterList MyPL;
  MyPL.set( "Verbosity", verbosity );
  MyPL.set( "Which", which );
  MyPL.set( "Block Size", blockSize );
  MyPL.set( "Maximum Subspace Dimension", maxDim);
  MyPL.set( "Restart Dimension", restartDim);
  MyPL.set( "Maximum Restarts", maxRestarts );
  MyPL.set( "Convergence Tolerance", tol );
  MyPL.set( "Relative Convergence Tolerance", true );
  MyPL.set( "Initial Guess", "User" );

  // Create an Epetra_MultiVector for an initial vector to start the solver.
  // Note:  This needs to have the same number of columns as the blocksize.
  Teuchos::RCP<Epetra_MultiVector> ivec = Teuchos::rcp( new Epetra_MultiVector(Map, blockSize) );
  ivec->Random();

  // Create the eigenproblem.
  Teuchos::RCP<Anasazi::BasicEigenproblem<double, MV, OP> > MyProblem = Teuchos::rcp(
    new Anasazi::BasicEigenproblem<double,MV,OP>() );
  MyProblem->setA(A);
  MyProblem->setInitVec(ivec);

  // Inform the eigenproblem that the operator A is non-Hermitian
  MyProblem->setHermitian(false);

  // Set the number of eigenvalues requested
  MyProblem->setNEV( nev );

  // Inform the eigenproblem that you are finished passing it information
  boolret = MyProblem->setProblem();
  if (boolret != true) {
    if (verbose && MyPID == 0) {
      std::cout << "Anasazi::BasicEigenproblem::setProblem() returned with error." << std::endl;
    }
#ifdef HAVE_MPI
    MPI_Finalize() ;
#endif
    return -1;
  }

  // Initialize the Generalized Davidson solver manager
  Anasazi::GeneralizedDavidsonSolMgr<double, MV, OP> MySolverMgr(MyProblem, MyPL);

  // Solve the problem to the specified tolerances or length
  Anasazi::ReturnType returnCode = MySolverMgr.solve();
  testFailed = false;
  if (returnCode != Anasazi::Converged) {
    testFailed = true;
  }

  // Get the eigenvalues and eigenvectors from the eigenproblem
  Anasazi::Eigensolution<ScalarType,MV> sol = MyProblem->getSolution();
  std::vector<Anasazi::Value<ScalarType> > evals = sol.Evals;
  Teuchos::RCP<MV> evecs = sol.Evecs;
  std::vector<int> index = sol.index;
  int numev = sol.numVecs;

  // Output computed eigenvalues and their direct residuals
  if (verbose && MyPID==0) {
    int numritz = (int)evals.size();
    std::cout.setf(std::ios_base::right, std::ios_base::adjustfield);
    std::cout<<std::endl<< "Computed Ritz Values"<< std::endl;
    std::cout<< std::setw(16) << "Real Part"
        << std::setw(16) << "Imag Part"
        << std::endl;
    std::cout<<"-----------------------------------------------------------"<<std::endl;
    for (int i=0; i<numritz; i++) {
      std::cout<< std::setw(16) << evals[i].realpart
          << std::setw(16) << evals[i].imagpart
          << std::endl;
    }
    std::cout<<"-----------------------------------------------------------"<<std::endl;
  }

  if (numev > 0) {
    // Compute residuals.
    Teuchos::LAPACK<int,double> lapack;
    std::vector<double> normA(numev);

    // The problem is non-Hermitian.
    int i=0;
    std::vector<int> curind(1);
    std::vector<double> resnorm(1), tempnrm(1);
    Teuchos::RCP<MV> tempAevec;
    Teuchos::RCP<const MV> evecr, eveci;
    Epetra_MultiVector Aevec(Map,numev);

    // Compute A*evecs
    OpTraits::Apply( *A, *evecs, Aevec );

    Teuchos::SerialDenseMatrix<int,double> Breal(1,1), Bimag(1,1);
    while (i<numev) {
      if (index[i]==0) {
        // Get a view of the current eigenvector (evecr)
        curind[0] = i;
        evecr = MVTraits::CloneView( *evecs, curind );

        // Get a copy of A*evecr
        tempAevec = MVTraits::CloneCopy( Aevec, curind );

        // Compute A*evecr - lambda*evecr
        Breal(0,0) = evals[i].realpart;
        MVTraits::MvTimesMatAddMv( -1.0, *evecr, Breal, 1.0, *tempAevec );

        // Compute the norm of the residual and increment counter
        MVTraits::MvNorm( *tempAevec, resnorm );
        normA[i] = resnorm[0] / Teuchos::ScalarTraits<MagnitudeType>::magnitude( evals[i].realpart );
        i++;
      } else {
        // Get a view of the real part of the eigenvector (evecr)
        curind[0] = i;
        evecr = MVTraits::CloneView( *evecs, curind );

        // Get a copy of A*evecr
        tempAevec = MVTraits::CloneCopy( Aevec, curind );

        // Get a view of the imaginary part of the eigenvector (eveci)
        curind[0] = i+1;
        eveci = MVTraits::CloneView( *evecs, curind );

        // Set the eigenvalue into Breal and Bimag
        Breal(0,0) = evals[i].realpart;
        Bimag(0,0) = evals[i].imagpart;

        // Compute A*evecr - evecr*lambdar + eveci*lambdai
        MVTraits::MvTimesMatAddMv( -1.0, *evecr, Breal, 1.0, *tempAevec );
        MVTraits::MvTimesMatAddMv( 1.0, *eveci, Bimag, 1.0, *tempAevec );
        MVTraits::MvNorm( *tempAevec, tempnrm );

        // Get a copy of A*eveci
        tempAevec = MVTraits::CloneCopy( Aevec, curind );

        // Compute A*eveci - eveci*lambdar - evecr*lambdai
        MVTraits::MvTimesMatAddMv( -1.0, *evecr, Bimag, 1.0, *tempAevec );
        MVTraits::MvTimesMatAddMv( -1.0, *eveci, Breal, 1.0, *tempAevec );
        MVTraits::MvNorm( *tempAevec, resnorm );

        // Compute the norms and scale by magnitude of eigenvalue
        normA[i] = lapack.LAPY2( tempnrm[0], resnorm[0] ) /
          lapack.LAPY2( evals[i].realpart, evals[i].imagpart );
        normA[i+1] = normA[i];

        i=i+2;
      }
    }

    // Check the direct residuals against the requested tolerance on every process
    for (int j=0; j<numev; j++) {
      if ( normA[j] > tol ) {
        testFailed = true;
      }
    }

    // Output computed eigenvalues and their direct residuals
    if (verbose && MyPID==0) {
      std::cout.setf(std::ios_base::right, std::ios_base::adjustfield);
      std::cout<<std::endl<< "Actual Residuals"<<std::endl;
      std::cout<< std::setw(16) << "Real Part"
          << std::setw(16) << "Imag Part"
          << std::setw(20) << "Direct Residual"<< std::endl;
      std::cout<<"-----------------------------------------------------------"<<std::endl;
      for (int j=0; j<numev; j++) {
        std::cout<< std::setw(16) << evals[j].realpart
            << std::setw(16) << evals[j].imagpart
            << std::setw(20) << normA[j] << std::endl;
      }
      std::cout<<"-----------------------------------------------------------"<<std::endl;
    }
  }

#ifdef EPETRA_MPI
  MPI_Finalize();
#endif

  if (testFailed) {
    if (verbose && MyPID==0) {
      std::cout << "End Result: TEST FAILED" << std::endl;
    }
    return -1;
  }
  //
  // Default return value
  //
  if (verbose && MyPID==0) {
    std::cout << "End Result: TEST PASSED" << std::endl;
  }

  return 0;
}
int main(int argc, char *argv[]) {
  int i;

#ifdef EPETRA_MPI
  // Initialize MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  int MyPID = Comm.MyPID();

  // Number of dimension of the domain
  int space_dim = 2;
  
  // Size of each of the dimensions of the domain
  std::vector<double> brick_dim( space_dim );
  brick_dim[0] = 1.0;
  brick_dim[1] = 1.0;
  
  // Number of elements in each of the dimensions of the domain
  std::vector<int> elements( space_dim );
  elements[0] = 10;
  elements[1] = 10;
  
  // Create problem
  Teuchos::RCP<ModalProblem> testCase = Teuchos::rcp( new ModeLaplace2DQ2(Comm, brick_dim[0], elements[0], brick_dim[1], elements[1]) );
  
  // Get the stiffness and mass matrices
  Teuchos::RCP<Epetra_CrsMatrix> K = Teuchos::rcp( const_cast<Epetra_CrsMatrix *>(testCase->getStiffness()), false );
  Teuchos::RCP<Epetra_CrsMatrix> M = Teuchos::rcp( const_cast<Epetra_CrsMatrix *>(testCase->getMass()), false );
	
  //
  // *******************************************************
  // Set up Amesos direct solver for inner iteration
  // *******************************************************
  //

  // Create the shifted system K - sigma * M.
  // For the buckling transformation, this shift must be nonzero.
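  // With this transformation the solver effectively works with the operator
  // (K - sigma*M)^{-1} K; a computed eigenvalue nu of that operator is mapped back
  // to an eigenvalue of (K, M) via lambda = sigma*nu/(nu - 1), as done after the
  // solve below.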

  double sigma = 1.0;
  Epetra_CrsMatrix Kcopy( *K );

  int addErr = EpetraExt::MatrixMatrix::Add( *M, false, -sigma, Kcopy, 1.0 );
  if (addErr != 0) {
    if (MyPID == 0) {
      std::cout << "EpetraExt::MatrixMatrix::Add returned with error: " << addErr << std::endl;
    }
#ifdef HAVE_MPI
    MPI_Finalize() ;
#endif
    return -1;
  }

  // Create an Epetra linear problem class for solving "(K - sigma*M) x = b"
  Epetra_LinearProblem AmesosProblem;
  AmesosProblem.SetOperator(&Kcopy);
  
  // Create Amesos factory and solver for solving "(K - sigma*M)x = b" using a direct factorization
  Amesos amesosFactory;
  Teuchos::RCP<Amesos_BaseSolver> AmesosSolver = 
    Teuchos::rcp( amesosFactory.Create( "Klu", AmesosProblem ) );

  // The AmesosBucklingOp class assumes that the symbolic/numeric factorizations have already
  // been performed on the linear problem.
  AmesosSolver->SymbolicFactorization();
  AmesosSolver->NumericFactorization();

  //
  // ************************************
  // Start the block Arnoldi iteration
  // ************************************
  //
  //  Variables used for the Block Arnoldi Method
  //
  int nev = 10;
  int blockSize = 3;  
  int numBlocks = 3*nev/blockSize;
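  // The Krylov basis holds roughly blockSize*numBlocks = 3*nev vectors before a restart.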
  int maxRestarts = 5;
  //int step = 5;
  double tol = 1.0e-8;
  std::string which = "LM";
  int verbosity = Anasazi::Errors + Anasazi::Warnings + Anasazi::FinalSummary;
  //
  // Create parameter list to pass into solver
  //
  Teuchos::ParameterList MyPL;
  MyPL.set( "Verbosity", verbosity );
  MyPL.set( "Which", which );
  MyPL.set( "Block Size", blockSize );
  MyPL.set( "Num Blocks", numBlocks );
  MyPL.set( "Maximum Restarts", maxRestarts );
  MyPL.set( "Convergence Tolerance", tol );
  //MyPL.set( "Step Size", step );
  
  typedef Epetra_MultiVector MV;
  typedef Epetra_Operator OP;
  typedef Anasazi::MultiVecTraits<double, MV> MVT;
  typedef Anasazi::OperatorTraits<double, MV, OP> OPT;
  
  // Create an Epetra_MultiVector for an initial vector to start the solver.
  // Note:  This needs to have the same number of columns as the blocksize.
  Teuchos::RCP<Epetra_MultiVector> ivec = Teuchos::rcp( new Epetra_MultiVector(K->Map(), blockSize) );
  MVT::MvRandom( *ivec );
  
  // Create the Epetra_Operator for the buckling transformation using the Amesos direct solver.
  Teuchos::RCP<AmesosBucklingOp> BucklingOp 
    = Teuchos::rcp( new AmesosBucklingOp(AmesosProblem, AmesosSolver, K) );
  
  Teuchos::RCP<Anasazi::BasicEigenproblem<double,MV,OP> > MyProblem = 
    Teuchos::rcp( new Anasazi::BasicEigenproblem<double,MV,OP>(BucklingOp, K, ivec) );
  
  // Inform the eigenproblem that the matrix pencil (K,M) is symmetric
  MyProblem->setHermitian(true);
  
  // Set the number of eigenvalues requested 
  MyProblem->setNEV( nev );
  
  // Inform the eigenproblem that you are finished passing it information
  bool boolret = MyProblem->setProblem();
  if (boolret != true) {
    if (MyPID == 0) {
      std::cout << "Anasazi::BasicEigenproblem::setProblem() returned with error." << std::endl;
    }
#ifdef HAVE_MPI
    MPI_Finalize() ;
#endif
    return -1;
  }

  // Initialize the Block Arnoldi solver
  Anasazi::BlockKrylovSchurSolMgr<double, MV, OP> MySolverMgr(MyProblem, MyPL);
  
  // Solve the problem to the specified tolerances or length
  Anasazi::ReturnType returnCode = MySolverMgr.solve();
  if (returnCode != Anasazi::Converged && MyPID==0) {
    std::cout << "Anasazi::EigensolverMgr::solve() returned unconverged." << std::endl;
  }

  // Get the eigenvalues and eigenvectors from the eigenproblem
  Anasazi::Eigensolution<double,MV> sol = MyProblem->getSolution();
  std::vector<Anasazi::Value<double> > evals = sol.Evals;
  Teuchos::RCP<MV> evecs = sol.Evecs;
  int numev = sol.numVecs;
  
  if (numev > 0) {

    // Undo buckling transformation; computed eigenvalues are real
    std::vector<double> compEvals(numev);
    for (i=0; i<numev; ++i) {
      compEvals[i] = sigma*evals[i].realpart/(evals[i].realpart-1.0);
    }
    
    // Remember, eigenvectors are constructed K-orthogonal to preserve symmetry,
    // so numerator of the Rayleigh quotient is 1.0.
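    // Concretely, rq_eval below is lambda_i ~= (x_i^T K x_i) / (x_i^T M x_i)
    // = 1 / (x_i^T M x_i), which is why only dmatr = evecs^T * M * evecs is needed.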
    Teuchos::SerialDenseMatrix<int,double> dmatr(numev,numev);
    Epetra_MultiVector tempvec(M->Map(), MVT::GetNumberVecs( *evecs ));
    OPT::Apply( *M, *evecs, tempvec );
    MVT::MvTransMv( 1.0, tempvec, *evecs, dmatr );
    
    if (MyPID==0) {
      double rq_eval = 0.0;
      std::cout.setf(std::ios_base::right, std::ios_base::adjustfield);
      std::cout<<"Actual Eigenvalues (obtained by Rayleigh quotient) : "<<std::endl;
      std::cout<<"------------------------------------------------------"<<std::endl;
      std::cout<<std::setw(16)<<"Real Part"
        <<std::setw(16)<<"Rayleigh Error"<<std::endl;
      std::cout<<"------------------------------------------------------"<<std::endl;
      for (i=0; i<numev; i++) {
        rq_eval = 1.0 / dmatr(i,i);
        std::cout<<std::setw(16)<<rq_eval
          <<std::setw(16)<<Teuchos::ScalarTraits<double>::magnitude(rq_eval-compEvals[i])
          <<std::endl;
      }
      std::cout<<"------------------------------------------------------"<<std::endl;
    }
    
  }

#ifdef EPETRA_MPI
  MPI_Finalize();
#endif

  return 0;
}
int main(int argc, char *argv[]) {
  int i, j, info;
  const double one = 1.0;
  const double zero = 0.0;
  Teuchos::LAPACK<int,double> lapack;

#ifdef EPETRA_MPI
  // Initialize MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  int MyPID = Comm.MyPID();

  //  Dimension of the matrix
  int m = 500;
  int n = 100;

  // Construct a Map that puts approximately the same number of
  // equations on each processor.

  Epetra_Map RowMap(m, 0, Comm);
  Epetra_Map ColMap(n, 0, Comm);

  // Get update list and number of local equations from newly created Map.

  int NumMyRowElements = RowMap.NumMyElements();
  
  std::vector<int> MyGlobalRowElements(NumMyRowElements);
  RowMap.MyGlobalElements(&MyGlobalRowElements[0]);

  /* We are building an m by n matrix with entries

              A(i,j) = k*(si)*(tj - 1)  if i <= j
                     = k*(tj)*(si - 1)  if i  > j

     where si = i/(m+1), tj = j/(n+1), and k = 1/(n+1)
     (i and j are 1-based in this formula; the code below uses 0-based indices).
  */
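
  // Every locally owned row of A is dense (n entries), so each row is filled with a
  // single InsertGlobalValues call in the loop below; the result is the discrete
  // integral (kernel) operator stored row-wise in CRS format.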

  // Create an Epetra_Matrix
  Teuchos::RCP<Epetra_CrsMatrix> A = Teuchos::rcp( new Epetra_CrsMatrix(Copy, RowMap, n) );

  // Compute coefficients for discrete integral operator
  std::vector<double> Values(n);
  std::vector<int> Indices(n);
  double inv_mp1 = one/(m+1);
  double inv_np1 = one/(n+1);
  for (i=0; i<n; i++) { Indices[i] = i; }
  
  for (i=0; i<NumMyRowElements; i++) {
    //
    for (j=0; j<n; j++) {
      //
      if ( MyGlobalRowElements[i] <= j ) {
        Values[j] = inv_np1 * ( (MyGlobalRowElements[i]+one)*inv_mp1 ) * ( (j+one)*inv_np1 - one );  // k*(si)*(tj-1)
      }
      else {
        Values[j] = inv_np1 * ( (j+one)*inv_np1 ) * ( (MyGlobalRowElements[i]+one)*inv_mp1 - one );  // k*(tj)*(si-1)
      }
    }
    info = A->InsertGlobalValues(MyGlobalRowElements[i], n, &Values[0], &Indices[0]);
    assert( info==0 );
  }

  // Finish up
  info = A->FillComplete(ColMap, RowMap);
  assert( info==0 );
  info = A->OptimizeStorage();
  assert( info==0 );
  A->SetTracebackMode(1); // Shutdown Epetra Warning tracebacks

  //************************************
  // Start the block Arnoldi iteration
  //***********************************
  //
  //  Variables used for the Block Arnoldi Method
  //
  int nev = 4;
  int blockSize = 1;
  int numBlocks = 10;
  int maxRestarts = 20;
  int verbosity = Anasazi::Errors + Anasazi::Warnings + Anasazi::FinalSummary;
  double tol = lapack.LAMCH('E');
  std::string which = "LM";
  //
  // Create parameter list to pass into solver
  //
  Teuchos::ParameterList MyPL;
  MyPL.set( "Verbosity", verbosity );
  MyPL.set( "Which", which );
  MyPL.set( "Block Size", blockSize );
  MyPL.set( "Num Blocks", numBlocks );
  MyPL.set( "Maximum Restarts", maxRestarts );
  MyPL.set( "Convergence Tolerance", tol );

  typedef Anasazi::MultiVec<double> MV;
  typedef Anasazi::Operator<double> OP;

  // Create an Anasazi::EpetraMultiVec for an initial vector to start the solver. 
  // Note:  This needs to have the same number of columns as the blocksize.
  Teuchos::RCP<Anasazi::EpetraMultiVec> ivec = Teuchos::rcp( new Anasazi::EpetraMultiVec(ColMap, blockSize) );
  ivec->MvRandom();

  // Call the constructor for the (A^T*A) operator
  Teuchos::RCP<Anasazi::EpetraSymOp>  Amat = Teuchos::rcp( new Anasazi::EpetraSymOp(A) );  
  Teuchos::RCP<Anasazi::BasicEigenproblem<double, MV, OP> > MyProblem =
    Teuchos::rcp( new Anasazi::BasicEigenproblem<double, MV, OP>(Amat, ivec) );

  // Inform the eigenproblem that the operator A^T*A is symmetric
  MyProblem->setHermitian(true);

  // Set the number of eigenvalues requested and the blocksize the solver should use
  MyProblem->setNEV( nev );

  // Inform the eigenproblem that you are finished passing it information
  bool boolret = MyProblem->setProblem();
  if (boolret != true) {
    if (MyPID == 0) {
      cout << "Anasazi::BasicEigenproblem::setProblem() returned with error." << endl;
    }
#ifdef HAVE_MPI
    MPI_Finalize() ;
#endif
    return -1;
  }

  // Initialize the Block Arnoldi solver
  Anasazi::BlockKrylovSchurSolMgr<double, MV, OP> MySolverMgr(MyProblem, MyPL);
  
  // Solve the problem to the specified tolerances or length
  Anasazi::ReturnType returnCode = MySolverMgr.solve();
  if (returnCode != Anasazi::Converged && MyPID==0) {
    cout << "Anasazi::EigensolverMgr::solve() returned unconverged." << endl;
  }

  // Get the eigenvalues and eigenvectors from the eigenproblem
  Anasazi::Eigensolution<double,MV> sol = MyProblem->getSolution();
  std::vector<Anasazi::Value<double> > evals = sol.Evals;
  int numev = sol.numVecs;

  if (numev > 0) {
    
    // Compute singular values/vectors and direct residuals.
    //
    // Compute singular values which are the square root of the eigenvalues
    if (MyPID==0) {
      cout<<"------------------------------------------------------"<<endl;
      cout<<"Computed Singular Values: "<<endl;
      cout<<"------------------------------------------------------"<<endl;
    }
    for (i=0; i<numev; i++) { evals[i].realpart = Teuchos::ScalarTraits<double>::squareroot( evals[i].realpart ); }
    //
    // Compute left singular vectors :  u = Av/sigma
    //
    std::vector<double> tempnrm(numev), directnrm(numev);
    std::vector<int> index(numev);
    for (i=0; i<numev; i++) { index[i] = i; }
    Anasazi::EpetraMultiVec Av(RowMap,numev), u(RowMap,numev);
    Anasazi::EpetraMultiVec* evecs = dynamic_cast<Anasazi::EpetraMultiVec* >(sol.Evecs->CloneViewNonConst( index ));
    Teuchos::SerialDenseMatrix<int,double> S(numev,numev);
    A->Apply( *evecs, Av );
    Av.MvNorm( tempnrm );
    for (i=0; i<numev; i++) { S(i,i) = one/tempnrm[i]; };
    u.MvTimesMatAddMv( one, Av, S, zero );
    //
    // Compute direct residuals : || Av - sigma*u ||
    //
    for (i=0; i<numev; i++) { S(i,i) = evals[i].realpart; }
    Av.MvTimesMatAddMv( -one, u, S, one );
    Av.MvNorm( directnrm );
    if (MyPID==0) {
      cout.setf(std::ios_base::right, std::ios_base::adjustfield);
      cout<<std::setw(16)<<"Singular Value"
        <<std::setw(20)<<"Direct Residual"
        <<endl;
      cout<<"------------------------------------------------------"<<endl;
      for (i=0; i<numev; i++) {
        cout<<std::setw(16)<<evals[i].realpart
          <<std::setw(20)<<directnrm[i] 
          <<endl;
      }  
      cout<<"------------------------------------------------------"<<endl;
    }
  }
  
#ifdef EPETRA_MPI
    MPI_Finalize() ;
#endif
  
  return 0;
}