int main( int argc, char* argv[] )
{

#ifdef EPETRA_MPI
  // Initialize MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
  Epetra_SerialComm Comm;
#endif

  // Create command line processor
  Teuchos::CommandLineProcessor RBGen_CLP;
  RBGen_CLP.recogniseAllOptions( false );
  RBGen_CLP.throwExceptions( false );

  // Generate list of acceptable command line options
  bool verbose = false;
  std::string xml_file = "";
  RBGen_CLP.setOption("verbose", "quiet", &verbose, "Print messages and results.");
  RBGen_CLP.setOption("xml-file", &xml_file, "XML Input File");

  // Process command line.
  Teuchos::CommandLineProcessor::EParseCommandLineReturn
    parseReturn= RBGen_CLP.parse( argc, argv );
  if( parseReturn == Teuchos::CommandLineProcessor::PARSE_HELP_PRINTED ) {
#ifdef EPETRA_MPI
    MPI_Finalize();
#endif
    return 0;
  }
  if( parseReturn != Teuchos::CommandLineProcessor::PARSE_SUCCESSFUL   ) {
#ifdef EPETRA_MPI
    MPI_Finalize();
#endif
    return -1; // Error!
  }

  // Check to make sure an XML input file was provided
  TEUCHOS_TEST_FOR_EXCEPTION(xml_file == "", std::invalid_argument, "ERROR:  An XML file was not provided; use --xml-file to provide an XML input file for this RBGen driver.");

  Teuchos::Array<Teuchos::RCP<Teuchos::Time> > timersRBGen;
  //
  // ---------------------------------------------------------------
  //  CREATE THE INITIAL PARAMETER LIST FROM THE INPUT XML FILE
  // ---------------------------------------------------------------
  //
  Teuchos::RCP<Teuchos::ParameterList> BasisParams = RBGen::createParams( xml_file );
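  // This parameter list configures every RBGen factory used below
  // (file I/O handler, preprocessor, and basis generation method).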
  if (verbose && Comm.MyPID() == 0) 
  {
    std::cout<<"-------------------------------------------------------"<<std::endl;
    std::cout<<"Input Parameter List: "<<std::endl;
    std::cout<<"-------------------------------------------------------"<<std::endl;
    BasisParams->print();
  } 
  //
  // ---------------------------------------------------------------
  //  CREATE THE FILE I/O HANDLER
  // ---------------------------------------------------------------
  //
  //  - First create the abstract factory for the file i/o handler.
  //
  RBGen::EpetraMVFileIOFactory fio_factory;
  //
  //  - Then use the abstract factory to create the file i/o handler specified in the parameter list.
  //
  Teuchos::RCP<Teuchos::Time> timerFileIO = Teuchos::rcp( new Teuchos::Time("Create File I/O Handler") );
  timersRBGen.push_back( timerFileIO );
  //
  Teuchos::RCP< RBGen::FileIOHandler<Epetra_MultiVector> > mvFileIO;
  Teuchos::RCP< RBGen::FileIOHandler<Epetra_Operator> > opFileIO =
    Teuchos::rcp( new RBGen::EpetraCrsMatrixFileIOHandler() ); 
  {
    Teuchos::TimeMonitor lcltimer( *timerFileIO );
    mvFileIO = fio_factory.create( *BasisParams );
    //
    // Initialize file IO handlers
    //
    mvFileIO->Initialize( BasisParams );
    opFileIO->Initialize( BasisParams );
  }    
  if (verbose && Comm.MyPID() == 0) 
  {
    std::cout<<"-------------------------------------------------------"<<std::endl;
    std::cout<<"File I/O Handlers Generated"<<std::endl;
    std::cout<<"-------------------------------------------------------"<<std::endl;
  }
  //
  // ---------------------------------------------------------------
  //  READ IN THE DATA SET / SNAPSHOT SET & PREPROCESS
  //  ( this will be a separate abstract class type )
  // ---------------------------------------------------------------
  //
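  // Assemble the list of snapshot filenames specified in the parameter list.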
  Teuchos::RCP<std::vector<std::string> > filenames = RBGen::genFileList( *BasisParams );
  Teuchos::RCP<Teuchos::Time> timerSnapshotIn = Teuchos::rcp( new Teuchos::Time("Reading in Snapshot Set") );
  timersRBGen.push_back( timerSnapshotIn );
  //
  Teuchos::RCP<Epetra_MultiVector> testMV;
  {
    Teuchos::TimeMonitor lcltimer( *timerSnapshotIn );
    testMV = mvFileIO->Read( *filenames );
  } 
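  // testMV now holds the full snapshot set, stored as the columns of an Epetra_MultiVector.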

  RBGen::EpetraMVPreprocessorFactory preprocess_factory;

  Teuchos::RCP<Teuchos::Time> timerCreatePreprocessor = Teuchos::rcp( new Teuchos::Time("Create Preprocessor") );
  timersRBGen.push_back( timerCreatePreprocessor );
  Teuchos::RCP<RBGen::Preprocessor<Epetra_MultiVector> > prep;
  {
    Teuchos::TimeMonitor lcltimer( *timerCreatePreprocessor );
    prep = preprocess_factory.create( *BasisParams );
    //
    // Initialize preprocessor.
    //
    prep->Initialize( BasisParams, mvFileIO );
  }

  Teuchos::RCP<Teuchos::Time> timerPreprocess = Teuchos::rcp( new Teuchos::Time("Preprocess Snapshot Set") );  
  timersRBGen.push_back( timerPreprocess );
  {
    Teuchos::TimeMonitor lcltimer( *timerPreprocess );
    prep->Preprocess( testMV );
  }
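  // Preprocessing modifies the snapshot set in place, according to the options
  // given in the parameter list.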

  if (verbose && Comm.MyPID() == 0) 
  {
    std::cout<<"-------------------------------------------------------"<<std::endl;
    std::cout<<"Snapshot Set Imported and Preprocessed"<<std::endl;
    std::cout<<"-------------------------------------------------------"<<std::endl;
  }
  //
  // ---------------------------------------------------------------
  //  COMPUTE THE REDUCED BASIS
  // ---------------------------------------------------------------
  //
  //  - First create the abstract factory for the reduced basis methods.
  //
  RBGen::EpetraMVMethodFactory mthd_factory;
  //
  //  - Then use the abstract factory to create the method specified in the parameter list.
  //
  Teuchos::RCP<Teuchos::Time> timerCreateMethod = Teuchos::rcp( new Teuchos::Time("Create Reduced Basis Method") );
  timersRBGen.push_back( timerCreateMethod );
  Teuchos::RCP<RBGen::Method<Epetra_MultiVector,Epetra_Operator> > method;
  {
    Teuchos::TimeMonitor lcltimer( *timerCreateMethod );  
    method = mthd_factory.create( *BasisParams );
    //
    // Initialize reduced basis method.
    //
    method->Initialize( BasisParams, testMV, opFileIO );
  }
  //
  //  - Call the computeBasis method on the reduced basis method object.
  //
  Teuchos::RCP<Teuchos::Time> timerComputeBasis = Teuchos::rcp( new Teuchos::Time("Reduced Basis Computation") );
  timersRBGen.push_back( timerComputeBasis );
  {
    Teuchos::TimeMonitor lcltimer( *timerComputeBasis );  
    method->computeBasis();
  }
  //
  //  - Retrieve the computed basis from the method object.
  //
  Teuchos::RCP<const Epetra_MultiVector> basisMV = method->getBasis();
  //
  //  Since we're using a POD method, we can dynamically cast to PODMethod to retrieve the singular values.
  //
  Teuchos::RCP<RBGen::PODMethod<double> > pod_method = Teuchos::rcp_dynamic_cast<RBGen::PODMethod<double> >( method );
  TEUCHOS_TEST_FOR_EXCEPTION(pod_method == Teuchos::null, std::runtime_error, "ERROR:  The basis method specified in the XML file is not a POD method, so singular values are not available.");
  const std::vector<double> sv = pod_method->getSingularValues();
  //
  if (verbose && Comm.MyPID() == 0) {
    std::cout<<"-------------------------------------------------------"<<std::endl;
    std::cout<<"Computed Singular Values : "<<std::endl;
    std::cout<<"-------------------------------------------------------"<<std::endl;
    for (unsigned int i=0; i<sv.size(); ++i) { std::cout << sv[i] << std::endl; }
  }      
  
  if (Comm.MyPID() == 0) {
    std::cout<<"-------------------------------------------------------"<<std::endl;
    std::cout<<"RBGen Computation Time Breakdown (seconds) : "<<std::endl;
    std::cout<<"-------------------------------------------------------"<<std::endl;
    for (unsigned int i=0; i<timersRBGen.size(); ++i)
      std::cout << std::left << std::setw(40) << timersRBGen[i]->name() << " : "
                << std::setw(15) << timersRBGen[i]->totalElapsedTime() << std::endl;
    std::cout<<"-------------------------------------------------------"<<std::endl;
  }
  //
  // ---------------------------------------------------------------
  //  POSTPROCESS BASIS (not necessary right now)
  // ---------------------------------------------------------------
  //
  //
  // ---------------------------------------------------------------
  //  WRITE OUT THE REDUCED BASIS
  // ---------------------------------------------------------------
  //
  if ( BasisParams->isSublist( "File IO" ) ) {
    Teuchos::ParameterList fileio_params = BasisParams->sublist( "File IO" );
    if ( fileio_params.isParameter( "Reduced Basis Output File" ) ) {
      std::string outfile = Teuchos::getParameter<std::string>( fileio_params, "Reduced Basis Output File" );
      mvFileIO->Write( basisMV, outfile );
    }
  }
  //
#ifdef EPETRA_MPI
  // Finalize MPI
  MPI_Finalize();
#endif

  return 0;
}
int main(int argc, char *argv[])
{

  // Initialize MPI
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
#endif

  // Create a communicator for Epetra objects
#ifdef HAVE_MPI
  Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
  Epetra_SerialComm Comm;
#endif

  // Get the process ID and the total number of processors
  int MyPID = Comm.MyPID();
  int NumProc = Comm.NumProc();

  // Check verbosity level
  bool verbose = false;
  if (argc > 1)
    if (argv[1][0]=='-' && argv[1][1]=='v')
      verbose = true;

  // Get the number of elements from the command line
  int NumGlobalElements = 0;
  if ((argc > 2) && (verbose))
    NumGlobalElements = atoi(argv[2]) + 1;
  else if ((argc > 1) && (!verbose))
    NumGlobalElements = atoi(argv[1]) + 1;
  else
    NumGlobalElements = 101;

  // The number of unknowns must be at least equal to the
  // number of processors.
  if (NumGlobalElements < NumProc) {
    std::cout << "NumGlobalElements = " << NumGlobalElements
      << " cannot be < number of processors = " << NumProc << std::endl;
    std::cout << "Test failed!" << std::endl;
    throw "NOX Error";
  }

  bool success = false;

  try {
    // Create the interface between NOX and the application
    // This object is derived from NOX::Epetra::Interface
    Teuchos::RCP<Interface> interface =
      Teuchos::rcp(new Interface(NumGlobalElements, Comm));

    // Set the PDE factor (for nonlinear forcing term).  This could be specified
    // via user input.
    interface->setPDEfactor(1000.0);

    // Begin Nonlinear Solver ************************************

    // Create a parameter list (note: not referenced again in this test)
    Teuchos::RCP<Teuchos::ParameterList> IfpackParamsPtr =
      Teuchos::rcp(new Teuchos::ParameterList);

    // Set the printing parameters in the "Printing" sublist
    Teuchos::ParameterList printParams;
    printParams.set("MyPID", MyPID);
    printParams.set("Output Precision", 3);
    printParams.set("Output Processor", 0);
    if (verbose)
      printParams.set("Output Information",
          NOX::Utils::OuterIteration +
          NOX::Utils::OuterIterationStatusTest +
          NOX::Utils::InnerIteration +
          NOX::Utils::LinearSolverDetails +
          NOX::Utils::Parameters +
          NOX::Utils::Details +
          NOX::Utils::Warning +
          NOX::Utils::Debug +
          NOX::Utils::TestDetails +
          NOX::Utils::Error);
    else
      printParams.set("Output Information", NOX::Utils::Error +
          NOX::Utils::TestDetails);

    // Create a print class for controlling output below
    NOX::Utils p(printParams);


    // *******************************
    // Setup Test Objects
    // *******************************

    // Create Linear Objects
    // Get the vector from the Problem
    if (verbose)
      p.out() << "Creating Vectors and Matrices" << std::endl;
    Teuchos::RCP<Epetra_Vector> solution_vec =
      interface->getSolution();
    Teuchos::RCP<Epetra_Vector> rhs_vec =
      Teuchos::rcp(new Epetra_Vector(*solution_vec));
    Teuchos::RCP<Epetra_Vector> lhs_vec =
      Teuchos::rcp(new Epetra_Vector(*solution_vec));
    Teuchos::RCP<Epetra_CrsMatrix> jacobian_matrix =
      interface->getJacobian();


    if (verbose)
      p.out() << "Evaluating F and J" << std::endl;
    solution_vec->PutScalar(1.0);
    interface->computeF(*solution_vec, *rhs_vec);
    rhs_vec->Scale(-1.0);
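    // rhs_vec now holds -F(x), the right-hand side of the Newton system J dx = -F.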
    interface->computeJacobian(*solution_vec, *jacobian_matrix);

    double norm =0.0;
    rhs_vec->Norm2(&norm);
    if (verbose)
      p.out() << "Step 0, ||F|| = " << norm << std::endl;


    if (verbose)
      p.out() << "Creating Ifpack preconditioner" << std::endl;

    Ifpack Factory;
    Teuchos::RCP<Ifpack_Preconditioner> PreconditionerPtr;
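    // Create an ILU preconditioner from the Jacobian; the final argument (0) is the
    // Schwarz overlap level, which is ignored in a serial run.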
    PreconditionerPtr = Teuchos::rcp(Factory.Create("ILU",
          jacobian_matrix.get(),0));
    Teuchos::ParameterList teuchosParams;
    PreconditionerPtr->SetParameters(teuchosParams);
    PreconditionerPtr->Initialize();
    PreconditionerPtr->Compute();


    if (verbose)
      p.out() << "Creating Aztec Solver" << std::endl;

    Teuchos::RCP<AztecOO> aztecSolverPtr = Teuchos::rcp(new AztecOO());
    if (verbose)
      aztecSolverPtr->SetAztecOption(AZ_output, AZ_last);
    else
      aztecSolverPtr->SetAztecOption(AZ_output, AZ_none);

    // *******************************
    // Reuse Test
    // *******************************

    if (verbose) {
      p.out() << "**********************************************" << std::endl;
      p.out() << "Testing Newton solve with prec reuse" << std::endl;
      p.out() << "**********************************************" << std::endl;
    }

    int step_number = 0;
    int max_steps = 20;
    bool converged = false;
    int total_linear_iterations = 0;
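    // Hand-rolled Newton loop: the residual and Jacobian are rebuilt every step,
    // but the Ifpack preconditioner computed above is reused without being recomputed,
    // which is the reuse behavior this test exercises.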
    while (norm > 1.0e-8 && step_number < max_steps) {

      step_number++;

      if (verbose)
        p.out() << "Step " << step_number << ", ||F|| = " << norm << std::endl;

      aztecSolverPtr->SetUserMatrix(jacobian_matrix.get(), false);
      aztecSolverPtr->SetPrecOperator(PreconditionerPtr.get());
      aztecSolverPtr->SetRHS(rhs_vec.get());
      aztecSolverPtr->SetLHS(lhs_vec.get());

      aztecSolverPtr->Iterate(400, 1.0e-4);

      // Apply the Newton update x <- x + dx (lhs_vec holds the step dx, the solution of J dx = -F).
      solution_vec->Update(1.0, *lhs_vec, 1.0);

      interface->computeF(*solution_vec, *rhs_vec);
      rhs_vec->Scale(-1.0);
      interface->computeJacobian(*solution_vec, *jacobian_matrix);

      rhs_vec->Norm2(&norm);

      total_linear_iterations += aztecSolverPtr->NumIters();

      if (norm < 1.0e-8)
        converged = true;
    }

    if (verbose) {
      p.out() << "Final Step " << step_number << ", ||F|| = " << norm << std::endl;
      if (converged)
        p.out() << "Converged!!" << std::endl;
      else
        p.out() << "Failed!!" << std::endl;
    }

    // Tests
    int status = 0; // Converged

    if (verbose)
      p.out() << "Total Number of Linear Iterations = "
        << total_linear_iterations << std::endl;

    // Regression check: a serial run of this test is expected to take exactly 157 total linear iterations.
    if (Comm.NumProc() == 1 && total_linear_iterations != 157)
      status = 1;

    if (!converged)
      status = 2;

    success = converged && status == 0;

    // Summarize test results
    if (success)
      p.out() << "Test passed!" << std::endl;
    else
      p.out() << "Test failed!" << std::endl;

    if (verbose)
      p.out() << "Status = " << status << std::endl;
  }
  TEUCHOS_STANDARD_CATCH_STATEMENTS(verbose, std::cerr, success);

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return ( success ? EXIT_SUCCESS : EXIT_FAILURE );
}
int main(int argc, char *argv[]) {
  using std::cout;
  using std::endl;
  int i;

#ifdef EPETRA_MPI
  // Initialize MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  int MyPID = Comm.MyPID();

  // Number of dimension of the domain
  int space_dim = 2;

  // Size of each of the dimensions of the domain
  std::vector<double> brick_dim( space_dim );
  brick_dim[0] = 1.0;
  brick_dim[1] = 1.0;

  // Number of elements in each of the dimensions of the domain
  std::vector<int> elements( space_dim );
  elements[0] = 10;
  elements[1] = 10;

  // Create problem
  Teuchos::RCP<ModalProblem> testCase = Teuchos::rcp( new ModeLaplace2DQ2(Comm, brick_dim[0], elements[0], brick_dim[1], elements[1]) );

  // Get the stiffness and mass matrices
  Teuchos::RCP<Epetra_CrsMatrix> K = Teuchos::rcp( const_cast<Epetra_CrsMatrix *>(testCase->getStiffness()), false );
  Teuchos::RCP<Epetra_CrsMatrix> M = Teuchos::rcp( const_cast<Epetra_CrsMatrix *>(testCase->getMass()), false );

  //
  // ************Construct preconditioner*************
  //
  Teuchos::ParameterList ifpackList;

  // Allocate an Ifpack factory.  No data is associated with this
  // object; it only provides the Create() method.
  Ifpack Factory;

  // create the preconditioner. For valid PrecType values,
  // please check the documentation
  std::string PrecType = "ICT"; // incomplete Cholesky
  int OverlapLevel = 0; // must be >= 0. If Comm.NumProc() == 1,
                        // it is ignored.

  Teuchos::RCP<Ifpack_Preconditioner> Prec = Teuchos::rcp( Factory.Create(PrecType, &*K, OverlapLevel) );
  assert(Prec != Teuchos::null);

  // specify parameters for ICT
  ifpackList.set("fact: drop tolerance", 1e-4);
  ifpackList.set("fact: ict level-of-fill", 0.);
  // the combine mode is one of the following:
  // "Add", "Zero", "Insert", "InsertAdd", "Average", "AbsMax"
  // Their meaning is as defined in the file Epetra_CombineMode.h
  ifpackList.set("schwarz: combine mode", "Add");
  // sets the parameters
  IFPACK_CHK_ERR(Prec->SetParameters(ifpackList));

  // initialize the preconditioner. At this point the matrix must
  // have been FillComplete()'d, but actual values are ignored.
  IFPACK_CHK_ERR(Prec->Initialize());

  // Build the preconditioner using the values of the matrix.
  IFPACK_CHK_ERR(Prec->Compute());

  //
  //*******************************************************
  // Set up Belos Block CG operator for inner iteration
  //*******************************************************
  //
  int blockSize = 3; // block size used by the eigensolver (the inner Belos linear solver sets its own block size below)
  int maxits = K->NumGlobalRows(); // maximum number of iterations to run
  //
  // Create the Belos::LinearProblem
  //
  Teuchos::RCP<Belos::LinearProblem<double,Epetra_MultiVector,Epetra_Operator> >
    My_LP = Teuchos::rcp( new Belos::LinearProblem<double,Epetra_MultiVector,Epetra_Operator>() );
  My_LP->setOperator( K );

  // Create the Belos preconditioned operator from the Ifpack preconditioner.
  // NOTE:  This is necessary because Belos expects an operator to apply the
  //        preconditioner with Apply() NOT ApplyInverse().
  Teuchos::RCP<Epetra_Operator> belosPrec = Teuchos::rcp( new Epetra_InvOperator( Prec.get() ) );
  My_LP->setLeftPrec( belosPrec );
  //
  // Create the ParameterList for the Belos Operator
  //
  Teuchos::RCP<Teuchos::ParameterList> My_List = Teuchos::rcp( new Teuchos::ParameterList() );
  My_List->set( "Solver", "BlockCG" );
  My_List->set( "Maximum Iterations", maxits );
  My_List->set( "Block Size", 1 );
  My_List->set( "Convergence Tolerance", 1e-12 );
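  // The inner solve tolerance is tight so that the operator the eigensolver applies
  // (assembled from this solver below) behaves like an essentially exact application of K^{-1}.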
  //
  // Create the Belos::EpetraOperator
  //
  Teuchos::RCP<Belos::EpetraOperator> BelosOp =
    Teuchos::rcp( new Belos::EpetraOperator( My_LP, My_List ));
  //
  // ************************************
  // Start the block Krylov-Schur iteration
  // ************************************
  //
  //  Variables used for the block Krylov-Schur method
  //
  double tol = 1.0e-8;
  int nev = 10;
  int numBlocks = 3*nev/blockSize;
  int maxRestarts = 5;
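  // The eigensolver builds a Krylov basis of roughly numBlocks*blockSize vectors
  // (about 3*nev here) and restarts it at most maxRestarts times.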
  //int step = 5;
  std::string which = "LM";
  int verbosity = Anasazi::Errors + Anasazi::Warnings + Anasazi::FinalSummary;
  //
  // Create parameter list to pass into solver
  //
  Teuchos::ParameterList MyPL;
  MyPL.set( "Verbosity", verbosity );
  MyPL.set( "Which", which );
  MyPL.set( "Block Size", blockSize );
  MyPL.set( "Num Blocks", numBlocks );
  MyPL.set( "Maximum Restarts", maxRestarts );
  MyPL.set( "Convergence Tolerance", tol );
  //MyPL.set( "Step Size", step );

  typedef Epetra_MultiVector MV;
  typedef Epetra_Operator OP;
  typedef Anasazi::MultiVecTraits<double, MV> MVT;
  typedef Anasazi::OperatorTraits<double, MV, OP> OPT;

  // Create an Epetra_MultiVector for an initial vector to start the solver.
  // Note:  This needs to have the same number of columns as the blocksize.
  Teuchos::RCP<Epetra_MultiVector> ivec = Teuchos::rcp( new Epetra_MultiVector(K->Map(), blockSize) );
  MVT::MvRandom( *ivec );

  // Build the operator handed to the eigensolver: EpetraGenOp applies M followed by the
  // Belos operator (which approximately applies K^{-1}), so the eigensolver sees roughly K^{-1} M.
  Teuchos::RCP<Anasazi::EpetraGenOp> Aop = Teuchos::rcp( new Anasazi::EpetraGenOp(BelosOp, M, false) );

  Teuchos::RCP<Anasazi::BasicEigenproblem<double,MV,OP> > MyProblem =
    Teuchos::rcp( new Anasazi::BasicEigenproblem<double,MV,OP>(Aop, M, ivec) );

  // Inform the eigenproblem that the matrix pencil (K,M) is symmetric
  MyProblem->setHermitian(true);

  // Set the number of eigenvalues requested
  MyProblem->setNEV( nev );

  // Inform the eigenproblem that you are finished passing it information
  bool boolret = MyProblem->setProblem();
  if (boolret != true) {
    if (MyPID == 0) {
      cout << "Anasazi::BasicEigenproblem::setProblem() returned with error." << endl;
    }
#ifdef HAVE_MPI
    MPI_Finalize() ;
#endif
    return -1;
  }

  // Initialize the block Krylov-Schur solver manager
  Anasazi::BlockKrylovSchurSolMgr<double, MV, OP> MySolverMgr(MyProblem, MyPL);

  // Solve the problem to the specified tolerances or length
  Anasazi::ReturnType returnCode = MySolverMgr.solve();
  if (returnCode != Anasazi::Converged && MyPID==0) {
    cout << "Anasazi::EigensolverMgr::solve() returned unconverged." << endl;
  }

  // Get the eigenvalues and eigenvectors from the eigenproblem
  Anasazi::Eigensolution<double,MV> sol = MyProblem->getSolution();
  std::vector<Anasazi::Value<double> > evals = sol.Evals;
  Teuchos::RCP<MV> evecs = sol.Evecs;
  int numev = sol.numVecs;

  if (numev > 0) {

    Teuchos::SerialDenseMatrix<int,double> dmatr(numev,numev);
    Epetra_MultiVector tempvec(K->Map(), MVT::GetNumberVecs( *evecs ));
    OPT::Apply( *K, *evecs, tempvec );
    MVT::MvTransMv( 1.0, tempvec, *evecs, dmatr );
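    // The diagonal of dmatr holds the Rayleigh quotients x^T K x.  Assuming the returned
    // eigenvectors are M-orthonormal, these should match 1/evals[i], since the eigensolver
    // computed eigenvalues of (approximately) K^{-1} M.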

    if (MyPID==0) {
      double compeval = 0.0;
      cout.setf(std::ios_base::right, std::ios_base::adjustfield);
      cout<<"Actual Eigenvalues (obtained by Rayleigh quotient) : "<<endl;
      cout<<"------------------------------------------------------"<<endl;
      cout<<std::setw(16)<<"Real Part"
        <<std::setw(16)<<"Rayleigh Error"<<endl;
      cout<<"------------------------------------------------------"<<endl;
      for (i=0; i<numev; i++) {
        compeval = dmatr(i,i);
        cout<<std::setw(16)<<compeval
          <<std::setw(16)<<Teuchos::ScalarTraits<double>::magnitude(compeval-1.0/evals[i].realpart)
          <<endl;
      }
      cout<<"------------------------------------------------------"<<endl;
    }

  }

#ifdef EPETRA_MPI
  MPI_Finalize();
#endif

  return 0;
}