Example #1
/* ml_epetra_data_pack_status - This function does a status query on the
   ML_EPETRA_DATA_PACK passed in.
   Returns: IS_TRUE
*/
int ml_epetra_data_pack::status(){
  mexPrintf("**** Problem ID %d [ML_Epetra] ****\n",id);
  if(A) mexPrintf("Matrix: %dx%d w/ %d nnz\n",A->NumGlobalRows(),A->NumGlobalCols(),A->NumMyNonzeros()); /* local nnz; equals the global count in serial (MATLAB) runs */
  mexPrintf(" Operator complexity = %e\n",operator_complexity);
  if(List){mexPrintf("Parameter List:\n");List->print(cout,1);}
  mexPrintf("\n");
  return IS_TRUE;
}/*end status*/
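This status method belongs to ML's MATLAB (mex) interface, so it cannot run outside MATLAB. Below is a minimal, self-contained sketch of the same status-query pattern, with mexPrintf replaced by std::printf and IS_TRUE assumed to be 1 (the real constant comes from the mlmex headers):

#include <cstdio>

#define IS_TRUE 1 /* assumed value; the mlmex headers define the real constant */

/* Stand-in for ml_epetra_data_pack: holds a problem id and matrix shape. */
struct DataPack {
  int id;
  int rows, cols, nnz;

  /* Print a one-problem status report, mirroring status() above. */
  int status() const {
    std::printf("**** Problem ID %d ****\n", id);
    std::printf("Matrix: %dx%d w/ %d nnz\n", rows, cols, nnz);
    return IS_TRUE;
  }
};

int main() {
  DataPack pack{7, 100, 100, 460};
  return pack.status() == IS_TRUE ? 0 : 1;
}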
Example #2
shared_ptr<Epetra_CrsMatrix> sparseCholesky(const Epetra_CrsMatrix &mat) {
  // Note: we assume the matrix mat is symmetric and positive-definite
  size_t size = mat.NumGlobalCols();
  if (static_cast<size_t>(mat.NumGlobalRows()) != size)
    throw std::invalid_argument("sparseCholesky(): matrix must be square");

  int *rowOffsets = 0;
  int *colIndices = 0;
  double *values = 0;
  mat.ExtractCrsDataPointers(rowOffsets, colIndices, values);

  Epetra_SerialComm comm;
  Epetra_LocalMap rowMap(static_cast<int>(size), 0 /* index_base */, comm);
  Epetra_LocalMap columnMap(static_cast<int>(size), 0 /* index_base */, comm);
  shared_ptr<Epetra_CrsMatrix> result = boost::make_shared<Epetra_CrsMatrix>(
      Copy, rowMap, columnMap, mat.GlobalMaxNumEntries());

  arma::Mat<double> localMat;
  arma::Mat<double> localCholesky;
  std::vector<bool> processed(size, false);
  for (size_t r = 0; r < size; ++r) {
    if (processed[r])
      continue;
    int localSize = rowOffsets[r + 1] - rowOffsets[r];
    localMat.set_size(localSize, localSize);
    localMat.fill(0.);
    localCholesky.set_size(localSize, localSize);
    for (int s = 0; s < localSize; ++s) {
      int row = colIndices[rowOffsets[r] + s];
      for (int c = 0; c < localSize; ++c) {
        int col = colIndices[rowOffsets[row] + c];
        if (col != colIndices[rowOffsets[r] + c])
          throw std::invalid_argument("sparseCholesky(): matrix is not "
                                      "block-diagonal");
        localMat(s, c) = values[rowOffsets[row] + c];
      }
    }
    assert(arma::norm(localMat - localMat.t(), "fro") <
           1e-12 * arma::norm(localMat, "fro"));
    localCholesky = arma::chol(localMat); // localCholesky: U
    for (int s = 0; s < localSize; ++s) {
      int row = colIndices[rowOffsets[r] + s];
      processed[row] = true;
#ifndef NDEBUG
      int errorCode =
#endif
          result->InsertGlobalValues(row, s + 1 /* number of values */,
                                     localCholesky.colptr(s),
                                     colIndices + rowOffsets[r]);
      assert(errorCode == 0);
    }
  }
  result->FillComplete(columnMap, rowMap);

  return result;
}
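A hedged driver sketch for the factorization above: it assembles a 4x4 matrix out of two 2x2 symmetric positive-definite diagonal blocks and hands it to sparseCholesky. It assumes the Epetra, Boost, and Armadillo headers used by the function are available, and that FillComplete leaves the matrix with the contiguous storage that ExtractCrsDataPointers expects.

#include <boost/shared_ptr.hpp>
#include <Epetra_SerialComm.h>
#include <Epetra_LocalMap.h>
#include <Epetra_CrsMatrix.h>

using boost::shared_ptr;

int main() {
  Epetra_SerialComm comm;
  const int n = 4;
  Epetra_LocalMap map(n, 0 /* index base */, comm);
  Epetra_CrsMatrix mat(Copy, map, 2 /* entries per row */);

  // Two identical SPD blocks [[4, 1], [1, 3]] on the diagonal.
  double block[2][2] = {{4.0, 1.0}, {1.0, 3.0}};
  for (int b = 0; b < n; b += 2) {
    int cols[2] = {b, b + 1};
    for (int r = 0; r < 2; ++r)
      mat.InsertGlobalValues(b + r, 2, block[r], cols);
  }
  mat.FillComplete();

  shared_ptr<Epetra_CrsMatrix> U = sparseCholesky(mat); // factor defined above
  return U ? 0 : 1;
}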
Example #3
int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
    Teuchos::GlobalMPISession mpiSession(&argc, &argv, 0);
    Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
    Epetra_SerialComm Comm;
#endif
    int nProcs, myPID ;
    Teuchos::ParameterList pLUList ;        // ParaLU parameters
    Teuchos::ParameterList isoList ;        // Isorropia parameters
    Teuchos::ParameterList shyLUList ;      // ShyLU parameters
    Teuchos::ParameterList ifpackList ;     // Ifpack parameters
    string ipFileName = "ShyLU.xml";       // TODO : Accept as i/p

#ifdef HAVE_MPI
    nProcs = mpiSession.getNProc();
#else
    nProcs = 1;
#endif
    myPID = Comm.MyPID();

    if (myPID == 0)
    {
        cout <<"Parallel execution: nProcs="<< nProcs << endl;
    }

    // =================== Read input xml file =============================
    Teuchos::updateParametersFromXmlFile(ipFileName, &pLUList);
    isoList = pLUList.sublist("Isorropia Input");
    shyLUList = pLUList.sublist("ShyLU Input");
    shyLUList.set("Outer Solver Library", "AztecOO");
    // Get matrix market file name
    string MMFileName = Teuchos::getParameter<string>(pLUList, "mm_file");
    string prec_type = Teuchos::getParameter<string>(pLUList, "preconditioner");
    int maxiters = Teuchos::getParameter<int>(pLUList, "Outer Solver MaxIters");
    double tol = Teuchos::getParameter<double>(pLUList, "Outer Solver Tolerance");
    string rhsFileName = pLUList.get<string>("rhs_file", "");

    if (myPID == 0)
    {
        cout << "Input :" << endl;
        cout << "ParaLU params " << endl;
        pLUList.print(std::cout, 2, true, true);
        cout << "Matrix market file name: " << MMFileName << endl;
    }

    // ==================== Read input Matrix ==============================
    Epetra_CrsMatrix *A;
    Epetra_MultiVector *b1;

    int err = EpetraExt::MatrixMarketFileToCrsMatrix(MMFileName.c_str(), Comm,
                                                        A);
    //EpetraExt::MatlabFileToCrsMatrix(MMFileName.c_str(), Comm, A);
    if (err != 0 && myPID == 0)
        cout << "Error reading matrix file, info = " << err << endl;
    //cout <<"Done reading the matrix"<< endl;
    int n = A->NumGlobalRows();
    //cout <<"n="<< n << endl;

    // Create input vectors
    Epetra_Map vecMap(n, 0, Comm);
    if (rhsFileName != "")
    {
        err = EpetraExt::MatrixMarketFileToMultiVector(rhsFileName.c_str(),
                                         vecMap, b1);
    }
    else
    {
        b1 = new Epetra_MultiVector(vecMap, 1, false);
        b1->PutScalar(1.0);
    }

    Epetra_MultiVector x(vecMap, 1);
    //cout << "Created the vectors" << endl;

    // Partition the matrix with hypergraph partitioning and redistribute
    Isorropia::Epetra::Partitioner *partitioner = new
                            Isorropia::Epetra::Partitioner(A, isoList, false);
    partitioner->partition();
    Isorropia::Epetra::Redistributor rd(partitioner);

    Epetra_CrsMatrix *newA;
    Epetra_MultiVector *newX, *newB; 
    rd.redistribute(*A, newA);
    delete A;
    A = newA;

    rd.redistribute(x, newX);
    rd.redistribute(*b1, newB);

    Epetra_LinearProblem problem(A, newX, newB);

    AztecOO solver(problem);

    // Initialize to NULL so the deletes at the end are safe even if
    // prec_type matches none of the branches below.
    Ifpack_Preconditioner *prec = NULL;
    ML_Epetra::MultiLevelPreconditioner *MLprec = NULL;
    if (prec_type.compare("ShyLU") == 0)
    {
        prec = new Ifpack_ShyLU(A);
        prec->SetParameters(shyLUList);
        prec->Initialize();
        prec->Compute();
        //(dynamic_cast<Ifpack_ShyLU *>(prec))->JustTryIt();
        //cout << " Going to set it in solver" << endl ;
        solver.SetPrecOperator(prec);
        //cout << " Done setting the solver" << endl ;
    }
    else if (prec_type.compare("ILU") == 0)
    {
        ifpackList.set( "fact: level-of-fill", 1 );
        prec = new Ifpack_ILU(A);
        prec->SetParameters(ifpackList);
        prec->Initialize();
        prec->Compute();
        solver.SetPrecOperator(prec);
    }
    else if (prec_type.compare("ILUT") == 0)
    {
        ifpackList.set( "fact: ilut level-of-fill", 2 );
        ifpackList.set( "fact: drop tolerance", 1e-8);
        prec = new Ifpack_ILUT(A);
        prec->SetParameters(ifpackList);
        prec->Initialize();
        prec->Compute();
        solver.SetPrecOperator(prec);
    }
    else if (prec_type.compare("ML") == 0)
    {
        Teuchos::ParameterList mlList; // TODO : Take it from i/p
        MLprec = new ML_Epetra::MultiLevelPreconditioner(*A, mlList, true);
        solver.SetPrecOperator(MLprec);
    }

    solver.SetAztecOption(AZ_solver, AZ_gmres);
    solver.SetMatrixName(333);
    //solver.SetAztecOption(AZ_output, 1);
    //solver.SetAztecOption(AZ_conv, AZ_Anorm);
    //cout << "Going to iterate for the global problem" << endl;

    solver.Iterate(maxiters, tol);

    // compute ||Ax - b||
    double Norm;
    Epetra_MultiVector Ax(vecMap, 1);

    Epetra_MultiVector *newAx; 
    rd.redistribute(Ax, newAx);
    A->Multiply(false, *newX, *newAx);
    newAx->Update(1.0, *newB, -1.0);
    newAx->Norm2(&Norm);
    double ANorm = A->NormOne();

    cout << "|Ax-b |/|A| = " << Norm/ANorm << endl;

    delete newAx;
    if (prec_type.compare("ML") == 0)
    {
        delete MLprec;
    }
    else
    {
        delete prec;
    }

    delete b1;
    delete newX;
    delete newB;
    delete A;
    delete partitioner;
}
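The driver reads every setting from ShyLU.xml. A sketch of the parameter layout it expects, written with Teuchos calls rather than raw XML; the keys come from the getParameter calls above, while the concrete values are illustrative assumptions:

#include <Teuchos_ParameterList.hpp>
#include <Teuchos_XMLParameterListHelpers.hpp>

int main() {
  Teuchos::ParameterList pLUList;
  pLUList.set("mm_file", "matrix.mm");       // Matrix Market input (assumed name)
  pLUList.set("preconditioner", "ShyLU");    // ShyLU | ILU | ILUT | ML
  pLUList.set("Outer Solver MaxIters", 100);
  pLUList.set("Outer Solver Tolerance", 1e-8);
  pLUList.set("rhs_file", "");               // optional right-hand side
  pLUList.sublist("Isorropia Input");        // handed to the partitioner
  pLUList.sublist("ShyLU Input");            // handed to Ifpack_ShyLU
  Teuchos::writeParameterListToXmlFile(pLUList, "ShyLU.xml");
  return 0;
}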
Example #4
bool CrsMatrixInfo( const Epetra_CrsMatrix & A,
                    ostream & os )
{
  int MyPID = A.Comm().MyPID(); 

  // make sure the matrix has already been transformed to local indices
  bool IndicesAreGlobal = A.IndicesAreGlobal();
  if( IndicesAreGlobal == true ) {
    if( MyPID == 0 ) {
      os << "WARNING : matrix must be transformed to local\n";
      os << "          before calling CrsMatrixInfo\n";
      os << "          Now returning...\n";
    }
    return false;
  }

  int NumGlobalRows = A.NumGlobalRows();
  int NumGlobalNonzeros = A.NumGlobalNonzeros();
  int NumGlobalCols = A.NumGlobalCols();
  double NormInf = A.NormInf();
  double NormOne = A.NormOne();
  int NumGlobalDiagonals = A.NumGlobalDiagonals();
  int GlobalMaxNumEntries = A.GlobalMaxNumEntries();
  int IndexBase = A.IndexBase();
  bool StorageOptimized = A.StorageOptimized();
  bool LowerTriangular = A.LowerTriangular();
  bool UpperTriangular = A.UpperTriangular();
  bool NoDiagonal = A.NoDiagonal();

  // these variables identify quantities we have to compute ourselves,
  // since Epetra_CrsMatrix does not provide them

  double MyFrobeniusNorm( 0.0 ), FrobeniusNorm( 0.0 );
  double MyMinElement( DBL_MAX ), MinElement( DBL_MAX );
  double MyMaxElement( -DBL_MAX ), MaxElement( -DBL_MAX ); // DBL_MIN is the smallest positive double, not the most negative one
  double MyMinAbsElement( DBL_MAX ), MinAbsElement( DBL_MAX );
  double MyMaxAbsElement( 0.0 ), MaxAbsElement( 0.0 );

  int NumMyRows = A.NumMyRows();
  int * NzPerRow = new int[NumMyRows];
  int Row; // iterator on rows
  int Col; // iterator on cols
  int MaxNumEntries = A.MaxNumEntries();
  double * Values = new double[MaxNumEntries];
  int * Indices = new int[MaxNumEntries];
  double Element, AbsElement; // generic nonzero element and its abs value
  int NumEntries;
  double * Diagonal = new double [NumMyRows];
  // SumOffDiagonal is the sum of absolute values for off-diagonals
  double * SumOffDiagonal = new double [NumMyRows];  
  for( Row=0 ;  Row<NumMyRows ; ++Row ) {
    SumOffDiagonal[Row] = 0.0;
  }
  int * IsDiagonallyDominant = new int [NumMyRows];
  int GlobalRow;

  // cycle over all matrix elements
  for( Row=0 ; Row<NumMyRows ; ++Row ) {
    GlobalRow = A.GRID(Row);
    NzPerRow[Row] = A.NumMyEntries(Row);
    A.ExtractMyRowCopy(Row,NzPerRow[Row],NumEntries,Values,Indices);
    for( Col=0 ; Col<NumEntries ; ++Col ) {
      Element = Values[Col];
      AbsElement = fabs(Element);
      if( Element<MyMinElement ) MyMinElement = Element;
      if( Element>MyMaxElement ) MyMaxElement = Element;
      if( AbsElement<MyMinAbsElement ) MyMinAbsElement = AbsElement;
      if( AbsElement>MyMaxAbsElement ) MyMaxAbsElement = AbsElement;
      if( Indices[Col] == Row ) Diagonal[Row] = Element;
      else
        SumOffDiagonal[Row] += fabs(Element);
      MyFrobeniusNorm += pow(Element,2);
    }
  }   

  // analyze storage per row
  int MyMinNzPerRow( NumMyRows ), MinNzPerRow( NumMyRows );
  int MyMaxNzPerRow( 0 ), MaxNzPerRow( 0 );

  for( Row=0 ; Row<NumMyRows ; ++Row ) {
    if( NzPerRow[Row]<MyMinNzPerRow ) MyMinNzPerRow=NzPerRow[Row];
    if( NzPerRow[Row]>MyMaxNzPerRow ) MyMaxNzPerRow=NzPerRow[Row];
  }

  // a test to see if matrix is diagonally-dominant

  int MyDiagonalDominance( 0 ), DiagonalDominance( 0 );
  int MyWeakDiagonalDominance( 0 ), WeakDiagonalDominance( 0 );

  for( Row=0 ; Row<NumMyRows ; ++Row ) {
    if( fabs(Diagonal[Row])>SumOffDiagonal[Row] )
      ++MyDiagonalDominance;
    else if( fabs(Diagonal[Row])==SumOffDiagonal[Row] )
      ++MyWeakDiagonalDominance;
  }

  // reduction operations
  
  A.Comm().SumAll(&MyFrobeniusNorm, &FrobeniusNorm, 1);
  A.Comm().MinAll(&MyMinElement, &MinElement, 1);
  A.Comm().MaxAll(&MyMaxElement, &MaxElement, 1);
  A.Comm().MinAll(&MyMinAbsElement, &MinAbsElement, 1);
  A.Comm().MaxAll(&MyMaxAbsElement, &MaxAbsElement, 1);
  A.Comm().MinAll(&MyMinNzPerRow, &MinNzPerRow, 1);
  A.Comm().MaxAll(&MyMaxNzPerRow, &MaxNzPerRow, 1);
  A.Comm().SumAll(&MyDiagonalDominance, &DiagonalDominance, 1);
  A.Comm().SumAll(&MyWeakDiagonalDominance, &WeakDiagonalDominance, 1);

  // free memory

  delete [] Values;
  delete [] Indices;
  delete [] Diagonal;
  delete [] SumOffDiagonal;
  delete [] IsDiagonallyDominant;
  delete [] NzPerRow;

  // no output for MyPID>0; only proc 0 writes to os
  if( MyPID != 0 ) return true;

  os << "*** general Information about the matrix\n";
  os << "Number of Global Rows = " << NumGlobalRows << endl;
  os << "Number of Global Cols = " << NumGlobalCols << endl;
  os << "is the matrix square  = " <<
    ((NumGlobalRows==NumGlobalCols)?"yes":"no") << endl;
  os << "||A||_\\infty          = " << NormInf << endl;
  os << "||A||_1               = " << NormOne << endl;
  os << "||A||_F               = " << sqrt(FrobeniusNorm) << endl;
  os << "Number of nonzero diagonal entries = "
     << NumGlobalDiagonals
     << "( " << 1.0* NumGlobalDiagonals/NumGlobalRows*100
     << " %)\n";
  os << "Nonzero per row : min = " << MinNzPerRow 
     << " average = " << 1.0*NumGlobalNonzeros/NumGlobalRows
     << " max = " << MaxNzPerRow << endl; 
  os << "Maximum number of nonzero elements/row = " 
     << GlobalMaxNumEntries << endl;
  os << "min( a_{i,j} )      = " << MinElement << endl;
  os << "max( a_{i,j} )      = " << MaxElement << endl;
  os << "min( abs(a_{i,j}) ) = " << MinAbsElement << endl;
  os << "max( abs(a_{i,j}) ) = " << MaxAbsElement << endl;
  os << "Number of diagonal dominant rows        = " << DiagonalDominance 
     << " (" << 100.0*DiagonalDominance/NumGlobalRows << " % of total)\n";
  os << "Number of weakly diagonal dominant rows = " 
     << WeakDiagonalDominance 
     << " (" << 100.0*WeakDiagonalDominance/NumGlobalRows << " % of total)\n";

  os << "*** Information about the Trilinos storage\n";
  os << "Base Index                 = " << IndexBase << endl;
  os << "is storage optimized       = " 
     << ((StorageOptimized==true)?"yes":"no") << endl;
  os << "are indices global         = "
     << ((IndicesAreGlobal==true)?"yes":"no") << endl;
  os << "is matrix lower triangular = " 
     << ((LowerTriangular==true)?"yes":"no") << endl;
  os << "is matrix upper triangular = " 
     << ((UpperTriangular==true)?"yes":"no") << endl;
  os << "are there diagonal entries = " 
     <<  ((NoDiagonal==false)?"yes":"no") << endl;

  return true;

}
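A hedged caller sketch for CrsMatrixInfo above: it assembles a small diagonally dominant tridiagonal matrix, calls FillComplete so the indices become local (otherwise the guard at the top of the function returns early), and prints the report. The Epetra headers and the function definition are assumed available.

#include <Epetra_SerialComm.h>
#include <Epetra_Map.h>
#include <Epetra_CrsMatrix.h>
#include <iostream>

int main() {
  Epetra_SerialComm comm;
  const int n = 5;
  Epetra_Map map(n, 0, comm);
  Epetra_CrsMatrix A(Copy, map, 3);

  // Assemble a diagonally dominant tridiagonal matrix.
  for (int i = 0; i < n; ++i) {
    double vals[3] = {-1.0, 4.0, -1.0};
    int cols[3] = {i - 1, i, i + 1};
    int first = (i == 0) ? 1 : 0;              // skip column -1 in the first row
    int count = (i == n - 1) ? 2 : 3 - first;  // skip column n in the last row
    A.InsertGlobalValues(i, count, vals + first, cols + first);
  }
  A.FillComplete(); // transforms global indices to local ones

  return CrsMatrixInfo(A, std::cout) ? 0 : 1;
}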
Example #5
int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
    Teuchos::GlobalMPISession mpiSession(&argc, &argv, 0);
    Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
    Epetra_SerialComm Comm;
#endif
    typedef double                            ST;
    typedef Teuchos::ScalarTraits<ST>        SCT;
    typedef SCT::magnitudeType                MT;
    typedef Epetra_MultiVector                MV;
    typedef Epetra_Operator                   OP;
    typedef Belos::MultiVecTraits<ST,MV>     MVT;
    typedef Belos::OperatorTraits<ST,MV,OP>  OPT;
    using Teuchos::RCP;
    using Teuchos::rcp;


    bool success = true;
    string pass = "End Result: TEST PASSED";
    string fail = "End Result: TEST FAILED";



    bool verbose = false, proc_verbose = true;
    bool leftprec = false;      // left preconditioning or right.
    int frequency = -1;        // frequency of status test output.
    int blocksize = 1;         // blocksize
    int numrhs = 1;            // number of right-hand sides to solve for
    int maxrestarts = 15;      // maximum number of restarts allowed
    int maxsubspace = 25;      // maximum number of blocks the solver can use
                               // for the subspace
    char file_name[100];

    int nProcs, myPID ;
    Teuchos::RCP <Teuchos::ParameterList> pLUList ;        // ParaLU parameters
    Teuchos::ParameterList isoList ;        // Isorropia parameters
    Teuchos::ParameterList shyLUList ;    // ShyLU parameters
    string ipFileName = "ShyLU.xml";       // TODO : Accept as i/p

#ifdef HAVE_MPI
    nProcs = mpiSession.getNProc();
    myPID = Comm.MyPID();
#else
    nProcs = 1;
    myPID = 0;
#endif

    if (myPID == 0)
    {
        cout <<"Parallel execution: nProcs="<< nProcs << endl;
    }

    // =================== Read input xml file =============================
    pLUList = Teuchos::getParametersFromXmlFile(ipFileName);
    isoList = pLUList->sublist("Isorropia Input");
    shyLUList = pLUList->sublist("ShyLU Input");
    shyLUList.set("Outer Solver Library", "Belos");
    // Get matrix market file name
    string MMFileName = Teuchos::getParameter<string>(*pLUList, "mm_file");
    string prec_type = Teuchos::getParameter<string>(*pLUList, "preconditioner");
    int maxiters = Teuchos::getParameter<int>(*pLUList, "Outer Solver MaxIters");
    MT tol = Teuchos::getParameter<double>(*pLUList, "Outer Solver Tolerance");
    string rhsFileName = pLUList->get<string>("rhs_file", "");


    int maxFiles = pLUList->get<int>("Maximum number of files to read in", 1);
    int startFile = pLUList->get<int>("Number of initial file", 1);
    int file_number = startFile;

    if (myPID == 0)
    {
        cout << "Input :" << endl;
        cout << "ParaLU params " << endl;
        pLUList->print(std::cout, 2, true, true);
        cout << "Matrix market file name: " << MMFileName << endl;
    }

    if (maxFiles > 1)
    {
        MMFileName += "%d.mm";
        sprintf( file_name, MMFileName.c_str(), file_number );
    }
    else
    {
        strcpy( file_name, MMFileName.c_str());
    }

    // ==================== Read input Matrix ==============================
    Epetra_CrsMatrix *A;
    Epetra_MultiVector *b1;

    int err = EpetraExt::MatrixMarketFileToCrsMatrix(file_name, Comm, A);
    if (err != 0 && myPID == 0)
      {
        cout << "Matrix file could not be read in!!!, info = "<< err << endl;
        success = false;
      }

    int n = A->NumGlobalRows();

    // ==================== Read input rhs  ==============================
    if (rhsFileName != "" && maxFiles > 1)
    {
        rhsFileName += "%d.mm";
        sprintf( file_name, rhsFileName.c_str(), file_number );
    }
    else
    {
        strcpy( file_name, rhsFileName.c_str());
    }

    Epetra_Map vecMap(n, 0, Comm);
    bool allOneRHS = false;
    if (rhsFileName != "")
    {
        err = EpetraExt::MatrixMarketFileToMultiVector(file_name, vecMap, b1);
    }
    else
    {
        b1 = new Epetra_MultiVector(vecMap, 1, false);
        b1->Random();
        allOneRHS = true; // no rhs files given: reuse this one generated rhs for every matrix
    }

    Epetra_MultiVector x(vecMap, 1);

    // Partition the matrix with hypergraph partitioning and redisstribute
    Isorropia::Epetra::Partitioner *partitioner = new
                            Isorropia::Epetra::Partitioner(A, isoList, false);
    partitioner->partition();
    Isorropia::Epetra::Redistributor rd(partitioner);

    Epetra_CrsMatrix *newA;
    Epetra_MultiVector *newX, *newB;
    rd.redistribute(*A, newA);
    delete A;
    A = newA;
    RCP<Epetra_CrsMatrix> rcpA(A, false);

    rd.redistribute(x, newX);
    rd.redistribute(*b1, newB);
    delete b1;
    RCP<Epetra_MultiVector> rcpx (newX, false);
    RCP<Epetra_MultiVector> rcpb (newB, false);
    //OPT::Apply(*rcpA, *rcpx, *rcpb );


    Epetra_CrsMatrix *iterA = 0;
    Epetra_CrsMatrix *redistA = 0;
    Epetra_MultiVector *iterb1 = 0;
    Ifpack_Preconditioner *prec = NULL;
    ML_Epetra::MultiLevelPreconditioner *MLprec = NULL;
//#ifdef TIMING_OUTPUT
        Teuchos::Time ftime("solve time");
//#endif
    while(file_number < maxFiles+startFile)
    {

        if (prec_type.compare("ShyLU") == 0)
        {
            if (file_number == startFile)
            {
//#ifdef TIMING_OUTPUT
        ftime.start();
//#endif
                prec = new Ifpack_ShyLU(A);
#ifdef HAVE_IFPACK_DYNAMIC_FACTORY
                Teuchos::ParameterList shyluParameters;
                shyluParameters.set<Teuchos::ParameterList>("ShyLU list", shyLUList);
                prec->SetParameters(shyluParameters);
#else
                prec->SetParameters(shyLUList);
#endif
                prec->Initialize();
//#ifdef TIMING_OUTPUT
        ftime.stop();
//#endif
            }
//#ifdef TIMING_OUTPUT
        ftime.start();
//#endif
            prec->Compute();
//#ifdef TIMING_OUTPUT
        ftime.stop();
//#endif
            //cout << " Going to set it in solver" << endl ;
            //solver.SetPrecOperator(prec);
            //cout << " Done setting the solver" << endl ;
        }
        else if (prec_type.compare("ILU") == 0)
        {
            prec = new Ifpack_ILU(A);
            prec->Initialize();
            prec->Compute();
            //solver.SetPrecOperator(prec);
        }
        else if (prec_type.compare("ILUT") == 0)
        {
            prec = new Ifpack_ILUT(A);
            prec->Initialize();
            prec->Compute();
            //solver.SetPrecOperator(prec);
        }
        else if (prec_type.compare("ML") == 0)
        {
            Teuchos::ParameterList mlList; // TODO : Take it from i/p
            MLprec = new ML_Epetra::MultiLevelPreconditioner(*A, mlList, true);
            //solver.SetPrecOperator(MLprec);
        }

        RCP<Ifpack_Preconditioner> rcpPrec(prec, false);
        RCP<Belos::EpetraPrecOp> belosPrec = rcp(new Belos::EpetraPrecOp(rcpPrec));

        const int NumGlobalElements = rcpb->GlobalLength();
        Teuchos::ParameterList belosList;
         //belosList.set( "Flexible Gmres", true );
        belosList.set( "Num Blocks", maxsubspace );// Maximum number of blocks in Krylov factorization
        belosList.set( "Block Size", blocksize );  // Blocksize to be used by iterative solver
        belosList.set( "Maximum Iterations", maxiters ); // Maximum number of iterations allowed
        belosList.set( "Maximum Restarts", maxrestarts );// Maximum number of restarts allowed
        belosList.set( "Convergence Tolerance", tol );   // Relative convergence tolerance requested
        if (numrhs > 1) {
            belosList.set( "Show Maximum Residual Norm Only", true );  // Show only the maximum residual norm
        }
        if (verbose) {
            belosList.set( "Verbosity", Belos::Errors + Belos::Warnings +
                           Belos::TimingDetails + Belos::StatusTestDetails );
            if (frequency > 0)
                belosList.set( "Output Frequency", frequency );
        }
        else {
            belosList.set( "Verbosity", Belos::Errors + Belos::Warnings );
        }
        //
        // *******Construct a preconditioned linear problem********
        //

        rcpx->PutScalar(0.0);
        RCP<Belos::LinearProblem<double,MV,OP> > problem =
            rcp( new Belos::LinearProblem<double,MV,OP>( rcpA, rcpx, rcpb ) );
        if (leftprec) {
            problem->setLeftPrec( belosPrec );
        }
        else {
            problem->setRightPrec( belosPrec );
        }
        bool set = problem->setProblem();
        if (set == false) {
            if (proc_verbose)
            {
                cout << endl << "ERROR:  Belos::LinearProblem failed to set up correctly!" << endl;
            }
            cout << fail << endl;
            success = false;
            return -1;
        }

        // Create an iterative solver manager.
        RCP< Belos::SolverManager<double,MV,OP> > solver =
            rcp( new Belos::BlockGmresSolMgr<double,MV,OP>(problem,
                                                           rcp(&belosList,false)));

        //
        // *******************************************************************
        // *************Start the block Gmres iteration*************************
        // *******************************************************************
        //
        if (proc_verbose)
        {
            cout << std::endl << std::endl;
            cout << "Dimension of matrix: " << NumGlobalElements << endl;
            cout << "Number of right-hand sides: " << numrhs << endl;
            cout << "Block size used by solver: " << blocksize << endl;
            cout << "Number of restarts allowed: " << maxrestarts << endl;
            cout << "Max number of Gmres iterations per restart cycle: " <<
                        maxiters << endl;
            cout << "Relative residual tolerance: " << tol << endl;
            cout << endl;
        }

        if(tol > 1e-5)
          {
            success = false;
          }



        //
        // Perform solve
        //
//#ifdef TIMING_OUTPUT
        ftime.start();
//#endif
        // mfh 26 Mar 2015: Don't introduce a variable (like 'ret')
        // unless you plan to use it.  The commented-out code causes a
        // build warning.
        //
        //Belos::ReturnType ret = solver->solve();
        solver->solve ();
//#ifdef TIMING_OUTPUT
        ftime.stop();
//#endif
        //
        // Get the number of iterations for this solve.
        //
        int numIters = solver->getNumIters();
        if (proc_verbose)
        {
            cout << "Number of iterations performed for this solve: " <<
                     numIters << endl;
        }
        //
        // Compute actual residuals.
        //
        //bool badRes = false; // unused
        std::vector<double> actual_resids( numrhs );
        std::vector<double> rhs_norm( numrhs );
        Epetra_MultiVector resid((*rcpA).RowMap(), numrhs);
        OPT::Apply( *rcpA, *rcpx, resid );
        MVT::MvAddMv( -1.0, resid, 1.0, *rcpb, resid );
        MVT::MvNorm( resid, actual_resids );
        MVT::MvNorm( *rcpb, rhs_norm );
        if (proc_verbose)
        {
            cout<< "------ Actual Residuals (normalized) -------"<<endl;
            for ( int i=0; i<numrhs; i++)
            {
                double actRes = actual_resids[i]/rhs_norm[i];
                std::cout<<"Problem "<<i<<" : \t"<< actRes <<std::endl;
                if (actRes > tol) {
                  //badRes = true; // unused
                  success = false;
                }
            }
        }

        file_number++;
        if (file_number >= maxFiles+startFile)
        {
          break;
        }
        else
        {
            sprintf(file_name, MMFileName.c_str(), file_number);

            if (redistA != NULL) delete redistA;
            // Load the new matrix
            err = EpetraExt::MatrixMarketFileToCrsMatrix(file_name,
                            Comm, iterA);
            if (err != 0)
            {
                if (myPID == 0)
                  {
                    cout << "Could not open file: "<< file_name << endl;

                  }
                success = false;
            }
            else
            {
                rd.redistribute(*iterA, redistA);
                delete iterA;
                InitMatValues(*redistA, A);
            }

            // Load the new rhs
            if (!allOneRHS)
            {
                sprintf(file_name, rhsFileName.c_str(), file_number);

                if (iterb1 != NULL) delete iterb1;
                err = EpetraExt::MatrixMarketFileToMultiVector(file_name,
                        vecMap, b1);
                if (err != 0)
                {
                    if (myPID==0)
                      {
                        cout << "Could not open file: "<< file_name << endl;
                        success = false;
                      }
                }
                else
                {
                    rd.redistribute(*b1, iterb1);
                    delete b1;
                    InitMVValues( *iterb1, newB );
                }
            }
        }
    }
//#ifdef TIMING_OUTPUT
        cout << "Time to solve: " << ftime.totalElapsedTime() << endl;
        if(success)
          {
            cout << pass << endl;
          }
        else
          {
            cout << fail << endl;
          }

//#endif
    if (redistA != NULL) delete redistA;
    if (iterb1 != NULL) delete iterb1;


    if (prec_type.compare("ML") == 0)
    {
        delete MLprec;
    }
    else
    {
        delete prec;
    }
    delete newX;
    delete newB;
    delete A;
    delete partitioner;
}
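The file loop above refreshes the matrix and right-hand side through InitMatValues and InitMVValues, which this excerpt does not define. A plausible sketch of what they do, assuming every file shares the sparsity pattern of the first one: copy the new values into the already-redistributed objects so the solver and preconditioner keep working with the same graph.

#include <Epetra_CrsMatrix.h>
#include <Epetra_MultiVector.h>

// Copy values row by row; assumes identical row maps and sparsity patterns.
int InitMatValues(const Epetra_CrsMatrix &newA, Epetra_CrsMatrix *A) {
  int numEntries;
  double *values;
  int *indices;
  for (int i = 0; i < newA.NumMyRows(); ++i) {
    EPETRA_CHK_ERR(newA.ExtractMyRowView(i, numEntries, values, indices));
    EPETRA_CHK_ERR(A->ReplaceMyValues(i, numEntries, values, indices));
  }
  return 0;
}

// Multivectors with matching maps can be copied by plain assignment.
int InitMVValues(const Epetra_MultiVector &newB, Epetra_MultiVector *B) {
  *B = newB;
  return 0;
}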
Example #6
int main(int argc, char *argv[]) {

#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm comm (MPI_COMM_WORLD);
#else
  Epetra_SerialComm comm;
#endif

  int MyPID = comm.MyPID();

  bool verbose = false;
  bool verbose1 = false; 
  // Check if we should print results to standard out
  if (argc > 1) {
    if ((argv[1][0] == '-') && (argv[1][1] == 'v')) {
      verbose1 = true;
      if (MyPID==0) verbose = true;
    }
  }

  if (verbose1) cout << comm << endl;


  // Uncomment the next three lines to debug in mpi mode
  //int tmp;
  //if (MyPID==0) cin >> tmp;
  //comm.Barrier();

  Epetra_CrsMatrix * A; 
  EPETRA_CHK_ERR(EpetraExt::MatlabFileToCrsMatrix("A.dat", comm, A));

  Epetra_Vector  x(A->OperatorDomainMap()); 
  Epetra_Vector  b(A->OperatorRangeMap());
  x.Random();
  A->Apply(x,b); // Generate RHS from x
  Epetra_Vector xx(x); // Copy x to xx for later use

  Epetra_LinearProblem problem(A, &x, &b);
  // Construct a solver object for this problem

  AztecOO solver(problem);
  solver.SetAztecOption(AZ_precond, AZ_none);
  if (!verbose1) solver.SetAztecOption(AZ_output, AZ_none);
  solver.SetAztecOption(AZ_kspace, A->NumGlobalRows());
  AztecOO_Operator AOpInv(&solver, A->NumGlobalRows());
  Epetra_InvOperator AInvOp(&AOpInv);

  EPETRA_CHK_ERR(EpetraExt::OperatorToMatlabFile("Ainv.dat", AInvOp));

  comm.Barrier();

  Epetra_CrsMatrix * AInv; 
  EPETRA_CHK_ERR(EpetraExt::MatlabFileToCrsMatrix("Ainv.dat", comm, AInv));

  EPETRA_CHK_ERR(AInv->Apply(b,x));

  EPETRA_CHK_ERR(x.Update(1.0, xx, -1.0));
  double residual = 0.0;
  EPETRA_CHK_ERR(x.Norm2(&residual));
  if (verbose) cout << "Norm of difference between computed x and exact x = " << residual << endl;
  int ierr = checkValues(residual,0.0,"Norm of difference between computed A1x1 and A1x1 from file", verbose);

  
  delete A;
  delete AInv;


#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return(ierr);
}
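checkValues is called above but not defined in this excerpt. A minimal sketch of such a helper (an assumption, not the EpetraExt original): compare two doubles against a loose relative tolerance and report mismatches.

#include <cmath>
#include <iostream>

int checkValues(double computed, double expected,
                const char *message = "", bool verbose = false) {
  double scale = std::fabs(expected) > 1.0 ? std::fabs(expected) : 1.0;
  if (std::fabs(computed - expected) / scale > 1e-4) {
    if (verbose)
      std::cerr << "Check failed: " << message << " (got " << computed
                << ", expected " << expected << ")" << std::endl;
    return 1;
  }
  return 0;
}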
Example #7
int main(int argc, char** argv) {

  int rc=0, fail = 0;
#ifdef HAVE_EPETRAEXT
  bool verbose = false;
  int localProc = 0;
//   std::string *fstr;

#ifdef HAVE_MPI
  int numProcs;
  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &localProc);
  MPI_Comm_size(MPI_COMM_WORLD, &numProcs);
  const Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  const Epetra_SerialComm Comm;
#endif

  Teuchos::CommandLineProcessor clp(false,true);

  // --f=fileName provides a different matrix market file for input
  // --v will print out the partitioning (small files only)

  std::string *inputFile = new std::string("simple.mtx");
  bool runAll = false;

  clp.setOption( "f", inputFile,
		"Name of input matrix market file");
  clp.setOption( "run-all", "abort", &runAll,
		"Don't abort if one test fails, run all of them.");
  clp.setOption( "v", "q", &verbose,
		"Display matrix before and after partitioning.");

  Teuchos::CommandLineProcessor::EParseCommandLineReturn parse_return =
    clp.parse(argc,argv);

  if( parse_return == Teuchos::CommandLineProcessor::PARSE_HELP_PRINTED){
#ifdef HAVE_MPI
    MPI_Finalize();
#endif
    return 0;
  }
  if( parse_return != Teuchos::CommandLineProcessor::PARSE_SUCCESSFUL ) {
#ifdef HAVE_MPI
    MPI_Finalize();
#endif
    return 1;
  }

  const char *fname = inputFile->c_str();

  // Read in the matrix market file and distribute its rows across the
  // processes.
  //
  // This reader uses the default Epetra_Map for number of rows for the
  // RowMap() and for the RangeMap().  For non-square matrices it uses
  // the default Epetra_Map for the number of columns for the DomainMap(),
  // otherwise it uses the RowMap().
  //
  // The maps can be specified with other versions of MMFtoCrsMatrix().


  Epetra_CrsMatrix *matrixPtr;
  rc = EpetraExt::MatrixMarketFileToCrsMatrix(fname, Comm, matrixPtr);
  if (rc < 0){
    if (localProc==0){
      std::cout << "error reading input file" << std::endl << "FAIL" << std::endl;
    }
    exit(1);
  }

  bool square = (matrixPtr->NumGlobalRows() == matrixPtr->NumGlobalCols());
  // If matrix is square, determine if it's symmetric  TODO


  // Run some partitioning tests
  //   Test graph and hypergraph partitioning
  //   Test with and without application supplied weights
  //   Test the Epetra_CrsMatrix interface and also the Epetra_CrsGraph interface
  //   Do tests where the vertex or edge weights vary widely

  Teuchos::RCP<Epetra_CrsMatrix> testm = Teuchos::rcp(matrixPtr);
  int failures = 0;

#ifdef SHORT_TEST
  fail = run_test(testm,
	     verbose,
	     false,                 // do not test #partitions < #processes
	     NO_APPLICATION_SUPPLIED_WEIGHTS,
	     NO_APPLICATION_SUPPLIED_WEIGHTS,
	     EPETRA_CRSGRAPH);

  CHECK_FAILED();
  goto Report;

#else

  if (square){
#ifdef HAVE_ISORROPIA_ZOLTAN
    fail = run_test(testm,            // test matrix
	       verbose,               // display matrix before and after?
	       false,                 // do not test #partitions < #processes
	       GRAPH_PARTITIONING,    // perform zoltan graph partitioning
	       NO_APPLICATION_SUPPLIED_WEIGHTS,
	       NO_APPLICATION_SUPPLIED_WEIGHTS,
	       EPETRA_LINEARPROBLEM); // use linear problem interface of isorropia

    CHECK_FAILED();

    fail = run_test(testm,
	       verbose,            // draw graph before and after partitioning?
	       false,                 // do not test #partitions < #processes
	       HYPERGRAPH_PARTITIONING,      // do graph partitioning
	     NO_APPLICATION_SUPPLIED_WEIGHTS,
	     NO_APPLICATION_SUPPLIED_WEIGHTS,
	       EPETRA_CRSMATRIX);       // use the Epetra_CrsMatrix interface

    CHECK_FAILED();

    fail = run_test(testm,
	       verbose,
	       true,                 // test #partitions < #processes
	       GRAPH_PARTITIONING,
	     NO_APPLICATION_SUPPLIED_WEIGHTS,
	     NO_APPLICATION_SUPPLIED_WEIGHTS,
	       EPETRA_CRSMATRIX);

    CHECK_FAILED();

    fail = run_test(testm,
	       verbose,
	       false,                 // do not test #partitions < #processes
	       GRAPH_PARTITIONING,
	     NO_APPLICATION_SUPPLIED_WEIGHTS,
	     NO_APPLICATION_SUPPLIED_WEIGHTS,
	       EPETRA_LINEARPROBLEM);

    CHECK_FAILED();

    fail = run_test(testm,
	       verbose,
	       false,                 // do not test #partitions < #processes
	       GRAPH_PARTITIONING,
	       NO_APPLICATION_SUPPLIED_WEIGHTS,
	       NO_APPLICATION_SUPPLIED_WEIGHTS,
	       EPETRA_ROWMATRIX);

    CHECK_FAILED();
#else
  fail = 0;
  if (localProc == 0){
    std::cout << "Test not run because it requires Zoltan (HAVE_ISORROPIA_ZOLTAN)" << std::endl;
  }
#endif



#ifdef HAVE_ISORROPIA_ZOLTAN

  fail = run_test(testm,
	     verbose,
	     true,                 // test #partitions < #processes
	     HYPERGRAPH_PARTITIONING,
	     NO_APPLICATION_SUPPLIED_WEIGHTS,
	     NO_APPLICATION_SUPPLIED_WEIGHTS,
	     EPETRA_CRSGRAPH);

   CHECK_FAILED();

  fail = run_test(testm,
	     verbose,
	     false,                 // do not test #partitions < #processes
	     HYPERGRAPH_PARTITIONING,
	     NO_APPLICATION_SUPPLIED_WEIGHTS,
	     NO_APPLICATION_SUPPLIED_WEIGHTS,
	     EPETRA_ROWMATRIX);

   CHECK_FAILED();

  fail = run_test(testm,
	     verbose,
	     false,                 // do not test #partitions < #processes
	     HYPERGRAPH_PARTITIONING,
	     NO_APPLICATION_SUPPLIED_WEIGHTS,
	     NO_APPLICATION_SUPPLIED_WEIGHTS,
	     EPETRA_LINEARPROBLEM);

   CHECK_FAILED();

#endif
  }
#endif // SHORT_TEST

#else
  fail = 0;
  if (localProc == 0){
    std::cout << "Test not run because it requires EPETRA_EXT" << std::endl;
  }
#endif

Report:

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  if (localProc == 0){
    if (failures){
      if (failures > 1)
	std::cout << std::endl << failures << " FAILURES" << std::endl;
      else
	std::cout << std::endl << "1 FAILURE" << std::endl;

      if (!runAll){
	std::cout <<
       "(Use option --run-all if you do not want this test to abort on failure)" << std::endl;
      }
    }
    else
      std::cout << std::endl << "PASS" << std::endl;
  }

  return fail;
}
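run_test and CHECK_FAILED come from the surrounding test harness and are not shown here. Judging from how fail, failures, runAll, and the Report label are used above, CHECK_FAILED plausibly expands to something like this (an assumption):

// Tally a failed run_test and, unless --run-all was given, jump to Report.
#define CHECK_FAILED() {      \
  if (fail) {                 \
    failures++;               \
    if (!runAll) goto Report; \
  }                           \
  fail = 0;                   \
}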
Example #8
int check(Epetra_CrsMatrix& A, int NumMyRows1, int NumGlobalRows1, int NumMyNonzeros1,
          int NumGlobalNonzeros1, int* MyGlobalElements, bool verbose)
{  
  (void)MyGlobalElements;
  int ierr = 0, forierr = 0;
  int NumGlobalIndices;
  int NumMyIndices;
  int* MyViewIndices = 0;
  int* GlobalViewIndices = 0;
  double* MyViewValues = 0;
  double* GlobalViewValues = 0;
  int MaxNumIndices = A.Graph().MaxNumIndices();
  int* MyCopyIndices = new int[MaxNumIndices];
  int* GlobalCopyIndices = new int[MaxNumIndices];
  double* MyCopyValues = new double[MaxNumIndices];
  double* GlobalCopyValues = new double[MaxNumIndices];

  // Test query functions

  int NumMyRows = A.NumMyRows();
  if (verbose) cout << "\n\nNumber of local Rows = " << NumMyRows << endl<< endl;

  EPETRA_TEST_ERR(!(NumMyRows==NumMyRows1),ierr);

  int NumMyNonzeros = A.NumMyNonzeros();
  if (verbose) cout << "\n\nNumber of local Nonzero entries = " << NumMyNonzeros << endl<< endl;

  EPETRA_TEST_ERR(!(NumMyNonzeros==NumMyNonzeros1),ierr);

  int NumGlobalRows = A.NumGlobalRows();
  if (verbose) cout << "\n\nNumber of global Rows = " << NumGlobalRows << endl<< endl;

  EPETRA_TEST_ERR(!(NumGlobalRows==NumGlobalRows1),ierr);

  int NumGlobalNonzeros = A.NumGlobalNonzeros();
  if (verbose) cout << "\n\nNumber of global Nonzero entries = " << NumGlobalNonzeros << endl<< endl;

  EPETRA_TEST_ERR(!(NumGlobalNonzeros==NumGlobalNonzeros1),ierr);

  // GlobalRowView should be illegal (since we have local indices)

  EPETRA_TEST_ERR(!(A.ExtractGlobalRowView(A.RowMap().MaxMyGID(), NumGlobalIndices, GlobalViewValues, GlobalViewIndices)==-2),ierr);

  // Other binary tests

  EPETRA_TEST_ERR(A.NoDiagonal(),ierr);
  EPETRA_TEST_ERR(!(A.Filled()),ierr);
  EPETRA_TEST_ERR(!(A.MyGRID(A.RowMap().MaxMyGID())),ierr);
  EPETRA_TEST_ERR(!(A.MyGRID(A.RowMap().MinMyGID())),ierr);
  EPETRA_TEST_ERR(A.MyGRID(1+A.RowMap().MaxMyGID()),ierr);
  EPETRA_TEST_ERR(A.MyGRID(-1+A.RowMap().MinMyGID()),ierr);
  EPETRA_TEST_ERR(!(A.MyLRID(0)),ierr);
  EPETRA_TEST_ERR(!(A.MyLRID(NumMyRows-1)),ierr);
  EPETRA_TEST_ERR(A.MyLRID(-1),ierr);
  EPETRA_TEST_ERR(A.MyLRID(NumMyRows),ierr);

  forierr = 0;
  for (int i = 0; i < NumMyRows; i++) {
    int Row = A.GRID(i);
    A.ExtractGlobalRowCopy(Row, MaxNumIndices, NumGlobalIndices, GlobalCopyValues, GlobalCopyIndices);
    A.ExtractMyRowView(i, NumMyIndices, MyViewValues, MyViewIndices); // this is where the problem comes from
    forierr += !(NumGlobalIndices == NumMyIndices);
    for(int j = 1; j < NumMyIndices; j++) {
      forierr += !(MyViewIndices[j-1] < MyViewIndices[j]); // this is where the test fails
    }
    for(int j = 0; j < NumGlobalIndices; j++) {
      forierr += !(GlobalCopyIndices[j] == A.GCID(MyViewIndices[j]));
      forierr += !(A.LCID(GlobalCopyIndices[j]) == MyViewIndices[j]);
      forierr += !(GlobalCopyValues[j] == MyViewValues[j]);
    }
  }
  EPETRA_TEST_ERR(forierr,ierr);

  forierr = 0;
  for (int i = 0; i < NumMyRows; i++) {
    int Row = A.GRID(i);
    A.ExtractGlobalRowCopy(Row, MaxNumIndices, NumGlobalIndices, GlobalCopyValues, GlobalCopyIndices);
    A.ExtractMyRowCopy(i, MaxNumIndices, NumMyIndices, MyCopyValues, MyCopyIndices);
    forierr += !(NumGlobalIndices == NumMyIndices);
    for (int j = 1; j < NumMyIndices; j++)
      forierr += !(MyCopyIndices[j-1] < MyCopyIndices[j]);
    for (int j = 0; j < NumGlobalIndices; j++) {
      forierr += !(GlobalCopyIndices[j] == A.GCID(MyCopyIndices[j]));
      forierr += !(A.LCID(GlobalCopyIndices[j]) == MyCopyIndices[j]);
      forierr += !(GlobalCopyValues[j] == MyCopyValues[j]);
    }

  }
  EPETRA_TEST_ERR(forierr,ierr);

  delete [] MyCopyIndices;
  delete [] GlobalCopyIndices;
  delete [] MyCopyValues;
  delete [] GlobalCopyValues;

  if (verbose) cout << "\n\nRows sorted check OK" << endl<< endl;

  return (ierr);
}
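EPETRA_TEST_ERR is supplied by the Epetra test harness. A simplified sketch of its behavior, consistent with the usage above (an assumption, not the original macro): evaluate the expression, report a nonzero result with its location, and bump the error counter.

#include <iostream>

#define EPETRA_TEST_ERR(a, ierr)                        \
  do {                                                  \
    int epetra_err = (a);                               \
    if (epetra_err != 0) {                              \
      std::cerr << "Non zero error code " << epetra_err \
                << ", file: " << __FILE__               \
                << ", line: " << __LINE__ << std::endl; \
      (ierr)++;                                         \
    }                                                   \
  } while (0)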
Example #9
int main(int argc, char** argv)
{

#ifdef HAVE_MPI
  Teuchos::GlobalMPISession mpiSession(&argc, &argv, 0);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif


  bool success = true;
  string pass = "End Result: TEST PASSED";
  string fail = "End Result: TEST FAILED";

  int myPID = Comm.MyPID();
  if(myPID == 0)
    {
      cout << "Starting Epetra interface test" << endl;
    }

  /*----------------Load a test matrix---------------*/
  string matrixFileName = "wathenSmall.mtx";
  Epetra_CrsMatrix *A;
  Epetra_CrsMatrix *AHat;
  Epetra_MultiVector *b;
  Epetra_MultiVector *bHat;
  Epetra_MultiVector *x;
  int n = 0;

  //Get Matrix
  int err = EpetraExt::MatrixMarketFileToCrsMatrix(matrixFileName.c_str(), Comm, A);
  if(err!=0 && myPID ==0)
    {
      cout << "Error reading matrix file, info = " << err << endl;
      cout << fail << endl;
      exit(1);
    }
  n = A->NumGlobalRows();

  //Make b vector

  Epetra_Map vecMap(n,0,Comm);
  b = new Epetra_MultiVector(vecMap,1,false);
  b->Random();
  x = new Epetra_MultiVector(vecMap,1,false);
  x->Random();

  cout << "Epetra matrices loaded" << endl;


  /*-----------------have_interface-----------------*/
  /*---The have_interface check verifies that the whole parameter list makes sense---*/
  Teuchos::RCP <Teuchos::ParameterList> pLUList;
  string pListFileName = "ShyLU_epetra_interface.xml";
  pLUList = Teuchos::getParametersFromXmlFile(pListFileName);


  /*----------------partitioning_interface--------------*/
  /*------Check the Epetra matrix with the partitioning interface------*/


  //Isorropia Test - graph/Parmetis
  pLUList->set("Partitioning Package","Isorropia");
  Teuchos::ParameterList ptemp;
  ptemp = pLUList->sublist("Isorropia Input");

  Teuchos::ParameterList pptemp;
  pptemp = ptemp.sublist("Zoltan");
  pptemp.set("GRAPH_PACKAGE", "Parmetis");
  pptemp.set("DEBUG_LEVEL", "1");

  ptemp.set("partitioning method", "graph");
  ptemp.set("Zoltan", pptemp);
  pLUList->set("Isorropia Input", ptemp);

  cout << " \n\n--------------------BIG BREAK --------------\n\n";
  Teuchos::writeParameterListToXmlOStream(*pLUList, std::cout);


  ShyLU::PartitionInterface<Epetra_CrsMatrix, Epetra_MultiVector> partI(A, pLUList.get());
  partI.partition();
  AHat = partI.reorderMatrix();
  bHat = partI.reorderVector(b);

  EpetraExt::RowMatrixToMatlabFile("Epetra_Isorropia_Parmetis.mat", *AHat);


   cout << "Done with graph - parmetis" << endl;

   /*

   //Isorropia Test - Graph/PT-Scotch
  pLUList->set("Partitioning Package","Isorropia");
  ptemp = pLUList->sublist("Isorropia Input");

  //Teuchos::ParameterList pptemp;
  pptemp = ptemp.sublist("Zoltan");
  pptemp.set("GRAPH_PACKAGE", "scotch");
  pptemp.set("DEBUG_LEVEL", "1");


  ptemp.set("partitioning method", "graph");
  ptemp.set("Zoltan", pptemp);
  pLUList->set("Isorropia Input", ptemp);

  cout << " \n\n--------------------BIG BREAK --------------\n\n";
  Teuchos::writeParameterListToXmlOStream(*pLUList, std::cout);

  PartitionInterface<Epetra_CrsMatrix, Epetra_MultiVector> partI2(A, pLUList.get());
  partI2.partition();
  AHat = partI2.reorderMatrix();
  bHat = partI2.reorderVector(b);
  cout << "Done with graph - pt-scotch" << endl;

   */

  //Zoltan2 Test

#if defined(HAVE_SHYLU_ZOLTAN2)

   //Zoltan2 Test - graph/ParMetis
  pLUList->set("Partitioning Package","Zoltan2");
  ptemp = pLUList->sublist("Zoltan2 Input");
  ptemp.set("algorithm", "parmetis");
  ptemp.set("debug_level", "detailed_status");
  pLUList->set("Zoltan2 Input", ptemp);


  cout << " \n\n--------------------BIG BREAK --------------\n\n";
  Teuchos::writeParameterListToXmlOStream(*pLUList, std::cout);

  ShyLU::PartitionInterface<Epetra_CrsMatrix, Epetra_MultiVector> partI3(A, pLUList.get());
  partI3.partition();
  AHat = partI3.reorderMatrix();
  bHat = partI3.reorderVector(b);
  cout << "Done with graph - parmetis" << endl;

  EpetraExt::RowMatrixToMatlabFile("Epetra_Zoltan2_Parmetis.mat", *AHat);

#endif



  /*----------------------Direct Solver Interfaces----------------*/
  //#ifdef HAVE_SHYLU_AMESOS

  //Amesos - klu
  pLUList->set("Direct Solver Package", "Amesos");
  ptemp = pLUList->sublist("Amesos Input");
  pptemp = ptemp.sublist("Amesos_Klu Input");


  pptemp.set("PrintTiming", true);
  pptemp.set("PrintStatus", true);
  ptemp.set("Solver", "Amesos_Klu");
  ptemp.set("Amesos_Klu Input", pptemp);
  pLUList->set("Amesos Input", ptemp);


  cout << " \n\n--------------------BIG BREAK --------------\n\n";
  Teuchos::writeParameterListToXmlOStream(*pLUList, std::cout);

  ShyLU::DirectSolverInterface<Epetra_CrsMatrix, Epetra_MultiVector> directsolver(A, pLUList.get());

  directsolver.factor();
  directsolver.solve(b,x);

  cout << "Done with Amesos-KLU" << endl;

  //#endif

  //Amesos2 -klu2
#ifdef HAVE_SHYLU_AMESOS2

  pLUList->set("Direct Solver Package", "Amesos2");
  ptemp = pLUList->sublist("Amesos2 Input");
  //pptemp = ptemp.sublist("Amesos_Klu Input");


  pptemp.set("PrintTiming", true);
  pptemp.set("PrintStatus", true);
  ptemp.set("Solver", "KLU2");
  //ptemp.set("Amesos_Klu Input", pptemp);
  pLUList->set("Amesos2 Input", ptemp);


  cout << " \n\n--------------------BIG BREAK --------------\n\n";
  Teuchos::writeParameterListToXmlOStream(*pLUList, std::cout);


  

  ShyLU::DirectSolverInterface<Epetra_CrsMatrix, Epetra_MultiVector> directsolver2(A, pLUList.get());

  directsolver2.factor();
  directsolver2.solve(b,x);

  cout << "Done with Amesos-KLU2" << endl;

#endif

  if(success)
    {
      cout << pass << endl;
    }
  else
    {
      cout << fail << endl;
    }

}
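The interface test above never verifies the direct solves. A small residual check one could append, sketched under the assumption that x holds the solution of A x = b after directsolver.solve(b, x):

#include <Epetra_CrsMatrix.h>
#include <Epetra_MultiVector.h>

// Return ||b - A*x||_2, assuming one-column multivectors.
double residualNorm(const Epetra_CrsMatrix &A,
                    const Epetra_MultiVector &x,
                    const Epetra_MultiVector &b) {
  Epetra_MultiVector r(b.Map(), b.NumVectors());
  A.Multiply(false, x, r);  // r = A * x
  r.Update(1.0, b, -1.0);   // r = b - A * x
  double norm = 0.0;
  r.Norm2(&norm);
  return norm;
}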
Example #10
int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
    Teuchos::GlobalMPISession mpiSession(&argc, &argv, 0);
    Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
    Epetra_SerialComm Comm;
#endif
    int nProcs, myPID ;
    Teuchos::ParameterList pLUList ;        // ParaLU parameters
    Teuchos::ParameterList isoList ;        // Isorropia parameters
    string ipFileName = "ShyLU.xml";       // TODO : Accept as i/p

#ifdef HAVE_MPI
    nProcs = mpiSession.getNProc();
#else
    nProcs = 1;
#endif
    myPID = Comm.MyPID();

    if (myPID == 0)
    {
        cout <<"Parallel execution: nProcs="<< nProcs << endl;
    }

    // =================== Read input xml file =============================
    Teuchos::updateParametersFromXmlFile(ipFileName, &pLUList);
    isoList = pLUList.sublist("Isorropia Input");
    // Get matrix market file name
    string MMFileName = Teuchos::getParameter<string>(pLUList, "mm_file");
    string prec_type = Teuchos::getParameter<string>(pLUList, "preconditioner");

    if (myPID == 0)
    {
        cout << "Input :" << endl;
        cout << "ParaLU params " << endl;
        pLUList.print(std::cout, 2, true, true);
        cout << "Matrix market file name: " << MMFileName << endl;
    }

    // ==================== Read input Matrix ==============================
    Epetra_CrsMatrix *A;

    int err = EpetraExt::MatrixMarketFileToCrsMatrix(MMFileName.c_str(), Comm, A);
    //EpetraExt::MatlabFileToCrsMatrix(MMFileName.c_str(), Comm, A);
    if (err != 0 && myPID == 0)
        cout << "Error reading matrix file, info = " << err << endl;
    cout <<"Done reading the matrix"<< endl;
    int n = A->NumGlobalRows();
    cout <<"n="<< n << endl;

    // Create input vectors
    Epetra_Map vecMap(n, 0, Comm);
    Epetra_MultiVector x(vecMap, 1);
    Epetra_MultiVector b(vecMap, 1, false);
    b.PutScalar(1.0); // TODO : Accept it as input

    // Partition the matrix with hypergraph partitioning and redistribute
    Isorropia::Epetra::Partitioner *partitioner = new
                            Isorropia::Epetra::Partitioner(A, isoList, false);
    partitioner->partition();
    Isorropia::Epetra::Redistributor rd(partitioner);

    Epetra_CrsMatrix *newA;
    Epetra_MultiVector *newX, *newB; 
    rd.redistribute(*A, newA);
    delete A;
    A = newA;

    rd.redistribute(x, newX);
    rd.redistribute(b, newB);

    Amesos Factory;
    const char* SolverType = "Amesos_Klu";
    bool IsAvailable = Factory.Query(SolverType);
    if (!IsAvailable && myPID == 0)
        cout << SolverType << " is not available in this Amesos build" << endl;

    Epetra_LinearProblem *LP = new Epetra_LinearProblem();
    LP->SetOperator(A);
    LP->SetLHS(newX);
    LP->SetRHS(newB);
    Amesos_BaseSolver *Solver = Factory.Create(SolverType, *LP);


    Solver->SymbolicFactorization();
    Teuchos::Time ftime("setup time");
    ftime.start();
    Solver->NumericFactorization();
    cout << "Numeric Factorization" << endl;
    Solver->Solve();
    cout << "Solve done" << endl;

    ftime.stop();
    cout << "Time to setup: " << ftime.totalElapsedTime() << endl;

    // compute ||Ax - b||
    double Norm;
    Epetra_MultiVector Ax(vecMap, 1);

    Epetra_MultiVector *newAx; 
    rd.redistribute(Ax, newAx);
    A->Multiply(false, *newX, *newAx);
    newAx->Update(1.0, *newB, -1.0);
    newAx->Norm2(&Norm);
    double ANorm = A->NormOne();

    cout << "|Ax-b |/|A| = " << Norm/ANorm << endl;

    delete newAx;
    delete newX;
    delete newB;
    delete A;
    delete partitioner;
}
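The timer above starts only after SymbolicFactorization and stops after Solve, so the reported "setup" time also contains the solve. A sketch that times each Amesos phase separately, assuming the same Teuchos::Time and Amesos_BaseSolver APIs used above:

#include <Amesos_BaseSolver.h>
#include <Teuchos_Time.hpp>
#include <iostream>

void timedSolve(Amesos_BaseSolver *Solver) {
  Teuchos::Time sym("symbolic"), num("numeric"), sol("solve");
  sym.start(); Solver->SymbolicFactorization(); sym.stop();
  num.start(); Solver->NumericFactorization();  num.stop();
  sol.start(); Solver->Solve();                 sol.stop();
  std::cout << "symbolic: " << sym.totalElapsedTime()
            << "s  numeric: " << num.totalElapsedTime()
            << "s  solve: "   << sol.totalElapsedTime() << "s" << std::endl;
}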