Example #1
/* ml_epetra_data_pack_status - This function does a status query on the
   ML_EPETRA_DATA_PACK passed in.
   Returns: IS_TRUE
*/
int ml_epetra_data_pack::status(){
  mexPrintf("**** Problem ID %d [ML_Epetra] ****\n",id);
  if(A) mexPrintf("Matrix: %dx%d w/ %d nnz\n",A->NumGlobalRows(),A->NumGlobalCols(),A->NumGlobalNonzeros());
  mexPrintf(" Operator complexity = %e\n",operator_complexity);
  if(List){mexPrintf("Parameter List:\n");List->print(cout,1);}
  mexPrintf("\n");
  return IS_TRUE;
}/*end status*/
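The snippet assumes the MLMEX state (`mexPrintf`, the stored `A`, `List`, `operator_complexity`) is set up elsewhere. A minimal, hypothetical call site, assuming a lookup helper `find_pack()` that maps a problem ID to its stored `ml_epetra_data_pack` (not part of the listing above):

/* Hypothetical driver fragment: report on a stored problem by ID.
   find_pack() is an assumed helper, not shown in this listing. */
void print_problem_status(int id) {
  ml_epetra_data_pack *pack = find_pack(id); /* assumed lookup */
  if (pack) pack->status();
  else mexPrintf("Problem ID %d not found\n", id);
}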
Example #2
shared_ptr<Epetra_CrsMatrix> sparseCholesky(const Epetra_CrsMatrix &mat) {
  // Note: we assume the matrix mat is symmetric and positive-definite
  const size_t size = mat.NumGlobalCols();
  if (mat.NumGlobalRows() != static_cast<int>(size))
    throw std::invalid_argument("sparseCholesky(): matrix must be square");

  int *rowOffsets = 0;
  int *colIndices = 0;
  double *values = 0;
  // ExtractCrsDataPointers() only succeeds on optimized (packed) storage
  if (mat.ExtractCrsDataPointers(rowOffsets, colIndices, values) != 0)
    throw std::invalid_argument("sparseCholesky(): matrix must have "
                                "optimized storage");

  Epetra_SerialComm comm;
  Epetra_LocalMap rowMap(static_cast<int>(size), 0 /* index_base */, comm);
  Epetra_LocalMap columnMap(static_cast<int>(size), 0 /* index_base */, comm);
  shared_ptr<Epetra_CrsMatrix> result = boost::make_shared<Epetra_CrsMatrix>(
      Copy, rowMap, columnMap, mat.GlobalMaxNumEntries());

  arma::Mat<double> localMat;
  arma::Mat<double> localCholesky;
  std::vector<bool> processed(size, false);
  for (size_t r = 0; r < size; ++r) {
    if (processed[r])
      continue;
    int localSize = rowOffsets[r + 1] - rowOffsets[r];
    localMat.set_size(localSize, localSize);
    localMat.fill(0.);
    localCholesky.set_size(localSize, localSize);
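    // Gather the dense block anchored at row r: every row it references
    // must repeat row r's column pattern, otherwise the matrix is not
    // block-diagonal and we bail out below.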
    for (int s = 0; s < localSize; ++s) {
      int row = colIndices[rowOffsets[r] + s];
      for (int c = 0; c < localSize; ++c) {
        int col = colIndices[rowOffsets[row] + c];
        if (col != colIndices[rowOffsets[r] + c])
          throw std::invalid_argument("sparseCholesky(): matrix is not "
                                      "block-diagonal");
        localMat(s, c) = values[rowOffsets[row] + c];
      }
    }
    assert(arma::norm(localMat - localMat.t(), "fro") <
           1e-12 * arma::norm(localMat, "fro"));
    localCholesky = arma::chol(localMat); // upper-triangular U, localMat = U.t()*U
    for (int s = 0; s < localSize; ++s) {
      int row = colIndices[rowOffsets[r] + s];
      processed[row] = true;
#ifndef NDEBUG
      int errorCode =
#endif
          result->InsertGlobalValues(row, s + 1 /* number of values */,
                                     localCholesky.colptr(s),
                                     colIndices + rowOffsets[r]);
      assert(errorCode == 0);
    }
  }
  result->FillComplete(columnMap, rowMap);

  return result;
}
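A minimal serial usage sketch, under the same headers and typedefs as above (Epetra, Armadillo, boost::shared_ptr). It assembles a 4x4 block-diagonal SPD matrix from two 2x2 blocks and factors it; FillComplete() plus OptimizeStorage() provide the packed storage that ExtractCrsDataPointers() needs:

Epetra_SerialComm comm;
Epetra_Map map(4, 0 /* index_base */, comm);
Epetra_CrsMatrix mat(Copy, map, 2);
double block[2][2] = {{4.0, 1.0}, {1.0, 3.0}}; // one SPD 2x2 block
for (int b = 0; b < 2; ++b)          // two diagonal blocks
  for (int i = 0; i < 2; ++i) {      // rows within a block
    int cols[2] = {2 * b, 2 * b + 1};
    mat.InsertGlobalValues(2 * b + i, 2, block[i], cols);
  }
mat.FillComplete();
mat.OptimizeStorage(); // packed storage for ExtractCrsDataPointers()
shared_ptr<Epetra_CrsMatrix> U = sparseCholesky(mat); // U.t()*U == mat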
Example #3
bool CrsMatrixInfo( const Epetra_CrsMatrix & A,
		    ostream & os )
{
  int MyPID = A.Comm().MyPID();

  // make sure the matrix has already been transformed to local indices
  bool IndicesAreGlobal = A.IndicesAreGlobal();
  if( IndicesAreGlobal == true ) {
    if( MyPID == 0 ) {
      os << "WARNING : matrix must be transformed to local\n";
      os << "          before calling CrsMatrixInfo\n";
      os << "          Now returning...\n";
    }
    return false;
  }

  int NumGlobalRows = A.NumGlobalRows();
  int NumGlobalNonzeros = A.NumGlobalNonzeros();
  int NumGlobalCols = A.NumGlobalCols();
  double NormInf = A.NormInf();
  double NormOne = A.NormOne();
  int NumGlobalDiagonals = A.NumGlobalDiagonals();
  int GlobalMaxNumEntries = A.GlobalMaxNumEntries();
  int IndexBase = A.IndexBase();
  bool StorageOptimized = A.StorageOptimized();
  bool LowerTriangular = A.LowerTriangular();
  bool UpperTriangular = A.UpperTriangular();
  bool NoDiagonal = A.NoDiagonal();

  // these variables identify quantities we have to compute ourselves,
  // since Epetra_CrsMatrix does not provide them

  double MyFrobeniusNorm( 0.0 ), FrobeniusNorm( 0.0 );
  double MyMinElement( DBL_MAX ), MinElement( DBL_MAX );
  double MyMaxElement( -DBL_MAX ), MaxElement( -DBL_MAX ); // NB: -DBL_MAX, not DBL_MIN (smallest positive double)
  double MyMinAbsElement( DBL_MAX ), MinAbsElement( DBL_MAX );
  double MyMaxAbsElement( 0.0 ), MaxAbsElement( 0.0 );

  int NumMyRows = A.NumMyRows();
  int * NzPerRow = new int[NumMyRows];
  int Row; // iterator on rows
  int Col; // iterator on cols
  int MaxNumEntries = A.MaxNumEntries();
  double * Values = new double[MaxNumEntries];
  int * Indices = new int[MaxNumEntries];
  double Element, AbsElement; // generic nonzero element and its abs value
  int NumEntries;
  double * Diagonal = new double [NumMyRows];
  // SumOffDiagonal is the sum of absolute values of the off-diagonals;
  // Diagonal is zeroed in case a row has no diagonal entry at all
  double * SumOffDiagonal = new double [NumMyRows];
  for( Row=0 ;  Row<NumMyRows ; ++Row ) {
    Diagonal[Row] = 0.0;
    SumOffDiagonal[Row] = 0.0;
  }
  int * IsDiagonallyDominant = new int [NumMyRows];
  int GlobalRow;

  // cycle over all matrix elements
  for( Row=0 ; Row<NumMyRows ; ++Row ) {
    GlobalRow = A.GRID(Row);
    NzPerRow[Row] = A.NumMyEntries(Row);
    A.ExtractMyRowCopy(Row,NzPerRow[Row],NumEntries,Values,Indices);
    for( Col=0 ; Col<NumEntries ; ++Col ) {
      Element = Values[Col];
      AbsElement = fabs(Element);
      if( Element<MyMinElement ) MyMinElement = Element;
      if( Element>MyMaxElement ) MyMaxElement = Element;
      if( AbsElement<MyMinAbsElement ) MyMinAbsElement = AbsElement;
      if( AbsElement>MyMaxAbsElement ) MyMaxAbsElement = AbsElement;
      if( Indices[Col] == Row ) Diagonal[Row] = Element;
      else
	SumOffDiagonal[Row] += fabs(Element);
      MyFrobeniusNorm += pow(Element,2);
    }
  }   

  // analyse storage per row
  int MyMinNzPerRow( NumMyRows ), MinNzPerRow( NumMyRows );
  int MyMaxNzPerRow( 0 ), MaxNzPerRow( 0 );

  for( Row=0 ; Row<NumMyRows ; ++Row ) {
    if( NzPerRow[Row]<MyMinNzPerRow ) MyMinNzPerRow=NzPerRow[Row];
    if( NzPerRow[Row]>MyMaxNzPerRow ) MyMaxNzPerRow=NzPerRow[Row];
  }

  // a test to see if matrix is diagonally-dominant

  int MyDiagonalDominance( 0 ), DiagonalDominance( 0 );
  int MyWeakDiagonalDominance( 0 ), WeakDiagonalDominance( 0 );

  for( Row=0 ; Row<NumMyRows ; ++Row ) {
    if( fabs(Diagonal[Row])>SumOffDiagonal[Row] )
      ++MyDiagonalDominance;
    else if( fabs(Diagonal[Row])==SumOffDiagonal[Row] )
      ++MyWeakDiagonalDominance;
  }

  // reduction operations
  
  A.Comm().SumAll(&MyFrobeniusNorm, &FrobeniusNorm, 1);
  A.Comm().MinAll(&MyMinElement, &MinElement, 1);
  A.Comm().MaxAll(&MyMaxElement, &MaxElement, 1);
  A.Comm().MinAll(&MyMinAbsElement, &MinAbsElement, 1);
  A.Comm().MaxAll(&MyMaxAbsElement, &MaxAbsElement, 1);
  A.Comm().MinAll(&MyMinNzPerRow, &MinNzPerRow, 1);
  A.Comm().MaxAll(&MyMaxNzPerRow, &MaxNzPerRow, 1);
  A.Comm().SumAll(&MyDiagonalDominance, &DiagonalDominance, 1);
  A.Comm().SumAll(&MyWeakDiagonalDominance, &WeakDiagonalDominance, 1);

  // free memory

  delete [] Values;
  delete [] Indices;
  delete [] Diagonal;
  delete [] SumOffDiagonal;
  delete [] IsDiagonallyDominant;
  delete [] NzPerRow;

  // no output for MyPID>0: only proc 0 writes to os
  if( MyPID != 0 ) return true;

  os << "*** general Information about the matrix\n";
  os << "Number of Global Rows = " << NumGlobalRows << endl;
  os << "Number of Global Cols = " << NumGlobalCols << endl;
  os << "is the matrix square  = " <<
    ((NumGlobalRows==NumGlobalCols)?"yes":"no") << endl;
  os << "||A||_\\infty          = " << NormInf << endl;
  os << "||A||_1               = " << NormOne << endl;
  os << "||A||_F               = " << sqrt(FrobeniusNorm) << endl;
  os << "Number of nonzero diagonal entries = "
     << NumGlobalDiagonals
     << "( " << 1.0* NumGlobalDiagonals/NumGlobalRows*100
     << " %)\n";
  os << "Nonzero per row : min = " << MinNzPerRow 
     << " average = " << 1.0*NumGlobalNonzeros/NumGlobalRows
     << " max = " << MaxNzPerRow << endl; 
  os << "Maximum number of nonzero elements/row = " 
     << GlobalMaxNumEntries << endl;
  os << "min( a_{i,j} )      = " << MinElement << endl;
  os << "max( a_{i,j} )      = " << MaxElement << endl;
  os << "min( abs(a_{i,j}) ) = " << MinAbsElement << endl;
  os << "max( abs(a_{i,j}) ) = " << MaxAbsElement << endl;
  os << "Number of diagonal dominant rows        = " << DiagonalDominance 
     << " (" << 100.0*DiagonalDominance/NumGlobalRows << " % of total)\n";
  os << "Number of weakly diagonal dominant rows = " 
     << WeakDiagonalDominance 
     << " (" << 100.0*WeakDiagonalDominance/NumGlobalRows << " % of total)\n";

  os << "*** Information about the Trilinos storage\n";
  os << "Base Index                 = " << IndexBase << endl;
  os << "is storage optimized       = " 
     << ((StorageOptimized==true)?"yes":"no") << endl;
  os << "are indices global         = "
     << ((IndicesAreGlobal==true)?"yes":"no") << endl;
  os << "is matrix lower triangular = " 
     << ((LowerTriangular==true)?"yes":"no") << endl;
  os << "is matrix upper triangular = " 
     << ((UpperTriangular==true)?"yes":"no") << endl;
  os << "are there diagonal entries = " 
     <<  ((NoDiagonal==false)?"yes":"no") << endl;

  return true;

}
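A short, hedged call-site sketch: CrsMatrixInfo() must be handed a matrix whose indices have already been transformed to local, i.e. one on which FillComplete() has been called; otherwise it prints the warning above and returns false.

// Hypothetical call site, assuming A is an Epetra_CrsMatrix on which
// FillComplete() has already been called.
void report(const Epetra_CrsMatrix & A) {
  if (!CrsMatrixInfo(A, std::cout))
    std::cerr << "CrsMatrixInfo() refused the matrix\n";
}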
int main(int argc, char** argv) {

  int rc=0, fail = 0;
#ifdef HAVE_EPETRAEXT
  bool verbose = false;
  int localProc = 0;
//   std::string *fstr;

#ifdef HAVE_MPI
  int numProcs;
  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &localProc);
  MPI_Comm_size(MPI_COMM_WORLD, &numProcs);
  const Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  const Epetra_SerialComm Comm;
#endif

  Teuchos::CommandLineProcessor clp(false,true);

  // --f=fileName provides a different matrix market file for input
  // --v will print out the partitioning (small files only)

  std::string inputFile("simple.mtx"); // value type: avoids leaking a new'd string
  bool runAll = false;

  clp.setOption( "f", &inputFile,
		"Name of input matrix market file");
  clp.setOption( "run-all", "abort", &runAll,
		"Don't abort if one test fails, run all of them.");
  clp.setOption( "v", "q", &verbose,
		"Display matrix before and after partitioning.");

  Teuchos::CommandLineProcessor::EParseCommandLineReturn parse_return =
    clp.parse(argc,argv);

  if( parse_return == Teuchos::CommandLineProcessor::PARSE_HELP_PRINTED){
#ifdef HAVE_MPI
    MPI_Finalize();
#endif
    return 0;
  }
  if( parse_return != Teuchos::CommandLineProcessor::PARSE_SUCCESSFUL ) {
#ifdef HAVE_MPI
    MPI_Finalize();
#endif
    return 1;
  }

  const char *fname = inputFile.c_str();

  // Read in the matrix market file and distribute its rows across the
  // processes.
  //
  // This reader uses the default Epetra_Map for number of rows for the
  // RowMap() and for the RangeMap().  For non-square matrices it uses
  // the default Epetra_Map for the number of columns for the DomainMap(),
  // otherwise it uses the RowMap().
  //
  // The maps can be specified with other versions of MMFtoCrsMatrix().


  Epetra_CrsMatrix *matrixPtr;
  rc = EpetraExt::MatrixMarketFileToCrsMatrix(fname, Comm, matrixPtr);
  if (rc < 0){
    if (localProc==0){
      std::cout << "error reading input file" << std::endl << "FAIL" << std::endl;
    }
    exit(1);
  }

  bool square = (matrixPtr->NumGlobalRows() == matrixPtr->NumGlobalCols());
  // If matrix is square, determine if it's symmetric  TODO


  // Run some partitioning tests
  //   Test graph and hypergraph partitioning
  //   Test with and without application supplied weights
  //   Test the Epetra_CrsMatrix interface and also the Epetra_CrsGraph interface
  //   Do tests where the vertex or edge weights vary widely

  Teuchos::RCP<Epetra_CrsMatrix> testm = Teuchos::rcp(matrixPtr);
  int failures = 0;

#ifdef SHORT_TEST
  fail = run_test(testm,
	     verbose,
	     false,                 // do not test #partitions < #processes
	     HYPERGRAPH_PARTITIONING,  // assumed: the original call omitted the method argument
	     NO_APPLICATION_SUPPLIED_WEIGHTS,
	     NO_APPLICATION_SUPPLIED_WEIGHTS,
	     EPETRA_CRSGRAPH);

  CHECK_FAILED();
  goto Report;

#else

  if (square){
#ifdef HAVE_ISORROPIA_ZOLTAN
    fail = run_test(testm,            // test matrix
	       verbose,               // display matrix before and after?
	       false,                 // do not test #partitions < #processes
	       GRAPH_PARTITIONING,    // perform zoltan graph partitioning
	       NO_APPLICATION_SUPPLIED_WEIGHTS,
	       NO_APPLICATION_SUPPLIED_WEIGHTS,
	       EPETRA_LINEARPROBLEM); // use linear problem interface of isorropia

    CHECK_FAILED();

    fail = run_test(testm,
	       verbose,               // draw graph before and after partitioning?
	       false,                 // do not test #partitions < #processes
	       HYPERGRAPH_PARTITIONING,  // do hypergraph partitioning
	       NO_APPLICATION_SUPPLIED_WEIGHTS,
	       NO_APPLICATION_SUPPLIED_WEIGHTS,
	       EPETRA_CRSMATRIX);     // use the Epetra_CrsMatrix interface

    CHECK_FAILED();

    fail = run_test(testm,
	       verbose,
	       true,                  // test #partitions < #processes
	       GRAPH_PARTITIONING,
	       NO_APPLICATION_SUPPLIED_WEIGHTS,
	       NO_APPLICATION_SUPPLIED_WEIGHTS,
	       EPETRA_CRSMATRIX);

    CHECK_FAILED();

    fail = run_test(testm,
	       verbose,
	       false,                 // do not test #partitions < #processes
	       GRAPH_PARTITIONING,
	       NO_APPLICATION_SUPPLIED_WEIGHTS,
	       NO_APPLICATION_SUPPLIED_WEIGHTS,
	       EPETRA_LINEARPROBLEM);

    CHECK_FAILED();

    fail = run_test(testm,
	       verbose,
	       false,                 // do not test #partitions < #processes
	       GRAPH_PARTITIONING,
	       NO_APPLICATION_SUPPLIED_WEIGHTS,
	       NO_APPLICATION_SUPPLIED_WEIGHTS,
	       EPETRA_ROWMATRIX);

    CHECK_FAILED();
#else
  fail = 0;
  if (localProc == 0){
    std::cout << "Test not run because it requires EPETRA_EXT" << std::endl;
  }
#endif



#ifdef HAVE_ISORROPIA_ZOLTAN

  fail = run_test(testm,
	     verbose,
	     true,                 // test #partitions < #processes
	     HYPERGRAPH_PARTITIONING,
	     NO_APPLICATION_SUPPLIED_WEIGHTS,
	     NO_APPLICATION_SUPPLIED_WEIGHTS,
	     EPETRA_CRSGRAPH);

   CHECK_FAILED();

  fail = run_test(testm,
	     verbose,
	     false,                 // do not test #partitions < #processes
	     HYPERGRAPH_PARTITIONING,
	     NO_APPLICATION_SUPPLIED_WEIGHTS,
	     NO_APPLICATION_SUPPLIED_WEIGHTS,
	     EPETRA_ROWMATRIX);

   CHECK_FAILED();

  fail = run_test(testm,
	     verbose,
	     false,                 // do not test #partitions < #processes
	     HYPERGRAPH_PARTITIONING,
	     NO_APPLICATION_SUPPLIED_WEIGHTS,
	     NO_APPLICATION_SUPPLIED_WEIGHTS,
	     EPETRA_LINEARPROBLEM);

   CHECK_FAILED();

#endif
  }
#endif // SHORT_TEST

#else
  fail = 0;
  if (localProc == 0){
    std::cout << "Test not run because it requires EPETRA_EXT" << std::endl;
  }
#endif

Report:

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  if (localProc == 0){
    if (failures){
      if (failures > 1)
	std::cout << std::endl << failures << " FAILURES" << std::endl;
      else
	std::cout << std::endl << "1 FAILURE" << std::endl;

      if (!runAll){
	std::cout <<
	  "(Use option --run-all if you do not want this test to abort on failure)"
	  << std::endl;
      }
    }
    else
      std::cout << std::endl << "PASS" << std::endl;
  }

  return fail;
}
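The listing relies on a CHECK_FAILED() macro and a run_test() helper defined elsewhere in the Isorropia test source. A plausible minimal definition of the macro, reconstructed from how fail, failures, runAll, and the Report label are used above (an assumption, not the library's actual code):

/* Assumed macro, reconstructed from usage: count a failure and,
   unless --run-all was given, jump to the report section. */
#define CHECK_FAILED() {      \
  if (fail != 0) {            \
    ++failures;               \
    if (!runAll) goto Report; \
  }                           \
}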