Example #1
void random_distribution_1D(
  itype nrows,          // Number of global matrix rows
  Epetra_Comm &comm,    // Epetra communicator to be used in maps
  Epetra_Map **rowMap,  // OUTPUT: pointer to row map to be created
  long long offsetEpetra64
) 
{
  // Randomly assign matrix rows to processor's row Map.

  int me = comm.MyPID();
  int np = comm.NumProc();

  vector<itype> myGlobalElements((size_t)(1.2 * (nrows / np)) + 1);
  int nMyRows = 0;
  srandom(1);
  double denom = (double) RAND_MAX + 1.;
  for (itype i = 0; i < nrows; i++) {
    int p = (int) ((double) np * (double) random() / denom);
    if (p == me) {
      if (nMyRows >= (int) myGlobalElements.size())
        myGlobalElements.resize((size_t)(1.5 * myGlobalElements.size()));
      myGlobalElements[nMyRows] = i + offsetEpetra64;
      nMyRows++;
    }
  }
  *rowMap = new Epetra_Map(nrows, nMyRows, &myGlobalElements[0], 0, comm);
}
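// A minimal usage sketch for random_distribution_1D (assumptions: itype is the
// integer typedef used above and MPI is already initialized; this sketch is
// illustrative and not part of the original source).
#include <mpi.h>
#include <Epetra_MpiComm.h>
#include <Epetra_Map.h>

void example_random_distribution_usage()
{
  Epetra_MpiComm comm(MPI_COMM_WORLD);
  Epetra_Map *rowMap = NULL;
  random_distribution_1D(1000, comm, &rowMap, 0LL);  // 1000 global rows, no offset
  // ... build a matrix or vector on *rowMap ...
  delete rowMap;  // the routine allocates the map with new; the caller owns it
}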
bool global_check_for_flag_on_proc_0(const char* flag,
                                     int numargs,
                                     char** strargs,
                                     const Epetra_Comm& comm)
{
    int mypid = comm.MyPID();
    int numprocs = comm.NumProc();

    int flag_found = 0;
    if (mypid==0) {
        for(int i=0; i<numargs; ++i) {
            if (strargs[i]==0) continue;

            if (strcmp(flag, strargs[i]) == 0) {
                flag_found = 1;
                break;
            }
        }
    }

    if (numprocs > 1) {
        comm.Broadcast(&flag_found, 1, 0);
    }

    bool return_value = (flag_found == 1);

    return( return_value );
}
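// A short usage sketch (assumed context: argc/argv come straight from main and
// comm was constructed there; illustrative only).
#include <iostream>
#include <Epetra_Comm.h>

void example_flag_check(int argc, char** argv, const Epetra_Comm& comm)
{
  // Proc 0 scans the arguments; the result is broadcast so every rank agrees.
  bool verbose = global_check_for_flag_on_proc_0("-v", argc, argv, comm);
  if (verbose && comm.MyPID() == 0)
    std::cout << "-v flag found; verbose mode enabled on all ranks" << std::endl;
}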
int fevec6(Epetra_Comm& Comm, bool verbose)
{
  int NumElements = 4;
  Epetra_Map     Map(NumElements, 0, Comm);
  Epetra_FEVector x1(Map);
  x1.PutScalar (0);

        // let all processors set global entry 0 to 1
  const int GID = 0;
  const double value = 1;
  x1.ReplaceGlobalValues(1, &GID, &value);
  x1.GlobalAssemble (Insert);
  if (Comm.MyPID()==0)
    std::cout << "Entry " << GID << " after construct & set: " 
        << x1[0][0] << std::endl;

  x1.PutScalar(0);

        // re-apply 1 to the vector, but only on the
        // owning processor. should be enough to set
        // the value (as non-local data in x1 should
        // have been eliminated after calling
        // GlobalAssemble).
  if (Comm.MyPID()==0)
    x1.ReplaceGlobalValues(1, &GID, &value);
  x1.GlobalAssemble (Insert);

  if (Comm.MyPID()==0) {
    std::cout << "Entry " << GID << " after PutScalar & set:      " 
        << x1[0][0] << std::endl;
    if (x1[0][0] != value) return -1;
  }

  return 0;
}
Example #4
void ML_Read_Matrix_Dimensions(const char *filename, int *numGlobalRows, Epetra_Comm &Comm)
{
    char line[1025], token1[35], token2[35], token3[35], token4[35], token5[35];
    int lineLength = 1025;   // line[] is sized to match
    FILE *fid = fopen(filename,"r");
    int N, NZ;
    if (fid == NULL)
      ML_Exit(Comm.MyPID(),"error opening matrix file", EXIT_FAILURE);
    if(fgets(line, lineLength, fid)==0) {
      fclose(fid);
      ML_Exit(Comm.MyPID(),"error reading matrix file header", EXIT_FAILURE);
    }
    if(sscanf(line, "%34s %34s %34s %34s %34s", token1, token2, token3, token4, token5 )!=5) {
      fclose(fid);
      ML_Exit(Comm.MyPID(),"error reading matrix file header", EXIT_FAILURE);
    }
    if (strcmp(token1, "%%MatrixMarket") || strcmp(token2, "matrix") ||
        strcmp(token3, "coordinate") || strcmp(token4, "real") ||
        strcmp(token5, "general"))
    {
      fclose(fid);
      ML_Exit(Comm.MyPID(),"error reading matrix file header", EXIT_FAILURE);
    }
    // Next, strip off header lines (which start with "%")
    do {
      if(fgets(line, lineLength, fid)==0) {
        fclose(fid);
        ML_Exit(Comm.MyPID(),"error reading matrix file comments", EXIT_FAILURE);
      }
    } while (line[0] == '%');

    // Next get problem dimensions: M, N, NZ
    if(sscanf(line, "%d %d %d", numGlobalRows, &N, &NZ)!=3) {
      fclose(fid);
      ML_Exit(Comm.MyPID(),"error reading matrix file dimensions", EXIT_FAILURE);
    }
    fclose(fid);
} //ML_Read_Matrix_Dimensions()
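// For reference, ML_Read_Matrix_Dimensions expects a file whose header looks
// like the following (illustrative; the counts are made up):
//
//   %%MatrixMarket matrix coordinate real general
//   % any number of comment lines starting with '%'
//   100 100 460
//
// The first non-comment line carries M (returned in numGlobalRows), N, and NZ.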
void show_matrix(const char *txt, const Epetra_RowMatrix &matrix, const Epetra_Comm &comm)
{
  int me = comm.MyPID();
  if (comm.NumProc() > 10){
    if (me == 0){
      std::cout << txt << std::endl;
      std::cout << "Printed matrix format only works for 10 or fewer processes" << std::endl;
    }
    return;
  }

  int numRows = matrix.NumGlobalRows();
  int numCols = matrix.NumGlobalCols();

  if ((numRows > 200) || (numCols > 500)){
    if (me == 0){
      std::cerr << txt << std::endl;
      std::cerr << "show_matrix: problem is too large to display" << std::endl;
    }
    return;
  }

  int *myA = new int [numRows * numCols];

  make_my_A(matrix, myA, comm);

  printMatrix(txt, myA, NULL, NULL, numRows, numCols, comm);

  delete [] myA;
}
Example #6
int generateHyprePrintOut(const char *filename, const Epetra_Comm &comm){
  int MyPID = comm.MyPID();
  int NumProc = comm.NumProc();

  int N = 100;
  int ilower = MyPID * N;
  int iupper = (MyPID+1)*N-1;

  double filePID = (double)MyPID/(double)100000;
  std::ostringstream stream;
  // Using setprecision() puts it in the std::string
  stream << std::setiosflags(std::ios::fixed) << std::setprecision(5) << filePID;
  // Then just ignore the first character
  std::string fileName(filename);
  fileName += stream.str().substr(1,7);
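  // Net effect: for MyPID == 3, filePID == 0.00003, the stream holds
  // "0.00003", and substr(1,7) keeps ".00003", so each rank writes its own
  // file such as "<filename>.00003".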

  std::ofstream myfile(fileName.c_str());

  if(myfile.is_open()){
    myfile << ilower << " " << iupper << " " << ilower << " " << iupper << std::endl;
    for(int i = ilower; i <= iupper; i++){
      for(int j=i-5; j <= i+5; j++){
        if(j >= 0 && j < N*NumProc)
          myfile << i << " " << j << " " << (double)rand()/(double)RAND_MAX << std::endl;
      }
    }
    myfile.close();
    return 0;
  } else {
    std::cout << "\nERROR:\nCouldn't open file.\n";
    return -1;
  }
}
Example #7
int special_submap_import_test(Epetra_Comm& Comm)
{
  int localProc = Comm.MyPID();

  //set up ids_source and ids_target such that ids_source are only
  //a subset of ids_target, and furthermore that ids_target are ordered
  //such that the LIDs don't match up. In other words, even if gid 2 does
  //exist in both ids_source and ids_target, it will correspond to different
  //LIDs on at least 1 proc.
  //
  //This is to test a certain bug-fix in Epetra_Import where the 'RemoteLIDs'
  //array wasn't being calculated correctly on all procs.
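  //For example, with 2 procs: proc 0 has ids_source={2}, ids_target={2,1,0};
  //proc 1 has ids_source={4}, ids_target={4,3,2}. gid 2 is LID 0 in both maps
  //on proc 0, but LID 2 in the target map on proc 1.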

  long long ids_source[1];
  ids_source[0] = localProc*2+2;

  long long ids_target[3];
  ids_target[0] = localProc*2+2;
  ids_target[1] = localProc*2+1;
  ids_target[2] = localProc*2+0;

  Epetra_Map map_source((long long) -1, 1, &ids_source[0], 0LL, Comm);
  Epetra_Map map_target((long long) -1, 3, &ids_target[0], 0LL, Comm);

  Epetra_Import importer(map_target, map_source);

  Epetra_LongLongVector vec_source(map_source);
  Epetra_LongLongVector vec_target(map_target);

  vec_target.PutValue(0);

  //set vec_source's contents so that entry[i] == GID[i].
  long long* GIDs = map_source.MyGlobalElements64();
  for(int i=0; i<map_source.NumMyElements(); ++i) {
    vec_source[i] = GIDs[i];
  }

  //Import vec_source into vec_target. This should result in the contents
  //of vec_target remaining 0 for the entries that don't exist in vec_source,
  //and other entries should be equal to the corresponding GID in the map.

  vec_target.Import(vec_source, importer, Insert);

  GIDs = map_target.MyGlobalElements64();
  int test_failed = 0;

  //the test passes if the i-th entry in vec_target equals either 0 or
  //GIDs[i].
  for(int i=0; i<vec_target.MyLength(); ++i) {
    if (vec_target[i] != GIDs[i] && vec_target[i] != 0) test_failed = 1;
  }

  int global_result;
  Comm.MaxAll(&test_failed, &global_result, 1);

  //If test didn't fail on any procs, global_result should be 0.
  //If test failed on any proc, global_result should be 1.
  return global_result;
}
void show_matrix(const char *txt, const Epetra_LinearProblem &problem, const Epetra_Comm &comm)
{
  int me = comm.MyPID();

  if (comm.NumProc() > 10){
    if (me == 0){
      std::cout << txt << std::endl;
      std::cout << "Printed matrix format only works for 10 or fewer processes" << std::endl;
    }
    return;
  }

  Epetra_RowMatrix *matrix = problem.GetMatrix();
  Epetra_MultiVector *lhs = problem.GetLHS();
  Epetra_MultiVector *rhs = problem.GetRHS();

  int numRows = matrix->NumGlobalRows();
  int numCols = matrix->NumGlobalCols();

  if ((numRows > 200) || (numCols > 500)){
    if (me == 0){
      std::cerr << txt << std::endl;
      std::cerr << "show_matrix: problem is too large to display" << std::endl;
    }
    return;
  }

  int *myA = new int [numRows * numCols];

  make_my_A(*matrix, myA, comm);

  int *myX = new int [numCols];
  int *myB = new int [numRows];

  memset(myX, 0, sizeof(int) * numCols);
  memset(myB, 0, sizeof(int) * numRows);

  const Epetra_BlockMap &lhsMap = lhs->Map();
  const Epetra_BlockMap &rhsMap = rhs->Map();

  int base = lhsMap.IndexBase();

  for (int j=0; j < lhsMap.NumMyElements(); j++){
    int colGID = lhsMap.GID(j);
    myX[colGID - base] = me + 1;
  }

  for (int i=0; i < rhsMap.NumMyElements(); i++){
    int rowGID = rhsMap.GID(i);
    myB[rowGID - base] = me + 1;
  }

  printMatrix(txt, myA, myX, myB, numRows, numCols, comm);

  delete [] myA;
  delete [] myX;
  delete [] myB;
}
Example #9
//==============================================================================
Poisson2dOperator::Poisson2dOperator(int nx, int ny, const Epetra_Comm & comm)
    : nx_(nx),
      ny_(ny),
      useTranspose_(false),
      comm_(comm),
      map_(0),
      numImports_(0),
      importIDs_(0),
      importMap_(0),
      importer_(0),
      importX_(0),
      Label_(0) {

    Label_ = "2D Poisson Operator";
    int numProc = comm.NumProc(); // Get number of processors
    int myPID = comm.MyPID(); // My rank
    if (2*numProc > ny) { // ny must be >= 2*numProc (to avoid degenerate cases)
        ny = 2*numProc;
        ny_ = ny;
        std::cout << " Increasing ny to " << ny << " to avoid degenerate distribution on " << numProc << " processors." << std::endl;
    }

    int chunkSize = ny/numProc;
    int remainder = ny%numProc;

    if (myPID+1 <= remainder) chunkSize++; // add on remainder
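    // Example: ny = 10 on numProc = 4 gives chunkSize = 2 and remainder = 2,
    // so ranks 0 and 1 own 3 grid rows each and ranks 2 and 3 own 2 each.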

    myny_ = chunkSize;

    map_ = new Epetra_Map(-1LL, ((long long)nx)*chunkSize, 0, comm_);

    if (numProc>1) {
        // Build import GID list to build import map and importer
        if (myPID>0) numImports_ += nx;
        if (myPID+1<numProc) numImports_ += nx;

        if (numImports_>0) importIDs_ = new long long[numImports_];
        long long * ptr = importIDs_;
        long long minGID = map_->MinMyGID64();
        long long maxGID = map_->MaxMyGID64();

        if (myPID>0) for (int i=0; i< nx; i++) *ptr++ = minGID - nx + i;
        if (myPID+1<numProc) for (int i=0; i< nx; i++) *ptr++ = maxGID + i +1;

        // At the end of the above step importIDs_ will have a list of global IDs that are needed
        // to compute the matrix multiplication operation on this processor.  Now build import map
        // and importer


        importMap_ = new Epetra_Map(-1LL, numImports_, importIDs_, 0LL, comm_);

        importer_ = new Epetra_Import(*importMap_, *map_);

    }
}
Example #10
int alternate_import_constructor_test(Epetra_Comm& Comm) {
  int rv=0;
  int nodes_per_proc=10;
  int numprocs = Comm.NumProc();
  int mypid    = Comm.MyPID();

  // Only run if we have multiple procs & MPI
  if(numprocs==1) return 0;
#ifndef HAVE_MPI
  return 0;
#endif

  // Build Map 1 - linear
  Epetra_Map Map1((long long)-1,nodes_per_proc,(long long)0,Comm);

  // Build Map 2 - mod striped
  std::vector<long long> MyGIDs(nodes_per_proc);
  for(int i=0; i<nodes_per_proc; i++)
    MyGIDs[i] = (mypid*nodes_per_proc + i) % numprocs;
  Epetra_Map Map2((long long)-1,nodes_per_proc,&MyGIDs[0],(long long)0,Comm);

  // For testing
  Epetra_LongLongVector Source(Map1), Target(Map2);


  // Build Import 1 - normal
  Epetra_Import Import1(Map2,Map1);
  rv = rv|| test_import_gid("Alt test: 2 map constructor",Source,Target, Import1);

  // Build Import 2 - no-comm constructor
  int Nremote=Import1.NumRemoteIDs();
  const int * RemoteLIDs = Import1.RemoteLIDs();
  std::vector<int> RemotePIDs(Nremote+1); // +1 so &RemotePIDs[0] stays valid even when Nremote==0
  std::vector<int> AllPIDs;
  Epetra_Util::GetPids(Import1,AllPIDs,true);

  for(int i=0; i<Nremote; i++) {
    RemotePIDs[i]=AllPIDs[RemoteLIDs[i]];
  }
  Epetra_Import Import2(Import1.TargetMap(),Import1.SourceMap(),Nremote,&RemotePIDs[0],Import1.NumExportIDs(),Import1.ExportLIDs(),Import1.ExportPIDs());

  rv = rv || test_import_gid("Alt test: no comm constructor",Source,Target,Import2);


  // Build Import 3 - Remotes only
  Epetra_Import Import3(Import1.TargetMap(),Import1.SourceMap(),Nremote,&RemotePIDs[0]);
  rv = rv || test_import_gid("Alt test: remote only constructor",Source,Target, Import3);


  return rv;
}
Example #11
int combine_mode_test(Epetra_Comm& Comm)
{
  int localProc = Comm.MyPID();


  long long ids_source[1];
  ids_source[0] = localProc*2+2;

  long long ids_target[3];
  ids_target[0] = localProc*2+2;
  ids_target[1] = localProc*2+1;
  ids_target[2] = localProc*2+0;

  Epetra_Map map_source((long long) -1, 1, &ids_source[0], 0LL, Comm);
  Epetra_Map map_target((long long) -1, 3, &ids_target[0], 0LL, Comm);

  Epetra_Import importer(map_target, map_source);

  Epetra_LongLongVector vec_source(map_source);
  Epetra_LongLongVector vec_target(map_target);

  vec_target.PutValue(0);

  //set vec_source's contents so that entry[i] == GID[i].
  long long* GIDs = map_source.MyGlobalElements64();
  for(int i=0; i<map_source.NumMyElements(); ++i) {
    vec_source[i] = GIDs[i];
  }

  //Import vec_source into vec_target. This should result in the contents
  //of vec_target remaining 0 for the entries that don't exist in vec_source,
  //and other entries should be equal to the corresponding GID in the map.

  vec_target.Import(vec_source, importer, Insert);

  GIDs = map_target.MyGlobalElements64();
  int test_failed = 0;

  //the test passes if the i-th entry in vec_target equals either 0 or
  //GIDs[i].
  for(int i=0; i<vec_target.MyLength(); ++i) {
    if (vec_target[i] != GIDs[i] && vec_target[i] != 0) test_failed = 1;
  }

  int global_result;
  Comm.MaxAll(&test_failed, &global_result, 1);

  //If test didn't fail on any procs, global_result should be 0.
  //If test failed on any proc, global_result should be 1.
  return global_result;
}
//==============================================================================
// Epetra_BlockMap constructor function for a Epetra-defined uniform linear distribution of constant size elements.
void Epetra_BlockMap::ConstructAutoUniform(long long NumGlobal_Elements, int Element_Size, int Index_Base, const Epetra_Comm& comm, bool IsLongLong)
{
  
  // Each processor gets roughly numGlobalPoints/p points
  // This routine automatically defines a linear partitioning of a
  // map with numGlobalPoints across the processors
  // specified in the given Epetra_Comm
  
  if (NumGlobal_Elements < 0) 
    throw ReportError("NumGlobal_Elements = " + toString(NumGlobal_Elements) + ".  Should be >= 0.", -1);
  if (Element_Size <= 0) 
    throw ReportError("ElementSize = " + toString(Element_Size) + ".  Should be > 0.", -2);
  
  BlockMapData_ = new Epetra_BlockMapData(NumGlobal_Elements, Element_Size, Index_Base, comm, IsLongLong);
  int NumProc = comm.NumProc();
  BlockMapData_->ConstantElementSize_ = true;
  BlockMapData_->LinearMap_ = true;

  int MyPID = comm.MyPID();

  if(BlockMapData_->NumGlobalElements_ / NumProc > (long long) std::numeric_limits<int>::max())
    throw ReportError("Epetra_BlockMap::ConstructAutoUniform: Error. Not enough space for elements on each processor", -99);

  BlockMapData_->NumMyElements_ = (int) (BlockMapData_->NumGlobalElements_ / NumProc);
  int remainder = (int) (BlockMapData_->NumGlobalElements_ % NumProc); // remainder will fit int
  int start_index = MyPID * (BlockMapData_->NumMyElements_ + 1);

  if (MyPID < remainder) 
    BlockMapData_->NumMyElements_++;
  else 
    start_index -= (MyPID - remainder);
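  // Worked example: NumGlobalElements_ = 10 on NumProc = 4 gives
  // NumMyElements_ = 2 with remainder = 2; PIDs 0 and 1 each get one extra
  // element, yielding GID ranges [0,2], [3,5], [6,7], [8,9].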

  BlockMapData_->NumGlobalPoints_ = BlockMapData_->NumGlobalElements_ * BlockMapData_->ElementSize_;
  BlockMapData_->NumMyPoints_ = BlockMapData_->NumMyElements_ * BlockMapData_->ElementSize_;

  BlockMapData_->MinMyElementSize_ = BlockMapData_->ElementSize_;
  BlockMapData_->MaxMyElementSize_ = BlockMapData_->ElementSize_;
  BlockMapData_->MinElementSize_ = BlockMapData_->ElementSize_;
  BlockMapData_->MaxElementSize_ = BlockMapData_->ElementSize_;

  BlockMapData_->MinAllGID_ = BlockMapData_->IndexBase_;
  BlockMapData_->MaxAllGID_ = BlockMapData_->MinAllGID_ + BlockMapData_->NumGlobalElements_ - 1;
  BlockMapData_->MinMyGID_ = start_index + BlockMapData_->IndexBase_;
  BlockMapData_->MaxMyGID_ = BlockMapData_->MinMyGID_ + BlockMapData_->NumMyElements_ - 1;
  BlockMapData_->DistributedGlobal_ = IsDistributedGlobal(BlockMapData_->NumGlobalElements_, BlockMapData_->NumMyElements_);

  EndOfConstructorOps();
}
Example #13
int rebalanceEpetraProblem( RCP<Epetra_Map>         &Map,
                            RCP<Epetra_CrsMatrix>   &A,
                            RCP<Epetra_MultiVector> &B,
                            RCP<Epetra_MultiVector> &X,
                            Epetra_Comm             &Comm
                          )
{
  // Rebalance linear system across multiple processors.
  if ( Comm.NumProc() > 1 ) {
    RCP<Epetra_Map> newMap = rcp( new Epetra_Map( Map->NumGlobalElements(), Map->IndexBase(), Comm ) );
    RCP<Epetra_Import> newImport = rcp( new Epetra_Import( *newMap, *Map ) );
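    // newMap is Epetra's default uniform linear distribution; importing into
    // it spreads the rows (roughly) evenly across the processors.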

    // Create rebalanced versions of the linear system.
    RCP<Epetra_CrsMatrix> newA = rcp( new Epetra_CrsMatrix( BELOSEPETRACOPY, *newMap, 0 ) );
    newA->Import( *A, *newImport, Insert );
    newA->FillComplete();
    RCP<Epetra_MultiVector> newB = rcp( new Epetra_MultiVector( *newMap, B->NumVectors() ) );
    newB->Import( *B, *newImport, Insert );
    RCP<Epetra_MultiVector> newX = rcp( new Epetra_MultiVector( *newMap, X->NumVectors() ) );
    newX->Import( *X, *newImport, Insert );

    // Set the pointers to the new rebalanced linear system.
    A = newA;
    B = newB;
    X = newX;
    Map = newMap;
  }

  return (0);
}
Example #14
Teuchos::RCP<Epetra_CrsMatrix> buildMatrix(int nx, Epetra_Comm & comm)
{
   Epetra_Map map(nx*comm.NumProc(),0,comm);
   Teuchos::RCP<Epetra_CrsMatrix> mat = Teuchos::rcp(new Epetra_CrsMatrix(Copy,map,3));

   int offsets[3] = {-1, 0, 1 };
   double values[3] = { -1, 2, -1};
   int maxGid = map.MaxAllGID();
   for(int lid=0;lid<nx;lid++) {
      int gid = mat->GRID(lid);
      int numEntries = 3, offset = 0;
      int indices[3] = { gid+offsets[0],
                         gid+offsets[1],
                         gid+offsets[2] };
      if(gid==0) { // left end point
         numEntries = 2;
         offset = 1;
      }
      else if(gid==maxGid) // right end point
         numEntries = 2;

      // insert rows
      mat->InsertGlobalValues(gid,numEntries,values+offset,indices+offset);
   }

   mat->FillComplete();
   return mat;
}
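// For illustration, with nx*comm.NumProc() == 4 the assembled operator is the
// 1-D Laplacian with stencil [-1 2 -1], truncated at the boundary rows:
//
//   [  2 -1  0  0 ]
//   [ -1  2 -1  0 ]
//   [  0 -1  2 -1 ]
//   [  0  0 -1  2 ]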
int fevec4(Epetra_Comm& Comm, bool verbose)
{
  int NumElements = 4;
  Epetra_Map     Map(NumElements, 0, Comm);
  Epetra_FEVector x1(Map);
  const double value = 1.;
  x1.PutScalar (value);
				// replace one element by itself; processor 0
				// does not own this element (when np > 1)
  const int GID = 3;
  x1.ReplaceGlobalValues(1, &GID, &value);
  x1.GlobalAssemble (Insert);

  if (Map.MyGID(3)) {
    //insist that the value for GID==3 is 1:
    if (std::abs(x1.Values()[Map.LID(3)] - 1) > 1.e-9) return -1;
  }

  std::cout << x1;

  Comm.Barrier();

				// re-apply GlobalAssemble. Nothing should
				// happen
  x1.GlobalAssemble (Insert);
  std::cout << x1;
  if (Map.MyGID(3)) {
    //insist that the value for GID==3 is 1:
    if (std::abs(x1.Values()[Map.LID(3)] - 1) > 1.e-9) return -1;
  }

  return 0;
}
Example #16
Teuchos::RCP<const EpetraExt::MultiComm> 
Stokhos::buildMultiComm(const Epetra_Comm& globalComm,
			int num_global_stochastic_blocks,
			int num_spatial_procs)
{
  Teuchos::RCP<const EpetraExt::MultiComm> globalMultiComm;

#ifdef HAVE_MPI
  if (num_spatial_procs == -1) {
    // By default, use all procs for spatial parallelism
    //MPI_Comm_size(MPI_COMM_WORLD, &num_spatial_procs);
    num_spatial_procs = globalComm.NumProc();
  }
  const Epetra_MpiComm& globalMpiComm = 
    dynamic_cast<const Epetra_MpiComm&>(globalComm);
  globalMultiComm = 
    Teuchos::rcp(new EpetraExt::MultiMpiComm(globalMpiComm.Comm(), 
					     num_spatial_procs, 
					     num_global_stochastic_blocks,
					     Teuchos::VERB_NONE));
#else
  globalMultiComm = 
    Teuchos::rcp(new EpetraExt::MultiSerialComm(num_global_stochastic_blocks));
#endif

  return globalMultiComm;
}
int HypreFileToCrsMatrix(const char *filename, const Epetra_Comm &comm, Epetra_CrsMatrix *&Matrix){
  int MyPID = comm.MyPID();
  // This double has the format we want for the file extension, apart from the leading zero
  double filePID = (double)MyPID/(double)100000;
  std::ostringstream stream;
  // Using setprecision() puts it in the string
  stream << std::setiosflags(std::ios::fixed) << std::setprecision(5) << filePID;
  // Then just ignore the first character
  std::string fileName(filename);
  fileName += stream.str().substr(1,7);
  // Open the file
  std::ifstream file(fileName.c_str());
  std::string line;
  if(file.is_open()){
    std::getline(file, line);
    int ilower, iupper;
    std::istringstream istream(line);
    // The first line of the file has the beginning and ending rows
    istream >> ilower;
    istream >> iupper;
    // Using those we can create a row map
    Epetra_Map RowMap(-1, iupper-ilower+1, 0, comm);
    Matrix = new Epetra_CrsMatrix(Copy, RowMap, 0);
    int currRow = -1;
    int counter = 0;
    std::vector<int> indices;
    std::vector<double> values;
    while(std::getline(file, line)){
      if(line.empty()) continue; // skip blank/trailing lines instead of parsing garbage
      std::istringstream lineStr(line);
      int row, col;
      double val;
      lineStr >> row;
      lineStr >> col;
      lineStr >> val;
      if(currRow == -1) currRow = row; // First line
      if(row == currRow){
        // add to the vector
        counter = counter + 1;
        indices.push_back(col);
        values.push_back(val);
      } else {
        Matrix->InsertGlobalValues(currRow, counter, &values[0], &indices[0]);
        indices.clear();
        values.clear();
        counter = 0;
        currRow = row;
        // make a new vector
        indices.push_back(col);
        values.push_back(val);
        counter = counter + 1;
      }
    }
    Matrix->InsertGlobalValues(currRow, counter, &values[0], &indices[0]);
    Matrix->Comm().Barrier();
    Matrix->FillComplete();
    file.close();
    return 0;
  } else {
    std::cout << "\nERROR:\nCouldn't open file.\n";
    return -1;
  }
}
Example #18
Teuchos::RCP< Epetra_LinearProblem >
build_problem(Teuchos::ParameterList& test_params,
              const Epetra_Comm& comm)
{
  Teuchos::Time timer("build_problem");
  timer.start();

  Epetra_CrsMatrix* A = NULL;
  Epetra_Vector* b = NULL;

  std::string mm_file("not specified");
  std::string rhs_mm_file("not specified");
  helper::GetParameter(test_params, "mm_file", mm_file);
  helper::GetParameter(test_params, "rhs_mm_file", rhs_mm_file);
  std::string hb_file("not specified");
  helper::GetParameter(test_params, "hb_file", hb_file);

  if (mm_file != "not specified") {
    if (comm.MyPID() == 0) {
      std::cout << "Matrix-Market file: " << mm_file << std::endl;
    }
    A = read_matrix_mm(mm_file, comm);
    if (rhs_mm_file != "not specified") {
      if (comm.MyPID() == 0) {
        std::cout << "Matrix-Market file: " << rhs_mm_file << std::endl;
      }
      b = read_vector_mm(rhs_mm_file, comm);
    }
  }
  else if (hb_file != "not specified") {
    read_matrix_hb(hb_file, comm, A, b);
  }
  else {
    throw std::runtime_error("No matrix file specified.");
  }

  Teuchos::RCP<Epetra_LinearProblem> problem = build_problem_mm(test_params, A, b);
  timer.stop();
  if (comm.MyPID() == 0) {
    std::cout << "proc 0 time to read matrix & create problem: " << timer.totalElapsedTime()
      << std::endl;
  }

  return problem;
}
Example #19
static void print_out(const Epetra_Comm& Comm, const int level, const char* what)
{
  if (Comm.MyPID() == 0 && ML_Get_PrintLevel() > 2)
#ifdef TFLOP
    printf("Amesos (level %d) : Building %s\n", level, what);
#else
    std::cout << "Amesos (level " << level << ") : Building " << what << "\n";
#endif
}
// Do something with the given communicator.  In this case, we just
// print Epetra's version to the given output stream, on Process 0.
void
exampleRoutine (const Epetra_Comm& comm,
                std::ostream& out)
{
  if (comm.MyPID () == 0) {
    // On (MPI) Process 0, print out the Epetra software version.
    out << Epetra_Version () << std::endl << std::endl;
  }
}
Example #21
void build_maps(
  itype nrows,      // Number of global matrix rows
  bool testEpetra64,// Flag indicating whether to adjust global row/column
                    // indices to exercise Epetra64 capability.
  Epetra_Comm &comm,       // Epetra communicator to be used in maps
  Epetra_Map **vectorMap,  // OUTPUT: Map to be used for the vector
  Epetra_Map **rowMap,     // OUTPUT: Map to be used for the matrix rows
  Epetra_Map **colMap,     // OUTPUT: Map to be used for the matrix cols
  long long &offsetEpetra64, // OUTPUT for testing Epetra64: add offsetEpetra64
                             // to all row/column indices.
  bool verbose             // print out generated maps
)
{
  // Function to build the maps for 1D or 2D matrix distribution.
  // Output for 1D includes rowMap and NULL colMap and vectorMap.
  // Output for 2D includes rowMap, colMap and vectorMap.

  int me = comm.MyPID();
  int np = comm.NumProc();

  *rowMap = NULL;
  *colMap = NULL;
  *vectorMap = NULL;

//  offsetEpetra64 = (testEpetra64 ? (long long) INT_MAX - (long long) 5 : 0);
  offsetEpetra64 = (testEpetra64 ? (long long) 2 * INT_MAX : 0);

  // Generate 1D row-based decomposition.

  if ((me == 0) && verbose) 
    cout << endl 
         << "1D Distribution: " << endl
         << "    np     = " << np << endl;

  // Linear map similar to Trilinos default.
  itype nMyRows = nrows / np + (nrows % np > me);
  itype myFirstRow = me * (nrows / np) + MIN(nrows % np, me);
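  // Example: nrows = 10 on np = 4 gives nMyRows = {3,3,2,2} and
  // myFirstRow = {0,3,6,8} for ranks 0..3.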
  itype *myGlobalRows = new itype[nMyRows];
  for (itype i = 0; i < nMyRows; i++)
    myGlobalRows[i] = i + myFirstRow + offsetEpetra64;
  *rowMap = new Epetra_Map(nrows, nMyRows, &myGlobalRows[0], 0, comm);
  delete [] myGlobalRows;
}
int rectangular(const Epetra_Comm& Comm, bool verbose)
{
  int mypid = Comm.MyPID();
  int numlocalrows = 3;
  Epetra_Map rowmap((long long) -1, numlocalrows, 0, Comm);

  long long numglobalrows = numlocalrows*Comm.NumProc();

  long long numcols = 2*numglobalrows;

  Epetra_FECrsGraph fegraph(Copy, rowmap, numcols);

  long long* cols = new long long[numcols];
  for(int j=0; j<numcols; ++j) cols[j] = j;

  Epetra_Map domainmap((long long) -1, numcols, 0, Comm);

  long long firstlocalrow = numlocalrows*mypid;
  long long lastlocalrow = numlocalrows*(mypid+1)-1;

  for(long long i=0; i<numglobalrows; ++i) {
    //if i is a local row, then skip it. We want each processor to only
    //load rows that belong on other processors.
    if (i >= firstlocalrow && i <= lastlocalrow) continue;

    EPETRA_CHK_ERR( fegraph.InsertGlobalIndices(1, &i, numcols, &(cols[0])) );
  }

  EPETRA_CHK_ERR( fegraph.GlobalAssemble(domainmap, rowmap) );

  if (verbose) {
    std::cout << "********************** fegraph **********************" << std::endl;
    std::cout << fegraph << std::endl;
  }

  delete [] cols;

  return(0);
}
Example #23
void MPIWrapper::allGatherCompact(const Epetra_Comm &Comm, FieldContainer<Scalar> &gatheredValues,
                                  FieldContainer<Scalar> &myValues, FieldContainer<int> &offsets)
{
  int mySize = myValues.size();
  int totalSize;
  Comm.SumAll(&mySize, &totalSize, 1);

  int myOffset = 0;
  Comm.ScanSum(&mySize,&myOffset,1);
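  // (ScanSum yields the inclusive prefix sum of mySize across ranks;
  // subtracting mySize below converts it to this rank's exclusive offset.)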

  myOffset -= mySize;

  gatheredValues.resize(totalSize);
  for (int i=0; i<mySize; i++)
  {
    gatheredValues[myOffset+i] = myValues[i];
  }
  MPIWrapper::entryWiseSum(Comm, gatheredValues);

  offsets.resize(Comm.NumProc());
  offsets[Comm.MyPID()] = myOffset;
  MPIWrapper::entryWiseSum(Comm, offsets);
}
Example #24
void MPIWrapper::allGatherHomogeneous(const Epetra_Comm &Comm, FieldContainer<int> &allValues, FieldContainer<int> &myValues)
{
  int numProcs = Teuchos::GlobalMPISession::getNProc();
  if (numProcs != allValues.dimension(0))
  {
    TEUCHOS_TEST_FOR_EXCEPTION(true, std::invalid_argument, "allValues first dimension must be #procs");
  }
  if (allValues.size() / numProcs != myValues.size())
  {
    TEUCHOS_TEST_FOR_EXCEPTION(true, std::invalid_argument, "myValues size invalid");
  }
#ifdef HAVE_MPI
  Comm.GatherAll(&myValues[0], &allValues[0], allValues.size()/numProcs);
#else
  // Serial build: only one process, so just copy myValues into allValues.
  for (int i=0; i<myValues.size(); i++)
    allValues[i] = myValues[i];
#endif
}
static int make_my_A(const Epetra_RowMatrix &matrix, int *myA, const Epetra_Comm &comm)
{
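  // Fills myA, a dense numRows x numCols overlay of the sparse matrix, with
  // (owner rank + 1) at each locally owned nonzero position, leaving 0 elsewhere.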
  int me = comm.MyPID();

  const Epetra_Map &rowmap = matrix.RowMatrixRowMap();
  const Epetra_Map &colmap = matrix.RowMatrixColMap();

  int myRows = matrix.NumMyRows();
  int numRows = matrix.NumGlobalRows();
  int numCols = matrix.NumGlobalCols();
  int base = rowmap.IndexBase();
  int maxRow = matrix.MaxNumEntries();

  memset(myA, 0, sizeof(int) * numRows * numCols);

  int *myIndices = new int [maxRow];
  double *tmp = new double [maxRow];

  int rowLen = 0;

  for (int i=0; i< myRows; i++){

    int rc = matrix.ExtractMyRowCopy(i, maxRow, rowLen, tmp, myIndices);

    if (rc){
      if (me == 0){
        std::cout << "Error in make_my_A" << std::endl;
      }
      delete [] myIndices;
      delete [] tmp;
      return 1;
    }

    int *row = myA + (numCols * (rowmap.GID(i) - base));

    for (int j=0; j < rowLen; j++){

      int colGID = colmap.GID(myIndices[j]);
      
      row[colGID - base] = me + 1;
    }
  }

  // new[] is valid even for length 0, so always release the buffers.
  delete [] myIndices;
  delete [] tmp;
  return 0;
}
int fevec7(Epetra_Comm& Comm, bool verbose)
{
  const int NumVectors = 4;
  const int NumElements = 4;
  Epetra_Map     Map(NumElements, 0, Comm);
  std::vector<double> mydata(NumElements*NumVectors, 1.0);
  Epetra_FEVector x1(View, Map, &mydata[0], NumElements, NumVectors);

  x1.PutScalar (0);

        // let all processors set global entry 0 to 1
  const int GID = 0;
  const double value = 1;
  x1.ReplaceGlobalValues(1, &GID, &value);
  x1.GlobalAssemble (Insert);

  if (Comm.MyPID()==0 && x1[0][0] != value) return -1;
  return 0;
}
Example #27
int check_graph_sharing(Epetra_Comm& Comm)
{
  int numLocalElems = 5;
  int localProc = Comm.MyPID();
  int firstElem = localProc*numLocalElems;
  int err;
  Epetra_Map map(-1, numLocalElems, 0, Comm);

  Epetra_CrsMatrix* A = new Epetra_CrsMatrix(Copy, map, 1);

  for (int i=0; i<numLocalElems; ++i) {
    int row = firstElem+i;
    int col = row;
    double val = 1.0;

    err = A->InsertGlobalValues(row, 1, &val, &col);
    if (err != 0) {
      cerr << "A->InsertGlobalValues("<<row<<") returned err="<<err<<endl;
      return(err);
    }
  }

  A->FillComplete(false);

  Epetra_CrsMatrix B(Copy, A->Graph());

  delete A;

  for (int i=0; i<numLocalElems; ++i) {
    int row = firstElem+i;
    int col = row;
    double val = 1.0;

    err = B.ReplaceGlobalValues(row, 1, &val, &col);
    if (err != 0) {
      cerr << "B.InsertGlobalValues("<<row<<") returned err="<<err<<endl;
      return(err);
    }
  }

  return(0);
}
Example #28
int test_bug2890(Epetra_Comm& Comm, bool verbose)
{
//This function tests the AZ_srandom1() function in AztecOO.
//The implementation of the Park and Miller random number
//generator was incorrect and resulted in an overflow condition.
//This is *not* a complete test of AztecOO's RNG.
//
//A more robust check is to compile AztecOO with gcc -ftrapv and run
//a Krylov method that invokes AZ_random_vector().

  int seed = -127773;
  double rand_num;

  rand_num = AZ_srandom1(&seed);

  if (verbose && Comm.MyPID() == 0)
    printf("test_bug2890: rand_num = %e (should be in [0,1])\n",rand_num);

  if ( (rand_num > 1) || (rand_num < 0) )
    return 1;    // rand_num should be in [0,1]
  else
    return 0;
}
Example #29
//============================================================================
void Ifpack_BreakForDebugger(Epetra_Comm& Comm)
{
  char hostname[80];
  char buf[80];
  if (Comm.MyPID()  == 0) cout << "Host and Process Ids for tasks" << endl;
  for (int i = 0; i <Comm.NumProc() ; i++) {
    if (i == Comm.MyPID() ) {
#if defined(TFLOP) || defined(JANUS_STLPORT)
      sprintf(buf, "Host: %s   PID: %d", "janus", getpid());
#elif defined(_WIN32)
      sprintf(buf,"Windows compiler, unknown hostname and PID!");
#else
      gethostname(hostname, sizeof(hostname));
      sprintf(buf, "Host: %s\tComm.MyPID(): %d\tPID: %d",
              hostname, Comm.MyPID(), getpid());
#endif
      printf("%s\n",buf);
      fflush(stdout);
#if !( defined(_WIN32) )
      sleep(1);
#endif
    }
  }
  if(Comm.MyPID() == 0) {
    printf("\n");
    printf("** Pausing to attach debugger...\n");
    printf("** You may now attach debugger to the processes listed above.\n");
    printf( "**\n");
    printf( "** Enter a character to continue > "); fflush(stdout);
    char go;
    scanf("%c",&go);
  }

  Comm.Barrier();

}
void Trilinos_Util_distrib_vbr_matrix(const Epetra_Comm & Comm,
	      int *N_global, int *N_blk_global,
	      int *n_nonzeros, int *n_blk_nonzeros, 
	      int *N_update, int **update,
	      double **val, int **indx, int **rpntr, int **cpntr,
	      int **bpntr, int **bindx,
	      double **x, double **b, double **xexact)
#undef DEBUG 

{
  int i, n_global_nonzeros, n_global_blk_nonzeros;
  int N_local;
  int j, row, have_xexact = 0 ;
  int *rpntr1, *bindx1, *bpntr1, *indx1;
  double *val1, *b1, *x1, *xexact1=0;
  int MyPID = Comm.MyPID();
  int NumProc = Comm.NumProc();

  printf("Processor %d of %d entering distrib_matrix.\n",
	 MyPID,NumProc) ;

  /*************** Distribute global matrix to all processors ************/

  if(MyPID == 0)
    {
      if ((*xexact) != NULL) have_xexact = 1;
      printf("%s", "Broadcasting exact solution\n");
    }

  if(NumProc  > 1)
    { 

      Comm.Broadcast( N_global,      1, 0);
      Comm.Broadcast( N_blk_global,  1, 0);
      Comm.Broadcast( n_nonzeros,     1, 0);
      Comm.Broadcast( n_blk_nonzeros, 1, 0);
      Comm.Broadcast( &have_xexact,   1, 0);

      printf("Processor %d of %d done with global parameter  broadcast.\n",
	     MyPID,NumProc) ;

      if(MyPID != 0)
        {
          *bpntr = (int   *) calloc(*N_blk_global+1,sizeof(int)) ;
          *rpntr = (int   *) calloc(*N_blk_global+1,sizeof(int)) ;
          *bindx = (int   *) calloc(*n_blk_nonzeros+1,sizeof(int)) ;
          *indx  = (int   *) calloc(*n_blk_nonzeros+1,sizeof(int)) ;
          *val   = (double *) calloc(*n_nonzeros+1,sizeof(double)) ;
          printf("Processor %d of %d done with global calloc.\n",
                 MyPID,NumProc) ;
        }

      Comm.Broadcast( (*bpntr), (*N_blk_global+1), 0);
      Comm.Broadcast( (*rpntr), (*N_blk_global+1), 0);
      Comm.Broadcast( (*bindx), (*n_blk_nonzeros+1), 0);
      Comm.Broadcast( (*indx),  (*n_blk_nonzeros+1), 0);
      Comm.Broadcast( (*val),  (*n_nonzeros+1), 0);

      printf("Processor %d of %d done with matrix broadcast.\n",
	     MyPID,NumProc) ;
 
      /* Set rhs and initialize guess */
      if(MyPID != 0)
	{
	  (*b) = (double *) calloc(*N_global,sizeof(double)) ;
	  (*x) = (double *) calloc(*N_global,sizeof(double)) ;
	  if (have_xexact)
	  (*xexact) =   (double *) calloc(*N_global,sizeof(double)) ;
	}

      Comm.Broadcast( (*x), (*N_global), 0);
      Comm.Broadcast( (*b), (*N_global), 0);
      if (have_xexact)
	Comm.Broadcast((*xexact), (*N_global), 0);
      printf("Processor %d of %d done with rhs/guess broadcast.\n",
	     MyPID,NumProc) ;

    }

  /********************** Generate update map  *************************/

  //read_update(N_update, update, proc_config, *N_blk_global, 1, linear) ;

  Epetra_Map map(*N_blk_global, 0, Comm);
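  // Epetra's default linear map determines which block rows this processor owns.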
  *N_update = map.NumMyElements();
  (*update) = (int *) calloc(*N_update,sizeof(int)) ;
  map.MyGlobalElements(*update);

  printf("Processor %d of %d has %d rows of %d total block rows.\n",
	 MyPID,NumProc,*N_update,*N_blk_global) ;

  /*************** Construct local matrix from global matrix ************/

  /* The local matrix is a copy of the rows assigned to this processor.  
     It is stored in MSR format and still has global indices 
  */

  if(NumProc  > 1)
    { 
      n_global_nonzeros = *n_nonzeros;
      n_global_blk_nonzeros = *n_blk_nonzeros;

      *n_nonzeros = 0;
      *n_blk_nonzeros = 0;
      N_local = 0;
      
      for (i=0; i<*N_update; i++)
	{
	  row = (*update)[i];
	  *n_nonzeros     += (*indx)[(*bpntr)[row+1]] - (*indx)[(*bpntr)[row]];
	  *n_blk_nonzeros += (*bpntr)[row+1] - (*bpntr)[row];
	  N_local         += (*rpntr)[row+1] - (*rpntr)[row];
	  
	}

      printf("Processor %d of %d has %d nonzeros of %d total nonzeros.\n",
	     MyPID,NumProc,
	     *n_nonzeros,n_global_nonzeros) ;

   printf("Processor %d of %d has %d block nonzeros of %d total block nonzeros.\n",
	     MyPID,NumProc,
	     *n_blk_nonzeros,n_global_blk_nonzeros) ;

   printf("Processor %d of %d has %d equations of %d total equations.\n",
	     MyPID,NumProc,
	     N_local,*N_global) ;

#ifdef DEBUG
      { double sum1 = 0.0;
      for (i=0;i<*N_global; i++) sum1 += (*b)[i];

      printf("Processor %d of %d has sum of b = %12.4g.\n",
	     MyPID,NumProc,sum1) ;
      }
#endif /* DEBUG */

      /* Allocate memory for local matrix */

      bpntr1 = (int   *) calloc(*N_update+1,sizeof(int)) ;
      rpntr1 = (int   *) calloc(*N_update+1,sizeof(int)) ;
      bindx1 = (int   *) calloc(*n_blk_nonzeros+1,sizeof(int)) ;
      indx1  = (int   *) calloc(*n_blk_nonzeros+1,sizeof(int)) ;
      val1 = (double *) calloc(*n_nonzeros+1,sizeof(double)) ;
      b1 =   (double *) calloc(N_local,sizeof(double)) ;
      x1 =   (double *) calloc(N_local,sizeof(double)) ;
      if (have_xexact)
      xexact1 =   (double *) calloc(N_local,sizeof(double)) ;

      {     
	int cur_blk_size, indx_offset, len_val, row_offset, row_offset1;
	double *val_ptr, *val1_ptr;

	bpntr1[0] = 0;
	indx1[0] = 0;
	rpntr1[0] = 0;
	for (i=0; i<*N_update; i++)
	  {
	    row = (*update)[i];
	    cur_blk_size = (*rpntr)[row+1] - (*rpntr)[row];
	    rpntr1[i+1] = rpntr1[i] + cur_blk_size;
	    row_offset = (*rpntr)[row];
	    row_offset1 = rpntr1[i];
	    for (j = 0; j<cur_blk_size; j++)
	      {
		b1[row_offset1+j] = (*b)[row_offset+j];
		x1[row_offset1+j] = (*x)[row_offset+j];
		if (have_xexact) xexact1[row_offset1+j] = (*xexact)[row_offset+j];
	      }
	    bpntr1[i+1] = bpntr1[i];
	    
#ifdef DEBUG	  
	    printf("Proc %d of %d: Global row = %d: Local row = %d: b = %12.4g: x = %12.4g: bindx = %d: val = %12.4g \n",
		    MyPID,NumProc, 
		    row, i, b1[i], x1[i], bindx1[i], val1[i]) ;
#endif
	    indx_offset = (*indx)[(*bpntr)[row]] - indx1[bpntr1[i]];
	    for (j = (*bpntr)[row]; j < (*bpntr)[row+1]; j++)
	      {
		indx1[bpntr1 [i+1] + 1] = (*indx)[j+1] - indx_offset;
		bindx1[bpntr1 [i+1] ] = (*bindx)[j];
		bpntr1[i+1] ++;
	      }
	    len_val = indx1[bpntr1[i+1]] - indx1[bpntr1[i]];
	    val_ptr = (*val)+(*indx)[(*bpntr)[row]];
	    val1_ptr = val1+indx1[bpntr1[i]];
	    for (j = 0; j<len_val; j++)
	      { 
		*val1_ptr = *val_ptr;
		val_ptr++; val1_ptr++;
	      }
	  }
      }
      printf("Processor %d of %d done with extracting local operators.\n",
	     MyPID,NumProc) ;

      if (have_xexact)
	{
	  printf(
     "The residual using VBR format and exact solution on processor %d is %12.4g\n",
	      MyPID,
	      Trilinos_Util_svbrres (N_local, *N_global, *N_update, val1, indx1, bindx1, 
		       rpntr1, (*rpntr), bpntr1, bpntr1+1,
		       (*xexact), b1));
	}
  
      /* Release memory for global matrix, rhs and solution */
      
      free ((void *) (*val));
      free ((void *) (*indx));
      free ((void *) (*bindx));
      free ((void *) (*bpntr));
      free ((void *) (*rpntr));
      free ((void *) (*b));
      free ((void *) (*x));
      if (have_xexact) free((void *) *xexact);

      /* Return local matrix through same pointers. */
      
      *val = val1;
      *indx = indx1;
      *bindx = bindx1;
      *bpntr = bpntr1;
      *rpntr = rpntr1;
      *b = b1;
      *x = x1;
      if (have_xexact) *xexact = xexact1;

    }
      if (have_xexact && NumProc  == 1)
	{
	  printf(
     "The residual using VBR format and exact solution on processor %d is %12.4g\n",
	      MyPID,
	      Trilinos_Util_svbrres (*N_global, *N_global, *N_update, (*val), (*indx), (*bindx), 
		       (*rpntr), (*rpntr), (*bpntr), (*bpntr)+1,
		       (*xexact), (*b)));
	}

  
  printf("Processor %d of %d leaving distrib_matrix.\n",
	 MyPID,NumProc) ;
  
  /* end distrib_matrix */
}