Example #1
int special_submap_import_test(Epetra_Comm& Comm)
{
  int localProc = Comm.MyPID();

  //set up ids_source and ids_target such that ids_source are only
  //a subset of ids_target, and furthermore that ids_target are ordered
  //such that the LIDs don't match up. In other words, even if gid 2 does
  //exist in both ids_source and ids_target, it will correspond to different
  //LIDs on at least 1 proc.
  //
  //This is to test a certain bug-fix in Epetra_Import where the 'RemoteLIDs'
  //array wasn't being calculated correctly on all procs.

  long long ids_source[1];
  ids_source[0] = localProc*2+2;

  long long ids_target[3];
  ids_target[0] = localProc*2+2;
  ids_target[1] = localProc*2+1;
  ids_target[2] = localProc*2+0;
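  //For example, with 2 procs the layout is:
  //  proc 0: ids_source = {2},  ids_target = {2, 1, 0}
  //  proc 1: ids_source = {4},  ids_target = {4, 3, 2}
  //so gid 2 is LID 0 in proc 0's source map, but LID 2 in proc 1's
  //target map.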

  Epetra_Map map_source((long long) -1, 1, &ids_source[0], 0LL, Comm);
  Epetra_Map map_target((long long) -1, 3, &ids_target[0], 0LL, Comm);

  Epetra_Import importer(map_target, map_source);

  Epetra_LongLongVector vec_source(map_source);
  Epetra_LongLongVector vec_target(map_target);

  vec_target.PutValue(0);

  //set vec_source's contents so that entry[i] == GID[i].
  long long* GIDs = map_source.MyGlobalElements64();
  for(int i=0; i<map_source.NumMyElements(); ++i) {
    vec_source[i] = GIDs[i];
  }

  //Import vec_source into vec_target. This should result in the contents
  //of vec_target remaining 0 for the entries that don't exist in vec_source,
  //and other entries should be equal to the corresponding GID in the map.

  vec_target.Import(vec_source, importer, Insert);

  GIDs = map_target.MyGlobalElements64();
  int test_failed = 0;

  //the test passes if the i-th entry in vec_target equals either 0 or
  //GIDs[i].
  for(int i=0; i<vec_target.MyLength(); ++i) {
    if (vec_target[i] != GIDs[i] && vec_target[i] != 0) test_failed = 1;
  }

  int global_result;
  Comm.MaxAll(&test_failed, &global_result, 1);

  //If test didn't fail on any procs, global_result should be 0.
  //If test failed on any proc, global_result should be 1.
  return global_result;
}
Example #2
int combine_mode_test(Epetra_Comm& Comm)
{
  int localProc = Comm.MyPID();


  long long ids_source[1];
  ids_source[0] = localProc*2+2;

  long long ids_target[3];
  ids_target[0] = localProc*2+2;
  ids_target[1] = localProc*2+1;
  ids_target[2] = localProc*2+0;

  Epetra_Map map_source((long long) -1, 1, &ids_source[0], 0LL, Comm);
  Epetra_Map map_target((long long) -1, 3, &ids_target[0], 0LL, Comm);

  Epetra_Import importer(map_target, map_source);

  Epetra_LongLongVector vec_source(map_source);
  Epetra_LongLongVector vec_target(map_target);

  vec_target.PutValue(0);

  //set vec_source's contents so that entry[i] == GID[i].
  long long* GIDs = map_source.MyGlobalElements64();
  for(int i=0; i<map_source.NumMyElements(); ++i) {
    vec_source[i] = GIDs[i];
  }

  //Import vec_source into vec_target. This should result in the contents
  //of vec_target remaining 0 for the entries that don't exist in vec_source,
  //and other entries should be equal to the corresponding GID in the map.

  vec_target.Import(vec_source, importer, Insert);

  GIDs = map_target.MyGlobalElements64();
  int test_failed = 0;

  //the test passes if the i-th entry in vec_target equals either 0 or
  //GIDs[i].
  for(int i=0; i<vec_target.MyLength(); ++i) {
    if (vec_target[i] != GIDs[i] && vec_target[i] != 0) test_failed = 1;
  }

  int global_result;
  Comm.MaxAll(&test_failed, &global_result, 1);

  //If test didn't fail on any procs, global_result should be 0.
  //If test failed on any proc, global_result should be 1.
  return global_result;
}
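Both tests above are meant to be driven from a small main(). The following is a minimal driver sketch, assuming the standard Epetra MPI/serial setup (the EPETRA_MPI macro) and that the headers the tests rely on (Epetra_Map.h, Epetra_Import.h, Epetra_LongLongVector.h) are included at the top of the same file; the driver itself is hypothetical, not part of the original tests.

#ifdef EPETRA_MPI
#include <mpi.h>
#include "Epetra_MpiComm.h"
#else
#include "Epetra_SerialComm.h"
#endif
#include <iostream>

int main(int argc, char** argv)
{
#ifdef EPETRA_MPI
  MPI_Init(&argc, &argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  //Each test returns 0 if it passed on every proc, 1 otherwise.
  int err = 0;
  err += special_submap_import_test(Comm);
  err += combine_mode_test(Comm);

  if (Comm.MyPID() == 0) {
    std::cout << (err == 0 ? "tests passed" : "tests FAILED") << std::endl;
  }

#ifdef EPETRA_MPI
  MPI_Finalize();
#endif
  return err;
}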
Example #3
void
example (const Epetra_Comm& comm)
{
  // The global number of rows in the matrix A to create.  We scale
  // this relative to the number of (MPI) processes, so that no matter
  // how many MPI processes you run, every process will have 10 rows.
  const global_ordinal_type numGblElts = 10 * comm.NumProc ();
  // The minimum global index in all the Maps used here (the index base).
  const global_ordinal_type indexBase = 0;

  // Local error code for use below.
  //
  // In the ideal case, we would use this to emulate behavior like
  // that of Haskell's Maybe in the context of MPI.  That is, if one
  // process experiences an error, we don't want to abort early and
  // cause the other processes to deadlock on MPI communication
  // operations.  Rather, we want to chain along the local error state,
  // until we reach a point where it's natural to share that state
  // with other processes.  For example, if one is doing an
  // MPI_Allreduce anyway, it makes sense to pass along one more bit
  // of information: whether the calling process is in a local error
  // state.  Epetra's interface doesn't let one chain the local error
  // state in this way, so we use extra collectives below to propagate
  // that state.  The code below uses very conservative error checks;
  // typical user code would not need to be so conservative and could
  // therefore avoid all the all-reduces.
  int lclerr = 0;

  // Construct a Map that is global (not locally replicated), but puts
  // all the equations on MPI Proc 0.
  const int procZeroMapNumLclElts = (comm.MyPID () == 0) ?
    static_cast<int> (numGblElts) :
    0;
  Epetra_Map procZeroMap (numGblElts, procZeroMapNumLclElts, indexBase, comm);

  // Construct a Map that puts approximately the same number of
  // equations on each processor.
  Epetra_Map globalMap (numGblElts, indexBase, comm);
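  // For example, with 4 processes: procZeroMap puts all 40 rows on
  // Process 0, while globalMap gives each process 10 contiguous rows
  // (Process p owns global rows 10*p through 10*p+9).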

  // Create a sparse matrix using procZeroMap.
  Epetra_CrsMatrix* A = createCrsMatrix (procZeroMap);
  if (A == NULL) {
    lclerr = 1;
  }

  // Make sure that sparse matrix creation succeeded.  Normally you
  // don't have to check this; we are being extra conservative because
  // this example is also a test.  Even though the matrix's rows live
  // entirely on Process 0, the matrix is nonnull on all processes in
  // its Map's communicator.
  int gblerr = 0;
  (void) comm.MaxAll (&lclerr, &gblerr, 1);
  if (gblerr != 0) {
    throw std::runtime_error ("createCrsMatrix returned NULL on at least one "
                              "process.");
  }

  //
  // We've created a sparse matrix whose rows live entirely on MPI
  // Process 0.  Now we want to distribute it over all the processes.
  //

  // Redistribute the matrix.  Since both the source and target Maps
  // are one-to-one, we could use either an Import or an Export.  If
  // only the source Map were one-to-one, we would have to use an
  // Import; if only the target Map were one-to-one, we would have to
  // use an Export.  We do not allow redistribution using Import or
  // Export if neither source nor target Map is one-to-one.

  // Make an Export object with procZeroMap as the source Map, and
  // globalMap as the target Map.  Note that the Export does not
  // depend on the type of data it redistributes: you can reuse the
  // same Export for different Epetra object types (for example, an
  // Epetra_Vector and an Epetra_CrsMatrix), as long as they use the
  // same source and target Maps.
  Epetra_Export exporter (procZeroMap, globalMap);

  // Make a new sparse matrix whose row map is the global Map.
  Epetra_CrsMatrix B (Copy, globalMap, 0);

  // Redistribute the data, NOT in place, from matrix A (which lives
  // entirely on Proc 0) to matrix B (which is distributed evenly over
  // the processes).
  //
  // Export() has collective semantics, so we must always call it on
  // all processes collectively.  This is why we don't select on
  // lclerr, as we do for the local operations above.
  lclerr = B.Export (*A, exporter, Insert);

  // Make sure that the Export succeeded.  Normally you don't have to
  // check this; we are being extra conservative because this example
  // is also a test.  We test both min and max, since lclerr may be
  // negative, zero, or positive.
  gblerr = 0;
  (void) comm.MinAll (&lclerr, &gblerr, 1);
  if (gblerr != 0) {
    throw std::runtime_error ("Export() failed on at least one process.");
  }
  (void) comm.MaxAll (&lclerr, &gblerr, 1);
  if (gblerr != 0) {
    throw std::runtime_error ("Export() failed on at least one process.");
  }

  // FillComplete has collective semantics, so we must always call it
  // on all processes collectively.  This is why we don't select on
  // lclerr, as we do for the local operations above.
  lclerr = B.FillComplete ();

  // Make sure that FillComplete succeeded.  Normally you don't have
  // to check this; we are being extra conservative because this
  // example is also a test.  We test both min and max, since lclerr
  // may be negative, zero, or positive.
  gblerr = 0;
  (void) comm.MinAll (&lclerr, &gblerr, 1);
  if (gblerr != 0) {
    throw std::runtime_error ("B.FillComplete() failed on at least one process.");
  }
  (void) comm.MaxAll (&lclerr, &gblerr, 1);
  if (gblerr != 0) {
    throw std::runtime_error ("B.FillComplete() failed on at least one process.");
  }

  if (A != NULL) {
    delete A;
  }
}
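Example #3 calls a createCrsMatrix helper and uses a global_ordinal_type typedef that are not shown in this excerpt. A minimal sketch of what they could look like follows, assuming int global indices and a matrix with a single (arbitrary) entry per row; in the full example both would appear before example(), and the actual helper may fill the matrix differently.

#include "Epetra_Map.h"
#include "Epetra_CrsMatrix.h"

// Assumed typedef; Epetra's default (32-bit) global indices are int.
typedef int global_ordinal_type;

// Sketch of the helper used by example(): build a matrix on the given
// row Map with one entry per row, then call FillComplete so it is
// ready to be exported.  Returns NULL on failure, as example() expects.
Epetra_CrsMatrix*
createCrsMatrix (const Epetra_Map& map)
{
  // At most 1 entry per row in this sketch.
  Epetra_CrsMatrix* A = new Epetra_CrsMatrix (Copy, map, 1);

  const int numMyElts = map.NumMyElements ();
  int* myGblElts = map.MyGlobalElements ();

  for (int i = 0; i < numMyElts; ++i) {
    const double val = 10.0; // arbitrary nonzero value
    const int gblRow = myGblElts[i];
    // Treat any nonzero return as failure; this is conservative, since
    // positive return codes are only warnings.
    if (A->InsertGlobalValues (gblRow, 1, &val, &gblRow) != 0) {
      delete A;
      return NULL;
    }
  }

  if (A->FillComplete () != 0) {
    delete A;
    return NULL;
  }
  return A;
}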