Example no. 1
//EpetraMap_To_TpetraMap: takes an Epetra_Map object, converts it to its equivalent
//Tpetra::Map object, and returns an RCP to this Tpetra::Map
Teuchos::RCP<const Tpetra_Map> Petra::EpetraMap_To_TpetraMap(const Teuchos::RCP<const Epetra_Map>& epetraMap_,
                                                      const Teuchos::RCP<const Teuchos::Comm<int> >& commT_)
{
  const std::size_t numElements = Teuchos::as<std::size_t>(epetraMap_->NumMyElements());
  const auto indexBase = Teuchos::as<GO>(epetraMap_->IndexBase());
  // A distributed map (or any map in a serial run) is rebuilt from its explicit
  // list of global indices; the global element count is left for Tpetra to compute.
  if (epetraMap_->DistributedGlobal() || epetraMap_->Comm().NumProc() == 1) {
    Teuchos::Array<Tpetra_GO> indices(numElements);
    int *epetra_indices = epetraMap_->MyGlobalElements();
    for (std::size_t i = 0; i < numElements; ++i)
      indices[i] = epetra_indices[i];
    const Tpetra::global_size_t computeGlobalElements = Teuchos::OrdinalTraits<Tpetra::global_size_t>::invalid();
    return Teuchos::rcp(new Tpetra_Map(computeGlobalElements, indices, indexBase, commT_));
  } else {
    // A non-distributed map on more than one process is locally replicated.
    return Teuchos::rcp(new Tpetra_Map(numElements, indexBase, commT_, Tpetra::LocallyReplicated));
  }
}
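
A minimal usage sketch (a hypothetical caller; assumes the Albany-style typedef Tpetra_Map is in scope and that epetraComm is an existing Epetra_Comm):

#include "Teuchos_DefaultComm.hpp"

// Wrap an existing Epetra map and convert it; any RCP<const Teuchos::Comm<int> >
// would work in place of the Teuchos default communicator.
Teuchos::RCP<const Epetra_Map> eMap = Teuchos::rcp(new Epetra_Map(100, 0, epetraComm));
Teuchos::RCP<const Teuchos::Comm<int> > tComm = Teuchos::DefaultComm<int>::getComm();
Teuchos::RCP<const Tpetra_Map> tMap = Petra::EpetraMap_To_TpetraMap(eMap, tComm);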
Example no. 2
void
Adapt::NodalDataBlock::resizeOverlapMap(Teuchos::RCP<const Epetra_Map> overlap_nodeMap, const Epetra_Comm& comm){

  // Passing -1 as the global element count tells Epetra to compute it from
  // the local counts (the commented-out line shows the old explicit count).
//  overlap_node_map = Teuchos::rcp(new Epetra_BlockMap(numGlobalNodes,
  overlap_node_map = Teuchos::rcp(new Epetra_BlockMap(-1,
                            overlap_nodeMap->NumMyElements(),
                            overlap_nodeMap->MyGlobalElements(),
                            blocksize,
                            0,
                            comm));

  // Build the vector and accessors
  overlap_node_vec = Teuchos::rcp(new Epetra_Vector(*overlap_node_map, false));

  mapsHaveChanged = true;

}
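
For reference, a standalone sketch of the same "-1 means let Epetra compute the global count" idiom (the map and its contents here are hypothetical):

#include "Epetra_BlockMap.h"
#include "Epetra_SerialComm.h"

// Four local elements of block size 3; the global element count is
// computed by Epetra across the communicator because we pass -1.
Epetra_SerialComm comm;
int myGIDs[] = {0, 1, 2, 3};
Epetra_BlockMap map(-1, 4, myGIDs, 3, 0, comm);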
Example no. 3
int main(int argc, char *argv[])
{
  int i;
  bool ierr, gerr;
  gerr = true;

#ifdef HAVE_MPI
  // Initialize MPI and setup an Epetra communicator
  MPI_Init(&argc,&argv);
  Teuchos::RCP<Epetra_MpiComm> Comm = Teuchos::rcp( new Epetra_MpiComm(MPI_COMM_WORLD) );
#else
  // If we aren't using MPI, then setup a serial communicator.
  Teuchos::RCP<Epetra_SerialComm> Comm = Teuchos::rcp( new Epetra_SerialComm() );
#endif

  // number of global elements
  int dim = 100;
  int blockSize = 5;

  bool verbose = false;
  if (argc>1) {
    if (argv[1][0]=='-' && argv[1][1]=='v') {
      verbose = true;
    }
  }

  // Construct a Map that puts approximately the same number of 
  // equations on each processor.
  Teuchos::RCP<Epetra_Map> Map = Teuchos::rcp( new Epetra_Map(dim, 0, *Comm) );
  
  // Get update list and number of local equations from newly created Map.
  int NumMyElements = Map->NumMyElements();
  std::vector<int> MyGlobalElements(NumMyElements);
  Map->MyGlobalElements(&MyGlobalElements[0]);

  // Create an integer std::vector NumNz that is used to build the Epetra matrix.
  // NumNz[i] is the number of nonzero entries in the ith global equation
  // on this processor (diagonal plus off-diagonal terms).
  std::vector<int> NumNz(NumMyElements);

  // We are building a tridiagonal matrix where each row has (-1 2 -1),
  // so interior rows hold 3 entries and the first and last rows hold 2.
  for (i=0; i<NumMyElements; i++) {
    if (MyGlobalElements[i]==0 || MyGlobalElements[i] == dim-1) {
      NumNz[i] = 2;
    }
    else {
      NumNz[i] = 3;
    }
  }

  // Create an Epetra_CrsMatrix
  Teuchos::RCP<Epetra_CrsMatrix> A = Teuchos::rcp( new Epetra_CrsMatrix(Copy, *Map, &NumNz[0]) );
   
  // Add rows one at a time.
  // We need some vectors to help; the off-diagonal values will always be -1.
  std::vector<double> Values(2);
  Values[0] = -1.0; Values[1] = -1.0;
  std::vector<int> Indices(2);
  double two = 2.0;
  int NumEntries;
  for (i=0; i<NumMyElements; i++) {
    if (MyGlobalElements[i]==0) {
      Indices[0] = 1;
      NumEntries = 1;
    }
    else if (MyGlobalElements[i] == dim-1) {
      Indices[0] = dim-2;
      NumEntries = 1;
    }
    else {
      Indices[0] = MyGlobalElements[i]-1;
      Indices[1] = MyGlobalElements[i]+1;
      NumEntries = 2;
    }
    ierr = A->InsertGlobalValues(MyGlobalElements[i],NumEntries,&Values[0],&Indices[0]);
    assert(ierr==0);
    // Put in the diagonal entry
    ierr = A->InsertGlobalValues(MyGlobalElements[i],1,&two,&MyGlobalElements[i]);
    assert(ierr==0);
  }
   
  // Finish building the Epetra matrix A
  ierr = A->FillComplete();
  assert(ierr==0);

  // Declare some useful typedefs.
  typedef Belos::MultiVec<double> EMV;
  typedef Belos::Operator<double> EOP;

  // Create an Epetra_MultiVector as the initial vector to start the solver.
  // Note that it needs to have the same number of columns as the block size.
  Teuchos::RCP<Belos::EpetraMultiVec> ivec = Teuchos::rcp( new Belos::EpetraMultiVec(*Map, blockSize) );
  ivec->Random();

  // Create an output manager to handle the I/O from the solver
  Teuchos::RCP<Belos::OutputManager<double> > MyOM = Teuchos::rcp( new Belos::OutputManager<double>() );
  if (verbose) {
    MyOM->setVerbosity( Belos::Warnings );
  }

  // test the Epetra adapter multivector
  ierr = Belos::TestMultiVecTraits<double,EMV>(MyOM,ivec);
  gerr &= ierr;
  if (ierr) {
    MyOM->print(Belos::Warnings,"*** EpetraAdapter PASSED TestMultiVecTraits()\n");
  }
  else {
    MyOM->print(Belos::Warnings,"*** EpetraAdapter FAILED TestMultiVecTraits() ***\n\n");
  }

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  if (gerr == false) {
    MyOM->print(Belos::Warnings,"End Result: TEST FAILED\n");
    return -1;
  }
  //
  // Default return value
  //
  MyOM->print(Belos::Warnings,"End Result: TEST PASSED\n");
  return 0;

}
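
One caveat with the assert-based checks above: assert() compiles away under NDEBUG, leaving the Epetra return codes unchecked in optimized builds. A minimal alternative sketch using the standard Teuchos macro (an editorial suggestion shown for the FillComplete() call only; not part of the original test):

#include "Teuchos_Assert.hpp"

// Check an Epetra return code even in release builds.
int info = A->FillComplete();
TEUCHOS_TEST_FOR_EXCEPTION(info != 0, std::runtime_error,
    "Epetra_CrsMatrix::FillComplete() returned error code " << info);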
Example no. 4
int main(int argc, char *argv[])
{
  
  using Teuchos::rcp_implicit_cast;

  int i, ierr, gerr;
  gerr = 0;

#ifdef HAVE_MPI
  // Initialize MPI and setup an Epetra communicator
  MPI_Init(&argc,&argv);
  Teuchos::RCP<Epetra_MpiComm> Comm = Teuchos::rcp( new Epetra_MpiComm(MPI_COMM_WORLD) );
#else
  // If we aren't using MPI, then setup a serial communicator.
  Teuchos::RCP<Epetra_SerialComm> Comm = Teuchos::rcp( new Epetra_SerialComm() );
#endif


  // number of global elements
  int dim = 100;
  int blockSize = 3;

  // PID info
  int MyPID = Comm->MyPID();
  bool verbose = false;

  if (argc>1) {
    if (argv[1][0]=='-' && argv[1][1]=='v') {
      verbose = true;
    }
  }

  // Construct a Map that puts approximately the same number of 
  // equations on each processor.
  Teuchos::RCP<Epetra_Map> Map = Teuchos::rcp( new Epetra_Map(dim, 0, *Comm) );
  
  // Get update list and number of local equations from newly created Map.
  int NumMyElements = Map->NumMyElements();
  std::vector<int> MyGlobalElements(NumMyElements);
  Map->MyGlobalElements(&MyGlobalElements[0]);

  // Create an integer std::vector NumNz that is used to build the Epetra matrix.
  // NumNz[i] is the number of nonzero entries in the ith global equation
  // on this processor (diagonal plus off-diagonal terms).
  std::vector<int> NumNz(NumMyElements);

  // We are building a tridiagonal matrix where each row has (-1 2 -1),
  // so interior rows hold 3 entries and the first and last rows hold 2.
  for (i=0; i<NumMyElements; i++) {
    if (MyGlobalElements[i]==0 || MyGlobalElements[i] == dim-1) {
      NumNz[i] = 2;
    }
    else {
      NumNz[i] = 3;
    }
  }

  // Create an Epetra_CrsMatrix
  Teuchos::RCP<Epetra_CrsMatrix> A = Teuchos::rcp( new Epetra_CrsMatrix(Copy, *Map, &NumNz[0]) );
   
  // Add rows one at a time.
  // We need some vectors to help; the off-diagonal values will always be -1.
  std::vector<double> Values(2);
  Values[0] = -1.0; Values[1] = -1.0;
  std::vector<int> Indices(2);
  double two = 2.0;
  int NumEntries;
  for (i=0; i<NumMyElements; i++) {
    if (MyGlobalElements[i]==0) {
      Indices[0] = 1;
      NumEntries = 1;
    }
    else if (MyGlobalElements[i] == dim-1) {
      Indices[0] = dim-2;
      NumEntries = 1;
    }
    else {
      Indices[0] = MyGlobalElements[i]-1;
      Indices[1] = MyGlobalElements[i]+1;
      NumEntries = 2;
    }
    ierr = A->InsertGlobalValues(MyGlobalElements[i],NumEntries,&Values[0],&Indices[0]);
    assert(ierr==0);
    // Put in the diagonal entry
    ierr = A->InsertGlobalValues(MyGlobalElements[i],1,&two,&MyGlobalElements[i]);
    assert(ierr==0);
  }
   
  // Finish building the Epetra matrix A
  ierr = A->FillComplete();
  assert(ierr==0);

  // Create a Belos::EpetraOp from this Epetra_CrsMatrix
  Teuchos::RCP<Belos::EpetraOp> op = Teuchos::rcp(new Belos::EpetraOp(A));

  // Declare some useful typedefs.
  typedef Belos::MultiVec<double> EMV;
  typedef Belos::Operator<double> EOP;

  // Create an Epetra_MultiVector as the initial vector to start the solver.
  // Note that it needs to have the same number of columns as the block size.
  Teuchos::RCP<Belos::EpetraMultiVec> ivec = Teuchos::rcp( new Belos::EpetraMultiVec(*Map, blockSize) );
  ivec->Random();

  // Create an output manager to handle the I/O from the solver
  Teuchos::RCP<Belos::OutputManager<double> > MyOM = Teuchos::rcp( new Belos::OutputManager<double>( MyPID ) );
  if (verbose) {
    MyOM->setVerbosity( Belos::Errors + Belos::Warnings );
  }

#ifdef HAVE_EPETRA_THYRA
  typedef Thyra::MultiVectorBase<double> TMVB;
  typedef Thyra::LinearOpBase<double>    TLOB;
  // create thyra objects from the epetra objects

  // first, a Thyra::VectorSpaceBase
  Teuchos::RCP<const Thyra::VectorSpaceBase<double> > epetra_vs = 
    Thyra::create_VectorSpace(Map);

  // then, a MultiVectorBase (from the Epetra_MultiVector)
  Teuchos::RCP<Thyra::MultiVectorBase<double> > thyra_ivec = 
    Thyra::create_MultiVector(rcp_implicit_cast<Epetra_MultiVector>(ivec),epetra_vs);

  // then, a LinearOpBase (from the Epetra_CrsMatrix)
  Teuchos::RCP<Thyra::LinearOpBase<double> > thyra_op = 
    Teuchos::rcp( new Thyra::EpetraLinearOp(A) );


  // test the Thyra adapter multivector
  ierr = Belos::TestMultiVecTraits<double,TMVB>(MyOM,thyra_ivec);
  gerr |= ierr;
  switch (ierr) {
  case Belos::Ok:
    if ( verbose && MyPID==0 ) {
      std::cout << "*** ThyraAdapter PASSED TestMultiVecTraits()" << std::endl;
    }
    break;
  case Belos::Error:
    if ( verbose && MyPID==0 ) {
      std::cout << "*** ThyraAdapter FAILED TestMultiVecTraits() ***" 
           << std::endl << std::endl;
    }
    break;
  }

  // test the Thyra adapter operator 
  ierr = Belos::TestOperatorTraits<double,TMVB,TLOB>(MyOM,thyra_ivec,thyra_op);
  gerr |= ierr;
  switch (ierr) {
  case Belos::Ok:
    if ( verbose && MyPID==0 ) {
      std::cout << "*** ThyraAdapter PASSED TestOperatorTraits()" << std::endl;
    }
    break;
  case Belos::Error:
    if ( verbose && MyPID==0 ) {
      std::cout << "*** ThyraAdapter FAILED TestOperatorTraits() ***" 
           << std::endl << std::endl;
    }
    break;
  }
#endif

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  if (gerr) {
    if (verbose && MyPID==0)
      std::cout << "End Result: TEST FAILED" << std::endl;	
    return -1;
  }
  //
  // Default return value
  //
  if (verbose && MyPID==0)
    std::cout << "End Result: TEST PASSED" << std::endl;
  return 0;

}
Example no. 5
int main(int argc, char *argv[])
{
  int i;
  bool ierr, gerr;
  gerr = true;

#ifdef HAVE_MPI
  // Initialize MPI and setup an Epetra communicator
  MPI_Init(&argc,&argv);
  Teuchos::RCP<Epetra_MpiComm> Comm = Teuchos::rcp( new Epetra_MpiComm(MPI_COMM_WORLD) );
#else
  // If we aren't using MPI, then setup a serial communicator.
  Teuchos::RCP<Epetra_SerialComm> Comm = Teuchos::rcp( new Epetra_SerialComm() );
#endif

  // number of global elements
  const int dim = 100;
  const int blockSize = 5;

  bool verbose = false;
  if (argc>1) {
    if (argv[1][0]=='-' && argv[1][1]=='v') {
      verbose = true;
    }
  }

  // Create an output manager to handle the I/O from the solver
  Teuchos::RCP<Anasazi::OutputManager<double> > MyOM = Teuchos::rcp( new Anasazi::BasicOutputManager<double>() );
  if (verbose) {
    MyOM->setVerbosity( Anasazi::Warnings );
  }

#ifndef HAVE_EPETRA_THYRA
  MyOM->stream(Anasazi::Warnings) 
    << "Please configure Anasazi with:" << std::endl
    << "--enable-epetra-thyra" << std::endl
    << "--enable-anasazi-thyra" << std::endl;
#ifdef HAVE_MPI
  MPI_Finalize();
#endif
  return -1;
#endif

  // Construct a Map that puts approximately the same number of 
  // equations on each processor.
  Teuchos::RCP<Epetra_Map> Map = Teuchos::rcp( new Epetra_Map(dim, 0, *Comm) );
  
  // Get update list and number of local equations from newly created Map.
  int NumMyElements = Map->NumMyElements();
  std::vector<int> MyGlobalElements(NumMyElements);
  Map->MyGlobalElements(&MyGlobalElements[0]);

  // Create an integer vector NumNz that is used to build the Epetra matrix.
  // NumNz[i] is the number of nonzero entries in the ith global equation
  // on this processor (diagonal plus off-diagonal terms).
  std::vector<int> NumNz(NumMyElements);

  // We are building a tridiagonal matrix where each row has (-1 2 -1),
  // so interior rows hold 3 entries and the first and last rows hold 2.
  for (i=0; i<NumMyElements; i++) {
    if (MyGlobalElements[i]==0 || MyGlobalElements[i] == dim-1) {
      NumNz[i] = 2;
    }
    else {
      NumNz[i] = 3;
    }
  }

  // Create an Epetra_CrsMatrix
  Teuchos::RCP<Epetra_CrsMatrix> A = Teuchos::rcp( new Epetra_CrsMatrix(Copy, *Map, &NumNz[0]) );
   
  // Add rows one at a time.
  // We need some vectors to help; the off-diagonal values will always be -1.
  std::vector<double> Values(2);
  Values[0] = -1.0; Values[1] = -1.0;
  std::vector<int> Indices(2);
  double two = 2.0;
  int NumEntries;
  for (i=0; i<NumMyElements; i++) {
    if (MyGlobalElements[i]==0) {
      Indices[0] = 1;
      NumEntries = 1;
    }
    else if (MyGlobalElements[i] == dim-1) {
      Indices[0] = dim-2;
      NumEntries = 1;
    }
    else {
      Indices[0] = MyGlobalElements[i]-1;
      Indices[1] = MyGlobalElements[i]+1;
      NumEntries = 2;
    }
    ierr = A->InsertGlobalValues(MyGlobalElements[i],NumEntries,&Values[0],&Indices[0]);
    assert(ierr==0);
    // Put in the diagonal entry
    ierr = A->InsertGlobalValues(MyGlobalElements[i],1,&two,&MyGlobalElements[i]);
    assert(ierr==0);
  }

  // Finish building the Epetra matrix A
  ierr = A->FillComplete();
  assert(ierr==0);

#ifdef HAVE_EPETRA_THYRA
  typedef Thyra::MultiVectorBase<double> TMVB;
  typedef Thyra::LinearOpBase<double>    TLOB;

  // first, create a Thyra::VectorSpaceBase from an Epetra_Map using the Epetra-Thyra wrappers
  Teuchos::RCP<const Thyra::VectorSpaceBase<double> > space = Thyra::create_VectorSpace(Map);

  // then, create a Thyra::MultiVectorBase from the Thyra::VectorSpaceBase using Thyra creational functions
  Teuchos::RCP<Thyra::MultiVectorBase<double> > thyra_ivec = Thyra::createMembers(space,blockSize);

  // then, create a Thyra::LinearOpBase from the Epetra_CrsMatrix using the Epetra-Thyra wrappers
  Teuchos::RCP<const Thyra::LinearOpBase<double> > thyra_op = Thyra::epetraLinearOp(A);

  // test the Thyra multivector adapter
  ierr = Anasazi::TestMultiVecTraits<double,TMVB>(MyOM,thyra_ivec);
  gerr &= ierr;  // accumulate with AND: a single failed test must leave gerr false
  if (ierr) {
    MyOM->stream(Anasazi::Warnings) << "*** ThyraAdapter PASSED TestMultiVecTraits()" << std::endl;
  }
  else {
    MyOM->stream(Anasazi::Warnings) << "*** ThyraAdapter FAILED TestMultiVecTraits() ***" << std::endl << std::endl;
  }

  // test the Thyra operator adapter
  ierr = Anasazi::TestOperatorTraits<double,TMVB,TLOB>(MyOM,thyra_ivec,thyra_op);
  gerr &= ierr;  // accumulate with AND: a single failed test must leave gerr false
  if (ierr) {
    MyOM->stream(Anasazi::Warnings) << "*** ThyraAdapter PASSED TestOperatorTraits()" << std::endl;
  }
  else {
    MyOM->stream(Anasazi::Warnings) << "*** ThyraAdapter FAILED TestOperatorTraits() ***" << std::endl << std::endl;
  }
#endif

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  if (gerr == false) {
    MyOM->print(Anasazi::Warnings,"End Result: TEST FAILED\n");
    return -1;
  }
  //
  // Default return value
  //
  MyOM->print(Anasazi::Warnings,"End Result: TEST PASSED\n");
  return 0;

}
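Example no. 6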
TEUCHOS_UNIT_TEST(PdQuickGridDiscretization_MPI_np2, SimpleTensorProductMeshTest) {

  Teuchos::RCP<Epetra_Comm> comm = rcp(new Epetra_MpiComm(MPI_COMM_WORLD));

  int numProcs = comm->NumProc();
  int rank     = comm->MyPID();

  TEST_COMPARE(numProcs, ==, 2);

  if(numProcs != 2){
     std::cerr << "Unit test runtime ERROR: utPeridigm_PdQuickGridDiscretization_MPI_np2 only makes sense on 2 processors" << std::endl;
     return;
  }

  RCP<ParameterList> discParams = rcp(new ParameterList);

  // create a 2x2x2 discretization
  // specify a spherical neighbor search with the horizon a tad longer than the mesh spacing
  discParams->set("Type", "PdQuickGrid");
  discParams->set("NeighborhoodType", "Spherical");
  ParameterList& quickGridParams = discParams->sublist("TensorProduct3DMeshGenerator");
  quickGridParams.set("Type", "PdQuickGrid");
  quickGridParams.set("X Origin", 0.0);
  quickGridParams.set("Y Origin", 0.0);
  quickGridParams.set("Z Origin", 0.0);
  quickGridParams.set("X Length", 1.0);
  quickGridParams.set("Y Length", 1.0);
  quickGridParams.set("Z Length", 1.0);
  quickGridParams.set("Number Points X", 2);
  quickGridParams.set("Number Points Y", 2);
  quickGridParams.set("Number Points Z", 2);

  // initialize the horizon manager and set the horizon to 0.501
  ParameterList blockParameterList;
  ParameterList& blockParams = blockParameterList.sublist("My Block");
  blockParams.set("Block Names", "block_1");
  blockParams.set("Horizon", 0.501);
  PeridigmNS::HorizonManager::self().loadHorizonInformationFromBlockParameters(blockParameterList);

  // create the discretization
  RCP<PdQuickGridDiscretization> discretization =
    rcp(new PdQuickGridDiscretization(comm, discParams));

  // sanity check, calling with a dimension other than 1 or 3 should throw an exception
  TEST_THROW(discretization->getGlobalOwnedMap(0), Teuchos::Exceptions::InvalidParameter);
  TEST_THROW(discretization->getGlobalOwnedMap(2), Teuchos::Exceptions::InvalidParameter);
  TEST_THROW(discretization->getGlobalOwnedMap(4), Teuchos::Exceptions::InvalidParameter);

  // basic checks on the 1d map
  Teuchos::RCP<const Epetra_BlockMap> map = discretization->getGlobalOwnedMap(1);
  TEST_ASSERT(map->NumGlobalElements() == 8);
  TEST_ASSERT(map->NumMyElements() == 4);
  TEST_ASSERT(map->ElementSize() == 1);
  TEST_ASSERT(map->IndexBase() == 0);
  TEST_ASSERT(map->UniqueGIDs() == true);
  int* myGlobalElements = map->MyGlobalElements();
  if(rank == 0){
    TEST_ASSERT(myGlobalElements[0] == 0);
    TEST_ASSERT(myGlobalElements[1] == 2);
    TEST_ASSERT(myGlobalElements[2] == 4);
    TEST_ASSERT(myGlobalElements[3] == 6);
  }
  if(rank == 1){
    TEST_ASSERT(myGlobalElements[0] == 5);
    TEST_ASSERT(myGlobalElements[1] == 7);
    TEST_ASSERT(myGlobalElements[2] == 1);
    TEST_ASSERT(myGlobalElements[3] == 3);
  }

  // check the 1d overlap map
  // for this simple discretization, everything should be ghosted on both processors
  Teuchos::RCP<const Epetra_BlockMap> overlapMap = discretization->getGlobalOverlapMap(1);
  TEST_ASSERT(overlapMap->NumGlobalElements() == 16);
  TEST_ASSERT(overlapMap->NumMyElements() == 8);
  TEST_ASSERT(overlapMap->ElementSize() == 1);
  TEST_ASSERT(overlapMap->IndexBase() == 0);
  TEST_ASSERT(overlapMap->UniqueGIDs() == false);
  myGlobalElements = overlapMap->MyGlobalElements();
  if(rank == 0){
    TEST_ASSERT(myGlobalElements[0] == 0);
    TEST_ASSERT(myGlobalElements[1] == 2);
    TEST_ASSERT(myGlobalElements[2] == 4);
    TEST_ASSERT(myGlobalElements[3] == 6);
    TEST_ASSERT(myGlobalElements[4] == 1);
    TEST_ASSERT(myGlobalElements[5] == 3);
    TEST_ASSERT(myGlobalElements[6] == 5);
    TEST_ASSERT(myGlobalElements[7] == 7);
  }
  if(rank == 1){
    TEST_ASSERT(myGlobalElements[0] == 5);
    TEST_ASSERT(myGlobalElements[1] == 7);
    TEST_ASSERT(myGlobalElements[2] == 1);
    TEST_ASSERT(myGlobalElements[3] == 3);
    TEST_ASSERT(myGlobalElements[4] == 0);
    TEST_ASSERT(myGlobalElements[5] == 2);
    TEST_ASSERT(myGlobalElements[6] == 4);
    TEST_ASSERT(myGlobalElements[7] == 6);
  }

  // same checks for 3d map
  map = discretization->getGlobalOwnedMap(3);
  TEST_ASSERT(map->NumGlobalElements() == 8);
  TEST_ASSERT(map->NumMyElements() == 4);
  TEST_ASSERT(map->ElementSize() == 3);
  TEST_ASSERT(map->IndexBase() == 0);
  TEST_ASSERT(map->UniqueGIDs() == true);
  myGlobalElements = map->MyGlobalElements();
  if(rank == 0){
    TEST_ASSERT(myGlobalElements[0] == 0);
    TEST_ASSERT(myGlobalElements[1] == 2);
    TEST_ASSERT(myGlobalElements[2] == 4);
    TEST_ASSERT(myGlobalElements[3] == 6);
  }
  if(rank == 1){
    TEST_ASSERT(myGlobalElements[0] == 5);
    TEST_ASSERT(myGlobalElements[1] == 7);
    TEST_ASSERT(myGlobalElements[2] == 1);
    TEST_ASSERT(myGlobalElements[3] == 3);
  }

  // check the 3d overlap map
  // for this simple discretization, everything should be ghosted on both processors
  overlapMap = discretization->getGlobalOverlapMap(3);
  TEST_ASSERT(overlapMap->NumGlobalElements() == 16);
  TEST_ASSERT(overlapMap->NumMyElements() == 8);
  TEST_ASSERT(overlapMap->ElementSize() == 3);
  TEST_ASSERT(overlapMap->IndexBase() == 0);
  TEST_ASSERT(overlapMap->UniqueGIDs() == false);
  myGlobalElements = overlapMap->MyGlobalElements();
  if(rank == 0){
    TEST_ASSERT(myGlobalElements[0] == 0);
    TEST_ASSERT(myGlobalElements[1] == 2);
    TEST_ASSERT(myGlobalElements[2] == 4);
    TEST_ASSERT(myGlobalElements[3] == 6);
    TEST_ASSERT(myGlobalElements[4] == 1);
    TEST_ASSERT(myGlobalElements[5] == 3);
    TEST_ASSERT(myGlobalElements[6] == 5);
    TEST_ASSERT(myGlobalElements[7] == 7);
  }
  if(rank == 1){
    TEST_ASSERT(myGlobalElements[0] == 5);
    TEST_ASSERT(myGlobalElements[1] == 7);
    TEST_ASSERT(myGlobalElements[2] == 1);
    TEST_ASSERT(myGlobalElements[3] == 3);
    TEST_ASSERT(myGlobalElements[4] == 0);
    TEST_ASSERT(myGlobalElements[5] == 2);
    TEST_ASSERT(myGlobalElements[6] == 4);
    TEST_ASSERT(myGlobalElements[7] == 6);
  }

  // check the bond map
  // the horizon was chosen such that each point should have three neighbors
  // note that if the NeighborhoodType parameter is not set to Spherical, this will fail
  Teuchos::RCP<const Epetra_BlockMap> bondMap = discretization->getGlobalBondMap();
  TEST_ASSERT(bondMap->NumGlobalElements() == 8);
  TEST_ASSERT(bondMap->NumMyElements() == 4);
  TEST_ASSERT(bondMap->IndexBase() == 0);
  TEST_ASSERT(bondMap->UniqueGIDs() == true);
  myGlobalElements = bondMap->MyGlobalElements();
  if(rank == 0){
    TEST_ASSERT(myGlobalElements[0] == 0);
    TEST_ASSERT(myGlobalElements[1] == 2);
    TEST_ASSERT(myGlobalElements[2] == 4);
    TEST_ASSERT(myGlobalElements[3] == 6);
  }
  if(rank == 1){
    TEST_ASSERT(myGlobalElements[0] == 5);
    TEST_ASSERT(myGlobalElements[1] == 7);
    TEST_ASSERT(myGlobalElements[2] == 1);
    TEST_ASSERT(myGlobalElements[3] == 3);
  }
  TEST_ASSERT(discretization->getNumBonds() == 4*3);

  // check the initial positions
  // all three coordinates are contained in a single vector
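  // the 2x2x2 grid on the unit cube has spacing 0.5, so cell centers fall at 0.25 and 0.75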
  Teuchos::RCP<Epetra_Vector> initialX = discretization->getInitialX();
  TEST_ASSERT(initialX->MyLength() == 4*3);
  TEST_ASSERT(initialX->GlobalLength() == 8*3);
  if(rank == 0){
    TEST_FLOATING_EQUALITY((*initialX)[0],  0.25, 1.0e-16);
    TEST_FLOATING_EQUALITY((*initialX)[1],  0.25, 1.0e-16);
    TEST_FLOATING_EQUALITY((*initialX)[2],  0.25, 1.0e-16);
 
    TEST_FLOATING_EQUALITY((*initialX)[3],  0.25, 1.0e-16);
    TEST_FLOATING_EQUALITY((*initialX)[4],  0.75, 1.0e-16);
    TEST_FLOATING_EQUALITY((*initialX)[5],  0.25, 1.0e-16);

    TEST_FLOATING_EQUALITY((*initialX)[6],  0.25, 1.0e-16);
    TEST_FLOATING_EQUALITY((*initialX)[7],  0.25, 1.0e-16);
    TEST_FLOATING_EQUALITY((*initialX)[8],  0.75, 1.0e-16);

    TEST_FLOATING_EQUALITY((*initialX)[9],  0.25, 1.0e-16);
    TEST_FLOATING_EQUALITY((*initialX)[10], 0.75, 1.0e-16);
    TEST_FLOATING_EQUALITY((*initialX)[11], 0.75, 1.0e-16);
  }
  if(rank == 1){
    TEST_FLOATING_EQUALITY((*initialX)[0],  0.75, 1.0e-16);
    TEST_FLOATING_EQUALITY((*initialX)[1],  0.25, 1.0e-16);
    TEST_FLOATING_EQUALITY((*initialX)[2],  0.75, 1.0e-16);

    TEST_FLOATING_EQUALITY((*initialX)[3],  0.75, 1.0e-16);
    TEST_FLOATING_EQUALITY((*initialX)[4],  0.75, 1.0e-16);
    TEST_FLOATING_EQUALITY((*initialX)[5],  0.75, 1.0e-16);

    TEST_FLOATING_EQUALITY((*initialX)[6],  0.75, 1.0e-16);
    TEST_FLOATING_EQUALITY((*initialX)[7],  0.25, 1.0e-16);
    TEST_FLOATING_EQUALITY((*initialX)[8],  0.25, 1.0e-16);

    TEST_FLOATING_EQUALITY((*initialX)[9],  0.75, 1.0e-16);
    TEST_FLOATING_EQUALITY((*initialX)[10], 0.75, 1.0e-16);
    TEST_FLOATING_EQUALITY((*initialX)[11], 0.25, 1.0e-16);
  }

  // check cell volumes
  Teuchos::RCP<Epetra_Vector> volume = discretization->getCellVolume();
  TEST_ASSERT(volume->MyLength() == 4);
  TEST_ASSERT(volume->GlobalLength() == 8);
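  // each cell is a 0.5 x 0.5 x 0.5 cube, so its volume is 0.125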
  for(int i=0 ; i<volume->MyLength() ; ++i)
    TEST_FLOATING_EQUALITY((*volume)[i], 0.125, 1.0e-16);

  // check the neighbor lists
  Teuchos::RCP<PeridigmNS::NeighborhoodData> neighborhoodData = discretization->getNeighborhoodData();
  TEST_ASSERT(neighborhoodData->NumOwnedPoints() == 4);
  int* ownedIds = neighborhoodData->OwnedIDs();
  TEST_ASSERT(ownedIds[0] == 0);
  TEST_ASSERT(ownedIds[1] == 1);
  TEST_ASSERT(ownedIds[2] == 2);
  TEST_ASSERT(ownedIds[3] == 3);
  TEST_ASSERT(neighborhoodData->NeighborhoodListSize() == 16);
  int* neighborhood = neighborhoodData->NeighborhoodList();
  int* neighborhoodPtr = neighborhoodData->NeighborhoodPtr();
  // remember, these are local IDs on each processor, 
  // which includes both owned and ghost nodes (confusing!)
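  // each point's block in the list starts with its neighbor count (here 3),
  // followed by that many neighbor local IDs, so 16 = 4 points x (1 + 3)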
  if(rank == 0){
    TEST_ASSERT(neighborhoodPtr[0] == 0);
    TEST_ASSERT(neighborhood[0]    == 3);
    TEST_ASSERT(neighborhood[1]    == 4);
    TEST_ASSERT(neighborhood[2]    == 1);
    TEST_ASSERT(neighborhood[3]    == 2);

    TEST_ASSERT(neighborhoodPtr[1] == 4);
    TEST_ASSERT(neighborhood[4]    == 3);
    TEST_ASSERT(neighborhood[5]    == 0);
    TEST_ASSERT(neighborhood[6]    == 5);
    TEST_ASSERT(neighborhood[7]    == 3);

    TEST_ASSERT(neighborhoodPtr[2] == 8);
    TEST_ASSERT(neighborhood[8]    == 3);
    TEST_ASSERT(neighborhood[9]    == 0);
    TEST_ASSERT(neighborhood[10]   == 6);
    TEST_ASSERT(neighborhood[11]   == 3);

    TEST_ASSERT(neighborhoodPtr[3] == 12);
    TEST_ASSERT(neighborhood[12]   == 3);
    TEST_ASSERT(neighborhood[13]   == 1);
    TEST_ASSERT(neighborhood[14]   == 2);
    TEST_ASSERT(neighborhood[15]   == 7);
  }
  if(rank == 1){
    TEST_ASSERT(neighborhoodPtr[0] == 0);
    TEST_ASSERT(neighborhood[0]    == 3);
    TEST_ASSERT(neighborhood[1]    == 2);
    TEST_ASSERT(neighborhood[2]    == 6);
    TEST_ASSERT(neighborhood[3]    == 1);

    TEST_ASSERT(neighborhoodPtr[1] == 4);
    TEST_ASSERT(neighborhood[4]    == 3);
    TEST_ASSERT(neighborhood[5]    == 3);
    TEST_ASSERT(neighborhood[6]    == 0);
    TEST_ASSERT(neighborhood[7]    == 7);

    TEST_ASSERT(neighborhoodPtr[2] == 8);
    TEST_ASSERT(neighborhood[8]    == 3);
    TEST_ASSERT(neighborhood[9]    == 4);
    TEST_ASSERT(neighborhood[10]   == 3);
    TEST_ASSERT(neighborhood[11]   == 0);

    TEST_ASSERT(neighborhoodPtr[3] == 12);
    TEST_ASSERT(neighborhood[12]   == 3);
    TEST_ASSERT(neighborhood[13]   == 2);
    TEST_ASSERT(neighborhood[14]   == 5);
    TEST_ASSERT(neighborhood[15]   == 1);
  }
}
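Example no. 7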
  void getpartition_(int& mySize, int* myIndices) {

      // Copy the global indices into the array to send back to glimmer;
      // partitionMap is assumed to be defined elsewhere in the enclosing file.
      partitionMap->MyGlobalElements(myIndices);

  }