Example #1
//----------------------------------------------------------------------------
int Epetra_Util::GetRemotePIDs(const Epetra_Import & Importer, std::vector<int> &RemotePIDs){
#ifdef HAVE_MPI
  Epetra_MpiDistributor *D=dynamic_cast<Epetra_MpiDistributor*>(&Importer.Distributor());
  if(!D) {
    RemotePIDs.resize(0); 
    return 0;
  }

  int i,j,k;

  // Get the distributor's data
  int NumReceives        = D->NumReceives();
  const int *ProcsFrom   = D->ProcsFrom();
  const int *LengthsFrom = D->LengthsFrom();
  
  // Resize the outgoing data structure
  RemotePIDs.resize(Importer.NumRemoteIDs());

  // Now, for each remote ID, record which process actually owns it.  This loop
  // follows the operation order used by the MpiDistributor, so it should
  // reproduce the same ordering.
  for(i=0,j=0;i<NumReceives;i++){
    int pid=ProcsFrom[i];    
    for(k=0;k<LengthsFrom[i];k++){
      RemotePIDs[j]=pid;
      j++;
    }    
  }
  return 0;
#else
  RemotePIDs.resize(0);
  return 0;
#endif
}
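A minimal usage sketch for the helper above, assuming an MPI build of Epetra and an already-constructed Epetra_Import; the wrapper function name is illustrative only.

#include <vector>
#include "Epetra_Import.h"
#include "Epetra_Util.h"

void ListRemotePIDs(const Epetra_Import& importer)
{
  std::vector<int> remotePIDs;
  Epetra_Util util;
  // One entry per remote ID of the importer, in the same order as
  // importer.RemoteLIDs(); the vector is left empty if the importer does not
  // use an Epetra_MpiDistributor.
  util.GetRemotePIDs(importer, remotePIDs);
}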
Example #2
//============================================================================
Epetra_CrsGraph* Ifpack_CreateOverlappingCrsMatrix(const Epetra_CrsGraph* Graph,
                                                   const int OverlappingLevel)
{

  if (OverlappingLevel == 0)
    return(0); // All done
  if (Graph->Comm().NumProc() == 1)
    return(0); // All done

  Epetra_CrsGraph* OverlappingGraph;
  Epetra_BlockMap* OverlappingMap;
  OverlappingGraph = const_cast<Epetra_CrsGraph*>(Graph);
  OverlappingMap = const_cast<Epetra_BlockMap*>(&(Graph->RowMap()));

  Epetra_CrsGraph* OldGraph;
  Epetra_BlockMap* OldMap;
  const Epetra_BlockMap* DomainMap = &(Graph->DomainMap());
  const Epetra_BlockMap* RangeMap = &(Graph->RangeMap());

  for (int level = 1; level <= OverlappingLevel ; ++level) {

    OldGraph = OverlappingGraph;
    OldMap = OverlappingMap;

    Epetra_Import* OverlappingImporter;
    OverlappingImporter = const_cast<Epetra_Import*>(OldGraph->Importer());
    OverlappingMap = new Epetra_BlockMap(OverlappingImporter->TargetMap());

    if (level < OverlappingLevel)
      OverlappingGraph = new Epetra_CrsGraph(Copy, *OverlappingMap, 0);
    else
      // On the last iteration, filter out all columns except those that
      // correspond to rows in the graph.  This ensures that the resulting
      // matrix is square.
      OverlappingGraph = new Epetra_CrsGraph(Copy, *OverlappingMap,
                                             *OverlappingMap, 0);

    OverlappingGraph->Import(*OldGraph, *OverlappingImporter, Insert);
    if (level < OverlappingLevel)
      OverlappingGraph->FillComplete(*DomainMap, *RangeMap);
    else {
      // Copy last OverlapImporter because we will use it later
      OverlappingImporter = new Epetra_Import(*OverlappingMap, *DomainMap);
      OverlappingGraph->FillComplete(*DomainMap, *RangeMap);
    }

    if (level > 1) {
      delete OldGraph;
      delete OldMap;
    }

    delete OverlappingMap;
    OverlappingGraph->FillComplete();

  }

  return(OverlappingGraph);
}
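A minimal usage sketch, assuming Graph is a distributed, FillComplete'd Epetra_CrsGraph and that the function above is declared in Ifpack_Utils.h (header name assumed).

#include "Epetra_CrsGraph.h"
#include "Ifpack_Utils.h"

Epetra_CrsGraph* BuildOverlapGraph(const Epetra_CrsGraph& Graph)
{
  // Returns 0 when the overlap level is 0 or only one process is used; the
  // caller owns (and must eventually delete) the overlapping graph.
  return Ifpack_CreateOverlappingCrsMatrix(&Graph, 2);
}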
Example #3
//==============================================================================
// Epetra_OffsetIndex constructor from Importer
Epetra_OffsetIndex::Epetra_OffsetIndex( const Epetra_CrsGraph & SourceGraph,
                                        const Epetra_CrsGraph & TargetGraph,
                                        Epetra_Import & Importer )
  : Epetra_Object("Epetra::OffsetIndex"),
    NumSame_(0),
    SameOffsets_(0),
    NumPermute_(0),
    PermuteOffsets_(0),
    NumExport_(0),
    NumRemote_(0),
    RemoteOffsets_(0),
    DataOwned_(true)
{
  NumSame_ = Importer.NumSameIDs();

  NumPermute_ = Importer.NumPermuteIDs();
  int * PermuteLIDs = Importer.PermuteToLIDs();

  NumExport_ = Importer.NumExportIDs();
  int * ExportLIDs = Importer.ExportLIDs();

  NumRemote_ = Importer.NumRemoteIDs();
  int * RemoteLIDs = Importer.RemoteLIDs();

  if(!SourceGraph.RowMap().GlobalIndicesTypeMatch(TargetGraph.RowMap()))
     throw ReportError("Epetra_OffsetIndex::Epetra_OffsetIndex: SourceGraph and TargetGraph global indices type mismatch", -1);
  if(SourceGraph.RowMap().GlobalIndicesInt()) {
#ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
     GenerateLocalOffsets_<int>( SourceGraph, TargetGraph,
                            PermuteLIDs );

     GenerateRemoteOffsets_<int>( SourceGraph, TargetGraph,
                             ExportLIDs, RemoteLIDs,
                             Importer.Distributor() );
#else
    throw ReportError("Epetra_OffsetIndex::Epetra_OffsetIndex: ERROR, GlobalIndicesInt but no API for it.",-1);
#endif
  }
  else if(SourceGraph.RowMap().GlobalIndicesLongLong()) {
#ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
     GenerateLocalOffsets_<long long>( SourceGraph, TargetGraph,
                            PermuteLIDs );

     GenerateRemoteOffsets_<long long>( SourceGraph, TargetGraph,
                             ExportLIDs, RemoteLIDs,
                             Importer.Distributor() );
#else
    throw ReportError("Epetra_OffsetIndex::Epetra_OffsetIndex: ERROR, GlobalIndicesLongLong but no API for it.",-1);
#endif
  }
  else
     throw ReportError("Epetra_OffsetIndex::Epetra_OffsetIndex: SourceGraph global indices type unknown", -1);
}
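A minimal usage sketch of the constructor above, assuming SourceGraph and TargetGraph are consistent with the Importer's source and target maps; the function name is illustrative.

#include "Epetra_CrsGraph.h"
#include "Epetra_Import.h"
#include "Epetra_OffsetIndex.h"

void BuildOffsets(const Epetra_CrsGraph& SourceGraph,
                  const Epetra_CrsGraph& TargetGraph,
                  Epetra_Import& Importer)
{
  // The resulting offsets can later be passed as the optional Indexor
  // argument of Import()/Export() on an Epetra_DistObject.
  Epetra_OffsetIndex Offsets(SourceGraph, TargetGraph, Importer);
  (void) Offsets;  // silence unused-variable warnings in this sketch
}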
Example #4
//----------------------------------------------------------------------------
int Epetra_Util::GetPids(const Epetra_Import & Importer, std::vector<int> &pids, bool use_minus_one_for_local){
#ifdef HAVE_MPI
  Epetra_MpiDistributor *D=dynamic_cast<Epetra_MpiDistributor*>(&Importer.Distributor());
  if(!D) EPETRA_CHK_ERR(-2);

  int i,j,k;
  int mypid=Importer.TargetMap().Comm().MyPID();
  int N=Importer.TargetMap().NumMyElements();

  // Get the importer's data
  const int *RemoteLIDs  = Importer.RemoteLIDs();

  // Get the distributor's data
  int NumReceives        = D->NumReceives();
  const int *ProcsFrom   = D->ProcsFrom();
  const int *LengthsFrom = D->LengthsFrom();
  
  // Resize the outgoing data structure
  pids.resize(N);

  // Start by claiming that I own all the data
  if(use_minus_one_for_local)
    for(i=0; i<N; i++) pids[i]=-1;
  else
    for(i=0; i<N; i++) pids[i]=mypid;

  // Now, for each remote ID, record which process actually owns it.  This loop
  // follows the operation order used by the MpiDistributor, so it should
  // reproduce the same ordering.
  for(i=0,j=0;i<NumReceives;i++){
    int pid=ProcsFrom[i];
    for(k=0;k<LengthsFrom[i];k++){
      if(pid!=mypid) pids[RemoteLIDs[j]]=pid;
      j++;
    }    
  }
  return 0;
#else
  EPETRA_CHK_ERR(-10);
#endif
}
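A minimal usage sketch for GetPids, assuming an MPI build of Epetra and an existing Epetra_Import; the wrapper function name is illustrative.

#include <vector>
#include "Epetra_Import.h"
#include "Epetra_Util.h"

void ListOwningPIDs(const Epetra_Import& importer)
{
  std::vector<int> pids;  // one entry per local element of the target map
  Epetra_Util util;
  // With use_minus_one_for_local == true, locally owned entries are marked
  // with -1 instead of this rank's PID.
  util.GetPids(importer, pids, true);
}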
Example #5
void  EpetraGhostView::import(const Epetra_Import& importer,
  const Epetra_Vector& srcObject)
{
  /* If my vector does not yet exist, create it using the target map of the
   * importer */
  if (ghostView_.get()==0)
  {
    ghostView_ = rcp(new Epetra_Vector(importer.TargetMap()));
  }

  /* do the import */
  int ierr = ghostView_->Import(srcObject, importer, Insert);

  if (ierr < 0)
  {
    Out::os() << "target map=" << endl;
    importer.TargetMap().Print(Out::os());
    Out::os() << "source map=" << endl;
    srcObject.Map().Print(Out::os());
  }
  TEUCHOS_TEST_FOR_EXCEPTION(ierr < 0, std::runtime_error, "ierr=" << ierr << " in EpetraGhostView::import()");
}
Example #6
int Epetra_Util::GetPidGidPairs(const Epetra_Import & Importer,std::vector< std::pair<int,long long> > & gpids, bool use_minus_one_for_local){
  // Put the (PID, GID) pair for each member of Importer.TargetMap() in gpids.
  // If use_minus_one_for_local == true, put -1 instead of MyPID.
  // This only works if we have an MpiDistributor in our Importer.  Otherwise, return an error.
#ifdef HAVE_MPI
  Epetra_MpiDistributor *D=dynamic_cast<Epetra_MpiDistributor*>(&Importer.Distributor());
  if(!D) EPETRA_CHK_ERR(-2);

  int i,j,k;
  int mypid=Importer.TargetMap().Comm().MyPID();
  int N=Importer.TargetMap().NumMyElements();

  // Get the importer's data
  const int *RemoteLIDs  = Importer.RemoteLIDs();

  // Get the distributor's data
  int NumReceives        = D->NumReceives();
  const int *ProcsFrom   = D->ProcsFrom();
  const int *LengthsFrom = D->LengthsFrom();

  // Resize the outgoing data structure
  gpids.resize(N);

  // Start by claiming that I own all the data
  if(use_minus_one_for_local)
    for(i=0; i<N; i++) gpids[i]=std::make_pair(-1,Importer.TargetMap().GID64(i));
  else
    for(i=0; i<N; i++) gpids[i]=std::make_pair(mypid,Importer.TargetMap().GID64(i));

  // Now, for each remote ID, record which process actually owns it.  This loop
  // follows the operation order used by the MpiDistributor, so it should
  // reproduce the same ordering.
  for(i=0,j=0;i<NumReceives;i++){
    int pid=ProcsFrom[i];
    for(k=0;k<LengthsFrom[i];k++){
      if(pid!=mypid) gpids[RemoteLIDs[j]].first=pid;
      j++;
    }    
  }
  return 0;
#else
  EPETRA_CHK_ERR(-10);
#endif
}
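A minimal usage sketch for GetPidGidPairs, under the same assumptions as the GetPids sketch above.

#include <utility>
#include <vector>
#include "Epetra_Import.h"
#include "Epetra_Util.h"

void ListPidGidPairs(const Epetra_Import& importer)
{
  std::vector< std::pair<int, long long> > gpids;
  Epetra_Util util;
  // gpids[i] holds (owning PID, 64-bit GID) for local element i of the
  // importer's target map.
  util.GetPidGidPairs(importer, gpids, false);
}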
Example #7
//=============================================================================
int Epetra_DistObject::Export(const Epetra_SrcDistObject& A,
			      const Epetra_Import& Importer,
			      Epetra_CombineMode CombineMode,
                              const Epetra_OffsetIndex * Indexor)
{

  if (!Map_.SameAs(Importer.SourceMap())) EPETRA_CHK_ERR(-2);
  if (!A.Map().SameAs(Importer.TargetMap())) EPETRA_CHK_ERR(-3);

  int NumSameIDs = Importer.NumSameIDs();
  int NumPermuteIDs = Importer.NumPermuteIDs();
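  // Export through an Importer runs the communication plan in reverse, so the
  // importer's export/remote and permute-to/from roles are swapped below.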
  int NumRemoteIDs = Importer.NumExportIDs();
  int NumExportIDs = Importer.NumRemoteIDs();
  int* ExportLIDs = Importer.RemoteLIDs();
  int* RemoteLIDs = Importer.ExportLIDs();
  int* PermuteToLIDs = Importer.PermuteFromLIDs();
  int* PermuteFromLIDs = Importer.PermuteToLIDs();

  EPETRA_CHK_ERR(DoTransfer(A, CombineMode, NumSameIDs, NumPermuteIDs, NumRemoteIDs, NumExportIDs,
			    PermuteToLIDs, PermuteFromLIDs,  RemoteLIDs, ExportLIDs,
			    LenImports_, Imports_, LenExports_, Exports_,
			    Importer.Distributor(), true, Indexor));
  return(0);
}
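A minimal sketch of invoking this reverse-mode Export on an Epetra_Vector, assuming an importer built with the overlap map as target and the owned map as source; the names here are illustrative.

#include "Epetra_CombineMode.h"
#include "Epetra_Import.h"
#include "Epetra_Vector.h"

void SumIntoOwned(Epetra_Vector& owned,
                  const Epetra_Vector& overlap,
                  const Epetra_Import& importer)
{
  // Data flows from the overlap (target) map back to the owned (source) map,
  // adding contributions for shared entries.
  owned.Export(overlap, importer, Add);
}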
Example #8
//============================================================================
Epetra_CrsMatrix* Ifpack_CreateOverlappingCrsMatrix(const Epetra_RowMatrix* Matrix,
                                                    const int OverlappingLevel)
{

  if (OverlappingLevel == 0)
    return(0); // All done
  if (Matrix->Comm().NumProc() == 1)
    return(0); // All done

  Epetra_CrsMatrix* OverlappingMatrix;
  OverlappingMatrix = 0;
  Epetra_Map* OverlappingMap;
  OverlappingMap = (Epetra_Map*)&(Matrix->RowMatrixRowMap());

  const Epetra_RowMatrix* OldMatrix;
  const Epetra_Map* DomainMap = &(Matrix->OperatorDomainMap());
  const Epetra_Map* RangeMap = &(Matrix->OperatorRangeMap());

  for (int level = 1; level <= OverlappingLevel ; ++level) {

    if (OverlappingMatrix)
      OldMatrix = OverlappingMatrix;
    else
      OldMatrix = Matrix;

    Epetra_Import* OverlappingImporter;
    OverlappingImporter = (Epetra_Import*)OldMatrix->RowMatrixImporter();
    int NumMyElements = OverlappingImporter->TargetMap().NumMyElements();

    // need to build an Epetra_Map in this way because Epetra_CrsMatrix
    // requires Epetra_Map and not Epetra_BlockMap

#ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
    if (OverlappingImporter->TargetMap().GlobalIndicesInt()) {
      int* MyGlobalElements = OverlappingImporter->TargetMap().MyGlobalElements();
      OverlappingMap = new Epetra_Map(-1, NumMyElements, MyGlobalElements,
                                      0, Matrix->Comm());
    }
    else
#endif
#ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
    if (OverlappingImporter->TargetMap().GlobalIndicesLongLong()) {
      long long* MyGlobalElements = OverlappingImporter->TargetMap().MyGlobalElements64();
      OverlappingMap = new Epetra_Map((long long) -1, NumMyElements, MyGlobalElements,
                                      0, Matrix->Comm());
    }
    else
#endif
      throw "Ifpack_CreateOverlappingCrsMatrix: GlobalIndices type unknown";

    if (level < OverlappingLevel)
      OverlappingMatrix = new Epetra_CrsMatrix(Copy, *OverlappingMap, 0);
    else
      // On the last iteration, filter out all columns except those that
      // correspond to rows in the graph.  This ensures that the resulting
      // matrix is square.
      OverlappingMatrix = new Epetra_CrsMatrix(Copy, *OverlappingMap,
                                               *OverlappingMap, 0);

    OverlappingMatrix->Import(*OldMatrix, *OverlappingImporter, Insert);
    if (level < OverlappingLevel) {
      OverlappingMatrix->FillComplete(*DomainMap, *RangeMap);
    }
    else {
      OverlappingMatrix->FillComplete(*DomainMap, *RangeMap);
    }

    delete OverlappingMap;

    if (level > 1) {
      delete OldMatrix;
    }
    OverlappingMatrix->FillComplete();

  }

  return(OverlappingMatrix);
}
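A minimal usage sketch, assuming Matrix is a distributed, FillComplete'd Epetra_RowMatrix and that the function above is declared in Ifpack_Utils.h (header name assumed).

#include "Epetra_CrsMatrix.h"
#include "Epetra_RowMatrix.h"
#include "Ifpack_Utils.h"

Epetra_CrsMatrix* BuildOverlapMatrix(const Epetra_RowMatrix& Matrix)
{
  // Returns 0 when the overlap level is 0 or only one process is used; the
  // caller owns (and must eventually delete) the overlapping matrix.
  return Ifpack_CreateOverlappingCrsMatrix(&Matrix, 1);
}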
Example #9
int 
main (int argc, char *argv[])
{
  using Teuchos::ArrayRCP;
  using Teuchos::ArrayView;
  using Teuchos::Comm;
  using Teuchos::CommandLineProcessor;
  using Teuchos::FancyOStream;
  using Teuchos::getFancyOStream;
  using Teuchos::OSTab;
  using Teuchos::ptr;
  using Teuchos::RCP;
  using Teuchos::rcp;
  using Teuchos::rcpFromRef;
  using std::cout;
  using std::endl;

  bool success = true; // May be changed by tests

  Teuchos::oblackholestream blackHole;
  //Teuchos::GlobalMPISession (&argc, &argv, &blackHole);
  MPI_Init (&argc, &argv);

  //
  // Construct communicators, and verify that we are on 4 processors.
  //

  // Construct a Teuchos Comm object.
  RCP<const Comm<int> > teuchosComm = Teuchos::DefaultComm<int>::getComm();
  const int numProcs = teuchosComm->getSize();
  const int pid = teuchosComm->getRank();
  RCP<FancyOStream> pOut = 
    getFancyOStream (rcpFromRef ((pid == 0) ? std::cout : blackHole));
  FancyOStream& out = *pOut;
  // Verify that we are on four processors (which manifests the bug).
  if (teuchosComm->getSize() != 4) {
    out << "This test must be run on four processors.  Exiting ..." << endl;
    return EXIT_FAILURE;
  }

  // We also need an Epetra Comm, so that we can compare Tpetra and
  // Epetra results.
  Epetra_MpiComm epetraComm (MPI_COMM_WORLD);

  //
  // Default values of command-line options.
  //
  bool verbose = false;
  bool printEpetra = false;
  bool printTpetra = false;
  CommandLineProcessor cmdp (false,true);
  //
  // Set command-line options.
  //
  cmdp.setOption ("verbose", "quiet", &verbose, "Print verbose output.");
  // Epetra and Tpetra output will ask the Maps and Import objects to
  // print themselves in distributed, maximally verbose fashion.  It's
  // best to turn on either Epetra or Tpetra, but not both.  Then you
  // can compare their output side by side.
  cmdp.setOption ("printEpetra", "dontPrintEpetra", &printEpetra, 
		  "Print Epetra output (in verbose mode only).");
  cmdp.setOption ("printTpetra", "dontPrintTpetra", &printTpetra, 
		  "Print Tpetra output (in verbose mode only).");
  // Parse command-line options.
  if (cmdp.parse (argc,argv) != CommandLineProcessor::PARSE_SUCCESSFUL) {
    out << "End Result: TEST FAILED" << endl;
    MPI_Finalize ();
    return EXIT_FAILURE;
  }

  if (verbose) {
    out << "Running test on " << numProcs << " process" 
	<< (numProcs != 1 ? "es" : "") << "." << endl;
  }

  // The maps for this problem are derived from a 3D structured mesh.
  // In this example, the dimensions are 4x4x2 and there are 2
  // processors assigned to the first dimension and 2 processors
  // assigned to the second dimension, with no parallel decomposition
  // along the third dimension.  The "owned" arrays represent the
  // one-to-one map, with each array representing a 2x2x2 slice.  If
  // DIMENSIONS == 2, then only the first 4 values will be used,
  // representing a 2x2(x1) slice.
  int owned0[8] = { 0, 1, 4, 5,16,17,20,21};
  int owned1[8] = { 2, 3, 6, 7,18,19,22,23};
  int owned2[8] = { 8, 9,12,13,24,25,28,29};
  int owned3[8] = {10,11,14,15,26,27,30,31};

  // The "overlap" arrays represent the map with communication
  // elements, with each array representing a 3x3x2 slice.  If
  // DIMENSIONS == 2, then only the first 9 values will be used,
  // representing a 3x3(x1) slice.
  int overlap0[18] = {0,1,2,4, 5, 6, 8, 9,10,16,17,18,20,21,22,24,25,26};
  int overlap1[18] = {1,2,3,5, 6, 7, 9,10,11,17,18,19,21,22,23,25,26,27};
  int overlap2[18] = {4,5,6,8, 9,10,12,13,14,20,21,22,24,25,26,28,29,30};
  int overlap3[18] = {5,6,7,9,10,11,13,14,15,21,22,23,25,26,27,29,30,31};

  // Construct the owned and overlap maps for both Epetra and Tpetra.
  int* owned;
  int* overlap;
  if (pid == 0) {
    owned   = owned0;
    overlap = overlap0;
  }
  else if (pid == 1) {
    owned   = owned1;
    overlap = overlap1;
  }
  else if (pid == 2) {
    owned   = owned2;
    overlap = overlap2;
  }
  else {
    owned   = owned3;
    overlap = overlap3;
  }

#if DIMENSIONS == 2
  int ownedSize   = 4;
  int overlapSize = 9;
#elif DIMENSIONS == 3
  int ownedSize   =  8;
  int overlapSize = 18;
#endif

  // Create the two Epetra Maps.  Source for the Import is the owned
  // map; target for the Import is the overlap map.
  Epetra_Map epetraOwnedMap (  -1, ownedSize,   owned,   0, epetraComm);
  Epetra_Map epetraOverlapMap (-1, overlapSize, overlap, 0, epetraComm);

  if (verbose && printEpetra) {
    // Have the Epetra_Map objects describe themselves.
    //
    // Epetra_BlockMap::Print() takes an std::ostream&, and expects
    // all MPI processes to be able to write to it.  (The method
    // handles its own synchronization.)
    out << "Epetra owned map:" << endl;
    epetraOwnedMap.Print (std::cout);
    out << "Epetra overlap map:" << endl;
    epetraOverlapMap.Print (std::cout);
  }

  // Create the two Tpetra Maps.  The "invalid" global element count
  // input tells Tpetra::Map to compute the global number of elements
  // itself.
  const int invalid = Teuchos::OrdinalTraits<int>::invalid();
  RCP<Tpetra::Map<int> > tpetraOwnedMap = 
    rcp (new Tpetra::Map<int> (invalid, ArrayView<int> (owned, ownedSize), 
			       0, teuchosComm));
  tpetraOwnedMap->setObjectLabel ("Owned Map");
  RCP<Tpetra::Map<int> > tpetraOverlapMap =
    rcp (new Tpetra::Map<int> (invalid, ArrayView<int> (overlap, overlapSize),
			       0, teuchosComm));
  tpetraOverlapMap->setObjectLabel ("Overlap Map");

  // In verbose mode, have the Tpetra::Map objects describe themselves.
  if (verbose && printTpetra) {
    Teuchos::EVerbosityLevel verb = Teuchos::VERB_EXTREME;

    // Tpetra::Map::describe() takes a FancyOStream, but expects all
    // MPI processes to be able to write to it.  (The method handles
    // its own synchronization.)
    RCP<FancyOStream> globalOut = getFancyOStream (rcpFromRef (std::cout));
    out << "Tpetra owned map:" << endl;
    {
      OSTab tab (globalOut);
      tpetraOwnedMap->describe (*globalOut, verb);
    }
    out << "Tpetra overlap map:" << endl;
    {
      OSTab tab (globalOut);
      tpetraOverlapMap->describe (*globalOut, verb);
    }
  }

  // Use the owned and overlap maps to construct an importer for both
  // Epetra and Tpetra.
  Epetra_Import       epetraImporter (epetraOverlapMap, epetraOwnedMap  );
  Tpetra::Import<int> tpetraImporter (tpetraOwnedMap  , tpetraOverlapMap);

  // In verbose mode, have the Epetra_Import object describe itself.
  if (verbose && printEpetra) {
    out << "Epetra importer:" << endl;
    // The importer's Print() method takes an std::ostream& and plans
    // to write to it on all MPI processes (handling synchronization
    // itself).
    epetraImporter.Print (std::cout);
    out << endl;
  }

  // In verbose mode, have the Tpetra::Import object describe itself.
  if (verbose && printTpetra) {
    out << "Tpetra importer:" << endl;
    // The importer doesn't implement Teuchos::Describable.  It wants
    // std::cout and plans to write to it on all MPI processes (with
    // its own synchronization).
    tpetraImporter.print (std::cout);
    out << endl;
  }

  // Construct owned and overlap vectors for both Epetra and Tpetra.
  Epetra_Vector epetraOwnedVector   (epetraOwnedMap  );
  Epetra_Vector epetraOverlapVector (epetraOverlapMap);
  Tpetra::Vector<double,int> tpetraOwnedVector   (tpetraOwnedMap  );
  Tpetra::Vector<double,int> tpetraOverlapVector (tpetraOverlapMap);

  // The test is as follows: initialize the owned and overlap vectors
  // with global IDs in the owned regions.  Initialize the overlap
  // vectors to equal -1 in the overlap regions.  Then perform a
  // communication from the owned vectors to the overlap vectors.  The
  // resulting overlap vectors should have global IDs everywhere and
  // all of the -1 values should be overwritten.

  // Initialize.  We cannot assign directly to the Tpetra Vectors;
  // instead, we extract nonconst views and assign to those.  The
  // results aren't guaranteed to be committed to the vector unless
  // the views are released (by assigning Teuchos::null to them).
  epetraOverlapVector.PutScalar(-1);
  tpetraOverlapVector.putScalar(-1);
  ArrayRCP<double> tpetraOwnedArray   = tpetraOwnedVector.getDataNonConst(0);
  ArrayRCP<double> tpetraOverlapArray = tpetraOverlapVector.getDataNonConst(0);
  for (int owned_lid = 0; 
       owned_lid < tpetraOwnedMap->getNodeElementList().size(); 
       ++owned_lid) {
    int gid         = tpetraOwnedMap->getGlobalElement(owned_lid);
    int overlap_lid = tpetraOverlapMap->getLocalElement(gid);
    epetraOwnedVector[owned_lid]     = gid;
    epetraOverlapVector[overlap_lid] = gid;
    tpetraOwnedArray[owned_lid]      = gid;
    tpetraOverlapArray[overlap_lid]  = gid;
  }
  // Make sure that the changes to the Tpetra Vector were committed,
  // by releasing the nonconst views.
  tpetraOwnedArray = Teuchos::null;
  tpetraOverlapArray = Teuchos::null;

  // Test the Epetra and Tpetra Import.
  if (verbose) {
    out << "Testing Import from owned Map to overlap Map:" << endl << endl;
  }
  epetraOverlapVector.Import(  epetraOwnedVector, epetraImporter, Insert);
  tpetraOverlapVector.doImport(tpetraOwnedVector, tpetraImporter, 
			       Tpetra::INSERT);
  // Check the Import results.
  success = countFailures (teuchosComm, epetraOwnedMap, epetraOwnedVector, 
			   epetraOverlapMap, epetraOverlapVector, 
			   tpetraOwnedMap, tpetraOwnedVector, 
			   tpetraOverlapMap, tpetraOverlapVector, verbose);

  const bool testOtherDirections = false;
  if (testOtherDirections) {
    //
    // Reinitialize the Tpetra vectors and test whether Export works.
    //
    tpetraOverlapVector.putScalar(-1);
    tpetraOwnedArray   = tpetraOwnedVector.getDataNonConst(0);
    tpetraOverlapArray = tpetraOverlapVector.getDataNonConst(0);
    for (int owned_lid = 0; 
	 owned_lid < tpetraOwnedMap->getNodeElementList().size(); 
	 ++owned_lid) 
      {
	int gid         = tpetraOwnedMap->getGlobalElement(owned_lid);
	int overlap_lid = tpetraOverlapMap->getLocalElement(gid);
	tpetraOwnedArray[owned_lid]      = gid;
	tpetraOverlapArray[overlap_lid]  = gid;
      }
    // Make sure that the changes to the Tpetra Vector were committed,
    // by releasing the nonconst views.
    tpetraOwnedArray = Teuchos::null;
    tpetraOverlapArray = Teuchos::null;

    // Make a Tpetra Export object, and test the export.
    Tpetra::Export<int> tpetraExporter1 (tpetraOwnedMap, tpetraOverlapMap);
    if (verbose) {
      out << "Testing Export from owned Map to overlap Map:" << endl << endl;
    }
    tpetraOverlapVector.doExport (tpetraOwnedVector, tpetraExporter1, 
				  Tpetra::INSERT);

    // Check the Export results.
    success = countFailures (teuchosComm, epetraOwnedMap, epetraOwnedVector, 
			     epetraOverlapMap, epetraOverlapVector, 
			     tpetraOwnedMap, tpetraOwnedVector, 
			     tpetraOverlapMap, tpetraOverlapVector, verbose);
    //
    // Reinitialize the Tpetra vectors and see what Import in the
    // other direction does.
    //
    tpetraOverlapVector.putScalar(-1);
    tpetraOwnedArray   = tpetraOwnedVector.getDataNonConst(0);
    tpetraOverlapArray = tpetraOverlapVector.getDataNonConst(0);
    for (int owned_lid = 0; 
	 owned_lid < tpetraOwnedMap->getNodeElementList().size(); 
	 ++owned_lid) 
      {
	int gid         = tpetraOwnedMap->getGlobalElement(owned_lid);
	int overlap_lid = tpetraOverlapMap->getLocalElement(gid);
	tpetraOwnedArray[owned_lid]      = gid;
	tpetraOverlapArray[overlap_lid]  = gid;
      }
    // Make sure that the changes to the Tpetra Vector were committed,
    // by releasing the nonconst views.
    tpetraOwnedArray = Teuchos::null;
    tpetraOverlapArray = Teuchos::null;

    if (verbose) {
      out << "Testing Import from overlap Map to owned Map:" << endl << endl;
    }
    Tpetra::Import<int> tpetraImporter2 (tpetraOverlapMap, tpetraOwnedMap);
    tpetraOwnedVector.doImport (tpetraOverlapVector, tpetraImporter2, 
				Tpetra::INSERT);
    // Check the Import results.
    success = countFailures (teuchosComm, epetraOwnedMap, epetraOwnedVector, 
			     epetraOverlapMap, epetraOverlapVector, 
			     tpetraOwnedMap, tpetraOwnedVector, 
			     tpetraOverlapMap, tpetraOverlapVector, verbose);
  } // if testOtherDirections

  out << "End Result: TEST " << (success ? "PASSED" : "FAILED") << endl;
  MPI_Finalize ();
  return success ? EXIT_SUCCESS : EXIT_FAILURE;
}