template <typename Node>
void
Redistributor<Node>::
redistribute_reverse (const ::Tpetra::Vector<double,int,int,Node>& input_vector,
                      ::Tpetra::Vector<double,int,int,Node>& output_vector)
{
  if (!created_importer_) {
    create_importer (input_vector.getMap ());
  }

  // Reverse-mode communication: calling doExport() with an Import object
  // sends data from the importer's target Map back to its source Map.
  output_vector.doExport (input_vector, *importer_, ::Tpetra::INSERT);
}
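// A minimal standalone sketch of the reverse pattern used above, relying on
// Tpetra's DistObject semantics: an Import built as Import(sourceMap,
// targetMap) can be run backwards by calling doExport() with it.  The
// function name and the use of the default Node type are hypothetical.
inline void
reverse_to_source (const Tpetra::Import<int,int>& importer,
                   const Tpetra::Vector<double,int,int>& onTarget,
                   Tpetra::Vector<double,int,int>& onSource)
{
  // onSource lives on the importer's source Map; onTarget on its target Map.
  // doExport() with the Import moves data from target back to source.
  onSource.doExport (onTarget, importer, Tpetra::INSERT);
}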
template <typename Traits, typename ScalarT,
          typename LocalOrdinalT, typename GlobalOrdinalT, typename NodeT>
void
TpetraLinearObjFactory<Traits,ScalarT,LocalOrdinalT,GlobalOrdinalT,NodeT>::
globalToGhostTpetraVector (const Tpetra::Vector<ScalarT,LocalOrdinalT,GlobalOrdinalT,NodeT>& in,
                           Tpetra::Vector<ScalarT,LocalOrdinalT,GlobalOrdinalT,NodeT>& out) const
{
  using Teuchos::RCP;

  // Do the global distribution: import the owned (globally unique)
  // entries of "in" into the ghosted layout of "out".
  RCP<ImportType> importer = getGhostedImport ();
  out.putScalar (0.0);
  out.doImport (in, *importer, Tpetra::INSERT);
}
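// A minimal standalone sketch of the owned-to-ghosted pattern that the
// factory method above wraps.  The map arguments, the helper's name, and
// building the Import on the fly (rather than caching it, as
// getGhostedImport() does) are assumptions for illustration.
template <typename ST, typename LO, typename GO, typename NT>
void
ownedToGhosted (const Teuchos::RCP<const Tpetra::Map<LO,GO,NT> >& ownedMap,
                const Teuchos::RCP<const Tpetra::Map<LO,GO,NT> >& ghostedMap,
                const Tpetra::Vector<ST,LO,GO,NT>& owned,
                Tpetra::Vector<ST,LO,GO,NT>& ghosted)
{
  // The source of an Import is the one-to-one (owned) Map; the target is
  // the overlapping (ghosted) Map.
  Tpetra::Import<LO,GO,NT> importer (ownedMap, ghostedMap);
  ghosted.putScalar (Teuchos::ScalarTraits<ST>::zero ());
  ghosted.doImport (owned, importer, Tpetra::INSERT);
}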
// =============================================================================
void
mesh_tri::
compute_control_volumes_t_ (Tpetra::Vector<double,int,int>& cv_overlap) const
{
  // Get the owned cells (2D entities) from MOAB.
  moab::Range cells = this->mbw_->get_entities_by_dimension (0, 2);

  Teuchos::ArrayRCP<double> cv_data = cv_overlap.getDataNonConst ();

  //const vector_fieldType & coords_field = get_node_field("coordinates");
  // std::vector<double> coords;
  // ierr = mcomm_->getMoab()->get_vertex_coordinates(coords);
  // TEUCHOS_ASSERT_EQUALITY(ierr, moab::MB_SUCCESS);

  // Calculate the contributions to the finite volumes cell by cell.
  for (unsigned long cell : cells) {
    const auto conn = this->mbw_->get_connectivity (cell);
    const auto splitting = this->compute_triangle_splitting_ (conn);
    for (int i = 0; i < 3; i++) {
      cv_data[this->local_index (conn[i])] += splitting[i];
    }
  }
}
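// The vector filled above lives on an overlapping map, so a node's
// control-volume contributions may be split across processes.  A hedged
// sketch of the usual follow-up step (not shown in this class): sum the
// overlapping contributions into a one-to-one ("owned") vector with an
// Export in ADD mode.  The helper's name and the owned vector are
// assumptions for illustration.
inline void
sum_into_owned (const Tpetra::Vector<double,int,int>& cv_overlap,
                Tpetra::Vector<double,int,int>& cv_owned)
{
  // Export from the overlapping map to the one-to-one map, adding the
  // partial contributions from all owning processes.
  Tpetra::Export<int,int> exporter (cv_overlap.getMap (), cv_owned.getMap ());
  cv_owned.putScalar (0.0);
  cv_owned.doExport (cv_overlap, exporter, Tpetra::ADD);
}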
template <class Scalar, class Ordinal>
Scalar
power_method (const Teuchos::RCP<const Tpetra::Operator<Scalar,Ordinal> >& A,
              size_t niters,
              typename Teuchos::ScalarTraits<Scalar>::magnitudeType tolerance,
              bool verbose)
{
  typedef Scalar scalar_type;
  typedef Teuchos::ScalarTraits<scalar_type> STS;
  typedef typename STS::magnitudeType magnitude_type;
  typedef Teuchos::ScalarTraits<magnitude_type> STM;

  const bool NO_INITIALIZE_TO_ZERO = false;
  // Create three vectors; do not bother initializing z to zero, as we
  // will fill it with random numbers below.
  Tpetra::Vector<scalar_type, Ordinal> z (A->getRangeMap (), NO_INITIALIZE_TO_ZERO);
  // mfh 26 Nov 2012: For some reason, not initializing r to zero
  // causes the norm of r to be NaN in the loop below.  This fixes
  // a test failure on my workstation.
  Tpetra::Vector<scalar_type, Ordinal> q (A->getRangeMap ()); //, NO_INITIALIZE_TO_ZERO);
  Tpetra::Vector<scalar_type, Ordinal> r (A->getRangeMap ()); //, NO_INITIALIZE_TO_ZERO);

  // Fill z with random numbers.
  z.randomize ();

  // Variables needed for the iteration.
  scalar_type lambda = STS::zero ();
  magnitude_type normz, residual = STM::zero ();

  // Power iteration.
  for (size_t iter = 0; iter < niters; ++iter) {
    normz = z.norm2 ();               // Compute the 2-norm of z
    q.scale (STS::one () / normz, z); // Compute q = z / normz
    A->apply (q, z);                  // Compute z = A*q
    lambda = q.dot (z);               // Approximate maximum eigenvalue: lambda = dot(q, z)
    if (iter % 100 == 0 || iter + 1 == niters) {
      r.update (STS::one (), z, -lambda, q, STS::zero ()); // Compute A*q - lambda*q
      magnitude_type r_norm = r.norm2 ();
      residual = STS::magnitude (r_norm / lambda);
      if (verbose) {
        std::cout << "Iter = " << iter << ":  lambda = " << lambda
                  << ", ||A*q - lambda*q||_2 = " << r_norm
                  << ", ||A*q - lambda*q||_2 / lambda = " << residual
                  << std::endl;
      }
    }
    if (residual < tolerance) {
      break;
    }
  }
  return lambda;
}
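// A hedged usage sketch for power_method().  The matrix construction is
// hypothetical: it builds a simple diagonal test operator via
// Tpetra::CrsMatrix whose largest eigenvalue is known, then runs the
// iteration.  The helper's name and the iteration parameters are
// assumptions, not part of the function above.
template <class Scalar, class Ordinal>
Scalar
run_power_method_example (const Teuchos::RCP<const Teuchos::Comm<int> >& comm)
{
  typedef Tpetra::Map<Ordinal> map_type;
  typedef Tpetra::CrsMatrix<Scalar,Ordinal> crs_matrix_type;

  const Tpetra::global_size_t numGlobal = 100;
  Teuchos::RCP<const map_type> map =
    Teuchos::rcp (new map_type (numGlobal, 0, comm));

  // Diagonal matrix with entries 1, 2, ..., numGlobal; its largest
  // eigenvalue is numGlobal, so the power method should converge to it.
  Teuchos::RCP<crs_matrix_type> A =
    Teuchos::rcp (new crs_matrix_type (map, 1));
  for (size_t i = 0; i < map->getNodeNumElements (); ++i) {
    const Ordinal gid = map->getGlobalElement (static_cast<Ordinal> (i));
    const Scalar val = static_cast<Scalar> (gid + 1);
    A->insertGlobalValues (gid, Teuchos::tuple<Ordinal> (gid),
                           Teuchos::tuple<Scalar> (val));
  }
  A->fillComplete ();

  return power_method<Scalar,Ordinal> (A, 500, 1.0e-6, false);
}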
template <typename Node>
void
Redistributor<Node>::
redistribute (const ::Tpetra::Vector<double,int,int,Node>& inputVector,
              ::Tpetra::Vector<double,int,int,Node>*& outputVector)
{
  if (!created_importer_) {
    create_importer (inputVector.getMap ());
  }

  // Allocate the output vector on the target map and import into it.
  outputVector = new ::Tpetra::Vector<double,int,int,Node> (*target_map_);
  outputVector->doImport (inputVector, *importer_, ::Tpetra::INSERT);
}
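// A hedged usage sketch showing how redistribute() and
// redistribute_reverse() pair up in a repartitioning round trip.  The helper
// and the vector v are hypothetical; only the two Redistributor methods come
// from the class above.  Note that redistribute() allocates the output
// vector with new, so the caller owns and must delete it.
template <typename Node>
void
round_trip_example (Redistributor<Node>& redist,
                    ::Tpetra::Vector<double,int,int,Node>& v)
{
  ::Tpetra::Vector<double,int,int,Node>* balanced = NULL;
  redist.redistribute (v, balanced);           // forward: source map -> target map
  // ... operate on *balanced in the new distribution ...
  redist.redistribute_reverse (*balanced, v);  // reverse: target map -> source map
  delete balanced;                             // redistribute() allocated it
}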
template <typename lno_t, typename gno_t>
size_t
findUniqueGids (Tpetra::MultiVector<gno_t, lno_t, gno_t>& keys,
                Tpetra::Vector<gno_t, lno_t, gno_t>& gids)
{
  // Input:  Tpetra MultiVector of keys; key length = numVectors().
  //         May contain duplicate keys within a processor.
  //         May contain duplicate keys across processors.
  // Input:  Empty Tpetra Vector with the same map, for holding the results.
  // Output: Filled gids vector, containing unique global numbers for
  //         each unique key.  Global numbers are in range [0,#UniqueKeys).

  size_t num_keys = keys.getLocalLength();
  size_t num_entries = keys.getNumVectors();

#ifdef HAVE_ZOLTAN2_MPI
  MPI_Comm mpicomm = Teuchos::getRawMpiComm(*(keys.getMap()->getComm()));
#else
  // Zoltan's siMPI will be used here.
  {
    int flag;
    MPI_Initialized(&flag);
    if (!flag) {
      int narg = 0;
      char **argv = NULL;
      MPI_Init(&narg, &argv);
    }
  }
  MPI_Comm mpicomm = MPI_COMM_WORLD;  // Will get MPI_COMM_WORLD from siMPI
#endif

  int num_gid = sizeof(gno_t)/sizeof(ZOLTAN_ID_TYPE) * num_entries;
  int num_user = sizeof(gno_t);

  // Buffer the keys for Zoltan_DD.
  Teuchos::ArrayRCP<const gno_t> *tmpKeyVecs =
    new Teuchos::ArrayRCP<const gno_t>[num_entries];
  for (size_t v = 0; v < num_entries; v++) tmpKeyVecs[v] = keys.getData(v);

  ZOLTAN_ID_PTR ddkeys = new ZOLTAN_ID_TYPE[num_gid * num_keys];
  size_t idx = 0;
  for (size_t i = 0; i < num_keys; i++) {
    for (size_t v = 0; v < num_entries; v++) {
      ZOLTAN_ID_PTR ddkey = &(ddkeys[idx]);
      TPL_Traits<ZOLTAN_ID_PTR,gno_t>::ASSIGN(ddkey, tmpKeyVecs[v][i]);
      idx += TPL_Traits<ZOLTAN_ID_PTR,gno_t>::NUM_ID;
    }
  }
  delete [] tmpKeyVecs;

  // Allocate memory for the result.
  char *ddnewgids = new char[num_user * num_keys];

  // Compute the new GIDs.
  size_t nUnique = findUniqueGidsCommon<gno_t>(num_keys, num_gid,
                                               ddkeys, ddnewgids, mpicomm);

  // Copy the result into the output vector.
  gno_t *result = (gno_t *)ddnewgids;
  for (size_t i = 0; i < num_keys; i++)
    gids.replaceLocalValue(i, result[i]);

  // Clean up.
  delete [] ddkeys;
  delete [] ddnewgids;

  return nUnique;
}
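// A hedged usage sketch for findUniqueGids(): assign consecutive global
// numbers to unique two-entry keys.  The local key count, the way the keys
// are filled, and the helper's name are assumptions for illustration only.
template <typename lno_t, typename gno_t>
void
findUniqueGids_example (const Teuchos::RCP<const Teuchos::Comm<int> >& comm)
{
  typedef Tpetra::Map<lno_t,gno_t> map_type;

  const size_t nLocalKeys = 5;
  Teuchos::RCP<const map_type> map = Teuchos::rcp (
    new map_type (Teuchos::OrdinalTraits<Tpetra::global_size_t>::invalid (),
                  nLocalKeys, 0, comm));

  // Two-entry keys; duplicates are allowed within and across ranks.
  Tpetra::MultiVector<gno_t,lno_t,gno_t> keys (map, 2);
  for (size_t i = 0; i < nLocalKeys; ++i) {
    keys.replaceLocalValue (i, 0, static_cast<gno_t> (i % 3)); // first key entry
    keys.replaceLocalValue (i, 1, static_cast<gno_t> (i % 2)); // second key entry
  }

  // On return, gids[i] holds the unique global number of key i, in the
  // range [0, #UniqueKeys).
  Tpetra::Vector<gno_t,lno_t,gno_t> gids (map);
  size_t nUnique = findUniqueGids<lno_t,gno_t> (keys, gids);
  (void) nUnique;
}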
int
main (int argc, char *argv[])
{
  using Teuchos::ArrayRCP;
  using Teuchos::ArrayView;
  using Teuchos::Comm;
  using Teuchos::CommandLineProcessor;
  using Teuchos::FancyOStream;
  using Teuchos::getFancyOStream;
  using Teuchos::OSTab;
  using Teuchos::ptr;
  using Teuchos::RCP;
  using Teuchos::rcp;
  using Teuchos::rcpFromRef;
  using std::cout;
  using std::endl;

  bool success = true; // May be changed by tests

  Teuchos::oblackholestream blackHole;
  //Teuchos::GlobalMPISession (&argc, &argv, &blackHole);
  MPI_Init (&argc, &argv);

  //
  // Construct communicators, and verify that we are on 4 processors.
  //

  // Construct a Teuchos Comm object.
  RCP<const Comm<int> > teuchosComm = Teuchos::DefaultComm<int>::getComm();
  const int numProcs = teuchosComm->getSize();
  const int pid = teuchosComm->getRank();
  RCP<FancyOStream> pOut =
    getFancyOStream (rcpFromRef ((pid == 0) ? std::cout : blackHole));
  FancyOStream& out = *pOut;

  // Verify that we are on four processors (which manifests the bug).
  if (teuchosComm->getSize() != 4) {
    out << "This test must be run on four processors.  Exiting ..." << endl;
    return EXIT_FAILURE;
  }

  // We also need an Epetra Comm, so that we can compare Tpetra and
  // Epetra results.
  Epetra_MpiComm epetraComm (MPI_COMM_WORLD);

  //
  // Default values of command-line options.
  //
  bool verbose = false;
  bool printEpetra = false;
  bool printTpetra = false;
  CommandLineProcessor cmdp (false, true);

  //
  // Set command-line options.
  //
  cmdp.setOption ("verbose", "quiet", &verbose, "Print verbose output.");
  // Epetra and Tpetra output will ask the Maps and Import objects to
  // print themselves in distributed, maximally verbose fashion.  It's
  // best to turn on either Epetra or Tpetra, but not both.  Then you
  // can compare their output side by side.
  cmdp.setOption ("printEpetra", "dontPrintEpetra", &printEpetra,
                  "Print Epetra output (in verbose mode only).");
  cmdp.setOption ("printTpetra", "dontPrintTpetra", &printTpetra,
                  "Print Tpetra output (in verbose mode only).");
  // Parse command-line options.
  if (cmdp.parse (argc, argv) != CommandLineProcessor::PARSE_SUCCESSFUL) {
    out << "End Result: TEST FAILED" << endl;
    MPI_Finalize ();
    return EXIT_FAILURE;
  }

  if (verbose) {
    out << "Running test on " << numProcs << " process"
        << (numProcs != 1 ? "es" : "") << "." << endl;
  }

  // The maps for this problem are derived from a 3D structured mesh.
  // In this example, the dimensions are 4x4x2 and there are 2
  // processors assigned to the first dimension and 2 processors
  // assigned to the second dimension, with no parallel decomposition
  // along the third dimension.  The "owned" arrays represent the
  // one-to-one map, with each array representing a 2x2x2 slice.  If
  // DIMENSIONS == 2, then only the first 4 values will be used,
  // representing a 2x2(x1) slice.
  int owned0[8] = { 0,  1,  4,  5, 16, 17, 20, 21};
  int owned1[8] = { 2,  3,  6,  7, 18, 19, 22, 23};
  int owned2[8] = { 8,  9, 12, 13, 24, 25, 28, 29};
  int owned3[8] = {10, 11, 14, 15, 26, 27, 30, 31};

  // The "overlap" arrays represent the map with communication
  // elements, with each array representing a 3x3x2 slice.  If
  // DIMENSIONS == 2, then only the first 9 values will be used,
  // representing a 3x3(x1) slice.
  int overlap0[18] = {0, 1, 2, 4,  5,  6,  8,  9, 10, 16, 17, 18, 20, 21, 22, 24, 25, 26};
  int overlap1[18] = {1, 2, 3, 5,  6,  7,  9, 10, 11, 17, 18, 19, 21, 22, 23, 25, 26, 27};
  int overlap2[18] = {4, 5, 6, 8,  9, 10, 12, 13, 14, 20, 21, 22, 24, 25, 26, 28, 29, 30};
  int overlap3[18] = {5, 6, 7, 9, 10, 11, 13, 14, 15, 21, 22, 23, 25, 26, 27, 29, 30, 31};

  // Construct the owned and overlap maps for both Epetra and Tpetra.
  int* owned;
  int* overlap;
  if (pid == 0) {
    owned = owned0;
    overlap = overlap0;
  }
  else if (pid == 1) {
    owned = owned1;
    overlap = overlap1;
  }
  else if (pid == 2) {
    owned = owned2;
    overlap = overlap2;
  }
  else {
    owned = owned3;
    overlap = overlap3;
  }

#if DIMENSIONS == 2
  int ownedSize = 4;
  int overlapSize = 9;
#elif DIMENSIONS == 3
  int ownedSize = 8;
  int overlapSize = 18;
#endif

  // Create the two Epetra Maps.  Source for the Import is the owned
  // map; target for the Import is the overlap map.
  Epetra_Map epetraOwnedMap (  -1, ownedSize,   owned,   0, epetraComm);
  Epetra_Map epetraOverlapMap (-1, overlapSize, overlap, 0, epetraComm);

  if (verbose && printEpetra) {
    // Have the Epetra_Map objects describe themselves.
    //
    // Epetra_BlockMap::Print() takes an std::ostream&, and expects
    // all MPI processes to be able to write to it.  (The method
    // handles its own synchronization.)
    out << "Epetra owned map:" << endl;
    epetraOwnedMap.Print (std::cout);
    out << "Epetra overlap map:" << endl;
    epetraOverlapMap.Print (std::cout);
  }

  // Create the two Tpetra Maps.  The "invalid" global element count
  // input tells Tpetra::Map to compute the global number of elements
  // itself.
  const int invalid = Teuchos::OrdinalTraits<int>::invalid();
  RCP<Tpetra::Map<int> > tpetraOwnedMap =
    rcp (new Tpetra::Map<int> (invalid, ArrayView<int> (owned, ownedSize),
                               0, teuchosComm));
  tpetraOwnedMap->setObjectLabel ("Owned Map");
  RCP<Tpetra::Map<int> > tpetraOverlapMap =
    rcp (new Tpetra::Map<int> (invalid, ArrayView<int> (overlap, overlapSize),
                               0, teuchosComm));
  tpetraOverlapMap->setObjectLabel ("Overlap Map");

  // In verbose mode, have the Tpetra::Map objects describe themselves.
  if (verbose && printTpetra) {
    Teuchos::EVerbosityLevel verb = Teuchos::VERB_EXTREME;

    // Tpetra::Map::describe() takes a FancyOStream, but expects all
    // MPI processes to be able to write to it.  (The method handles
    // its own synchronization.)
    RCP<FancyOStream> globalOut = getFancyOStream (rcpFromRef (std::cout));
    out << "Tpetra owned map:" << endl;
    {
      OSTab tab (globalOut);
      tpetraOwnedMap->describe (*globalOut, verb);
    }
    out << "Tpetra overlap map:" << endl;
    {
      OSTab tab (globalOut);
      tpetraOverlapMap->describe (*globalOut, verb);
    }
  }

  // Use the owned and overlap maps to construct an importer for both
  // Epetra and Tpetra.
  Epetra_Import epetraImporter (epetraOverlapMap, epetraOwnedMap  );
  Tpetra::Import<int> tpetraImporter (tpetraOwnedMap, tpetraOverlapMap);

  // In verbose mode, have the Epetra_Import object describe itself.
  if (verbose && printEpetra) {
    out << "Epetra importer:" << endl;
    // The importer's Print() method takes an std::ostream& and plans
    // to write to it on all MPI processes (handling synchronization
    // itself).
    epetraImporter.Print (std::cout);
    out << endl;
  }

  // In verbose mode, have the Tpetra::Import object describe itself.
  if (verbose && printTpetra) {
    out << "Tpetra importer:" << endl;
    // The importer doesn't implement Teuchos::Describable.  It wants
    // std::cout and plans to write to it on all MPI processes (with
    // its own synchronization).
    tpetraImporter.print (std::cout);
    out << endl;
  }

  // Construct owned and overlap vectors for both Epetra and Tpetra.
  Epetra_Vector epetraOwnedVector (epetraOwnedMap  );
  Epetra_Vector epetraOverlapVector (epetraOverlapMap);
  Tpetra::Vector<double,int> tpetraOwnedVector (tpetraOwnedMap  );
  Tpetra::Vector<double,int> tpetraOverlapVector (tpetraOverlapMap);

  // The test is as follows: initialize the owned and overlap vectors
  // with global IDs in the owned regions.
  // Initialize the overlap vectors to equal -1 in the overlap regions.
  // Then perform a communication from the owned vectors to the overlap
  // vectors.  The resulting overlap vectors should have global IDs
  // everywhere and all of the -1 values should be overwritten.

  // Initialize.  We cannot assign directly to the Tpetra Vectors;
  // instead, we extract nonconst views and assign to those.  The
  // results aren't guaranteed to be committed to the vector unless
  // the views are released (by assigning Teuchos::null to them).
  epetraOverlapVector.PutScalar(-1);
  tpetraOverlapVector.putScalar(-1);
  ArrayRCP<double> tpetraOwnedArray = tpetraOwnedVector.getDataNonConst(0);
  ArrayRCP<double> tpetraOverlapArray = tpetraOverlapVector.getDataNonConst(0);
  for (int owned_lid = 0;
       owned_lid < tpetraOwnedMap->getNodeElementList().size();
       ++owned_lid) {
    int gid = tpetraOwnedMap->getGlobalElement(owned_lid);
    int overlap_lid = tpetraOverlapMap->getLocalElement(gid);
    epetraOwnedVector[owned_lid] = gid;
    epetraOverlapVector[overlap_lid] = gid;
    tpetraOwnedArray[owned_lid] = gid;
    tpetraOverlapArray[overlap_lid] = gid;
  }
  // Make sure that the changes to the Tpetra Vector were committed,
  // by releasing the nonconst views.
  tpetraOwnedArray = Teuchos::null;
  tpetraOverlapArray = Teuchos::null;

  // Test the Epetra and Tpetra Import.
  if (verbose) {
    out << "Testing Import from owned Map to overlap Map:" << endl << endl;
  }
  epetraOverlapVector.Import (epetraOwnedVector, epetraImporter, Insert);
  tpetraOverlapVector.doImport (tpetraOwnedVector, tpetraImporter,
                                Tpetra::INSERT);

  // Check the Import results.
  success = countFailures (teuchosComm, epetraOwnedMap, epetraOwnedVector,
                           epetraOverlapMap, epetraOverlapVector,
                           tpetraOwnedMap, tpetraOwnedVector,
                           tpetraOverlapMap, tpetraOverlapVector, verbose);

  const bool testOtherDirections = false;
  if (testOtherDirections) {
    //
    // Reinitialize the Tpetra vectors and test whether Export works.
    //
    tpetraOverlapVector.putScalar(-1);
    tpetraOwnedArray = tpetraOwnedVector.getDataNonConst(0);
    tpetraOverlapArray = tpetraOverlapVector.getDataNonConst(0);
    for (int owned_lid = 0;
         owned_lid < tpetraOwnedMap->getNodeElementList().size();
         ++owned_lid) {
      int gid = tpetraOwnedMap->getGlobalElement(owned_lid);
      int overlap_lid = tpetraOverlapMap->getLocalElement(gid);
      tpetraOwnedArray[owned_lid] = gid;
      tpetraOverlapArray[overlap_lid] = gid;
    }
    // Make sure that the changes to the Tpetra Vector were committed,
    // by releasing the nonconst views.
    tpetraOwnedArray = Teuchos::null;
    tpetraOverlapArray = Teuchos::null;

    // Make a Tpetra Export object, and test the Export.
    Tpetra::Export<int> tpetraExporter1 (tpetraOwnedMap, tpetraOverlapMap);
    if (verbose) {
      out << "Testing Export from owned Map to overlap Map:" << endl << endl;
    }
    tpetraOverlapVector.doExport (tpetraOwnedVector, tpetraExporter1,
                                  Tpetra::INSERT);

    // Check the Export results.
    success = countFailures (teuchosComm, epetraOwnedMap, epetraOwnedVector,
                             epetraOverlapMap, epetraOverlapVector,
                             tpetraOwnedMap, tpetraOwnedVector,
                             tpetraOverlapMap, tpetraOverlapVector, verbose);

    //
    // Reinitialize the Tpetra vectors and see what Import in the
    // other direction does.
    //
    tpetraOverlapVector.putScalar(-1);
    tpetraOwnedArray = tpetraOwnedVector.getDataNonConst(0);
    tpetraOverlapArray = tpetraOverlapVector.getDataNonConst(0);
    for (int owned_lid = 0;
         owned_lid < tpetraOwnedMap->getNodeElementList().size();
         ++owned_lid) {
      int gid = tpetraOwnedMap->getGlobalElement(owned_lid);
      int overlap_lid = tpetraOverlapMap->getLocalElement(gid);
      tpetraOwnedArray[owned_lid] = gid;
      tpetraOverlapArray[overlap_lid] = gid;
    }
    // Make sure that the changes to the Tpetra Vector were committed,
    // by releasing the nonconst views.
    tpetraOwnedArray = Teuchos::null;
    tpetraOverlapArray = Teuchos::null;

    if (verbose) {
      out << "Testing Import from overlap Map to owned Map:" << endl << endl;
    }
    Tpetra::Import<int> tpetraImporter2 (tpetraOverlapMap, tpetraOwnedMap);
    tpetraOwnedVector.doImport (tpetraOverlapVector, tpetraImporter2,
                                Tpetra::INSERT);

    // Check the Import results.
    success = countFailures (teuchosComm, epetraOwnedMap, epetraOwnedVector,
                             epetraOverlapMap, epetraOverlapVector,
                             tpetraOwnedMap, tpetraOwnedVector,
                             tpetraOverlapMap, tpetraOverlapVector, verbose);
  } // if testOtherDirections

  out << "End Result: TEST " << (success ? "PASSED" : "FAILED") << endl;
  MPI_Finalize ();
  return success ? EXIT_SUCCESS : EXIT_FAILURE;
}