// ============================================================================
void
BorderingHelpers::
dissect(const Tpetra::MultiVector<double,int,int> & x,
        Tpetra::MultiVector<double,int,int> & xSmall,
        double * lambda
       )
{
#ifndef NDEBUG
  TEUCHOS_ASSERT_EQUALITY(x.getNumVectors(), xSmall.getNumVectors());
  // Make sure the maps are matching.
  std::shared_ptr<const Tpetra::Map<int,int>> extendedMap =
    nosh::BorderingHelpers::extendMapBy1(xSmall.getMap());
  TEUCHOS_ASSERT(x.getMap()->isSameAs(*extendedMap));
#endif

  Tpetra::Import<int,int> importer(x.getMap(), xSmall.getMap());

  // Strip off the phase constraint variable.
  xSmall.doImport(x, importer, Tpetra::INSERT);

  // TODO Check if we need lambda on all procs.
  if (x.getMap()->getComm()->getRank() == 0) {
    const size_t n = x.getLocalLength();
    for (size_t k = 0; k < x.getNumVectors(); k++)
      lambda[k] = x.getData(k)[n - 1];
  }

  return;
}
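// ----------------------------------------------------------------------------
// Hedged usage sketch (illustrative, not part of the original file): how
// dissect() might be called.  Assumes BorderingHelpers lives in namespace nosh
// (as the extendMapBy1() call above suggests), that `x` was built on the
// extended map, that `xSmall` was built on the original map, and that the
// relevant nosh/Tpetra headers are already included.
#include <vector>

void exampleDissect(const Tpetra::MultiVector<double,int,int> & x,
                    Tpetra::MultiVector<double,int,int> & xSmall)
{
  // One lambda slot per column of x (hypothetical buffer name).
  std::vector<double> lambda(x.getNumVectors());
  nosh::BorderingHelpers::dissect(x, xSmall, lambda.data());
  // On proc 0, lambda[k] now holds the phase-constraint entry of column k.
}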
template<class MatrixType>
void
Chebyshev<MatrixType>::
apply (const Tpetra::MultiVector<scalar_type, local_ordinal_type, global_ordinal_type, node_type>& X,
       Tpetra::MultiVector<scalar_type, local_ordinal_type, global_ordinal_type, node_type>& Y,
       Teuchos::ETransp mode,
       scalar_type alpha,
       scalar_type beta) const
{
  {
    Teuchos::TimeMonitor timeMon (*Time_);

    // compute() calls initialize() if it hasn't already been called.
    // Thus, we only need to check isComputed().
    TEUCHOS_TEST_FOR_EXCEPTION(
      ! isComputed(), std::runtime_error,
      "Ifpack2::Chebyshev::apply(): You must call the compute() method before "
      "you may call apply().");

    TEUCHOS_TEST_FOR_EXCEPTION(
      X.getNumVectors() != Y.getNumVectors(), std::runtime_error,
      "Ifpack2::Chebyshev::apply(): X and Y must have the same number of "
      "columns.  X.getNumVectors() = " << X.getNumVectors() << " != "
      << "Y.getNumVectors() = " << Y.getNumVectors() << ".");

#ifdef HAVE_TEUCHOS_DEBUG
    {
      // The relation 'isSameAs' is transitive.  It's also a collective,
      // so we don't have to do a "shared" test for exception (i.e., a
      // global reduction on the test value).
      TEUCHOS_TEST_FOR_EXCEPTION(
        ! X.getMap ()->isSameAs (*getDomainMap ()), std::runtime_error,
        "Ifpack2::Chebyshev: The domain Map of the matrix must be the same as "
        "the Map of the input vector(s) X.");
      TEUCHOS_TEST_FOR_EXCEPTION(
        ! Y.getMap ()->isSameAs (*getRangeMap ()), std::runtime_error,
        "Ifpack2::Chebyshev: The range Map of the matrix must be the same as "
        "the Map of the output vector(s) Y.");
    }
#endif // HAVE_TEUCHOS_DEBUG

    applyImpl (X, Y, mode, alpha, beta);
  }
  ++NumApply_;
  ApplyTime_ += Time_->totalElapsedTime ();
}
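// ----------------------------------------------------------------------------
// Hedged usage sketch (illustrative, not part of the original file): the usual
// Ifpack2 call sequence around apply().  The matrix `A`, the ordinal choices
// <double,int,int>, and the "chebyshev: degree" value are assumptions for the
// example; only initialize()/compute()/apply() are required by the code above.
#include <Ifpack2_Chebyshev.hpp>
#include <Teuchos_ParameterList.hpp>
#include <Tpetra_CrsMatrix.hpp>

void exampleChebyshevApply(
    const Teuchos::RCP<const Tpetra::CrsMatrix<double,int,int>>& A,
    const Tpetra::MultiVector<double,int,int>& X,
    Tpetra::MultiVector<double,int,int>& Y)
{
  typedef Tpetra::RowMatrix<double,int,int> row_matrix_type;
  Ifpack2::Chebyshev<row_matrix_type> prec(A);

  Teuchos::ParameterList params;
  params.set("chebyshev: degree", 3);  // polynomial degree (illustrative)
  prec.setParameters(params);

  prec.initialize();
  prec.compute();    // must precede apply(), as the exception check enforces
  prec.apply(X, Y);  // Y := approximate inverse of A applied to X
}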
// ============================================================================
void
BorderingHelpers::
merge(const Tpetra::MultiVector<double,int,int> & x,
      const double * lambda,
      Tpetra::MultiVector<double,int,int> & out
     )
{
#ifndef NDEBUG
  // Check if the maps are matching.
  std::shared_ptr<const Tpetra::Map<int,int>> extendedMap =
    nosh::BorderingHelpers::extendMapBy1(x.getMap());
  TEUCHOS_ASSERT(out.getMap()->isSameAs(*extendedMap));
#endif

  Tpetra::Import<int,int> importer(x.getMap(), out.getMap());
  out.doImport(x, importer, Tpetra::INSERT);

  // Set last entry on proc 0.
  if (x.getMap()->getComm()->getRank() == 0) {
    const size_t numMyElems = x.getLocalLength();
    for (size_t k = 0; k < x.getNumVectors(); k++)
      out.getDataNonConst(k)[numMyElems] = lambda[k];
  }

  return;
}
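// ----------------------------------------------------------------------------
// Hedged usage sketch (illustrative, not part of the original file): merge()
// as the counterpart of dissect() above.  Assumes `out` was constructed on the
// map returned by extendMapBy1(xSmall.getMap()) and that lambda holds one
// value per column.
#include <vector>

void exampleMerge(const Tpetra::MultiVector<double,int,int> & xSmall,
                  const std::vector<double> & lambda,
                  Tpetra::MultiVector<double,int,int> & out)
{
  // out receives xSmall's entries plus lambda[k] as the extra (last local)
  // entry of column k on proc 0.
  nosh::BorderingHelpers::merge(xSmall, lambda.data(), out);
}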
template<class MatrixType>
void
IdentitySolver<MatrixType>::
apply (const Tpetra::MultiVector<scalar_type,local_ordinal_type,global_ordinal_type,node_type>& X,
       Tpetra::MultiVector<scalar_type,local_ordinal_type,global_ordinal_type,node_type>& Y,
       Teuchos::ETransp /*mode*/,
       scalar_type alpha,
       scalar_type beta) const
{
  using Teuchos::RCP;
  typedef Teuchos::ScalarTraits<scalar_type> STS;
  typedef Tpetra::MultiVector<scalar_type, local_ordinal_type,
                              global_ordinal_type, node_type> MV;

  TEUCHOS_TEST_FOR_EXCEPTION(
    ! isComputed (), std::runtime_error,
    "Ifpack2::IdentitySolver::apply: If compute() has not yet been called, "
    "or if you have changed the matrix via setMatrix(), "
    "you must call compute() before you may call this method.");

  // "Identity solver" does what it says: it's the identity operator.  We
  // have to Export if the domain and range Maps are not the same.
  // Otherwise, this operator would be a permutation, not the identity.
  if (export_.is_null ()) {
    Y.update (alpha, X, beta);
  }
  else {
    if (alpha == STS::one () && beta == STS::zero ()) { // the common case
      Y.doExport (X, *export_, Tpetra::REPLACE);
    }
    else {
      // We know that the domain and range Maps are compatible.  First
      // bring X into the range Map via Export.  Then compute in place
      // in Y.
      MV X_tmp (Y.getMap (), Y.getNumVectors ());
      X_tmp.doExport (X, *export_, Tpetra::REPLACE);
      Y.update (alpha, X_tmp, beta);
    }
  }
  ++numApply_;
}
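// ----------------------------------------------------------------------------
// Hedged sketch (illustrative, not part of the original file) of the Export
// path taken above when alpha/beta are not 1/0: X is first moved onto Y's Map
// so the operator stays the identity rather than a permutation.  The scalar
// and ordinal types are assumptions for the example.
#include <Tpetra_Export.hpp>
#include <Tpetra_MultiVector.hpp>

void exampleIdentityWithExport(const Tpetra::MultiVector<double,int,int>& X,
                               Tpetra::MultiVector<double,int,int>& Y,
                               const double alpha, const double beta)
{
  // Export from X's (domain) Map to Y's (range) Map; the Maps must be
  // compatible, i.e. describe the same global index set.
  Tpetra::Export<int,int> exporter(X.getMap(), Y.getMap());
  Tpetra::MultiVector<double,int,int> X_tmp(Y.getMap(), Y.getNumVectors());
  X_tmp.doExport(X, exporter, Tpetra::REPLACE);
  Y.update(alpha, X_tmp, beta);  // Y := beta*Y + alpha*X_tmp
}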
template<class MatrixType, class ContainerType>
void
TomBlockRelaxation<MatrixType,ContainerType>::
ApplyInverseJacobi (const Tpetra::MultiVector<Scalar,LocalOrdinal,GlobalOrdinal,Node>& X,
                    Tpetra::MultiVector<Scalar,LocalOrdinal,GlobalOrdinal,Node>& Y) const
{
  size_t NumVectors = X.getNumVectors();
  Tpetra::MultiVector<Scalar,LocalOrdinal,GlobalOrdinal,Node> AY( Y.getMap(), NumVectors );

  // Initial matvec not needed
  int starting_iteration = 0;
  if (ZeroStartingSolution_) {
    DoJacobi(X,Y);
    starting_iteration = 1;
  }

  for (int j = starting_iteration; j < NumSweeps_; j++) {
    applyMat(Y,AY);
    AY.update(1.0,X,-1.0);
    DoJacobi(AY,Y);

    // Flops for matrix apply & update
    ApplyFlops_ += NumVectors * (2 * NumGlobalNonzeros_ + 2 * NumGlobalRows_);
  }
}
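// ----------------------------------------------------------------------------
// Hedged sketch (illustrative, not part of the original file): one iteration
// of the sweep loop above, spelled out with plain Tpetra calls.  DoJacobi() is
// not shown in this file; it is approximated here by a damped scaling with an
// inverse-diagonal vector Dinv and damping factor omega, both of which are
// assumptions for the example.
#include <Tpetra_MultiVector.hpp>
#include <Tpetra_Operator.hpp>
#include <Tpetra_Vector.hpp>

void exampleJacobiSweep(const Tpetra::Operator<double,int,int>& A,
                        const Tpetra::Vector<double,int,int>& Dinv,
                        const Tpetra::MultiVector<double,int,int>& X,
                        Tpetra::MultiVector<double,int,int>& Y,
                        const double omega)
{
  Tpetra::MultiVector<double,int,int> R(Y.getMap(), Y.getNumVectors());
  A.apply(Y, R);           // R = A*Y       (cf. applyMat(Y,AY))
  R.update(1.0, X, -1.0);  // R = X - A*Y   (cf. AY.update(1.0,X,-1.0))
  // Y += omega * Dinv .* R, the damped Jacobi correction (cf. DoJacobi(AY,Y)).
  Y.elementWiseMultiply(omega, Dinv, R, 1.0);
}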
static Teuchos::RCP<Tpetra::MultiVector<Scalar,LO,GO,Node> >
Clone (const Tpetra::MultiVector<Scalar,LO,GO,Node>& mv, const int numvecs)
{
  return Teuchos::rcp (new Tpetra::MultiVector<Scalar,LO,GO,Node> (mv.getMap(), numvecs));
}
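// ----------------------------------------------------------------------------
// Hedged usage sketch (illustrative, not part of the original file): Clone()
// allocates a fresh, zero-initialized multivector on mv's Map; it does not
// copy mv's data.  Assumes Clone is reachable in the current scope (above it
// appears as a static member, e.g. of a multivector traits class).
void exampleClone(const Tpetra::MultiVector<double,int,int>& mv)
{
  // Three new columns on mv's Map, independent of mv's values.
  Teuchos::RCP<Tpetra::MultiVector<double,int,int>> work = Clone(mv, 3);
  work->putScalar(1.0);  // use the clone as workspace
}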
template <typename lno_t, typename gno_t>
size_t findUniqueGids(
  Tpetra::MultiVector<gno_t, lno_t, gno_t> &keys,
  Tpetra::Vector<gno_t, lno_t, gno_t> &gids
)
{
  // Input:  Tpetra MultiVector of keys; key length = getNumVectors().
  //         May contain duplicate keys within a processor.
  //         May contain duplicate keys across processors.
  // Input:  Empty Tpetra Vector with the same map for holding the results.
  // Output: Filled gids vector, containing unique global numbers for
  //         each unique key.  Global numbers are in range [0,#UniqueKeys).

  size_t num_keys = keys.getLocalLength();
  size_t num_entries = keys.getNumVectors();

#ifdef HAVE_ZOLTAN2_MPI
  MPI_Comm mpicomm = Teuchos::getRawMpiComm(*(keys.getMap()->getComm()));
#else
  // Zoltan's siMPI will be used here
  {
    int flag;
    MPI_Initialized(&flag);
    if (!flag) {
      int narg = 0;
      char **argv = NULL;
      MPI_Init(&narg, &argv);
    }
  }
  MPI_Comm mpicomm = MPI_COMM_WORLD;  // Will get MPI_COMM_WORLD from siMPI
#endif

  int num_gid = sizeof(gno_t)/sizeof(ZOLTAN_ID_TYPE) * num_entries;
  int num_user = sizeof(gno_t);

  // Buffer the keys for Zoltan_DD
  Teuchos::ArrayRCP<const gno_t> *tmpKeyVecs =
    new Teuchos::ArrayRCP<const gno_t>[num_entries];
  for (size_t v = 0; v < num_entries; v++)
    tmpKeyVecs[v] = keys.getData(v);

  ZOLTAN_ID_PTR ddkeys = new ZOLTAN_ID_TYPE[num_gid * num_keys];
  size_t idx = 0;
  for (size_t i = 0; i < num_keys; i++) {
    for (size_t v = 0; v < num_entries; v++) {
      ZOLTAN_ID_PTR ddkey = &(ddkeys[idx]);
      TPL_Traits<ZOLTAN_ID_PTR,gno_t>::ASSIGN(ddkey, tmpKeyVecs[v][i]);
      idx += TPL_Traits<ZOLTAN_ID_PTR,gno_t>::NUM_ID;
    }
  }
  delete [] tmpKeyVecs;

  // Allocate memory for the result
  char *ddnewgids = new char[num_user * num_keys];

  // Compute the new GIDs
  size_t nUnique = findUniqueGidsCommon<gno_t>(num_keys, num_gid,
                                               ddkeys, ddnewgids, mpicomm);

  // Copy the result into the output vector
  gno_t *result = (gno_t *)ddnewgids;
  for (size_t i = 0; i < num_keys; i++)
    gids.replaceLocalValue(i, result[i]);

  // Clean up
  delete [] ddkeys;
  delete [] ddnewgids;

  return nUnique;
}
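// ----------------------------------------------------------------------------
// Hedged usage sketch (illustrative, not part of the original file): building
// a two-component key multivector and requesting unique global numbers.  The
// ordinal choices, the map layout, the putScalar fill, and the assumption that
// this example compiles in the same namespace as findUniqueGids are all
// illustrative.
#include <Teuchos_Comm.hpp>
#include <Teuchos_OrdinalTraits.hpp>
#include <Tpetra_Map.hpp>
#include <Tpetra_MultiVector.hpp>
#include <Tpetra_Vector.hpp>

void exampleFindUniqueGids(const Teuchos::RCP<const Teuchos::Comm<int>>& comm)
{
  typedef int lno_t;
  typedef long long gno_t;
  typedef Tpetra::Map<lno_t, gno_t> map_t;

  const size_t numLocalKeys = 4;
  Teuchos::RCP<const map_t> map = Teuchos::rcp(new map_t(
      Teuchos::OrdinalTraits<Tpetra::global_size_t>::invalid(),
      numLocalKeys, 0, comm));

  // Two-component keys; duplicates within or across ranks are allowed.
  Tpetra::MultiVector<gno_t, lno_t, gno_t> keys(map, 2);
  keys.putScalar(1);  // every key is (1,1) here, so exactly one unique gid

  Tpetra::Vector<gno_t, lno_t, gno_t> gids(map);  // same map as keys
  size_t nUnique = findUniqueGids(keys, gids);
  (void)nUnique;  // gids now holds values in [0, nUnique)
}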