Teuchos::RCP<const Tpetra::Map<LO,GO,Node> >
createMeshMap (const LO& blockSize, const Tpetra::Map<LO,GO,Node>& pointMap)
{
  typedef Teuchos::OrdinalTraits<Tpetra::global_size_t> TOT;
  typedef Tpetra::Map<LO,GO,Node> map_type;

  // Calculate mesh GIDs.
  Teuchos::ArrayView<const GO> pointGids = pointMap.getNodeElementList();
  Teuchos::Array<GO> meshGids;
  GO indexBase = pointMap.getIndexBase();

  // Use a hash table to track whether we've encountered this GID previously.
  // This will happen when striding through the point DOFs in a block.  It
  // should not happen otherwise.  I don't use sort/make unique because I
  // don't want to change the ordering.
  meshGids.reserve(pointGids.size());
  Tpetra::Details::HashTable<GO,int> hashTable(pointGids.size());
  for (int i = 0; i < pointGids.size(); ++i) {
    GO meshGid = (pointGids[i] - indexBase) / blockSize + indexBase;
    if (hashTable.get(meshGid) == -1) {
      hashTable.add(meshGid, 1); // (key, value)
      meshGids.push_back(meshGid);
    }
  }

  Teuchos::RCP<const map_type> meshMap =
    Teuchos::rcp(new map_type(TOT::invalid(), meshGids(), 0, pointMap.getComm()));
  return meshMap;
}
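// Illustrative sketch (not from the sources above; plain STL, hypothetical
// values): the same point-GID-to-mesh-GID collapse as createMeshMap, showing
// how consecutive point DOF GIDs within a block map to a single mesh GID.
#include <iostream>
#include <unordered_set>
#include <vector>

int exampleMeshGids ()
{
  const long blockSize = 3, indexBase = 0;
  // Point GIDs owned by one process: two blocks' worth of DOFs.
  const std::vector<long> pointGids = {0, 1, 2, 3, 4, 5};

  std::vector<long> meshGids;
  std::unordered_set<long> seen;  // plays the role of Tpetra::Details::HashTable
  for (long pg : pointGids) {
    const long meshGid = (pg - indexBase) / blockSize + indexBase;
    if (seen.insert(meshGid).second) {
      meshGids.push_back(meshGid);  // first time we see this mesh GID
    }
  }
  for (long mg : meshGids) std::cout << mg << " ";  // prints: 0 1
  std::cout << std::endl;
  return 0;
}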
void RowGraph<LocalOrdinal,GlobalOrdinal,Node>::
pack (const Teuchos::ArrayView<const LocalOrdinal>& exportLIDs,
      Teuchos::Array<GlobalOrdinal>& exports,
      const Teuchos::ArrayView<size_t>& numPacketsPerLID,
      size_t& constantNumPackets,
      Distributor& distor) const
{
  using Teuchos::Array;
  typedef LocalOrdinal LO;
  typedef GlobalOrdinal GO;
  typedef Map<LO, GO, Node> map_type;
  const char tfecfFuncName[] = "pack";
  (void) distor; // forestall "unused argument" compiler warning

  TEUCHOS_TEST_FOR_EXCEPTION_CLASS_FUNC(
    exportLIDs.size() != numPacketsPerLID.size(), std::runtime_error,
    ": exportLIDs and numPacketsPerLID must have the same size.");

  const map_type& srcMap = * (this->getRowMap ());
  constantNumPackets = 0;

  // Set numPacketsPerLID[i] to the number of entries owned by the
  // calling process in (local) row exportLIDs[i] of the graph, that
  // the caller wants us to send out.  Compute the total number of
  // packets (that is, entries) owned by this process in all the
  // rows that the caller wants us to send out.
  size_t totalNumPackets = 0;
  Array<GO> row;
  for (LO i = 0; i < exportLIDs.size (); ++i) {
    const GO GID = srcMap.getGlobalElement (exportLIDs[i]);
    size_t row_length = this->getNumEntriesInGlobalRow (GID);
    numPacketsPerLID[i] = row_length;
    totalNumPackets += row_length;
  }

  exports.resize (totalNumPackets);

  // Loop again over the rows to export, and pack rows of indices
  // into the output buffer.
  size_t exportsOffset = 0;
  for (LO i = 0; i < exportLIDs.size (); ++i) {
    const GO GID = srcMap.getGlobalElement (exportLIDs[i]);
    size_t row_length = this->getNumEntriesInGlobalRow (GID);
    row.resize (row_length);
    size_t check_row_length = 0;
    this->getGlobalRowCopy (GID, row (), check_row_length);

    typename Array<GO>::const_iterator row_iter = row.begin();
    typename Array<GO>::const_iterator row_end = row.end();
    size_t j = 0;
    for (; row_iter != row_end; ++row_iter, ++j) {
      exports[exportsOffset + j] = *row_iter;
    }
    exportsOffset += row.size ();
  }
}
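// Illustrative sketch (not from the sources above; plain STL, hypothetical
// data): the two-pass count-then-pack pattern that pack() uses, first sizing
// the send buffer, then filling it contiguously row by row.
#include <cstddef>
#include <iostream>
#include <vector>

int examplePack ()
{
  // Rows to export, each a list of (global) column indices.
  const std::vector<std::vector<long> > rows = {{0, 1}, {1, 2, 3}, {3}};

  // Pass 1: record each row's length and the total number of packets.
  std::vector<std::size_t> numPacketsPerLID (rows.size ());
  std::size_t totalNumPackets = 0;
  for (std::size_t i = 0; i < rows.size (); ++i) {
    numPacketsPerLID[i] = rows[i].size ();
    totalNumPackets += rows[i].size ();
  }

  // Pass 2: pack all row entries contiguously into one send buffer.
  std::vector<long> exports;
  exports.reserve (totalNumPackets);
  for (const std::vector<long>& row : rows) {
    exports.insert (exports.end (), row.begin (), row.end ());
  }

  std::cout << "packed " << exports.size () << " entries" << std::endl; // 6
  return 0;
}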
void CUDANode::readyBuffers(Teuchos::ArrayView<Teuchos::ArrayRCP<const char> > buffers,
                            Teuchos::ArrayView<Teuchos::ArrayRCP<char> > ncBuffers)
{
#ifdef HAVE_KOKKOS_DEBUG
  for (size_t i=0; i < buffers.size(); ++i) {
    CHECK_COMPUTE_BUFFER(buffers[i]);
  }
  for (size_t i=0; i < ncBuffers.size(); ++i) {
    CHECK_COMPUTE_BUFFER(ncBuffers[i]);
  }
#endif
  // Not implemented: this stub unconditionally throws.
  TEST_FOR_EXCEPT(true);
}
Teuchos::Array< int > computeCommIndexes(int rank,
                                         const Teuchos::ArrayView< int > & commStrides)
{
  Teuchos::Array< int > result(commStrides.size());
  for (int axis = 0; axis < commStrides.size(); ++axis)
  {
    result[axis] = rank / commStrides[axis];
    rank         = rank % commStrides[axis];
  }
  return result;
}
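// Illustrative sketch (not from the sources above; hypothetical processor
// grid): how computeCommIndexes decomposes a flat rank into per-axis indexes
// using the communicator strides.
#include <cstddef>
#include <iostream>
#include <vector>

int exampleCommIndexes ()
{
  // A 3 x 4 processor grid laid out in row-major order has strides {4, 1}.
  // Rank 7 decomposes into axis indexes (1, 3): 7 / 4 = 1 remainder 3.
  int rank = 7;
  const std::vector<int> commStrides = {4, 1};
  std::vector<int> indexes (commStrides.size ());
  for (std::size_t axis = 0; axis < commStrides.size (); ++axis) {
    indexes[axis] = rank / commStrides[axis];
    rank = rank % commStrides[axis];
  }
  std::cout << indexes[0] << ", " << indexes[1] << std::endl; // 1, 3
  return 0;
}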
DenseContainer<MatrixType, LocalScalarType>::
DenseContainer (const Teuchos::RCP<const row_matrix_type>& matrix,
                const Teuchos::ArrayView<const local_ordinal_type>& localRows) :
  Container<MatrixType> (matrix, localRows),
  numRows_ (localRows.size ()),
  diagBlock_ (numRows_, numRows_),
  ipiv_ (numRows_, 0)
{
  using Teuchos::Array;
  using Teuchos::ArrayView;
  using Teuchos::RCP;
  using Teuchos::rcp;
  using Teuchos::toString;
  typedef Tpetra::Map<local_ordinal_type, global_ordinal_type, node_type> map_type;
  typedef typename ArrayView<const local_ordinal_type>::size_type size_type;

  TEUCHOS_TEST_FOR_EXCEPTION(
    ! matrix->hasColMap (), std::invalid_argument, "Ifpack2::DenseContainer: "
    "The constructor's input matrix must have a column Map.");

  // Check whether the input set of local row indices is correct.
  const map_type& rowMap = * (matrix->getRowMap ());
  const size_type numRows = localRows.size ();
  bool rowIndicesValid = true;
  Array<local_ordinal_type> invalidLocalRowIndices;
  for (size_type i = 0; i < numRows; ++i) {
    if (! rowMap.isNodeLocalElement (localRows[i])) {
      rowIndicesValid = false;
      invalidLocalRowIndices.push_back (localRows[i]);
      break;
    }
  }
  TEUCHOS_TEST_FOR_EXCEPTION(
    ! rowIndicesValid, std::invalid_argument, "Ifpack2::DenseContainer: "
    "On process " << rowMap.getComm ()->getRank () << " of "
    << rowMap.getComm ()->getSize () << ", in the given set of local row "
    "indices localRows = " << toString (localRows) << ", the following "
    "entries are not valid local row indices on the calling process: "
    << toString (invalidLocalRowIndices) << ".");

#ifdef HAVE_MPI
  RCP<const Teuchos::Comm<int> > localComm =
    rcp (new Teuchos::MpiComm<int> (MPI_COMM_SELF));
#else
  RCP<const Teuchos::Comm<int> > localComm =
    rcp (new Teuchos::SerialComm<int> ());
#endif // HAVE_MPI

  // FIXME (mfh 25 Aug 2013) What if the matrix's row Map has a
  // different index base than zero?
  const global_ordinal_type indexBase = 0;
  localMap_ = rcp (new map_type (numRows_, indexBase, localComm));
}
void CrsMatrixWrapper<ST>::ypAx(const Teuchos::ArrayView<ST>& y,
                                const Teuchos::ArrayView<const ST>& x) const
{
  RCP<VectorType<ST> > X = rcp(new VectorType<ST>(mat.getRowMap(), x, x.size(), 1));
  RCP<VectorType<ST> > Y = rcp(new VectorType<ST>(mat.getRowMap(), y, y.size(), 1));

  const ST alpha = Teuchos::ScalarTraits<ST>::one();
  const ST beta = Teuchos::ScalarTraits<ST>::one();

  // Y = beta*Y + alpha*A*X
  mat.apply(*X, *Y, Teuchos::NO_TRANS, alpha, beta);
  Y->get1dCopy(y, y.size());
}
void DropNegativeEntriesFactory<Scalar, LocalOrdinal, GlobalOrdinal, Node>::Build(Level& currentLevel) const {
  FactoryMonitor m(*this, "Matrix filtering (springs)", currentLevel);

  RCP<Matrix> Ain = Get< RCP<Matrix> >(currentLevel, "A");

  LocalOrdinal nDofsPerNode = Ain->GetFixedBlockSize();

  // create new empty Operator
  Teuchos::RCP<Matrix> Aout =
    MatrixFactory::Build(Ain->getRowMap(), Ain->getGlobalMaxNumRowEntries(), Xpetra::StaticProfile);

  size_t numLocalRows = Ain->getNodeNumRows();
  for (size_t row = 0; row < numLocalRows; row++) {
    GlobalOrdinal grid = Ain->getRowMap()->getGlobalElement(row);

    int rDofID = Teuchos::as<int>(grid % nDofsPerNode);

    // extract row information from input matrix
    Teuchos::ArrayView<const LocalOrdinal> indices;
    Teuchos::ArrayView<const Scalar> vals;
    Ain->getLocalRowView(row, indices, vals);

    // allocate output buffers for the filtered row
    Teuchos::ArrayRCP<GlobalOrdinal> indout(indices.size(), Teuchos::ScalarTraits<GlobalOrdinal>::zero());
    Teuchos::ArrayRCP<Scalar>        valout(indices.size(), Teuchos::ScalarTraits<Scalar>::zero());

    size_t nNonzeros = 0;
    for (size_t i = 0; i < (size_t)indices.size(); i++) {
      GlobalOrdinal gcid = Ain->getColMap()->getGlobalElement(indices[i]); // global column id
      int cDofID = Teuchos::as<int>(gcid % nDofsPerNode);

      // keep only entries that couple the same DOF index and have a
      // nonnegative (real part) value; all other entries are dropped
      if (rDofID == cDofID &&
          Teuchos::ScalarTraits<Scalar>::real(vals[i]) >=
          Teuchos::ScalarTraits<Scalar>::real(Teuchos::ScalarTraits<Scalar>::zero())) {
        indout[nNonzeros] = gcid;
        valout[nNonzeros] = vals[i];
        nNonzeros++;
      }
    }
    indout.resize(nNonzeros);
    valout.resize(nNonzeros);

    Aout->insertGlobalValues(Ain->getRowMap()->getGlobalElement(row),
                             indout.view(0, indout.size()),
                             valout.view(0, valout.size()));
  }

  Aout->fillComplete(Ain->getDomainMap(), Ain->getRangeMap());

  // copy block size information
  Aout->SetFixedBlockSize(nDofsPerNode);

  GetOStream(Statistics0, 0) << "Nonzeros in A (input): " << Ain->getGlobalNumEntries()
                             << ", Nonzeros after filtering A: " << Aout->getGlobalNumEntries()
                             << std::endl;

  Set(currentLevel, "A", Aout);
}
void RTOpC::apply_op_impl(
  const Teuchos::ArrayView<const ConstSubVectorView<Scalar> > &sub_vecs,
  const Teuchos::ArrayView<const SubVectorView<Scalar> > &targ_sub_vecs,
  const Teuchos::Ptr<ReductTarget> &_reduct_obj
  ) const
{
  using Teuchos::Workspace;
  Teuchos::WorkspaceStore* wss = Teuchos::get_default_workspace_store().get();

  const int num_vecs = sub_vecs.size();
  const int num_targ_vecs = targ_sub_vecs.size();

  RTOp_ReductTarget reduct_obj = RTOp_REDUCT_OBJ_NULL;
  if (!is_null(_reduct_obj))
    reduct_obj = (*this)(*_reduct_obj);

  int k;
  Workspace<RTOp_SubVector> c_sub_vecs(wss, num_vecs, false);
  for ( k = 0; k < num_vecs; ++k ) {
    const SubVector& v = sub_vecs[k];
    RTOp_sub_vector(v.globalOffset(), v.subDim(), v.values(), v.stride(), &c_sub_vecs[k]);
  }

  Workspace<RTOp_MutableSubVector> c_targ_sub_vecs(wss, num_targ_vecs, false);
  for ( k = 0; k < num_targ_vecs; ++k ) {
    const MutableSubVector& v = targ_sub_vecs[k];
    RTOp_mutable_sub_vector(v.globalOffset(), v.subDim(), v.values(), v.stride(), &c_targ_sub_vecs[k]);
  }

  const int err = RTOp_apply_op(
    &op_
    ,num_vecs, num_vecs ? &c_sub_vecs[0] : (RTOp_SubVector*)NULL
    ,num_targ_vecs, num_targ_vecs ? &c_targ_sub_vecs[0] : (RTOp_MutableSubVector*)NULL
    ,reduct_obj
    );

  TEUCHOS_TEST_FOR_EXCEPTION(
    err==RTOp_ERR_INVALID_NUM_VECS, InvalidNumVecs
    ,"RTOpC::apply_op(...): Error, "
    "RTOp_apply_op(...) returned RTOp_ERR_INVALID_NUM_VECS" );
  TEUCHOS_TEST_FOR_EXCEPTION(
    err==RTOp_ERR_INVALID_NUM_TARG_VECS, InvalidNumTargVecs
    ,"RTOpC::apply_op(...): Error, "
    "RTOp_apply_op(...) returned RTOp_ERR_INVALID_NUM_TARG_VECS" );
  TEUCHOS_TEST_FOR_EXCEPTION(
    err!=0, UnknownError
    ,"RTOpC::apply_op(...): Error, "
    "RTOp_apply_op(...) returned != 0 with unknown meaning" );
}
size_type computeSize(const Teuchos::ArrayView< DIM_TYPE > & dimensions)
{
  size_type result = 1;
  for (int axis = 0; axis < dimensions.size(); ++axis)
    result *= dimensions[axis];
  return result;
}
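// Illustrative sketch (not from the sources above; hypothetical dimensions):
// the contiguous size is simply the product of the dimensions.
#include <iostream>
#include <vector>

int exampleComputeSize ()
{
  // For dimensions {4, 3, 2} the number of elements is 4 * 3 * 2 = 24.
  const std::vector<int> dims = {4, 3, 2};
  long size = 1;
  for (int d : dims) size *= d;
  std::cout << size << std::endl; // 24
  return 0;
}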
void Tpetra_RowGraph<MatrixType>::getLocalRowCopy(local_ordinal_type LocalRow,
                                                  const Teuchos::ArrayView<local_ordinal_type> &Indices,
                                                  size_t &NumEntries) const
{
  Teuchos::Array<scalar_type> Values(Indices.size());
  A_->getLocalRowCopy(LocalRow, Indices, Values (), NumEntries);
}
void Simple2DModelEvaluator<Scalar>::set_p(const Teuchos::ArrayView<const Scalar> &p)
{
#ifdef TEUCHOS_DEBUG
  TEUCHOS_ASSERT_EQUALITY(p_.size(), p.size());
#endif
  p_().assign(p);
}
void diff_type_get_copy<MV,S>::
apply (const Teuchos::Ptr<const MV>& mv,
       const Teuchos::ArrayView<S>& v,
       const size_t& ldx,
       Teuchos::Ptr<const Tpetra::Map<typename MV::local_ordinal_t,
                                      typename MV::global_ordinal_t,
                                      typename MV::node_t> > distribution_map,
       EDistribution distribution )
{
  typedef typename MV::scalar_t mv_scalar_t;
  typedef typename Teuchos::Array<mv_scalar_t>::size_type size_type;

  TEUCHOS_TEST_FOR_EXCEPTION(
    mv.getRawPtr () == NULL, std::invalid_argument,
    "Amesos2::diff_type_get_copy::apply: mv is null.");

  TEUCHOS_TEST_FOR_EXCEPTION(
    distribution_map.getRawPtr () == NULL, std::invalid_argument,
    "Amesos2::diff_type_get_copy::apply: distribution_map is null.");

  const size_type vals_length = v.size ();
  Teuchos::Array<mv_scalar_t> vals_tmp (vals_length);

  mv->get1dCopy (vals_tmp (), ldx, distribution_map, distribution);
  for (size_type i = 0; i < vals_length; ++i) {
    v[i] = Teuchos::as<S> (vals_tmp[i]);
  }
}
void CrsWrapper_GraphBuilder<Scalar, LocalOrdinal, GlobalOrdinal, Node>::
insertGlobalValues (GlobalOrdinal globalRow,
                    const Teuchos::ArrayView<const GlobalOrdinal> &indices,
                    const Teuchos::ArrayView<const Scalar> &values)
{
  typename std::map<GlobalOrdinal, std::set<GlobalOrdinal>*>::iterator
    iter = graph_.find (globalRow);

  TEUCHOS_TEST_FOR_EXCEPTION(
    iter == graph_.end(), std::runtime_error,
    "Tpetra::CrsWrapper_GraphBuilder::insertGlobalValues could not find row "
    << globalRow << " in the graph.");

  // Only the graph structure is built here; the values are ignored.
  std::set<GlobalOrdinal>& cols = * (iter->second);
  for (typename Teuchos::ArrayView<const GlobalOrdinal>::size_type i = 0;
       i < indices.size (); ++i) {
    cols.insert (indices[i]);
  }

  const global_size_t row_length = static_cast<global_size_t> (cols.size ());
  if (row_length > max_row_length_) {
    max_row_length_ = row_length;
  }
}
void Simple2DModelEvaluator<Scalar>::set_x0(const Teuchos::ArrayView<const Scalar> &x0_in)
{
#ifdef TEUCHOS_DEBUG
  TEUCHOS_ASSERT_EQUALITY(x_space_->dim(), x0_in.size());
#endif
  Thyra::DetachedVectorView<Scalar> x0(x0_);
  x0.sv().values()().assign(x0_in);
}
Kokkos::View<const T*,D>
getKokkosViewDeepCopy(const Teuchos::ArrayView<const T>& a)
{
#if defined(KOKKOS_HAVE_PTHREAD)
  typedef Kokkos::Threads HostDevice;
#elif defined(KOKKOS_HAVE_OPENMP)
  typedef Kokkos::OpenMP HostDevice;
#else
  typedef Kokkos::Serial HostDevice;
#endif
  typedef Kokkos::View<T*,D> view_type;
  typedef Kokkos::View<const T*, typename view_type::array_layout, HostDevice,
                       Kokkos::MemoryUnmanaged> unmanaged_host_view_type;
  if (a.size() == 0)
    return view_type();
  view_type v("", a.size());
  unmanaged_host_view_type hv(a.getRawPtr(), a.size());
  Kokkos::deep_copy(v, hv);
  return v;
}
SIZE_TYPE computeSize(const Teuchos::ArrayView< DIM_TYPE > & dimensions,
                      const Teuchos::ArrayView< SIZE_TYPE > & strides)
{
  // SIZE_TYPE might be a const type, but we need result to be non-const
  typename remove_const< SIZE_TYPE >::type result = 1;
  for (int axis = 0; axis < dimensions.size(); ++axis)
    result += (dimensions[axis]-1) * strides[axis];
  return result;
}
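// Illustrative sketch (not from the sources above; hypothetical padded
// layout): with strides, the required buffer length is
// 1 + sum over axes of (dim - 1) * stride, which can exceed the product of
// the dimensions when the storage is padded.
#include <cstddef>
#include <iostream>
#include <vector>

int exampleComputeStridedSize ()
{
  // A 4 x 3 array stored with a padded leading dimension of 6 has
  // strides {1, 6}.  The required buffer length is
  // 1 + (4 - 1) * 1 + (3 - 1) * 6 = 16, not 4 * 3 = 12.
  const std::vector<int> dims    = {4, 3};
  const std::vector<int> strides = {1, 6};
  long size = 1;
  for (std::size_t axis = 0; axis < dims.size (); ++axis) {
    size += (dims[axis] - 1) * strides[axis];
  }
  std::cout << size << std::endl; // 16
  return 0;
}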
// Just test whether Teuchos memory management objects and Kokkos
// Array Views can coexist in the same program.  This test does not
// have the Teuchos and Kokkos objects interact with each other.
TEUCHOS_UNIT_TEST( LinkTeuchosAndKokkos, NoInteraction ) {
  typedef Teuchos::Array<double>::size_type size_type;

  const size_type numElts = 10;
  Teuchos::Array<double> x (numElts);
  for (size_type k = 0; k < numElts; ++k) {
    x[k] = 42.0 + static_cast<double> (k);
  }
  Teuchos::ArrayView<double> xView = x.view (3, 5); // view of [3, 4, 5, 6, 7]
  TEST_EQUALITY_CONST( xView.size(), 5 );
  for (size_type k = 0; k < xView.size (); ++k) {
    TEST_EQUALITY( xView[k], x[k+3] );
  }

  typedef Kokkos::View<double*, Kokkos::Threads> ka_view_type;
  ka_view_type y ("y", numElts);
  Kokkos::parallel_for (y.dimension_0 (), FillFunctor<Kokkos::Threads> (y));
}
Kokkos::View<const T*,D>
getKokkosViewDeepCopy(const Teuchos::ArrayView<const T>& a)
{
  typedef typename Kokkos::Impl::if_c<
      Impl::VerifyExecutionCanAccessMemorySpace< D, Kokkos::HostSpace>::value,
      typename D::execution_space, Kokkos::HostSpace>::type HostDevice;
  typedef Kokkos::View<T*, D> view_type;
  typedef Kokkos::View<const T*, typename view_type::array_layout, HostDevice,
                       Kokkos::MemoryUnmanaged> unmanaged_host_view_type;
  if (a.size () == 0) {
    return view_type ();
  }
  view_type v ("", a.size ());
  unmanaged_host_view_type hv (a.getRawPtr (), a.size ());
  Kokkos::deep_copy (v, hv);
  return v;
}
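// Illustrative usage sketch (not from the sources above): deep-copy host data
// held in a Teuchos::Array into a device View.  This assumes Kokkos has been
// initialized and that the helper above is visible as getKokkosViewDeepCopy
// in the current namespace (in Trilinos it lives in a compatibility
// namespace, e.g. Kokkos::Compat).
#include <Kokkos_Core.hpp>
#include <Teuchos_Array.hpp>

void exampleDeepCopy ()
{
  Teuchos::Array<double> a (5);
  for (int i = 0; i < 5; ++i) { a[i] = 2.0 * i; }

  // Take an ArrayView<const double> of the host data.
  const Teuchos::Array<double>& ca = a;

  // Deep-copy into a View on the default execution space's memory.
  Kokkos::View<const double*, Kokkos::DefaultExecutionSpace> v =
    getKokkosViewDeepCopy<double, Kokkos::DefaultExecutionSpace> (ca ());
  // v now holds a device-resident copy; a may be freed or modified safely.
  (void) v;
}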
Teuchos::Array< int > computePeriodic(int numDims,
                                      const Teuchos::ArrayView< const int > & periodic)
{
  Teuchos::Array< int > result(numDims, 0);
  for (int axis = 0; axis < numDims && axis < periodic.size(); ++axis)
    result[axis] = periodic[axis];
  return result;
}
bool CloudDomain<1>::pointInDomain( const Teuchos::ArrayView<const double>& coords ) const
{
    DTK_REQUIRE( coords.size() == 1 );
    DTK_CHECK( d_bounds[0] <= d_bounds[1] );
    return ( coords[0] >= d_bounds[0] && coords[0] <= d_bounds[1] );
}
//---------------------------------------------------------------------------//
TEUCHOS_UNIT_TEST( SplineInterpolationPairing, dim_3_test )
{
    int dim = 3;
    int num_src_points = 10;
    int num_src_coords = dim*num_src_points;
    Teuchos::Array<double> src_coords(num_src_coords);
    for ( int i = 0; i < num_src_points; ++i )
    {
        src_coords[dim*i] = 1.0*i;
        src_coords[dim*i+1] = 1.0;
        src_coords[dim*i+2] = 1.0;
    }

    int num_tgt_points = 2;
    int num_tgt_coords = dim*num_tgt_points;
    Teuchos::Array<double> tgt_coords( num_tgt_coords );
    tgt_coords[0] = 4.9;
    tgt_coords[1] = 1.0;
    tgt_coords[2] = 1.0;
    tgt_coords[3] = 10.0;
    tgt_coords[4] = 1.0;
    tgt_coords[5] = 1.0;

    double radius = 1.1;

    DataTransferKit::SplineInterpolationPairing<3>
        pairing( src_coords(), tgt_coords(), radius );

    Teuchos::ArrayView<const unsigned> view = pairing.childCenterIds( 0 );
    TEST_EQUALITY( 3, view.size() );
    TEST_EQUALITY( 5, view[0] );
    TEST_EQUALITY( 4, view[1] );
    TEST_EQUALITY( 6, view[2] );

    view = pairing.childCenterIds( 1 );
    TEST_EQUALITY( 1, view.size() );
    TEST_EQUALITY( 9, view[0] );

    Teuchos::ArrayRCP<std::size_t> children_per_parent =
        pairing.childrenPerParent();
    TEST_EQUALITY( children_per_parent[0], 3 );
    TEST_EQUALITY( children_per_parent[1], 1 );
}
void ZoltanInterface<LocalOrdinal, GlobalOrdinal, Node, LocalMatOps>::
GetLocalNumberOfNonzeros(void *data, int NumGidEntries, int NumLidEntries, ZOLTAN_ID_PTR gids,
                         ZOLTAN_ID_PTR lids, int wgtDim, float *weights, int *ierr) {
  if (data == NULL || NumGidEntries < 1) {
    *ierr = ZOLTAN_FATAL;
    return;
  } else {
    *ierr = ZOLTAN_OK;
  }

  Matrix *A = (Matrix*) data;
  RCP<const Map> map = A->getRowMap();
  LO blockSize = A->GetFixedBlockSize();
  if (blockSize == 0)
    throw Exceptions::RuntimeError("MueLu::Zoltan : Matrix has block size 0.");

  Teuchos::ArrayView<const LO> cols;
  Teuchos::ArrayView<const SC> vals;

  if (blockSize == 1) {
    for (size_t i = 0; i < map->getNodeNumElements(); ++i) {
      gids[i] = (ZOLTAN_ID_TYPE) map->getGlobalElement(i);
      A->getLocalRowView(i, cols, vals);
      weights[i] = cols.size();
    }

  } else {
    LO numBlocks = A->getRowMap()->getNodeNumElements() / blockSize;
    for (LO i = 0; i < numBlocks; ++i) {
      // Assign zoltan GID to the first row GID in the block
      // NOTE: Zoltan GIDs are different from GIDs in the Coordinates vector
      gids[i] = (ZOLTAN_ID_TYPE) map->getGlobalElement(i*blockSize);

      LO nnz = 0;
      for (LO j = i*blockSize; j < (i+1)*blockSize; ++j) {
        A->getLocalRowView(j, cols, vals);
        nnz += vals.size();
      }
      weights[i] = nnz;
    } //for (LocalOrdinal i=0; i<numBlocks; ++i)
  }
} //GetLocalNumberOfNonzeros()
Map<LocalOrdinal,GlobalOrdinal,Kokkos::Compat::KokkosDeviceWrapperNode<DeviceType> >::
Map (const global_size_t globalNumIndices,
     const Teuchos::ArrayView<const GlobalOrdinal>& myGlobalIndices,
     const GlobalOrdinal indexBase,
     const Teuchos::RCP<const Teuchos::Comm<int> >& comm,
     const Teuchos::RCP<node_type> &node) :
  comm_ (comm),
  node_ (node),
  directory_ (new Directory<LocalOrdinal, GlobalOrdinal, node_type> ())
{
  typedef GlobalOrdinal GO;
  typedef Kokkos::View<const GlobalOrdinal*, device_type,
                       Kokkos::MemoryUnmanaged> host_view_type;
  typedef Kokkos::View<GlobalOrdinal*, device_type> device_view_type;

  // Copy the input GID list from host (we assume that
  // Teuchos::ArrayView should only view host memory) to device.
  //
  // FIXME (mfh 06 Feb, 24 Mar 2014) We could use the CUDA API
  // function here that can look at a pointer and tell whether it
  // lives on host or device, to tell whether the Teuchos::ArrayView
  // is viewing host or device memory.  Regardless, we don't own the
  // data and we will need a deep copy anyway, so we might as well
  // copy it.
  host_view_type gidsHost (myGlobalIndices.getRawPtr (), myGlobalIndices.size ());
  device_view_type gidsDevice ("GIDs", myGlobalIndices.size ());
  Kokkos::deep_copy (gidsDevice, gidsHost);

  const global_size_t GSTI = Teuchos::OrdinalTraits<global_size_t>::invalid ();
  const GO globalNumInds = (globalNumIndices == GSTI) ?
    getInvalidGlobalIndex () : Teuchos::as<GO> (globalNumIndices);

  // Start with a host Map implementation, since this will make this
  // class' public (host) methods work.  If users want device
  // methods, they will call getDeviceView(), which will initialize
  // the device Map implementation.
  //
  // NOTE (mfh 06 Feb 2014) If we're using UVM, we don't really need
  // the host and device Maps to be separate.
  mapHost_ = host_impl_type (globalNumInds, gidsDevice, indexBase, *comm);

  // Create the Directory on demand in getRemoteIndexList().
}
void RTOpC::extract_reduct_obj_state_impl(
  const ReductTarget &reduct_obj,
  const Teuchos::ArrayView<primitive_value_type> &value_data,
  const Teuchos::ArrayView<index_type> &index_data,
  const Teuchos::ArrayView<char_type> &char_data
  ) const
{
  TEUCHOS_TEST_FOR_EXCEPTION(
    0!=RTOp_extract_reduct_obj_state(
      &op_, (*this)(reduct_obj),
      value_data.size(), value_data.getRawPtr(),
      index_data.size(), index_data.getRawPtr(),
      char_data.size(), char_data.getRawPtr()
      ),
    UnknownError,
    "RTOpC::extract_reduct_obj_state(...): Error, "
    "RTOp_extract_reduct_obj_state(...) returned != 0"
    );
}
Teuchos::ArrayView<const double>
StkNodalBasisSource::atomData(int localAtomRank, const Teuchos::ArrayView<double> &result) const
{
  // result must be able to hold one value per DOF of this atom.
  TEUCHOS_ASSERT(this->entryCount(localAtomRank) <= result.size());
  const int dofCount = this->entryCount(localAtomRank);
  for (int dofRank = 0; dofRank < dofCount; ++dofRank) {
    const int localEntryIndex = disc_->getOwnedDOF(localAtomRank, dofRank);
    result[dofRank] = (*currentVector_)[localEntryIndex];
  }
  return result;
}
void RTOpC::load_reduct_obj_state_impl(
  const Teuchos::ArrayView<const primitive_value_type> &value_data,
  const Teuchos::ArrayView<const index_type> &index_data,
  const Teuchos::ArrayView<const char_type> &char_data,
  const Teuchos::Ptr<ReductTarget> &reduct_obj
  ) const
{
  TEUCHOS_TEST_FOR_EXCEPTION(
    0!=RTOp_load_reduct_obj_state(
      &op_,
      value_data.size(), value_data.getRawPtr(),
      index_data.size(), index_data.getRawPtr(),
      char_data.size(), char_data.getRawPtr(),
      (*this)(*reduct_obj)
      ),
    UnknownError,
    "RTOpC::load_reduct_obj_state(...): Error, "
    "RTOp_load_reduct_obj_state(...) returned != 0"
    );
}
CrsWrapper_GraphBuilder<Scalar, LocalOrdinal, GlobalOrdinal, Node>::
CrsWrapper_GraphBuilder (const Teuchos::RCP<const Map<LocalOrdinal, GlobalOrdinal, Node> >& map) :
  graph_(),
  rowmap_(map),
  max_row_length_(0)
{
  Teuchos::ArrayView<const GlobalOrdinal> rows = map->getNodeElementList ();
  const LocalOrdinal numRows = static_cast<LocalOrdinal> (rows.size ());
  for (LocalOrdinal i = 0; i < numRows; ++i) {
    graph_[rows[i]] = new std::set<GlobalOrdinal>;
  }
}
void MatrixAdapter<Matrix>::do_getCrs(const Teuchos::ArrayView<scalar_t> nzval,
                                      const Teuchos::ArrayView<global_ordinal_t> colind,
                                      const Teuchos::ArrayView<typename MatrixAdapter<Matrix>::global_size_t> rowptr,
                                      typename MatrixAdapter<Matrix>::global_size_t& nnz,
                                      const Teuchos::Ptr<const Tpetra::Map<local_ordinal_t,global_ordinal_t,node_t> > rowmap,
                                      EDistribution distribution,
                                      EStorage_Ordering ordering,
                                      col_access ca) const
{
  using Teuchos::Array;

  // get the ccs and transpose
  Array<scalar_t> nzval_tmp(nzval.size(), 0);
  Array<global_ordinal_t> rowind(colind.size(), 0);
  Array<global_size_t> colptr(this->getGlobalNumCols() + 1);
  this->getCcs(nzval_tmp(), rowind(), colptr(), nnz, rowmap, ordering, distribution);

  if( !nzval.is_null() && !colind.is_null() && !rowptr.is_null() )
    Util::transpose(nzval_tmp(), rowind(), colptr(), nzval, colind, rowptr);
}
TEUCHOS_UNIT_TEST(Utilities, DetectDirichletRows)
{
  RCP<Matrix> A = TestHelpers::TestFactory<SC, LO, GO, NO>::Build1DPoisson(100);
  Teuchos::ArrayView<const LO> indices;
  Teuchos::ArrayView<const SC> values;

  LO localRowToZero = 5;
  A->resumeFill();
  A->getLocalRowView(localRowToZero, indices, values);
  Array<SC> newvalues(values.size(), Teuchos::ScalarTraits<SC>::zero());
  for (int j = 0; j < indices.size(); j++)  //keep diagonal
    if (indices[j] == localRowToZero) newvalues[j] = values[j];
  A->replaceLocalValues(localRowToZero, indices, newvalues);
  A->fillComplete();

  ArrayRCP<const bool> drows = Utils::DetectDirichletRows(*A);
  TEST_EQUALITY(drows[localRowToZero], true);
  TEST_EQUALITY(drows[localRowToZero-1], false);

  A->resumeFill();
  A->getLocalRowView(localRowToZero, indices, values);
  for (int j = 0; j < indices.size(); j++)  //keep diagonal
    if (indices[j] == localRowToZero) newvalues[j] = values[j];
    else                              newvalues[j] = Teuchos::as<SC>(0.25);
  A->replaceLocalValues(localRowToZero, indices, newvalues);

  //row 5 should not be Dirichlet
  drows = Utils::DetectDirichletRows(*A, Teuchos::as<SC>(0.24));
  TEST_EQUALITY(drows[localRowToZero], false);
  TEST_EQUALITY(drows[localRowToZero-1], false);

  //row 5 should be Dirichlet
  drows = Utils::DetectDirichletRows(*A, Teuchos::as<SC>(0.26));
  TEST_EQUALITY(drows[localRowToZero], true);
  TEST_EQUALITY(drows[localRowToZero-1], false);
} //DetectDirichletRows
LookupStatus
Map<LocalOrdinal,GlobalOrdinal,Kokkos::Compat::KokkosDeviceWrapperNode<DeviceType> >::
getRemoteIndexList (const Teuchos::ArrayView<const GlobalOrdinal>& GIDs,
                    const Teuchos::ArrayView<int>& PIDs) const
{
  TEUCHOS_TEST_FOR_EXCEPTION(
    GIDs.size () != PIDs.size (), std::invalid_argument,
    "Tpetra::Map (Kokkos refactor)::getRemoteIndexList (2 args): GIDs.size ()"
    " = " << GIDs.size () << " != PIDs.size () = " << PIDs.size () << ".");

  // Empty Maps (i.e., containing no indices on any processes in the
  // Map's communicator) are perfectly valid.  In that case, if the
  // input GID list is nonempty, we fill the output array with
  // invalid values, and return IDNotPresent to notify the caller.
  // It's perfectly valid to give getRemoteIndexList GIDs that the
  // Map doesn't own.  SubmapImport test 2 needs this functionality.
  if (getGlobalNumElements () == 0) {
    if (GIDs.size () == 0) {
      return AllIDsPresent; // trivially
    } else {
      // The Map contains no indices, so all output PIDs are invalid.
      for (Teuchos::ArrayView<int>::size_type k = 0; k < PIDs.size (); ++k) {
        PIDs[k] = Teuchos::OrdinalTraits<int>::invalid ();
      }
      return IDNotPresent;
    }
  }

  // getRemoteIndexList must be called collectively, and Directory
  // creation is collective too, so it's OK to create the Directory
  // on demand.
  setupDirectory ();
  return directory_->getDirectoryEntries (*this, GIDs, PIDs);
}
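// Illustrative usage sketch (not from the sources above; hypothetical map and
// GIDs): ask a Tpetra::Map which processes own a handful of global indices.
// This uses the default Map template parameters; the overload shown above
// takes the GID list and a same-sized output array of process ranks.
#include <Teuchos_Array.hpp>
#include <Tpetra_Map.hpp>

void exampleLookup (const Tpetra::Map<>& map)
{
  typedef Tpetra::Map<>::global_ordinal_type GO;

  Teuchos::Array<GO> gids (3);
  gids[0] = 0; gids[1] = 42; gids[2] = 100;

  Teuchos::Array<int> pids (gids.size ());
  const Tpetra::LookupStatus status = map.getRemoteIndexList (gids (), pids ());
  // status == Tpetra::AllIDsPresent if every GID is owned by some process;
  // otherwise IDNotPresent, and unfound entries of pids hold an invalid value.
  (void) status; // silence unused-variable warnings in this sketch
}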