//! Print the given array of strings, in YAML format, to \c out.
static void
printStringArray (std::ostream& out,
                  const Teuchos::ArrayView<const std::string>& array)
{
  typedef Teuchos::ArrayView<const std::string>::const_iterator iter_type;

  out << "[";
  for (iter_type iter = array.begin(); iter != array.end(); ++iter) {
    out << "\"" << *iter << "\"";
    if (iter + 1 != array.end()) {
      out << ", ";
    }
  }
  out << "]";
}
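// A minimal usage sketch for printStringArray; the list of names is an
// arbitrary example, and Teuchos::Array converts implicitly to
// Teuchos::ArrayView<const std::string>.
void printExample (std::ostream& out)
{
  Teuchos::Array<std::string> names;
  names.push_back ("rho");
  names.push_back ("u");
  names.push_back ("p");
  printStringArray (out, names); // prints ["rho", "u", "p"]
  out << std::endl;
}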
Teuchos::Array<bool> createResponseTable(
    int count,
    const std::string selectionType,
    int index,
    const Teuchos::ArrayView<const int>& list)
{
  Teuchos::Array<bool> result;

  if (count > 0) {
    if (selectionType == "All") {
      result.resize(count, true);
    } else if (selectionType == "Last") {
      result = createResponseTableFromIndex(count - 1, count);
    } else if (selectionType == "AllButLast") {
      result.reserve(count);
      result.resize(count - 1, true);
      result.push_back(false);
    } else if (selectionType == "Index") {
      result = createResponseTableFromIndex(index, count);
    } else if (selectionType == "List") {
      result.resize(count, false);
      for (Teuchos::ArrayView<const int>::const_iterator it = list.begin(),
           it_end = list.end(); it != it_end; ++it) {
        result.at(*it) = true;
      }
    } else {
      // Unrecognized selection type: always raise an exception.
      TEUCHOS_TEST_FOR_EXCEPT(true);
    }
  }

  return result;
}
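// createResponseTable above relies on a helper createResponseTableFromIndex
// that is not part of this excerpt. A hedged sketch consistent with its use
// ("Last" -> index count-1, "Index" -> the given index) might look like this:
Teuchos::Array<bool> createResponseTableFromIndex (int index, int count)
{
  Teuchos::Array<bool> result (count, false); // all entries off ...
  result.at (index) = true;                   // ... except the selected one
  return result;
}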
/// \brief Constructor.
///
/// \param matrix [in] The original input matrix. This Container
///   will construct a local diagonal block from the rows given by
///   <tt>localRows</tt>.
///
/// \param localRows [in] The set of (local) rows assigned to this
///   container. <tt>localRows[i] == j</tt>, where i (from 0 to
///   <tt>getNumRows() - 1</tt>) indicates the Container's row, and
///   j indicates the local row in the calling process. Subclasses
///   must always pass along these indices to the base class.
Container (const Teuchos::RCP<const row_matrix_type>& matrix,
           const Teuchos::ArrayView<const local_ordinal_type>& localRows) :
  inputMatrix_ (matrix),
  localRows_ (localRows.begin (), localRows.end ())
{
  TEUCHOS_TEST_FOR_EXCEPTION(
    matrix.is_null (), std::invalid_argument, "Ifpack2::Container: "
    "The constructor's input matrix must be non-null.");
}
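// The documentation above stresses that subclasses must forward localRows to
// this base-class constructor. A hedged sketch of such a subclass follows;
// the template parameter MatrixType, the class name, and the typedef lookups
// are assumptions about the surrounding Ifpack2 hierarchy, not code from this
// excerpt.
template<class MatrixType>
class DiagonalBlockContainer : public Container<MatrixType> {
public:
  typedef typename Container<MatrixType>::row_matrix_type row_matrix_type;
  typedef typename Container<MatrixType>::local_ordinal_type local_ordinal_type;

  DiagonalBlockContainer (const Teuchos::RCP<const row_matrix_type>& matrix,
                          const Teuchos::ArrayView<const local_ordinal_type>& localRows)
    : Container<MatrixType> (matrix, localRows) // hand the row indices straight to the base class
  {}
};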
void
DTKInterpolationAdapter::update_variable_values(std::string var_name,
                                                Teuchos::ArrayView<GlobalOrdinal> missed_points)
{
  MPI_Comm old_comm = Moose::swapLibMeshComm(*comm->getRawMpiComm());

  System * sys = find_sys(var_name);
  unsigned int var_num = sys->variable_number(var_name);

  bool is_nodal = sys->variable_type(var_num).family == LAGRANGE;

  Teuchos::RCP<FieldContainerType> values = values_to_fill[var_name]->field();

  // Create a vector containing true or false for each point saying whether it was missed or not
  // We're only going to update values for points that were not missed
  std::vector<bool> missed(values->size(), false);

  for (Teuchos::ArrayView<const GlobalOrdinal>::const_iterator i = missed_points.begin();
       i != missed_points.end(); ++i)
    missed[*i] = true;

  unsigned int i = 0;

  // Loop over the values (one for each node) and assign the value of this variable at each node
  for (FieldContainerType::iterator it = values->begin(); it != values->end(); ++it)
  {
    // If this point "missed" then skip it
    if (missed[i])
    {
      i++;
      continue;
    }

    const DofObject * dof_object = NULL;

    if (is_nodal)
      dof_object = mesh.node_ptr(vertices[i]);
    else
      dof_object = mesh.elem(elements[i]);

    if (dof_object->processor_id() == mesh.processor_id())
    {
      // The 0 is for the component... this only works for LAGRANGE!
      dof_id_type dof = dof_object->dof_number(sys->number(), var_num, 0);
      sys->solution->set(dof, *it);
    }

    i++;
  }

  sys->solution->close();

  // Swap back
  Moose::swapLibMeshComm(old_comm);
}
Teuchos::Array< SIZE_TYPE >
computeStrides(const Teuchos::ArrayView< DIM_TYPE > & dimensions,
               const Layout layout)
{
  // In the MDArray<T>(const MDArrayView<T> &) constructor, I try to
  // pass the MDArrayView dimensions to computeStrides(), but they
  // come in as ArrayView<const T> (for reasons I can't determine) and
  // cause all sorts of const-correctness problems. So I copy them
  // into a new Array<T> and pass its reference to the main
  // computeStrides() function. Fortunately, the array of dimensions
  // is small.
  Teuchos::Array< DIM_TYPE > nonConstDims(0);
  nonConstDims.insert(nonConstDims.begin(),
                      dimensions.begin(),
                      dimensions.end());
  return computeStrides< SIZE_TYPE, DIM_TYPE >(nonConstDims, layout);
}
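// For context, the stride rule computeStrides implements is the standard one:
// with a first-index-fastest (Fortran-order) layout the stride of axis k is
// the product of dimensions 0..k-1, and with a last-index-fastest (C-order)
// layout it is the product of dimensions k+1..n-1. Below is a self-contained
// plain-C++ illustration of that rule, not the Domi template above; the
// boolean flag stands in for the real Layout enum, whose values are not shown
// in this excerpt.
std::vector<std::size_t> exampleStrides (const std::vector<std::size_t>& dims,
                                         bool firstIndexFastest)
{
  std::vector<std::size_t> result (dims.size ());
  std::size_t stride = 1;
  if (firstIndexFastest) {          // Fortran order: strides grow left to right
    for (std::size_t i = 0; i < dims.size (); ++i) {
      result[i] = stride;
      stride *= dims[i];
    }
  } else {                          // C order: strides grow right to left
    for (std::size_t i = dims.size (); i-- > 0; ) {
      result[i] = stride;
      stride *= dims[i];
    }
  }
  return result; // e.g. dims {2,3,4}: {1,2,6} (Fortran) vs {12,4,1} (C)
}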
void addNodesToPart(
    const Teuchos::ArrayView<const stk::mesh::EntityId> &nodeIds,
    stk::mesh::Part &samplePart,
    stk::mesh::BulkData& bulkData)
{
  const stk::mesh::PartVector samplePartVec(1, &samplePart);
  const stk::mesh::Selector locallyOwned = stk::mesh::MetaData::get(bulkData).locally_owned_part();

  BulkModification mod(bulkData);

  typedef Teuchos::ArrayView<const stk::mesh::EntityId>::const_iterator Iter;
  for (Iter it = nodeIds.begin(), it_end = nodeIds.end(); it != it_end; ++it) {
    stk::mesh::Entity node = bulkData.get_entity(stk::topology::NODE_RANK, *it);
    if (bulkData.is_valid(node) && locallyOwned(bulkData.bucket(node))) {
      bulkData.change_entity_parts(node, samplePartVec);
    }
  }
}
void addNodesToPart(
    const Teuchos::ArrayView<const stk::mesh::EntityId> &nodeIds,
    stk::mesh::Part &samplePart,
    stk::mesh::BulkData& bulkData)
{
  const stk::mesh::EntityRank nodeEntityRank(0);
  const stk::mesh::PartVector samplePartVec(1, &samplePart);
  const stk::mesh::Selector locallyOwned = stk::mesh::MetaData::get(bulkData).locally_owned_part();

  BulkModification mod(bulkData);

  typedef Teuchos::ArrayView<const stk::mesh::EntityId>::const_iterator Iter;
  for (Iter it = nodeIds.begin(), it_end = nodeIds.end(); it != it_end; ++it) {
    const Teuchos::Ptr<stk::mesh::Entity> node(bulkData.get_entity(nodeEntityRank, *it));
    if (Teuchos::nonnull(node) && locallyOwned(*node)) {
      bulkData.change_entity_parts(*node, samplePartVec);
    }
  }
}
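// A hedged call-site sketch for either version of addNodesToPart above; the
// samplePart and bulkData objects are assumed to come from the surrounding
// STK mesh setup, and the node IDs are arbitrary placeholders.
void markSampleNodes (stk::mesh::Part& samplePart, stk::mesh::BulkData& bulkData)
{
  Teuchos::Array<stk::mesh::EntityId> nodeIds;
  nodeIds.push_back (11);
  nodeIds.push_back (42);
  nodeIds.push_back (99);
  // Teuchos::Array converts implicitly to Teuchos::ArrayView<const EntityId>.
  addNodesToPart (nodeIds, samplePart, bulkData);
}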
void LocalAggregationAlgorithm<LocalOrdinal, GlobalOrdinal, Node, LocalMatOps>::CoarsenUncoupled(GraphBase const & graph, Aggregates & aggregates) const { Monitor m(*this, "Coarsen Uncoupled"); std::string orderingType; switch(ordering_) { case NATURAL: orderingType="Natural"; break; case RANDOM: orderingType="Random"; break; case GRAPH: orderingType="Graph"; break; default: break; } GetOStream(Runtime1) << "Ordering: " << orderingType << std::endl; GetOStream(Runtime1) << "Min nodes per aggregate: " << minNodesPerAggregate_ << std::endl; GetOStream(Runtime1) << "Max nbrs already selected: " << maxNeighAlreadySelected_ << std::endl; /* Create Aggregation object */ my_size_t nAggregates = 0; /* ============================================================= */ /* aggStat indicates whether this node has been aggreated, and */ /* vertex2AggId stores the aggregate number where this node has */ /* been aggregated into. */ /* ============================================================= */ Teuchos::ArrayRCP<NodeState> aggStat; const my_size_t nRows = graph.GetNodeNumVertices(); if (nRows > 0) aggStat = Teuchos::arcp<NodeState>(nRows); for ( my_size_t i = 0; i < nRows; ++i ) aggStat[i] = READY; /* ============================================================= */ /* Phase 1 : */ /* for all nodes, form a new aggregate with its neighbors */ /* if the number of its neighbors having been aggregated does */ /* not exceed a given threshold */ /* (GetMaxNeighAlreadySelected() = 0 ===> Vanek's scheme) */ /* ============================================================= */ /* some general variable declarations */ Teuchos::ArrayRCP<LO> randomVector; RCP<MueLu::LinkedList> nodeList; /* list storing the next node to pick as a root point for ordering_ == GRAPH */ MueLu_SuperNode *aggHead=NULL, *aggCurrent=NULL, *supernode=NULL; /**/ if ( ordering_ == RANDOM ) /* random ordering */ { //TODO: could be stored in a class that respect interface of LinkedList randomVector = Teuchos::arcp<LO>(nRows); //size_t or int ?-> to be propagated for (my_size_t i = 0; i < nRows; ++i) randomVector[i] = i; RandomReorder(randomVector); } else if ( ordering_ == GRAPH ) /* graph ordering */ { nodeList = rcp(new MueLu::LinkedList()); nodeList->Add(0); } /* main loop */ { LO iNode = 0; LO iNode2 = 0; Teuchos::ArrayRCP<LO> vertex2AggId = aggregates.GetVertex2AggId()->getDataNonConst(0); // output only: contents ignored while (iNode2 < nRows) { /*------------------------------------------------------ */ /* pick the next node to aggregate */ /*------------------------------------------------------ */ if ( ordering_ == NATURAL ) iNode = iNode2++; else if ( ordering_ == RANDOM ) iNode = randomVector[iNode2++]; else if ( ordering_ == GRAPH ) { if ( nodeList->IsEmpty() ) { for ( int jNode = 0; jNode < nRows; ++jNode ) { if ( aggStat[jNode] == READY ) { nodeList->Add(jNode); //TODO optim: not necessary to create a node. Can just set iNode value and skip the end break; } } } if ( nodeList->IsEmpty() ) break; /* end of the while loop */ //TODO: coding style :( iNode = nodeList->Pop(); } else { throw(Exceptions::RuntimeError("CoarsenUncoupled: bad aggregation ordering option")); } /*------------------------------------------------------ */ /* consider further only if the node is in READY mode */ /*------------------------------------------------------ */ if ( aggStat[iNode] == READY ) { // neighOfINode is the neighbor node list of node 'iNode'. 
Teuchos::ArrayView<const LO> neighOfINode = graph.getNeighborVertices(iNode); typename Teuchos::ArrayView<const LO>::size_type length = neighOfINode.size(); supernode = new MueLu_SuperNode; try { supernode->list = Teuchos::arcp<int>(length+1); } catch (std::bad_alloc&) { TEUCHOS_TEST_FOR_EXCEPTION(true, Exceptions::RuntimeError, "MueLu::LocalAggregationAlgorithm::CoarsenUncoupled(): Error: couldn't allocate memory for supernode! length=" + Teuchos::toString(length)); } supernode->maxLength = length; supernode->length = 1; supernode->list[0] = iNode; int selectFlag = 1; { /*--------------------------------------------------- */ /* count the no. of neighbors having been aggregated */ /*--------------------------------------------------- */ int count = 0; for (typename Teuchos::ArrayView<const LO>::const_iterator it = neighOfINode.begin(); it != neighOfINode.end(); ++it) { int index = *it; if ( index < nRows ) { if ( aggStat[index] == READY || aggStat[index] == NOTSEL ) supernode->list[supernode->length++] = index; else count++; } } /*--------------------------------------------------- */ /* if there are too many neighbors aggregated or the */ /* number of nodes in the new aggregate is too few, */ /* don't do this one */ /*--------------------------------------------------- */ if ( count > GetMaxNeighAlreadySelected() ) selectFlag = 0; } // Note: the supernode length is actually 1 more than the // number of nodes in the candidate aggregate. The // root is counted twice. I'm not sure if this is // a bug or a feature ... so I'll leave it and change // < to <= in the if just below. if (selectFlag != 1 || supernode->length <= GetMinNodesPerAggregate()) { aggStat[iNode] = NOTSEL; delete supernode; if ( ordering_ == GRAPH ) /* if graph ordering */ { for (typename Teuchos::ArrayView<const LO>::const_iterator it = neighOfINode.begin(); it != neighOfINode.end(); ++it) { int index = *it; if ( index < nRows && aggStat[index] == READY ) { nodeList->Add(index); } } } } else { aggregates.SetIsRoot(iNode); for ( int j = 0; j < supernode->length; ++j ) { int jNode = supernode->list[j]; aggStat[jNode] = SELECTED; vertex2AggId[jNode] = nAggregates; if ( ordering_ == GRAPH ) /* if graph ordering */ { Teuchos::ArrayView<const LO> neighOfJNode = graph.getNeighborVertices(jNode); for (typename Teuchos::ArrayView<const LO>::const_iterator it = neighOfJNode.begin(); it != neighOfJNode.end(); ++it) { int index = *it; if ( index < nRows && aggStat[index] == READY ) { nodeList->Add(index); } } } } supernode->next = NULL; supernode->index = nAggregates; if ( nAggregates == 0 ) { aggHead = supernode; aggCurrent = supernode; } else { aggCurrent->next = supernode; aggCurrent = supernode; } nAggregates++; // unused aggCntArray[nAggregates] = supernode->length; } } } // end of 'for' // views on distributed vectors are freed here. 
} // end of 'main loop' nodeList = Teuchos::null; /* Update aggregate object */ aggregates.SetNumAggregates(nAggregates); /* Verbose */ { const RCP<const Teuchos::Comm<int> > & comm = graph.GetComm(); if (IsPrint(Warnings0)) { GO localReady=0, globalReady; // Compute 'localReady' for ( my_size_t i = 0; i < nRows; ++i ) if (aggStat[i] == READY) localReady++; // Compute 'globalReady' sumAll(comm, localReady, globalReady); if(globalReady > 0) GetOStream(Warnings0) << "Warning: " << globalReady << " READY nodes left" << std::endl; } if (IsPrint(Statistics1)) { // Compute 'localSelected' LO localSelected=0; for ( my_size_t i = 0; i < nRows; ++i ) if ( aggStat[i] == SELECTED ) localSelected++; // Compute 'globalSelected' GO globalSelected; sumAll(comm, (GO)localSelected, globalSelected); // Compute 'globalNRows' GO globalNRows; sumAll(comm, (GO)nRows, globalNRows); GetOStream(Statistics1) << "Nodes aggregated = " << globalSelected << " (" << globalNRows << ")" << std::endl; } if (IsPrint(Statistics1)) { GO nAggregatesGlobal; sumAll(comm, (GO)nAggregates, nAggregatesGlobal); GetOStream(Statistics1) << "Total aggregates = " << nAggregatesGlobal << std::endl; } } // verbose /* ------------------------------------------------------------- */ /* clean up */ /* ------------------------------------------------------------- */ aggCurrent = aggHead; while ( aggCurrent != NULL ) { supernode = aggCurrent; aggCurrent = aggCurrent->next; delete supernode; } } // CoarsenUncoupled
void BlockedPFactory<Scalar, LocalOrdinal, GlobalOrdinal, Node, LocalMatOps>::Build(Level& fineLevel, Level &coarseLevel) const { typedef Xpetra::Matrix<Scalar, LocalOrdinal, GlobalOrdinal, Node, LocalMatOps> MatrixClass; typedef Xpetra::CrsMatrix<Scalar, LocalOrdinal, GlobalOrdinal, Node, LocalMatOps> CrsMatrixClass; typedef Xpetra::CrsMatrixWrap<Scalar, LocalOrdinal, GlobalOrdinal, Node, LocalMatOps> CrsMatrixWrapClass; typedef Xpetra::BlockedCrsMatrix<Scalar, LocalOrdinal, GlobalOrdinal, Node, LocalMatOps> BlockedCrsOMatrix; typedef Xpetra::Map<LocalOrdinal, GlobalOrdinal, Node> MapClass; typedef Xpetra::MapFactory<LocalOrdinal, GlobalOrdinal, Node> MapFactoryClass; typedef Xpetra::MapExtractor<Scalar, LocalOrdinal, GlobalOrdinal, Node> MapExtractorClass; typedef Xpetra::MapExtractorFactory<Scalar, LocalOrdinal, GlobalOrdinal, Node> MapExtractorFactoryClass; //Teuchos::RCP<Teuchos::FancyOStream> fos = Teuchos::getFancyOStream(Teuchos::rcpFromRef(std::cout)); //std::ostringstream buf; buf << coarseLevel.GetLevelID(); // Level Get //RCP<Matrix> A = fineLevel. Get< RCP<Matrix> >("A", AFact_.get()); // IMPORTANT: use main factory manager for getting A RCP<Matrix> A = Get< RCP<Matrix> >(fineLevel, "A"); RCP<BlockedCrsOMatrix> bA = Teuchos::rcp_dynamic_cast<BlockedCrsOMatrix>(A); TEUCHOS_TEST_FOR_EXCEPTION(bA==Teuchos::null, Exceptions::BadCast, "MueLu::BlockedPFactory::Build: input matrix A is not of type BlockedCrsMatrix! error."); // plausibility check TEUCHOS_TEST_FOR_EXCEPTION(bA->Rows() != FactManager_.size(), Exceptions::RuntimeError, "MueLu::BlockedPFactory::Build: number of block rows of A does not match number of SubFactoryManagers. error."); TEUCHOS_TEST_FOR_EXCEPTION(bA->Cols() != FactManager_.size(), Exceptions::RuntimeError, "MueLu::BlockedPFactory::Build: number of block cols of A does not match number of SubFactoryManagers. error."); // build blocked prolongator std::vector<RCP<Matrix> > subBlockP; std::vector<RCP<const MapClass> > subBlockPRangeMaps; std::vector<RCP<const MapClass > > subBlockPDomainMaps; std::vector<GO> fullRangeMapVector; std::vector<GO> fullDomainMapVector; subBlockP.reserve(FactManager_.size()); // reserve size for block P operators subBlockPRangeMaps.reserve(FactManager_.size()); // reserve size for block P operators subBlockPDomainMaps.reserve(FactManager_.size()); // reserve size for block P operators // build and store the subblocks and the corresponding range and domain maps // since we put together the full range and domain map from the submaps we do not have // to use the maps from blocked A std::vector<Teuchos::RCP<const FactoryManagerBase> >::const_iterator it; for(it = FactManager_.begin(); it!=FactManager_.end(); ++it) { SetFactoryManager fineSFM (rcpFromRef(fineLevel), *it); SetFactoryManager coarseSFM(rcpFromRef(coarseLevel), *it); if(!restrictionMode_) { subBlockP.push_back(coarseLevel.Get<RCP<Matrix> >("P", (*it)->GetFactory("P").get())); // create and return block P operator } else { subBlockP.push_back(coarseLevel.Get<RCP<Matrix> >("R", (*it)->GetFactory("R").get())); // create and return block R operator } // check if prolongator/restrictor operators have strided maps TEUCHOS_TEST_FOR_EXCEPTION(subBlockP.back()->IsView("stridedMaps")==false, Exceptions::BadCast, "MueLu::BlockedPFactory::Build: subBlock P operator has no strided map information. error."); // append strided row map (= range map) to list of range maps. 
Teuchos::RCP<const Map> rangeMap = subBlockP.back()->getRowMap("stridedMaps"); /* getRangeMap(); //*/ subBlockPRangeMaps.push_back(rangeMap); // use plain range map to determine the DOF ids Teuchos::ArrayView< const GlobalOrdinal > nodeRangeMap = subBlockP.back()->getRangeMap()->getNodeElementList(); //subBlockPRangeMaps.back()->getNodeElementList(); fullRangeMapVector.insert(fullRangeMapVector.end(), nodeRangeMap.begin(), nodeRangeMap.end()); sort(fullRangeMapVector.begin(), fullRangeMapVector.end()); // append strided col map (= domain map) to list of range maps. Teuchos::RCP<const Map> domainMap = subBlockP.back()->getColMap("stridedMaps"); /* getDomainMap(); //*/ subBlockPDomainMaps.push_back(domainMap); // use plain domain map to determine the DOF ids Teuchos::ArrayView< const GlobalOrdinal > nodeDomainMap = subBlockP.back()->getDomainMap()->getNodeElementList(); //subBlockPDomainMaps.back()->getNodeElementList(); fullDomainMapVector.insert(fullDomainMapVector.end(), nodeDomainMap.begin(), nodeDomainMap.end()); sort(fullDomainMapVector.begin(), fullDomainMapVector.end()); } // extract map index base from maps of blocked A GO rangeIndexBase = 0; GO domainIndexBase = 0; if(!restrictionMode_) { // prolongation mode: just use index base of range and domain map of bA rangeIndexBase = bA->getRangeMap()->getIndexBase(); domainIndexBase= bA->getDomainMap()->getIndexBase(); } else { // restriction mode: switch range and domain map for blocked restriction operator rangeIndexBase = bA->getDomainMap()->getIndexBase(); domainIndexBase= bA->getRangeMap()->getIndexBase(); } // build full range map. // If original range map has striding information, then transfer it to the new range map RCP<const MapExtractorClass> rangeAMapExtractor = bA->getRangeMapExtractor(); Teuchos::ArrayView<GO> fullRangeMapGIDs(&fullRangeMapVector[0],fullRangeMapVector.size()); Teuchos::RCP<const StridedMap> stridedRgFullMap = Teuchos::rcp_dynamic_cast<const StridedMap>(rangeAMapExtractor->getFullMap()); Teuchos::RCP<const Map > fullRangeMap = Teuchos::null; if(stridedRgFullMap != Teuchos::null) { std::vector<size_t> stridedData = stridedRgFullMap->getStridingData(); fullRangeMap = StridedMapFactory::Build( bA->getRangeMap()->lib(), Teuchos::OrdinalTraits<Xpetra::global_size_t>::invalid(), fullRangeMapGIDs, rangeIndexBase, stridedData, bA->getRangeMap()->getComm(), stridedRgFullMap->getStridedBlockId(), stridedRgFullMap->getOffset()); } else { fullRangeMap = MapFactory::Build( bA->getRangeMap()->lib(), Teuchos::OrdinalTraits<Xpetra::global_size_t>::invalid(), fullRangeMapGIDs, rangeIndexBase, bA->getRangeMap()->getComm()); } RCP<const MapExtractorClass> domainAMapExtractor = bA->getDomainMapExtractor(); Teuchos::ArrayView<GO> fullDomainMapGIDs(&fullDomainMapVector[0],fullDomainMapVector.size()); Teuchos::RCP<const StridedMap> stridedDoFullMap = Teuchos::rcp_dynamic_cast<const StridedMap>(domainAMapExtractor->getFullMap()); Teuchos::RCP<const Map > fullDomainMap = Teuchos::null; if(stridedDoFullMap != Teuchos::null) { TEUCHOS_TEST_FOR_EXCEPTION(stridedDoFullMap==Teuchos::null, Exceptions::BadCast, "MueLu::BlockedPFactory::Build: full map in domain map extractor has no striding information! 
error."); std::vector<size_t> stridedData2 = stridedDoFullMap->getStridingData(); fullDomainMap = StridedMapFactory::Build( bA->getDomainMap()->lib(), Teuchos::OrdinalTraits<Xpetra::global_size_t>::invalid(), fullDomainMapGIDs, domainIndexBase, stridedData2, bA->getDomainMap()->getComm(), stridedDoFullMap->getStridedBlockId(), stridedDoFullMap->getOffset()); } else { fullDomainMap = MapFactory::Build( bA->getDomainMap()->lib(), Teuchos::OrdinalTraits<Xpetra::global_size_t>::invalid(), fullDomainMapGIDs, domainIndexBase, bA->getDomainMap()->getComm()); } // build map extractors Teuchos::RCP<const MapExtractorClass> rangeMapExtractor = MapExtractorFactoryClass::Build(fullRangeMap, subBlockPRangeMaps); Teuchos::RCP<const MapExtractorClass> domainMapExtractor = MapExtractorFactoryClass::Build(fullDomainMap, subBlockPDomainMaps); Teuchos::RCP<BlockedCrsOMatrix> bP = Teuchos::rcp(new BlockedCrsOMatrix(rangeMapExtractor,domainMapExtractor,10)); for(size_t i = 0; i<subBlockPRangeMaps.size(); i++) { Teuchos::RCP<CrsMatrixWrapClass> crsOpii = Teuchos::rcp_dynamic_cast<CrsMatrixWrapClass>(subBlockP[i]); Teuchos::RCP<CrsMatrixClass> crsMatii = crsOpii->getCrsMatrix(); bP->setMatrix(i,i,crsMatii); } bP->fillComplete(); //bP->describe(*fos,Teuchos::VERB_EXTREME); // Level Set if(!restrictionMode_) { // prolongation factory is in prolongation mode coarseLevel.Set("P", Teuchos::rcp_dynamic_cast<MatrixClass>(bP), this); } else { // prolongation factory is in restriction mode // we do not have to transpose the blocked R operator since the subblocks on the diagonal // are already valid R subblocks coarseLevel.Set("R", Teuchos::rcp_dynamic_cast<MatrixClass>(bP), this); } }
int main(int argc, char *argv[]) { // Communicators Teuchos::GlobalMPISession mpiSession(&argc, &argv); const Albany_MPI_Comm nativeComm = Albany_MPI_COMM_WORLD; const RCP<const Teuchos::Comm<int> > teuchosComm = Albany::createTeuchosCommFromMpiComm(nativeComm); // Standard output const RCP<Teuchos::FancyOStream> out = Teuchos::VerboseObjectBase::getDefaultOStream(); // Parse command-line argument for input file const std::string firstArg = (argc > 1) ? argv[1] : ""; if (firstArg.empty() || firstArg == "--help") { *out << "AlbanyRBGen input-file-path\n"; return 0; } const std::string inputFileName = argv[1]; // Parse XML input file const RCP<Teuchos::ParameterList> topLevelParams = Teuchos::createParameterList("Albany Parameters"); Teuchos::updateParametersFromXmlFileAndBroadcast(inputFileName, topLevelParams.ptr(), *teuchosComm); const bool sublistMustExist = true; // Setup discretization factory const RCP<Teuchos::ParameterList> discParams = Teuchos::sublist(topLevelParams, "Discretization", sublistMustExist); TEUCHOS_TEST_FOR_EXCEPT(discParams->get<std::string>("Method") != "Ioss"); const std::string outputParamLabel = "Exodus Output File Name"; const std::string sampledOutputParamLabel = "Reference Exodus Output File Name"; const RCP<const Teuchos::ParameterEntry> reducedOutputParamEntry = getEntryCopy(*discParams, outputParamLabel); const RCP<const Teuchos::ParameterEntry> sampledOutputParamEntry = getEntryCopy(*discParams, sampledOutputParamLabel); discParams->remove(outputParamLabel, /*throwIfNotExists =*/ false); discParams->remove(sampledOutputParamLabel, /*throwIfNotExists =*/ false); const RCP<const Teuchos::ParameterList> discParamsCopy = Teuchos::rcp(new Teuchos::ParameterList(*discParams)); const RCP<Teuchos::ParameterList> problemParams = Teuchos::sublist(topLevelParams, "Problem", sublistMustExist); const RCP<const Teuchos::ParameterList> problemParamsCopy = Teuchos::rcp(new Teuchos::ParameterList(*problemParams)); // Create original (full) discretization const RCP<Albany::AbstractDiscretization> disc = Albany::discretizationNew(topLevelParams, teuchosComm); // Determine mesh sample const RCP<Teuchos::ParameterList> samplingParams = Teuchos::sublist(topLevelParams, "Mesh Sampling", sublistMustExist); const int firstVectorRank = samplingParams->get("First Vector Rank", 0); const Teuchos::Ptr<const int> basisSizeMax = Teuchos::ptr(samplingParams->getPtr<int>("Basis Size Max")); const int sampleSize = samplingParams->get("Sample Size", 0); *out << "Sampling " << sampleSize << " nodes"; if (Teuchos::nonnull(basisSizeMax)) { *out << " based on no more than " << *basisSizeMax << " basis vectors"; } if (firstVectorRank != 0) { *out << " starting from vector rank " << firstVectorRank; } *out << "\n"; const RCP<Albany::STKDiscretization> stkDisc = Teuchos::rcp_dynamic_cast<Albany::STKDiscretization>(disc, /*throw_on_fail =*/ true); const RCP<MOR::AtomicBasisSource> rawBasisSource = Teuchos::rcp(new Albany::StkNodalBasisSource(stkDisc)); const RCP<MOR::AtomicBasisSource> basisSource = Teuchos::rcp( Teuchos::nonnull(basisSizeMax) ? 
new MOR::WindowedAtomicBasisSource(rawBasisSource, firstVectorRank, *basisSizeMax) : new MOR::WindowedAtomicBasisSource(rawBasisSource, firstVectorRank) ); MOR::CollocationMetricCriterionFactory criterionFactory(samplingParams); const Teuchos::RCP<const MOR::CollocationMetricCriterion> criterion = criterionFactory.instanceNew(basisSource->entryCountMax()); const Teuchos::RCP<MOR::GreedyAtomicBasisSample> sampler(new MOR::GreedyAtomicBasisSample(*basisSource, criterion)); sampler->sampleSizeInc(sampleSize); Teuchos::Array<stk::mesh::EntityId> sampleNodeIds; const Teuchos::ArrayView<const int> sampleAtoms = sampler->sample(); sampleNodeIds.reserve(sampleAtoms.size()); for (Teuchos::ArrayView<const int>::const_iterator it = sampleAtoms.begin(), it_end = sampleAtoms.end(); it != it_end; ++it) { sampleNodeIds.push_back(*it + 1); } *out << "Sample = " << sampleNodeIds << "\n"; // Choose first sample node as sensor const Teuchos::ArrayView<const stk::mesh::EntityId> sensorNodeIds = sampleNodeIds.view(0, 1); const Teuchos::Array<std::string> additionalNodeSets = Teuchos::tuple(std::string("sample_nodes"), std::string("sensors")); // Create sampled discretization if (Teuchos::nonnull(sampledOutputParamEntry)) { const RCP<Teuchos::ParameterList> discParamsLocalCopy = Teuchos::rcp(new Teuchos::ParameterList(*discParamsCopy)); discParamsLocalCopy->setEntry("Exodus Output File Name", *sampledOutputParamEntry); discParamsLocalCopy->set("Additional Node Sets", additionalNodeSets); topLevelParams->set("Discretization", *discParamsLocalCopy); topLevelParams->set("Problem", *problemParamsCopy); const bool performReduction = false; const RCP<Albany::AbstractDiscretization> sampledDisc = sampledDiscretizationNew(topLevelParams, teuchosComm, sampleNodeIds, sensorNodeIds, performReduction); if (Teuchos::nonnull(basisSizeMax)) { transferSolutionHistory(*stkDisc, *sampledDisc, *basisSizeMax + firstVectorRank); } else { transferSolutionHistory(*stkDisc, *sampledDisc); } } // Create reduced discretization if (Teuchos::nonnull(reducedOutputParamEntry)) { const RCP<Teuchos::ParameterList> discParamsLocalCopy = Teuchos::rcp(new Teuchos::ParameterList(*discParamsCopy)); discParamsLocalCopy->setEntry("Exodus Output File Name", *reducedOutputParamEntry); discParamsLocalCopy->set("Additional Node Sets", additionalNodeSets); topLevelParams->set("Discretization", *discParamsLocalCopy); topLevelParams->set("Problem", *problemParamsCopy); const bool performReduction = true; const RCP<Albany::AbstractDiscretization> reducedDisc = sampledDiscretizationNew(topLevelParams, teuchosComm, sampleNodeIds, sensorNodeIds, performReduction); if (Teuchos::nonnull(basisSizeMax)) { transferSolutionHistory(*stkDisc, *reducedDisc, *basisSizeMax + firstVectorRank); } else { transferSolutionHistory(*stkDisc, *reducedDisc); } } }
int main(int argc, char *argv[]) { Teuchos::oblackholestream blackhole; Teuchos::GlobalMPISession mpiSession(&argc,&argv,&blackhole); using Teuchos::TimeMonitor; using TpetraExamples::FDStencil; using TpetraExamples::ScaleKernel; // // Get the default communicator and node // auto &platform = Tpetra::DefaultPlatform::getDefaultPlatform(); auto comm = platform.getComm(); auto node = platform.getNode(); const int myImageID = comm->getRank(); const int numImages = comm->getSize(); // // Get example parameters from command-line processor // bool verbose = (myImageID==0); int numGlobal_user = 100*comm->getSize(); int numTimeTrials = 3; Teuchos::CommandLineProcessor cmdp(false,true); cmdp.setOption("verbose","quiet",&verbose,"Print messages and results."); cmdp.setOption("global-size",&numGlobal_user,"Global test size."); cmdp.setOption("num-time-trials",&numTimeTrials,"Number of trials in timing loops."); if (cmdp.parse(argc,argv) != Teuchos::CommandLineProcessor::PARSE_SUCCESSFUL) { return -1; } // // Say hello, print some communicator info // if (verbose) { std::cout << "\n" << Tpetra::version() << std::endl; std::cout << "Comm info: " << *comm; std::cout << "Node type: " << Teuchos::typeName(*node) << std::endl; std::cout << std::endl; } // // Create a simple map for domain and range // Tpetra::global_size_t numGlobalRows = numGlobal_user; auto map = Tpetra::createUniformContigMapWithNode<int,int>(numGlobalRows, comm, node); // const size_t numLocalRows = map->getNodeNumElements(); auto x = Tpetra::createVector<double>(map), y = Tpetra::createVector<double>(map); // Create a simple diagonal operator using lambda function auto fTwoOp = Tpetra::RTI::binaryOp<double>( [](double /*y*/, double x) { return 2.0 * x; } , map ); // y = 3*fTwoOp*x + 2*y = 3*2*1 + 2*1 = 8 x->putScalar(1.0); y->putScalar(1.0); fTwoOp->apply( *x, *y, Teuchos::NO_TRANS, 3.0, 2.0 ); // check that y == eights double norm = y->norm1(); if (verbose) { std::cout << "Tpetra::RTI::binaryOp" << std::endl << "norm(y) result == " << std::setprecision(2) << std::scientific << norm << ", expected value is " << numGlobalRows * 8.0 << std::endl; } // Create the same diagonal operator using a Kokkos kernel auto kTwoOp = Tpetra::RTI::kernelOp<double>( ScaleKernel<double>(2.0), map ); // y = 0.5*kTwop*x + 0.75*y = 0.5*2*1 + 0.75*8 = 7 kTwoOp->apply( *x, *y, Teuchos::NO_TRANS, 0.5, 0.75 ); // check that y == sevens norm = y->norm1(); if (verbose) { std::cout << "Tpetra::RTI::kernelOp" << std::endl << "norm(y) result == " << std::setprecision(2) << std::scientific << norm << ", expected value is " << numGlobalRows * 7.0 << std::endl; } // // Create a finite-difference stencil using a Kokkos kernel and non-trivial maps // decltype(map) colmap; if (numImages > 1) { Teuchos::Array<int> colElements; Teuchos::ArrayView<const int> rowElements = map->getNodeElementList(); // This isn't the most efficient Map/Import layout, but it makes for a very straight-forward kernel if (myImageID != 0) colElements.push_back( map->getMinGlobalIndex() - 1 ); colElements.insert(colElements.end(), rowElements.begin(), rowElements.end()); if (myImageID != numImages-1) colElements.push_back( map->getMaxGlobalIndex() + 1 ); colmap = Tpetra::createNonContigMapWithNode<int,int>(colElements(), comm, node); } else { colmap = map; } auto importer = createImport(map,colmap); // Finite-difference kernel = tridiag(-1, 2, -1) FDStencil<double> kern(myImageID, numImages, map->getNodeNumElements(), -1.0, 2.0, -1.0); auto FDStencilOp = Tpetra::RTI::kernelOp<double>( kern, map, 
map, importer ); // x = ones(), FD(x) = [1 zeros() 1] auto timeFDStencil = TimeMonitor::getNewTimer("FD RTI Stencil"); { TimeMonitor lcltimer(*timeFDStencil); for (int l=0; l != numTimeTrials; ++l) { FDStencilOp->apply( *x, *y ); } } norm = y->norm1(); if (verbose) { std::cout << std::endl << "TpetraExamples::FDStencil" << std::endl << "norm(y) result == " << std::setprecision(2) << std::scientific << norm << ", expected value is " << 2.0 << std::endl; } // // Create a finite-difference stencil using a CrsMatrix // auto FDMatrix = Tpetra::createCrsMatrix<double>(map); for (int r=map->getMinGlobalIndex(); r <= map->getMaxGlobalIndex(); ++r) { if (r == map->getMinAllGlobalIndex()) { FDMatrix->insertGlobalValues(r, Teuchos::tuple<int>(r,r+1), Teuchos::tuple<double>(2.0,-1.0)); } else if (r == map->getMaxAllGlobalIndex()) { FDMatrix->insertGlobalValues(r, Teuchos::tuple<int>(r-1,r), Teuchos::tuple<double>(-1.0,2.0)); } else { FDMatrix->insertGlobalValues(r, Teuchos::tuple<int>(r-1,r,r+1), Teuchos::tuple<double>(-1.0,2.0,-1.0)); } } FDMatrix->fillComplete(); auto timeFDMatrix = TimeMonitor::getNewTimer("FD CrsMatrix"); { TimeMonitor lcltimer(*timeFDMatrix); for (int l=0; l != numTimeTrials; ++l) { FDMatrix->apply(*x, *y); } } // // Print timings // if (verbose) { std::cout << std::endl; TimeMonitor::summarize( std::cout ); } if (verbose) { std::cout << "\nEnd Result: TEST PASSED" << std::endl; } return 0; }
void
MockModelEvaluator::setParameter(const int l, const Teuchos::ArrayView<const double>& p)
{
  parameterValues_[l].resize(p.size());
  std::copy(p.begin(), p.end(), parameterValues_[l].begin());
}
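// A minimal usage sketch: Teuchos::tuple builds a small fixed-size array that
// converts to ArrayView<const double>. The evaluator instance is assumed to
// come from the surrounding test fixture.
void setFirstParameter (MockModelEvaluator& model)
{
  // Set parameter vector l = 0 to (1.0, 2.5, -3.0).
  model.setParameter (0, Teuchos::tuple<double> (1.0, 2.5, -3.0));
}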
int main(int argc, char *argv[]) { Teuchos::oblackholestream blackhole; Teuchos::GlobalMPISession mpiSession(&argc,&argv,&blackhole); // // Get the default communicator and node // auto &platform = Tpetra::DefaultPlatform::getDefaultPlatform(); auto comm = platform.getComm(); auto node = platform.getNode(); const int myImageID = comm->getRank(); const int numImages = comm->getSize(); const bool verbose = (myImageID==0); // // Say hello, print some communicator info // if (verbose) { std::cout << "\n" << Tpetra::version() << std::endl; std::cout << "Comm info: " << *comm; std::cout << "Node type: " << Teuchos::typeName(*node) << std::endl; std::cout << std::endl; } // // Create a simple map for domain and range // Tpetra::global_size_t numGlobalRows = 1000*numImages; auto map = Tpetra::createUniformContigMapWithNode<int,int>(numGlobalRows, comm, node); auto x = Tpetra::createVector<double>(map), y = Tpetra::createVector<double>(map); // Create a simple diagonal operator using lambda function auto fTwoOp = Tpetra::RTI::binaryOp<double>( [](double /*y*/, double x) { return 2.0 * x; } , map ); // y = 3*fTwoOp*x + 2*y = 3*2*1 + 2*1 = 8 x->putScalar(1.0); y->putScalar(1.0); fTwoOp->apply( *x, *y, Teuchos::NO_TRANS, 3.0, 2.0 ); // check that y == eights double norm = y->norm1(); if (verbose) { std::cout << "Tpetra::RTI::binaryOp" << std::endl << "norm(y) result == " << std::setprecision(2) << std::scientific << norm << ", expected value is " << numGlobalRows * 8.0 << std::endl; } // // Create a finite-difference stencil using a Kokkos kernel and non-trivial maps // decltype(map) colmap; if (numImages > 1) { Teuchos::Array<int> colElements; Teuchos::ArrayView<const int> rowElements = map->getNodeElementList(); // This isn't the most efficient Map/Import layout, but it makes for a very straight-forward kernel if (myImageID != 0) colElements.push_back( map->getMinGlobalIndex() - 1 ); colElements.insert(colElements.end(), rowElements.begin(), rowElements.end()); if (myImageID != numImages-1) colElements.push_back( map->getMaxGlobalIndex() + 1 ); colmap = Tpetra::createNonContigMapWithNode<int,int>(colElements(), comm, node); } else { colmap = map; } auto importer = createImport(map,colmap); // Finite-difference kernel = tridiag(-1, 2, -1) FDStencil<double> kern(myImageID, numImages, map->getNodeNumElements(), -1.0, 2.0, -1.0); auto FDStencilOp = Tpetra::RTI::kernelOp<double>( kern, map, map, importer ); // x = ones(), FD(x) = [1 zeros() 1] FDStencilOp->apply( *x, *y ); norm = y->norm1(); if (verbose) { std::cout << std::endl << "TpetraExamples::FDStencil" << std::endl << "norm(y) result == " << std::setprecision(2) << std::scientific << norm << ", expected value is " << 2.0 << std::endl; } std::cout << "\nEnd Result: TEST PASSED" << std::endl; return 0; }
// special constructor for generating a given subblock of a strided map
static RCP<StridedMap> Build(const RCP<const StridedMap>& map, LocalOrdinal stridedBlockId) {
  TEUCHOS_TEST_FOR_EXCEPTION(stridedBlockId < 0, Exceptions::RuntimeError,
    "Xpetra::StridedMapFactory::Build: constructor expects stridedBlockId > -1.");
  TEUCHOS_TEST_FOR_EXCEPTION(map->getStridedBlockId() != -1, Exceptions::RuntimeError,
    "Xpetra::StridedMapFactory::Build: constructor expects a full map (stridedBlockId == -1).");

  std::vector<size_t> stridingInfo = map->getStridingData();

  Teuchos::ArrayView<const GlobalOrdinal> dofGids = map->getNodeElementList();
  // std::sort(dofGids.begin(),dofGids.end()); // TODO: do we need this?

  // determine nStridedOffset for the requested subblock
  size_t nStridedOffset = 0;
  for (int j = 0; j < stridedBlockId; j++)
    nStridedOffset += stridingInfo[j];

  size_t numMyBlockDofs = (stridingInfo[stridedBlockId] * map->getNodeNumElements()) / map->getFixedBlockSize();
  std::vector<GlobalOrdinal> subBlockDofGids(numMyBlockDofs);

  // fill the vector with the DOF gids that belong to the requested striding block
  LocalOrdinal ind = 0;
  for (typename Teuchos::ArrayView<const GlobalOrdinal>::const_iterator it = dofGids.begin(); it != dofGids.end(); ++it)
    if (map->GID2StridingBlockId(*it) == Teuchos::as<size_t>(stridedBlockId))
      subBlockDofGids[ind++] = *it;

  const Teuchos::ArrayView<const GlobalOrdinal> subBlockDofGids_view(&subBlockDofGids[0], subBlockDofGids.size());

  return rcp(new StridedMap(map->lib(), Teuchos::OrdinalTraits<global_size_t>::invalid(), subBlockDofGids_view,
                            map->getIndexBase(), stridingInfo, map->getComm(), stridedBlockId, map->getNode()));
}
void RebalanceBlockRestrictionFactory<Scalar, LocalOrdinal, GlobalOrdinal, Node, LocalMatOps>::Build(Level &fineLevel, Level &coarseLevel) const { FactoryMonitor m(*this, "Build", coarseLevel); //const Teuchos::ParameterList & pL = GetParameterList(); RCP<Teuchos::FancyOStream> out = Teuchos::fancyOStream(Teuchos::rcpFromRef(std::cout)); Teuchos::RCP<Matrix> originalTransferOp = Teuchos::null; originalTransferOp = Get< RCP<Matrix> >(coarseLevel, "R"); RCP<Xpetra::BlockedCrsMatrix<Scalar, LocalOrdinal, GlobalOrdinal, Node, LocalMatOps> > bOriginalTransferOp = Teuchos::rcp_dynamic_cast<Xpetra::BlockedCrsMatrix<Scalar, LocalOrdinal, GlobalOrdinal, Node, LocalMatOps> >(originalTransferOp); TEUCHOS_TEST_FOR_EXCEPTION(bOriginalTransferOp==Teuchos::null, Exceptions::BadCast, "MueLu::RebalanceBlockTransferFactory::Build: input matrix P or R is not of type BlockedCrsMatrix! error."); // plausibility check TEUCHOS_TEST_FOR_EXCEPTION(bOriginalTransferOp->Rows() != 2,Exceptions::RuntimeError, "MueLu::RebalanceBlockTransferFactory::Build: number of block rows of transfer operator is not equal 2. error."); TEUCHOS_TEST_FOR_EXCEPTION(bOriginalTransferOp->Cols() != 2,Exceptions::RuntimeError, "MueLu::RebalanceBlockTransferFactory::Build: number of block columns of transfer operator is not equal 2. error."); // rebuild rebalanced blocked P operator std::vector<GO> fullRangeMapVector; std::vector<GO> fullDomainMapVector; std::vector<RCP<const Map> > subBlockRRangeMaps; std::vector<RCP<const Map> > subBlockRDomainMaps; subBlockRRangeMaps.reserve(bOriginalTransferOp->Rows()); // reserve size for block P operators subBlockRDomainMaps.reserve(bOriginalTransferOp->Cols()); // reserve size for block P operators std::vector<Teuchos::RCP<Matrix> > subBlockRebR; subBlockRebR.reserve(bOriginalTransferOp->Cols()); int curBlockId = 0; Teuchos::RCP<const Import> rebalanceImporter = Teuchos::null; std::vector<Teuchos::RCP<const FactoryManagerBase> >::const_iterator it; for (it = FactManager_.begin(); it != FactManager_.end(); ++it) { // begin SubFactoryManager environment SetFactoryManager fineSFM (rcpFromRef(fineLevel), *it); SetFactoryManager coarseSFM(rcpFromRef(coarseLevel), *it); rebalanceImporter = coarseLevel.Get<Teuchos::RCP<const Import> >("Importer", (*it)->GetFactory("Importer").get()); // extract matrix block Teuchos::RCP<CrsMatrix> Rmii = bOriginalTransferOp->getMatrix(curBlockId, curBlockId); Teuchos::RCP<CrsMatrixWrap> Rwii = Teuchos::rcp(new CrsMatrixWrap(Rmii)); Teuchos::RCP<Matrix> Rii = Teuchos::rcp_dynamic_cast<Matrix>(Rwii); Teuchos::RCP<Matrix> rebRii; if(rebalanceImporter != Teuchos::null) { std::stringstream ss; ss << "Rebalancing restriction block R(" << curBlockId << "," << curBlockId << ")"; SubFactoryMonitor m1(*this, ss.str(), coarseLevel); { SubFactoryMonitor subM(*this, "Rebalancing restriction -- fusedImport", coarseLevel); // Note: The 3rd argument says to use originalR's domain map. 
RCP<Map> dummy; rebRii = MatrixFactory::Build(Rii,*rebalanceImporter,dummy,rebalanceImporter->getTargetMap()); } RCP<ParameterList> params = rcp(new ParameterList()); params->set("printLoadBalancingInfo", true); std::stringstream ss2; ss2 << "R(" << curBlockId << "," << curBlockId << ") rebalanced:"; GetOStream(Statistics0) << PerfUtils::PrintMatrixInfo(*rebRii, ss2.str(), params); } else { rebRii = Rii; RCP<ParameterList> params = rcp(new ParameterList()); params->set("printLoadBalancingInfo", true); std::stringstream ss2; ss2 << "R(" << curBlockId << "," << curBlockId << ") not rebalanced:"; GetOStream(Statistics0) << PerfUtils::PrintMatrixInfo(*rebRii, ss2.str(), params); } // fix striding information for rebalanced diagonal block rebRii RCP<const Xpetra::MapExtractor<Scalar, LocalOrdinal, GlobalOrdinal, Node> > rgRMapExtractor = bOriginalTransferOp->getRangeMapExtractor(); // original map extractor Teuchos::RCP<const StridedMap> orig_stridedRgMap = Teuchos::rcp_dynamic_cast<const StridedMap>(rgRMapExtractor->getMap(Teuchos::as<size_t>(curBlockId))); Teuchos::RCP<const Map> stridedRgMap = Teuchos::null; if(orig_stridedRgMap != Teuchos::null) { std::vector<size_t> stridingData = orig_stridedRgMap->getStridingData(); Teuchos::ArrayView< const GlobalOrdinal > nodeRangeMapii = rebRii->getRangeMap()->getNodeElementList(); stridedRgMap = StridedMapFactory::Build( originalTransferOp->getRangeMap()->lib(), Teuchos::OrdinalTraits<Xpetra::global_size_t>::invalid(), nodeRangeMapii, rebRii->getRangeMap()->getIndexBase(), stridingData, originalTransferOp->getRangeMap()->getComm(), orig_stridedRgMap->getStridedBlockId(), orig_stridedRgMap->getOffset()); } RCP<const Xpetra::MapExtractor<Scalar, LocalOrdinal, GlobalOrdinal, Node> > doRMapExtractor = bOriginalTransferOp->getDomainMapExtractor(); // original map extractor Teuchos::RCP<const StridedMap> orig_stridedDoMap = Teuchos::rcp_dynamic_cast<const StridedMap>(doRMapExtractor->getMap(Teuchos::as<size_t>(curBlockId))); Teuchos::RCP<const Map> stridedDoMap = Teuchos::null; if(orig_stridedDoMap != Teuchos::null) { std::vector<size_t> stridingData = orig_stridedDoMap->getStridingData(); Teuchos::ArrayView< const GlobalOrdinal > nodeDomainMapii = rebRii->getDomainMap()->getNodeElementList(); stridedDoMap = StridedMapFactory::Build( originalTransferOp->getDomainMap()->lib(), Teuchos::OrdinalTraits<Xpetra::global_size_t>::invalid(), nodeDomainMapii, rebRii->getDomainMap()->getIndexBase(), stridingData, originalTransferOp->getDomainMap()->getComm(), orig_stridedDoMap->getStridedBlockId(), orig_stridedDoMap->getOffset()); } TEUCHOS_TEST_FOR_EXCEPTION(stridedRgMap == Teuchos::null,Exceptions::RuntimeError, "MueLu::RebalanceBlockRestrictionFactory::Build: failed to generate striding information. error."); TEUCHOS_TEST_FOR_EXCEPTION(stridedDoMap == Teuchos::null,Exceptions::RuntimeError, "MueLu::RebalanceBlockRestrictionFactory::Build: failed to generate striding information. error."); // replace stridedMaps view in diagonal sub block if(rebRii->IsView("stridedMaps")) rebRii->RemoveView("stridedMaps"); rebRii->CreateView("stridedMaps", stridedRgMap, stridedDoMap); // store rebalanced subblock subBlockRebR.push_back(rebRii); // append strided row map (= range map) to list of range maps. 
Teuchos::RCP<const Map> rangeMapii = rebRii->getRowMap("stridedMaps"); //rebRii->getRangeMap(); subBlockRRangeMaps.push_back(rangeMapii); Teuchos::ArrayView< const GlobalOrdinal > nodeRangeMapii = rebRii->getRangeMap()->getNodeElementList(); fullRangeMapVector.insert(fullRangeMapVector.end(), nodeRangeMapii.begin(), nodeRangeMapii.end()); sort(fullRangeMapVector.begin(), fullRangeMapVector.end()); // append strided col map (= domain map) to list of range maps. Teuchos::RCP<const Map> domainMapii = rebRii->getColMap("stridedMaps"); //rebRii->getDomainMap(); subBlockRDomainMaps.push_back(domainMapii); Teuchos::ArrayView< const GlobalOrdinal > nodeDomainMapii = rebRii->getDomainMap()->getNodeElementList(); fullDomainMapVector.insert(fullDomainMapVector.end(), nodeDomainMapii.begin(), nodeDomainMapii.end()); sort(fullDomainMapVector.begin(), fullDomainMapVector.end()); //////////////////////////////////////////////////////////// // rebalance null space if(rebalanceImporter != Teuchos::null) { // rebalance null space std::stringstream ss2; ss2 << "Rebalancing nullspace block(" << curBlockId << "," << curBlockId << ")"; SubFactoryMonitor subM(*this, ss2.str(), coarseLevel); RCP<MultiVector> nullspace = coarseLevel.Get<RCP<MultiVector> >("Nullspace", (*it)->GetFactory("Nullspace").get()); RCP<MultiVector> permutedNullspace = MultiVectorFactory::Build(rebalanceImporter->getTargetMap(), nullspace->getNumVectors()); permutedNullspace->doImport(*nullspace, *rebalanceImporter, Xpetra::INSERT); // TODO think about this //if (pL.get<bool>("useSubcomm") == true) // TODO either useSubcomm is enabled everywhere or nowhere //permutedNullspace->replaceMap(permutedNullspace->getMap()->removeEmptyProcesses()); coarseLevel.Set<RCP<MultiVector> >("Nullspace", permutedNullspace, (*it)->GetFactory("Nullspace").get()); } // end rebalance null space else { // do nothing RCP<MultiVector> nullspace = coarseLevel.Get<RCP<MultiVector> >("Nullspace", (*it)->GetFactory("Nullspace").get()); coarseLevel.Set<RCP<MultiVector> >("Nullspace", nullspace, (*it)->GetFactory("Nullspace").get()); } //////////////////////////////////////////////////////////// curBlockId++; } // end for loop // extract map index base from maps of blocked P GO rangeIndexBase = originalTransferOp->getRangeMap()->getIndexBase(); GO domainIndexBase= originalTransferOp->getDomainMap()->getIndexBase(); // check this RCP<const Xpetra::MapExtractor<Scalar, LocalOrdinal, GlobalOrdinal, Node> > rangeRMapExtractor = bOriginalTransferOp->getRangeMapExtractor(); // original map extractor Teuchos::ArrayView<GO> fullRangeMapGIDs(&fullRangeMapVector[0],fullRangeMapVector.size()); Teuchos::RCP<const StridedMap> stridedRgFullMap = Teuchos::rcp_dynamic_cast<const StridedMap>(rangeRMapExtractor->getFullMap()); Teuchos::RCP<const Map > fullRangeMap = Teuchos::null; if(stridedRgFullMap != Teuchos::null) { std::vector<size_t> stridedData = stridedRgFullMap->getStridingData(); fullRangeMap = StridedMapFactory::Build( originalTransferOp->getRangeMap()->lib(), Teuchos::OrdinalTraits<Xpetra::global_size_t>::invalid(), fullRangeMapGIDs, rangeIndexBase, stridedData, originalTransferOp->getRangeMap()->getComm(), stridedRgFullMap->getStridedBlockId(), stridedRgFullMap->getOffset()); } else { fullRangeMap = MapFactory::Build( originalTransferOp->getRangeMap()->lib(), Teuchos::OrdinalTraits<Xpetra::global_size_t>::invalid(), fullRangeMapGIDs, rangeIndexBase, originalTransferOp->getRangeMap()->getComm()); } RCP<const Xpetra::MapExtractor<Scalar, LocalOrdinal, GlobalOrdinal, Node> > 
domainAMapExtractor = bOriginalTransferOp->getDomainMapExtractor(); Teuchos::ArrayView<GO> fullDomainMapGIDs(&fullDomainMapVector[0],fullDomainMapVector.size()); Teuchos::RCP<const StridedMap> stridedDoFullMap = Teuchos::rcp_dynamic_cast<const StridedMap>(domainAMapExtractor->getFullMap()); Teuchos::RCP<const Map > fullDomainMap = Teuchos::null; if(stridedDoFullMap != Teuchos::null) { TEUCHOS_TEST_FOR_EXCEPTION(stridedDoFullMap==Teuchos::null, Exceptions::BadCast, "MueLu::BlockedPFactory::Build: full map in domain map extractor has no striding information! error."); std::vector<size_t> stridedData2 = stridedDoFullMap->getStridingData(); fullDomainMap = StridedMapFactory::Build( originalTransferOp->getDomainMap()->lib(), Teuchos::OrdinalTraits<Xpetra::global_size_t>::invalid(), fullDomainMapGIDs, domainIndexBase, stridedData2, originalTransferOp->getDomainMap()->getComm(), stridedDoFullMap->getStridedBlockId(), stridedDoFullMap->getOffset()); } else { fullDomainMap = MapFactory::Build( originalTransferOp->getDomainMap()->lib(), Teuchos::OrdinalTraits<Xpetra::global_size_t>::invalid(), fullDomainMapGIDs, domainIndexBase, originalTransferOp->getDomainMap()->getComm()); } // build map extractors Teuchos::RCP<const Xpetra::MapExtractor<Scalar, LocalOrdinal, GlobalOrdinal, Node> > rangeMapExtractor = Xpetra::MapExtractorFactory<Scalar, LocalOrdinal, GlobalOrdinal, Node>::Build(fullRangeMap, subBlockRRangeMaps); Teuchos::RCP<const Xpetra::MapExtractor<Scalar, LocalOrdinal, GlobalOrdinal, Node> > domainMapExtractor = Xpetra::MapExtractorFactory<Scalar, LocalOrdinal, GlobalOrdinal, Node>::Build(fullDomainMap, subBlockRDomainMaps); Teuchos::RCP<BlockedCrsMatrix> bRebR = Teuchos::rcp(new BlockedCrsMatrix(rangeMapExtractor,domainMapExtractor,10)); for(size_t i = 0; i<subBlockRRangeMaps.size(); i++) { Teuchos::RCP<const CrsMatrixWrap> crsOpii = Teuchos::rcp_dynamic_cast<const CrsMatrixWrap>(subBlockRebR[i]); Teuchos::RCP<CrsMatrix> crsMatii = crsOpii->getCrsMatrix(); bRebR->setMatrix(i,i,crsMatii); } bRebR->fillComplete(); Set(coarseLevel, "R", Teuchos::rcp_dynamic_cast<Matrix>(bRebR)); // do nothing // TODO remove this! } // Build