void printBuckets(std::ostringstream& msg, BulkData& mesh) { const BucketVector & buckets = mesh.buckets(NODE_RANK); for (unsigned i=0; i < buckets.size(); i++) { const Bucket& bucket = *buckets[i]; msg << " bucket[" << i << "] = "; size_t bucket_size = bucket.size(); for (unsigned ie=0; ie < bucket_size; ie++) { msg << mesh.identifier(bucket[ie]) << ", "; } } }
// Collect the nodes of the given subcell of 'entity' into 'subcell_nodes'
// and return the subcell's topology.  Returns INVALID_TOPOLOGY (with
// 'subcell_nodes' left empty) when the owning cell has no topology.
stk::topology get_subcell_nodes(const BulkData& mesh,
                                const Entity entity,
                                EntityRank subcell_rank,
                                unsigned subcell_identifier,
                                EntityVector & subcell_nodes)
{
  ThrowAssert(subcell_rank <= stk::topology::ELEMENT_RANK);

  subcell_nodes.clear();

  // Topology of the owning cell.
  stk::topology celltopology = mesh.bucket(entity).topology();

  // Bail out early when no cell topology is defined.
  if (celltopology == stk::topology::INVALID_TOPOLOGY) {
    return celltopology;
  }

  // A valid subcell rank must lie strictly below the cell's dimension.
  const bool bad_rank = subcell_rank >= celltopology.dimension();
  ThrowInvalidArgMsgIf( bad_rank, "subcell_rank is >= celltopology dimension\n");

  // The identifier must index an existing sub-topology of that rank.
  const bool bad_id = subcell_identifier >= celltopology.num_sub_topology(subcell_rank);
  ThrowInvalidArgMsgIf( bad_id, "subcell_id is >= subcell_count\n");

  // Topology of the requested subcell.
  stk::topology subcell_topology =
      celltopology.sub_topology(subcell_rank, subcell_identifier);
  const int num_subcell_nodes = subcell_topology.num_nodes();

  // Cell-local node ordinals of the subcell.
  std::vector<unsigned> local_node_ids(num_subcell_nodes);
  celltopology.sub_topology_node_ordinals(subcell_rank, subcell_identifier,
                                          local_node_ids.begin());

  // Translate the local ordinals into the cell's node entities.
  Entity const * cell_nodes = mesh.begin_nodes(entity);
  subcell_nodes.reserve(num_subcell_nodes);
  for (int n = 0; n < num_subcell_nodes; ++n) {
    subcell_nodes.push_back(cell_nodes[local_node_ids[n]]);
  }

  return subcell_topology;
}
void get_entities( const BulkData & mesh , EntityRank entity_rank , std::vector< Entity> & entities ) { const BucketVector & ks = mesh.buckets( entity_rank ); entities.clear(); size_t count = 0; const BucketVector::const_iterator ie = ks.end(); BucketVector::const_iterator ik = ks.begin(); for ( ; ik != ie ; ++ik ) { count += (*ik)->size(); } entities.reserve(count); ik = ks.begin(); for ( ; ik != ie ; ++ik ) { const Bucket & k = **ik ; size_t n = k.size(); for(size_t i = 0; i < n; ++i) { entities.push_back(k[i]); } } std::sort(entities.begin(), entities.end(), EntityLess(mesh)); }
void fillNumEntitiesPerRankOnThisProc(const BulkData & M, std::vector<size_t>&local, const Selector *selector) { const MetaData & S = MetaData::get(M); const EntityRank entity_rank_count = static_cast<EntityRank>(S.entity_rank_count()); size_t numEntityRanks = entity_rank_count; local.clear(); local.resize(numEntityRanks,0); Selector owns = S.locally_owned_part(); for(EntityRank i = stk::topology::NODE_RANK; i < numEntityRanks; ++i) { const BucketVector & ks = M.buckets(i); BucketVector::const_iterator ik; for(ik = ks.begin(); ik != ks.end(); ++ik) { if(selector && !(*selector)(**ik)) continue; if(owns(**ik)) { local[i] += (*ik)->size(); } } } }
// Verify internal consistency of every node bucket in 'mesh'.
static void checkBuckets( BulkData& mesh)
{
  const BucketVector & nodeBuckets = mesh.buckets(NODE_RANK);
  for (unsigned ib = 0; ib < nodeBuckets.size(); ++ib) {
    Bucket* bkt = nodeBuckets[ib];
    ASSERT_TRUE(bkt->assert_correct());
  }
}
// One diffusion pass: element mean-value op, ghost exchange of the field
// data, then nodal mean-value op.
void test_diffuse_field( BulkData & mesh ,
                         const Field<double,Cartesian> & arg_field ,
                         const ElementNodePointerField & arg_field_ptr ,
                         bool split_kernel )
{
  ElementMeanValueOp( arg_field , arg_field_ptr , mesh , split_kernel );

  // Push updated element contributions to ghosted copies.
  {
    const FieldBase * const fb = & arg_field ;
    const std::vector<const FieldBase *> fieldList(1, fb);
    communicate_field_data( mesh , mesh.ghost_source() ,
                            mesh.ghost_destination() , fieldList);
  }

  NodeMeanValueOp( arg_field , arg_field , mesh , split_kernel );
}
// Verify internal consistency of every rank-0 bucket in 'mesh'.
static void checkBuckets( BulkData& mesh)
{
  const std::vector<Bucket*> & rankZeroBuckets = mesh.buckets(0);
  for (unsigned ib = 0; ib < rankZeroBuckets.size(); ++ib) {
    Bucket* bkt = rankZeroBuckets[ib];
    STKUNIT_ASSERT(bkt->assert_correct());
  }
}
// Return true when this selector selects no entity of 'entity_rank'.
bool Selector::is_empty(EntityRank entity_rank) const
{
  // An empty expression selects nothing.
  if (m_expr.empty()) {
    return true;
  }

  BulkData * mesh = this->find_mesh();
  ThrowRequireMsg(mesh != NULL,
        "ERROR, Selector::empty not available if selector expression does not involve any mesh Parts.");

  if (!mesh->in_modifiable_state()) {
    // Synchronized mesh: the bucket list is authoritative.
    return get_buckets(entity_rank).empty();
  }

  // Mid-modification: buckets may linger while empty, so inspect sizes.
  BucketVector const& bkts = this->get_buckets(entity_rank);
  for (size_t b = 0; b < bkts.size(); ++b) {
    if (bkts[b]->size() > 0) {
      return false;
    }
  }
  return true;
}
// Attach 'edge' to 'elem' at local edge ordinal 'local_edge_id', placing
// it in 'parts'.  Returns the connected edge.
Entity declare_element_edge( BulkData & mesh, Entity elem, Entity edge,
                             const unsigned local_edge_id,
                             const stk::mesh::PartVector& parts)
{
  verify_declare_element_edge(mesh, elem, local_edge_id);
  const stk::topology elemTopo = mesh.bucket(elem).topology();
  const stk::topology edgeTopo = elemTopo.edge_topology();
  return declare_element_to_entity(mesh, elem, edge, local_edge_id, parts, edgeTopo);
}
void comm_mesh_counts( const BulkData & M , std::vector<size_t> & globalCounts, std::vector<size_t> & min_counts, std::vector<size_t> & max_counts, const Selector *selector) { std::vector<size_t> localEntityCounts; fillNumEntitiesPerRankOnThisProc(M, localEntityCounts, selector); size_t numEntityRanks = localEntityCounts.size(); globalCounts.resize(numEntityRanks, 0); min_counts.resize(numEntityRanks, 0); max_counts.resize(numEntityRanks, 0); all_reduce_sum(M.parallel(), &localEntityCounts[0], &globalCounts[0], numEntityRanks); all_reduce_min(M.parallel(), &localEntityCounts[0], &min_counts[0], numEntityRanks); all_reduce_max(M.parallel(), &localEntityCounts[0], &max_counts[0], numEntityRanks); return; }
// Compare the locally-owned sides of 'skinnedPart' against 'sidesetSides';
// returns true on every proc iff the two (sorted, deduplicated) lists match
// globally.  On mismatch the discrepancy is handed to 'reporter'.
bool is_sideset_equivalent_to_skin(BulkData &bulkData,
                                   stk::mesh::EntityVector &sidesetSides,
                                   const Part& skinnedPart,
                                   impl::SkinBoundaryErrorReporter &reporter)
{
  stk::mesh::EntityVector skinnedSides =
      get_locally_owned_skinned_sides(bulkData, skinnedPart);

  // Normalize both lists before comparing.
  stk::util::sort_and_unique(sidesetSides);
  stk::util::sort_and_unique(skinnedSides);

  const bool locallyEqual = (sidesetSides == skinnedSides);
  const bool globallyEqual = stk::is_true_on_all_procs(bulkData.parallel(), locallyEqual);
  if (!globallyEqual) {
    reporter.report(skinnedSides, sidesetSides, skinnedPart);
  }
  return globallyEqual;
}
// Find-or-create the edge with 'global_edge_id'.  When the edge is newly
// created it is also connected to 'elem' at local ordinal 'local_edge_id'.
Entity declare_element_edge( BulkData & mesh,
                             const stk::mesh::EntityId global_edge_id,
                             Entity elem,
                             const unsigned local_edge_id,
                             const stk::mesh::PartVector &parts)
{
  verify_declare_element_edge(mesh, elem, local_edge_id);

  const stk::topology elemTopo = mesh.bucket(elem).topology();
  const stk::topology edgeTopo = elemTopo.edge_topology();

  Entity edge = mesh.get_entity(edgeTopo.rank(), global_edge_id);
  if (!mesh.is_valid(edge)) {
    PartVector noParts;
    edge = mesh.declare_entity(edgeTopo.rank(), global_edge_id, noParts);
    // NOTE(review): the element connection is only made for a freshly
    // created edge; a pre-existing edge may remain unattached to 'elem' —
    // confirm this is the intended behavior (original carried the same note).
    declare_element_edge(mesh, elem, edge, local_edge_id, parts);
  }
  return edge;
}
// Declare an element with id 'elem_id' in 'parts' (parts[0] must carry the
// element topology) and connect it to the nodes in 'node_ids', declaring
// any node that does not yet exist.  Returns the new element.
Entity declare_element(BulkData & mesh, PartVector & parts,
                       const EntityId elem_id, const EntityIdVector & node_ids)
{
  MetaData & meta = MetaData::get(mesh);
  stk::topology topo = meta.get_topology(*parts[0]);

  ThrowAssert(node_ids.size() >= topo.num_nodes());
  ThrowErrorMsgIf(topo == stk::topology::INVALID_TOPOLOGY,
                  "Part " << parts[0]->name() << " does not have a local topology");

  const EntityRank elemRank = stk::topology::ELEMENT_RANK;
  Entity elem = mesh.declare_entity(elemRank, elem_id, parts);

  const EntityRank nodeRank = stk::topology::NODE_RANK;
  const Permutation noPerm = stk::mesh::Permutation::INVALID_PERMUTATION;

  // Scratch buffers reused by declare_relation.
  OrdinalVector ordinal_scratch;
  ordinal_scratch.reserve(64);
  PartVector part_scratch;
  part_scratch.reserve(64);

  PartVector noParts;
  for (unsigned n = 0; n < topo.num_nodes(); ++n) {
    // Find or create the node for this ordinal.
    Entity node = mesh.get_entity(nodeRank, node_ids[n]);
    if (!mesh.is_valid(node)) {
      node = mesh.declare_entity(nodeRank, node_ids[n], noParts);
    }
    mesh.declare_relation(elem, node, n, noPerm, ordinal_scratch, part_scratch);
  }
  return elem;
}
// Sum locally-owned entity counts per rank across all processors; the
// result lands in 'counts'.  'local_flag' is also sum-reduced and the
// function returns true when any processor passed local_flag == true.
bool comm_mesh_counts( BulkData & M , std::vector<size_t> & counts ,
                       bool local_flag )
{
  const FEMMetaData & meta = FEMMetaData::get(M);
  const unsigned rankCount = meta.entity_rank_count();
  const size_t commCount = rankCount + 1; // last slot carries local_flag

  std::vector<size_t> local( commCount , 0 );
  std::vector<size_t> global( commCount , 0 );

  ParallelMachine comm = M.parallel();
  Part & ownsPart = meta.locally_owned_part();

  // Tally locally owned entities of each rank.
  for ( unsigned r = 0 ; r < rankCount ; ++r ) {
    const std::vector<Bucket*> & bkts = M.buckets( r );
    for ( std::vector<Bucket*>::const_iterator it = bkts.begin() ;
          it != bkts.end() ; ++it ) {
      if ( has_superset( **it , ownsPart ) ) {
        local[r] += (*it)->size();
      }
    }
  }
  local[ rankCount ] = local_flag ;

  stk::all_reduce_sum( comm , & local[0] , & global[0] , commCount );

  counts.assign( global.begin() , global.begin() + rankCount );
  return 0 < global[ rankCount ] ;
}
// Skin the portion of 'mesh' selected by 'element_selector', placing the
// created boundary sides into 'skin_parts'.  When 'secondary_selector' is
// supplied, its complement is treated as "air" by the element graph.
void skin_mesh( BulkData & mesh, Selector const& element_selector,
                PartVector const& skin_parts, const Selector * secondary_selector)
{
  ThrowErrorMsgIf( mesh.in_modifiable_state(), "mesh is not SYNCHRONIZED" );

  Selector complement;
  Selector *air = nullptr;
  if (secondary_selector != nullptr) {
    complement = !(*secondary_selector);
    air = &complement;
  }

  stk::mesh::ElemElemGraph graph(mesh, element_selector, air);
  graph.skin_mesh(skin_parts);
}
void centroid_algorithm( const BulkData & bulkData , const VectorFieldType & elem_centroid , const ElementNodePointerFieldType & elem_node_coord , Part & elem_part, EntityRank element_rank ) { // Use the "homogeneous subset" concept (see the Domain Model document) // for field data storage. A "homogeneous subset" is called // a 'Bucket'. // Iterate the set of element buckets: const std::vector<Bucket*> & buckets = bulkData.buckets( element_rank ); for ( std::vector<Bucket*>::const_iterator k = buckets.begin() ; k != buckets.end() ; ++k ) { Bucket & bucket = **k ; // If this bucket is a subset of the given elem_part // then want to compute on it. if ( has_superset( bucket, elem_part ) ) { // Number of elements in the bucket: const unsigned size = bucket.size(); // Aggressive "gather" field data for the elements // in the bucket. // double * node_ptr[ nodes_per_element * number_of_elements ] double ** node_ptr = field_data( elem_node_coord , bucket.begin() ); // Element centroid field data // double elem_ptr[ SpatialDim * number_of_elements ] double * elem_ptr = field_data( elem_centroid , bucket.begin() ); // Call an element function to calculate centroid for // contiguous arrays of element field data. centroid< ElementTraits >( size , elem_ptr , node_ptr ); } } }
// Unpack one entity's description from a communication buffer into
// key/owner/parts/relations.  The unpack order must mirror the
// corresponding pack: key, owner, part count, part ordinals,
// relation count, then (key, id, attr) per relation.
// Relations whose target entity is not known locally (or is logged
// deleted) are silently dropped.
void unpack_entity_info( CommBuffer & buf, const BulkData & mesh ,
                         EntityKey & key , unsigned & owner ,
                         PartVector & parts ,
                         std::vector<Relation> & relations )
{
  unsigned nparts = 0 ;
  unsigned nrel = 0 ;

  buf.unpack<EntityKey>( key );
  buf.unpack<unsigned>( owner );
  buf.unpack<unsigned>( nparts );

  parts.resize( nparts );

  for ( unsigned i = 0 ; i < nparts ; ++i ) {
    unsigned part_ordinal = ~0u ;
    buf.unpack<unsigned>( part_ordinal );
    // Translate the transmitted ordinal back into this mesh's Part.
    parts[i] = & MetaData::get(mesh).get_part( part_ordinal );
  }

  buf.unpack( nrel );

  relations.clear();
  relations.reserve( nrel );

  for ( unsigned i = 0 ; i < nrel ; ++i ) {
    EntityKey rel_key ;
    unsigned rel_id = 0 ;
    unsigned rel_attr = 0 ;
    buf.unpack<EntityKey>( rel_key );
    buf.unpack<unsigned>( rel_id );
    buf.unpack<unsigned>( rel_attr );
    // Keep the relation only if its target exists locally and is not
    // marked deleted.
    Entity * const entity =
      mesh.get_entity( entity_rank(rel_key), entity_id(rel_key) );
    if ( entity && EntityLogDeleted != entity->log_query() ) {
      Relation rel( * entity, rel_id );
      rel.set_attribute(rel_attr);
      relations.push_back( rel );
    }
  }
}
void count_entities( const Selector & selector , const BulkData & mesh , std::vector< unsigned > & count ) { const size_t nranks = MetaData::get(mesh).entity_rank_count(); count.resize( nranks ); for ( size_t i = 0 ; i < nranks ; ++i ) { count[i] = 0 ; const BucketVector & ks = mesh.buckets( static_cast<EntityRank>(i) ); BucketVector::const_iterator ik ; for ( ik = ks.begin() ; ik != ks.end() ; ++ik ) { if ( selector(**ik) ) { count[i] += (*ik)->size(); } } } }
/**
 * Thinking in terms of a 3D grid of elements, fetch the element in the
 * (x, y, z) position.  The returned Entity is not valid if this process
 * doesn't know about this element.
 */
Entity elem( size_t x , size_t y , size_t z ) const
{
  const auto id = elem_id(x, y, z);
  return m_bulk_data.get_entity( stk::topology::ELEMENT_RANK, id );
}
// Assert that 'entity' has been destroyed: it is either no longer valid,
// or its bucket has zero capacity.  (Short-circuit keeps bucket() from
// being called on an invalid entity.)
void assert_is_destroyed(const BulkData& mesh, const Entity entity )
{
  const bool notValid = !mesh.is_valid(entity);
  ASSERT_TRUE( notValid || mesh.bucket(entity).capacity() == 0 );
}
// Declare a new entity of the given rank as a member of
// 'part_membership', using the next available entity id.
Entity create_entity( EntityRank rank, Part& part_membership)
{
  PartVector memberParts(1, &part_membership);
  return bulk.declare_entity(rank, nextEntityId(), memberParts);
}
/** Copy data for the given fields, from owned entities to shared-but-not-owned entities. * I.e., shared-but-not-owned entities get an update of the field-data from the owned entity. */ inline void copy_owned_to_shared( const BulkData& mesh, const std::vector< const FieldBase *> & fields ) { communicate_field_data(*mesh.ghostings()[0], fields); }
// Accumulate an estimate of the memory used by 'bulk' into 'mem_usage':
// field and part metadata bytes, per-rank entity/relation counts,
// per-rank bucket counts and bytes, and an overall byte total.
// NOTE: the relation-counting loops unconditionally ThrowErrorMsg — this
// function aborts if any entity has connectivity; it has not been ported
// to the current Connectivity scheme (see the message text).
void compute_memory_usage(const BulkData& bulk, MemoryUsage& mem_usage)
{
  mem_usage.entity_rank_names = bulk.mesh_meta_data().entity_rank_names();

  // Field metadata: object size plus name and restriction storage.
  const FieldVector& fields = bulk.mesh_meta_data().get_fields();
  mem_usage.num_fields = fields.size();
  mem_usage.field_bytes = fields.size()*sizeof(FieldBase);
  for(size_t i=0; i<fields.size(); ++i) {
    mem_usage.field_bytes += fields[i]->name().length();
    mem_usage.field_bytes += sizeof(FieldRestriction)*fields[i]->restrictions().size();
  }

  // Part metadata: object size plus name and superset/subset pointers.
  const PartVector& parts = bulk.mesh_meta_data().get_parts();
  mem_usage.num_parts = parts.size();
  mem_usage.part_bytes = parts.size()*sizeof(Part);
  for(size_t i=0; i<parts.size(); ++i) {
    mem_usage.part_bytes += parts[i]->name().length();
    mem_usage.part_bytes += sizeof(Part*) * parts[i]->supersets().size();
    mem_usage.part_bytes += sizeof(Part*) * parts[i]->subsets().size();
  }

  size_t total_bytes = mem_usage.field_bytes + mem_usage.part_bytes;

  mem_usage.entity_counts.clear();
  mem_usage.downward_relation_counts.clear();
  mem_usage.upward_relation_counts.clear();
  mem_usage.bucket_counts.clear();
  mem_usage.bucket_bytes.clear();

  // Entity counts per rank over the whole mesh.
  Selector all = bulk.mesh_meta_data().universal_part();
  count_entities(all, bulk, mem_usage.entity_counts);

  size_t nranks = mem_usage.entity_counts.size();
  mem_usage.downward_relation_counts.resize(nranks, 0);
  mem_usage.upward_relation_counts.resize(nranks, 0);
  mem_usage.bucket_counts.resize(nranks, 0);
  mem_usage.bucket_bytes.resize(nranks, 0);

  std::vector<Entity> entities;
  for(size_t i=0; i<nranks; ++i) {
    EntityRank rank_i = static_cast<EntityRank>(i);
    total_bytes += mem_usage.entity_counts[rank_i]*sizeof(Entity);

    get_entities(bulk, rank_i, entities);
    for(size_t n=0; n<entities.size(); ++n) {
      Entity entity = entities[n];
      // Downward relations (toward lower ranks).
      for(EntityRank r=stk::topology::NODE_RANK; r<rank_i; ++r) {
        unsigned num_rels = bulk.num_connectivity(entity, r);
        mem_usage.downward_relation_counts[r] += num_rels;
        ThrowErrorMsg("stk::mesh::compute_memory_usage need to be largely re-written for the new Connectivity scheme but is not needed for this 4.27.7.");
      }
      // Upward relations (toward higher ranks).
      for(EntityRank r=static_cast<EntityRank>(rank_i+1); r<nranks; ++r) {
        unsigned num_rels = bulk.num_connectivity(entity, r);
        mem_usage.upward_relation_counts[r] += num_rels;
        ThrowErrorMsg("stk::mesh::compute_memory_usage need to be largely re-written for the new Connectivity scheme but is not needed for this 4.27.7.");
      }
    }

    // Bucket storage for this rank.
    const BucketVector& buckets = bulk.buckets(rank_i);
    mem_usage.bucket_counts[rank_i] = buckets.size();
    for(size_t b=0; b<buckets.size(); ++b) {
      Bucket& bucket = *buckets[b];
      mem_usage.bucket_bytes[rank_i] += bucket.allocation_size();
      total_bytes += bucket.allocation_size();
    }
  }
  mem_usage.total_bytes = total_bytes;
}
// Build an iterator range spanning all entities of 'entity_rank' in 'mesh'.
BucketVectorEntityIteratorRange get_entities( EntityRank entity_rank,
                                              const BulkData& mesh )
{
  const std::vector<Bucket*>& rankBuckets = mesh.buckets(entity_rank);
  return get_entity_range(rankBuckets);
}
// Create edges for the whole mesh: every element is considered and no
// extra part is inserted on the newly created edges.
void create_edges( BulkData & mesh )
{
  Part * noEdgePart = nullptr;
  create_edges(mesh, mesh.mesh_meta_data().universal_part(), noEdgePart);
}
// Create edges for the entities selected by 'element_selector' and connect
// them to their elements (and, in 3D, connect existing faces to edges).
// Newly created edges are optionally placed in 'part_to_insert_new_edges'.
// The whole operation runs inside one modification cycle and ends with
// MOD_END_COMPRESS_AND_SORT.
void create_edges( BulkData & mesh, const Selector & element_selector,
                   Part * part_to_insert_new_edges )
{
  // static size_t next_edge = static_cast<size_t>(mesh.parallel_rank()+1) << 32;
  // NOTE: This is a workaround to eliminate some bad behavior with the equation above when
  // the #proc is a power of two. The 256 below is the bin size of the Distributed Index.
  // next_edge seeds the per-processor edge-id range.
  static size_t next_edge = (static_cast<size_t>(mesh.parallel_rank()+1) << 32) + 256 * mesh.parallel_rank();

  mesh.modification_begin();
  {
    {
      edge_map_type edge_map;

      // Populate the edge_map with existing edges so they are reused
      // (keyed by their ordered node list) instead of recreated.
      {
        BucketVector const & edge_buckets = mesh.buckets(stk::topology::EDGE_RANK);

        for (size_t i=0, ie=edge_buckets.size(); i<ie; ++i) {
          Bucket &b = *edge_buckets[i];

          const unsigned num_nodes = b.topology().num_nodes();
          EntityVector edge_nodes(num_nodes);

          for (size_t j=0, je=b.size(); j<je; ++j) {
            Entity edge = b[j];
            Entity const *nodes_rel = b.begin_nodes(j);

            for (unsigned n=0; n<num_nodes; ++n) {
              edge_nodes[n] = nodes_rel[n];
            }

            edge_map[edge_nodes] = edge;
          }
        }
      }

      // Create edges and connect them to elements.
      {
        BucketVector const& element_buckets =
          mesh.get_buckets(stk::topology::ELEMENT_RANK,
                           element_selector & mesh.mesh_meta_data().locally_owned_part());

        // Create the edges for the elements in each bucket; the functor
        // is dispatched on the bucket's topology.
        for (size_t i=0, e=element_buckets.size(); i<e; ++i) {
          Bucket &b = *element_buckets[i];

          create_edge_impl functor( next_edge, edge_map, b, part_to_insert_new_edges);
          stk::topology::apply_functor< create_edge_impl > apply(functor);
          apply( b.topology() );
        }
      }

      // Connect existing faces to edges (3D meshes only).
      if (mesh.mesh_meta_data().spatial_dimension() == 3u) {
        BucketVector const& face_buckets =
          mesh.get_buckets(stk::topology::FACE_RANK,
                           element_selector & (mesh.mesh_meta_data().locally_owned_part()
                                               | mesh.mesh_meta_data().globally_shared_part()));

        // Create the edge connections for the faces in each bucket.
        for (size_t i=0, e=face_buckets.size(); i<e; ++i) {
          Bucket &b = *face_buckets[i];

          connect_face_impl functor(edge_map, b);
          stk::topology::apply_functor< connect_face_impl > apply(functor);
          apply( b.topology() );
        }
      }
    }
  }
  mesh.modification_end( BulkData::MOD_END_COMPRESS_AND_SORT );
}
// Exchange field data for communicated entities of 'mesh'.  Sizes the
// per-destination messages, allocates the CommAll buffers (send sizes
// double as receive sizes — symmetric exchange), packs the raw field
// bytes, and triggers the sparse communication.  Unpacking is left to
// the caller.
void communicate_field_data( const BulkData & mesh ,
                             const unsigned field_count ,
                             const FieldBase * fields[] ,
                             CommAll & sparse )
{
  const std::vector<Entity*> & entity_comm = mesh.entity_comm();

  const unsigned parallel_size = mesh.parallel_size();

  // Sizing for send and receive
  const unsigned zero = 0 ;
  std::vector<unsigned> msg_size( parallel_size , zero );

  size_t j = 0;

  // Accumulate, per destination proc, the bytes each field contributes.
  for ( j = 0 ; j < field_count ; ++j ) {
    const FieldBase & f = * fields[j] ;
    for ( std::vector<Entity*>::const_iterator
          i = entity_comm.begin() ; i != entity_comm.end() ; ++i ) {
      Entity & e = **i ;
      const unsigned size = field_data_size( f , e );
      if ( size ) {
        // Walk only the leading comm entries with ghost_id == 0 —
        // presumably the shared entries sort first; TODO confirm.
        for ( PairIterEntityComm ec = e.comm() ; ! ec.empty() && ec->ghost_id == 0 ; ++ec ) {
          msg_size[ ec->proc ] += size ;
        }
      }
    }
  }

  // Allocate send and receive buffers:
  {
    const unsigned * const s_size = & msg_size[0] ;
    sparse.allocate_buffers( mesh.parallel(), parallel_size / 4 , s_size, s_size);
  }

  // Pack for send: same traversal order as the sizing pass above.
  for ( j = 0 ; j < field_count ; ++j ) {
    const FieldBase & f = * fields[j] ;
    for ( std::vector<Entity*>::const_iterator
          i = entity_comm.begin() ; i != entity_comm.end() ; ++i ) {
      Entity & e = **i ;
      const unsigned size = field_data_size( f , e );
      if ( size ) {
        unsigned char * ptr = reinterpret_cast<unsigned char *>(field_data( f , e ));
        for ( PairIterEntityComm ec = e.comm() ; ! ec.empty() && ec->ghost_id == 0 ; ++ec ) {
          CommBuffer & b = sparse.send_buffer( ec->proc );
          b.pack<unsigned char>( ptr , size );
        }
      }
    }
  }

  // Communicate:
  sparse.communicate();
}
// Connect 'entity' (a sub-topology of 'elem', e.g. a side or edge) to
// 'elem' at ordinal 'relationOrdinal': optionally adds 'parts' to the
// entity, creates the elem->entity relation if it does not already
// exist, and declares the entity's node relations when it has none yet.
// Returns 'entity'.
Entity declare_element_to_entity(BulkData & mesh, Entity elem, Entity entity,
                                 const unsigned relationOrdinal, const PartVector& parts,
                                 stk::topology entity_top)
{
  stk::topology elem_top = mesh.bucket(elem).topology();

  // Element-local node ordinals of the sub-topology at relationOrdinal.
  std::vector<unsigned> entity_node_ordinals(entity_top.num_nodes());
  elem_top.sub_topology_node_ordinals(mesh.entity_rank(entity), relationOrdinal, entity_node_ordinals.begin());

  const stk::mesh::Entity *elem_nodes = mesh.begin_nodes(elem);

  // The entity's nodes in the orientation induced by the element.
  EntityVector entity_top_nodes(entity_top.num_nodes());
  elem_top.sub_topology_nodes(elem_nodes, mesh.entity_rank(entity), relationOrdinal, entity_top_nodes.begin());

  // Permutation of the entity relative to the element's node ordering.
  Permutation perm = mesh.find_permutation(elem_top, elem_nodes, entity_top, &entity_top_nodes[0], relationOrdinal);

  // Scratch buffers reused by declare_relation.
  OrdinalVector ordinal_scratch;
  ordinal_scratch.reserve(64);
  PartVector part_scratch;
  part_scratch.reserve(64);

  if(!parts.empty())
  {
    mesh.change_entity_parts(entity, parts);
  }

  // Create the elem->entity relation only if it does not already exist.
  const stk::mesh::ConnectivityOrdinal *side_ordinals = mesh.begin_ordinals(elem, mesh.entity_rank(entity));
  unsigned num_sides = mesh.count_valid_connectivity(elem, mesh.entity_rank(entity));
  bool elem_to_side_exists = false;
  for(unsigned i = 0; i < num_sides; ++i)
  {
    if(side_ordinals[i] == relationOrdinal)
    {
      elem_to_side_exists = true;
      break;
    }
  }
  if(!elem_to_side_exists)
  {
    mesh.declare_relation(elem, entity, relationOrdinal, perm, ordinal_scratch, part_scratch);
  }

  // If the entity has no node relations yet, connect it to the element's
  // nodes; otherwise it must already agree on the node count.
  const unsigned num_side_nodes = mesh.count_valid_connectivity(entity, stk::topology::NODE_RANK);
  if(0 == num_side_nodes)
  {
    Permutation node_perm = stk::mesh::Permutation::INVALID_PERMUTATION;
    Entity const *elem_nodes_local = mesh.begin_nodes(elem);
    for(unsigned i = 0; i < entity_top.num_nodes(); ++i)
    {
      Entity node = elem_nodes_local[entity_node_ordinals[i]];
      mesh.declare_relation(entity, node, i, node_perm, ordinal_scratch, part_scratch);
    }
  }
  else
  {
    ThrowAssertMsg(num_side_nodes == entity_top.num_nodes(),
                   "declare_element_to_entity: " << mesh.entity_key(entity) << " already exists with different number of nodes.");
  }

  return entity;
}
// Unit-test helper: for every bucket of 'elem_part', verify that the
// BucketArray dimensions of the element-node-pointer field and the
// centroid field match the expected nodes-per-element / spatial
// dimension and the bucket size.  Prints discrepancies to std::cerr
// and returns false if any check fails.
bool centroid_algorithm_unit_test_dimensions( const BulkData & bulkData ,
                                              const VectorFieldType & elem_centroid ,
                                              const ElementNodePointerFieldType & elem_node_coord ,
                                              Part & elem_part,
                                              EntityRank element_rank )
{
  bool result = true;

  // Iterate the set of element buckets ("homogeneous subsets", see the
  // Domain Model document):
  const std::vector<Bucket*> & buckets = bulkData.buckets( element_rank );

  for ( std::vector<Bucket*>::const_iterator
        k = buckets.begin() ; k != buckets.end() ; ++k ) {
    Bucket & bucket = **k ;

    // Only check buckets that are a subset of the given elem_part.
    if ( has_superset( bucket, elem_part ) ) {

      // Number of elements in the bucket:
      const unsigned size = bucket.size();

      // Check dimensions of the gathered node-pointer field array.
      {
        BucketArray< ElementNodePointerFieldType >
          array( elem_node_coord, bucket.begin(), bucket.end() );

        const unsigned n1 = array.template dimension<0>();
        const unsigned n2 = array.template dimension<1>();

        if ( n1 != ElementTraits::node_count ) {
          std::cerr << "Error! n1 == " << n1 << " != "
                    << ElementTraits::node_count
                    << " == ElementTraits::node_count" << std::endl;
          result = false;
        }
        if ( n2 != size ) {
          std::cerr << "Error! n2 == " << n2 << " != " << size
                    << " == size" << std::endl;
          result = false;
        }
        if ( (unsigned) array.size() != n1 * n2 ) {
          std::cerr << "Error! array.size() == " << array.size() << " != "
                    << n1*n2 << " == n1*n2" << std::endl;
          result = false;
        }
      }

      // Check dimensions of the centroid field array.
      {
        BucketArray< VectorFieldType >
          array( elem_centroid , bucket.begin(), bucket.end() );

        const unsigned n1 = array.template dimension<0>();
        const unsigned n2 = array.template dimension<1>();

        if ( n1 != (unsigned) SpatialDim ) {
          std::cerr << "Error! n1 == " << n1 << " != " << SpatialDim
                    << " == SpatialDim" << std::endl;
          result = false;
        }
        if ( n2 != size ) {
          std::cerr << "Error! n2 == " << n2 << " != " << size
                    << " == size" << std::endl;
          result = false;
        }
        if ( (unsigned) array.size() != n1 * n2 ) {
          std::cerr << "Error! array.size() == " << array.size() << " != "
                    << n1*n2 << " == n1*n2" << std::endl;
          result = false;
        }
      }
    }
  }

  return result;
}
/**
 * Thinking in terms of a 3D grid of nodes, fetch the node in the (x, y, z)
 * position.  The returned Entity is not valid if this process doesn't
 * know about this node.
 */
Entity node( size_t x , size_t y , size_t z ) const
{
  const auto id = node_id(x, y, z);
  return m_bulk_data.get_entity( stk::topology::NODE_RANK , id );
}