// Record the list of parts whose intersection defines this part.
// Overwrites (does not merge with) any previously stored intersection list.
void PartImpl::set_intersection_of( const PartVector & pv )
{
  TraceIfWatching("stk_classic::mesh::impl::PartImpl::set_intersection_of", LOG_PART, m_universe_ordinal);
  DiagIfWatching(LOG_PART, m_universe_ordinal, "Intersection: " << pv );

  m_intersect = pv ;
}
// Construct the implementation object for one state of a field.
// A multi-state field has one FieldBaseImpl per state; arg_this_state
// identifies which state this instance represents, and the sibling
// pointers are filled in later via set_field_states().
FieldBaseImpl::FieldBaseImpl( MetaData                   * arg_mesh_meta_data ,
                              unsigned                     arg_ordinal ,
                              const std::string          & arg_name ,
                              const DataTraits           & arg_traits ,
                              unsigned                     arg_rank,
                              const shards::ArrayDimTag  * const * arg_dim_tags,
                              unsigned                     arg_number_of_states ,
                              FieldState                   arg_this_state )
  : m_name( arg_name ),
    m_attribute(),
    m_data_traits( arg_traits ),
    m_meta_data( arg_mesh_meta_data ),
    m_ordinal( arg_ordinal ),
    m_num_states( arg_number_of_states ),
    m_this_state( arg_this_state ),
    m_field_rank( arg_rank ),
    m_dim_map(),
    m_selector_restrictions(),
    m_initial_value(NULL),
    m_initial_value_num_bytes(0)
{
  TraceIfWatching("stk::mesh::impl::FieldBaseImpl::FieldBaseImpl", LOG_FIELD, m_ordinal);

  // Null-fill the fixed-size state and dimension-tag arrays before
  // copying in the caller-supplied tags for the first arg_rank slots.
  FieldBase * const pzero = NULL ;
  const shards::ArrayDimTag * const dzero = NULL ;
  Copy<MaximumFieldStates>(    m_field_states , pzero );
  Copy<MaximumFieldDimension>( m_dim_tags ,     dzero );

  // Only the first arg_rank tags are meaningful; the rest stay NULL.
  for ( unsigned i = 0 ; i < arg_rank ; ++i ) { m_dim_tags[i] = arg_dim_tags[i]; }
}
// Register 'part' as a superset of this part.
// 'insert' maintains the superset vector's ordering/uniqueness invariant
// (see its definition elsewhere in the project).
void PartImpl::add_part_to_superset( Part & part )
{
  TraceIfWatching("stk_classic::mesh::impl::PartImpl::add_part_to_superset", LOG_PART, m_universe_ordinal);
  DiagIfWatching(LOG_PART, m_universe_ordinal, "New superset is: " << part );

  insert( m_supersets, part );
}
// Remove the relation between e_from and e_to identified by local_id.
// Relations are stored symmetrically, so a successful forward removal
// requires the inverse removal to succeed as well.
// Returns true when a relation was actually removed.
bool EntityRepository::destroy_relation( Entity & e_from,
                                         Entity & e_to,
                                         const RelationIdentifier local_id )
{
  TraceIfWatching("stk::mesh::impl::EntityRepository::destroy_relation", LOG_ENTITY, e_from.key());

  const bool removed_forward = e_from.m_entityImpl.destroy_relation(e_to, local_id);

  if ( !removed_forward ) {
    // No forward relation existed; nothing else to undo.
    return false;
  }

  // Relationships should always be symmetrical: the inverse must exist.
  const bool removed_inverse = e_to.m_entityImpl.destroy_relation(e_from, local_id);
  ThrowErrorMsgIf( !removed_inverse,
      " Internal error - could not destroy inverse relation of " <<
      print_entity_key( e_from ) << " to " << print_entity_key( e_to ) <<
      " with local relation id of " << local_id);

  // It is critical that the modification be done AFTER the relations are
  // changed so that the propagation can happen correctly.
  e_to.m_entityImpl.log_modified_and_propagate();
  e_from.m_entityImpl.log_modified_and_propagate();

  return true;
}
// If this field carries restrictions for both 'superset' and 'subset'
// at the same entity rank, verify the two restrictions have equal
// strides and then drop the redundant subset restriction.
// arg_method and arg_all_parts are accepted but unused here; they are
// presumably kept for signature consistency with callers — TODO confirm.
void FieldBaseImpl::verify_and_clean_restrictions(
  const char       * arg_method ,
  const Part& superset,
  const Part& subset,
  const PartVector & arg_all_parts )
{
  TraceIfWatching("stk::mesh::impl::FieldBaseImpl::verify_and_clean_restrictions", LOG_FIELD, m_ordinal);

  FieldRestrictionVector & restrs = restrictions();

  //Check whether both 'superset' and 'subset' are in this field's restrictions.
  //If they are, make sure they are compatible and remove the subset restriction.
  FieldRestrictionVector::iterator superset_restriction = restrs.end();
  FieldRestrictionVector::iterator subset_restriction = restrs.end();

  for (FieldRestrictionVector::iterator i = restrs.begin() ; i != restrs.end() ; ++i ) {
    if (i->part_ordinal() == superset.mesh_meta_data_ordinal()) {
      superset_restriction = i;
      // Stop early once both restrictions are found with matching rank.
      if (subset_restriction != restrs.end() && subset_restriction->entity_rank() == superset_restriction->entity_rank()) break;
    }
    if (i->part_ordinal() == subset.mesh_meta_data_ordinal()) {
      subset_restriction = i;
      if (superset_restriction != restrs.end() && subset_restriction->entity_rank() == superset_restriction->entity_rank()) break;
    }
  }

  // Only a same-rank superset/subset pair is checked and cleaned.
  if (superset_restriction != restrs.end() && subset_restriction != restrs.end() &&
      superset_restriction->entity_rank() == subset_restriction->entity_rank()) {
    ThrowErrorMsgIf( superset_restriction->not_equal_stride(*subset_restriction),
                     "Incompatible field restrictions for parts "<<superset.name()<<" and "<<subset.name());
    restrs.erase(subset_restriction);
  }
}
std::pair<Entity*,bool> EntityRepository::internal_create_entity( const EntityKey & key ) { TraceIfWatching("stk::mesh::impl::EntityRepository::internal_create_entity", LOG_ENTITY, key); EntityMap::value_type tmp(key,NULL); const std::pair< EntityMap::iterator , bool > insert_result = m_entities.insert( tmp ); std::pair<Entity*,bool> result( insert_result.first->second , insert_result.second ); if ( insert_result.second ) { // A new entity Entity* new_entity = internal_allocate_entity(key); insert_result.first->second = result.first = new_entity; } else if ( EntityLogDeleted == result.first->log_query() ) { // resurrection result.first->m_entityImpl.log_resurrect(); result.second = true; } return result ; }
// Append a part relation (root -> target) to this part's relation list.
// No de-duplication is performed here; the relation is taken by value.
void PartImpl::add_relation( PartRelation relation )
{
  TraceIfWatching("stk_classic::mesh::impl::PartImpl::add_relation", LOG_PART, m_universe_ordinal);
  DiagIfWatching(LOG_PART, m_universe_ordinal, "New relation from: " << relation.m_root << ", to: " << relation.m_target );

  m_relations.push_back(relation);
}
// Declare the relation e_from -> e_to with the given local id, recording
// sync_count on both sides. Relations are stored symmetrically, so a new
// forward relation requires the converse to be created as well.
void EntityRepository::declare_relation( Entity & e_from,
                                         Entity & e_to,
                                         const RelationIdentifier local_id,
                                         unsigned sync_count )
{
  TraceIfWatching("stk::mesh::impl::EntityRepository::declare_relation", LOG_ENTITY, e_from.key());

  const bool forward_added =
    e_from.m_entityImpl.declare_relation( e_to, local_id, sync_count);

  if ( !forward_added ) {
    // Relation already existed; nothing changed, nothing to propagate.
    return;
  }

  // Relationships should always be symmetrical.
  // The setup for the converse relationship works slightly differently.
  const bool is_converse = true;
  const bool inverse_added =
    e_to.m_entityImpl.declare_relation( e_from, local_id, sync_count, is_converse );
  ThrowErrorMsgIf( !inverse_added,
      " Internal error - could not create inverse relation of " <<
      print_entity_key( e_from ) << " to " << print_entity_key( e_to ));

  // It is critical that the modification be done AFTER the relations are
  // added so that the propagation can happen correctly.
  e_to.m_entityImpl.log_modified_and_propagate();
  e_from.m_entityImpl.log_modified_and_propagate();
}
// Install the per-state FieldBase pointers for this field.
// 'field_states' must hold at least m_num_states entries; exactly
// m_num_states pointers are copied into m_field_states.
void FieldBaseImpl::set_field_states( FieldBase ** field_states)
{
  TraceIfWatching("stk::mesh::impl::FieldBaseImpl::set_field_states", LOG_FIELD, m_ordinal);

  for (unsigned state = 0; state < m_num_states; ++state) {
    m_field_states[state] = field_states[state];
  }
}
// Mark 'e' for deferred destruction: move it into the zero-capacity
// 'nil' bucket and log it as deleted. Throws if 'e' is already deleted.
void EntityRepository::destroy_later( Entity & e, Bucket* nil_bucket )
{
  TraceIfWatching("stk::mesh::impl::EntityRepository::destroy_later", LOG_ENTITY, e.key());

  ThrowErrorMsgIf( e.log_query() == EntityLogDeleted,
                   "double deletion of entity: " << print_entity_key( e ));

  // Park the entity in the nil bucket so entity-bucket accessors stay safe.
  change_entity_bucket( *nil_bucket, e, 0);
  e.m_entityImpl.log_deleted(); //important that this come last
}
// Move entity 'e' to bucket 'b' at the given ordinal.
// If the entity had no valid bucket, or the new bucket's part membership
// differs from the old one, the entity is logged as modified first.
void EntityRepository::change_entity_bucket( Bucket & b, Entity & e, unsigned ordinal)
{
  TraceIfWatching("stk::mesh::impl::EntityRepository::change_entity_bucket", LOG_ENTITY, e.key());
  DiagIfWatching(LOG_ENTITY, e.key(), "New bucket: " << b << ", ordinal: " << ordinal);

  // NOTE: the bucket-validity check must come first — e.bucket() is only
  // safe to call when the entity already has a valid bucket.
  const bool no_current_bucket = ! e.m_entityImpl.is_bucket_valid();
  if ( no_current_bucket || ! b.equivalent( e.bucket() ) ) {
    e.m_entityImpl.log_modified_and_propagate();
  }

  e.m_entityImpl.set_bucket_and_ordinal( &b, ordinal);
}
// Set this part's primary entity rank.
// No-op when the rank is unchanged or when InvalidEntityRank is passed;
// throws if a different valid rank was already set.
void PartImpl::set_primary_entity_rank( EntityRank entity_rank )
{
  TraceIfWatching("stk_classic::mesh::impl::PartImpl::set_primary_entity_rank", LOG_PART, m_universe_ordinal);
  if ( entity_rank == m_entity_rank ) return;

  // Computed before the InvalidEntityRank early-return below; at this point
  // entity_rank != m_entity_rank always holds, so this reduces to
  // "a valid rank was already set".
  const bool rank_already_set = m_entity_rank != InvalidEntityRank && entity_rank != m_entity_rank;

  //const bool has_subsets = m_subsets.size() > 0;
  //ThrowErrorMsgIf( has_subsets, " Error: Part '" << m_name  << "' has subsets");

  if ( entity_rank == InvalidEntityRank ) return;

  ThrowErrorMsgIf( rank_already_set, " Error: Different entity rank has already been set on Part");

  m_entity_rank = entity_rank;
}
// Declare (create or retrieve) an entity of the given rank and id, adding
// it to 'parts' plus the locally-owned part.
// New entities get this process's rank as owner and the current sync count;
// for pre-existing entities the caller must be the owner (checked).
Entity & BulkData::declare_entity( EntityRank ent_rank , EntityId ent_id ,
                                   const PartVector & parts )
{
  require_ok_to_modify();

  require_good_rank_and_id(ent_rank, ent_id);

  EntityKey key( ent_rank , ent_id );
  TraceIfWatching("stk_classic::mesh::BulkData::declare_entity", LOG_ENTITY, key);
  DiagIfWatching(LOG_ENTITY, key, "declaring entity with parts " << parts);

  std::pair< Entity * , bool > result = m_entity_repo.internal_create_entity( key );

  Entity* declared_entity = result.first;

  if ( result.second ) {
    // A new application-created entity
    m_entity_repo.set_entity_owner_rank( *declared_entity, m_parallel_rank);
    m_entity_repo.set_entity_sync_count( *declared_entity, m_sync_count);
    DiagIfWatching(LOG_ENTITY, key, "new entity: " << *declared_entity);
  }
  else {
    // An existing entity, the owner must match.
    require_entity_owner( *declared_entity , m_parallel_rank );
    DiagIfWatching(LOG_ENTITY, key, "existing entity: " << *declared_entity);
  }

  //------------------------------

  // Every declared entity is placed in the locally-owned part in
  // addition to the caller-requested parts; nothing is removed.
  Part * const owns = & m_mesh_meta_data.locally_owned_part();

  std::vector<Part*> rem ;
  std::vector<Part*> add( parts );
  add.push_back( owns );

  change_entity_parts( *declared_entity , add , rem );

  // m_transaction_log.insert_entity ( *(result.first) );

  return *declared_entity ;
}
// Physically destroy the entity at map position 'i' and remove its map
// entry. Sanity-checks that the entry is non-NULL and that the stored
// key matches the entity's own key before destroying.
void EntityRepository::internal_expunge_entity( EntityMap::iterator i )
{
  TraceIfWatching("stk::mesh::impl::EntityRepository::internal_expunge_entity", LOG_ENTITY, i->first);

  ThrowErrorMsgIf( i->second == NULL,
                   "For key " << entity_rank(i->first) << " " <<
                   entity_id(i->first) << ", value was NULL");

  ThrowErrorMsgIf( i->first != i->second->key(),
    "Key " << print_entity_key(MetaData::get( *i->second ), i->first) <<
    " != " << print_entity_key(i->second));

  Entity* deleted_entity = i->second;

#ifdef SIERRA_MIGRATION
  // Framework-migration attributes are owned alongside the entity and
  // must be released with the same pool flag.
  destroy_fmwk_attr(deleted_entity->m_fmwk_attrs, m_use_pool);
#endif
  destroy_entity(deleted_entity, m_use_pool);
  i->second = NULL;
  m_entities.erase( i );
}
// Record that 'entity' was created as a parallel copy (delegates to the
// entity's implementation object).
void EntityRepository::log_created_parallel_copy( Entity & entity )
{
  TraceIfWatching("stk::mesh::impl::EntityRepository::log_created_parallel_copy", LOG_ENTITY, entity.key());

  entity.m_entityImpl.log_created_parallel_copy();
}
// Erase the comm-info entries of 'e' associated with the given ghosting.
// Returns the result reported by the entity's implementation
// (presumably whether anything was erased — confirm against EntityImpl).
bool EntityRepository::erase_ghosting( Entity & e, const Ghosting & ghosts) const
{
  TraceIfWatching("stk::mesh::impl::EntityRepository::erase_ghosting", LOG_ENTITY, e.key());

  const bool erased = e.m_entityImpl.erase( ghosts );
  return erased;
}
// Add a selector-based restriction (entity rank + selector + stride) to
// this field. Validates the stride array, possibly stores/updates the
// field's initial-value array, and appends the restriction if it is new.
// Throws if an equal restriction with a different stride already exists,
// or if the MetaData has been committed.
void FieldBaseImpl::insert_restriction(
  const char     * arg_method ,
  EntityRank       arg_entity_rank ,
  const Selector & arg_selector ,
  const unsigned * arg_stride,
  const void*      arg_init_value )
{
  TraceIfWatching("stk::mesh::impl::FieldBaseImpl::insert_restriction", LOG_FIELD, m_ordinal);

  FieldRestriction tmp( arg_entity_rank , arg_selector );

  {
    unsigned i = 0 ;
    if ( m_field_rank ) {
      for ( i = 0 ; i < m_field_rank ; ++i ) { tmp.stride(i) = arg_stride[i] ; }
    }
    else { // Scalar field is 0 == m_field_rank
      i = 1 ;
      tmp.stride(0) = 1 ;
    }
    // Remaining dimensions are 1, no change to stride
    for ( ; i < MaximumFieldDimension ; ++i ) {
      tmp.stride(i) = tmp.stride(i-1) ;
    }
    // Strides must be cumulative: each nonzero and an exact multiple of
    // the previous dimension's stride.
    for ( i = 1 ; i < m_field_rank ; ++i ) {
      const bool bad_stride = 0 == tmp.stride(i) ||
                              0 != tmp.stride(i) % tmp.stride(i-1);
      ThrowErrorMsgIf( bad_stride,
          arg_method << " FAILED for " << *this << " WITH BAD STRIDE!");
    }
  }

  if (arg_init_value != NULL) {
    //insert_restriction can be called multiple times for the same field, giving
    //the field different lengths on different mesh-parts.
    //We will only store one initial-value array, we need to store the one with
    //maximum length for this field so that it can be used to initialize data
    //for all field-restrictions. For the parts on which the field is shorter,
    //a subset of the initial-value array will be used.
    //
    //We want to end up storing the longest arg_init_value array for this field.
    //
    //Thus, we call set_initial_value only if the current length is longer
    //than what's already been stored.

    //length in bytes is num-scalars X sizeof-scalar:

    size_t num_scalars = 1;
    //if rank > 0, then field is not a scalar field, so num-scalars is
    //obtained from the stride array:
    if (m_field_rank > 0) num_scalars = tmp.stride(m_field_rank-1);

    size_t sizeof_scalar = m_data_traits.size_of;
    size_t nbytes = sizeof_scalar * num_scalars;

    size_t old_nbytes = 0;
    if (get_initial_value() != NULL) {
      old_nbytes = get_initial_value_num_bytes();
    }

    if (nbytes > old_nbytes) {
      set_initial_value(arg_init_value, num_scalars, nbytes);
    }
  }

  {
    FieldRestrictionVector & srvec = selector_restrictions();

    // Linear scan: selector restrictions are not kept sorted.
    bool restriction_already_exists = false;
    for(FieldRestrictionVector::const_iterator it=srvec.begin(), it_end=srvec.end();
        it!=it_end; ++it) {
      if (tmp == *it) {
        restriction_already_exists = true;
        if (tmp.not_equal_stride(*it)) {
          ThrowErrorMsg("Incompatible selector field-restrictions!");
        }
      }
    }

    if ( !restriction_already_exists ) {
      // New field restriction, verify we are not committed:
      ThrowRequireMsg(!m_meta_data->is_commit(), "mesh MetaData has been committed.");
      srvec.push_back( tmp );
    }
  }
}
// Add a part-based restriction (entity rank + part ordinal + stride) to
// this field. Validates the stride array, possibly stores/updates the
// field's initial-value array, then merges the restriction into the
// sorted restriction vector: restrictions on subsets of arg_part are
// replaced by the new one, and a matching-stride restriction on a
// superset makes this call a no-op. Throws on incompatible strides or
// if the MetaData has been committed.
void FieldBaseImpl::insert_restriction(
  const char     * arg_method ,
  EntityRank       arg_entity_rank ,
  const Part     & arg_part ,
  const unsigned * arg_stride,
  const void*      arg_init_value )
{
  TraceIfWatching("stk::mesh::impl::FieldBaseImpl::insert_restriction", LOG_FIELD, m_ordinal);

  FieldRestriction tmp( arg_entity_rank , arg_part.mesh_meta_data_ordinal() );

  {
    unsigned i = 0 ;
    if ( m_field_rank ) {
      for ( i = 0 ; i < m_field_rank ; ++i ) { tmp.stride(i) = arg_stride[i] ; }
    }
    else { // Scalar field is 0 == m_field_rank
      i = 1 ;
      tmp.stride(0) = 1 ;
    }
    // Remaining dimensions are 1, no change to stride
    for ( ; i < MaximumFieldDimension ; ++i ) {
      tmp.stride(i) = tmp.stride(i-1) ;
    }
    // Strides must be cumulative: each nonzero and an exact multiple of
    // the previous dimension's stride.
    for ( i = 1 ; i < m_field_rank ; ++i ) {
      const bool bad_stride = 0 == tmp.stride(i) ||
                              0 != tmp.stride(i) % tmp.stride(i-1);
      ThrowErrorMsgIf( bad_stride,
          arg_method << " FAILED for " << *this <<
          " WITH BAD STRIDE " <<
          print_restriction( tmp, arg_entity_rank, arg_part, m_field_rank ));;
    }
  }

  if (arg_init_value != NULL) {
    //insert_restriction can be called multiple times for the same field, giving
    //the field different lengths on different mesh-parts.
    //We will only store one initial-value array, we need to store the one with
    //maximum length for this field so that it can be used to initialize data
    //for all field-restrictions. For the parts on which the field is shorter,
    //a subset of the initial-value array will be used.
    //
    //We want to end up storing the longest arg_init_value array for this field.
    //
    //Thus, we call set_initial_value only if the current length is longer
    //than what's already been stored.

    //length in bytes is num-scalars X sizeof-scalar:

    size_t num_scalars = 1;
    //if rank > 0, then field is not a scalar field, so num-scalars is
    //obtained from the stride array:
    if (m_field_rank > 0) num_scalars = tmp.stride(m_field_rank-1);

    size_t sizeof_scalar = m_data_traits.size_of;
    size_t nbytes = sizeof_scalar * num_scalars;

    size_t old_nbytes = 0;
    if (get_initial_value() != NULL) {
      old_nbytes = get_initial_value_num_bytes();
    }

    if (nbytes > old_nbytes) {
      set_initial_value(arg_init_value, num_scalars, nbytes);
    }
  }

  {
    FieldRestrictionVector & restrs = restrictions();

    // The restriction vector is kept sorted; find the insertion point.
    FieldRestrictionVector::iterator restr = restrs.begin();
    FieldRestrictionVector::iterator last_restriction = restrs.end();

    restr = std::lower_bound(restr,last_restriction,tmp);

    const bool new_restriction = ( ( restr == last_restriction ) || !(*restr == tmp) );

    if ( new_restriction ) {
      // New field restriction, verify we are not committed:
      ThrowRequireMsg(!m_meta_data->is_commit(), "mesh MetaData has been committed.");

      // Scan existing same-rank restrictions for subset/superset overlap
      // with arg_part.
      unsigned num_subsets = 0;
      for(FieldRestrictionVector::iterator i=restrs.begin(), iend=restrs.end(); i!=iend; ++i) {
        if (i->entity_rank() != arg_entity_rank) continue;

        const Part& partI = *m_meta_data->get_parts()[i->part_ordinal()];
        bool found_subset = contain(arg_part.subsets(), partI);
        if (found_subset) {
          // A restriction on a subset is subsumed: overwrite it in place
          // (duplicates are cleaned up below).
          ThrowErrorMsgIf( i->not_equal_stride(tmp),
              arg_method << " FAILED for " << *this << " " <<
              print_restriction( *i, arg_entity_rank, arg_part, m_field_rank ) <<
              " WITH INCOMPATIBLE REDECLARATION " <<
              print_restriction( tmp, arg_entity_rank, arg_part, m_field_rank ));
          *i = tmp;
          ++num_subsets;
        }

        bool found_superset = contain(arg_part.supersets(), partI);
        if (found_superset) {
          ThrowErrorMsgIf( i->not_equal_stride(tmp),
              arg_method << " FAILED for " << *this << " " <<
              print_restriction( *i, arg_entity_rank, arg_part, m_field_rank ) <<
              " WITH INCOMPATIBLE REDECLARATION " <<
              print_restriction( tmp, arg_entity_rank, arg_part, m_field_rank ));
          //if there's already a restriction for a superset of this part, then
          //there's nothing to do and we're out of here..
          return;
        }
      }
      if (num_subsets == 0) {
        restrs.insert( restr , tmp );
      }
      else {
        //if subsets were found, we replaced them with the new restriction. so now we need
        //to sort and unique the vector, and trim it to remove any duplicates:
        std::sort(restrs.begin(), restrs.end());
        FieldRestrictionVector::iterator it = std::unique(restrs.begin(), restrs.end());
        restrs.resize(it - restrs.begin());
      }
    }
    else {
      ThrowErrorMsgIf( restr->not_equal_stride(tmp),
          arg_method << " FAILED for " << *this << " " <<
          print_restriction( *restr, arg_entity_rank, arg_part, m_field_rank ) <<
          " WITH INCOMPATIBLE REDECLARATION " <<
          print_restriction( tmp, arg_entity_rank, arg_part, m_field_rank ));
    }
  }
}
// Attempt to destroy the entity referenced by 'entity_in'.
// Fails (returns false) when the entity still has upward relations
// (same or higher rank) or is already deleted. On success: all relations
// are removed (highest first), the entity is parked in the nil bucket for
// deferred deletion at modification_end, it is removed from its original
// bucket, and the caller's pointer is set to NULL. Returns true on success.
bool BulkData::destroy_entity( Entity * & entity_in )
{
  Entity & entity = *entity_in ;

  TraceIfWatching("stk_classic::mesh::BulkData::destroy_entity", LOG_ENTITY, entity.key());
  DiagIfWatching(LOG_ENTITY, entity.key(), "entity state: " << entity);

  require_ok_to_modify( );

  // An entity related to an entity of the same or higher rank must not
  // be destroyed out from under it.
  bool has_upward_relation = false ;

  for ( PairIterRelation irel = entity.relations() ;
        ! irel.empty() && ! has_upward_relation ; ++irel ) {
    has_upward_relation = entity.entity_rank() <= irel->entity_rank();
  }

  if ( has_upward_relation ) { return false ; }

  if (  EntityLogDeleted == entity.log_query() ) {
    // Cannot already be destroyed.
    return false ;
  }
  //------------------------------
  // Immediately remove it from relations and buckets.
  // Postpone deletion until modification_end to be sure that
  // 1) No attempt is made to re-create it.
  // 2) Parallel index is cleaned up.
  // 3) Parallel sharing is cleaned up.
  // 4) Parallel ghosting is cleaned up.
  //
  // Must clean up the parallel lists before fully deleting the entity.

  // It is important that relations be destroyed in reverse order so that
  // the higher (back) relations are destroyed first.
  while ( ! entity.relations().empty() ) {
    destroy_relation( entity ,
                      * entity.relations().back().entity(),
                      entity.relations().back().identifier());
  }

  // We need to save these items and call remove_entity AFTER the call to
  // destroy_later because remove_entity may destroy the bucket
  // which would cause problems in m_entity_repo.destroy_later because it
  // makes references to the entity's original bucket.
  Bucket& orig_bucket = entity.bucket();
  unsigned orig_bucket_ordinal = entity.bucket_ordinal();

  // Set the bucket to 'bucket_nil' which:
  // 1) has no parts at all
  // 2) has no field data
  // 3) has zero capacity
  //
  // This keeps the entity-bucket methods from catastrophically failing
  // with a bad bucket pointer.
  m_entity_repo.destroy_later( entity, m_bucket_repository.get_nil_bucket() );

  m_bucket_repository.remove_entity( &orig_bucket , orig_bucket_ordinal );

  // Add destroyed entity to the transaction
  // m_transaction_log.delete_entity ( *entity_in );

  // Set the calling entity-pointer to NULL;
  // hopefully the user-code will clean up any outstanding
  // references to this entity.
  entity_in = NULL ;

  return true ;
}
// Change an entity's part membership: compute the final part list from
// the old bucket's parts minus remove_parts plus add_parts, move the
// entity to the bucket matching that list (copying or initializing field
// data), then propagate induced part changes through relations for the
// removed parts whose primary rank matches the entity's rank.
void BulkData::internal_change_entity_parts(
  Entity & entity ,
  const OrdinalVector & add_parts ,
  const OrdinalVector & remove_parts,
  bool always_propagate_internal_changes )
{
  TraceIfWatching("stk_classic::mesh::BulkData::internal_change_entity_parts", LOG_ENTITY, entity.key());
  DiagIfWatching(LOG_ENTITY, entity.key(), "entity state: " << entity);
  DiagIfWatching(LOG_ENTITY, entity.key(), "add_parts: " << add_parts);
  DiagIfWatching(LOG_ENTITY, entity.key(), "remove_parts: " << remove_parts);

  Bucket * const k_old = m_entity_repo.get_entity_bucket( entity );

  const unsigned i_old = entity.bucket_ordinal() ;

  if ( k_old && k_old->member_all( add_parts ) &&
              ! k_old->member_any( remove_parts ) ) {
    // Is already a member of all add_parts,
    // is not a member of any remove_parts,
    // thus nothing to do.
    return ;
  }

  OrdinalVector parts_removed ;

  OrdinalVector parts_total ; // The final part list

  //--------------------------------

  if ( k_old ) {
    // Keep any of the existing bucket's parts
    // that are not a remove part.
    // This will include the 'intersection' parts.
    //
    // These parts are properly ordered and unique.
    const std::pair<const unsigned *, const unsigned*>
      bucket_parts = k_old->superset_part_ordinals();

    const unsigned * parts_begin = bucket_parts.first;
    const unsigned * parts_end   = bucket_parts.second;

    const unsigned num_bucket_parts = parts_end - parts_begin;
    parts_total.reserve( num_bucket_parts + add_parts.size() );
    parts_total.insert( parts_total.begin(), parts_begin , parts_end);

    if ( !remove_parts.empty() ) {
      parts_removed.reserve(remove_parts.size());
      filter_out( parts_total , remove_parts , parts_removed );
    }
  }
  else {
    parts_total.reserve(add_parts.size());
  }

  if ( !add_parts.empty() ) {
    merge_in( parts_total , add_parts );
  }

  if ( parts_total.empty() ) {
    // Always a member of the universal part.
    const unsigned univ_ord =
      m_mesh_meta_data.universal_part().mesh_meta_data_ordinal();
    parts_total.push_back( univ_ord );
  }

  //--------------------------------
  // Move the entity to the new bucket.

  Bucket * k_new =
    m_bucket_repository.declare_bucket(
        entity.entity_rank(),
        parts_total.size(),
        & parts_total[0] ,
        m_mesh_meta_data.get_fields()
        );

  // If changing buckets then copy its field values from old to new bucket
  if ( k_old ) {
    m_bucket_repository.copy_fields( *k_new , k_new->size() , *k_old , i_old );
  }
  else {
    m_bucket_repository.initialize_fields( *k_new , k_new->size() );
  }

  // Set the new bucket
  m_entity_repo.change_entity_bucket( *k_new, entity, k_new->size() );
  m_bucket_repository.add_entity_to_bucket( entity, *k_new );

  // If changing buckets then remove the entity from the bucket,
  if ( k_old && k_old->capacity() > 0) {
    m_bucket_repository.remove_entity( k_old , i_old );
  }

  // Update the change counter to the current cycle.
  m_entity_repo.set_entity_sync_count( entity, m_sync_count );

  // Propagate part changes through the entity's relations.
  //(Only propagate part changes for parts which have a primary-entity-rank that matches
  // the entity's rank. Other parts don't get induced...)

  const PartVector& all_parts = m_mesh_meta_data.get_parts();

  OrdinalVector rank_parts_removed;
  for(OrdinalVector::const_iterator pr=parts_removed.begin(), prend=parts_removed.end();
      pr!=prend; ++pr) {
    if (all_parts[*pr]->primary_entity_rank() == entity.entity_rank()) {
      rank_parts_removed.push_back(*pr);
    }
  }

  if (always_propagate_internal_changes ||
      !rank_parts_removed.empty() || !m_mesh_meta_data.get_field_relations().empty() ) {
    internal_propagate_part_changes( entity , rank_parts_removed );
  }

#ifndef NDEBUG
  //ensure_part_superset_consistency( entity );
#endif
}
// Insert a comm-info record for entity 'e'.
// Returns the result reported by the entity's implementation
// (presumably whether a new record was inserted — confirm against EntityImpl).
bool EntityRepository::insert_comm_info( Entity & e, const EntityCommInfo & comm_info) const
{
  TraceIfWatching("stk::mesh::impl::EntityRepository::insert_comm_info", LOG_ENTITY, e.key());

  const bool inserted = e.m_entityImpl.insert( comm_info );
  return inserted;
}