void arena::free_arena () { __TBB_ASSERT( !my_num_threads_active, "There are threads in the dying arena" ); poison_value( my_guard ); intptr_t drained = 0; for ( unsigned i = 1; i <= my_num_slots; ++i ) drained += mailbox(i).drain(); #if __TBB_TASK_PRIORITY && TBB_USE_ASSERT for ( intptr_t i = 0; i < num_priority_levels; ++i ) __TBB_ASSERT(my_task_stream[i].empty() && my_task_stream[i].drain()==0, "Not all enqueued tasks were executed"); #elif !__TBB_TASK_PRIORITY __TBB_ASSERT(my_task_stream.empty() && my_task_stream.drain()==0, "Not all enqueued tasks were executed"); #endif /* !__TBB_TASK_PRIORITY */ #if __TBB_COUNT_TASK_NODES my_market->update_task_node_count( -drained ); #endif /* __TBB_COUNT_TASK_NODES */ my_market->release(); #if __TBB_TASK_GROUP_CONTEXT __TBB_ASSERT( my_master_default_ctx, "Master thread never entered the arena?" ); my_master_default_ctx->~task_group_context(); NFS_Free(my_master_default_ctx); #endif /* __TBB_TASK_GROUP_CONTEXT */ #if __TBB_STATISTICS for( unsigned i = 0; i < my_num_slots; ++i ) NFS_Free( my_slots[i].my_counters ); #endif /* __TBB_STATISTICS */ void* storage = &mailbox(my_num_slots); __TBB_ASSERT( my_num_threads_active == 0, NULL ); __TBB_ASSERT( my_pool_state == SNAPSHOT_EMPTY || !my_max_num_workers, NULL ); this->~arena(); #if TBB_USE_ASSERT > 1 memset( storage, 0, allocation_size(my_max_num_workers) ); #endif /* TBB_USE_ASSERT */ NFS_Free( storage ); }
void market::destroy () { #if __TBB_COUNT_TASK_NODES if ( my_task_node_count ) runtime_warning( "Leaked %ld task objects\n", (intptr_t)my_task_node_count ); #endif /* __TBB_COUNT_TASK_NODES */ this->~market(); NFS_Free( this ); __TBB_InitOnce::remove_ref(); }
//! Replaces the inline two-entry ("short") segment index with a heap-allocated ("long") one.
/** Allocates a zero-filled long segment table, copies over the two existing
    segment pointers, and publishes it with a single CAS on v.my_segment.
    If another thread publishes first, the local table is discarded. **/
void concurrent_vector_base::helper::extend_segment( concurrent_vector_base& v ) {
    // Long table capacity depends on pointer width: 32 entries on 32-bit, 64 on 64-bit.
    const size_t pointers_per_long_segment = sizeof(void*)==4 ? 32 : 64;
    segment_t* s = (segment_t*)NFS_Allocate( pointers_per_long_segment, sizeof(segment_t), NULL );
    std::memset( static_cast<void*>(s), 0, pointers_per_long_segment*sizeof(segment_t) );
    // If other threads are trying to set pointers in the short segment, wait for them to finish their
    // assignments before we copy the short segment to the long segment.
    atomic_backoff backoff;
    while( !v.my_storage[0].array || !v.my_storage[1].array )
        backoff.pause();
    s[0] = v.my_storage[0];
    s[1] = v.my_storage[1];
    // Publish the long table only if my_segment still points at the short one;
    // a losing racer frees its redundant copy.
    if( v.my_segment.compare_and_swap( s, v.my_storage )!=v.my_storage )
        NFS_Free(s);
}
//! Destroys the vector's elements and optionally releases all segment storage.
/** Walks segments from the last occupied one down to segment 0, invoking the
    supplied destructor callback on each segment's live elements. When
    reclaim_storage is true, additionally frees every segment array and, if a
    long segment index was allocated, frees it and restores the inline index. **/
void concurrent_vector_base::internal_clear( internal_array_op1 destroy, bool reclaim_storage ) {
    // Set "my_early_size" early, so that subscripting errors can be caught.
    // FIXME - doing so may be hurting exception safety
    __TBB_ASSERT( my_segment, NULL );
    size_type finish = my_early_size;
    my_early_size = 0;
    // Destroy elements segment by segment, highest segment first.
    while( finish>0 ) {
        segment_index_t k_old = segment_index_of(finish-1);
        segment_t& s = my_segment[k_old];
        __TBB_ASSERT( s.array, NULL );
        size_type base = segment_base(k_old);
        size_type j_end = finish-base;       // number of live elements in this segment
        __TBB_ASSERT( j_end, NULL );
        (*destroy)( s.array, j_end );
        finish = base;                       // continue with the preceding segment
    }
    // Free the arrays
    if ( reclaim_storage ) {
        size_t k = helper::find_segment_end(*this);
        while( k>0 ) {
            --k;
            segment_t& s = my_segment[k];
            // Clear the pointer before freeing so no stale reference survives.
            void* array = s.array;
            s.array = NULL;
            NFS_Free( array );
        }
        // Clear short segment.
        my_storage[0].array = NULL;
        my_storage[1].array = NULL;
        // If a heap-allocated long segment index is in use, drop it and fall
        // back to the inline short index.
        segment_t* s = my_segment;
        if ( s!=my_storage ) {
            my_segment = my_storage;
            NFS_Free( s );
        }
    }
}
//! Releases all resources owned by a dying arena and frees its backing storage.
/** Simpler variant without task-priority support: drains mailboxes, checks the
    single task stream is empty, releases the market, tears down the master's
    default context, and finally destroys the arena in place and frees the
    enclosing NFS allocation. **/
void arena::free_arena () {
    __TBB_ASSERT( !my_num_threads_active, "There are threads in the dying arena" );
    poison_value( my_guard );
    // Drain task proxies left in the mailboxes; mailbox slots are indexed from 1.
    intptr_t drained = 0;
    for ( unsigned i = 1; i <= my_num_slots; ++i )
        drained += mailbox(i).drain();
    __TBB_ASSERT(my_task_stream.empty() && my_task_stream.drain()==0, "Not all enqueued tasks were executed");
#if __TBB_COUNT_TASK_NODES
    // Account for the drained proxies in the global task node statistics.
    my_market->update_task_node_count( -drained );
#endif /* __TBB_COUNT_TASK_NODES */
    my_market->release();
#if __TBB_TASK_GROUP_CONTEXT
    __TBB_ASSERT( my_master_default_ctx, "Master thread never entered the arena?" );
    // The default context was placement-constructed in NFS memory: destroy
    // explicitly, then free the block by hand.
    my_master_default_ctx->~task_group_context();
    NFS_Free(my_master_default_ctx);
#endif /* __TBB_TASK_GROUP_CONTEXT */
#if __TBB_STATISTICS
    // NOTE(review): other versions of this function use "my_slots[i]" here;
    // confirm "slot" is the correct member name in this revision of the class.
    for( unsigned i = 0; i < my_num_slots; ++i )
        NFS_Free( slot[i].my_counters );
#endif /* __TBB_STATISTICS */
    // Base address of the allocation that contains this arena; captured before
    // the destructor runs so it can still be freed afterwards.
    void* storage = &mailbox(my_num_slots);
    this->~arena();
    NFS_Free( storage );
}
//------------------------------------------------------------------------ // Methods of affinity_partitioner_base_v3 //------------------------------------------------------------------------ void affinity_partitioner_base_v3::resize( unsigned factor ) { // Check factor to avoid asking for number of workers while there might be no arena. size_t new_size = factor ? factor*(governor::max_number_of_workers()+1) : 0; if( new_size!=my_size ) { if( my_array ) { NFS_Free( my_array ); // Following two assignments must be done here for sake of exception safety. my_array = NULL; my_size = 0; } if( new_size ) { my_array = static_cast<affinity_id*>(NFS_Allocate(new_size,sizeof(affinity_id), NULL )); memset( my_array, 0, sizeof(affinity_id)*new_size ); my_size = new_size; } } }
//! Releases a dying arena's resources and frees its backing storage (legacy layout).
/** This variant stores arena metadata in an ArenaPrefix reached via prefix().
    It drains mailboxes, reports leaked task objects when node counting is on,
    then destroys the prefix in place and frees the enclosing NFS allocation. **/
void arena::free_arena () {
    // Drain mailboxes
    // TODO: each scheduler should plug-and-drain its own mailbox when it terminates.
    intptr_t drain_count = 0;
    for( unsigned i=1; i<=prefix().number_of_slots; ++i )
        drain_count += mailbox(i).drain();
#if __TBB_COUNT_TASK_NODES
    prefix().task_node_count -= drain_count;
    // Any remaining count means task objects escaped cleanup somewhere.
    if( prefix().task_node_count ) {
        runtime_warning( "Leaked %ld task objects\n", long(prefix().task_node_count) );
    }
#endif /* __TBB_COUNT_TASK_NODES */
    // Base address of the allocation that contains this arena; captured before
    // the prefix destructor runs so it can still be freed afterwards.
    void* storage = &mailbox(prefix().number_of_slots);
    delete[] prefix().worker_list;
    prefix().~ArenaPrefix();
    NFS_Free( storage );
}