Example #1: arena::free_arena (TBB)
void arena::free_arena () {
    __TBB_ASSERT( !my_num_threads_active, "There are threads in the dying arena" );
    poison_value( my_guard );
    intptr_t drained = 0;
    for ( unsigned i = 1; i <= my_num_slots; ++i )
        drained += mailbox(i).drain();
#if __TBB_TASK_PRIORITY && TBB_USE_ASSERT
    for ( intptr_t i = 0; i < num_priority_levels; ++i )
        __TBB_ASSERT(my_task_stream[i].empty() && my_task_stream[i].drain()==0, "Not all enqueued tasks were executed");
#elif !__TBB_TASK_PRIORITY
    __TBB_ASSERT(my_task_stream.empty() && my_task_stream.drain()==0, "Not all enqueued tasks were executed");
#endif /* !__TBB_TASK_PRIORITY */
#if __TBB_COUNT_TASK_NODES
    my_market->update_task_node_count( -drained );
#endif /* __TBB_COUNT_TASK_NODES */
    my_market->release();
#if __TBB_TASK_GROUP_CONTEXT
    __TBB_ASSERT( my_master_default_ctx, "Master thread never entered the arena?" );
    my_master_default_ctx->~task_group_context();
    NFS_Free(my_master_default_ctx);
#endif /* __TBB_TASK_GROUP_CONTEXT */
#if __TBB_STATISTICS
    for( unsigned i = 0; i < my_num_slots; ++i )
        NFS_Free( my_slots[i].my_counters );
#endif /* __TBB_STATISTICS */
    void* storage  = &mailbox(my_num_slots);
    __TBB_ASSERT( my_num_threads_active == 0, NULL );
    __TBB_ASSERT( my_pool_state == SNAPSHOT_EMPTY || !my_max_num_workers, NULL );
    this->~arena();
#if TBB_USE_ASSERT > 1
    memset( storage, 0, allocation_size(my_max_num_workers) );
#endif /* TBB_USE_ASSERT > 1 */
    NFS_Free( storage );
}
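Here poison_value marks the arena's guard field so that any later touch of the freed arena trips debug checks. A minimal sketch of how poison_value and the companion poison_pointer (used in Example #2) can be defined, assuming a debug-only macro that stores a recognizable constant; this illustrates the idiom rather than TBB's exact definitions, and the name venom is chosen here:

#include <cstdint>

#if TBB_USE_ASSERT
// A recognizable constant; on 32-bit targets the cast keeps the low
// 0xDEADBEEF word. Hitting this value in a debugger points at use of an
// already-destroyed object.
const std::uintptr_t venom =
    static_cast<std::uintptr_t>(0xDDEEAADDDEADBEEFULL);

#define poison_value(g) ((g) = venom)

template <typename T>
inline void poison_pointer(T*& p) { p = reinterpret_cast<T*>(venom); }
#else
// In release builds poisoning compiles away entirely.
#define poison_value(g) ((void)0)

template <typename T>
inline void poison_pointer(T*&) {}
#endif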
Example #2: observer_proxy::~observer_proxy (TBB)
observer_proxy::~observer_proxy () {
    __TBB_ASSERT( !my_ref_count, "Attempt to destroy proxy still in use" );
    poison_value(my_ref_count);
    poison_pointer(my_prev);
    poison_pointer(my_next);
    --observer_proxy_count;    // maintain the global count of live proxies
}
Example #3: poison_mem
void
poison_mem(void *v, size_t len)
{
	uint32_t *ip = v;
	size_t i;
	uint32_t poison;

	/* The fill pattern is derived from the buffer address itself. */
	poison = poison_value(v);

	/* Stamp at most the first POISON_SIZE bytes. */
	if (len > POISON_SIZE)
		len = POISON_SIZE;
	len = len / sizeof(*ip);
	for (i = 0; i < len; i++)
		ip[i] = poison;
}
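Unlike the TBB macro sketched above, the kernel-style poison_value here is a function that derives the 32-bit fill pattern from the buffer address, so poison_check in Example #4 can recompute the expected value later instead of storing it. A plausible sketch of such a generator, with hypothetical constants and mixing; real implementations differ:

#include <cstdint>

// Hypothetical sketch: mix page-granular address bits into a fixed marker
// so each page gets a slightly different, reproducible pattern.
std::uint32_t poison_value(void* va) {
    std::uint32_t base = 0xdeadbeef;    // fixed marker pattern
    std::uint32_t salt =
        static_cast<std::uint32_t>(reinterpret_cast<std::uintptr_t>(va) >> 12);
    return base ^ (salt & 0xff);        // perturb the low byte per page
}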
Example #4: poison_check
int
poison_check(void *v, size_t len, size_t *pidx, uint32_t *pval)
{
	uint32_t *ip = v;
	size_t i;
	uint32_t poison;

	/* Recompute the same address-derived pattern that poison_mem wrote. */
	poison = poison_value(v);

	if (len > POISON_SIZE)
		len = POISON_SIZE;
	len = len / sizeof(*ip);
	for (i = 0; i < len; i++) {
		if (ip[i] != poison) {
			/* Report the first corrupt word and the value expected there. */
			*pidx = i;
			*pval = poison;
			return 1;
		}
	}
	return 0;
}
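Together, poison_mem and poison_check form a use-after-free tripwire: the free path stamps the buffer, and the reuse path verifies the stamp is still intact. A hedged usage sketch under the declarations above; release_buffer and reuse_buffer are names invented for illustration:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Assumes poison_mem/poison_check as shown in Examples #3 and #4.
void poison_mem(void* v, std::size_t len);
int poison_check(void* v, std::size_t len, std::size_t* pidx, std::uint32_t* pval);

void release_buffer(void* buf, std::size_t len) {
    poison_mem(buf, len);   // stamp the memory before it sits on a free list
}

void* reuse_buffer(void* buf, std::size_t len) {
    std::size_t idx;
    std::uint32_t expected;
    if (poison_check(buf, len, &idx, &expected)) {
        // Word idx no longer holds the expected pattern: something wrote to
        // this buffer after it was freed.
        std::fprintf(stderr, "use after free at %p word %zu (expected 0x%08x)\n",
                     buf, idx, expected);
        std::abort();
    }
    return buf;
}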
Example #5: arena::free_arena (TBB, another revision)
void arena::free_arena () {
    __TBB_ASSERT( !my_num_threads_active, "There are threads in the dying arena" );
    poison_value( my_guard );
    intptr_t drained = 0;
    for ( unsigned i = 1; i <= my_num_slots; ++i )
        drained += mailbox(i).drain();
    __TBB_ASSERT(my_task_stream.empty() && my_task_stream.drain()==0, "Not all enqueued tasks were executed");
#if __TBB_COUNT_TASK_NODES
    my_market->update_task_node_count( -drained );
#endif /* __TBB_COUNT_TASK_NODES */
    my_market->release();
#if __TBB_TASK_GROUP_CONTEXT
    __TBB_ASSERT( my_master_default_ctx, "Master thread never entered the arena?" );
    my_master_default_ctx->~task_group_context();
    NFS_Free(my_master_default_ctx);
#endif /* __TBB_TASK_GROUP_CONTEXT */
#if __TBB_STATISTICS
    for( unsigned i = 0; i < my_num_slots; ++i )
        NFS_Free( slot[i].my_counters );
#endif /* __TBB_STATISTICS */
    void* storage  = &mailbox(my_num_slots);
    this->~arena();
    NFS_Free( storage );
}
Example #6: task_group_context::~task_group_context (TBB)
task_group_context::~task_group_context () {
    if ( __TBB_load_relaxed(my_kind) == binding_completed ) {
        if ( governor::is_set(my_owner) ) {
            // Local update of the context list
            uintptr_t local_count_snapshot = my_owner->my_context_state_propagation_epoch;
            my_owner->my_local_ctx_list_update.store<relaxed>(1);
            // Prevent the load of the nonlocal update flag from being hoisted
            // above the store to the local update flag.
            atomic_fence();
            if ( my_owner->my_nonlocal_ctx_list_update.load<relaxed>() ) {
                spin_mutex::scoped_lock lock(my_owner->my_context_list_mutex);
                my_node.my_prev->my_next = my_node.my_next;
                my_node.my_next->my_prev = my_node.my_prev;
                my_owner->my_local_ctx_list_update.store<relaxed>(0);
            }
            else {
                my_node.my_prev->my_next = my_node.my_next;
                my_node.my_next->my_prev = my_node.my_prev;
                // The release fence guarantees that the update of our neighbors in
                // the context list is committed before a possible concurrent destroyer
                // proceeds once the following store resets the local update flag.
                my_owner->my_local_ctx_list_update.store<release>(0);
                if ( local_count_snapshot != the_context_state_propagation_epoch ) {
                    // Another thread was propagating a cancellation request when we removed
                    // ourselves from the list. We must ensure that it is not accessing us
                    // when this destructor finishes. We'll be able to acquire the lock
                    // below only after the other thread finishes with us.
                    spin_mutex::scoped_lock lock(my_owner->my_context_list_mutex);
                }
            }
        }
        else {
            // Nonlocal update of the context list
            // Synchronizes with generic_scheduler::cleanup_local_context_list()
            // TODO: evaluate and perhaps relax, or add some lock instead
            if ( internal::as_atomic(my_kind).fetch_and_store(dying) == detached ) {
                my_node.my_prev->my_next = my_node.my_next;
                my_node.my_next->my_prev = my_node.my_prev;
            }
            else {
                //TODO: evaluate and perhaps relax
                my_owner->my_nonlocal_ctx_list_update.fetch_and_increment<full_fence>();
                //TODO: evaluate and perhaps remove
                spin_wait_until_eq( my_owner->my_local_ctx_list_update, 0u );
                my_owner->my_context_list_mutex.lock();
                my_node.my_prev->my_next = my_node.my_next;
                my_node.my_next->my_prev = my_node.my_prev;
                my_owner->my_context_list_mutex.unlock();
                //TODO: evaluate and perhaps relax
                my_owner->my_nonlocal_ctx_list_update.fetch_and_decrement<full_fence>();
            }
        }
    }
#if __TBB_FP_CONTEXT
    internal::punned_cast<cpu_ctl_env*>(&my_cpu_ctl_env)->~cpu_ctl_env();
#endif
    poison_value(my_version_and_traits);
    if ( my_exception )
        my_exception->destroy();
    ITT_STACK(itt_caller != ITT_CALLER_NULL, caller_destroy, itt_caller);
}