Example #1
task_group_context::~task_group_context () {
    if ( my_kind != isolated ) {
        generic_scheduler *s = (generic_scheduler*)my_owner;
        if ( governor::is_set(s) ) {
            // Local update of the context list 
            uintptr_t local_count_snapshot = s->local_cancel_count;
            s->local_ctx_list_update = 1;
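            // Prevent the load of the nonlocal update flag below from being
            // hoisted above the store to the local update flag.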
            __TBB_full_memory_fence();
            if ( s->nonlocal_ctx_list_update ) {
                spin_mutex::scoped_lock lock(s->context_list_mutex);
                my_node.my_prev->my_next = my_node.my_next;
                my_node.my_next->my_prev = my_node.my_prev;
                s->local_ctx_list_update = 0;
            }
            else {
                my_node.my_prev->my_next = my_node.my_next;
                my_node.my_next->my_prev = my_node.my_prev;
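                // Release: the unlinking above must be committed before a
                // concurrent destroyer sees the flag reset.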
                __TBB_store_with_release( s->local_ctx_list_update, 0 );
                if ( local_count_snapshot != global_cancel_count ) {
                    // Another thread was propagating cancellation request when we removed
                    // ourselves from the list. We must ensure that it is not accessing us 
                    // when this destructor finishes. We'll be able to acquire the lock 
                    // below only after the other thread finishes with us.
                    spin_mutex::scoped_lock lock(s->context_list_mutex);
                }
            }
        }
        else {
            // Nonlocal update of the context list 
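            // The fetch-and-store resolves the race with the owner thread
            // detaching contexts while cleaning up its own context list.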
            if ( __TBB_FetchAndStoreW(&my_kind, dying) == detached ) {
                my_node.my_prev->my_next = my_node.my_next;
                my_node.my_next->my_prev = my_node.my_prev;
            }
            else {
                __TBB_FetchAndAddW(&s->nonlocal_ctx_list_update, 1);
                spin_wait_until_eq( s->local_ctx_list_update, 0u );
                s->context_list_mutex.lock();
                my_node.my_prev->my_next = my_node.my_next;
                my_node.my_next->my_prev = my_node.my_prev;
                s->context_list_mutex.unlock();
                __TBB_FetchAndAddW(&s->nonlocal_ctx_list_update, -1);
            }
        }
    }
#if TBB_USE_DEBUG
    my_version_and_traits = 0xDeadBeef;
#endif /* TBB_USE_DEBUG */
    if ( my_exception )
        my_exception->destroy();
    if (itt_caller != ITT_CALLER_NULL) ITT_STACK(caller_destroy, itt_caller);
}
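
A later revision of the same destructor expresses this synchronization through atomic fields with explicit memory-semantics template arguments instead of raw fences and fetch-and-store primitives:
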
task_group_context::~task_group_context () {
    if ( __TBB_load_relaxed(my_kind) == binding_completed ) {
        if ( governor::is_set(my_owner) ) {
            // Local update of the context list
            uintptr_t local_count_snapshot = my_owner->my_context_state_propagation_epoch;
            my_owner->my_local_ctx_list_update.store<relaxed>(1);
            // Prevent load of nonlocal update flag from being hoisted before the
            // store to local update flag.
            atomic_fence();
            if ( my_owner->my_nonlocal_ctx_list_update.load<relaxed>() ) {
                spin_mutex::scoped_lock lock(my_owner->my_context_list_mutex);
                my_node.my_prev->my_next = my_node.my_next;
                my_node.my_next->my_prev = my_node.my_prev;
                my_owner->my_local_ctx_list_update.store<relaxed>(0);
            }
            else {
                my_node.my_prev->my_next = my_node.my_next;
                my_node.my_next->my_prev = my_node.my_prev;
                // Release fence is necessary so that update of our neighbors in
                // the context list was committed when possible concurrent destroyer
                // proceeds after local update flag is reset by the following store.
                my_owner->my_local_ctx_list_update.store<release>(0);
                if ( local_count_snapshot != the_context_state_propagation_epoch ) {
                    // Another thread was propagating cancellation request when we removed
                    // ourselves from the list. We must ensure that it is not accessing us
                    // when this destructor finishes. We'll be able to acquire the lock
                    // below only after the other thread finishes with us.
                    spin_mutex::scoped_lock lock(my_owner->my_context_list_mutex);
                }
            }
        }
        else {
            // Nonlocal update of the context list
            // Synchronizes with generic_scheduler::cleanup_local_context_list()
            // TODO: evaluate and perhaps relax, or add some lock instead
            if ( internal::as_atomic(my_kind).fetch_and_store(dying) == detached ) {
                my_node.my_prev->my_next = my_node.my_next;
                my_node.my_next->my_prev = my_node.my_prev;
            }
            else {
                //TODO: evaluate and perhaps relax
                my_owner->my_nonlocal_ctx_list_update.fetch_and_increment<full_fence>();
                //TODO: evaluate and perhaps remove
                spin_wait_until_eq( my_owner->my_local_ctx_list_update, 0u );
                my_owner->my_context_list_mutex.lock();
                my_node.my_prev->my_next = my_node.my_next;
                my_node.my_next->my_prev = my_node.my_prev;
                my_owner->my_context_list_mutex.unlock();
                //TODO: evaluate and perhaps relax
                my_owner->my_nonlocal_ctx_list_update.fetch_and_decrement<full_fence>();
            }
        }
    }
#if __TBB_FP_CONTEXT
    internal::punned_cast<cpu_ctl_env*>(&my_cpu_ctl_env)->~cpu_ctl_env();
#endif
    poison_value(my_version_and_traits);
    if ( my_exception )
        my_exception->destroy();
    ITT_STACK(itt_caller != ITT_CALLER_NULL, caller_destroy, itt_caller);
}
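
Both revisions implement the same asymmetric handshake between the owner thread and non-owner threads that need to unlink a context from the owner's list: the owner raises a local-update flag, issues a full fence, and only then checks whether a non-local updater is active, while a non-local updater announces itself with a counter, waits for the local flag to drop, and takes the list mutex. The following is a minimal sketch of that pattern in standard C++11 atomics; the names (local_update, nonlocal_update, list_mutex, unlink_node) are illustrative stand-ins rather than TBB's, and the epoch re-check against the global cancellation counter is omitted.

#include <atomic>
#include <mutex>

std::atomic<int> local_update(0);     // owner thread is unlinking without the lock
std::atomic<int> nonlocal_update(0);  // number of non-owner threads unlinking
std::mutex       list_mutex;          // protects the doubly linked context list

void unlink_node() { /* splice the node out of the list */ }

// Fast path: runs on the thread that owns the list.
void owner_unlink() {
    local_update.store(1, std::memory_order_relaxed);
    // Full fence: the store above must be visible before the load below,
    // so the owner and a non-owner cannot both miss each other.
    std::atomic_thread_fence(std::memory_order_seq_cst);
    if (nonlocal_update.load(std::memory_order_relaxed)) {
        // A non-owner may be touching the list: fall back to the lock.
        std::lock_guard<std::mutex> lock(list_mutex);
        unlink_node();
        local_update.store(0, std::memory_order_relaxed);
    } else {
        unlink_node();
        // Release: the unlinking must be committed before the flag is cleared,
        // or a waiting non-owner could observe a half-updated list.
        local_update.store(0, std::memory_order_release);
    }
}

// Slow path: runs on a thread that does not own the list.
void nonlocal_unlink() {
    nonlocal_update.fetch_add(1);                             // announce ourselves
    while (local_update.load(std::memory_order_acquire)) {}   // wait out a lock-free local update
    {
        std::lock_guard<std::mutex> lock(list_mutex);
        unlink_node();
    }
    nonlocal_update.fetch_sub(1);
}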