Code example #1
void task_group_context::bind_to ( generic_scheduler *local_sched ) {
    __TBB_ASSERT ( __TBB_load_relaxed(my_kind) == binding_required, "Already bound or isolated?" );
    __TBB_ASSERT ( !my_parent, "Parent is set before initial binding" );
    my_parent = local_sched->my_innermost_running_task->prefix().context;
#if __TBB_FP_CONTEXT
    // Inherit FPU settings only if the context has not captured FPU settings yet.
    if ( !(my_version_and_traits & fp_settings) )
        copy_fp_settings(*my_parent);
#endif

    // The condition below prevents unnecessary thrashing of the parent context's cache line
    if ( !(my_parent->my_state & may_have_children) )
        my_parent->my_state |= may_have_children; // full fence is below
    if ( my_parent->my_parent ) {
        // Even if this context were made accessible for state change propagation
        // (by placing __TBB_store_with_release(s->my_context_list_head.my_next, &my_node)
        // above), a change could still be missed if propagation from a grand-ancestor
        // was underway concurrently with the binding.
        // Speculative propagation from the parent, combined with epoch counters that
        // detect the possibility of such a race, makes it possible to avoid taking
        // locks when there is no contention.

        // The acquire fence is necessary to prevent reordering of the subsequent
        // speculative loads of parent state data out of the scope where the epoch
        // counter comparison can reliably validate them.
        uintptr_t local_count_snapshot = __TBB_load_with_acquire( my_parent->my_owner->my_context_state_propagation_epoch );
        // Speculative propagation of parent's state. The speculation will be
        // validated by the epoch counters check further on.
        my_cancellation_requested = my_parent->my_cancellation_requested;
#if __TBB_TASK_PRIORITY
        my_priority = my_parent->my_priority;
#endif /* __TBB_TASK_PRIORITY */
        register_with( local_sched ); // Issues full fence

        // If the check below detects no concurrent state propagation, the full
        // fence above guarantees that the parent had the correct state during the
        // speculative copy before the fence. Otherwise the propagation from the
        // parent is repeated under the lock.
        if ( local_count_snapshot != the_context_state_propagation_epoch ) {
            // Another thread may be propagating a state change right now, so resort to the lock.
            context_state_propagation_mutex_type::scoped_lock lock(the_context_state_propagation_mutex);
            my_cancellation_requested = my_parent->my_cancellation_requested;
#if __TBB_TASK_PRIORITY
            my_priority = my_parent->my_priority;
#endif /* __TBB_TASK_PRIORITY */
        }
    }
    else {
        register_with( local_sched ); // Issues full fence
        // As there are no grand-ancestors, concurrent state propagation (if any)
        // may originate only from the parent context, so it is safe to copy the
        // state from it directly.
        my_cancellation_requested = my_parent->my_cancellation_requested;
#if __TBB_TASK_PRIORITY
        my_priority = my_parent->my_priority;
#endif /* __TBB_TASK_PRIORITY */
    }
    __TBB_store_relaxed(my_kind, binding_completed);
}
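
Note: the heart of bind_to() above is a validate-or-lock pattern: snapshot the propagation epoch, copy the parent's state speculatively, publish the context with a full fence, then re-check the epoch; only if the epoch moved is the copy repeated under the lock. Below is a minimal sketch of the same pattern in standard C++ atomics; all names (g_epoch, g_propagation_mutex, parent_cancelled, child_cancelled) are illustrative, not TBB's.

#include <atomic>
#include <cstdint>
#include <mutex>

std::atomic<std::uintptr_t> g_epoch{0};      // bumped by every propagation pass
std::mutex g_propagation_mutex;              // serializes propagation passes
std::atomic<bool> parent_cancelled{false};   // the parent state being copied
bool child_cancelled = false;                // the child state being initialized

void bind_speculatively() {
    // 1. Snapshot the epoch before the speculative copy; acquire pairs with
    //    the release bump performed by propagators.
    std::uintptr_t snapshot = g_epoch.load( std::memory_order_acquire );
    // 2. Speculatively copy the parent's state.
    child_cancelled = parent_cancelled.load( std::memory_order_relaxed );
    // 3. Publish the child so propagators can see it (register_with() issues
    //    a full fence in TBB; a seq_cst fence plays that role here).
    std::atomic_thread_fence( std::memory_order_seq_cst );
    // 4. Validate: if the epoch moved, a propagation pass may have raced with
    //    the copy above, so repeat it under the lock.
    if ( snapshot != g_epoch.load( std::memory_order_relaxed ) ) {
        std::lock_guard<std::mutex> lock( g_propagation_mutex );
        child_cancelled = parent_cancelled.load( std::memory_order_relaxed );
    }
}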
Code example #2
void concurrent_monitor::notify_one_relaxed() {
    if( waitset_ec.empty() )
        return;
    waitset_node_t* n;
    const waitset_node_t* end = waitset_ec.end();
    {
        tbb::spin_mutex::scoped_lock l( mutex_ec );
        __TBB_store_relaxed( epoch, __TBB_load_relaxed(epoch) + 1 );
        n = waitset_ec.front();
        if( n!=end ) {
            waitset_ec.remove( *n );
            to_thread_context(n)->in_waitset = false;
        }
    }
    if( n!=end )
        to_thread_context(n)->semaphore().V();
}
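
Note: the _relaxed suffix means notify_one_relaxed() itself issues no fence before inspecting the waitset, so a notifier must order its own state change first (TBB's plain notify_one() does this by calling atomic_fence() before delegating). A hedged producer-side sketch, assuming this file's concurrent_monitor and atomic_fence are in scope; the monitor and counter names are illustrative:

extern concurrent_monitor work_available;   // monitor that waiters park on
extern tbb::atomic<int>   pending_items;    // state a waiter re-checks

void produce_one() {
    ++pending_items;                      // publish the state change first
    atomic_fence();                       // full fence: change precedes waitset scan
    work_available.notify_one_relaxed();  // then wake at most one parked thread
}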
Code example #3
void concurrent_monitor::prepare_wait( thread_context& thr, uintptr_t ctx ) {
    if( !thr.ready )
        thr.init();
    // this is a good place to consume a pending spurious wakeup
    else if( thr.spurious ) {
        thr.spurious = false;
        thr.semaphore().P();
    }
    thr.context = ctx;
    thr.in_waitset = true;
    {
        tbb::spin_mutex::scoped_lock l( mutex_ec );
        __TBB_store_relaxed( thr.epoch, __TBB_load_relaxed(epoch) );
        waitset_ec.add( (waitset_t::node_t*)&thr );
    }
    atomic_fence();
}
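
Note: prepare_wait() is only half of a two-phase protocol; the trailing atomic_fence() is what lets the waiter safely re-check its condition after joining the waitset. A hedged waiter-side sketch, assuming the class's companion commit_wait() and cancel_wait() methods and the illustrative work_available/pending_items from the previous note:

void wait_for_work() {
    concurrent_monitor::thread_context thr;
    while ( pending_items == 0 ) {
        work_available.prepare_wait( thr, 0 );   // phase 1: enter the waitset
        if ( pending_items != 0 ) {
            work_available.cancel_wait( thr );   // raced with a notifier: back out
            break;
        }
        work_available.commit_wait( thr );       // phase 2: block until V()
    }
}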
Code example #4
//------------------------------------------------------------------------
// Methods of allocate_root_with_context_proxy
//------------------------------------------------------------------------
task& allocate_root_with_context_proxy::allocate( size_t size ) const {
    internal::generic_scheduler* s = governor::local_scheduler();
    __TBB_ASSERT( s, "Scheduler auto-initialization failed?" );
    task& t = s->allocate_task( size, NULL, &my_context );
    // The supported usage model prohibits concurrent initial binding, so no
    // interlocked operations or fences are needed to manipulate my_context.my_kind.
    if ( __TBB_load_relaxed(my_context.my_kind) == task_group_context::binding_required ) {
        // If we are in the outermost task dispatch loop of a master thread, then
        // there is nothing to bind this context to, so we skip the binding step
        // and treat the context as isolated.
        if ( s->my_innermost_running_task == s->my_dummy_task )
            __TBB_store_relaxed(my_context.my_kind, task_group_context::isolated);
        else
            my_context.bind_to( s );
    }
    ITT_STACK_CREATE(my_context.itt_caller);
    return t;
}
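
Note: this proxy is what the task::allocate_root(ctx) overload returns, so the binding logic above runs inside a placement-new expression. A hedged usage sketch with an illustrative root task type:

#include "tbb/task.h"

// Hypothetical root task; the overloaded operator new routes through
// allocate_root_with_context_proxy::allocate() shown above.
struct my_root : tbb::task {
    tbb::task* execute() { return NULL; }
};

void run_with_context() {
    tbb::task_group_context ctx;
    my_root& r = *new( tbb::task::allocate_root( ctx ) ) my_root;
    tbb::task::spawn_root_and_wait( r );   // ctx was bound (or isolated) above
}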
Code example #5
void concurrent_monitor::notify_all_relaxed() {
    if( waitset_ec.empty() )
        return;
    waitset_t temp;
    const waitset_node_t* end;
    {
        tbb::spin_mutex::scoped_lock l( mutex_ec );
        __TBB_store_relaxed( epoch, __TBB_load_relaxed(epoch) + 1 );
        waitset_ec.flush_to( temp );
        end = temp.end();
        for( waitset_node_t* n=temp.front(); n!=end; n=n->next )
            to_thread_context(n)->in_waitset = false;
    }
    waitset_node_t* nxt;
    for( waitset_node_t* n=temp.front(); n!=end; n=nxt ) {
        nxt = n->next;
        to_thread_context(n)->semaphore().V();
    }
#if TBB_USE_ASSERT
    temp.clear();
#endif
}
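
Note: notify_all_relaxed() detaches the whole waitset in O(1) under the lock (flush_to) and performs every semaphore V() after the lock scope ends, so wakeups never serialize behind the mutex. A self-contained sketch of that drain-then-signal shape using C++20 primitives; all names are illustrative:

#include <mutex>
#include <semaphore>
#include <vector>

struct waiter {
    bool in_waitset = true;
    std::binary_semaphore sem{ 0 };
};

std::mutex waitset_mutex;
std::vector<waiter*> waitset;        // stand-in for the intrusive waitset list

void notify_all_sketch() {
    std::vector<waiter*> detached;
    {
        std::lock_guard<std::mutex> l( waitset_mutex );
        detached.swap( waitset );    // O(1) detach, like flush_to(temp)
        for ( waiter* w : detached )
            w->in_waitset = false;   // cleared while still holding the lock
    }
    for ( waiter* w : detached )
        w->sem.release();            // V() outside the critical section
}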
Code example #6
File: task.cpp  Project: ElaraFX/tbb
//------------------------------------------------------------------------
// Methods of allocate_root_with_context_proxy
//------------------------------------------------------------------------
task& allocate_root_with_context_proxy::allocate( size_t size ) const {
    internal::generic_scheduler* s = governor::local_scheduler();
    __TBB_ASSERT( s, "Scheduler auto-initialization failed?" );
    __TBB_ASSERT( &my_context, "allocate_root(context) argument is a dereferenced NULL pointer" );
    task& t = s->allocate_task( size, NULL, &my_context );
    // The supported usage model prohibits concurrent initial binding, so no
    // interlocked operations or fences are needed to manipulate my_context.my_kind.
    if ( __TBB_load_relaxed(my_context.my_kind) == task_group_context::binding_required ) {
        // If we are in the outermost task dispatch loop of a master thread, then
        // there is nothing to bind this context to, so we skip the binding step
        // and treat the context as isolated.
        if ( s->master_outermost_level() )
            __TBB_store_relaxed(my_context.my_kind, task_group_context::isolated);
        else
            my_context.bind_to( s );
    }
#if __TBB_FP_CONTEXT
    if ( __TBB_load_relaxed(my_context.my_kind) == task_group_context::isolated &&
            !(my_context.my_version_and_traits & task_group_context::fp_settings) )
        my_context.copy_fp_settings( *s->my_arena->my_default_ctx );
#endif
    ITT_STACK_CREATE(my_context.itt_caller);
    return t;
}
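
Note: the extra #if __TBB_FP_CONTEXT block in this version gives isolated contexts the arena's default FPU settings when they have not captured any of their own. User code can also ask a context to capture the constructing thread's FPU state explicitly; a hedged sketch (fp_settings is a task_group_context trait in TBB builds with __TBB_FP_CONTEXT; treat its availability as an assumption):

#include "tbb/task.h"

// Capture the current thread's FPU control state (e.g. rounding mode) so
// tasks in this group execute under it.
tbb::task_group_context ctx(
    tbb::task_group_context::isolated,
    tbb::task_group_context::default_traits | tbb::task_group_context::fp_settings );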