Example #1
 /*override*/Buffer* get_buffer( void* ) {
     unsigned long next_input;
     unsigned free_buffer = 0; 
     { // lock protected scope
         tbb::spin_mutex::scoped_lock lock(input_lock);
         if( current_token>=StreamSize )
             return NULL;
         next_input = current_token++; 
         // once in a while, emulate waiting for input; this only makes sense for serial input
         if( is_serial() && WaitTest.required() )
             WaitTest.probe( );
         while( free_buffer<MaxBuffer )
             if( __TBB_load_with_acquire(buffer[free_buffer].is_busy) )
                 ++free_buffer;
             else {
                 buffer[free_buffer].is_busy = true;
                 break;
             }
     }
     ASSERT( free_buffer<my_number_of_tokens, "premature reuse of buffer" );
     Buffer* b = &buffer[free_buffer]; 
     ASSERT( &buffer[0] <= b, NULL ); 
     ASSERT( b <= &buffer[MaxBuffer-1], NULL ); 
     ASSERT( b->id == Buffer::unused, NULL);
     b->id = next_input;
     ASSERT( b->sequence_number == Buffer::unused, NULL);
     return b;
 }
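
The claiming pattern used by get_buffer, reduced to a standalone sketch with std::atomic in place of the internal __TBB_load_with_acquire macro: scan a fixed pool under a lock, check each slot's busy flag with an acquire load, and mark the chosen slot busy before leaving the locked scope. Slot, claim_slot and kSlots are illustrative names, not TBB entities.

#include <atomic>
#include <cstddef>
#include <mutex>

struct Slot {
    std::atomic<bool> busy{false};
    unsigned long id = 0;
};

constexpr std::size_t kSlots = 8;
Slot slots[kSlots];
std::mutex slot_lock;

Slot* claim_slot(unsigned long token) {
    std::lock_guard<std::mutex> guard(slot_lock);   // plays the role of the scoped_lock above
    for (std::size_t i = 0; i < kSlots; ++i) {
        // Acquire load: if another thread released this slot with a release
        // store, everything it wrote into the slot is visible here.
        if (!slots[i].busy.load(std::memory_order_acquire)) {
            slots[i].busy.store(true, std::memory_order_relaxed);  // claim while holding the lock
            slots[i].id = token;
            return &slots[i];
        }
    }
    return nullptr;  // no free slot; the caller decides how to back off
}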
Example #2
void task_group_context::bind_to ( generic_scheduler *local_sched ) {
    __TBB_ASSERT ( __TBB_load_relaxed(my_kind) == binding_required, "Already bound or isolated?" );
    __TBB_ASSERT ( !my_parent, "Parent is set before initial binding" );
    my_parent = local_sched->my_innermost_running_task->prefix().context;
#if __TBB_FP_CONTEXT
    // Inherit FPU settings only if the context has not captured FPU settings yet.
    if ( !(my_version_and_traits & fp_settings) )
        copy_fp_settings(*my_parent);
#endif

    // The condition below prevents unnecessary thrashing of the parent context's cache line
    if ( !(my_parent->my_state & may_have_children) )
        my_parent->my_state |= may_have_children; // full fence is below
    if ( my_parent->my_parent ) {
        // Even if this context were made accessible for state change propagation
        // (by placing __TBB_store_with_release(s->my_context_list_head.my_next, &my_node)
        // above), it could still be missed if state propagation from a grand-ancestor
        // was underway concurrently with the binding.
        // Speculative propagation from the parent, together with epoch counters that
        // detect the possibility of such a race, makes it possible to avoid taking
        // locks when there is no contention.

        // The acquire fence is necessary to prevent reordering of the subsequent
        // speculative loads of parent state data out of the scope where the epoch
        // counter comparison can reliably validate them.
        uintptr_t local_count_snapshot = __TBB_load_with_acquire( my_parent->my_owner->my_context_state_propagation_epoch );
        // Speculative propagation of parent's state. The speculation will be
        // validated by the epoch counters check further on.
        my_cancellation_requested = my_parent->my_cancellation_requested;
#if __TBB_TASK_PRIORITY
        my_priority = my_parent->my_priority;
#endif /* __TBB_TASK_PRIORITY */
        register_with( local_sched ); // Issues full fence

        // If the following condition detects no state propagation, the full fence
        // above guarantees that the parent had the correct state during the
        // speculative propagation before the fence. Otherwise the propagation from
        // the parent is repeated under the lock.
        if ( local_count_snapshot != the_context_state_propagation_epoch ) {
            // Another thread may be propagating state change right now. So resort to lock.
            context_state_propagation_mutex_type::scoped_lock lock(the_context_state_propagation_mutex);
            my_cancellation_requested = my_parent->my_cancellation_requested;
#if __TBB_TASK_PRIORITY
            my_priority = my_parent->my_priority;
#endif /* __TBB_TASK_PRIORITY */
        }
    }
    else {
        register_with( local_sched ); // Issues full fence
        // As we do not have grand-ancestors, concurrent state propagation (if any)
        // may originate only from the parent context, and thus it is safe to directly
        // copy the state from it.
        my_cancellation_requested = my_parent->my_cancellation_requested;
#if __TBB_TASK_PRIORITY
        my_priority = my_parent->my_priority;
#endif /* __TBB_TASK_PRIORITY */
    }
    __TBB_store_relaxed(my_kind, binding_completed);
}
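
A hedged, standalone reduction of the speculate-then-validate protocol above, using std::atomic and std::mutex. The names g_propagation_epoch, g_propagation_mutex, Ctx and bind_to_parent are made up, and a seq_cst fence stands in for the full fence that register_with issues in the real code.

#include <atomic>
#include <cstdint>
#include <mutex>

std::atomic<std::uintptr_t> g_propagation_epoch{0};  // bumped by every propagation pass
std::mutex g_propagation_mutex;                      // held by threads propagating state

struct Ctx {
    Ctx* parent = nullptr;
    std::atomic<bool> cancelled{false};
};

void bind_to_parent(Ctx& child, Ctx& parent) {
    child.parent = &parent;
    // Acquire: the speculative read of the parent's state below must not be
    // hoisted above this snapshot, otherwise the epoch check could not validate it.
    std::uintptr_t snapshot = g_propagation_epoch.load(std::memory_order_acquire);
    bool speculative = parent.cancelled.load(std::memory_order_relaxed);
    // In the real code, registering the context issues a full fence; a seq_cst
    // fence stands in for it in this sketch.
    std::atomic_thread_fence(std::memory_order_seq_cst);
    if (snapshot != g_propagation_epoch.load(std::memory_order_relaxed)) {
        // A propagation pass may have run concurrently with the binding:
        // repeat the copy under the lock that propagators hold.
        std::lock_guard<std::mutex> lock(g_propagation_mutex);
        speculative = parent.cancelled.load(std::memory_order_relaxed);
    }
    child.cancelled.store(speculative, std::memory_order_relaxed);
}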
Example #3
void market::wait_workers () {
    // usable for this kind of scheduler only
    __TBB_ASSERT(governor::needsWaitWorkers(), NULL);
    // wait till the last terminating worker has decreased my_ref_count
    while (__TBB_load_with_acquire(my_ref_count) > 1)
        __TBB_Yield();
    __TBB_ASSERT(1 == my_ref_count, NULL);
    release();
}
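
The same wait loop in portable terms, as a minimal sketch: spin on an acquire load of a reference count and yield until only the waiter's own reference is left. RefCounted and wait_for_others are illustrative names.

#include <atomic>
#include <thread>

struct RefCounted {
    std::atomic<long> ref_count{1};   // the waiting thread holds the last reference

    // Spin until every other owner has dropped its reference, yielding between
    // probes. The acquire load pairs with the release decrement performed by
    // departing workers, so their final writes are visible before teardown.
    void wait_for_others() {
        while (ref_count.load(std::memory_order_acquire) > 1)
            std::this_thread::yield();
    }
};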
Example #4
void generic_scheduler::propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state ) {
    spin_mutex::scoped_lock lock(my_context_list_mutex);
    // The acquire fence is necessary to ensure that the subsequent node->my_next load
    // returns the correct value in case the node was just inserted by another thread.
    // The fence also ensures visibility of the correct my_parent value.
    context_list_node_t *node = __TBB_load_with_acquire(my_context_list_head.my_next);
    while ( node != &my_context_list_head ) {
        task_group_context &ctx = __TBB_get_object_ref(task_group_context, my_node, node);
        if ( ctx.*mptr_state != new_state )
            ctx.propagate_task_group_state( mptr_state, src, new_state );
        node = node->my_next;
        __TBB_ASSERT( is_alive(ctx.my_version_and_traits), "Local context list contains destroyed object" );
    }
    // Sync up local propagation epoch with the global one. Release fence prevents
    // reordering of possible store to *mptr_state after the sync point.
    __TBB_store_with_release(my_context_state_propagation_epoch, the_context_state_propagation_epoch);
}
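
A reduced sketch of this traversal with standard atomics: walk a circular intrusive list whose head link is published elsewhere with a release store, update each node's flag, then publish the local epoch with a release store. Node, ListOwner, propagate and local_epoch are hypothetical names, not TBB types.

#include <atomic>
#include <cstdint>
#include <mutex>

struct Node {
    std::atomic<Node*> next{nullptr};
    std::atomic<bool> cancelled{false};
};

struct ListOwner {
    std::mutex list_mutex;
    Node head;                                    // sentinel of a circular intrusive list
    std::atomic<std::uintptr_t> local_epoch{0};

    ListOwner() { head.next.store(&head, std::memory_order_relaxed); }

    void propagate(bool new_state, std::uintptr_t global_epoch) {
        std::lock_guard<std::mutex> lock(list_mutex);
        // Acquire: a node linked in by another thread with a release store is
        // observed fully constructed here.
        Node* n = head.next.load(std::memory_order_acquire);
        while (n != &head) {
            if (n->cancelled.load(std::memory_order_relaxed) != new_state)
                n->cancelled.store(new_state, std::memory_order_relaxed);
            n = n->next.load(std::memory_order_relaxed);
        }
        // Release: the state stores above cannot be reordered past this point,
        // mirroring the release store to the propagation epoch.
        local_epoch.store(global_epoch, std::memory_order_release);
    }
};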
Example #5
//------------------------------------------------------------------------
// Methods of allocate_root_with_context_proxy
//------------------------------------------------------------------------
task& allocate_root_with_context_proxy::allocate( size_t size ) const {
    internal::generic_scheduler* v = governor::local_scheduler();
    __TBB_ASSERT( v, "thread did not activate a task_scheduler_init object?" );
    task_prefix& p = v->innermost_running_task->prefix();
    task& t = v->allocate_task( size, __TBB_CONTEXT_ARG(NULL, &my_context) );
    // The supported usage model prohibits concurrent initial binding. Thus we 
    // do not need interlocked operations or fences here.
    if ( my_context.my_kind == task_group_context::binding_required ) {
        __TBB_ASSERT ( my_context.my_owner, "Context without owner" );
        __TBB_ASSERT ( !my_context.my_parent, "Parent context set before initial binding" );
        // If we are in the outermost task dispatch loop of a master thread, then
        // there is nothing to bind this context to, and we skip the binding part.
        if ( v->innermost_running_task != v->dummy_task ) {
            // By not using a fence here we get faster code on the normal execution
            // path in exchange for a slightly higher probability of taking the deeper
            // traversal branch when a cancellation is in flight. Normally cache coherency
            // mechanisms are efficient enough to deliver the updated value most of the time.
            uintptr_t local_count_snapshot = ((generic_scheduler*)my_context.my_owner)->local_cancel_count;
            __TBB_store_with_release(my_context.my_parent, p.context);
            uintptr_t global_count_snapshot = __TBB_load_with_acquire(global_cancel_count);
            if ( !my_context.my_cancellation_requested ) {
                if ( local_count_snapshot == global_count_snapshot ) {
                    // It is possible that there is an active cancellation request in our
                    // parent chain. Fortunately the equality of the local and global
                    // counters means that if this is the case, it has already been
                    // propagated to our parent.
                    my_context.my_cancellation_requested = p.context->my_cancellation_requested;
                } else {
                    // Another thread was propagating a cancellation request at the moment
                    // we set our parent, but since we do not use locks we could have been
                    // skipped. So we have to make sure that we receive the cancellation
                    // request if one of our ancestors has been canceled.
                    my_context.propagate_cancellation_from_ancestors();
                }
            }
        }
        my_context.my_kind = task_group_context::binding_completed;
    }
    // else the context either has already been associated with its parent or is isolated
    ITT_STACK_CREATE(my_context.itt_caller);
    return t;
}
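
What the fallback branch plausibly has to do when the counter snapshots differ, sketched with standard atomics: walk the ancestor chain and adopt any pending cancellation. Ctx and its members are illustrative; this is not TBB's propagate_cancellation_from_ancestors.

#include <atomic>

struct Ctx {
    Ctx* parent = nullptr;
    std::atomic<bool> cancellation_requested{false};
};

// Pull a cancellation request down from any ancestor that already carries one;
// this is the "deeper traversal branch" taken when the snapshots do not match.
void propagate_cancellation_from_ancestors(Ctx& ctx) {
    for (Ctx* a = ctx.parent; a; a = a->parent) {
        if (a->cancellation_requested.load(std::memory_order_relaxed)) {
            ctx.cancellation_requested.store(true, std::memory_order_relaxed);
            break;
        }
    }
}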
Example #6
void* itt_load_pointer_with_acquire_v3( const void* src ) {
    void* result = __TBB_load_with_acquire(*static_cast<void*const*>(src));
    ITT_NOTIFY(sync_acquired, const_cast<void*>(src));
    return result;
}
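
In portable C++ terms the wrapper amounts to an acquire load of a pointer-sized location (the ITT notification aside). A sketch under the assumption that the addressed location really is a std::atomic<void*>; the name load_pointer_with_acquire is illustrative.

#include <atomic>

// The cast is only there to mirror the void*-based signature of the wrapper above.
void* load_pointer_with_acquire(const void* src) {
    return static_cast<const std::atomic<void*>*>(src)
               ->load(std::memory_order_acquire);
}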
Example #7
bool arena::is_out_of_work() {
    // TODO: rework it to return at least a hint about where a task was found; better if the task itself.
    for(;;) {
        pool_state_t snapshot = my_pool_state;
        switch( snapshot ) {
            case SNAPSHOT_EMPTY:
                return true;
            case SNAPSHOT_FULL: {
                // Use a unique id for "busy" in order to avoid ABA problems.
                const pool_state_t busy = pool_state_t(&busy);
                // Request permission to take snapshot
                if( my_pool_state.compare_and_swap( busy, SNAPSHOT_FULL )==SNAPSHOT_FULL ) {
                    // Got permission. Take the snapshot.
                    // NOTE: This is not a lock, as the state can be set to FULL at
                    //       any moment by a thread that spawns/enqueues a new task.
                    size_t n = my_limit;
                    // Make local copies of volatile parameters. A change in them during
                    // the snapshot-taking procedure invalidates the attempt and returns
                    // this thread to the dispatch loop.
#if __TBB_TASK_PRIORITY
                    intptr_t top_priority = my_top_priority;
                    uintptr_t reload_epoch = my_reload_epoch;
                    // Inspect primary task pools first
#endif /* __TBB_TASK_PRIORITY */
                    size_t k; 
                    for( k=0; k<n; ++k ) {
                        if( my_slots[k].task_pool != EmptyTaskPool &&
                            __TBB_load_relaxed(my_slots[k].head) < __TBB_load_relaxed(my_slots[k].tail) )
                        {
                            // k-th primary task pool is nonempty and does contain tasks.
                            break;
                        }
                    }
                    __TBB_ASSERT( k <= n, NULL );
                    bool work_absent = k == n;
#if __TBB_TASK_PRIORITY
                    // Variable tasks_present indicates presence of tasks at any priority
                    // level, while work_absent refers only to the current priority.
                    bool tasks_present = !work_absent || my_orphaned_tasks;
                    bool dequeuing_possible = false;
                    if ( work_absent ) {
                        // Check for the possibility that recent priority changes
                        // brought some tasks to the current priority level

                        uintptr_t abandonment_epoch = my_abandonment_epoch;
                        // The master thread's scheduler needs special handling as it
                        // may be destroyed at any moment (workers' schedulers are
                        // guaranteed to be alive while at least one thread is in the arena).
                        // Concurrency with task group state change propagation has to be excluded too.
                        my_market->my_arenas_list_mutex.lock();
                        generic_scheduler *s = my_slots[0].my_scheduler;
                        if ( s && __TBB_CompareAndSwapW(&my_slots[0].my_scheduler, (intptr_t)LockedMaster, (intptr_t)s) == (intptr_t)s ) {
                            __TBB_ASSERT( my_slots[0].my_scheduler == LockedMaster && s != LockedMaster, NULL );
                            work_absent = !may_have_tasks( s, my_slots[0], tasks_present, dequeuing_possible );
                            __TBB_store_with_release( my_slots[0].my_scheduler, s );
                        }
                        my_market->my_arenas_list_mutex.unlock();
                        // The following loop is subject to data races. While the k-th slot's
                        // scheduler is being examined, the corresponding worker can either
                        // leave for RML or migrate to another arena.
                        // The races are not prevented because all of them are benign.
                        // First, the code relies on the fact that a worker thread's scheduler
                        // object persists until the whole library is deinitialized.
                        // Second, in the worst case the races can only cause another
                        // round of stealing attempts to be undertaken. Introducing complex
                        // synchronization into this coldest part of the scheduler's control
                        // flow does not seem to make sense, because it is both unlikely to
                        // ever have any observable performance effect and would require
                        // additional synchronization code on the hotter paths.
                        for( k = 1; work_absent && k < n; ++k )
                            work_absent = !may_have_tasks( my_slots[k].my_scheduler, my_slots[k], tasks_present, dequeuing_possible );
                        // Preclude prematurely switching the arena off because of a race in the previous loop.
                        work_absent = work_absent
                                      && !__TBB_load_with_acquire(my_orphaned_tasks)
                                      && abandonment_epoch == my_abandonment_epoch;
                    }
#endif /* __TBB_TASK_PRIORITY */
                    // Test and test-and-set.
                    if( my_pool_state==busy ) {
#if __TBB_TASK_PRIORITY
                        bool no_fifo_tasks = my_task_stream[top_priority].empty();
                        work_absent = work_absent && (!dequeuing_possible || no_fifo_tasks)
                                      && top_priority == my_top_priority && reload_epoch == my_reload_epoch;
#else
                        bool no_fifo_tasks = my_task_stream.empty();
                        work_absent = work_absent && no_fifo_tasks;
#endif /* __TBB_TASK_PRIORITY */
                        if( work_absent ) {
#if __TBB_TASK_PRIORITY
                            if ( top_priority > my_bottom_priority ) {
                                if ( my_market->lower_arena_priority(*this, top_priority - 1, top_priority)
                                     && !my_task_stream[top_priority].empty() )
                                {
                                    atomic_update( my_skipped_fifo_priority, top_priority, std::less<intptr_t>());
                                }
                            }
                            else if ( !tasks_present && !my_orphaned_tasks && no_fifo_tasks ) {
#endif /* __TBB_TASK_PRIORITY */
                                // Save the current demand value before setting SNAPSHOT_EMPTY
                                // to avoid a race with advertise_new_work.
                                int current_demand = (int)my_max_num_workers;
                                if( my_pool_state.compare_and_swap( SNAPSHOT_EMPTY, busy )==busy ) {
                                    // This thread transitioned pool to empty state, and thus is 
                                    // responsible for telling RML that there is no other work to do.
                                    my_market->adjust_demand( *this, -current_demand );
#if __TBB_TASK_PRIORITY
                                    // Check for the presence of enqueued tasks "lost" on some of the
                                    // priority levels, because updating the arena priority and switching
                                    // the arena into the "populated" (FULL) state happen non-atomically.
                                    // Imposing atomicity would require task::enqueue() to use a lock,
                                    // which is unacceptable.
                                    bool switch_back = false;
                                    for ( int p = 0; p < num_priority_levels; ++p ) {
                                        if ( !my_task_stream[p].empty() ) {
                                            switch_back = true;
                                            if ( p < my_bottom_priority || p > my_top_priority )
                                                my_market->update_arena_priority(*this, p);
                                        }
                                    }
                                    if ( switch_back )
                                        advertise_new_work</*Spawned*/false>();
#endif /* __TBB_TASK_PRIORITY */
                                    return true;
                                }
                                return false;
#if __TBB_TASK_PRIORITY
                            }
#endif /* __TBB_TASK_PRIORITY */
                        }
                        // Undo previous transition SNAPSHOT_FULL-->busy, unless another thread undid it.
                        my_pool_state.compare_and_swap( SNAPSHOT_FULL, busy );
                    }
                } 
                return false;
            }
            default:
                // Another thread is taking a snapshot.
                return false;
        }
    }
}
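
A compact sketch of the snapshot protocol this function implements, with std::atomic in place of TBB's atomics: FULL -> busy -> EMPTY transitions driven by compare-and-swap, where "busy" is the address of a local variable so that concurrent snapshot takers never mistake each other's token (the ABA note above). PoolState, pool_state and pool_really_empty are made-up names, and the priority machinery is omitted.

#include <atomic>
#include <cstdint>

using PoolState = std::uintptr_t;
constexpr PoolState SNAPSHOT_EMPTY = 0;
constexpr PoolState SNAPSHOT_FULL  = PoolState(-1);

std::atomic<PoolState> pool_state{SNAPSHOT_FULL};

// Stand-in for the expensive scan of all task pools (details elided).
bool pool_really_empty() { return true; }

bool is_out_of_work() {
    for (;;) {
        PoolState snapshot = pool_state.load(std::memory_order_relaxed);
        if (snapshot == SNAPSHOT_EMPTY)
            return true;
        if (snapshot != SNAPSHOT_FULL)
            return false;                         // another thread is taking a snapshot
        // The address of a local is a token unique to this call, so two
        // concurrent snapshot takers cannot confuse each other's "busy" value.
        const PoolState busy = PoolState(&snapshot);
        PoolState expected = SNAPSHOT_FULL;
        if (!pool_state.compare_exchange_strong(expected, busy))
            continue;                             // lost the race; re-read the state
        bool work_absent = pool_really_empty();   // long scan; may be invalidated meanwhile
        // Test and test-and-set: attempt the switch only if nobody reset the
        // state back to FULL while we were scanning.
        if (work_absent && pool_state.load(std::memory_order_relaxed) == busy) {
            expected = busy;
            if (pool_state.compare_exchange_strong(expected, SNAPSHOT_EMPTY))
                return true;                      // this thread switched the pool off
            return false;
        }
        // Undo FULL -> busy unless another thread already undid it.
        expected = busy;
        pool_state.compare_exchange_strong(expected, SNAPSHOT_FULL);
        return false;
    }
}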
Example #8
inline void WaitForException () {
    int n = 0;
    while ( ++n < c_Timeout && !__TBB_load_with_acquire(g_ExceptionCaught) )
        __TBB_Yield();
    ASSERT_WARNING( n < c_Timeout, "WaitForException failed" );
}
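
The same bounded wait as a standalone sketch with std::atomic; g_exception_caught and kTimeout are illustrative stand-ins for g_ExceptionCaught and c_Timeout, and the warning assertion is replaced by a return value.

#include <atomic>
#include <thread>

std::atomic<bool> g_exception_caught{false};  // set with release semantics elsewhere
constexpr int kTimeout = 1000000;

// Bounded spin: yield between probes and give up after kTimeout iterations.
bool wait_for_exception() {
    int n = 0;
    while (++n < kTimeout && !g_exception_caught.load(std::memory_order_acquire))
        std::this_thread::yield();
    return n < kTimeout;   // false means the flag never became visible in time
}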