Example no. 1
0
/** Propagates a cancellation request from the given context down the tree of
    dependent contexts by visiting every live scheduler (workers and masters)
    and having each walk its thread-local list of contexts.

    Precondition: ctx.my_cancellation_requested is set (asserted below).
    NOTE(review): ctx itself is only read via the assertion here; the actual
    per-context walk happens inside generic_scheduler::propagate_cancellation. **/
void market::propagate_cancellation ( task_group_context& ctx ) {
    __TBB_ASSERT ( ctx.my_cancellation_requested, "No cancellation request in the context" );
    // The whole propagation algorithm is under the lock in order to ensure correctness 
    // in case of parallel cancellations at the different levels of the context tree.
    // See the note 1 at the bottom of this file.
    global_market_mutex_type::scoped_lock lock(theMarketMutex);
    // Advance global cancellation epoch. The fetch-and-add presumably returns the
    // PRE-increment value (TODO confirm against __TBB_FetchAndAddWrelease's contract);
    // that value is stored into each scheduler's local epoch below.
    uintptr_t global_epoch = __TBB_FetchAndAddWrelease(&global_cancel_count, 1);
    // Propagate to all workers and masters and sync up their local epochs with the global one
    // NOTE(review): the sibling propagate_task_group_state bounds its loop with
    // my_first_unused_worker_idx, not my_num_workers — confirm this is intentional.
    unsigned num_workers = my_num_workers;
    for ( unsigned i = 0; i < num_workers; ++i ) {
        generic_scheduler *s = my_workers[i];
        // If the worker is only about to be registered, skip it.
        if ( s ) {
            s->propagate_cancellation();
            s->local_cancel_count = global_epoch;
        }
    }
    // Masters are reached through their arenas; slot 0 of each arena presumably
    // belongs to the master thread that created it — TODO confirm.
    arena_list_type::iterator it = my_arenas.begin();
    for ( ; it != my_arenas.end(); ++it ) {
        generic_scheduler *s = it->slot[0].my_scheduler;
        // If the master is under construction, skip it.
        if ( s ) { 
            s->propagate_cancellation();
            s->local_cancel_count = global_epoch;
        }
    }
}
Example no. 2
0
bool market::propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state ) {
    if ( !(src.my_state & task_group_context::may_have_children) )
        return true;
    // The whole propagation algorithm is under the lock in order to ensure correctness
    // in case of concurrent state changes at the different levels of the context tree.
    // See comment at the bottom of scheduler.cpp
    context_state_propagation_mutex_type::scoped_lock lock(the_context_state_propagation_mutex);
    if ( src.*mptr_state != new_state )
        // Another thread has concurrently changed the state. Back down.
        return false;
    // Advance global state propagation epoch
    __TBB_FetchAndAddWrelease(&the_context_state_propagation_epoch, 1);
    // Propagate to all workers and masters and sync up their local epochs with the global one
    unsigned num_workers = my_first_unused_worker_idx;
    for ( unsigned i = 0; i < num_workers; ++i ) {
        generic_scheduler *s = my_workers[i];
        // If the worker is only about to be registered, skip it.
        if ( s )
            s->propagate_task_group_state( mptr_state, src, new_state );
    }
    // Propagate to all master threads
    // The whole propagation sequence is locked, thus no contention is expected
    for( scheduler_list_type::iterator it = my_masters.begin(); it != my_masters.end(); it++  )
        it->propagate_task_group_state( mptr_state, src, new_state );
    return true;
}
Example no. 3
0
//! Release read lock on the given mutex
/** Atomically decrements the reader count by ONE_READER. The "Wrelease"
    fetch-and-add macro presumably carries release memory semantics so that
    reads done under the lock are ordered before the release — TODO confirm
    against the macro's definition. **/
void spin_rw_mutex_v3::internal_release_reader()
{
    // Caller must currently hold at least one reader share.
    __TBB_ASSERT( state & READERS, "invalid state of a read lock: no readers" );
    ITT_NOTIFY(sync_releasing, this); // release reader
    // Subtract one reader share; negation is via intptr_t to match state's width.
    __TBB_FetchAndAddWrelease( &state,-(intptr_t)ONE_READER);
}