// Requests cancellation of this task group and all of its descendant groups.
// Returns true if this call is the one that actually initiated cancellation,
// false if the group had already been canceled by an earlier request (from
// this or another thread). Thread-safe: the winner of the race is decided by
// the compare_and_swap below.
bool task_group_context::cancel_group_execution () {
    __TBB_ASSERT ( my_cancellation_requested == 0 || my_cancellation_requested == 1, "Invalid cancellation state");
    // Fast path: a plain read avoids the atomic RMW when cancellation is
    // already visible. Otherwise the CAS 0->1 arbitrates between concurrent
    // cancelers; a non-zero return means somebody else won.
    if ( my_cancellation_requested || as_atomic(my_cancellation_requested).compare_and_swap(1, 0) ) {
        // This task group and any descendants have already been canceled.
        // (A newly added descendant would inherit its parent's my_cancellation_requested,
        // not missing out on any cancellation still being propagated, and a context cannot be uncanceled.)
        return false;
    }
    // We won the race: push my_cancellation_requested = 1 down to every
    // descendant context via the market.
    governor::local_scheduler_weak()->my_market->propagate_task_group_state( &task_group_context::my_cancellation_requested, *this, (uintptr_t)1 );
    return true;
}
/** Picks an arena from `arenas` that can use another worker, scanning in
    global round-robin order starting at `next`. Returns NULL when no arena
    wants a worker; otherwise returns the chosen arena with its reference
    count already bumped on behalf of the incoming worker, and advances
    `next` past it for the following call.
    This method must be invoked under my_arenas_list_mutex. **/
arena* market::arena_in_need ( arena_list_type &arenas, arena *&next ) {
    if ( arenas.empty() )
        return NULL;
    // Resume the scan where the previous call left off so demand is spread
    // across arenas instead of always favoring the head of the list.
    arena_list_type::iterator it = next;
    __TBB_ASSERT( it != arenas.end(), NULL );
    do {
        arena& a = *it;
        // Advance (with wrap-around) before testing `a`, so `it` already
        // points at the position that should become the new `next` if `a`
        // is handed out below.
        if ( ++it == arenas.end() )
            it = arenas.begin();
        // An arena is "in need" when it has fewer active workers than its
        // current allotment.
        if ( a.num_workers_active() < a.my_num_workers_allotted ) {
            a.my_references += 2; // add a worker
            // NOTE(review): references are counted in steps of 2 — presumably
            // the low bit of my_references is reserved for another flag;
            // confirm against the arena ref-count encoding.
#if __TBB_TRACK_PRIORITY_LEVEL_SATURATION
            ++a.my_num_workers_present;
            ++my_priority_levels[a.my_top_priority].workers_present;
#endif /* __TBB_TRACK_PRIORITY_LEVEL_SATURATION */
            as_atomic(next) = &*it; // a subject for innocent data race under the reader lock
            // TODO: rework global round robin policy to local or random to avoid this write
            return &a;
        }
    } while ( it != next ); // full cycle completed without finding a needy arena
    return NULL;
}