void observer_list::clear () {
    __TBB_ASSERT( this != &the_global_observer_list, "Method clear() cannot be used on the list of global observers" );
    // Though the method will work fine for the empty list, we require the caller
    // to check for the list emptiness before invoking it to avoid extra overhead.
    __TBB_ASSERT( !empty(), NULL );
    {
        scoped_lock lock(mutex(), /*is_writer=*/true);
        observer_proxy *next = my_head;
        while ( observer_proxy *p = next ) {
            __TBB_ASSERT( p->my_version >= 6, NULL );
            next = p->my_next;
            // Both proxy p and observer p->my_observer (if non-null) are guaranteed
            // to be alive while the list is locked.
            task_scheduler_observer_v3 *obs = p->my_observer;
            // Make sure that possible concurrent observer destruction does not
            // conflict with the proxy list cleanup.
            if ( !obs || !(p = (observer_proxy*)__TBB_FetchAndStoreW(&obs->my_proxy, 0)) )
                continue;
            __TBB_ASSERT( !next || p == next->my_prev, NULL );
            __TBB_ASSERT( is_alive(p->my_ref_count), "Observer's proxy died prematurely" );
            __TBB_ASSERT( p->my_ref_count == 1, "Reference for observer is missing" );
            __TBB_ASSERT( !obs->my_busy_count, "Local observer in an empty arena cannot be marked as busy" );
            store<relaxed>( obs->my_busy_count, interface6::task_scheduler_observer::v6_trait );
#if TBB_USE_ASSERT
            p->my_observer = NULL;
            p->my_ref_count = 0;
#endif /* TBB_USE_ASSERT */
            remove(p);
            delete p;
        }
    }
    while( my_head )
        __TBB_Yield();
}
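/* A minimal standalone sketch (not TBB code) of the ownership handoff used in
   clear() above: each side zeroes the observer's back-pointer with an atomic
   exchange, and whichever side receives the non-null value wins the right to
   free the proxy, so list cleanup and a concurrent observer deactivation can
   never both delete it. All names below (Proxy, Observer, cleanup_one, ...)
   are illustrative, not TBB identifiers. */
#include <atomic>

namespace handoff_sketch {

struct Proxy;
struct Observer { std::atomic<Proxy*> proxy{nullptr}; };
struct Proxy    { Observer* observer = nullptr; };

// Plays the role of one iteration of the loop in observer_list::clear().
void cleanup_one( Proxy* p ) {
    Observer* obs = p->observer;
    // exchange(nullptr) stands in for __TBB_FetchAndStoreW(&obs->my_proxy, 0):
    // a null result means the observer side already claimed the proxy.
    if ( !obs || !obs->proxy.exchange(nullptr) )
        return;     // lost the race; the other side owns (or already freed) the proxy
    delete p;       // won the exchange, so deleting here cannot double-free
}

// Plays the role of the observer's deactivation path.
void release_observer( Observer& obs ) {
    if ( Proxy* p = obs.proxy.exchange(nullptr) )
        delete p;   // won the race; cleanup_one() will see null and skip it
}

} // namespace handoff_sketch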
task_group_context::~task_group_context () {
    if ( my_kind != isolated ) {
        generic_scheduler *s = (generic_scheduler*)my_owner;
        if ( governor::is_set(s) ) {
            // Local update of the context list
            uintptr_t local_count_snapshot = s->local_cancel_count;
            s->local_ctx_list_update = 1;
            __TBB_full_memory_fence();
            if ( s->nonlocal_ctx_list_update ) {
                spin_mutex::scoped_lock lock(s->context_list_mutex);
                my_node.my_prev->my_next = my_node.my_next;
                my_node.my_next->my_prev = my_node.my_prev;
                s->local_ctx_list_update = 0;
            }
            else {
                my_node.my_prev->my_next = my_node.my_next;
                my_node.my_next->my_prev = my_node.my_prev;
                __TBB_store_with_release( s->local_ctx_list_update, 0 );
                if ( local_count_snapshot != global_cancel_count ) {
                    // Another thread was propagating cancellation request when we removed
                    // ourselves from the list. We must ensure that it is not accessing us
                    // when this destructor finishes. We'll be able to acquire the lock
                    // below only after the other thread finishes with us.
                    spin_mutex::scoped_lock lock(s->context_list_mutex);
                }
            }
        }
        else {
            // Nonlocal update of the context list
            if ( __TBB_FetchAndStoreW(&my_kind, dying) == detached ) {
                my_node.my_prev->my_next = my_node.my_next;
                my_node.my_next->my_prev = my_node.my_prev;
            }
            else {
                __TBB_FetchAndAddW(&s->nonlocal_ctx_list_update, 1);
                spin_wait_until_eq( s->local_ctx_list_update, 0u );
                s->context_list_mutex.lock();
                my_node.my_prev->my_next = my_node.my_next;
                my_node.my_next->my_prev = my_node.my_prev;
                s->context_list_mutex.unlock();
                __TBB_FetchAndAddW(&s->nonlocal_ctx_list_update, -1);
            }
        }
    }
#if TBB_USE_DEBUG
    my_version_and_traits = 0xDeadBeef;
#endif /* TBB_USE_DEBUG */
    if ( my_exception )
        my_exception->destroy();
    if ( itt_caller != ITT_CALLER_NULL )
        ITT_STACK(caller_destroy, itt_caller);
}
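/* A minimal sketch (assumed names, not TBB's API) of the Dekker-style flag
   protocol the destructor above relies on: the owner thread announces a local
   list update, issues a full fence, and serializes through the mutex only if
   a nonlocal updater is active; a nonlocal updater announces itself, waits
   out any in-flight local update, then unlinks under the mutex. The seq_cst
   fence/RMW pairing guarantees at least one side sees the other's flag. The
   detached/dying kind exchange and the cancellation snapshot are omitted. */
#include <atomic>
#include <mutex>
#include <thread>

namespace ctx_list_sketch {

std::atomic<int> local_update{0};     // ~ s->local_ctx_list_update
std::atomic<int> nonlocal_update{0};  // ~ s->nonlocal_ctx_list_update
std::mutex       list_mutex;          // ~ s->context_list_mutex

void unlink_node() { /* unlink my_node from the doubly-linked list (elided) */ }

void local_remove() {                 // runs on the list's owner thread
    local_update.store(1, std::memory_order_relaxed);
    std::atomic_thread_fence(std::memory_order_seq_cst);   // ~ __TBB_full_memory_fence()
    if ( nonlocal_update.load(std::memory_order_relaxed) ) {
        std::lock_guard<std::mutex> lock(list_mutex);      // contended: serialize
        unlink_node();
        local_update.store(0, std::memory_order_relaxed);
    }
    else {
        unlink_node();                                     // fast path: no lock taken
        local_update.store(0, std::memory_order_release);  // ~ __TBB_store_with_release
    }
}

void nonlocal_remove() {              // runs on any other thread
    nonlocal_update.fetch_add(1);
    while ( local_update.load(std::memory_order_acquire) ) // ~ spin_wait_until_eq
        std::this_thread::yield();
    {
        std::lock_guard<std::mutex> lock(list_mutex);
        unlink_node();
    }
    nonlocal_update.fetch_sub(1);
}

} // namespace ctx_list_sketch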
void task_scheduler_observer_v3::observe( bool enable ) {
    if( enable ) {
        if( !my_proxy ) {
            my_proxy = new observer_proxy( *this );
            if ( !my_proxy->is_global() ) {
                // Local observer activation
                generic_scheduler* s = governor::local_scheduler_if_initialized();
#if __TBB_TASK_ARENA
                intptr_t tag = my_proxy->get_v6_observer()->my_context_tag;
                if( tag != interface6::task_scheduler_observer::implicit_tag ) {
                    // explicit arena
                    task_arena *a = reinterpret_cast<task_arena*>(tag);
                    a->check_init();
                    my_proxy->my_list = &a->my_arena->my_observers;
                } else
#endif /* __TBB_TASK_ARENA */
                {
                    if( !s )
                        s = governor::init_scheduler( (unsigned)task_scheduler_init::automatic, 0, true );
                    __TBB_ASSERT( __TBB_InitOnce::initialization_done(), NULL );
                    __TBB_ASSERT( s && s->my_arena, NULL );
                    my_proxy->my_list = &s->my_arena->my_observers;
                }
                my_proxy->my_list->insert(my_proxy);
                my_busy_count = 0;
                // Notify newly activated observer and other pending ones if it belongs to current arena
                if( s && &s->my_arena->my_observers == my_proxy->my_list )
                    my_proxy->my_list->notify_entry_observers( s->my_last_local_observer, s->is_worker() );
            } else {
                // Obsolete. Global observer activation
                if( !__TBB_InitOnce::initialization_done() )
                    DoOneTimeInitializations();
                my_busy_count = 0;
                my_proxy->my_list = &the_global_observer_list;
                my_proxy->my_list->insert(my_proxy);
                if( generic_scheduler* s = governor::local_scheduler_if_initialized() ) {
                    // Notify newly created observer of its own thread.
                    // Any other pending observers are notified too.
                    the_global_observer_list.notify_entry_observers( s->my_last_global_observer, s->is_worker() );
                }
            }
        }
    } else {
        // Make sure that possible concurrent proxy list cleanup does not conflict
        // with the observer destruction here.
        if ( observer_proxy* proxy = (observer_proxy*)__TBB_FetchAndStoreW(&my_proxy, 0) ) {
            // List destruction should not touch this proxy after we've won the above interlocked exchange.
            __TBB_ASSERT( proxy->my_observer == this, NULL );
            __TBB_ASSERT( is_alive(proxy->my_ref_count), "Observer's proxy died prematurely" );
            __TBB_ASSERT( proxy->my_ref_count >= 1, "Reference for observer is missing" );
            observer_list &list = *proxy->my_list;
            {
                // Ensure that none of the list walkers relies on observer pointer validity
                observer_list::scoped_lock lock(list.mutex(), /*is_writer=*/true);
                proxy->my_observer = NULL;
            }
            intptr_t trait = proxy->my_version == 6 ? interface6::task_scheduler_observer::v6_trait : 0;
            // Proxy may still be held by other threads (to track the last notified observer)
            list.remove_ref(proxy);
            while( my_busy_count )
                __TBB_Yield();
            store<relaxed>( my_busy_count, trait );
        }
    }
}
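/* For context, a sketch of a typical client of the machinery above, using the
   public classic-TBB observer API: derive from tbb::task_scheduler_observer,
   override the entry/exit callbacks, and toggle observation with observe().
   observe(true) drives the activation path of observe() above; the matching
   observe(false) in the destructor drives the deactivation/handoff path. The
   class name and the counter are illustrative only. */
#include <tbb/task_scheduler_observer.h>
#include <tbb/atomic.h>

class thread_counter : public tbb::task_scheduler_observer {
    tbb::atomic<int> my_count;
public:
    thread_counter() {
        my_count = 0;
        observe(true);              // activate: creates and registers the proxy
    }
    ~thread_counter() {
        observe(false);             // deactivate: wins or cedes the proxy via fetch-and-store
    }
    /*override*/ void on_scheduler_entry( bool /*is_worker*/ ) { ++my_count; }
    /*override*/ void on_scheduler_exit( bool /*is_worker*/ )  { --my_count; }
    int active_threads() const { return my_count; }
};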