task_group_context::~task_group_context () {
    if ( my_kind != isolated ) {
        generic_scheduler *s = (generic_scheduler*)my_owner;
        if ( governor::is_set(s) ) {
            // Local update of the context list
            uintptr_t local_count_snapshot = s->local_cancel_count;
            s->local_ctx_list_update = 1;
            __TBB_full_memory_fence();
            if ( s->nonlocal_ctx_list_update ) {
                spin_mutex::scoped_lock lock(s->context_list_mutex);
                my_node.my_prev->my_next = my_node.my_next;
                my_node.my_next->my_prev = my_node.my_prev;
                s->local_ctx_list_update = 0;
            }
            else {
                my_node.my_prev->my_next = my_node.my_next;
                my_node.my_next->my_prev = my_node.my_prev;
                __TBB_store_with_release( s->local_ctx_list_update, 0 );
                if ( local_count_snapshot != global_cancel_count ) {
                    // Another thread was propagating cancellation request when we removed
                    // ourselves from the list. We must ensure that it is not accessing us
                    // when this destructor finishes. We'll be able to acquire the lock
                    // below only after the other thread finishes with us.
                    spin_mutex::scoped_lock lock(s->context_list_mutex);
                }
            }
        }
        else {
            // Nonlocal update of the context list
            if ( __TBB_FetchAndStoreW(&my_kind, dying) == detached ) {
                my_node.my_prev->my_next = my_node.my_next;
                my_node.my_next->my_prev = my_node.my_prev;
            }
            else {
                __TBB_FetchAndAddW(&s->nonlocal_ctx_list_update, 1);
                spin_wait_until_eq( s->local_ctx_list_update, 0u );
                s->context_list_mutex.lock();
                my_node.my_prev->my_next = my_node.my_next;
                my_node.my_next->my_prev = my_node.my_prev;
                s->context_list_mutex.unlock();
                __TBB_FetchAndAddW(&s->nonlocal_ctx_list_update, -1);
            }
        }
    }
#if TBB_USE_DEBUG
    my_version_and_traits = 0xDeadBeef;
#endif /* TBB_USE_DEBUG */
    if ( my_exception )
        my_exception->destroy();
    if ( itt_caller != ITT_CALLER_NULL )
        ITT_STACK(caller_destroy, itt_caller);
}
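// Usage sketch (illustrative, not part of this file; assumes the public
// tbb::task_group_context API and C++11 lambdas): a bound context is linked
// into its owner thread's context list, so the destructor above must unlink
// it while coordinating with threads that may be concurrently walking that
// list to propagate cancellation. Isolated contexts skip the removal
// (my_kind == isolated).
#include "tbb/task.h"
#include "tbb/parallel_for.h"

void run_cancellable_work() {
    tbb::task_group_context ctx; // kind is 'bound' by default
    tbb::parallel_for( 0, 1000, []( int ) { /* some work */ }, ctx );
    // ctx.cancel_group_execution() could be called from another thread here
}   // ~task_group_context() runs the list-removal protocol shown above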
//! Try to acquire read lock on the given mutex
bool spin_rw_mutex_v3::internal_try_acquire_reader() {
    // for a reader: acquire if no active or waiting writers
    state_t s = state;
    if( !(s & (WRITER|WRITER_PENDING)) ) { // no writers
        state_t t = (state_t)__TBB_FetchAndAddW( &state, (intptr_t) ONE_READER );
        if( !( t&WRITER ) ) { // got the lock
            ITT_NOTIFY(sync_acquired, this);
            return true; // successfully stored increased number of readers
        }
        // writer got there first, undo the increment
        __TBB_FetchAndAddW( &state, -(intptr_t)ONE_READER );
    }
    return false;
}
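// Illustrative caller (an assumption, not library source): the public
// scoped_lock::try_acquire( m, /*write=*/false ) path dispatches to
// internal_try_acquire_reader() above. g_rw and g_shared_value are
// hypothetical names used only for this sketch.
#include "tbb/spin_rw_mutex.h"

tbb::spin_rw_mutex g_rw;     // hypothetical shared mutex
int g_shared_value;          // hypothetical data it protects

bool try_read_value( int& out ) {
    tbb::spin_rw_mutex::scoped_lock lock;
    if ( !lock.try_acquire( g_rw, /*write=*/false ) )
        return false;        // a writer holds or is waiting for the lock
    out = g_shared_value;
    return true;             // lock released when 'lock' leaves scope
}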
//! Upgrade reader to become a writer.
/** Returns true if the upgrade happened without re-acquiring the lock,
    and false otherwise (i.e. the lock had to be released and re-acquired). */
bool spin_rw_mutex_v3::internal_upgrade() {
    state_t s = state;
    __TBB_ASSERT( s & READERS, "invalid state before upgrade: no readers" );
    // Check and set writer-pending flag.
    // Required conditions: either no pending writers, or we are the only reader
    // (with multiple readers and a pending writer, another upgrade could have been requested)
    while( (s & READERS)==ONE_READER || !(s & WRITER_PENDING) ) {
        state_t old_s = s;
        if( (s=CAS(state, s | WRITER | WRITER_PENDING, s))==old_s ) {
            internal::atomic_backoff backoff;
            ITT_NOTIFY(sync_prepare, this);
            // the state should be 0...0111, i.e. 1 reader and a waiting writer;
            // both new readers and writers are blocked
            while( (state & READERS) != ONE_READER ) // more than 1 reader
                backoff.pause();
            __TBB_ASSERT( (state&(WRITER_PENDING|WRITER))==(WRITER_PENDING|WRITER), "invalid state when upgrading to writer" );
            // drop our reader count and the pending flag; the writer bit remains ours
            __TBB_FetchAndAddW( &state, -(intptr_t)(ONE_READER+WRITER_PENDING) );
            ITT_NOTIFY(sync_acquired, this);
            return true; // successfully upgraded
        }
    }
    // slow reacquire
    internal_release_reader();
    return internal_acquire_writer(); // always returns false
}
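// Illustrative caller of the upgrade path (an assumption, not library source):
// scoped_lock::upgrade_to_writer() forwards to internal_upgrade() above. A
// false return means the slow path ran (reader lock released, writer lock
// re-acquired), so anything read before the upgrade must be revalidated.
// g_mutex and g_counter are hypothetical names for this sketch.
#include "tbb/spin_rw_mutex.h"

tbb::spin_rw_mutex g_mutex;  // hypothetical
int g_counter;               // hypothetical

void increment_if_below_limit() {
    tbb::spin_rw_mutex::scoped_lock lock( g_mutex, /*write=*/false );
    if ( g_counter < 100 ) {
        if ( !lock.upgrade_to_writer() ) {
            // Lock was dropped and re-acquired: another thread may have
            // changed g_counter meanwhile, so re-check the condition.
            if ( g_counter >= 100 )
                return;
        }
        ++g_counter;
    }
}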
//! Acquire read lock on the given mutex.
void spin_rw_mutex_v3::internal_acquire_reader() {
    ITT_NOTIFY(sync_prepare, this);
    for( internal::atomic_backoff backoff;; backoff.pause() ) {
        state_t s = const_cast<volatile state_t&>(state); // ensure reloading
        if( !(s & (WRITER|WRITER_PENDING)) ) { // no writer or write requests
            state_t t = (state_t)__TBB_FetchAndAddW( &state, (intptr_t) ONE_READER );
            if( !( t&WRITER ) )
                break; // successfully stored increased number of readers
            // writer got there first, undo the increment
            __TBB_FetchAndAddW( &state, -(intptr_t)ONE_READER );
        }
    }
    ITT_NOTIFY(sync_acquired, this);
    __TBB_ASSERT( state & READERS, "invalid state of a read lock: no readers" );
}
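// Illustrative caller (an assumption, not library source): constructing a
// scoped_lock with write=false blocks in internal_acquire_reader() above
// until no writer holds or is waiting for the lock.
#include "tbb/spin_rw_mutex.h"

extern tbb::spin_rw_mutex g_rw;   // hypothetical, declared in the sketch above
extern int g_shared_value;

int read_value_blocking() {
    tbb::spin_rw_mutex::scoped_lock lock( g_rw, /*write=*/false );
    return g_shared_value;        // lock released when 'lock' leaves scope
}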
tbb::task* execute () {
    // Burn some cycles so that each leaf task has a nontrivial duration.
    volatile int anchor = 0;
    for ( int i = 0; i < NumIterations; ++i )
        anchor += i;
    __TBB_FetchAndAddW( g_LeavesExecuted + m_tid, 1 );
#if __TBB_TASK_PRIORITY
    ASSERT( !m_opts || (m_opts & Flog) || (!(m_opts & TestPreemption) ^ (m_tid == PreemptionActivatorId)), NULL );
    if ( (m_opts & TestPreemption) && g_LeavesExecuted[0] > P
         && group_priority() == tbb::priority_normal )
    {
        // Once enough leaves of the first tree have executed, the designated
        // activator thread changes its group's priority to trigger preemption.
        ASSERT( m_tid == PreemptionActivatorId, NULL );
        ASSERT( (PreemptionActivatorId == 1 ? High > tbb::priority_normal : Low < tbb::priority_normal), NULL );
        set_group_priority( PreemptionActivatorId == 1 ? High : Low );
    }
#endif /* __TBB_TASK_PRIORITY */
    return NULL;
}
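// For reference (an assumption about the test harness, not shown above): the
// group_priority()/set_group_priority() calls are members of tbb::task, from
// which this test task presumably derives. The same API can be used directly,
// roughly as sketched here, when TBB is built with __TBB_TASK_PRIORITY support.
#include "tbb/task.h"

void raise_own_group_priority() {
    // Raises the priority of the whole task group the current task belongs
    // to; normal-priority work may be preempted as a result.
    tbb::task::self().set_group_priority( tbb::priority_high );
}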
//! Downgrade writer to a reader
void spin_rw_mutex_v3::internal_downgrade() {
    ITT_NOTIFY(sync_releasing, this);
    // Add one reader and clear the writer bit in a single atomic RMW,
    // so there is no window in which the lock appears free.
    __TBB_FetchAndAddW( &state, (intptr_t)(ONE_READER-WRITER) );
    __TBB_ASSERT( state & READERS, "invalid state after downgrade: no readers" );
}
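// Illustrative caller (an assumption, not library source): a writer that has
// finished publishing can keep read access with no release/re-acquire gap via
// scoped_lock::downgrade_to_reader(), which forwards to internal_downgrade().
#include "tbb/spin_rw_mutex.h"

extern tbb::spin_rw_mutex g_rw;   // hypothetical, as in the earlier sketches
extern int g_shared_value;

int publish_and_read( int new_value ) {
    tbb::spin_rw_mutex::scoped_lock lock( g_rw, /*write=*/true );
    g_shared_value = new_value;    // exclusive access
    lock.downgrade_to_reader();    // atomically trade WRITER for ONE_READER
    return g_shared_value;         // shared access; no writer could intervene
}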