Example No. 1
//------------------------------------------------------------------------
// micro_queue
//------------------------------------------------------------------------
void micro_queue::push( const void* item, ticket k, concurrent_queue_base& base ) {
    // Clear the low-order bits that select the micro-queue; the remaining bits
    // form this queue's item sequence number.
    k &= -concurrent_queue_rep::n_queue;
    page* p = NULL;
    // Position of this item within its page (items_per_page is a power of two).
    size_t index = k/concurrent_queue_rep::n_queue & (base.items_per_page-1);
    if ( !index ) {
        // First slot of a new page: allocate the page up front, before waiting
        // for our turn, so the allocation does not serialize producers.
        size_t n = sizeof(page) + base.items_per_page*base.item_size;
        p = static_cast<page*>(operator new( n ));
        p->mask = 0;
        p->next = NULL;
    }
    {
        // The finalizer advances tail_counter to k+n_queue when this scope is
        // left, even if copy_item throws.
        push_finalizer finalizer( *this, k+concurrent_queue_rep::n_queue );
        // Wait until it is this ticket's turn to append.
        spin_wait_until_eq( tail_counter, k );
        if ( p ) {
            // Link the freshly allocated page at the end of the page list.
            spin_mutex::scoped_lock lock( page_mutex );
            if ( page* q = tail_page )
                q->next = p;
            else
                head_page = p; 
            tail_page = p;
        } else {
            p = tail_page;
        }
        base.copy_item( *p, index, item );
        // If no exception was thrown, mark item as present.
        p->mask |= uintptr(1)<<index;
    } 
}
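For context, here is a minimal, hedged usage sketch of the public tbb::concurrent_queue interface that internal code like micro_queue::push ultimately serves. It is not part of the listing above, and the exact mapping between this class and micro_queue depends on the TBB version; the sketch only shows the user-visible contract.

#include <tbb/concurrent_queue.h>
#include <cassert>

int main() {
    tbb::concurrent_queue<int> q;
    q.push( 42 );                       // producer side: internally becomes a ticketed push
    int value = 0;
    bool popped = q.try_pop( value );   // consumer side: internally becomes a ticketed pop
    assert( popped && value == 42 );
    return 0;
}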
Example No. 2
task_group_context::~task_group_context () {
    if ( my_kind != isolated ) {
        generic_scheduler *s = (generic_scheduler*)my_owner;
        if ( governor::is_set(s) ) {
            // Local update of the context list 
            uintptr_t local_count_snapshot = s->local_cancel_count;
            // Dekker-like protocol: publish the local update flag, then check
            // whether a nonlocal updater is active before touching the list.
            s->local_ctx_list_update = 1;
            // The full fence keeps the load of nonlocal_ctx_list_update below
            // from being hoisted above the store to local_ctx_list_update.
            __TBB_full_memory_fence();
            if ( s->nonlocal_ctx_list_update ) {
                spin_mutex::scoped_lock lock(s->context_list_mutex);
                my_node.my_prev->my_next = my_node.my_next;
                my_node.my_next->my_prev = my_node.my_prev;
                s->local_ctx_list_update = 0;
            }
            else {
                my_node.my_prev->my_next = my_node.my_next;
                my_node.my_next->my_prev = my_node.my_prev;
                __TBB_store_with_release( s->local_ctx_list_update, 0 );
                if ( local_count_snapshot != global_cancel_count ) {
                    // Another thread was propagating cancellation request when we removed
                    // ourselves from the list. We must ensure that it is not accessing us 
                    // when this destructor finishes. We'll be able to acquire the lock 
                    // below only after the other thread finishes with us.
                    spin_mutex::scoped_lock lock(s->context_list_mutex);
                }
            }
        }
        else {
            // Nonlocal update of the context list 
            if ( __TBB_FetchAndStoreW(&my_kind, dying) == detached ) {
                my_node.my_prev->my_next = my_node.my_next;
                my_node.my_next->my_prev = my_node.my_prev;
            }
            else {
                __TBB_FetchAndAddW(&s->nonlocal_ctx_list_update, 1);
                spin_wait_until_eq( s->local_ctx_list_update, 0u );
                s->context_list_mutex.lock();
                my_node.my_prev->my_next = my_node.my_next;
                my_node.my_next->my_prev = my_node.my_prev;
                s->context_list_mutex.unlock();
                __TBB_FetchAndAddW(&s->nonlocal_ctx_list_update, -1);
            }
        }
    }
#if TBB_USE_DEBUG
    my_version_and_traits = 0xDeadBeef;
#endif /* TBB_USE_DEBUG */
    if ( my_exception )
        my_exception->destroy();
    if (itt_caller != ITT_CALLER_NULL) ITT_STACK(caller_destroy, itt_caller);
}
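The local/nonlocal flag handshake above is a Dekker-style protocol. The following standalone C++11 sketch shows the same idea with std::atomic; it is illustrative only, and the names local_update, nonlocal_update, list_mutex, owner_remove_node and non_owner_remove_node are invented for the example, not TBB identifiers.

#include <atomic>
#include <mutex>

std::atomic<int> local_update(0);      // owner thread is editing its own list
std::atomic<int> nonlocal_update(0);   // number of non-owner editors
std::mutex list_mutex;

void owner_remove_node() {
    local_update.store( 1, std::memory_order_relaxed );
    // Full fence: the load of nonlocal_update must not be hoisted above the store.
    std::atomic_thread_fence( std::memory_order_seq_cst );
    if ( nonlocal_update.load( std::memory_order_relaxed ) ) {
        std::lock_guard<std::mutex> lock( list_mutex );
        // ... unlink the node under the lock ...
        local_update.store( 0, std::memory_order_relaxed );
    } else {
        // ... unlink the node without the lock ...
        local_update.store( 0, std::memory_order_release );
    }
}

void non_owner_remove_node() {
    nonlocal_update.fetch_add( 1 );
    while ( local_update.load( std::memory_order_acquire ) != 0 ) { /* spin */ }
    {
        std::lock_guard<std::mutex> lock( list_mutex );
        // ... unlink the node under the lock ...
    }
    nonlocal_update.fetch_sub( 1 );
}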
Example No. 3
void WINAPI destroy_condvar_using_event( condition_variable_using_event* cv_event )
{
    HANDLE my_event = cv_event->event;
    EnterCriticalSection( &cv_event->mutex );
    // Mark the condition variable as destroyed; NULL is not a valid HANDLE value.
    cv_event->event = NULL;
    if( cv_event->n_waiters>0 ) {
        // Drop the lock so the remaining waiters can finish, then wait for them to drain.
        LeaveCriticalSection( &cv_event->mutex );
        spin_wait_until_eq( cv_event->n_waiters, 0 );
        // make sure the last thread completes its access to cv
        EnterCriticalSection( &cv_event->mutex );
    }
    LeaveCriticalSection( &cv_event->mutex );
    CloseHandle( my_event );
}
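The spin on n_waiters only makes sense together with the waiter side. The following simplified sketch (not TBB's actual wait implementation; the struct and function names are invented for illustration) shows why a waiter that got past the NULL check keeps the destroyer from closing the event handle until it is done.

#include <windows.h>

struct condvar_event_sketch {
    CRITICAL_SECTION mutex;
    HANDLE event;            // set to NULL once destruction has started
    volatile LONG n_waiters;
};

bool sketch_wait( condvar_event_sketch* cv, DWORD timeout_ms ) {
    EnterCriticalSection( &cv->mutex );
    if ( cv->event == NULL ) {                 // destruction already started
        LeaveCriticalSection( &cv->mutex );
        return false;
    }
    InterlockedIncrement( &cv->n_waiters );    // destroyer must now wait for us to drain
    HANDLE ev = cv->event;
    LeaveCriticalSection( &cv->mutex );

    DWORD r = WaitForSingleObject( ev, timeout_ms );

    InterlockedDecrement( &cv->n_waiters );    // allows CloseHandle to proceed
    return r == WAIT_OBJECT_0;
}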
Example No. 4
bool micro_queue::pop( void* dst, ticket k, concurrent_queue_base& base ) {
    // Clear the low-order bits that select the micro-queue; the remaining bits
    // form this queue's item sequence number.
    k &= -concurrent_queue_rep::n_queue;
    // Wait until it is this ticket's turn to pop and a producer has advanced
    // the tail past it.
    spin_wait_until_eq( head_counter, k );
    spin_wait_while_eq( tail_counter, k );
    page& p = *head_page;
    __TBB_ASSERT( &p, NULL );
    // Position of this item within its page (items_per_page is a power of two).
    size_t index = k/concurrent_queue_rep::n_queue & (base.items_per_page-1);
    bool success = false; 
    {
        // The finalizer advances head_counter and, when this is the last slot
        // of the page, releases the page when this scope is left.
        pop_finalizer finalizer( *this, k+concurrent_queue_rep::n_queue, index==base.items_per_page-1 ? &p : NULL );
        // The mask bit is set by push only after the item was fully copied.
        if ( p.mask & uintptr(1)<<index ) {
            success = true;
            base.assign_and_destroy_item( dst, p, index );
        }
    }
    return success;
}
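The mask checks in push and pop implement a per-slot presence bit for exception safety: the bit is set only after the item was fully constructed, so a slot whose copy threw is simply skipped on pop. A tiny standalone sketch of the idea (not TBB code; the names are illustrative):

#include <cstdint>
#include <cstddef>

struct page_sketch {
    std::uintptr_t mask;     // bit i == 1  =>  slot i holds a fully constructed item
};

inline void mark_present( page_sketch& p, std::size_t index ) {
    p.mask |= std::uintptr_t(1) << index;    // done only after a successful copy
}

inline bool is_present( const page_sketch& p, std::size_t index ) {
    return ( p.mask >> index ) & 1u;         // pop consumes the slot only if this is set
}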
Example No. 5
task_group_context::~task_group_context () {
    if ( __TBB_load_relaxed(my_kind) == binding_completed ) {
        if ( governor::is_set(my_owner) ) {
            // Local update of the context list
            uintptr_t local_count_snapshot = my_owner->my_context_state_propagation_epoch;
            my_owner->my_local_ctx_list_update.store<relaxed>(1);
            // Prevent load of nonlocal update flag from being hoisted before the
            // store to local update flag.
            atomic_fence();
            if ( my_owner->my_nonlocal_ctx_list_update.load<relaxed>() ) {
                spin_mutex::scoped_lock lock(my_owner->my_context_list_mutex);
                my_node.my_prev->my_next = my_node.my_next;
                my_node.my_next->my_prev = my_node.my_prev;
                my_owner->my_local_ctx_list_update.store<relaxed>(0);
            }
            else {
                my_node.my_prev->my_next = my_node.my_next;
                my_node.my_next->my_prev = my_node.my_prev;
                // Release fence is necessary so that update of our neighbors in
                // the context list was committed when possible concurrent destroyer
                // proceeds after local update flag is reset by the following store.
                my_owner->my_local_ctx_list_update.store<release>(0);
                if ( local_count_snapshot != the_context_state_propagation_epoch ) {
                    // Another thread was propagating cancellation request when we removed
                    // ourselves from the list. We must ensure that it is not accessing us
                    // when this destructor finishes. We'll be able to acquire the lock
                    // below only after the other thread finishes with us.
                    spin_mutex::scoped_lock lock(my_owner->my_context_list_mutex);
                }
            }
        }
        else {
            // Nonlocal update of the context list
            // Synchronizes with generic_scheduler::cleanup_local_context_list()
            // TODO: evaluate and perhaps relax, or add some lock instead
            if ( internal::as_atomic(my_kind).fetch_and_store(dying) == detached ) {
                my_node.my_prev->my_next = my_node.my_next;
                my_node.my_next->my_prev = my_node.my_prev;
            }
            else {
                //TODO: evaluate and perhaps relax
                my_owner->my_nonlocal_ctx_list_update.fetch_and_increment<full_fence>();
                //TODO: evaluate and perhaps remove
                spin_wait_until_eq( my_owner->my_local_ctx_list_update, 0u );
                my_owner->my_context_list_mutex.lock();
                my_node.my_prev->my_next = my_node.my_next;
                my_node.my_next->my_prev = my_node.my_prev;
                my_owner->my_context_list_mutex.unlock();
                //TODO: evaluate and perhaps relax
                my_owner->my_nonlocal_ctx_list_update.fetch_and_decrement<full_fence>();
            }
        }
    }
#if __TBB_FP_CONTEXT
    internal::punned_cast<cpu_ctl_env*>(&my_cpu_ctl_env)->~cpu_ctl_env();
#endif
    poison_value(my_version_and_traits);
    if ( my_exception )
        my_exception->destroy();
    ITT_STACK(itt_caller != ITT_CALLER_NULL, caller_destroy, itt_caller);
}
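For reference, a hedged usage sketch of the public task_group_context object whose destructor is shown above (classic TBB headers assumed; the function name and data are invented for illustration). When the thread that created the context also destroys it, the destructor takes the "local update" branch.

#include <cstddef>
#include <tbb/task.h>
#include <tbb/parallel_for.h>
#include <tbb/blocked_range.h>

void scale_all( float* data, std::size_t n ) {
    tbb::task_group_context ctx;                 // bound to the surrounding context by default
    tbb::parallel_for(
        tbb::blocked_range<std::size_t>( 0, n ),
        [=]( const tbb::blocked_range<std::size_t>& r ) {
            for ( std::size_t i = r.begin(); i != r.end(); ++i )
                data[i] *= 2.0f;
        },
        tbb::auto_partitioner(),
        ctx );
}   // ~task_group_context() runs here, on the owner thread.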