Example #1
void* concurrent_vector_base::internal_push_back( size_type element_size, size_type& index ) {
    __TBB_ASSERT( sizeof(my_early_size)==sizeof(reference_count), NULL );
    //size_t tmp = __TBB_FetchAndIncrementWacquire(*(tbb::internal::reference_count*)&my_early_size);
    size_t tmp = __TBB_FetchAndIncrementWacquire((tbb::internal::reference_count*)&my_early_size);
    index = tmp;
    segment_index_t k_old = segment_index_of( tmp );
    size_type base = segment_base(k_old);
    helper::extend_segment_if_necessary(*this,k_old);
    segment_t& s = my_segment[k_old];
    void* array = s.array;
    if ( !array ) {
        // FIXME - consider factoring this out and share with internal_grow_by
        if ( base==tmp ) {
            __TBB_ASSERT( !s.array, NULL );
            size_t n = segment_size(k_old);
            array = NFS_Allocate( n, element_size, NULL );
            ITT_NOTIFY( sync_releasing, &s.array );
            s.array = array;
        } else {
            ITT_NOTIFY(sync_prepare, &s.array);
            spin_wait_while_eq( s.array, (void*)0 );
            ITT_NOTIFY(sync_acquired, &s.array);
            array = s.array;
        }
    }
    size_type j_begin = tmp-base;
    return (void*)((char*)array+element_size*j_begin);
}
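internal_push_back is the backend that tbb::concurrent_vector<T>::push_back delegates to. A minimal caller-side sketch, assuming only the public concurrent_vector interface:

#include <tbb/concurrent_vector.h>

int main() {
    tbb::concurrent_vector<int> v;
    // Each push_back claims its index through internal_push_back and may
    // trigger the segment-allocation or spin-wait paths shown above.
    for ( int i = 0; i < 100; ++i )
        v.push_back( i );
    return 0;
}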
Example #2
void concurrent_vector_base::internal_grow( const size_type start, size_type finish, size_type element_size, internal_array_op1 init ) {
    __TBB_ASSERT( start<finish, "start must be less than finish" );
    size_t tmp = start;
    do {
        segment_index_t k_old = segment_index_of( tmp );
        size_type base = segment_base(k_old);
        size_t n = segment_size(k_old);
        helper::extend_segment_if_necessary(*this,k_old);
        segment_t& s = my_segment[k_old];
        void* array = s.array;
        if ( !array ) {
            if ( base==tmp ) {
                __TBB_ASSERT( !s.array, NULL );
                array = NFS_Allocate( n, element_size, NULL );
                ITT_NOTIFY( sync_releasing, &s.array );
                s.array = array;
            } else {
                ITT_NOTIFY(sync_prepare, &s.array);
                spin_wait_while_eq( s.array, (void*)0 );
                ITT_NOTIFY(sync_acquired, &s.array);
                array = s.array;
            }
        }
        size_type j_begin = tmp-base;
        size_type j_end = n > finish-base ? finish-base : n;
        (*init)( (void*)((char*)array+element_size*j_begin), j_end-j_begin );
        tmp = base+j_end;
    } while( tmp<finish );
}
Example #3
void spin_mutex::scoped_lock::internal_acquire( spin_mutex& m ) {
    __TBB_ASSERT( !my_mutex, "already holding a lock on a spin_mutex" );
    ITT_NOTIFY(sync_prepare, &m);
    my_unlock_value = __TBB_LockByte(m.flag);
    my_mutex = &m;
    ITT_NOTIFY(sync_acquired, &m);
}
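internal_acquire is normally reached through the scoped_lock constructor. A minimal usage sketch, assuming the public tbb::spin_mutex interface:

#include <tbb/spin_mutex.h>

tbb::spin_mutex counter_mutex;
int counter = 0;

void increment() {
    // The constructor calls internal_acquire (sync_prepare / sync_acquired);
    // the destructor calls internal_release (sync_releasing).
    tbb::spin_mutex::scoped_lock lock( counter_mutex );
    ++counter;
}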
Example #4
/** Returns true if the upgrade happened without re-acquiring the lock, and false otherwise */
bool spin_rw_mutex_v3::internal_upgrade() 
{
    state_t s = state;
    __TBB_ASSERT( s & READERS, "invalid state before upgrade: no readers " );
    // check and set writer-pending flag
    // required conditions: either no pending writers, or we are the only reader
    // (with multiple readers and pending writer, another upgrade could have been requested)
    while( (s & READERS)==ONE_READER || !(s & WRITER_PENDING) ) {
        state_t old_s = s;
        if( (s=CAS(state, s | WRITER | WRITER_PENDING, s))==old_s ) {
            internal::atomic_backoff backoff;
            ITT_NOTIFY(sync_prepare, this);
            // the state should be 0...0111, i.e. 1 reader and waiting writer;
            // both new readers and writers are blocked
            while( (state & READERS) != ONE_READER ) // more than 1 reader
                backoff.pause(); 
            __TBB_ASSERT((state&(WRITER_PENDING|WRITER))==(WRITER_PENDING|WRITER),"invalid state when upgrading to writer");

            __TBB_FetchAndAddW( &state,  - (intptr_t)(ONE_READER+WRITER_PENDING));
            ITT_NOTIFY(sync_acquired, this);
            return true; // successfully upgraded
        }
    }
    // slow reacquire
    internal_release_reader();
    return internal_acquire_writer(); // always returns false
}
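The boolean result is what scoped_lock::upgrade_to_writer reports to callers: false means the lock was dropped and re-acquired, so any state read under the read lock must be revalidated. A hedged usage sketch, assuming the public tbb::spin_rw_mutex API:

#include <tbb/spin_rw_mutex.h>

tbb::spin_rw_mutex rw_mutex;

void read_then_maybe_write( bool need_write ) {
    tbb::spin_rw_mutex::scoped_lock lock( rw_mutex, /*write=*/false ); // reader
    if ( need_write ) {
        // upgrade_to_writer forwards to internal_upgrade
        if ( !lock.upgrade_to_writer() ) {
            // the lock was released in between; re-check shared state here
        }
        // the write lock is held from this point on
    }
}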
Example #5
void call_itt_notify_v5(int t, void *ptr) {
    switch (t) {
    case 0: ITT_NOTIFY(sync_prepare, ptr); break;
    case 1: ITT_NOTIFY(sync_cancel, ptr); break;
    case 2: ITT_NOTIFY(sync_acquired, ptr); break;
    case 3: ITT_NOTIFY(sync_releasing, ptr); break;
    }
}
void spin_mutex::scoped_lock::internal_release() {
    __TBB_ASSERT( my_mutex, "release on spin_mutex::scoped_lock that is not holding a lock" );

    ITT_NOTIFY(sync_releasing, my_mutex);
    __TBB_UnlockByte(my_mutex->flag);
    my_mutex = NULL;
}
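Across all of these examples the notifications come in a fixed order: sync_prepare before a potentially blocking wait, sync_acquired once ownership is obtained, sync_releasing just before it is given up. An illustrative sketch of that pattern on a toy spin lock (not TBB code; the ITT_NOTIFY stand-in below is a no-op when the real macro is unavailable):

#include <atomic>

#ifndef ITT_NOTIFY
#define ITT_NOTIFY(op, ptr) ((void)0)   // stand-in; the real macro forwards to the ITT API
#endif

struct toy_spin_lock {
    std::atomic<bool> locked{ false };
    void lock() {
        ITT_NOTIFY(sync_prepare, this);                                  // about to wait
        while ( locked.exchange( true, std::memory_order_acquire ) ) {}  // spin
        ITT_NOTIFY(sync_acquired, this);                                 // now the owner
    }
    void unlock() {
        ITT_NOTIFY(sync_releasing, this);                                // about to give it up
        locked.store( false, std::memory_order_release );
    }
};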
Example #7
//! Acquire write lock on the given mutex.
bool spin_rw_mutex_v3::internal_acquire_writer()
{
    ITT_NOTIFY(sync_prepare, this);
    for( internal::atomic_backoff backoff;;backoff.pause() ){
        state_t s = const_cast<volatile state_t&>(state); // ensure reloading
        if( !(s & BUSY) ) { // no readers, no writers
            if( CAS(state, WRITER, s)==s )
                break; // successfully stored writer flag
            backoff.reset(); // we could be very close to complete op.
        } else if( !(s & WRITER_PENDING) ) { // no pending writers
            __TBB_AtomicOR(&state, WRITER_PENDING);
        }
    }
    ITT_NOTIFY(sync_acquired, this);
    return false;
}
Example #8
//! Acquire read lock on given mutex.
void spin_rw_mutex_v3::internal_acquire_reader()
{
    ITT_NOTIFY(sync_prepare, this);
    for( internal::atomic_backoff backoff;;backoff.pause() ){
        state_t s = const_cast<volatile state_t&>(state); // ensure reloading
        if( !(s & (WRITER|WRITER_PENDING)) ) { // no writer or write requests
            state_t t = (state_t)__TBB_FetchAndAddW( &state, (intptr_t) ONE_READER );
            if( !( t&WRITER )) 
                break; // successfully stored increased number of readers
            // writer got there first, undo the increment
            __TBB_FetchAndAddW( &state, -(intptr_t)ONE_READER );
        }
    }

    ITT_NOTIFY(sync_acquired, this);
    __TBB_ASSERT( state & READERS, "invalid state of a read lock: no readers" );
}
Example #9
void spin_mutex::scoped_lock::internal_release() {
    __TBB_ASSERT( my_mutex, "release on spin_mutex::scoped_lock that is not holding a lock" );
    __TBB_ASSERT( !(my_unlock_value&1), "corrupted scoped_lock?" );

    ITT_NOTIFY(sync_releasing, my_mutex);
    __TBB_store_with_release(my_mutex->flag, static_cast<unsigned char>(my_unlock_value));
    my_mutex = NULL;
}
Example #10
void reader_writer_lock::start_read(scoped_lock_read *I) {
    ITT_NOTIFY(sync_prepare, this);
    I->next = reader_head.fetch_and_store(I);
    if (!I->next) { // first arriving reader in my group; set RFLAG, test writer flags
        // unblock and/or update statuses of non-blocking readers
        if (!(fetch_and_or(rdr_count_and_flags, RFLAG) & (WFLAG1+WFLAG2))) { // no writers
            unblock_readers();
        }
    }
    __TBB_ASSERT(I->status == waiting || I->status == active, "Lock requests should be waiting or active before blocking.");
    spin_wait_while_eq(I->status, waiting); // block
    if (I->next) {
        __TBB_ASSERT(I->next->status == waiting, NULL);
        rdr_count_and_flags += RC_INCR;
        I->next->status = active; // wake successor
    }
    ITT_NOTIFY(sync_acquired, this);
}
bool spin_mutex::scoped_lock::internal_try_acquire( spin_mutex& m ) {
    __TBB_ASSERT( !my_mutex, "already holding a lock on a spin_mutex" );
    bool result = bool( __TBB_TryLockByte(m.flag) );
    if( result ) {
        my_mutex = &m;
        ITT_NOTIFY(sync_acquired, &m);
    }
    return result;
}
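internal_try_acquire is the non-blocking counterpart of Example #3 and emits sync_acquired only on success. A minimal usage sketch, assuming the public tbb::spin_mutex interface:

#include <tbb/spin_mutex.h>

tbb::spin_mutex m;

bool try_do_work() {
    tbb::spin_mutex::scoped_lock lock;
    if ( !lock.try_acquire( m ) )   // forwards to internal_try_acquire
        return false;               // lock busy; the caller may back off and retry
    // ... critical section ...
    return true;                    // released by the scoped_lock destructor
}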
Example #12
//! Try to acquire write lock on the given mutex
bool spin_rw_mutex_v3::internal_try_acquire_writer()
{
    // for a writer: only possible to acquire if no active readers or writers
    state_t s = state;
    if( !(s & BUSY) ) // no readers, no writers; mask is 1..1101
        if( CAS(state, WRITER, s)==s ) {
            ITT_NOTIFY(sync_acquired, this);
            return true; // successfully stored writer flag
        }
    return false;
}
Example #13
bool reader_writer_lock::start_write(scoped_lock *I) {
    tbb_thread::id id = this_tbb_thread::get_id();
    scoped_lock *pred = NULL;
    if (I->status == waiting_nonblocking) {
        if ((pred = writer_tail.compare_and_swap(I, NULL)) != NULL) {
            delete I;
            return false;
        }
    }
    else {
        ITT_NOTIFY(sync_prepare, this);
        pred = writer_tail.fetch_and_store(I);
    }
    if (pred)
        pred->next = I;
    else {
        set_next_writer(I);
        if (I->status == waiting_nonblocking) {
            if (I->next) { // potentially more writers
                set_next_writer(I->next);
            }
            else { // no more writers
                writer_head.fetch_and_store(NULL);
                if (I != writer_tail.compare_and_swap(NULL, I)) { // an incoming writer is in the process of being added
                    spin_wait_while_eq(I->next, (scoped_lock *)NULL);  // wait for new writer to be added
                    __TBB_ASSERT(I->next, "There should be a node following the last writer.");
                    set_next_writer(I->next);
                }
            }
            delete I;
            return false;
        }
    }
    spin_wait_while_eq(I->status, waiting);
    ITT_NOTIFY(sync_acquired, this);
    my_current_writer = id;
    return true;
}
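start_write and start_read are driven by the scoped_lock and scoped_lock_read constructors. A hedged usage sketch, assuming the public tbb::reader_writer_lock interface:

#include <tbb/reader_writer_lock.h>

tbb::reader_writer_lock rw_lock;
int shared_value = 0;

void writer() {
    // The constructor calls start_write and blocks until this writer
    // reaches the head of the queue; the destructor calls end_write.
    tbb::reader_writer_lock::scoped_lock lock( rw_lock );
    ++shared_value;
}

void reader() {
    // scoped_lock_read goes through start_read (Example #10).
    tbb::reader_writer_lock::scoped_lock_read lock( rw_lock );
    int local = shared_value;
    (void)local;
}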
Example #14
//! Try to acquire read lock on the given mutex
bool spin_rw_mutex_v3::internal_try_acquire_reader()
{
    // for a reader: acquire if no active or waiting writers
    state_t s = state;
    if( !(s & (WRITER|WRITER_PENDING)) ) { // no writers
        state_t t = (state_t)__TBB_FetchAndAddW( &state, (intptr_t) ONE_READER );
        if( !( t&WRITER )) {  // got the lock
            ITT_NOTIFY(sync_acquired, this);
            return true; // successfully stored increased number of readers
        }
        // writer got there first, undo the increment
        __TBB_FetchAndAddW( &state, -(intptr_t)ONE_READER );
    }
    return false;
}
Example #15
// Tries to acquire the reader_writer_lock for read. This function does not block.
// Returns true if the read lock was acquired, false otherwise.
bool reader_writer_lock::try_lock_read() {
    if (is_current_writer()) { // recursive lock attempt
        return false;
    }
    else {
        if (rdr_count_and_flags.fetch_and_add(RC_INCR) & (WFLAG1+WFLAG2)) { // writers present
            rdr_count_and_flags -= RC_INCR;
            return false;
        }
        else { // no writers
            ITT_NOTIFY(sync_acquired, this);
            return true;
        }
    }
}
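Unlike the scoped forms above, the try_lock_read path pairs with an explicit unlock; a minimal sketch assuming the same tbb::reader_writer_lock interface:

#include <tbb/reader_writer_lock.h>

tbb::reader_writer_lock rw;

bool peek_shared_state() {
    if ( !rw.try_lock_read() )  // fails whenever a writer is present or pending
        return false;
    // ... read-only access ...
    rw.unlock();                // the reader path ends in end_read (Example #22)
    return true;
}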
Example #16
void reader_writer_lock::end_write(scoped_lock *I) {
    __TBB_ASSERT(I==writer_head, "Internal error: can't unlock a thread that is not holding the lock.");
    my_current_writer = tbb_thread::id();
    ITT_NOTIFY(sync_releasing, this);
    if (I->next) { // potentially more writers
        writer_head = I->next;
        writer_head->status = active;
    }
    else { // No more writers; clear writer flag, test reader interest flag
        __TBB_ASSERT(writer_head, NULL);
        if (fetch_and_and(rdr_count_and_flags, ~(WFLAG1+WFLAG2)) & RFLAG) {
            unblock_readers();
        }
        writer_head.fetch_and_store(NULL);
        if (I != writer_tail.compare_and_swap(NULL, I)) { // an incoming writer is in the process of being added
            spin_wait_while_eq(I->next, (scoped_lock *)NULL);  // wait for new writer to be added
            __TBB_ASSERT(I->next, "There should be a node following the last writer.");
            set_next_writer(I->next);
        }
    }
}
Example #17
//! Release writer lock on the given mutex
void spin_rw_mutex_v3::internal_release_writer()
{
    ITT_NOTIFY(sync_releasing, this);
    __TBB_AtomicAND( &state, READERS );
}
Example #18
//! Release read lock on the given mutex
void spin_rw_mutex_v3::internal_release_reader()
{
    __TBB_ASSERT( state & READERS, "invalid state of a read lock: no readers" );
    ITT_NOTIFY(sync_releasing, this); // release reader
    __TBB_FetchAndAddWrelease( &state,-(intptr_t)ONE_READER);
}
Example #19
//! Downgrade writer to a reader
void spin_rw_mutex_v3::internal_downgrade() {
    ITT_NOTIFY(sync_releasing, this);
    __TBB_FetchAndAddW( &state, (intptr_t)(ONE_READER-WRITER));
    __TBB_ASSERT( state & READERS, "invalid state after downgrade: no readers" );
}
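internal_downgrade backs scoped_lock::downgrade_to_reader. A hedged usage sketch, assuming the public tbb::spin_rw_mutex API:

#include <tbb/spin_rw_mutex.h>

tbb::spin_rw_mutex srw;

void publish_then_read() {
    tbb::spin_rw_mutex::scoped_lock lock( srw, /*write=*/true );
    // ... modify shared data under the write lock ...
    lock.downgrade_to_reader();   // atomically trades WRITER for ONE_READER
    // ... keep reading without allowing another writer to intervene ...
}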
Example #20
void* itt_load_pointer_with_acquire_v3( const void* src ) {
    void* result = __TBB_load_with_acquire(*static_cast<void*const*>(src));
    ITT_NOTIFY(sync_acquired, const_cast<void*>(src));
    return result;
}
Example #21
void itt_store_pointer_with_release_v3( void* dst, void* src ) {
    ITT_NOTIFY(sync_releasing, dst);
    __TBB_store_with_release(*static_cast<void**>(dst),src);
}
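Examples #20 and #21 together implement release/acquire pointer publication. A self-contained sketch of the same idea in standard C++, with std::atomic standing in for the __TBB_ load/store helpers:

#include <atomic>

struct config { int value; };

std::atomic<config*> g_config{ nullptr };

void publisher() {
    config* c = new config{ 42 };
    // analogous to itt_store_pointer_with_release_v3: writes made before the
    // release store are visible to whoever performs the matching acquire load
    g_config.store( c, std::memory_order_release );
}

void consumer() {
    // analogous to itt_load_pointer_with_acquire_v3
    if ( config* c = g_config.load( std::memory_order_acquire ) ) {
        int v = c->value;   // safe: the acquire pairs with the release above
        (void)v;
    }
}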
Example #22
void reader_writer_lock::end_read() {
    ITT_NOTIFY(sync_releasing, this);
    __TBB_ASSERT(rdr_count_and_flags >= RC_INCR, "unlock() called but no readers hold the lock.");
    rdr_count_and_flags -= RC_INCR;
}
Example #23
void arena::process( generic_scheduler& s ) {
    __TBB_ASSERT( is_alive(my_guard), NULL );
    __TBB_ASSERT( governor::is_set(&s), NULL );
    __TBB_ASSERT( !s.innermost_running_task, NULL );

    __TBB_ASSERT( my_num_slots != 1, NULL );
    // Start search for an empty slot from the one we occupied the last time
    unsigned index = s.arena_index < my_num_slots ? s.arena_index : s.random.get() % (my_num_slots - 1) + 1,
             end = index;
    __TBB_ASSERT( index != 0, "A worker cannot occupy slot 0" );
    __TBB_ASSERT( index < my_num_slots, NULL );

    // Find a vacant slot
    for ( ;; ) {
        if ( !slot[index].my_scheduler && __TBB_CompareAndSwapW( &slot[index].my_scheduler, (intptr_t)&s, 0 ) == 0 )
            break;
        if ( ++index == my_num_slots )
            index = 1;
        if ( index == end ) {
            // Likely this arena is already saturated
            if ( --my_num_threads_active == 0 )
                close_arena();
            return;
        }
    }
    ITT_NOTIFY(sync_acquired, &slot[index]);
    s.my_arena = this;
    s.arena_index = index;
    s.attach_mailbox( affinity_id(index+1) );

    slot[index].hint_for_push = index ^ unsigned(&s-(generic_scheduler*)NULL)>>16; // randomizer seed
    slot[index].hint_for_pop  = index; // initial value for round-robin

    unsigned new_limit = index + 1;
    unsigned old_limit = my_limit;
    while ( new_limit > old_limit ) {
        if ( my_limit.compare_and_swap(new_limit, old_limit) == old_limit )
            break;
        old_limit = my_limit;
    }

    for ( ;; ) {
        // Try to steal a task.
        // Passing the reference count is technically unnecessary in this context,
        // but omitting it here would add checks inside the function.
        __TBB_ASSERT( is_alive(my_guard), NULL );
        task* t = s.receive_or_steal_task( s.dummy_task->prefix().ref_count, /*return_if_no_work=*/true );
        if (t) {
            // A side effect of receive_or_steal_task is that innermost_running_task can be set.
            // But for the outermost dispatch loop of a worker it has to be NULL.
            s.innermost_running_task = NULL;
            s.local_wait_for_all(*s.dummy_task,t);
        }
        ++my_num_threads_leaving;
        __TBB_ASSERT ( slot[index].head == slot[index].tail, "Worker cannot leave arena while its task pool is not empty" );
        __TBB_ASSERT( slot[index].task_pool == EmptyTaskPool, "Empty task pool is not marked appropriately" );
        // Revalidate quitting condition
        // This check prevents relinquishing more than necessary workers because 
        // of the non-atomicity of the decision making procedure
        if ( num_workers_active() >= my_num_workers_allotted || !my_num_workers_requested )
            break;
        --my_num_threads_leaving;
        __TBB_ASSERT( !slot[0].my_scheduler || my_num_threads_active > 0, "Who requested more workers after the last one left the dispatch loop and the master's gone?" );
    }
#if __TBB_STATISTICS
    ++s.my_counters.arena_roundtrips;
    *slot[index].my_counters += s.my_counters;
    s.my_counters.reset();
#endif /* __TBB_STATISTICS */
    __TBB_store_with_release( slot[index].my_scheduler, (generic_scheduler*)NULL );
    s.inbox.detach();
    __TBB_ASSERT( s.inbox.is_idle_state(true), NULL );
    __TBB_ASSERT( !s.innermost_running_task, NULL );
    __TBB_ASSERT( is_alive(my_guard), NULL );
    // Decrementing my_num_threads_active first prevents extra workers from leaving
    // this arena prematurely, but can result in some workers returning back just
    // to repeat the escape attempt. If instead my_num_threads_leaving is decremented
    // first, the result is the opposite - premature leaving is allowed and gratuitous
    // return is prevented. Since such a race has any likelihood only when multiple
    // workers are in the stealing loop, and consequently there is a lack of parallel
    // work in this arena, we'd rather let them go out and try to get employment in
    // other arenas (before returning into this one again).
    --my_num_threads_leaving;
    if ( !--my_num_threads_active )
        close_arena();
}