void concurrent_vector_base::internal_grow( const size_type start, size_type finish, size_type element_size, internal_array_op1 init ) {
    __TBB_ASSERT( start<finish, "start must be less than finish" );
    // Initialize elements [start,finish), walking one segment at a time.
    size_t tmp = start;
    do {
        segment_index_t k_old = segment_index_of( tmp );
        size_type base = segment_base(k_old);
        size_t n = segment_size(k_old);
        helper::extend_segment_if_necessary(*this,k_old);
        segment_t& s = my_segment[k_old];
        void* array = s.array;
        if ( !array ) {
            if ( base==tmp ) {
                // This call covers the first element of the segment, so it allocates the segment.
                __TBB_ASSERT( !s.array, NULL );
                array = NFS_Allocate( n, element_size, NULL );
                ITT_NOTIFY( sync_releasing, &s.array );
                s.array = array;
            } else {
                // Another thread is allocating this segment; spin until it publishes s.array.
                ITT_NOTIFY(sync_prepare, &s.array);
                spin_wait_while_eq( s.array, (void*)0 );
                ITT_NOTIFY(sync_acquired, &s.array);
                array = s.array;
            }
        }
        size_type j_begin = tmp-base;
        size_type j_end = n > finish-base ? finish-base : n;
        (*init)( (void*)((char*)array+element_size*j_begin), j_end-j_begin );
        tmp = base+j_end;
    } while( tmp<finish );
}
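// --- Sketch (not part of the TBB sources): a minimal model of the segment math
// that internal_grow relies on. A concurrent_vector is a sequence of power-of-two
// segments, so segment_index_of/segment_base/segment_size reduce to bit tricks;
// the sketch_* names below are hypothetical and the in-tree definitions may
// differ in detail, but the arithmetic shown here is self-consistent.
#include <cassert>
#include <cstddef>

// Index of the segment holding element i; effectively integer log2(i|1),
// so segment 0 holds elements 0..1.
inline std::size_t sketch_segment_index_of( std::size_t i ) {
    std::size_t k = 0;
    for ( std::size_t v = i|1; v > 1; v >>= 1 ) ++k;
    return k;
}
// First element index covered by segment k: 0, 2, 4, 8, 16, ...
inline std::size_t sketch_segment_base( std::size_t k ) {
    return (std::size_t(1)<<k) & ~std::size_t(1);
}
// Number of elements in segment k: 2, 2, 4, 8, 16, ...
inline std::size_t sketch_segment_size( std::size_t k ) {
    return k==0 ? 2 : std::size_t(1)<<k;
}

inline void sketch_segment_math_demo() {
    // Element 5 lives in segment 2, which starts at index 4 and holds 4 elements.
    assert( sketch_segment_index_of(5)==2 );
    assert( sketch_segment_base(2)==4 );
    assert( sketch_segment_size(2)==4 );
    // Segments tile the index space: base(k)+size(k)==base(k+1).
    assert( sketch_segment_base(2)+sketch_segment_size(2)==sketch_segment_base(3) );
}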
void* concurrent_vector_base::internal_push_back( size_type element_size, size_type& index ) {
    __TBB_ASSERT( sizeof(my_early_size)==sizeof(reference_count), NULL );
    // Atomically claim the next index; the caller constructs the element in the returned slot.
    size_t tmp = __TBB_FetchAndIncrementWacquire((tbb::internal::reference_count*)&my_early_size);
    index = tmp;
    segment_index_t k_old = segment_index_of( tmp );
    size_type base = segment_base(k_old);
    helper::extend_segment_if_necessary(*this,k_old);
    segment_t& s = my_segment[k_old];
    void* array = s.array;
    if ( !array ) {
        // FIXME - consider factoring this out and share with internal_grow_by
        if ( base==tmp ) {
            // This thread claimed the first slot of the segment, so it allocates the segment.
            __TBB_ASSERT( !s.array, NULL );
            size_t n = segment_size(k_old);
            array = NFS_Allocate( n, element_size, NULL );
            ITT_NOTIFY( sync_releasing, &s.array );
            s.array = array;
        } else {
            // Another thread is allocating this segment; spin until it publishes s.array.
            ITT_NOTIFY(sync_prepare, &s.array);
            spin_wait_while_eq( s.array, (void*)0 );
            ITT_NOTIFY(sync_acquired, &s.array);
            array = s.array;
        }
    }
    size_type j_begin = tmp-base;
    return (void*)((char*)array+element_size*j_begin);
}
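// --- Sketch (not TBB code): the core pattern of internal_push_back, modeled with
// std::atomic. A slot is claimed with fetch-and-increment (the role played by
// __TBB_FetchAndIncrementWacquire above); the segment pointer then follows a
// publish-or-spin protocol. Simplified to a single segment; sketch_segment and
// sketch_claim_slot are hypothetical names introduced here.
#include <atomic>
#include <cstddef>

struct sketch_segment {
    std::atomic<void*> array{nullptr}; // plays the role of segment_t::array
};

void* sketch_claim_slot( std::atomic<std::size_t>& size, sketch_segment& s,
                         std::size_t base, std::size_t n, std::size_t element_size ) {
    // Claim the next index; acquire pairs with the release store that publishes the array.
    std::size_t idx = size.fetch_add(1, std::memory_order_acquire);
    void* a = s.array.load(std::memory_order_acquire);
    if ( !a ) {
        if ( idx==base ) {
            // We claimed the segment's first slot: allocate, then publish with release.
            a = ::operator new(n*element_size);
            s.array.store(a, std::memory_order_release);
        } else {
            // Another thread allocates; spin until the pointer is published.
            while ( !(a = s.array.load(std::memory_order_acquire)) ) {}
        }
    }
    return static_cast<char*>(a) + element_size*(idx-base);
}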
bool reader_writer_lock::start_write(scoped_lock *I) {
    tbb_thread::id id = this_tbb_thread::get_id();
    scoped_lock *pred = NULL;
    if (I->status == waiting_nonblocking) {
        if ((pred = writer_tail.compare_and_swap(I, NULL)) != NULL) {
            delete I;
            return false;
        }
    } else {
        ITT_NOTIFY(sync_prepare, this);
        pred = writer_tail.fetch_and_store(I);
    }
    if (pred)
        pred->next = I;
    else {
        set_next_writer(I);
        if (I->status == waiting_nonblocking) {
            if (I->next) { // potentially more writers
                set_next_writer(I->next);
            } else { // no more writers
                writer_head.fetch_and_store(NULL);
                if (I != writer_tail.compare_and_swap(NULL, I)) {
                    // an incoming writer is in the process of being added
                    spin_wait_while_eq(I->next, (scoped_lock *)NULL); // wait for new writer to be added
                    __TBB_ASSERT(I->next, "There should be a node following the last writer.");
                    set_next_writer(I->next);
                }
            }
            delete I;
            return false;
        }
    }
    spin_wait_while_eq(I->status, waiting);
    ITT_NOTIFY(sync_acquired, this);
    my_current_writer = id;
    return true;
}
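// --- Sketch (not TBB code): the queueing discipline of start_write, stripped of
// the non-blocking and ITT paths. It is the classic MCS-style enqueue: swap
// yourself into the tail; if there was a predecessor, link behind it and spin,
// otherwise you become the head immediately. sketch_writer_node and
// sketch_enqueue_writer are hypothetical names introduced for this model.
#include <atomic>

struct sketch_writer_node {
    std::atomic<sketch_writer_node*> next{nullptr};
    std::atomic<bool> active{false};
};

void sketch_enqueue_writer( std::atomic<sketch_writer_node*>& tail, sketch_writer_node* I ) {
    sketch_writer_node* pred = tail.exchange(I, std::memory_order_acq_rel);
    if ( pred )
        pred->next.store(I, std::memory_order_release);   // "pred->next = I" above
    else
        I->active.store(true, std::memory_order_release); // "set_next_writer(I)" above
    while ( !I->active.load(std::memory_order_acquire) ) {} // spin_wait_while_eq(I->status, waiting)
}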
bool micro_queue::pop( void* dst, ticket k, concurrent_queue_base& base ) {
    // Round the ticket down to a multiple of n_queue; items are striped across n_queue micro-queues.
    k &= -concurrent_queue_rep::n_queue;
    // Wait until this ticket reaches the head of the micro-queue,
    // then until the corresponding push has at least started (tail has moved past k).
    spin_wait_until_eq( head_counter, k );
    spin_wait_while_eq( tail_counter, k );
    page& p = *head_page;
    __TBB_ASSERT( &p, NULL );
    // Position of this item on the current page.
    size_t index = (k/concurrent_queue_rep::n_queue & base.items_per_page-1);
    bool success = false;
    {
        // The finalizer advances head_counter on scope exit, and frees the page
        // if this was its last item.
        pop_finalizer finalizer( *this, k+concurrent_queue_rep::n_queue, index==base.items_per_page-1 ? &p : NULL );
        // The mask bit records whether a valid item was actually constructed in this slot.
        if ( p.mask & uintptr(1)<<index ) {
            success = true;
            base.assign_and_destroy_item( dst, p, index );
        }
    }
    return success;
}
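// --- Sketch (not TBB code): the ticket arithmetic used by micro_queue::pop,
// with hypothetical values for concurrent_queue_rep::n_queue and
// concurrent_queue_base::items_per_page (both powers of two in TBB).
#include <cassert>
#include <cstddef>

const std::size_t sketch_n_queue = 8;
const std::size_t sketch_items_per_page = 32;

inline void sketch_ticket_math() {
    std::size_t k = 77;                  // raw ticket: the 78th item overall
    k &= ~(sketch_n_queue-1);            // same as "k &= -n_queue": round down -> 72
    assert( k==72 );
    std::size_t item  = k/sketch_n_queue;               // 10th item through this micro-queue
    std::size_t index = item & (sketch_items_per_page-1); // its slot on the current page
    assert( item==9 && index==9 );
}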
void reader_writer_lock::start_read(scoped_lock_read *I) {
    ITT_NOTIFY(sync_prepare, this);
    I->next = reader_head.fetch_and_store(I);
    if (!I->next) { // first arriving reader in my group; set RFLAG, test writer flags
        // unblock and/or update statuses of non-blocking readers
        if (!(fetch_and_or(rdr_count_and_flags, RFLAG) & (WFLAG1+WFLAG2))) { // no writers
            unblock_readers();
        }
    }
    __TBB_ASSERT(I->status == waiting || I->status == active, "Lock requests should be waiting or active before blocking.");
    spin_wait_while_eq(I->status, waiting); // block
    if (I->next) { // potentially more readers
        __TBB_ASSERT(I->next->status == waiting, NULL);
        rdr_count_and_flags += RC_INCR;
        I->next->status = active; // wake successor
    }
    ITT_NOTIFY(sync_acquired, this);
}
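// --- Sketch (not copied from reader_writer_lock.h): a plausible layout for the
// packed rdr_count_and_flags word used above. The concrete flag values are an
// assumption here; only the shape matters: two writer flags and a reader-interest
// flag in the low bits, with the reader count occupying the bits above them.
#include <atomic>
#include <cstdint>

const std::uintptr_t SKETCH_WFLAG1  = 0x1; // writer interested or acquiring
const std::uintptr_t SKETCH_WFLAG2  = 0x2; // writer holds the lock
const std::uintptr_t SKETCH_RFLAG   = 0x4; // reader(s) interested
const std::uintptr_t SKETCH_RC_INCR = 0x8; // one reader in the packed count

// First reader of a group announces interest and checks for writers,
// mirroring the fetch_and_or in start_read.
inline bool sketch_first_reader_may_enter( std::atomic<std::uintptr_t>& word ) {
    return ( word.fetch_or(SKETCH_RFLAG) & (SKETCH_WFLAG1|SKETCH_WFLAG2) ) == 0;
}

// Each reader that becomes active bumps the packed count,
// as in "rdr_count_and_flags += RC_INCR" above.
inline void sketch_add_active_reader( std::atomic<std::uintptr_t>& word ) {
    word.fetch_add(SKETCH_RC_INCR);
}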
void reader_writer_lock::end_write(scoped_lock *I) {
    __TBB_ASSERT(I==writer_head, "Internal error: can't unlock a thread that is not holding the lock.");
    my_current_writer = tbb_thread::id();
    ITT_NOTIFY(sync_releasing, this);
    if (I->next) { // potentially more writers
        writer_head = I->next;
        writer_head->status = active;
    } else { // No more writers; clear writer flag, test reader interest flag
        __TBB_ASSERT(writer_head, NULL);
        if (fetch_and_and(rdr_count_and_flags, ~(WFLAG1+WFLAG2)) & RFLAG) {
            unblock_readers();
        }
        writer_head.fetch_and_store(NULL);
        if (I != writer_tail.compare_and_swap(NULL, I)) {
            // an incoming writer is in the process of being added
            spin_wait_while_eq(I->next, (scoped_lock *)NULL); // wait for new writer to be added
            __TBB_ASSERT(I->next, "There should be a node following the last writer.");
            set_next_writer(I->next);
        }
    }
}
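// --- Usage sketch: how client code reaches the start_write/end_write and
// start_read paths above, assuming the classic tbb/reader_writer_lock.h public
// interface (scoped_lock for writers, scoped_lock_read for readers). The
// sketch_* functions and shared_value are illustrative names, not TBB's.
#include "tbb/reader_writer_lock.h"

tbb::reader_writer_lock sketch_rw_lock;
int sketch_shared_value;

void sketch_writer() {
    // Constructing a scoped_lock routes through start_write; its destructor
    // routes through end_write.
    tbb::reader_writer_lock::scoped_lock lock(sketch_rw_lock);
    ++sketch_shared_value;
}

void sketch_reader() {
    // Constructing a scoped_lock_read routes through start_read; readers in the
    // same group admit one another via the successor chain shown above.
    tbb::reader_writer_lock::scoped_lock_read lock(sketch_rw_lock);
    (void)sketch_shared_value;
}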