//! Wake readers that queued up while a reader-interest window (RFLAG) was open.
// Precondition: RFLAG is set (a reader announced interest) and at least one
// reader is parked on reader_head.
void reader_writer_lock::unblock_readers() {
    // clear rdr interest flag, increment rdr count
    // One atomic add does both: subtracts RFLAG (known set, per the assert)
    // and credits one reader via RC_INCR.
    __TBB_ASSERT(rdr_count_and_flags&RFLAG, NULL);
    rdr_count_and_flags += RC_INCR-RFLAG;
    __TBB_ASSERT(rdr_count_and_flags >= RC_INCR, NULL);
    // indicate clear of window
    // If a writer has registered intent (WFLAG1) but has not yet observed the
    // window closed (WFLAG2 clear), set WFLAG2 on its behalf; set_next_writer
    // spins on WFLAG2 before waiting for the reader count to drain.
    if (rdr_count_and_flags & WFLAG1 && !(rdr_count_and_flags & WFLAG2)) {
        __TBB_AtomicOR(&rdr_count_and_flags, WFLAG2);
    }
    // unblock waiting readers
    // Atomically detach the entire waiting-reader list and activate its head.
    // NOTE(review): presumably the head reader wakes any readers chained
    // behind it — confirm against the reader-side acquire path.
    scoped_lock_read *head = reader_head.fetch_and_store(NULL);
    __TBB_ASSERT(head, NULL);
    __TBB_ASSERT(head->status == waiting, NULL);
    head->status = active;
}
//! Acquire write lock on the given mutex. bool spin_rw_mutex_v3::internal_acquire_writer() { ITT_NOTIFY(sync_prepare, this); for( internal::atomic_backoff backoff;;backoff.pause() ){ state_t s = const_cast<volatile state_t&>(state); // ensure reloading if( !(s & BUSY) ) { // no readers, no writers if( CAS(state, WRITER, s)==s ) break; // successfully stored writer flag backoff.reset(); // we could be very close to complete op. } else if( !(s & WRITER_PENDING) ) { // no pending writers __TBB_AtomicOR(&state, WRITER_PENDING); } } ITT_NOTIFY(sync_acquired, this); return false; }
//! Install W as the pending writer and, unless W is a failed try-acquire,
//! block until all readers have drained, then mark W active.
void reader_writer_lock::set_next_writer(scoped_lock *W) {
    writer_head = W;
    if (W->status == waiting_nonblocking) {
        // Try-acquire path: succeed only if the lock word is exactly 0
        // (no readers, no reader interest, no writer), claiming both writer
        // flags in a single CAS.
        // NOTE(review): on CAS failure W stays waiting_nonblocking — the
        // caller presumably reports the try-acquire as failed; confirm.
        if (rdr_count_and_flags.compare_and_swap(WFLAG1+WFLAG2, 0) == 0) {
            W->status = active;
        }
    } else {
        // Blocking path: announce writer intent by setting WFLAG1 and examine
        // the prior value for a reader in the interest window.
        if (fetch_and_or(rdr_count_and_flags, WFLAG1) & RFLAG) { // reader present
            // A reader raced into the window; wait for the reader side to
            // acknowledge closure by setting WFLAG2 (done in unblock_readers).
            spin_wait_until_and(rdr_count_and_flags, WFLAG2); // block until readers set WFLAG2
        } else { // no reader in timing window
            // Window is clear: this writer closes it itself.
            __TBB_AtomicOR(&rdr_count_and_flags, WFLAG2);
        }
        // Wait for all active readers to release (count drops below RC_INCR),
        // then hand the lock to W.
        spin_wait_while_geq(rdr_count_and_flags, RC_INCR); // block until readers finish
        W->status = active;
    }
}