/* Exercise atomic_fetch_and and atomic_fetch_and_explicit with each
   memory order, checking the value left in the global V after each
   operation (the return value is checked by test_fetch_and).  */
void test_and ()
{
  v = init;
  /* AND with 0 clears every bit.  */
  atomic_fetch_and (&v, 0);
  if (v != 0)
    abort ();
  v = init;
  /* AND with the current value leaves it unchanged.  */
  atomic_fetch_and_explicit (&v, init, memory_order_consume);
  if (v != init)
    abort ();
  atomic_fetch_and (&v, 0);
  if (v != 0)
    abort ();
  /* v is 0 here, so ~v is all-ones; masking all-ones with init
     yields init for any value of init.  */
  v = ~v;
  atomic_fetch_and_explicit (&v, init, memory_order_release);
  if (v != init)
    abort ();
  atomic_fetch_and (&v, 0);
  if (v != 0)
    abort ();
  v = ~v;
  atomic_fetch_and_explicit (&v, 0, memory_order_seq_cst);
  if (v != 0)
    abort ();
}
// Acquire the read lock, blocking until it becomes available or until
// *abs_timeout_or_null expires (a null timeout means wait forever).
// Returns 0 on success; EDEADLK if the caller already holds the write lock;
// ETIMEDOUT on timeout; EAGAIN is propagated from tryrdlock to the caller
// rather than retried; any nonzero result of check_timespec is returned as-is.
static int __pthread_rwlock_timedrdlock(pthread_rwlock_internal_t* rwlock,
                                        const timespec* abs_timeout_or_null) {
  // A thread already holding the write lock would wait on itself forever.
  if (atomic_load_explicit(&rwlock->writer_tid, memory_order_relaxed) == __get_thread()->tid) {
    return EDEADLK;
  }
  while (true) {
    int result = __pthread_rwlock_tryrdlock(rwlock);
    if (result == 0 || result == EAGAIN) {
      return result;
    }
    result = check_timespec(abs_timeout_or_null);
    if (result != 0) {
      return result;
    }

    int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
    if (__can_acquire_read_lock(old_state, rwlock->writer_nonrecursive_preferred)) {
      // The lock was released between tryrdlock and this load; retry at once.
      continue;
    }

    rwlock->pending_lock.lock();
    rwlock->pending_reader_count++;

    // We rely on the fact that all atomic exchange operations on the same object (here it is
    // rwlock->state) always appear to occur in a single total order. If the pending flag is added
    // before unlocking, the unlocking thread will wakeup the waiter. Otherwise, we will see the
    // state is unlocked and will not wait anymore.
    old_state = atomic_fetch_or_explicit(&rwlock->state, STATE_HAVE_PENDING_READERS_FLAG,
                                         memory_order_relaxed);
    // Snapshot the wakeup serial while still holding pending_lock, so a wakeup
    // that happens after we drop the lock changes the serial and the futex
    // wait below returns immediately instead of missing it.
    int old_serial = rwlock->pending_reader_wakeup_serial;
    rwlock->pending_lock.unlock();

    int futex_result = 0;
    if (!__can_acquire_read_lock(old_state, rwlock->writer_nonrecursive_preferred)) {
      futex_result = __futex_wait_ex(&rwlock->pending_reader_wakeup_serial, rwlock->pshared,
                                     old_serial, true, abs_timeout_or_null);
    }

    rwlock->pending_lock.lock();
    rwlock->pending_reader_count--;
    if (rwlock->pending_reader_count == 0) {
      // Last pending reader gone: clear the flag so unlockers stop signalling.
      atomic_fetch_and_explicit(&rwlock->state, ~STATE_HAVE_PENDING_READERS_FLAG,
                                memory_order_relaxed);
    }
    rwlock->pending_lock.unlock();

    if (futex_result == -ETIMEDOUT) {
      return ETIMEDOUT;
    }
  }
}
/* Exercise the return value of atomic_fetch_and / atomic_fetch_and_explicit
   with each memory order; each call must return the previous value of V.
   NOTE(review): the release and acq_rel checks after "v = ~v" expect the old
   value ~0 to compare equal to init, so this test assumes init has all bits
   set -- confirm against the definition of init elsewhere in the file.  */
void test_fetch_and ()
{
  v = init;
  if (atomic_fetch_and_explicit (&v, 0, memory_order_relaxed) != init)
    abort ();
  /* v is now 0; ANDing anything in leaves it 0.  */
  if (atomic_fetch_and_explicit (&v, init, memory_order_consume) != 0)
    abort ();
  if (atomic_fetch_and_explicit (&v, 0, memory_order_acquire) != 0)
    abort ();
  /* v == 0, so ~v is all-ones.  */
  v = ~v;
  if (atomic_fetch_and_explicit (&v, init, memory_order_release) != init)
    abort ();
  /* v is now init; the AND with 0 returns init and clears v.  */
  if (atomic_fetch_and_explicit (&v, 0, memory_order_acq_rel) != init)
    abort ();
  if (atomic_fetch_and_explicit (&v, 0, memory_order_seq_cst) != 0)
    abort ();
  if (atomic_fetch_and (&v, 0) != 0)
    abort ();
}
// Acquire the write lock, blocking until it becomes available or until
// *abs_timeout_or_null expires (a null timeout means wait forever).
// Returns 0 on success; EDEADLK if the caller already holds the write lock;
// ETIMEDOUT on timeout; any nonzero result of check_timespec is returned
// as-is.  Mirrors __pthread_rwlock_timedrdlock with the writer-side state.
static int __pthread_rwlock_timedwrlock(pthread_rwlock_internal_t* rwlock,
                                        const timespec* abs_timeout_or_null) {
  // A thread already holding the write lock would wait on itself forever.
  if (atomic_load_explicit(&rwlock->writer_tid, memory_order_relaxed) == __get_thread()->tid) {
    return EDEADLK;
  }
  while (true) {
    int result = __pthread_rwlock_trywrlock(rwlock);
    if (result == 0) {
      return result;
    }
    result = check_timespec(abs_timeout_or_null);
    if (result != 0) {
      return result;
    }

    int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
    if (__can_acquire_write_lock(old_state)) {
      // The lock was released between trywrlock and this load; retry at once.
      continue;
    }

    rwlock->pending_lock.lock();
    rwlock->pending_writer_count++;

    // Setting the pending flag and reading old_state happen in the single
    // total order of RMW operations on rwlock->state: either the unlocker
    // sees the flag and wakes us, or we see the unlocked state and skip the
    // futex wait below (see the comment in __pthread_rwlock_timedrdlock).
    old_state = atomic_fetch_or_explicit(&rwlock->state, STATE_HAVE_PENDING_WRITERS_FLAG,
                                         memory_order_relaxed);
    // Snapshot the wakeup serial under pending_lock so a wakeup racing with
    // our futex wait makes the wait return immediately instead of being lost.
    int old_serial = rwlock->pending_writer_wakeup_serial;
    rwlock->pending_lock.unlock();

    int futex_result = 0;
    if (!__can_acquire_write_lock(old_state)) {
      futex_result = __futex_wait_ex(&rwlock->pending_writer_wakeup_serial, rwlock->pshared,
                                     old_serial, true, abs_timeout_or_null);
    }

    rwlock->pending_lock.lock();
    rwlock->pending_writer_count--;
    if (rwlock->pending_writer_count == 0) {
      // Last pending writer gone: clear the flag so unlockers stop signalling.
      atomic_fetch_and_explicit(&rwlock->state, ~STATE_HAVE_PENDING_WRITERS_FLAG,
                                memory_order_relaxed);
    }
    rwlock->pending_lock.unlock();

    if (futex_result == -ETIMEDOUT) {
      return ETIMEDOUT;
    }
  }
}
/* Mark the target thread as cancelled, and if it is currently blocked on a
   wait address, clear that address's low bit and signal it so the thread
   wakes up and observes the cancellation. */
void vlc_cancel (vlc_thread_t thread_id)
{
    /* Set the kill flag first so the target sees it once woken. */
    atomic_store(&thread_id->killed, true);

    vlc_mutex_lock(&thread_id->wait.lock);
    atomic_int *wait_addr = thread_id->wait.addr;
    if (wait_addr != NULL)
    {
        /* -2 == ~1: keep every bit except bit 0 of the wait word,
           then broadcast on the address to wake the waiter(s). */
        atomic_fetch_and_explicit(wait_addr, -2, memory_order_relaxed);
        vlc_addr_broadcast(wait_addr);
    }
    vlc_mutex_unlock(&thread_id->wait.lock);
}
// Release a read or write lock held on the rwlock.  Returns 0 on success,
// or EPERM when the caller holds neither a read lock nor the write lock
// (including a write lock owned by a different thread).
int pthread_rwlock_unlock(pthread_rwlock_t* rwlock_interface) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
  if (__state_owned_by_writer(old_state)) {
    // Only the thread that took the write lock may release it.
    if (atomic_load_explicit(&rwlock->writer_tid, memory_order_relaxed) != __get_thread()->tid) {
      return EPERM;
    }
    atomic_store_explicit(&rwlock->writer_tid, 0, memory_order_relaxed);
    // Release ordering on the state change publishes the protected data to
    // the next acquirer.
    old_state = atomic_fetch_and_explicit(&rwlock->state, ~STATE_OWNED_BY_WRITER_FLAG,
                                          memory_order_release);
    if (!__state_have_pending_readers_or_writers(old_state)) {
      return 0;
    }
  } else if (__state_owned_by_readers(old_state)) {
    old_state = atomic_fetch_sub_explicit(&rwlock->state, STATE_READER_COUNT_CHANGE_STEP,
                                          memory_order_release);
    // Only the last reader leaving needs to wake anyone; earlier readers
    // return here, as does any reader when nobody is pending.
    if (!__state_is_last_reader(old_state) || !__state_have_pending_readers_or_writers(old_state)) {
      return 0;
    }
  } else {
    // Not locked at all: nothing for this thread to release.
    return EPERM;
  }

  // Wake up pending readers or writers.
  rwlock->pending_lock.lock();
  if (rwlock->pending_writer_count != 0) {
    // Writers are served first: bump the serial and wake exactly one.
    rwlock->pending_writer_wakeup_serial++;
    rwlock->pending_lock.unlock();
    __futex_wake_ex(&rwlock->pending_writer_wakeup_serial, rwlock->pshared, 1);
  } else if (rwlock->pending_reader_count != 0) {
    // No writers waiting: wake every pending reader.
    rwlock->pending_reader_wakeup_serial++;
    rwlock->pending_lock.unlock();
    __futex_wake_ex(&rwlock->pending_reader_wakeup_serial, rwlock->pshared, INT_MAX);
  } else {
    // It happens when waiters are woken up by timeout.
    rwlock->pending_lock.unlock();
  }
  return 0;
}
// atomic_fetch_and returns the *previous* value and stores old & arg.
TEST(stdatomic, atomic_fetch_and) {
  atomic_int i = ATOMIC_VAR_INIT(0x123);
  // Old value 0x123 is returned; i becomes 0x123 & 0x00f == 0x003.
  ASSERT_EQ(0x123, atomic_fetch_and(&i, 0x00f));
  // Old value 0x003 is returned; i becomes 0x003 & 0x002 == 0x002.
  ASSERT_EQ(0x003, atomic_fetch_and_explicit(&i, 0x2, memory_order_relaxed));
  ASSERT_EQ(0x002, atomic_load(&i));
}