int pthread_spin_unlock (pthread_spinlock_t *lock) { /* CONCURRENCY NOTES: The atomic_exchange_rel synchronizes-with the atomic_exchange_acq in pthread_spin_lock. On hppa we must not use a plain `stw` to reset the guard lock. This has to do with the kernel compare-and-swap helper that is used to implement all of the atomic operations. The kernel CAS helper uses its own internal locks and that means that to create a true happens-before relationship between any two threads, the second thread must observe the internal lock having a value of 0 (it must attempt to take the lock with ldcw). This creates the ordering required for a second thread to observe the effects of the RMW of the kernel CAS helper in any other thread. Therefore if a variable is used in an atomic macro it must always be manipulated with atomic macros in order for memory ordering rules to be preserved. */ atomic_exchange_rel (lock, 0); return 0; }
int pthread_spin_unlock (pthread_spinlock_t *lock) { /* Release the spinlock.  The LWS-CAS operation on hppa is a synthetic atomic operation that doesn't provide the type of coherency that we need.  Therefore we force that coherency by using LWS-CAS again (via atomic_exchange_rel) to write the zero that releases the lock, rather than a plain store.  Always returns 0.  */ atomic_exchange_rel (lock, 0); return 0; }
/* Release the low-level lock at FUTEX and, if any waiters were recorded
   in it, wake one of them up.  Always returns 0.  */
int
lll_unlock_wake_cb (int *futex)
{
  /* Atomically reset the futex word to "unlocked" with release ordering,
     keeping the previous value so we can tell whether waiters exist.  */
  int former = atomic_exchange_rel (futex, 0);

  /* A value greater than 1 means at least one thread is blocked on the
     futex; the contended case is expected to be rare.  */
  if (__builtin_expect (former > 1, 0))
    lll_futex_wake (futex, 1);

  return 0;
}
int pthread_spin_unlock (pthread_spinlock_t *lock) { /* Release the spinlock; always returns 0.  */ #ifdef __tilegx__ /* Use exchange() to bypass the write buffer.  */ atomic_exchange_rel (lock, 0); #else /* On other tile variants a full barrier followed by a plain store is used: the barrier orders the critical section's accesses before the store that publishes the unlocked state.  */ atomic_full_barrier (); *lock = 0; #endif return 0; }