int
libat_compare_exchange_16 (U_16 *mptr, U_16 *eptr, U_16 newval,
                           int smodel, int fmodel __attribute__((unused)))
{
  if (((smodel) == 0))
    return __atomic_compare_exchange_n (mptr, eptr, newval, 0, 0, 0);
  else if (((smodel) != 5))
    return __atomic_compare_exchange_n (mptr, eptr, newval, 0, 4, 0);
  else
    return __atomic_compare_exchange_n (mptr, eptr, newval, 0, 5, 0);
}
void
test_atomic_bool (_Atomic _Bool *a)
{
  enum { SEQ_CST = __ATOMIC_SEQ_CST };

  __atomic_fetch_add (a, 1, SEQ_CST); /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_fetch_add." } */
  __atomic_fetch_sub (a, 1, SEQ_CST); /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_fetch_sub." } */
  __atomic_fetch_and (a, 1, SEQ_CST); /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_fetch_and." } */
  __atomic_fetch_xor (a, 1, SEQ_CST); /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_fetch_xor." } */
  __atomic_fetch_or (a, 1, SEQ_CST); /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_fetch_or." } */
  __atomic_fetch_nand (a, 1, SEQ_CST); /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_fetch_nand." } */

  __atomic_add_fetch (a, 1, SEQ_CST); /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_add_fetch." } */
  __atomic_sub_fetch (a, 1, SEQ_CST); /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_sub_fetch." } */
  __atomic_and_fetch (a, 1, SEQ_CST); /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_and_fetch." } */
  __atomic_xor_fetch (a, 1, SEQ_CST); /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_xor_fetch." } */
  __atomic_or_fetch (a, 1, SEQ_CST); /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_or_fetch." } */
  __atomic_nand_fetch (a, 1, SEQ_CST); /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_nand_fetch." } */

  /* The following are valid and must be accepted.  */
  _Bool val = 0, ret = 0;
  __atomic_exchange (a, &val, &ret, SEQ_CST);
  __atomic_exchange_n (a, val, SEQ_CST);
  __atomic_compare_exchange (a, &val, &ret, !1, SEQ_CST, SEQ_CST);
  __atomic_compare_exchange_n (a, &val, ret, !1, SEQ_CST, SEQ_CST);
  __atomic_test_and_set (a, SEQ_CST);
  __atomic_clear (a, SEQ_CST);
}
/**
 * Get a tcb and initialize it with the entry and entry_arg.
 *
 * @param entry routine to call to start thread
 * @param entry_arg is the argument to pass to entry
 *
 * Return AC_NULL if an error, i.e. none available
 */
STATIC tcb_x86* get_tcb(void*(*entry)(void*), void* entry_arg) {
  tcb_x86* ptcb;

  // There must always be at least one ac_threads
  ac_debug_assert(pthreads != AC_NULL);

  // Search all of the ac_threads for an empty tcb
  ac_threads* pcur = pthreads;
  do {
    // Find an empty tcb
    for (ac_u32 i = 0; i < pcur->max_count; i++) {
      ptcb = &pcur->tcbs[i];
      ac_u32 empty = AC_THREAD_ID_EMPTY;
      ac_s32* pthread_id = &ptcb->thread_id;
      ac_bool ok = __atomic_compare_exchange_n(pthread_id, &empty,
          AC_THREAD_ID_STARTING, AC_TRUE, __ATOMIC_RELEASE, __ATOMIC_ACQUIRE);
      if (ok) {
        // Found an empty tcb, initialize and return it
        tcb_init(ptcb, i, entry, entry_arg);
        return ptcb;
      }
    }
    pcur = pcur->pnext;
  } while (pcur != pthreads);

  // No empty tcbs
  return AC_NULL;
}
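/*
 * A minimal sketch of a release counterpart (not in the original source):
 * because acquirers claim a slot with the compare-exchange in get_tcb(),
 * giving the slot back only needs a release store of AC_THREAD_ID_EMPTY.
 * The helper name release_tcb is hypothetical.
 */
STATIC void release_tcb(tcb_x86* ptcb) {
  __atomic_store_n(&ptcb->thread_id, AC_THREAD_ID_EMPTY, __ATOMIC_RELEASE);
}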
/**
 * Allocate size bytes
 *
 * @param: size is the number of bytes to allocate
 *
 * @return: pointer to the memory
 */
void* ac_malloc(ac_size_t size) {
  ac_size_t cur_idx;
  ac_size_t next_idx;
  ac_bool ok;

  // We must return AC_NULL if size is 0
  if (size == 0) {
    return AC_NULL;
  }

  // Round up to the next alignment factor
  size = (size + MEM_ALIGN - 1) & ~(MEM_ALIGN - 1);

  // Loop until we can allocate or we have no more memory
  do {
    cur_idx = __atomic_load_n(&idx, __ATOMIC_ACQUIRE);
    next_idx = cur_idx + size;
    if ((next_idx >= MAX_IDX) || (next_idx <= cur_idx)) {
      return AC_NULL;
    }
    // Compare against the index we loaded, not against idx itself, so a
    // concurrent allocation forces a retry instead of silently succeeding.
    ok = __atomic_compare_exchange_n(&idx, &cur_idx, next_idx,
        AC_TRUE, __ATOMIC_RELEASE, __ATOMIC_ACQUIRE);
  } while (!ok);

  return &mem_array[cur_idx];
}
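/*
 * A minimal usage sketch (not from the original source): ac_malloc bumps a
 * shared index with the CAS loop above, so callers only need to check for
 * AC_NULL on exhaustion. The caller name test_ac_malloc is hypothetical and
 * ac_debug_printf is borrowed from the neighbouring snippets.
 */
static void test_ac_malloc(void) {
  void* p = ac_malloc(100);
  if (p == AC_NULL) {
    ac_debug_printf("ac_malloc: out of memory\n");
  }
}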
void
gomp_mutex_lock_slow (gomp_mutex_t *mutex, int oldval)
{
  /* First loop spins a while.  */
  while (oldval == 1)
    {
      if (do_spin (mutex, 1))
        {
          /* Spin timeout, nothing changed.  Set waiting flag.  */
          oldval = __atomic_exchange_n (mutex, -1, MEMMODEL_ACQUIRE);
          if (oldval == 0)
            return;
          futex_wait (mutex, -1);
          break;
        }
      else
        {
          /* Something changed.  If now unlocked, we're good to go.  */
          oldval = 0;
          if (__atomic_compare_exchange_n (mutex, &oldval, 1, false,
                                           MEMMODEL_ACQUIRE, MEMMODEL_RELAXED))
            return;
        }
    }

  /* Second loop waits until mutex is unlocked.  We always exit this
     loop with wait flag set, so next unlock will awaken a thread.  */
  while ((oldval = __atomic_exchange_n (mutex, -1, MEMMODEL_ACQUIRE)))
    do_wait (mutex, -1);
}
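/* A minimal sketch of the fast-path caller (not from the original source),
   assuming the MEMMODEL_* aliases used above; libgomp's actual inline may
   differ.  The common pattern is a single 0 -> 1 compare-exchange that falls
   back to the slow path with the value it observed.  */
static inline void
gomp_mutex_lock_sketch (gomp_mutex_t *mutex)
{
  int oldval = 0;
  if (!__atomic_compare_exchange_n (mutex, &oldval, 1, false,
                                    MEMMODEL_ACQUIRE, MEMMODEL_RELAXED))
    gomp_mutex_lock_slow (mutex, oldval);
}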
int
main ()
{
  E e = ZERO;
  __atomic_compare_exchange_n (&e, &e, e, true, __ATOMIC_ACQ_REL,
                               __ATOMIC_RELAXED);
}
__attribute__((__always_inline__))
static inline bool ReallyWaitForConditionVariable(volatile uintptr_t *puControl,
    _MCFCRT_ConditionVariableUnlockCallback pfnUnlockCallback,
    _MCFCRT_ConditionVariableRelockCallback pfnRelockCallback, intptr_t nContext,
    size_t uMaxSpinCountInitial, bool bMayTimeOut, uint64_t u64UntilFastMonoClock,
    bool bRelockIfTimeOut)
{
    size_t uMaxSpinCount, uSpinMultiplier;
    bool bSignaled, bSpinnable;
    {
        uintptr_t uOld, uNew;
        uOld = __atomic_load_n(puControl, __ATOMIC_RELAXED);
        do {
            const size_t uSpinFailureCount = (uOld & MASK_SPIN_FAILURE_COUNT) / SPIN_FAILURE_COUNT_ONE;
            if(uMaxSpinCountInitial > MIN_SPIN_COUNT){
                uMaxSpinCount = (uMaxSpinCountInitial >> uSpinFailureCount) | MIN_SPIN_COUNT;
                uSpinMultiplier = MAX_SPIN_MULTIPLIER >> uSpinFailureCount;
            } else {
                uMaxSpinCount = uMaxSpinCountInitial;
                uSpinMultiplier = 0;
            }
            bSignaled = (uOld & MASK_THREADS_RELEASED) != 0;
            bSpinnable = false;
            if(!bSignaled){
                if(uMaxSpinCount != 0){
                    const size_t uThreadsSpinning = (uOld & MASK_THREADS_SPINNING) / THREADS_SPINNING_ONE;
                    bSpinnable = uThreadsSpinning < THREADS_SPINNING_MAX;
                }
                if(!bSpinnable){
                    break;
                }
                uNew = uOld + THREADS_SPINNING_ONE;
            } else {
                const bool bSpinFailureCountDecremented = uSpinFailureCount != 0;
                uNew = uOld - THREADS_RELEASED_ONE - bSpinFailureCountDecremented * SPIN_FAILURE_COUNT_ONE;
            }
        } while(_MCFCRT_EXPECT_NOT(!__atomic_compare_exchange_n(puControl, &uOld, uNew,
                false, __ATOMIC_RELAXED, __ATOMIC_RELAXED)));
    }
/**
 * Register an irq handler and its parameter.
 *
 * return 0 if OK
 */
ac_u32 ac_exception_irq_register(int_handler handler,
    identify_and_clear_source iacs, ac_uptr param) {
  ac_u32 status = 1;

  // Currently there is no unregister so we're just racing
  // with the interrupt handler itself. The interrupt handler
  // will be looking at the handler only and if it's not AC_NULL
  // assume it is good. Thus we'll update that last when we add
  // a new entry.
  for (ac_u32 i = irq_handler_count; i < MAX_HANDLERS; i++) {
    ac_bool* pavailable = &irq_handlers[i].available;
    ac_bool expected = AC_TRUE;
    ac_bool ok = __atomic_compare_exchange_n(pavailable, &expected, AC_FALSE,
        AC_TRUE, __ATOMIC_RELEASE, __ATOMIC_ACQUIRE);
    if (ok) {
      irq_handlers[i].param = param;
      irq_handlers[i].iacs = iacs;
      int_handler* phandler = &irq_handlers[i].handler;
      __atomic_store_n(phandler, handler, __ATOMIC_RELEASE);
      irq_handler_count += 1;
      status = 0;
      break;
    }
  }
  return status;
}
void
gomp_sem_wait_slow (gomp_sem_t *sem, int count)
{
  /* First loop spins a while.  */
  while (count == 0)
    if (do_spin (sem, 0)
        /* Spin timeout, nothing changed.  Set waiting flag.  */
        && __atomic_compare_exchange_n (sem, &count, SEM_WAIT, false,
                                        MEMMODEL_ACQUIRE, MEMMODEL_RELAXED))
      {
        futex_wait (sem, SEM_WAIT);
        count = *sem;
        break;
      }
    /* Something changed.  If it wasn't the wait flag, we're good to go.  */
    else if (__builtin_expect (((count = *sem) & SEM_WAIT) == 0 && count != 0,
                               1))
      {
        if (__atomic_compare_exchange_n (sem, &count, count - SEM_INC, false,
                                         MEMMODEL_ACQUIRE, MEMMODEL_RELAXED))
          return;
      }

  /* Second loop waits until semaphore is posted.  We always exit this
     loop with wait flag set, so next post will awaken a thread.  */
  while (1)
    {
      unsigned int wake = count & ~SEM_WAIT;
      int newval = SEM_WAIT;

      if (wake != 0)
        newval |= wake - SEM_INC;
      if (__atomic_compare_exchange_n (sem, &count, newval, false,
                                       MEMMODEL_ACQUIRE, MEMMODEL_RELAXED))
        {
          if (wake != 0)
            {
              /* If we can wake more threads, do so now.  */
              if (wake > SEM_INC)
                gomp_sem_post_slow (sem);
              break;
            }
          do_wait (sem, SEM_WAIT);
          count = *sem;
        }
    }
}
bool btSpinMutex::tryLock()
{
    int expected = 0;
    bool weak = false;
    const int memOrderSuccess = __ATOMIC_ACQ_REL;
    const int memOrderFail = __ATOMIC_ACQUIRE;
    return __atomic_compare_exchange_n(&mLock, &expected, int(1), weak,
                                       memOrderSuccess, memOrderFail);
}
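// A minimal sketch of a spinning lock built on tryLock() (not from the
// original source); the real Bullet member function may differ, so this is
// written as a free helper with a hypothetical name.
void btSpinMutexLockSketch(btSpinMutex* mutex)
{
    while (!mutex->tryLock())
    {
        // busy-wait until the compare-exchange above succeeds
    }
}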
int
gomp_test_lock_30 (omp_lock_t *lock)
{
  int oldval = 0;

  return __atomic_compare_exchange_n (lock, &oldval, 1, false,
                                      MEMMODEL_ACQUIRE, MEMMODEL_RELAXED);
}
static INLINE void
tcache_set(const struct timeval *const tv, struct tm *const tm)
{
    unsigned stale = TCACHE_STALE;

    if (__atomic_compare_exchange_n(&g_tcache_mode, &stale, TCACHE_FLUID,
                                    0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
        g_tcache_tv = *tv;
        g_tcache_tm = *tm;
        __atomic_and_fetch(&g_tcache_mode, ~TCACHE_FLUID, __ATOMIC_RELEASE);
    }
}
//  Perform atomic 'compare and swap' operation on the pointer.
//  The pointer is compared to 'cmp' argument and if they are
//  equal, its value is set to 'val'. Old value of the pointer
//  is returned.
inline T *cas (T *cmp_, T *val_)
{
#if defined ZMQ_ATOMIC_PTR_WINDOWS
    return (T*) InterlockedCompareExchangePointer (
        (volatile PVOID*) &ptr, val_, cmp_);
#elif defined ZMQ_ATOMIC_PTR_INTRINSIC
    T *old = cmp_;
    __atomic_compare_exchange_n (&ptr, (volatile T**) &old, val_, false,
                                 __ATOMIC_RELEASE, __ATOMIC_ACQUIRE);
    return old;
#elif defined ZMQ_ATOMIC_PTR_CXX11
    ptr.compare_exchange_strong (cmp_, val_, std::memory_order_acq_rel);
    return cmp_;
#elif defined ZMQ_ATOMIC_PTR_ATOMIC_H
    return (T*) atomic_cas_ptr (&ptr, cmp_, val_);
#elif defined ZMQ_ATOMIC_PTR_TILE
    return (T*) arch_atomic_val_compare_and_exchange (&ptr, cmp_, val_);
#elif defined ZMQ_ATOMIC_PTR_X86
    T *old;
    __asm__ volatile (
        "lock; cmpxchg %2, %3"
        : "=a" (old), "=m" (ptr)
        : "r" (val_), "m" (ptr), "0" (cmp_)
        : "cc");
    return old;
#elif defined ZMQ_ATOMIC_PTR_ARM
    T *old;
    unsigned int flag;
    __asm__ volatile (
        " dmb sy\n\t"
        "1: ldrex %1, [%3]\n\t"
        " mov %0, #0\n\t"
        " teq %1, %4\n\t"
        " it eq\n\t"
        " strexeq %0, %5, [%3]\n\t"
        " teq %0, #0\n\t"
        " bne 1b\n\t"
        " dmb sy\n\t"
        : "=&r"(flag), "=&r"(old), "+Qo"(ptr)
        : "r"(&ptr), "r"(cmp_), "r"(val_)
        : "cc");
    return old;
#elif defined ZMQ_ATOMIC_PTR_MUTEX
    sync.lock ();
    T *old = (T*) ptr;
    if (ptr == cmp_)
        ptr = val_;
    sync.unlock ();
    return old;
#else
#error atomic_ptr is not implemented for this platform
#endif
}
/**
 * Handle the periodic interrupt.
 *
 * NOTE: Interrupts are enabled so __atomic operations are used.
 */
void periodic_handler(ac_uptr param) {
  irq_param* pirq_param = (irq_param*)param;

  ac_bool ac_true = AC_TRUE;
  ac_bool* psource = &pirq_param->source;
  ac_bool ok = __atomic_compare_exchange_n(psource, &ac_true, AC_FALSE,
      AC_TRUE, __ATOMIC_RELEASE, __ATOMIC_ACQUIRE);
  if (ok) {
    __atomic_add_fetch(&periodic_counter, 1, __ATOMIC_RELEASE);
    ac_debug_printf("periodic: %d inc counter\n\n", pirq_param->timer);
  }
}
template <typename T>
inline bool Atomic<T>::cswap ( const Atomic<T> &oldval, const Atomic<T> &newval )
{
#ifdef HAVE_NEW_GCC_ATOMIC_OPS
   // FIXME: The atomics passed are const
   T* oldv = const_cast<T*>(&oldval._value);
   T* newv = const_cast<T*>(&newval._value);
   // The _n builtin takes the desired value itself, not a pointer to it.
   return __atomic_compare_exchange_n( &_value, oldv, *newv,
         /* weak */ false, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE );
#else
   return __sync_bool_compare_and_swap ( &_value, oldval.value(), newval.value() );
#endif
}
/**
 * Handle the periodic interrupt.
 *
 * NOTE: Interrupts are enabled so __atomic operations are used.
 */
static void periodic_handler(ac_uptr param) {
  irq_param* pirq_param = (irq_param*)param;

  // Test if pirq_param->source is AC_TRUE which means this did fire
  ac_bool ac_true = AC_TRUE;
  ac_bool* psource = &pirq_param->source;
  ac_bool ok = __atomic_compare_exchange_n(psource, &ac_true, AC_FALSE,
      AC_TRUE, __ATOMIC_RELEASE, __ATOMIC_ACQUIRE);
  if (ok) {
    // Yes, this interrupt occurred.
    ac_debug_printf("periodic: %d handled\n", pirq_param->timer);
  }
}
P_LIB_API pboolean
p_atomic_int_compare_and_exchange (volatile pint *atomic,
                                   pint          oldval,
                                   pint          newval)
{
        pint tmp_int = oldval;

        return (pboolean) __atomic_compare_exchange_n (atomic,
                                                       &tmp_int,
                                                       newval,
                                                       0,
                                                       __ATOMIC_SEQ_CST,
                                                       __ATOMIC_SEQ_CST);
}
P_LIB_API pboolean
p_atomic_pointer_compare_and_exchange (volatile void *atomic,
                                       ppointer      oldval,
                                       ppointer      newval)
{
        ppointer tmp_pointer = oldval;

        /* The desired value must be passed as an integer of the same width
           as the object, hence the cast to psize.  */
        return (pboolean) __atomic_compare_exchange_n ((volatile psize *) atomic,
                                                       (psize *) &tmp_pointer,
                                                       (psize) newval,
                                                       0,
                                                       __ATOMIC_SEQ_CST,
                                                       __ATOMIC_SEQ_CST);
}
void acquire(int* lock)
{
    int ll = LOCKED;
    int ul = UNLOCKED;
    do {
        ul = UNLOCKED;
        _mm_pause();
    } while (!__atomic_compare_exchange_n(lock, &ul, ll, 1,
                                          __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE));
}
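// A minimal matching release sketch (not from the original source), assuming
// the same LOCKED/UNLOCKED encoding: a release store is enough to publish the
// critical section to the next acquirer.
void release(int* lock)
{
    __atomic_store_n(lock, UNLOCKED, __ATOMIC_RELEASE);
}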
int MCF_CRT_AtEndModule(void (*pfnProc)(intptr_t), intptr_t nContext){
    AtExitNode *const pNode = malloc(sizeof(AtExitNode));
    if(!pNode){
        return -1;
    }
    pNode->pfnProc = pfnProc;
    pNode->nContext = nContext;

    pNode->pPrev = __atomic_load_n(&g_pAtExitHead, __ATOMIC_SEQ_CST);
    while(EXPECT(!__atomic_compare_exchange_n(&g_pAtExitHead, &(pNode->pPrev), pNode,
            false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))){
        // Empty loop body: a failed compare-exchange reloads pNode->pPrev,
        // so we simply retry pushing the node onto the list head.
    }
    return 0;
}
/**
 * Create a thread and invoke the entry routine passing entry_arg. If
 * the entry routine returns the thread is considered dead
 * and will not be rescheduled and its stack is reclaimed.
 * Any other global memory associated with the thread still
 * exists and is left untouched.
 *
 * @param stack_size if 0 a "default" stack size will be used.
 * @param entry is the routine to run
 * @param entry_arg is the argument passed to entry.
 *
 * @return an ac_thread_rslt_t containing a status and an opaque ac_thread_hdl_t.
 *         if rslt.status == 0 the thread was created and ac_thread_hdl_t
 *         is valid.
 */
ac_thread_rslt_t ac_thread_create(ac_size_t stack_size,
    void*(*entry)(void*), void* entry_arg) {
  ac_thread_rslt_t rslt;
  ac_tcb* pthe_tcb = AC_NULL;
  int error = 0;

  pthread_attr_t attr;
  pthread_attr_init(&attr);
  if (stack_size > 0) {
    error |= pthread_attr_setstacksize(&attr, (size_t)stack_size);
    if (error != 0) {
      goto done;
    }
  }

  // Find an empty slot
  for (ac_u32 i = 0; i < pthreads->max_count; i++) {
    pthread_t empty = AC_THREAD_ID_EMPTY;
    ac_tcb* pcur_tcb = &pthreads->tcbs[i];
    pthread_t* pthread_id = &pcur_tcb->thread_id;
    ac_bool ok = __atomic_compare_exchange_n(pthread_id, &empty,
        AC_THREAD_ID_NOT_EMPTY, AC_TRUE, __ATOMIC_RELEASE, __ATOMIC_ACQUIRE);
    if (ok) {
      pcur_tcb->entry = entry;
      pcur_tcb->entry_arg = entry_arg;
      error |= pthread_create((pthread_t *)pthread_id, &attr,
          entry_trampoline, pcur_tcb);
      ac_assert(*pthread_id != AC_THREAD_ID_EMPTY);
      ac_assert(*pthread_id != AC_THREAD_ID_NOT_EMPTY);
      if (error == 0) {
        pthe_tcb = pcur_tcb;
        break;
      } else {
        // Mark as empty and try again, although it probably won't work
        __atomic_store_n(pthread_id, AC_THREAD_ID_EMPTY, __ATOMIC_RELEASE);
      }
    }
  }

  pthread_attr_destroy(&attr);

done:
  rslt.hdl = (ac_thread_hdl_t)pthe_tcb;
  rslt.status = (rslt.hdl != 0) ? 0 : 1;
  return rslt;
}
int sem_post(sem_t* sem)
{
    while ( true )
    {
        int old_value = __atomic_load_n(&sem->value, __ATOMIC_SEQ_CST);
        if ( old_value == INT_MAX )
            return errno = EOVERFLOW, -1;
        int new_value = old_value + 1;
        if ( !__atomic_compare_exchange_n(&sem->value, &old_value, new_value,
                                          false, __ATOMIC_SEQ_CST,
                                          __ATOMIC_RELAXED) )
            continue;
        return 0;
    }
}
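/* A minimal sketch of the decrement side using the same CAS-retry pattern
   (not from the original source); the name sem_trywait_sketch and the EAGAIN
   handling are illustrative assumptions only. */
int sem_trywait_sketch(sem_t* sem)
{
    while ( true )
    {
        int old_value = __atomic_load_n(&sem->value, __ATOMIC_SEQ_CST);
        if ( old_value <= 0 )
            return errno = EAGAIN, -1;
        int new_value = old_value - 1;
        if ( !__atomic_compare_exchange_n(&sem->value, &old_value, new_value,
                                          false, __ATOMIC_SEQ_CST,
                                          __ATOMIC_RELAXED) )
            continue;
        return 0;
    }
}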
int
gomp_test_nest_lock_25 (omp_nest_lock_25_t *lock)
{
  int otid, tid = gomp_tid ();

  otid = 0;
  if (__atomic_compare_exchange_n (&lock->owner, &otid, tid, false,
                                   MEMMODEL_ACQUIRE, MEMMODEL_RELAXED))
    {
      lock->count = 1;
      return 1;
    }
  if (otid == tid)
    return ++lock->count;

  return 0;
}
void unlock(struct mcs_spinlock *node)
{
    struct mcs_spinlock *last = node;

    if (! node->next) {
        // I'm the last in the queue
        if (__atomic_compare_exchange_n(&tail, &last, NULL, 0,
                                        __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
            return;
        } else {
            // Another process executed exchange but
            // didn't assign our next yet, so wait
            while (! node->next);
        }
    } else {
        // We force a memory barrier to ensure the critical section
        // was executed before the next
        __atomic_thread_fence (__ATOMIC_RELEASE);
    }
    node->next->locked = 0;
}
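// A minimal sketch of the matching MCS lock side (not from the original
// source), assuming the same global tail pointer and node layout: enqueue
// with an atomic exchange, then spin on our own node until the predecessor
// hands the lock off. The name lock_sketch is hypothetical.
void lock_sketch(struct mcs_spinlock *node)
{
    node->next = NULL;
    node->locked = 1;
    struct mcs_spinlock *prev = __atomic_exchange_n(&tail, node, __ATOMIC_ACQ_REL);
    if (prev) {
        // Queue was not empty: publish ourselves to the predecessor and wait
        // for unlock() above to clear our locked flag.
        prev->next = node;
        while (node->locked);
    }
}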
int
main ()
{
  ac = __atomic_exchange_n (&bc, cc, __ATOMIC_RELAXED);
  if (bc != 1)
    abort ();

  as = __atomic_load_n (&bs, __ATOMIC_SEQ_CST);
  if (bs != 1)
    abort ();

  __atomic_store_n (&ac, bc, __ATOMIC_RELAXED);
  if (ac != 1)
    abort ();

  __atomic_compare_exchange_n (&as, &bs, cs, 0, __ATOMIC_SEQ_CST,
                               __ATOMIC_ACQUIRE);
  if (as != 1)
    abort ();

  ac = __atomic_fetch_add (&cc, 15, __ATOMIC_SEQ_CST);
  if (cc != 1)
    abort ();

  /* This should be translated to __atomic_fetch_add for the library.  */
  as = __atomic_add_fetch (&cs, 10, __ATOMIC_RELAXED);
  if (cs != 1)
    abort ();

  /* The fake external function should return 10.  */
  if (__atomic_is_lock_free (4, 0) != 10)
    abort ();

  /* PR 51040 was caused by arithmetic code not patching up nand_fetch
     properly when used as an external function.  Look for the proper
     return value here.  */
  ac = 0x3C;
  bc = __atomic_nand_fetch (&ac, 0x0f, __ATOMIC_RELAXED);
  if (bc != ac)
    abort ();

  return 0;
}
int
gomp_test_nest_lock_30 (omp_nest_lock_t *lock)
{
  void *me = gomp_icv (true);
  int oldval;

  if (lock->owner == me)
    return ++lock->count;

  oldval = 0;
  if (__atomic_compare_exchange_n (&lock->lock, &oldval, 1, false,
                                   MEMMODEL_ACQUIRE, MEMMODEL_RELAXED))
    {
      lock->owner = me;
      lock->count = 1;
      return 1;
    }

  return 0;
}
void
gomp_set_nest_lock_25 (omp_nest_lock_25_t *lock)
{
  int otid, tid = gomp_tid ();

  while (1)
    {
      otid = 0;
      if (__atomic_compare_exchange_n (&lock->owner, &otid, tid, false,
                                       MEMMODEL_ACQUIRE, MEMMODEL_RELAXED))
        {
          lock->count = 1;
          return;
        }
      if (otid == tid)
        {
          lock->count++;
          return;
        }

      do_wait (&lock->owner, otid);
    }
}
bool IpcQueue::_waitHeadFutex() {
    struct Ops {
        static void woken(Worklet *worklet) {
            auto self = frg::container_of(worklet, &IpcQueue::_worklet);
            auto irq_lock = frigg::guard(&irqMutex());
            auto lock = frigg::guard(&self->_mutex);

            self->_waitInFutex = false;
            self->_progress();
        }
    };

    auto node = _nodeQueue.front();
    while(true) {
        auto futex = __atomic_load_n(&_queueAccessor.get()->headFutex, __ATOMIC_ACQUIRE);
        do {
            if(_nextIndex != (futex & kHeadMask))
                return false;

            // TODO: Contract violation errors should be reported to user-space.
            assert(futex == _nextIndex);
        } while(!__atomic_compare_exchange_n(&_queueAccessor.get()->headFutex, &futex,
                _nextIndex | kHeadWaiters, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE));

        auto fa = reinterpret_cast<Address>(_pointer) + offsetof(QueueStruct, headFutex);
        _worklet.setup(&Ops::woken);
        _futex.setup(&_worklet);
        _waitInFutex = _space->futexSpace.checkSubmitWait(fa, [&] {
            return __atomic_load_n(&_queueAccessor.get()->headFutex, __ATOMIC_RELAXED)
                    == (_nextIndex | kHeadWaiters);
        }, &_futex);

        if(_waitInFutex)
            return true;
    }
}
int
main ()
{
  if (!__atomic_compare_exchange_n (&v, &expected, max, STRONG,
                                    __ATOMIC_RELAXED, __ATOMIC_RELAXED))
    abort ();
  if (expected != 0)
    abort ();

  if (__atomic_compare_exchange_n (&v, &expected, 0, STRONG,
                                   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
    abort ();
  if (expected != max)
    abort ();

  if (!__atomic_compare_exchange_n (&v, &expected, 0, STRONG,
                                    __ATOMIC_RELEASE, __ATOMIC_ACQUIRE))
    abort ();
  if (expected != max)
    abort ();
  if (v != 0)
    abort ();

  if (__atomic_compare_exchange_n (&v, &expected, desired, WEAK,
                                   __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE))
    abort ();
  if (expected != 0)
    abort ();

  if (!__atomic_compare_exchange_n (&v, &expected, desired, STRONG,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
    abort ();
  if (expected != 0)
    abort ();
  if (v != max)
    abort ();

  /* Now test the generic version.  */
  v = 0;

  if (!__atomic_compare_exchange (&v, &expected, &max, STRONG,
                                  __ATOMIC_RELAXED, __ATOMIC_RELAXED))
    abort ();
  if (expected != 0)
    abort ();

  if (__atomic_compare_exchange (&v, &expected, &zero, STRONG,
                                 __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
    abort ();
  if (expected != max)
    abort ();

  if (!__atomic_compare_exchange (&v, &expected, &zero, STRONG,
                                  __ATOMIC_RELEASE, __ATOMIC_ACQUIRE))
    abort ();
  if (expected != max)
    abort ();
  if (v != 0)
    abort ();

  if (__atomic_compare_exchange (&v, &expected, &desired, WEAK,
                                 __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE))
    abort ();
  if (expected != 0)
    abort ();

  if (!__atomic_compare_exchange (&v, &expected, &desired, STRONG,
                                  __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
    abort ();
  if (expected != 0)
    abort ();
  if (v != max)
    abort ();

  return 0;
}
template <>
bool
A<>::foo ()
{
  int a;
  do
    if (a)
      return false;
  while (__atomic_compare_exchange_n (&b, &a, 0, 1, 4, 0));
}