unsigned Atomic::XOR(unsigned value) { #if defined(_OPENTHREADS_ATOMIC_USE_GCC_BUILTINS) return __sync_fetch_and_xor(&_value, value); #elif defined(_OPENTHREADS_ATOMIC_USE_WIN32_INTERLOCKED) return _InterlockedXor(&_value, value); #elif defined(_OPENTHREADS_ATOMIC_USE_BSD_ATOMIC) return OSAtomicXor32((uint32_t)value, (uint32_t *)&_value); #else # error This implementation should happen inline in the include file #endif }
// Thin exerciser for the _InterlockedXor intrinsic: atomically XORs `mask`
// into *value and returns the contents *value held before the operation.
LONG test_InterlockedXor(LONG volatile *value, LONG mask)
{
    const LONG previous = _InterlockedXor(value, mask);
    return previous;
}
/* _Atomic_fetch_xor_4 */
// Sequentially-consistent 32-bit atomic fetch-xor: XORs _Value into *_Tgt
// and returns the previous contents of *_Tgt.  Implemented on top of the
// _InterlockedXor intrinsic, which provides full (seq_cst) ordering.
_Uint4_t _Fetch_xor_seq_cst_4(volatile _Uint4_t *_Tgt, _Uint4_t _Value)
{
    return (_Uint4_t)_InterlockedXor((volatile long *)_Tgt, (long)_Value);
}
// Exercises the _InterlockedXor intrinsic: atomically applies an XOR of
// `mask` to *value, yielding the value that was stored there beforehand.
long test_InterlockedXor(long volatile *value, long mask)
{
    long original;
    original = _InterlockedXor(value, mask);
    return original;
}
// Atomic XOR helper: XORs `add` (the bit mask to apply; name kept for
// interface compatibility) into *var and returns the previous value.
// Named casts replace the original C-style cast to make the pointer
// reinterpretation explicit and greppable.
Int32 KInterlockedXor(Int32 volatile* var, Int32 add)
{
    volatile long* const target = reinterpret_cast<volatile long*>(var);
    const long previous = _InterlockedXor(target, static_cast<long>(add));
    return static_cast<Int32>(previous);
}