/* Atomically ORs @val into the pointer-sized word at @atomic and returns
 * the value that was stored there *before* the OR — the Interlocked*
 * family returns the prior value, which is exactly the contract of
 * g_atomic_pointer_or, so no extra work is needed. */
gsize
(g_atomic_pointer_or) (volatile void *atomic,
                       gsize          val)
{
  /* Pick the intrinsic whose operand width matches a pointer. */
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedOr64 (atomic, val);
#else
  return InterlockedOr (atomic, val);
#endif
}
/* MSVC stand-in for GCC's __atomic_load_n builtin.
 *
 * ptr:  address of the pointer-sized word to load; note the Interlocked
 *       OR-with-0 trick below performs a WRITE, so ptr must point to
 *       writable memory.
 * type: requested memory ordering; only __ATOMIC_ACQUIRE is supported
 *       (the full-barrier Interlocked op more than satisfies it).
 *
 * Returns the value read, as a uintptr_t.
 */
static __inline uintptr_t __atomic_load_n(void *ptr, unsigned type)
{
    assert(type == __ATOMIC_ACQUIRE);
    (void) type; /* assert() compiles away under NDEBUG; avoid an
                  * unused-parameter warning in release builds. */
    /* InterlockedOr* returns the previous value - but since we do an OR
     * with 0, it's equivalent to a plain load (with a full barrier). */
#ifdef _WIN64
    return InterlockedOr64(ptr, 0);
#else
    return InterlockedOr(ptr, 0);
#endif
}
/* Atomically XORs y into *x and returns the previous value of *x,
 * with sequentially-consistent ordering on the host paths.
 * Enabled only for 8-byte Integer64 (SFINAE via enable_if).
 *
 * Fixes relative to the previous revision:
 *  - MSVC branch called InterlockedOr64 (an OR, not an XOR) — now
 *    InterlockedXor64, which also returns the original value.
 *  - clang branch lacked the mandatory memory-order argument and a
 *    trailing semicolon, and was unreachable anyway because clang
 *    defines __GNUC__; it is now tested first and uses the
 *    __atomic_fetch_xor builtin that clang shares with GCC.
 */
__host__ __device__
typename enable_if<
  sizeof(Integer64) == 8,
  Integer64
>::type
atomic_fetch_xor(Integer64 *x, Integer64 y)
{
#if defined(__CUDA_ARCH__)
  return atomicXor(x, y);
#elif defined(__clang__)
  /* Must precede the __GNUC__ test: clang defines __GNUC__ too. */
  return __atomic_fetch_xor(x, y, __ATOMIC_SEQ_CST);
#elif defined(__GNUC__)
  return __atomic_fetch_xor(x, y, __ATOMIC_SEQ_CST);
#elif defined(_MSC_VER)
  /* InterlockedXor64 returns the value *x held before the XOR. */
  return InterlockedXor64(x, y);
#else
#error "No atomic_fetch_xor implementation."
#endif
}