/** Clear the NUMBITS-wide bit field that starts at bit offset `this->bit`
 *  inside the word `*(this->data)`.
 *
 *  Device build: neighbouring bit fields in the same word may be updated by
 *  other threads concurrently, so the read-modify-write is done with
 *  atomicAnd. Host build: a plain masked store is used.
 */
HDINLINE void BitData<TYPE, NUMBITS>::setBitsToNull()
{
#if defined(__CUDA_ARCH__)
    /* Device code path: atomic clear of the selected bits. */
    atomicAnd(this->data, ~(TO_BITS(NUMBITS) << this->bit));
#else
    /* Host code path: non-atomic clear of the selected bits. */
    *(this->data) &= ~(TO_BITS(NUMBITS) << this->bit);
#endif
}
/** Assign a truth value to a single-bit field at offset `this->bit`.
 *
 *  A non-zero `rhs` sets the bit, zero clears it. The device build uses
 *  atomicOr/atomicAnd because other threads may concurrently modify other
 *  bits of the same word; the host build performs plain bit operations.
 */
HDINLINE void BitData<TYPE, 1u > ::operator=(const TYPE &rhs)
{
#if defined(__CUDA_ARCH__)
    /* Device code path: atomic set/clear of the single bit. */
    if (rhs)
        atomicOr(this->data, 1u << this->bit);
    else
        atomicAnd(this->data, ~(1u << this->bit));
#else
    /* Host code path: non-atomic set/clear of the single bit. */
    if (rhs)
        *(this->data) |= (1u << this->bit);
    else
        *(this->data) &= (~(1u << this->bit));
#endif
}
/** Kernel exercising the arithmetic and bitwise atomic intrinsics against
 *  the slots of `g_odata`. One slot per intrinsic; the host side presumably
 *  checks the final values against a reference — TODO confirm against the
 *  launching code (not visible in this chunk).
 *
 *  NOTE(review): `HIP_FUNCTION` is a project macro wrapping the kernel
 *  name/signature — its exact expansion is not visible here.
 */
__global__ void HIP_FUNCTION(testKernel,int *g_odata)
{
    // access thread id (flat 1D global index)
    const unsigned int tid = hipBlockDim_x * hipBlockIdx_x + hipThreadIdx_x;

    // Test various atomic instructions

    // Arithmetic atomic instructions

    // Atomic addition
    atomicAdd(&g_odata[0], 10);

    // Atomic subtraction (final should be 0)
    atomicSub(&g_odata[1], 10);

    // Atomic exchange: slot ends up holding the tid of the last writer
    atomicExch(&g_odata[2], tid);

    // Atomic maximum
    atomicMax(&g_odata[3], tid);

    // Atomic minimum
    atomicMin(&g_odata[4], tid);

    // Atomic increment (modulo 17+1)
    // NOTE(review): the standard CUDA/HIP signature is
    // atomicInc(ptr, val) — the commented-out call below is that form.
    // The one-argument form used here is non-standard; presumably a
    // project/port-specific overload — confirm it exists in this build.
    //atomicInc((unsigned int *)&g_odata[5], 17);
    atomicInc((unsigned int *)&g_odata[5]);

    // Atomic decrement
    // NOTE(review): same caveat as atomicInc — standard form is
    // atomicDec(ptr, val).
    // atomicDec((unsigned int *)&g_odata[6], 137);
    atomicDec((unsigned int *)&g_odata[6]);

    // Atomic compare-and-swap: replace (tid-1) with tid if it matches
    atomicCAS(&g_odata[7], tid-1, tid);

    // Bitwise atomic instructions

    // Atomic AND
    atomicAnd(&g_odata[8], 2*tid+7);

    // Atomic OR (sets bit `tid`; NOTE(review): shifts of 32 or more are
    // undefined for int — assumes fewer than 32 threads total, verify
    // against the launch configuration)
    atomicOr(&g_odata[9], 1 << tid);

    // Atomic XOR
    atomicXor(&g_odata[10], tid);
}
/** Atomically perform `*x &= y` and return the value `*x` held before the
 *  operation (fetch-and-AND), for 64-bit integer types.
 *
 *  Enabled only when sizeof(Integer64) == 8 via SFINAE. The implementation
 *  is selected per compiler/architecture:
 *  - device code: CUDA atomicAnd (64-bit atomicAnd requires SM 3.5+)
 *  - GCC and Clang: __atomic_fetch_and builtin, sequentially consistent
 *    (Clang defines __GNUC__, so it also takes this branch)
 *  - MSVC: InterlockedAnd64
 *
 *  @param x  pointer to the 64-bit integer to update
 *  @param y  mask to AND into *x
 *  @return   the previous value of *x
 */
__host__ __device__ typename enable_if< sizeof(Integer64) == 8, Integer64 >::type
atomic_fetch_and(Integer64 *x, Integer64 y)
{
#if defined(__CUDA_ARCH__)
    return atomicAnd(x, y);
#elif defined(__GNUC__)
    return __atomic_fetch_and(x, y, __ATOMIC_SEQ_CST);
#elif defined(_MSC_VER)
    return InterlockedAnd64(x, y);
#elif defined(__clang__)
    /* Effectively unreachable: Clang defines __GNUC__ and is handled above.
     * Fixed the missing ';' that would have broken this branch if taken. */
    return __c11_atomic_fetch_and(x, y);
#else
#error "No atomic_fetch_and implementation."
#endif
}
/** Device-side fetch-and-AND for 64-bit unsigned integers.
 *
 *  Thin adapter over CUDA's atomicAnd: the intrinsic has no overload for a
 *  volatile-qualified pointer, so the qualifier is stripped for the call.
 *
 *  @param dest  location to update atomically
 *  @param val   mask to AND into *dest
 *  @return      the previous value of *dest
 */
__inline__ __device__ unsigned long long int
atomic_fetch_and( volatile unsigned long long int * const dest
                , const unsigned long long int val )
{
    // const_cast only removes volatile here; the pointed-to type is unchanged.
    return atomicAnd( const_cast< unsigned long long int * >( dest ), val );
}
/** Device-side fetch-and-AND for plain int.
 *
 *  Thin adapter over CUDA's atomicAnd: the intrinsic has no overload for a
 *  volatile-qualified pointer, so the qualifier is stripped for the call.
 *
 *  @param dest  location to update atomically
 *  @param val   mask to AND into *dest
 *  @return      the previous value of *dest
 */
__inline__ __device__ int
atomic_fetch_and( volatile int * const dest , const int val )
{
    // const_cast only removes volatile here; the pointed-to type is unchanged.
    return atomicAnd( const_cast< int * >( dest ), val );
}