void structAtomicLoad() {
  struct foo f = __c11_atomic_load(&bigAtomic, 5); // expected-error {{atomic load requires runtime support that is not available for this target}}

  struct bar b;
  __atomic_load(&smallThing, &b, 5);

  __atomic_load(&bigThing, &f, 5);
}
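For context, the diagnostic above fires because the _Atomic struct is too large for the target's inline atomics and no runtime library is available. A hedged reconstruction of the declarations this Clang Sema test assumes (the real test file may size the types differently):

/* Hypothetical fixture; names match the snippet, sizes are assumptions. */
struct foo { int big[128]; };   /* oversized: would need libatomic support */
struct bar { char c[3]; };
_Atomic struct foo bigAtomic;
struct foo bigThing;
struct bar smallThing;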
static inline void atomic_daccum (double *p, const double val)
{
#if defined(ATOMIC_FP_FE_EMUL)
  double pv, upd;
  int done = 0;
  do {
    __atomic_load ((int64_t*)p, (int64_t*)&pv, __ATOMIC_ACQUIRE);
    if (__atomic_compare_exchange ((int64_t*)p, (int64_t*)&pv, (int64_t*)&NAN_EMPTY,
                                   1, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE)) {
      upd = pv + val;
      __atomic_store ((int64_t*)p, (int64_t*)&upd, __ATOMIC_RELEASE);
      done = 1;
    } else
      MM_PAUSE();
  } while (!done);
#elif defined(ATOMIC_FP_OPTIMISTIC)
  double pv, upd;
  __atomic_load ((int64_t*)p, (int64_t*)&pv, __ATOMIC_ACQUIRE);
  do {
    upd = pv + val;
    if (__atomic_compare_exchange ((int64_t*)p, (int64_t*)&pv, (int64_t*)&upd,
                                   1, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE))
      break;
    else
      MM_PAUSE();
  } while (1);
#else
  OMP(omp atomic)
  *p += val;
#endif
}
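A minimal sketch of how a reduction might drive atomic_daccum from an OpenMP parallel loop; only the function name comes from the snippet above, everything else here is an assumed driver:

#include <stdio.h>

double total = 0.0;

int main(void) {
  /* 4000 concurrent accumulations of 0.25 should sum to exactly 1000.0,
     since 0.25 is exactly representable and no update may be lost. */
  #pragma omp parallel for
  for (int i = 0; i < 4000; i++)
    atomic_daccum(&total, 0.25);
  printf("total = %g\n", total);
  return 0;
}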
T unsafe_load() const noexcept {
  T result;
  __atomic_load(reinterpret_cast<const uint64_t*>(&mElement),
                reinterpret_cast<uint64_t*>(&result),
                std::memory_order_seq_cst);
  __atomic_load(reinterpret_cast<const uint64_t*>(&mElement) + 1,
                reinterpret_cast<uint64_t*>(&result) + 1,
                std::memory_order_seq_cst);
  return result;
}
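Each 64-bit half is loaded atomically, but the pair is not a single atomic operation, which is presumably why the method is named unsafe_load. A sketch of a fully atomic alternative, assuming T is a trivially copyable 16-byte type; the generic builtin then typically lowers to a libatomic call such as __atomic_load_16:

T safe_load() const noexcept {
    // One atomic operation over the whole 16-byte object instead of two
    // 8-byte halves (may require linking libatomic on most targets).
    T result;
    __atomic_load(&mElement, &result, __ATOMIC_SEQ_CST);
    return result;
}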
int main ()
{
  v = 0;
  count = 0;

  if (__atomic_load_n (&v, __ATOMIC_RELAXED) != count++)
    abort();
  else
    v++;

  if (__atomic_load_n (&v, __ATOMIC_ACQUIRE) != count++)
    abort();
  else
    v++;

  if (__atomic_load_n (&v, __ATOMIC_CONSUME) != count++)
    abort();
  else
    v++;

  if (__atomic_load_n (&v, __ATOMIC_SEQ_CST) != count++)
    abort();
  else
    v++;

  /* Now test the generic variants.  */

  __atomic_load (&v, &count, __ATOMIC_RELAXED);
  if (count != v)
    abort();
  else
    v++;

  __atomic_load (&v, &count, __ATOMIC_ACQUIRE);
  if (count != v)
    abort();
  else
    v++;

  __atomic_load (&v, &count, __ATOMIC_CONSUME);
  if (count != v)
    abort();
  else
    v++;

  __atomic_load (&v, &count, __ATOMIC_SEQ_CST);
  if (count != v)
    abort();
  else
    v++;

  return 0;
}
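The test covers every memory order that is valid for a load. The two remaining orders are store-side only and are diagnosed on loads; a short illustration (not part of the original test):

/* __ATOMIC_RELEASE and __ATOMIC_ACQ_REL are not valid orders for a load;
   GCC warns via -Winvalid-memory-model and Clang warns similarly.  */
int bad1 = __atomic_load_n (&v, __ATOMIC_RELEASE);  /* diagnosed */
int bad2 = __atomic_load_n (&v, __ATOMIC_ACQ_REL);  /* diagnosed */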
uint64_t usecTimestamp(void) {
  uint32_t high0;
  __atomic_load(&usecTimerHighCount, &high0, __ATOMIC_SEQ_CST);
  uint32_t low = TIM7->CNT;
  uint32_t high;
  __atomic_load(&usecTimerHighCount, &high, __ATOMIC_SEQ_CST);

  // There was no increment in between
  if (high == high0) {
    return (((uint64_t)high) << 16) + low;
  }

  // There was an increment, but we don't expect another one soon
  return (((uint64_t)high) << 16) + TIM7->CNT;
}
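The two loads of usecTimerHighCount bracket the read of the 16-bit hardware counter so that a rollover in between can be detected. An equivalent, loop-based formulation of the same pattern (a sketch reusing the names above, not the original firmware):

uint64_t usecTimestampRetry(void) {
  uint32_t high0, high, low;
  do {
    __atomic_load(&usecTimerHighCount, &high0, __ATOMIC_SEQ_CST);
    low = TIM7->CNT;
    __atomic_load(&usecTimerHighCount, &high, __ATOMIC_SEQ_CST);
  } while (high != high0);  // retry until no rollover interrupted the read
  return (((uint64_t)high) << 16) + low;
}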
/* Test for consistency on sizes 1, 2, 4, 8, 16 and 32.  */
int main ()
{
  test_struct c;

  __atomic_store (&a, &zero, __ATOMIC_RELAXED);
  if (memcmp (&a, &zero, size))
    abort ();

  __atomic_exchange (&a, &ones, &c, __ATOMIC_SEQ_CST);
  if (memcmp (&c, &zero, size))
    abort ();
  if (memcmp (&a, &ones, size))
    abort ();

  __atomic_load (&a, &b, __ATOMIC_RELAXED);
  if (memcmp (&b, &ones, size))
    abort ();

  if (!__atomic_compare_exchange (&a, &b, &zero, false, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE))
    abort ();
  if (memcmp (&a, &zero, size))
    abort ();

  if (__atomic_compare_exchange (&a, &b, &ones, false, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE))
    abort ();
  if (memcmp (&b, &zero, size))
    abort ();

  return 0;
}
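The test body relies on globals supplied by the surrounding harness. A hedged reconstruction of what it assumes (the actual GCC testsuite fixture may differ; the other listed sizes would be exercised with differently sized structs):

#include <string.h>
#include <stdlib.h>
#include <stdbool.h>

typedef struct { char data[16]; } test_struct;  /* one of the tested sizes */
test_struct a, b, zero, ones;
const size_t size = sizeof (test_struct);

void init (void)   /* assumed to run before the checks in main */
{
  memset (&zero, 0, sizeof zero);
  memset (&ones, 0xFF, sizeof ones);
}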
void test1(void) {
  (void)__atomic_load(&c1, &c2, memory_order_seq_cst);
  (void)__atomic_store(&c1, &c2, memory_order_seq_cst);
  (void)__atomic_load(&s1, &s2, memory_order_seq_cst);
  (void)__atomic_store(&s1, &s2, memory_order_seq_cst);
  (void)__atomic_load(&i1, &i2, memory_order_seq_cst);
  (void)__atomic_store(&i1, &i2, memory_order_seq_cst);
  (void)__atomic_load(&ll1, &ll2, memory_order_seq_cst);
  (void)__atomic_store(&ll1, &ll2, memory_order_seq_cst);
  (void)__atomic_load(&a1, &a2, memory_order_seq_cst);
  (void)__atomic_store(&a1, &a2, memory_order_seq_cst);

// ARM-LABEL: define{{.*}} void @test1
// ARM: = call{{.*}} zeroext i8 @__atomic_load_1(i8* @c1
// ARM: call{{.*}} void @__atomic_store_1(i8* @c1, i8 zeroext
// ARM: = call{{.*}} zeroext i16 @__atomic_load_2(i8* bitcast (i16* @s1 to i8*)
// ARM: call{{.*}} void @__atomic_store_2(i8* bitcast (i16* @s1 to i8*), i16 zeroext
// ARM: = call{{.*}} i32 @__atomic_load_4(i8* bitcast (i32* @i1 to i8*)
// ARM: call{{.*}} void @__atomic_store_4(i8* bitcast (i32* @i1 to i8*), i32
// ARM: = call{{.*}} i64 @__atomic_load_8(i8* bitcast (i64* @ll1 to i8*)
// ARM: call{{.*}} void @__atomic_store_8(i8* bitcast (i64* @ll1 to i8*), i64
// ARM: call{{.*}} void @__atomic_load(i32 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0)
// ARM: call{{.*}} void @__atomic_store(i32 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0)

// PPC32-LABEL: define void @test1
// PPC32: = load atomic i8, i8* @c1 seq_cst
// PPC32: store atomic i8 {{.*}}, i8* @c1 seq_cst
// PPC32: = load atomic i16, i16* @s1 seq_cst
// PPC32: store atomic i16 {{.*}}, i16* @s1 seq_cst
// PPC32: = load atomic i32, i32* @i1 seq_cst
// PPC32: store atomic i32 {{.*}}, i32* @i1 seq_cst
// PPC32: = call i64 @__atomic_load_8(i8* bitcast (i64* @ll1 to i8*)
// PPC32: call void @__atomic_store_8(i8* bitcast (i64* @ll1 to i8*), i64
// PPC32: call void @__atomic_load(i32 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0)
// PPC32: call void @__atomic_store(i32 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0)

// PPC64-LABEL: define void @test1
// PPC64: = load atomic i8, i8* @c1 seq_cst
// PPC64: store atomic i8 {{.*}}, i8* @c1 seq_cst
// PPC64: = load atomic i16, i16* @s1 seq_cst
// PPC64: store atomic i16 {{.*}}, i16* @s1 seq_cst
// PPC64: = load atomic i32, i32* @i1 seq_cst
// PPC64: store atomic i32 {{.*}}, i32* @i1 seq_cst
// PPC64: = load atomic i64, i64* @ll1 seq_cst
// PPC64: store atomic i64 {{.*}}, i64* @ll1 seq_cst
// PPC64: call void @__atomic_load(i64 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0)
// PPC64: call void @__atomic_store(i64 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0)

// MIPS32-LABEL: define void @test1
// MIPS32: = load atomic i8, i8* @c1 seq_cst
// MIPS32: store atomic i8 {{.*}}, i8* @c1 seq_cst
// MIPS32: = load atomic i16, i16* @s1 seq_cst
// MIPS32: store atomic i16 {{.*}}, i16* @s1 seq_cst
// MIPS32: = load atomic i32, i32* @i1 seq_cst
// MIPS32: store atomic i32 {{.*}}, i32* @i1 seq_cst
// MIPS32: call i64 @__atomic_load_8(i8* bitcast (i64* @ll1 to i8*)
// MIPS32: call void @__atomic_store_8(i8* bitcast (i64* @ll1 to i8*), i64
// MIPS32: call void @__atomic_load(i32 signext 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0)
// MIPS32: call void @__atomic_store(i32 signext 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0)

// MIPS64-LABEL: define void @test1
// MIPS64: = load atomic i8, i8* @c1 seq_cst
// MIPS64: store atomic i8 {{.*}}, i8* @c1 seq_cst
// MIPS64: = load atomic i16, i16* @s1 seq_cst
// MIPS64: store atomic i16 {{.*}}, i16* @s1 seq_cst
// MIPS64: = load atomic i32, i32* @i1 seq_cst
// MIPS64: store atomic i32 {{.*}}, i32* @i1 seq_cst
// MIPS64: = load atomic i64, i64* @ll1 seq_cst
// MIPS64: store atomic i64 {{.*}}, i64* @ll1 seq_cst
// MIPS64: call void @__atomic_load(i64 zeroext 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0)
// MIPS64: call void @__atomic_store(i64 zeroext 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0)
}
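The ARM/PPC32/PPC64/MIPS32/MIPS64 prefixes are FileCheck check prefixes, each selected by a separate compile of the same file. A hedged sketch of the RUN lines that typically drive such a multi-target CodeGen test (the real test's triples and flags may differ):

// RUN: %clang_cc1 -triple arm-linux-gnueabi -emit-llvm %s -o - | FileCheck %s --check-prefix=ARM
// RUN: %clang_cc1 -triple powerpc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=PPC32
// RUN: %clang_cc1 -triple powerpc64-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=PPC64
// RUN: %clang_cc1 -triple mips-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=MIPS32
// RUN: %clang_cc1 -triple mips64-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=MIPS64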
static inline LOGGING_BUFFER_STATUS _readBufferStatus() {
  LOGGING_BUFFER_STATUS status;
  __atomic_load(&buffer_status, &status, __ATOMIC_SEQ_CST);
  return status;
}
inline void* OrderAccess::load_ptr_acquire(const volatile void* p) {
  void* data;
  __atomic_load((void* const volatile *)p, &data, __ATOMIC_ACQUIRE);
  return data;
}

inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t* p) {
  intptr_t data;
  __atomic_load(p, &data, __ATOMIC_ACQUIRE);
  return data;
}

inline jdouble OrderAccess::load_acquire(volatile jdouble* p) {
  jdouble data;
  __atomic_load(p, &data, __ATOMIC_ACQUIRE);
  return data;
}

inline jfloat OrderAccess::load_acquire(volatile jfloat* p) {
  jfloat data;
  __atomic_load(p, &data, __ATOMIC_ACQUIRE);
  return data;
}

inline julong OrderAccess::load_acquire(volatile julong* p) {
  julong data;
  __atomic_load(p, &data, __ATOMIC_ACQUIRE);
  return data;
}
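These HotSpot OrderAccess wrappers express load-acquire via the GCC __atomic_load builtin. Outside the JVM, the same semantics are available portably through std::atomic; a sketch (not HotSpot code):

#include <atomic>
#include <cstdint>

template <typename T>
inline T load_acquire(const std::atomic<T>& v) {
  // Same ordering guarantee as __atomic_load(..., __ATOMIC_ACQUIRE).
  return v.load(std::memory_order_acquire);
}

// Usage:
//   std::atomic<std::intptr_t> ptr_word{0};
//   std::intptr_t snapshot = load_acquire(ptr_word);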
void test_presence(void) {
  // CHECK-LABEL: @test_presence
  // CHECK: atomicrmw add i32* {{.*}} seq_cst
  __atomic_fetch_add(&i, 1, memory_order_seq_cst);
  // CHECK: atomicrmw sub i32* {{.*}} seq_cst
  __atomic_fetch_sub(&i, 1, memory_order_seq_cst);
  // CHECK: load atomic i32, i32* {{.*}} seq_cst
  int r;
  __atomic_load(&i, &r, memory_order_seq_cst);
  // CHECK: store atomic i32 {{.*}} seq_cst
  r = 0;
  __atomic_store(&i, &r, memory_order_seq_cst);

  // CHECK: __atomic_fetch_add_8
  __atomic_fetch_add(&l, 1, memory_order_seq_cst);
  // CHECK: __atomic_fetch_sub_8
  __atomic_fetch_sub(&l, 1, memory_order_seq_cst);
  // CHECK: __atomic_load_8
  long long rl;
  __atomic_load(&l, &rl, memory_order_seq_cst);
  // CHECK: __atomic_store_8
  rl = 0;
  __atomic_store(&l, &rl, memory_order_seq_cst);
}
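The CHECK lines show the split this test expects on its target: 4-byte operations inline to LLVM atomic instructions, while 8-byte ones become libatomic calls. Whether a given size inlines can be probed with the __atomic_always_lock_free builtin; a sketch (results are target-dependent, and the libcall path typically needs -latomic at link time):

#include <stdio.h>

int main(void) {
  /* Second argument 0 means "assume typical alignment for this size". */
  printf("4-byte lock-free: %d\n", (int)__atomic_always_lock_free(4, 0));
  printf("8-byte lock-free: %d\n", (int)__atomic_always_lock_free(8, 0));
  return 0;
}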