#include <stdlib.h>

/* Shared test state: each store writes count's successor into v and
   the checks verify the stored value round-trips.  */
int v, count;

int
main ()
{
  v = 0;
  count = 0;

  __atomic_store_n (&v, count + 1, __ATOMIC_RELAXED);
  if (v != ++count)
    abort ();

  __atomic_store_n (&v, count + 1, __ATOMIC_RELEASE);
  if (v != ++count)
    abort ();

  __atomic_store_n (&v, count + 1, __ATOMIC_SEQ_CST);
  if (v != ++count)
    abort ();

  /* Now test the generic variant.  */
  count++;

  __atomic_store (&v, &count, __ATOMIC_RELAXED);
  if (v != count++)
    abort ();

  __atomic_store (&v, &count, __ATOMIC_RELEASE);
  if (v != count++)
    abort ();

  __atomic_store (&v, &count, __ATOMIC_SEQ_CST);
  if (v != count)
    abort ();

  return 0;
}
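/* A hedged aside (not part of the test above): __atomic_store_n takes
   the new value directly, while the generic __atomic_store takes a
   pointer to it and works for objects of any trivially copyable size.
   A minimal sketch with ad-hoc names: */
#include <stdint.h>

static int flag;
static struct { uint64_t a, b; } wide, wide_src;

static void store_both_forms (void)
{
  __atomic_store_n (&flag, 1, __ATOMIC_RELEASE);        /* by value */
  __atomic_store (&wide, &wide_src, __ATOMIC_SEQ_CST);  /* by pointer */
}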
void structAtomicStore() {
  struct foo f = {0};
  __c11_atomic_store(&bigAtomic, f, 5); // expected-error {{atomic store requires runtime support that is not available for this target}}

  struct bar b = {0};
  __atomic_store(&smallThing, &b, 5);
  __atomic_store(&bigThing, &f, 5);
}
/* Test for consistency on sizes 1, 2, 4, 8, 16 and 32.  */
int
main ()
{
  test_struct c;

  __atomic_store (&a, &zero, __ATOMIC_RELAXED);
  if (memcmp (&a, &zero, size))
    abort ();

  __atomic_exchange (&a, &ones, &c, __ATOMIC_SEQ_CST);
  if (memcmp (&c, &zero, size))
    abort ();
  if (memcmp (&a, &ones, size))
    abort ();

  __atomic_load (&a, &b, __ATOMIC_RELAXED);
  if (memcmp (&b, &ones, size))
    abort ();

  if (!__atomic_compare_exchange (&a, &b, &zero, false, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE))
    abort ();
  if (memcmp (&a, &zero, size))
    abort ();

  if (__atomic_compare_exchange (&a, &b, &ones, false, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE))
    abort ();
  if (memcmp (&b, &zero, size))
    abort ();

  return 0;
}
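/* Hedged scaffold for the identifiers the test leaves to its harness
   (test_struct, a, b, zero, ones and size are all assumptions here;
   the real test presumably compiles once per size): */
#include <stdlib.h>
#include <string.h>

typedef struct { char bytes[16]; } test_struct;

static test_struct a, b;
static test_struct zero;  /* left all-0x00 */
static test_struct ones;  /* memset to 0xff before the checks run */
static const size_t size = sizeof (test_struct);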
static inline void
atomic_daccum (double *p, const double val)
{
#if defined(ATOMIC_FP_FE_EMUL)
  double pv, upd;
  int done = 0;
  do {
    __atomic_load ((int64_t*)p, (int64_t*)&pv, __ATOMIC_ACQUIRE);
    if (__atomic_compare_exchange ((int64_t*)p, (int64_t*)&pv, (int64_t*)&NAN_EMPTY,
                                   1, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE)) {
      upd = pv + val;
      __atomic_store ((int64_t*)p, (int64_t*)&upd, __ATOMIC_RELEASE);
      done = 1;
    } else
      MM_PAUSE();
  } while (!done);
#elif defined(ATOMIC_FP_OPTIMISTIC)
  double pv, upd;
  __atomic_load ((int64_t*)p, (int64_t*)&pv, __ATOMIC_ACQUIRE);
  do {
    upd = pv + val;
    if (__atomic_compare_exchange ((int64_t*)p, (int64_t*)&pv, (int64_t*)&upd,
                                   1, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE))
      break;
    else
      MM_PAUSE();
  } while (1);
#else
  OMP(omp atomic)
  *p += val;
#endif
}
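/* A hedged distillation of the ATOMIC_FP_OPTIMISTIC branch above, with
   the MM_PAUSE/OMP macros dropped; assumes double and int64_t are both
   8 bytes, as the casts in the original already do: */
#include <stdint.h>

static inline void daccum_sketch (double *p, double val)
{
  double expected, desired;
  __atomic_load ((int64_t *)p, (int64_t *)&expected, __ATOMIC_ACQUIRE);
  do {
    desired = expected + val;
    /* On failure the builtin refreshes expected with the current value. */
  } while (!__atomic_compare_exchange ((int64_t *)p, (int64_t *)&expected,
                                       (int64_t *)&desired, 1 /* weak */,
                                       __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE));
}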
int addValueForNextLogEntry(FIELD_NAME name, FIELD_TYPE field) {
    // Must be atomic
    __atomic_store(&buffer[name], &field, __ATOMIC_SEQ_CST);
    // Must be atomic
    _changeBufferStatus(FIELDS_BUFFER_DIRTY);
    return 0;
}
void atomic_fifo_put(struct atomic_fifo *af, void *item) {
    struct atomic_fifo *af_item;

    af_item = malloc(sizeof(struct atomic_fifo));
    af_item->data = item;
    af_item->next = NULL;
    __atomic_store(&af->next, &af_item, __ATOMIC_SEQ_CST);
}
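/* Hedged sketch: the single store above publishes the node but lets two
   concurrent producers overwrite each other's af->next.  A
   compare-exchange loop (struct and field names assumed from the
   snippet above) makes the append lock-free: */
void atomic_fifo_put_cas(struct atomic_fifo *af, void *item) {
    struct atomic_fifo *af_item = malloc(sizeof(*af_item));
    struct atomic_fifo *expected = NULL;

    if (!af_item)
        return;
    af_item->data = item;
    af_item->next = NULL;
    /* Swing af->next from NULL to our node; on failure, follow the
       node that beat us and retry at the new tail. */
    while (!__atomic_compare_exchange_n(&af->next, &expected, af_item, 0,
                                        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
        af = expected;
        expected = NULL;
    }
}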
int __dumpLog(FILE* stream, uint64_t current_timestamp_micro) {
    if(!stream) {
        return -1;
    }

    // Must be atomic
    if(_readBufferStatus() != FIELDS_BUFFER_DIRTY) {
        return 0;
    }
    // Must be atomic
    _changeBufferStatus(FIELDS_BUFFER_CLEAN);

    // Make our copy; the shared buffer is the side that must be read atomically
    for(size_t i=0; i < sizeof_array(temp_buffer); i++) {
        // Must be atomic
        __atomic_load(&buffer[i], &temp_buffer[i], __ATOMIC_SEQ_CST);
    }

    // Use 20 chars because this is the biggest field that fits in uint64_t, +1 for the comma, +1 for the null character
    char timestamp_buffer[20+1+1];
    int string_length = snprintf(timestamp_buffer, sizeof(timestamp_buffer), "%" PRIu64 ",", current_timestamp_micro);
    fwrite(timestamp_buffer, 1, string_length, stream);

    // Start from the second item because we handle the timestamp apart
    for(size_t i=1; i < sizeof_array(temp_buffer); i++) {
        char field_buffer[FIELDS_PRECISION_INTEGRAL+1+FIELDS_PRECISION_DECIMAL+1]; // digits + the dot + null character
        int string_length = snprintf(field_buffer, sizeof(field_buffer),
                                     "%" FIELDS_PRECISION_INTEGRAL_STRING "." FIELDS_PRECISION_DECIMAL_STRING "f",
                                     (double)temp_buffer[i]);
        fwrite(field_buffer, 1, string_length, stream);
        // Print separator only if not last field on the line
        if(i+1 != sizeof_array(temp_buffer)) {
            fwrite(SEPARATOR_STRING, 1, sizeof(SEPARATOR_STRING) - 1, stream); // don't write the NUL
        }
    }
    fwrite("\n", 1, 1, stream);
    fflush(stream);
    return 0;
}
inline void OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) {
  __atomic_store(p, &v, __ATOMIC_RELAXED);
  fence();
}
void
foo (char *s)
{
  __atomic_store (s, (void *) 0, __ATOMIC_SEQ_CST); /* { dg-error "size mismatch" } */
}
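/* Hedged fix-up of the rejected call above: the generic builtin wants a
   pointer to a same-sized object as its second argument, not a null
   pointer literal; foo_fixed is an illustrative name: */
void
foo_fixed (char *s)
{
  char zero = 0;
  __atomic_store (s, &zero, __ATOMIC_SEQ_CST);
}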
inline void OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) {
  __atomic_store(p, &v, __ATOMIC_RELEASE);
}
static inline void _changeBufferStatus(LOGGING_BUFFER_STATUS status) {
    __atomic_store(&buffer_status, &status, __ATOMIC_SEQ_CST);
}
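/* Hedged counterpart to the setter above: the _readBufferStatus the
   dump routine calls is presumably the mirror-image atomic load
   (its body is assumed here, only its name appears in the source): */
static inline LOGGING_BUFFER_STATUS _readBufferStatus(void) {
    LOGGING_BUFFER_STATUS status;
    __atomic_load(&buffer_status, &status, __ATOMIC_SEQ_CST);
    return status;
}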
inline void OrderAccess::release_store(volatile jfloat* p, jfloat v) {
  __atomic_store(p, &v, __ATOMIC_RELEASE);
}

inline void OrderAccess::release_store(volatile jdouble* p, jdouble v) {
  __atomic_store(p, &v, __ATOMIC_RELEASE);
}
void unsafe_store(T element) noexcept {
  // Two independent 8-byte atomic stores; the 16-byte object as a
  // whole is not stored atomically, hence "unsafe".
  __atomic_store(reinterpret_cast<uint64_t*>(&mElement),
                 reinterpret_cast<uint64_t*>(&element),
                 std::memory_order_seq_cst);
  __atomic_store(reinterpret_cast<uint64_t*>(&mElement) + 1,
                 reinterpret_cast<uint64_t*>(&element) + 1,
                 std::memory_order_seq_cst);
}
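/* Hedged alternative in plain C: handing the whole 16-byte object to
   the generic builtin keeps the store atomic as a unit (the compiler
   inlines a 16-byte instruction where one exists, else calls
   libatomic); pair16 and store_whole are illustrative names: */
#include <stdint.h>

typedef struct { uint64_t lo, hi; } pair16;

static void store_whole(pair16 *dst, pair16 src) {
  __atomic_store(dst, &src, __ATOMIC_SEQ_CST);
}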
inline void OrderAccess::release_store_ptr(volatile void* p, void* v) {
  __atomic_store((void* volatile *)p, &v, __ATOMIC_RELEASE);
}
inline void OrderAccess::store_fence(jdouble* p, jdouble v) {
  __atomic_store(p, &v, __ATOMIC_RELAXED);
  fence();
}

inline void OrderAccess::store_fence(jfloat* p, jfloat v) {
  __atomic_store(p, &v, __ATOMIC_RELAXED);
  fence();
}

inline void OrderAccess::store_fence(julong* p, julong v) {
  __atomic_store(p, &v, __ATOMIC_RELAXED);
  fence();
}
void test1(void) {
  (void)__atomic_load(&c1, &c2, memory_order_seq_cst);
  (void)__atomic_store(&c1, &c2, memory_order_seq_cst);
  (void)__atomic_load(&s1, &s2, memory_order_seq_cst);
  (void)__atomic_store(&s1, &s2, memory_order_seq_cst);
  (void)__atomic_load(&i1, &i2, memory_order_seq_cst);
  (void)__atomic_store(&i1, &i2, memory_order_seq_cst);
  (void)__atomic_load(&ll1, &ll2, memory_order_seq_cst);
  (void)__atomic_store(&ll1, &ll2, memory_order_seq_cst);
  (void)__atomic_load(&a1, &a2, memory_order_seq_cst);
  (void)__atomic_store(&a1, &a2, memory_order_seq_cst);

// ARM-LABEL: define{{.*}} void @test1
// ARM: = call{{.*}} zeroext i8 @__atomic_load_1(i8* @c1
// ARM: call{{.*}} void @__atomic_store_1(i8* @c1, i8 zeroext
// ARM: = call{{.*}} zeroext i16 @__atomic_load_2(i8* bitcast (i16* @s1 to i8*)
// ARM: call{{.*}} void @__atomic_store_2(i8* bitcast (i16* @s1 to i8*), i16 zeroext
// ARM: = call{{.*}} i32 @__atomic_load_4(i8* bitcast (i32* @i1 to i8*)
// ARM: call{{.*}} void @__atomic_store_4(i8* bitcast (i32* @i1 to i8*), i32
// ARM: = call{{.*}} i64 @__atomic_load_8(i8* bitcast (i64* @ll1 to i8*)
// ARM: call{{.*}} void @__atomic_store_8(i8* bitcast (i64* @ll1 to i8*), i64
// ARM: call{{.*}} void @__atomic_load(i32 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0)
// ARM: call{{.*}} void @__atomic_store(i32 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0)

// PPC32-LABEL: define void @test1
// PPC32: = load atomic i8, i8* @c1 seq_cst
// PPC32: store atomic i8 {{.*}}, i8* @c1 seq_cst
// PPC32: = load atomic i16, i16* @s1 seq_cst
// PPC32: store atomic i16 {{.*}}, i16* @s1 seq_cst
// PPC32: = load atomic i32, i32* @i1 seq_cst
// PPC32: store atomic i32 {{.*}}, i32* @i1 seq_cst
// PPC32: = call i64 @__atomic_load_8(i8* bitcast (i64* @ll1 to i8*)
// PPC32: call void @__atomic_store_8(i8* bitcast (i64* @ll1 to i8*), i64
// PPC32: call void @__atomic_load(i32 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0)
// PPC32: call void @__atomic_store(i32 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0)

// PPC64-LABEL: define void @test1
// PPC64: = load atomic i8, i8* @c1 seq_cst
// PPC64: store atomic i8 {{.*}}, i8* @c1 seq_cst
// PPC64: = load atomic i16, i16* @s1 seq_cst
// PPC64: store atomic i16 {{.*}}, i16* @s1 seq_cst
// PPC64: = load atomic i32, i32* @i1 seq_cst
// PPC64: store atomic i32 {{.*}}, i32* @i1 seq_cst
// PPC64: = load atomic i64, i64* @ll1 seq_cst
// PPC64: store atomic i64 {{.*}}, i64* @ll1 seq_cst
// PPC64: call void @__atomic_load(i64 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0)
// PPC64: call void @__atomic_store(i64 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0)

// MIPS32-LABEL: define void @test1
// MIPS32: = load atomic i8, i8* @c1 seq_cst
// MIPS32: store atomic i8 {{.*}}, i8* @c1 seq_cst
// MIPS32: = load atomic i16, i16* @s1 seq_cst
// MIPS32: store atomic i16 {{.*}}, i16* @s1 seq_cst
// MIPS32: = load atomic i32, i32* @i1 seq_cst
// MIPS32: store atomic i32 {{.*}}, i32* @i1 seq_cst
// MIPS32: call i64 @__atomic_load_8(i8* bitcast (i64* @ll1 to i8*)
// MIPS32: call void @__atomic_store_8(i8* bitcast (i64* @ll1 to i8*), i64
// MIPS32: call void @__atomic_load(i32 signext 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0)
// MIPS32: call void @__atomic_store(i32 signext 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0)

// MIPS64-LABEL: define void @test1
// MIPS64: = load atomic i8, i8* @c1 seq_cst
// MIPS64: store atomic i8 {{.*}}, i8* @c1 seq_cst
// MIPS64: = load atomic i16, i16* @s1 seq_cst
// MIPS64: store atomic i16 {{.*}}, i16* @s1 seq_cst
// MIPS64: = load atomic i32, i32* @i1 seq_cst
// MIPS64: store atomic i32 {{.*}}, i32* @i1 seq_cst
// MIPS64: = load atomic i64, i64* @ll1 seq_cst
// MIPS64: store atomic i64 {{.*}}, i64* @ll1 seq_cst
// MIPS64: call void @__atomic_load(i64 zeroext 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0)
// MIPS64: call void @__atomic_store(i64 zeroext 100, i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a1, i32 0, i32 0), i8* getelementptr inbounds ([100 x i8], [100 x i8]* @a2, i32 0, i32 0)
}
inline void OrderAccess::store_ptr_fence(void** p, void* v) {
  __atomic_store(p, &v, __ATOMIC_RELAXED);
  fence();
}
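/* Hedged aside on the pattern above: the store_fence variants pair a
   relaxed __atomic_store with a trailing full barrier.  One plausible
   fence() in terms of the same builtin family (the real HotSpot port
   may implement it differently): */
static inline void fence_sketch(void) {
  __atomic_thread_fence(__ATOMIC_SEQ_CST);
}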
void test_presence(void) {
  // CHECK-LABEL: @test_presence
  // CHECK: atomicrmw add i32* {{.*}} seq_cst
  __atomic_fetch_add(&i, 1, memory_order_seq_cst);
  // CHECK: atomicrmw sub i32* {{.*}} seq_cst
  __atomic_fetch_sub(&i, 1, memory_order_seq_cst);
  // CHECK: load atomic i32, i32* {{.*}} seq_cst
  int r;
  __atomic_load(&i, &r, memory_order_seq_cst);
  // CHECK: store atomic i32 {{.*}} seq_cst
  r = 0;
  __atomic_store(&i, &r, memory_order_seq_cst);

  // CHECK: __atomic_fetch_add_8
  __atomic_fetch_add(&l, 1, memory_order_seq_cst);
  // CHECK: __atomic_fetch_sub_8
  __atomic_fetch_sub(&l, 1, memory_order_seq_cst);
  // CHECK: __atomic_load_8
  long long rl;
  __atomic_load(&l, &rl, memory_order_seq_cst);
  // CHECK: __atomic_store_8
  rl = 0;
  __atomic_store(&l, &rl, memory_order_seq_cst);
}
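/* Hedged companion sketch: whether calls like the ones above inline
   (atomicrmw / store atomic) or fall back to libatomic
   (__atomic_store_8) can be queried with the lock-free predicate;
   the variables here mirror the i and l of the test but are declared
   locally to keep the sketch self-contained: */
#include <stdio.h>

static int qi;
static long long ql;

int query_lock_freedom(void) {
  printf("int lock-free: %d\n", __atomic_is_lock_free(sizeof qi, &qi));
  printf("long long lock-free: %d\n", __atomic_is_lock_free(sizeof ql, &ql));
  return 0;
}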