/*
 * Exercises the legacy __sync_* atomic builtins (plus one __atomic_*
 * builtin) on a 32-bit global `foo` defined elsewhere in this file --
 * presumably initialized to 0x5a5a5a5a, since that is what the first
 * CHECK expects (TODO confirm against the definition site).
 *
 * NOTE(review): the `// CHECK:` comments are FileCheck directives
 * matched against the program's stdout; do not edit or reorder them.
 * Every expected value depends on the exact sequence of operations.
 */
int main() {
  // Small constants use immediate field
  printf("0x%08x\n", __sync_fetch_and_add(&foo, 1)); // CHECK: 0x5a5a5a5a
  printf("0x%08x\n", __sync_add_and_fetch(&foo, 1)); // CHECK: 0x5a5a5a5c
  printf("0x%08x\n", __sync_add_and_fetch(&foo, 1)); // CHECK: 0x5a5a5a5d
  printf("0x%08x\n", __sync_fetch_and_add(&foo, 1)); // CHECK: 0x5a5a5a5d
  // Large constants require a separate load.
  printf("0x%08x\n", __sync_add_and_fetch(&foo, 0x10000000)); // CHECK: 0x6a5a5a5e
  printf("0x%08x\n", __sync_sub_and_fetch(&foo, 0x20000000)); // CHECK: 0x4a5a5a5e
  printf("0x%08x\n", __sync_and_and_fetch(&foo, 0xf0ffffff)); // CHECK: 0x405a5a5e
  printf("0x%08x\n", __sync_or_and_fetch(&foo, 0x0f000000)); // CHECK: 0x4f5a5a5e
  printf("0x%08x\n", __sync_xor_and_fetch(&foo, 0x05000000)); // CHECK: 0x4a5a5a5e
  // Small constants. These will generate immediate instructions. Test for all forms.
  printf("0x%08x\n", __sync_sub_and_fetch(&foo, 1)); // CHECK: 0x4a5a5a5d
  printf("0x%08x\n", __sync_and_and_fetch(&foo, 1)); // CHECK: 0x00000001
  printf("0x%08x\n", __sync_or_and_fetch(&foo, 2)); // CHECK: 0x00000003
  printf("0x%08x\n", __sync_xor_and_fetch(&foo, 0xffffffff)); // CHECK: 0xfffffffc
  printf("0x%08x\n", __sync_nand_and_fetch(&foo, 0x5fffffff)); // CHECK: 0xa0000003
  // Compare and swap: val_* returns the OLD value, bool_* returns success.
  foo = 2;
  // successful
  printf("0x%08x\n", __sync_val_compare_and_swap(&foo, 2, 3)); // CHECK: 0x00000002
  printf("0x%08x\n", foo); // CHECK: 0x00000003
  // not successful
  printf("0x%08x\n", __sync_val_compare_and_swap(&foo, 2, 4)); // CHECK: 0x00000003
  printf("0x%08x\n", foo); // CHECK: 0x00000003
  // not successful
  printf("0x%08x\n", __sync_bool_compare_and_swap(&foo, 2, 10)); // CHECK: 0x00000000
  printf("0x%08x\n", foo); // CHECK: 0x00000003
  // successful
  printf("0x%08x\n", __sync_bool_compare_and_swap(&foo, 3, 10)); // CHECK: 0x00000001
  printf("0x%08x\n", foo); // CHECK: 0x0000000a
  // Unlock: __sync_lock_release stores 0 with release semantics.
  foo = 1;
  __sync_lock_release(&foo);
  printf("foo = %d\n", foo); // CHECK: foo = 0
  // Swap: __atomic_exchange_n returns the previous value.
  foo = 0x12;
  printf("old value 0x%08x\n",
         __atomic_exchange_n(&foo, 0x17, __ATOMIC_RELEASE)); // CHECK: 0x00000012
  printf("new value 0x%08x\n", foo); // CHECK: new value 0x00000017
  return 0;
}
/**
 * Subtract an unsigned step count from this atomic boolean.  Each unit
 * step "flips" the state, so only the parity of the operand matters:
 * an odd operand toggles the stored bit atomically, an even one is a
 * no-op.  Returns a reference to this instance.
 */
abool & abool::operator-=( uint64_t aValue )
{
	if ((aValue & 0x01) != 0) {
		__sync_xor_and_fetch(&_value, 0x01);
	}
	return *this;
}
/**
 * Subtract a signed atomic-integer step count from this atomic boolean.
 * An odd positive operand flips the stored bit; an even one leaves it
 * unchanged.  Returns a reference to this instance.
 *
 * NOTE(review): for negative odd values the remainder is -1, so the
 * `> 0` test is false and the state is NOT flipped -- behavior kept
 * exactly as the original; confirm this asymmetry is intended.
 */
abool & abool::operator-=( const aint64_t & aValue )
{
	int64_t steps = (int64_t)aValue;
	if (steps % 2 > 0) {
		__sync_xor_and_fetch(&_value, 0x01);
	}
	return *this;
}
/*
 * Half-word-slot torture test of every __sync_* read-modify-write form.
 * AL is an array defined elsewhere; the checks assume the slots compared
 * against 0 start at 0 and those compared against -1 start at -1 --
 * TODO confirm against the array's initializer.
 *
 * __sync_fetch_and_OP returns the OLD value; __sync_OP_and_fetch the NEW.
 * The nand checks here match the modern semantics ~(old & operand).
 */
static void do_hi (void)
{
  /* fetch-then-op: old value comes back. */
  if (__sync_fetch_and_add(AL+4, 1) != 0) abort ();
  if (__sync_fetch_and_add(AL+5, 4) != 0) abort ();
  if (__sync_fetch_and_add(AL+6, 22) != 0) abort ();
  if (__sync_fetch_and_sub(AL+7, 12) != 0) abort ();
  if (__sync_fetch_and_and(AL+8, 7) != -1) abort ();
  if (__sync_fetch_and_or(AL+9, 8) != 0) abort ();
  if (__sync_fetch_and_xor(AL+10, 9) != 0) abort ();
  if (__sync_fetch_and_nand(AL+11, 7) != -1) abort ();
  /* op-then-fetch: new value comes back. */
  if (__sync_add_and_fetch(AL+12, 1) != 1) abort ();
  if (__sync_sub_and_fetch(AL+13, 12) != -12) abort ();
  if (__sync_and_and_fetch(AL+14, 7) != 7) abort ();
  if (__sync_or_and_fetch(AL+15, 8) != 8) abort ();
  if (__sync_xor_and_fetch(AL+16, 9) != 9) abort ();
  if (__sync_nand_and_fetch(AL+17, 7) != ~7) abort ();
}
/*
 * Byte-slot variant of the same torture test, operating on the AI array
 * (defined elsewhere; presumably a char array -- the (char) casts on the
 * expected values narrow them to the element width).  Same contract as
 * do_hi: fetch_and_OP returns the OLD value, OP_and_fetch the NEW one.
 */
static void do_qi (void)
{
  /* fetch-then-op: old value comes back (narrowed to char). */
  if (__sync_fetch_and_add(AI+4, 1) != 0) abort ();
  if (__sync_fetch_and_add(AI+5, 4) != 0) abort ();
  if (__sync_fetch_and_add(AI+6, 22) != 0) abort ();
  if (__sync_fetch_and_sub(AI+7, 12) != 0) abort ();
  if (__sync_fetch_and_and(AI+8, 7) != (char)-1) abort ();
  if (__sync_fetch_and_or(AI+9, 8) != 0) abort ();
  if (__sync_fetch_and_xor(AI+10, 9) != 0) abort ();
  if (__sync_fetch_and_nand(AI+11, 7) != (char)-1) abort ();
  /* op-then-fetch: new value comes back. */
  if (__sync_add_and_fetch(AI+12, 1) != 1) abort ();
  if (__sync_sub_and_fetch(AI+13, 12) != (char)-12) abort ();
  if (__sync_and_and_fetch(AI+14, 7) != 7) abort ();
  if (__sync_or_and_fetch(AI+15, 8) != 8) abort ();
  if (__sync_xor_and_fetch(AI+16, 9) != 9) abort ();
  if (__sync_nand_and_fetch(AI+17, 7) != (char)~7) abort ();
}
/**
 * These are the standard RHS operators for this guy, and are all
 * handled in a consistent, thread-safe way.  For a boolean, each
 * "move" flips the state, so adding a true toggles the stored bit
 * atomically while adding a false leaves it untouched.
 */
abool & abool::operator+=( bool aValue )
{
	if (!aValue) {
		return *this;
	}
	__sync_xor_and_fetch(&_value, 0x01);
	return *this;
}
/**
 * Subtract another atomic boolean from this one.  A true operand flips
 * this instance's state atomically; a false operand does nothing.
 * Returns a reference to this instance.
 */
abool & abool::operator-=( const abool & aValue )
{
	const bool flip = (bool)aValue;
	if (flip) {
		__sync_xor_and_fetch(&_value, 0x01);
	}
	return *this;
}
/*
 * Drives __sync_OP_and_fetch for every arithmetic/logic OP across all
 * integer widths.  sc/uc (signed/unsigned char), ss/us (short),
 * si/ui (int), sll/ull (long long) are file-scope globals defined
 * elsewhere; each result is stored back into its operand so the
 * compiler must materialize the returned (new) value.
 */
void test_op_and_fetch (void)
{
  /* add */
  sc = __sync_add_and_fetch (&sc, uc);
  uc = __sync_add_and_fetch (&uc, uc);
  ss = __sync_add_and_fetch (&ss, uc);
  us = __sync_add_and_fetch (&us, uc);
  si = __sync_add_and_fetch (&si, uc);
  ui = __sync_add_and_fetch (&ui, uc);
  sll = __sync_add_and_fetch (&sll, uc);
  ull = __sync_add_and_fetch (&ull, uc);
  /* sub */
  sc = __sync_sub_and_fetch (&sc, uc);
  uc = __sync_sub_and_fetch (&uc, uc);
  ss = __sync_sub_and_fetch (&ss, uc);
  us = __sync_sub_and_fetch (&us, uc);
  si = __sync_sub_and_fetch (&si, uc);
  ui = __sync_sub_and_fetch (&ui, uc);
  sll = __sync_sub_and_fetch (&sll, uc);
  ull = __sync_sub_and_fetch (&ull, uc);
  /* or */
  sc = __sync_or_and_fetch (&sc, uc);
  uc = __sync_or_and_fetch (&uc, uc);
  ss = __sync_or_and_fetch (&ss, uc);
  us = __sync_or_and_fetch (&us, uc);
  si = __sync_or_and_fetch (&si, uc);
  ui = __sync_or_and_fetch (&ui, uc);
  sll = __sync_or_and_fetch (&sll, uc);
  ull = __sync_or_and_fetch (&ull, uc);
  /* xor */
  sc = __sync_xor_and_fetch (&sc, uc);
  uc = __sync_xor_and_fetch (&uc, uc);
  ss = __sync_xor_and_fetch (&ss, uc);
  us = __sync_xor_and_fetch (&us, uc);
  si = __sync_xor_and_fetch (&si, uc);
  ui = __sync_xor_and_fetch (&ui, uc);
  sll = __sync_xor_and_fetch (&sll, uc);
  ull = __sync_xor_and_fetch (&ull, uc);
  /* and */
  sc = __sync_and_and_fetch (&sc, uc);
  uc = __sync_and_and_fetch (&uc, uc);
  ss = __sync_and_and_fetch (&ss, uc);
  us = __sync_and_and_fetch (&us, uc);
  si = __sync_and_and_fetch (&si, uc);
  ui = __sync_and_and_fetch (&ui, uc);
  sll = __sync_and_and_fetch (&sll, uc);
  ull = __sync_and_and_fetch (&ull, uc);
  /* nand */
  sc = __sync_nand_and_fetch (&sc, uc);
  uc = __sync_nand_and_fetch (&uc, uc);
  ss = __sync_nand_and_fetch (&ss, uc);
  us = __sync_nand_and_fetch (&us, uc);
  si = __sync_nand_and_fetch (&si, uc);
  ui = __sync_nand_and_fetch (&ui, uc);
  sll = __sync_nand_and_fetch (&sll, uc);
  ull = __sync_nand_and_fetch (&ull, uc);
}
/*
 * Same exercise as the sibling test_op_and_fetch, but here each
 * `// CHECK: atomicrmw ...` comment is a FileCheck directive verifying
 * that the compiler lowers every call to the corresponding LLVM
 * `atomicrmw` instruction.  Do not edit or reorder the CHECK lines.
 * The operands (sc/uc/ss/us/si/ui/sll/ull) are globals defined elsewhere.
 * NOTE(review): no nand group here -- presumably because __sync nand
 * lowers to a cmpxchg loop rather than a plain atomicrmw; confirm.
 */
void test_op_and_fetch (void)
{
  sc = __sync_add_and_fetch (&sc, uc); // CHECK: atomicrmw add
  uc = __sync_add_and_fetch (&uc, uc); // CHECK: atomicrmw add
  ss = __sync_add_and_fetch (&ss, uc); // CHECK: atomicrmw add
  us = __sync_add_and_fetch (&us, uc); // CHECK: atomicrmw add
  si = __sync_add_and_fetch (&si, uc); // CHECK: atomicrmw add
  ui = __sync_add_and_fetch (&ui, uc); // CHECK: atomicrmw add
  sll = __sync_add_and_fetch (&sll, uc); // CHECK: atomicrmw add
  ull = __sync_add_and_fetch (&ull, uc); // CHECK: atomicrmw add
  sc = __sync_sub_and_fetch (&sc, uc); // CHECK: atomicrmw sub
  uc = __sync_sub_and_fetch (&uc, uc); // CHECK: atomicrmw sub
  ss = __sync_sub_and_fetch (&ss, uc); // CHECK: atomicrmw sub
  us = __sync_sub_and_fetch (&us, uc); // CHECK: atomicrmw sub
  si = __sync_sub_and_fetch (&si, uc); // CHECK: atomicrmw sub
  ui = __sync_sub_and_fetch (&ui, uc); // CHECK: atomicrmw sub
  sll = __sync_sub_and_fetch (&sll, uc); // CHECK: atomicrmw sub
  ull = __sync_sub_and_fetch (&ull, uc); // CHECK: atomicrmw sub
  sc = __sync_or_and_fetch (&sc, uc); // CHECK: atomicrmw or
  uc = __sync_or_and_fetch (&uc, uc); // CHECK: atomicrmw or
  ss = __sync_or_and_fetch (&ss, uc); // CHECK: atomicrmw or
  us = __sync_or_and_fetch (&us, uc); // CHECK: atomicrmw or
  si = __sync_or_and_fetch (&si, uc); // CHECK: atomicrmw or
  ui = __sync_or_and_fetch (&ui, uc); // CHECK: atomicrmw or
  sll = __sync_or_and_fetch (&sll, uc); // CHECK: atomicrmw or
  ull = __sync_or_and_fetch (&ull, uc); // CHECK: atomicrmw or
  sc = __sync_xor_and_fetch (&sc, uc); // CHECK: atomicrmw xor
  uc = __sync_xor_and_fetch (&uc, uc); // CHECK: atomicrmw xor
  ss = __sync_xor_and_fetch (&ss, uc); // CHECK: atomicrmw xor
  us = __sync_xor_and_fetch (&us, uc); // CHECK: atomicrmw xor
  si = __sync_xor_and_fetch (&si, uc); // CHECK: atomicrmw xor
  ui = __sync_xor_and_fetch (&ui, uc); // CHECK: atomicrmw xor
  sll = __sync_xor_and_fetch (&sll, uc); // CHECK: atomicrmw xor
  ull = __sync_xor_and_fetch (&ull, uc); // CHECK: atomicrmw xor
  sc = __sync_and_and_fetch (&sc, uc); // CHECK: atomicrmw and
  uc = __sync_and_and_fetch (&uc, uc); // CHECK: atomicrmw and
  ss = __sync_and_and_fetch (&ss, uc); // CHECK: atomicrmw and
  us = __sync_and_and_fetch (&us, uc); // CHECK: atomicrmw and
  si = __sync_and_and_fetch (&si, uc); // CHECK: atomicrmw and
  ui = __sync_and_and_fetch (&ui, uc); // CHECK: atomicrmw and
  sll = __sync_and_and_fetch (&sll, uc); // CHECK: atomicrmw and
  ull = __sync_and_and_fetch (&ull, uc); // CHECK: atomicrmw and
}
/* Now check return values. */
/*
 * 64-bit (DImode) return-value checks for the __sync_* builtins.
 * AL is an array defined elsewhere; the expected constants below imply
 * its initial slot values (e.g. AL+0 starts at 0x100000002ll) -- TODO
 * confirm against the initializer.  The double-word constants exercise
 * carries/borrows across the 32-bit halves on 32-bit targets.
 */
static void do_ret_di (void)
{
  /* val CAS returns the OLD value; bool CAS returns success. */
  if (__sync_val_compare_and_swap (AL+0, 0x100000002ll, 0x1234567890ll) != 0x100000002ll) abort ();
  if (__sync_bool_compare_and_swap (AL+1, 0x200000003ll, 0x1234567890ll) != 1) abort ();
  if (__sync_lock_test_and_set (AL+2, 1) != 0) abort ();
  __sync_lock_release (AL+3); /* no return value, but keep to match results. */
  /* The following tests should not change the value since the
     original does NOT match.  */
  if (__sync_val_compare_and_swap (AL+4, 0x000000002ll, 0x1234567890ll) != 0x100000002ll) abort ();
  if (__sync_val_compare_and_swap (AL+5, 0x100000000ll, 0x1234567890ll) != 0x100000002ll) abort ();
  if (__sync_bool_compare_and_swap (AL+6, 0x000000002ll, 0x1234567890ll) != 0) abort ();
  if (__sync_bool_compare_and_swap (AL+7, 0x100000000ll, 0x1234567890ll) != 0) abort ();
  /* fetch-then-op: old value comes back. */
  if (__sync_fetch_and_add (AL+8, 1) != 0) abort ();
  if (__sync_fetch_and_add (AL+9, 0xb000e0000000ll) != 0x1000e0de0000ll) abort ();
  if (__sync_fetch_and_sub (AL+10, 22) != 42) abort ();
  if (__sync_fetch_and_sub (AL+11, 0xb000e0000000ll) != 0xc001c0de0000ll) abort ();
  if (__sync_fetch_and_and (AL+12, 0x300000007ll) != -1ll) abort ();
  if (__sync_fetch_and_or (AL+13, 0x500000009ll) != 0) abort ();
  if (__sync_fetch_and_xor (AL+14, 0xe00000001ll) != 0xff00ff0000ll) abort ();
  if (__sync_fetch_and_nand (AL+15, 0xa00000007ll) != -1ll) abort ();
  /* These should be the same as the fetch_and_* cases except for
     return value.  */
  if (__sync_add_and_fetch (AL+16, 1) != 1) abort ();
  if (__sync_add_and_fetch (AL+17, 0xb000e0000000ll) != 0xc001c0de0000ll) abort ();
  if (__sync_sub_and_fetch (AL+18, 22) != 20) abort ();
  if (__sync_sub_and_fetch (AL+19, 0xb000e0000000ll) != 0x1000e0de0000ll) abort ();
  if (__sync_and_and_fetch (AL+20, 0x300000007ll) != 0x300000007ll) abort ();
  if (__sync_or_and_fetch (AL+21, 0x500000009ll) != 0x500000009ll) abort ();
  if (__sync_xor_and_fetch (AL+22, 0xe00000001ll) != 0xf100ff0001ll) abort ();
  if (__sync_nand_and_fetch (AL+23, 0xa00000007ll) != ~0xa00000007ll) abort ();
}
/*
 * Double-word-slot torture test of the __sync_* builtins on the AL
 * array (defined elsewhere).  Each CAS is issued twice: the first
 * succeeds (value matches), the second fails (value already changed).
 *
 * NOTE(review): the nand expectations here (fetch_and_nand != 0,
 * nand_and_fetch(…,7) != 7) only hold under the pre-GCC-4.4 semantics
 * `new = ~old & operand` with a zero-initialized slot; under the modern
 * semantics ~(old & operand) they would differ.  Confirm which compiler
 * generation this test targets.
 */
static void do_di (void)
{
  /* CAS pair: succeed, then fail because the slot was just updated. */
  if (__sync_val_compare_and_swap(AL+0, 0, 1) != 0) abort ();
  if (__sync_val_compare_and_swap(AL+0, 0, 1) != 1) abort ();
  if (__sync_bool_compare_and_swap(AL+1, 0, 1) != 1) abort ();
  if (__sync_bool_compare_and_swap(AL+1, 0, 1) != 0) abort ();
  if (__sync_lock_test_and_set(AL+2, 1) != 0) abort ();
  /* fetch-then-op: old value comes back. */
  if (__sync_fetch_and_add(AL+4, 1) != 0) abort ();
  if (__sync_fetch_and_add(AL+5, 4) != 0) abort ();
  if (__sync_fetch_and_add(AL+6, 22) != 0) abort ();
  if (__sync_fetch_and_sub(AL+7, 12) != 0) abort ();
  if (__sync_fetch_and_and(AL+8, 7) != -1) abort ();
  if (__sync_fetch_and_or(AL+9, 8) != 0) abort ();
  if (__sync_fetch_and_xor(AL+10, 9) != 0) abort ();
  if (__sync_fetch_and_nand(AL+11, 7) != 0) abort ();
  /* op-then-fetch: new value comes back. */
  if (__sync_add_and_fetch(AL+12, 1) != 1) abort ();
  if (__sync_sub_and_fetch(AL+13, 12) != -12) abort ();
  if (__sync_and_and_fetch(AL+14, 7) != 7) abort ();
  if (__sync_or_and_fetch(AL+15, 8) != 8) abort ();
  if (__sync_xor_and_fetch(AL+16, 9) != 9) abort ();
  if (__sync_nand_and_fetch(AL+17, 7) != 7) abort ();
}
/* First check they work in terms of what they do to memory.  */
/*
 * Memory-effect-only pass over the same 64-bit operations as
 * do_ret_di: every builtin is invoked with its result discarded, so
 * the compiler must still emit the read-modify-write even when the
 * returned value is unused.  The caller presumably verifies the AL
 * array contents afterwards -- confirm at the call site.
 */
static void do_noret_di (void)
{
  __sync_val_compare_and_swap (AL+0, 0x100000002ll, 0x1234567890ll);
  __sync_bool_compare_and_swap (AL+1, 0x200000003ll, 0x1234567890ll);
  __sync_lock_test_and_set (AL+2, 1);
  __sync_lock_release (AL+3);
  /* The following tests should not change the value since the
     original does NOT match.  */
  __sync_val_compare_and_swap (AL+4, 0x000000002ll, 0x1234567890ll);
  __sync_val_compare_and_swap (AL+5, 0x100000000ll, 0x1234567890ll);
  __sync_bool_compare_and_swap (AL+6, 0x000000002ll, 0x1234567890ll);
  __sync_bool_compare_and_swap (AL+7, 0x100000000ll, 0x1234567890ll);
  __sync_fetch_and_add (AL+8, 1);
  __sync_fetch_and_add (AL+9, 0xb000e0000000ll); /* + to both halves & carry.  */
  __sync_fetch_and_sub (AL+10, 22);
  __sync_fetch_and_sub (AL+11, 0xb000e0000000ll);
  __sync_fetch_and_and (AL+12, 0x300000007ll);
  __sync_fetch_and_or (AL+13, 0x500000009ll);
  __sync_fetch_and_xor (AL+14, 0xe00000001ll);
  __sync_fetch_and_nand (AL+15, 0xa00000007ll);
  /* These should be the same as the fetch_and_* cases except for
     return value.  */
  __sync_add_and_fetch (AL+16, 1); /* add to both halves & carry.  */
  __sync_add_and_fetch (AL+17, 0xb000e0000000ll);
  __sync_sub_and_fetch (AL+18, 22);
  __sync_sub_and_fetch (AL+19, 0xb000e0000000ll);
  __sync_and_and_fetch (AL+20, 0x300000007ll);
  __sync_or_and_fetch (AL+21, 0x500000009ll);
  __sync_xor_and_fetch (AL+22, 0xe00000001ll);
  __sync_nand_and_fetch (AL+23, 0xa00000007ll);
}
/*
 * Emscripten pthread stress test for the 64-bit __sync_*_and_fetch
 * builtins.  T, HILO, DUP, NUM_THREADS, the thread/threadArg arrays,
 * the *_and_fetch_data globals and the thread_* worker functions are
 * all defined elsewhere in this file.  Each section first verifies the
 * builtin single-threaded (both the returned NEW value and the updated
 * memory), then hammers a shared variable from NUM_THREADS workers and
 * checks the combined result.  HILO presumably packs separate values
 * into the high and low 32-bit halves so cross-half corruption is
 * detectable -- confirm against its definition.
 */
int main() {
  // add: each worker presumably performs 10000 DUP(1) additions.
  {
    T x = HILO(5, 3);
    T y = __sync_add_and_fetch(&x, DUP(1));
    assert(y == HILO(6, 4));
    assert(x == HILO(6, 4));
    volatile T n = HILO(2, 1);
    if (emscripten_has_threading_support()) {
      for(int i = 0; i < NUM_THREADS; ++i)
        pthread_create(&thread[i], NULL, thread_add_and_fetch, (void*)&n);
      for(int i = 0; i < NUM_THREADS; ++i)
        pthread_join(thread[i], NULL);
      printf("n: %llx\n", n);
      assert(n == HILO(NUM_THREADS*10000ULL+2ULL, NUM_THREADS*10000ULL+1ULL));
    }
  }
  // sub: workers subtract back down to the starting HILO(5, 3).
  {
    T x = HILO(15, 13);
    T y = __sync_sub_and_fetch(&x, HILO(10, 10));
    assert(y == HILO(5, 3));
    assert(x == HILO(5, 3));
    volatile T n = HILO(NUM_THREADS*10000ULL+5ULL, NUM_THREADS*10000ULL+3ULL);
    if (emscripten_has_threading_support()) {
      for(int i = 0; i < NUM_THREADS; ++i)
        pthread_create(&thread[i], NULL, thread_sub_and_fetch, (void*)&n);
      for(int i = 0; i < NUM_THREADS; ++i)
        pthread_join(thread[i], NULL);
      printf("n: %llx\n", n);
      assert(n == HILO(5,3));
    }
  }
  // or: each worker ORs in its own bit; the union must be all bits set.
  {
    T x = HILO(32768 + 5, 5);
    T y = __sync_or_and_fetch(&x, HILO(65536 + 9, 9));
    assert(y == HILO(32768 + 65536 + 13, 13));
    assert(x == HILO(32768 + 65536 + 13, 13));
    for(int x = 0; x < 100; ++x) // Test a few times for robustness, since this test is so short-lived.
    {
      or_and_fetch_data = HILO(65536 + (1<<NUM_THREADS), 1<<NUM_THREADS);
      if (emscripten_has_threading_support()) {
        for(int i = 0; i < NUM_THREADS; ++i) {
          threadArg[i] = DUP(1 << i);
          pthread_create(&thread[i], NULL, thread_or_and_fetch, (void*)&threadArg[i]);
        }
        for(int i = 0; i < NUM_THREADS; ++i)
          pthread_join(thread[i], NULL);
        assert(or_and_fetch_data == HILO(65536 + (1<<(NUM_THREADS+1))-1, (1<<(NUM_THREADS+1))-1));
      }
    }
  }
  // and: each worker clears its own bit; only the sentinel bit survives.
  {
    T x = HILO(32768 + 5, 5);
    T y = __sync_and_and_fetch(&x, HILO(32768 + 9, 9));
    assert(y == HILO(32768 + 1, 1));
    assert(x == HILO(32768 + 1, 1));
    if (emscripten_has_threading_support()) {
      for(int x = 0; x < 100; ++x) // Test a few times for robustness, since this test is so short-lived.
      {
        and_and_fetch_data = HILO(65536 + (1<<(NUM_THREADS+1))-1, (1<<(NUM_THREADS+1))-1);
        for(int i = 0; i < NUM_THREADS; ++i) {
          threadArg[i] = DUP(~(1UL<<i));
          pthread_create(&thread[i], NULL, thread_and_and_fetch, (void*)&threadArg[i]);
        }
        for(int i = 0; i < NUM_THREADS; ++i)
          pthread_join(thread[i], NULL);
        assert(and_and_fetch_data == HILO(65536 + (1<<NUM_THREADS), 1<<NUM_THREADS));
      }
    }
  }
  // xor: workers toggle bits an odd number of times so the flips stick.
  {
    T x = HILO(32768 + 5, 5);
    T y = __sync_xor_and_fetch(&x, HILO(16384 + 9, 9));
    assert(y == HILO(32768 + 16384 + 12, 12));
    assert(x == HILO(32768 + 16384 + 12, 12));
    if (emscripten_has_threading_support()) {
      for(int x = 0; x < 100; ++x) // Test a few times for robustness, since this test is so short-lived.
      {
        xor_and_fetch_data = HILO(32768 + (1<<NUM_THREADS), 1<<NUM_THREADS);
        for(int i = 0; i < NUM_THREADS; ++i) {
          threadArg[i] = DUP(~(1UL<<i));
          pthread_create(&thread[i], NULL, thread_xor_and_fetch, (void*)&threadArg[i]);
        }
        for(int i = 0; i < NUM_THREADS; ++i)
          pthread_join(thread[i], NULL);
        assert(xor_and_fetch_data == HILO(32768 + ((1<<(NUM_THREADS+1))-1), (1<<(NUM_THREADS+1))-1));
      }
    }
  }
  // XXX NAND support does not exist in Atomics API.
#if 0
  {
    T x = 5;
    T y = __sync_nand_and_fetch(&x, 9);
    assert(y == 5);
    assert(x == -2);
    const int oddNThreads = NUM_THREADS-1;
    for(int x = 0; x < 100; ++x) // Test a few times for robustness, since this test is so short-lived.
    {
      nand_and_fetch_data = 0;
      for(int i = 0; i < oddNThreads; ++i)
        pthread_create(&thread[i], NULL, thread_nand_and_fetch, (void*)-1);
      for(int i = 0; i < oddNThreads; ++i)
        pthread_join(thread[i], NULL);
      assert(nand_and_fetch_data == -1);
    }
  }
#endif
#ifdef REPORT_RESULT
  REPORT_RESULT(0);
#endif
}
void *thread_xor_and_fetch(void *arg) { for(int i = 0; i < 9999; ++i) // Odd number of times so that the operation doesn't cancel itself out. __sync_xor_and_fetch((T*)&xor_and_fetch_data, *(T*)arg); pthread_exit(0); }
/* Atomically XOR `val` (narrowed to the 8-bit element width) into *ptr
 * and return the resulting new value. */
UInt8 BitXorAtomic8(UInt32 val, UInt8* ptr)
{
    UInt8 updated = __sync_xor_and_fetch(ptr, val);
    return updated;
}
// Atomically XOR `value` into the wrapped _value and return the new result.
T xorAndFetch(T value)
{
    T result = __sync_xor_and_fetch(&_value, value);
    return result;
}
/* Atomically XOR `val` (narrowed to the 16-bit element width) into *ptr
 * and return the resulting new value. */
UInt16 BitXorAtomic16(UInt32 val, UInt16* ptr)
{
    UInt16 updated = __sync_xor_and_fetch(ptr, val);
    return updated;
}
/*
 * Runtime support for an OpenMP 'atomic' construct lowered by the
 * Nanos code generator.
 *
 *   op:       1 '+', 2 '-', 3 '*', 4 '/', 5 '&', 6 '|', 7 '^',
 *             10 pre/post-increment, 11 pre/post-decrement
 *   type:     0 integer, 1 float, 2 double
 *   variable: address of the variable being updated
 *   operand:  address of the RHS value (ignored for ops 10/11)
 *
 * Integer compound assignments and incr/decr map directly onto the
 * __sync_* builtins; doubles fall back to a compare-and-swap loop on
 * the 64-bit bit pattern.  Floats are not implemented and abort.
 */
void NANOS_atomic ( int op, int type, void * variable, void * operand )
{
    if ( ( type == 0 ) && ( op == 1 || op == 2 || op == 5 || op == 6 || op == 7) ) {
        /* Integer +=, -=, &=, |=, ^= via the matching atomic builtin. */
        printf("info: 'atomic' construct implemented using atomic builtins.\n");
        int tmp = *((int *) operand);
        switch (op) {
            case 1: __sync_add_and_fetch((int *) variable, tmp); break;
            case 2: __sync_sub_and_fetch((int *) variable, tmp); break;
            case 5: __sync_and_and_fetch((int *) variable, tmp); break;
            case 6: __sync_or_and_fetch((int *) variable, tmp); break;
            case 7: __sync_xor_and_fetch((int *) variable, tmp); break;
        }
    } else if ( ( type == 0 ) && ( op == 10 || op == 11) ) {
        /* Integer pre-/post- increment/decrement. */
        printf("info: 'atomic' construct implemented using atomic builtins.\n");
        if (op == 10)
            __sync_add_and_fetch((int *) variable, 1);
        else
            __sync_sub_and_fetch((int *) variable, 1);
    } else {
        /* Any other case: emulate with compare-and-exchange. */
        printf("info: 'atomic' construct implemented using compare and exchange.\n");
        if (type == 1) {
            /* Float type. */
            printf("Nanos support for Atomic access to floats is not yet implemented\n");
            abort();
        } else if (type == 2) {
            /* Double type.  Robustness fix: ops 10/11 never use the RHS,
               so do not dereference `operand` for them (it may be null). */
            double tmp = (op == 10 || op == 11) ? 0.0 : *((double *) operand);
            double oldval, newval;
            for (;;) {
                oldval = *((double *) variable);
                switch (op) {
                    case 1: newval = oldval + tmp; break;
                    case 2: newval = oldval - tmp; break;
                    case 3: newval = oldval * tmp; break;
                    case 4: newval = oldval / tmp; break;
                    case 10: newval = oldval + 1; break;
                    case 11: newval = oldval - 1; break;
                    default:
                        printf("Unhandled operation type while generating Nanos code for OpenMP atomic contruct.");
                        abort();
                }
                __sync_synchronize();
                /* BUG FIX: the original selected the CAS width with
                   sizeof(variable) -- the size of the POINTER, not of the
                   double it points to -- which picked the 4-byte CAS on
                   32-bit targets and corrupted the value.  A double is
                   always 8 bytes here, so always do a 64-bit CAS on the
                   bit pattern; memcpy avoids strict-aliasing UB that the
                   original's pointer type-punning invoked. */
                unsigned long long old_bits, new_bits;
                memcpy(&old_bits, &oldval, sizeof old_bits);
                memcpy(&new_bits, &newval, sizeof new_bits);
                if (__sync_bool_compare_and_swap((unsigned long long *) variable,
                                                 old_bits, new_bits))
                    break;
            }
        } else {
            printf("Unhandled variable type while generating Nanos code for OpenMP atomic contruct.");
            abort( );
        }
    }
}
/**
 * These are the prefix and postfix decrement operators, in that order.
 * For a boolean, a decrement simply "flips" the stored state from
 * true->false->true, etc.  The prefix form toggles the bit atomically
 * and hands back a reference to this same instance.
 */
abool & abool::operator--()
{
	__sync_xor_and_fetch(&_value, 0x01);
	return *this;
}
/**
 * Postfix decrement: capture the current state by value first, then
 * atomically flip the stored bit, and return the pre-decrement copy.
 */
abool abool::operator--(int)
{
	abool prior(*this);
	__sync_xor_and_fetch(&_value, 0x01);
	return prior;
}