/* FileCheck-driven runtime test of the __sync/__atomic builtins operating on
   the global `foo`.  NOTE(review): the first CHECK implies foo starts at
   0x5a5a5a5a — that initializer is outside this chunk; confirm. */
int main() {
  // Small constants use immediate field
  printf("0x%08x\n", __sync_fetch_and_add(&foo, 1)); // CHECK: 0x5a5a5a5a
  printf("0x%08x\n", __sync_add_and_fetch(&foo, 1)); // CHECK: 0x5a5a5a5c
  printf("0x%08x\n", __sync_add_and_fetch(&foo, 1)); // CHECK: 0x5a5a5a5d
  printf("0x%08x\n", __sync_fetch_and_add(&foo, 1)); // CHECK: 0x5a5a5a5d
  // Large constants require a separate load.
  printf("0x%08x\n", __sync_add_and_fetch(&foo, 0x10000000)); // CHECK: 0x6a5a5a5e
  printf("0x%08x\n", __sync_sub_and_fetch(&foo, 0x20000000)); // CHECK: 0x4a5a5a5e
  printf("0x%08x\n", __sync_and_and_fetch(&foo, 0xf0ffffff)); // CHECK: 0x405a5a5e
  printf("0x%08x\n", __sync_or_and_fetch(&foo, 0x0f000000)); // CHECK: 0x4f5a5a5e
  printf("0x%08x\n", __sync_xor_and_fetch(&foo, 0x05000000)); // CHECK: 0x4a5a5a5e
  // Small constants. These will generate immediate instructions. Test for all forms.
  printf("0x%08x\n", __sync_sub_and_fetch(&foo, 1)); // CHECK: 0x4a5a5a5d
  printf("0x%08x\n", __sync_and_and_fetch(&foo, 1)); // CHECK: 0x00000001
  printf("0x%08x\n", __sync_or_and_fetch(&foo, 2)); // CHECK: 0x00000003
  printf("0x%08x\n", __sync_xor_and_fetch(&foo, 0xffffffff)); // CHECK: 0xfffffffc
  printf("0x%08x\n", __sync_nand_and_fetch(&foo, 0x5fffffff)); // CHECK: 0xa0000003
  // Compare and swap
  foo = 2;
  // successful
  printf("0x%08x\n", __sync_val_compare_and_swap(&foo, 2, 3)); // CHECK: 0x00000002
  printf("0x%08x\n", foo); // CHECK: 0x00000003
  // not successful
  printf("0x%08x\n", __sync_val_compare_and_swap(&foo, 2, 4)); // CHECK: 0x00000003
  printf("0x%08x\n", foo); // CHECK: 0x00000003
  // not successful
  printf("0x%08x\n", __sync_bool_compare_and_swap(&foo, 2, 10)); // CHECK: 0x00000000
  printf("0x%08x\n", foo); // CHECK: 0x00000003
  // successful
  printf("0x%08x\n", __sync_bool_compare_and_swap(&foo, 3, 10)); // CHECK: 0x00000001
  printf("0x%08x\n", foo); // CHECK: 0x0000000a
  // Unlock
  foo = 1;
  __sync_lock_release(&foo);
  printf("foo = %d\n", foo); // CHECK: foo = 0
  // Swap
  foo = 0x12;
  printf("old value 0x%08x\n",
         __atomic_exchange_n(&foo, 0x17, __ATOMIC_RELEASE)); // CHECK: 0x00000012
  printf("new value 0x%08x\n", foo); // CHECK: new value 0x00000017
  return 0;
}
/**
 * This routine stops the given counter and, if needed, waits for the count to
 * drop to zero before returning.
 *
 * @param wc       counter to stop and then wait on
 * @param timeout  wait budget in milliseconds; negative means wait forever
 * @return true once only the STOPPED flag remains in wc->cnt,
 *         false if the futex wait timed out first
 */
bool waitable_counter_wait(waitable_counter_t *wc, int timeout)
{
    unsigned cnt;
    struct timespec ts;
    struct timespec *pts;
    int ret;

    /* A negative timeout selects an unbounded FUTEX_WAIT (NULL timespec). */
    if (timeout < 0) {
        pts = NULL;
    } else {
        pts = &ts;
        ts.tv_sec = timeout / 1000;
        ts.tv_nsec = (timeout % 1000) * 1000000;
    }

    /* Atomically raise the STOPPED flag (full barrier) and capture the
       resulting value, which still carries any outstanding count. */
    cnt = __sync_or_and_fetch(&wc->cnt, WAITABLE_COUNTER_STOPPED);
    while (cnt != WAITABLE_COUNTER_STOPPED) {
        /* Sleep until wc->cnt changes from the value we last observed;
           FUTEX_WAIT revalidates `cnt` atomically in the kernel. */
        ret = syscall(SYS_futex, &wc->cnt, FUTEX_WAIT | FUTEX_PRIVATE_FLAG,
                      cnt, pts, NULL, 0);
        if (ret == -1 && errno == ETIMEDOUT) {
            return false;
        }
        /* Volatile re-read so the compiler cannot cache wc->cnt. */
        cnt = *(unsigned volatile *)&wc->cnt;
    }
    return true;
}
/* ------------------------------------------------------------------------- */
/* Bring an idle pool back to life: flag it active and (re)launch one
   joinable worker thread per slot.  A pool that is already active is
   left untouched. */
void thread_pool_resume(struct thread_pool_t* pool)
{
    pthread_attr_t thread_attr;
    int worker_idx;

    /* Atomic read of the active flag (add of 0); bail out when the
       threads are already running. */
    if(__sync_fetch_and_add(&pool->active, 0))
        return;

    /* Mark the pool active so the workers know not to exit. */
    __sync_or_and_fetch(&pool->active, 1);

    /* For portability, request joinable threads explicitly. */
    pthread_attr_init(&thread_attr);
    pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);

    /* Spawn the workers, one per pool slot. */
    for(worker_idx = 0; worker_idx != pool->num_threads; ++worker_idx)
    {
        pthread_create(&pool->worker[worker_idx].thread, &thread_attr,
                       (void*(*)(void*))thread_pool_worker,
                       &pool->worker[worker_idx]);
    }

    /* Release the attribute object. */
    pthread_attr_destroy(&thread_attr);
}
/* Exercise every __sync fetch-and-op / op-and-fetch builtin once and check
   both return-value conventions (old value vs. new value).
   NOTE(review): the "_hi" suffix usually denotes half-word (short) operands,
   yet this indexes AL — confirm AL's element type matches the intent.
   The expected results assume initial slot contents set up elsewhere
   (e.g. AL[8]/AL[11] == -1, others 0) — not visible in this chunk. */
static void do_hi (void)
{
  /* fetch-and-op: the OLD value is returned. */
  if (__sync_fetch_and_add(AL+4, 1) != 0) abort ();
  if (__sync_fetch_and_add(AL+5, 4) != 0) abort ();
  if (__sync_fetch_and_add(AL+6, 22) != 0) abort ();
  if (__sync_fetch_and_sub(AL+7, 12) != 0) abort ();
  if (__sync_fetch_and_and(AL+8, 7) != -1) abort ();
  if (__sync_fetch_and_or(AL+9, 8) != 0) abort ();
  if (__sync_fetch_and_xor(AL+10, 9) != 0) abort ();
  if (__sync_fetch_and_nand(AL+11, 7) != -1) abort ();

  /* op-and-fetch: the NEW value is returned. */
  if (__sync_add_and_fetch(AL+12, 1) != 1) abort ();
  if (__sync_sub_and_fetch(AL+13, 12) != -12) abort ();
  if (__sync_and_and_fetch(AL+14, 7) != 7) abort ();
  if (__sync_or_and_fetch(AL+15, 8) != 8) abort ();
  if (__sync_xor_and_fetch(AL+16, 9) != 9) abort ();
  if (__sync_nand_and_fetch(AL+17, 7) != ~7) abort ();
}
/* Same builtin sweep as do_hi, with results truncated via (char) casts.
   NOTE(review): the "_qi" suffix usually denotes byte (char) operands,
   yet this indexes AI — confirm AI's element type matches the intent.
   Expected values assume initial slot contents set up outside this chunk. */
static void do_qi (void)
{
  /* fetch-and-op: the OLD value is returned. */
  if (__sync_fetch_and_add(AI+4, 1) != 0) abort ();
  if (__sync_fetch_and_add(AI+5, 4) != 0) abort ();
  if (__sync_fetch_and_add(AI+6, 22) != 0) abort ();
  if (__sync_fetch_and_sub(AI+7, 12) != 0) abort ();
  if (__sync_fetch_and_and(AI+8, 7) != (char)-1) abort ();
  if (__sync_fetch_and_or(AI+9, 8) != 0) abort ();
  if (__sync_fetch_and_xor(AI+10, 9) != 0) abort ();
  if (__sync_fetch_and_nand(AI+11, 7) != (char)-1) abort ();

  /* op-and-fetch: the NEW value is returned. */
  if (__sync_add_and_fetch(AI+12, 1) != 1) abort ();
  if (__sync_sub_and_fetch(AI+13, 12) != (char)-12) abort ();
  if (__sync_and_and_fetch(AI+14, 7) != 7) abort ();
  if (__sync_or_and_fetch(AI+15, 8) != 8) abort ();
  if (__sync_xor_and_fetch(AI+16, 9) != 9) abort ();
  if (__sync_nand_and_fetch(AI+17, 7) != (char)~7) abort ();
}
/**
 * This method allows a more deliberate setting of the value by
 * the caller. This is typically not used, but it can be in those
 * times when the explicit method call is cleaner to use.
 */
void abool::setValue( bool aValue )
{
	if (!aValue) {
		// clearing every bit atomically yields "false"
		__sync_and_and_fetch(&_value, 0x00);
	} else {
		// setting the low bit atomically yields "true"
		__sync_or_and_fetch(&_value, 0x01);
	}
}
/* Exercise each __sync *_and_fetch builtin at every integer width
   (char/short/int/long long, signed and unsigned).  Every result is
   stored back, forcing the "return the new value" form to be emitted. */
void test_op_and_fetch (void)
{
  /* add */
  sc = __sync_add_and_fetch (&sc, uc);
  uc = __sync_add_and_fetch (&uc, uc);
  ss = __sync_add_and_fetch (&ss, uc);
  us = __sync_add_and_fetch (&us, uc);
  si = __sync_add_and_fetch (&si, uc);
  ui = __sync_add_and_fetch (&ui, uc);
  sll = __sync_add_and_fetch (&sll, uc);
  ull = __sync_add_and_fetch (&ull, uc);

  /* sub */
  sc = __sync_sub_and_fetch (&sc, uc);
  uc = __sync_sub_and_fetch (&uc, uc);
  ss = __sync_sub_and_fetch (&ss, uc);
  us = __sync_sub_and_fetch (&us, uc);
  si = __sync_sub_and_fetch (&si, uc);
  ui = __sync_sub_and_fetch (&ui, uc);
  sll = __sync_sub_and_fetch (&sll, uc);
  ull = __sync_sub_and_fetch (&ull, uc);

  /* or */
  sc = __sync_or_and_fetch (&sc, uc);
  uc = __sync_or_and_fetch (&uc, uc);
  ss = __sync_or_and_fetch (&ss, uc);
  us = __sync_or_and_fetch (&us, uc);
  si = __sync_or_and_fetch (&si, uc);
  ui = __sync_or_and_fetch (&ui, uc);
  sll = __sync_or_and_fetch (&sll, uc);
  ull = __sync_or_and_fetch (&ull, uc);

  /* xor */
  sc = __sync_xor_and_fetch (&sc, uc);
  uc = __sync_xor_and_fetch (&uc, uc);
  ss = __sync_xor_and_fetch (&ss, uc);
  us = __sync_xor_and_fetch (&us, uc);
  si = __sync_xor_and_fetch (&si, uc);
  ui = __sync_xor_and_fetch (&ui, uc);
  sll = __sync_xor_and_fetch (&sll, uc);
  ull = __sync_xor_and_fetch (&ull, uc);

  /* and */
  sc = __sync_and_and_fetch (&sc, uc);
  uc = __sync_and_and_fetch (&uc, uc);
  ss = __sync_and_and_fetch (&ss, uc);
  us = __sync_and_and_fetch (&us, uc);
  si = __sync_and_and_fetch (&si, uc);
  ui = __sync_and_and_fetch (&ui, uc);
  sll = __sync_and_and_fetch (&sll, uc);
  ull = __sync_and_and_fetch (&ull, uc);

  /* nand */
  sc = __sync_nand_and_fetch (&sc, uc);
  uc = __sync_nand_and_fetch (&uc, uc);
  ss = __sync_nand_and_fetch (&ss, uc);
  us = __sync_nand_and_fetch (&us, uc);
  si = __sync_nand_and_fetch (&si, uc);
  ui = __sync_nand_and_fetch (&ui, uc);
  sll = __sync_nand_and_fetch (&sll, uc);
  ull = __sync_nand_and_fetch (&ull, uc);
}
/* OCaml stub: atomically OR `val` into byte `idx` of the bigarray `buf`
   and return the resulting byte as an OCaml int.
   Fixes vs. the original: the index is validated BEFORE a pointer is
   derived from it, and negative indices are rejected (the old check
   only caught idx >= dim[0]). */
CAMLprim value stub_atomic_or_fetch_uint8(value buf, value idx, value val)
{
  CAMLparam3(buf, idx, val);
  intnat c_idx = Int_val(idx);
  uint8_t c_val = (uint8_t)Int_val(val);

  /* Bounds-check first: reject negative as well as too-large indices. */
  if (c_idx < 0 || c_idx >= Caml_ba_array_val(buf)->dim[0])
    caml_invalid_argument("idx");

  /* Safe to form the element address now. */
  uint8_t *ptr = (uint8_t *)Caml_ba_data_val(buf) + c_idx;
  CAMLreturn(Val_int((uint8_t)__sync_or_and_fetch(ptr, c_val)));
}
/* Clang IR test: every __sync *_and_fetch call below must lower to the
   matching `atomicrmw` instruction; the FileCheck lines verify the IR. */
void test_op_and_fetch (void)
{
  sc = __sync_add_and_fetch (&sc, uc); // CHECK: atomicrmw add
  uc = __sync_add_and_fetch (&uc, uc); // CHECK: atomicrmw add
  ss = __sync_add_and_fetch (&ss, uc); // CHECK: atomicrmw add
  us = __sync_add_and_fetch (&us, uc); // CHECK: atomicrmw add
  si = __sync_add_and_fetch (&si, uc); // CHECK: atomicrmw add
  ui = __sync_add_and_fetch (&ui, uc); // CHECK: atomicrmw add
  sll = __sync_add_and_fetch (&sll, uc); // CHECK: atomicrmw add
  ull = __sync_add_and_fetch (&ull, uc); // CHECK: atomicrmw add

  sc = __sync_sub_and_fetch (&sc, uc); // CHECK: atomicrmw sub
  uc = __sync_sub_and_fetch (&uc, uc); // CHECK: atomicrmw sub
  ss = __sync_sub_and_fetch (&ss, uc); // CHECK: atomicrmw sub
  us = __sync_sub_and_fetch (&us, uc); // CHECK: atomicrmw sub
  si = __sync_sub_and_fetch (&si, uc); // CHECK: atomicrmw sub
  ui = __sync_sub_and_fetch (&ui, uc); // CHECK: atomicrmw sub
  sll = __sync_sub_and_fetch (&sll, uc); // CHECK: atomicrmw sub
  ull = __sync_sub_and_fetch (&ull, uc); // CHECK: atomicrmw sub

  sc = __sync_or_and_fetch (&sc, uc); // CHECK: atomicrmw or
  uc = __sync_or_and_fetch (&uc, uc); // CHECK: atomicrmw or
  ss = __sync_or_and_fetch (&ss, uc); // CHECK: atomicrmw or
  us = __sync_or_and_fetch (&us, uc); // CHECK: atomicrmw or
  si = __sync_or_and_fetch (&si, uc); // CHECK: atomicrmw or
  ui = __sync_or_and_fetch (&ui, uc); // CHECK: atomicrmw or
  sll = __sync_or_and_fetch (&sll, uc); // CHECK: atomicrmw or
  ull = __sync_or_and_fetch (&ull, uc); // CHECK: atomicrmw or

  sc = __sync_xor_and_fetch (&sc, uc); // CHECK: atomicrmw xor
  uc = __sync_xor_and_fetch (&uc, uc); // CHECK: atomicrmw xor
  ss = __sync_xor_and_fetch (&ss, uc); // CHECK: atomicrmw xor
  us = __sync_xor_and_fetch (&us, uc); // CHECK: atomicrmw xor
  si = __sync_xor_and_fetch (&si, uc); // CHECK: atomicrmw xor
  ui = __sync_xor_and_fetch (&ui, uc); // CHECK: atomicrmw xor
  sll = __sync_xor_and_fetch (&sll, uc); // CHECK: atomicrmw xor
  ull = __sync_xor_and_fetch (&ull, uc); // CHECK: atomicrmw xor

  sc = __sync_and_and_fetch (&sc, uc); // CHECK: atomicrmw and
  uc = __sync_and_and_fetch (&uc, uc); // CHECK: atomicrmw and
  ss = __sync_and_and_fetch (&ss, uc); // CHECK: atomicrmw and
  us = __sync_and_and_fetch (&us, uc); // CHECK: atomicrmw and
  si = __sync_and_and_fetch (&si, uc); // CHECK: atomicrmw and
  ui = __sync_and_and_fetch (&ui, uc); // CHECK: atomicrmw and
  sll = __sync_and_and_fetch (&sll, uc); // CHECK: atomicrmw and
  ull = __sync_and_and_fetch (&ull, uc); // CHECK: atomicrmw and
}
/* Post the given bits to a Cobalt event: atomically OR them into the
   event word, then issue the kernel sync call only when some thread is
   actually pended on the event. Returns 0 or the skin-call result. */
int cobalt_event_post(cobalt_event_t *event, unsigned long bits)
{
	struct cobalt_event_data *datp = get_event_data(event);

	/* Nothing to post. */
	if (bits == 0)
		return 0;

	__sync_or_and_fetch(&datp->value, bits); /* full barrier. */

	/* No waiter pended: skip the syscall entirely. */
	if ((datp->flags & COBALT_EVENT_PENDED) == 0)
		return 0;

	return XENOMAI_SKINCALL1(__cobalt_muxid,
				 sc_cobalt_event_sync, event);
}
/* Snapshot conversion: return the current value of _v as a plain int.
   The value may change immediately after the read. */
inline operator int() const
	throw()
{
#ifdef __GNUC__
	// OR with 0 leaves _v unchanged but the __sync builtin performs the
	// read atomically with a full memory barrier.
	return __sync_or_and_fetch(const_cast <volatile int *>(&_v),0);
#else
#ifdef __WINDOWS__
	// NOTE(review): plain read; presumably aligned int loads are atomic
	// on the Windows targets this supports — confirm.
	return (int)_v;
#else
	// Fallback: serialize the read through the spinlock/mutex _l.
	_l.lock();
	int v = _v;
	_l.unlock();
	return v;
#endif
#endif
}
/* Now check return values.  Each builtin's documented return convention is
   asserted: compare-and-swap returns the old value / success flag,
   fetch_and_* returns the OLD value, *_and_fetch returns the NEW value.
   Expected constants assume the AL slots were pre-seeded outside this
   chunk (e.g. AL[4..7] == 0x100000002ll, AL[12] == -1ll). */
static void do_ret_di (void)
{
  /* Matching CAS: memory updated, old value / true returned. */
  if (__sync_val_compare_and_swap (AL+0, 0x100000002ll, 0x1234567890ll)
      != 0x100000002ll) abort ();
  if (__sync_bool_compare_and_swap (AL+1, 0x200000003ll, 0x1234567890ll)
      != 1) abort ();
  if (__sync_lock_test_and_set (AL+2, 1) != 0) abort ();
  __sync_lock_release (AL+3); /* no return value, but keep to match results. */

  /* The following tests should not change the value since the
     original does NOT match.  */
  if (__sync_val_compare_and_swap (AL+4, 0x000000002ll, 0x1234567890ll)
      != 0x100000002ll) abort ();
  if (__sync_val_compare_and_swap (AL+5, 0x100000000ll, 0x1234567890ll)
      != 0x100000002ll) abort ();
  if (__sync_bool_compare_and_swap (AL+6, 0x000000002ll, 0x1234567890ll)
      != 0) abort ();
  if (__sync_bool_compare_and_swap (AL+7, 0x100000000ll, 0x1234567890ll)
      != 0) abort ();

  /* fetch_and_*: the OLD value is returned. */
  if (__sync_fetch_and_add (AL+8, 1) != 0) abort ();
  if (__sync_fetch_and_add (AL+9, 0xb000e0000000ll) != 0x1000e0de0000ll)
    abort ();
  if (__sync_fetch_and_sub (AL+10, 22) != 42) abort ();
  if (__sync_fetch_and_sub (AL+11, 0xb000e0000000ll) != 0xc001c0de0000ll)
    abort ();
  if (__sync_fetch_and_and (AL+12, 0x300000007ll) != -1ll) abort ();
  if (__sync_fetch_and_or (AL+13, 0x500000009ll) != 0) abort ();
  if (__sync_fetch_and_xor (AL+14, 0xe00000001ll) != 0xff00ff0000ll) abort ();
  if (__sync_fetch_and_nand (AL+15, 0xa00000007ll) != -1ll) abort ();

  /* These should be the same as the fetch_and_* cases except for
     return value (the NEW value). */
  if (__sync_add_and_fetch (AL+16, 1) != 1) abort ();
  if (__sync_add_and_fetch (AL+17, 0xb000e0000000ll) != 0xc001c0de0000ll)
    abort ();
  if (__sync_sub_and_fetch (AL+18, 22) != 20) abort ();
  if (__sync_sub_and_fetch (AL+19, 0xb000e0000000ll) != 0x1000e0de0000ll)
    abort ();
  if (__sync_and_and_fetch (AL+20, 0x300000007ll) != 0x300000007ll) abort ();
  if (__sync_or_and_fetch (AL+21, 0x500000009ll) != 0x500000009ll) abort ();
  if (__sync_xor_and_fetch (AL+22, 0xe00000001ll) != 0xf100ff0001ll) abort ();
  if (__sync_nand_and_fetch (AL+23, 0xa00000007ll) != ~0xa00000007ll)
    abort ();
}
/* Exercise CAS, test-and-set and the full fetch/op builtin set on AL,
   checking each returned value.
   NOTE(review): the nand expectations here (fetch_and_nand != 0,
   nand_and_fetch != 7) differ from the sibling do_hi test (-1 / ~7);
   whether they are right depends on AL's initial contents (outside this
   chunk) and on the compiler's nand semantics, which GCC changed in
   4.4 — confirm against the intended compiler version. */
static void do_di (void)
{
  /* CAS twice on the same slot: first succeeds, second sees the new value. */
  if (__sync_val_compare_and_swap(AL+0, 0, 1) != 0) abort ();
  if (__sync_val_compare_and_swap(AL+0, 0, 1) != 1) abort ();
  if (__sync_bool_compare_and_swap(AL+1, 0, 1) != 1) abort ();
  if (__sync_bool_compare_and_swap(AL+1, 0, 1) != 0) abort ();
  if (__sync_lock_test_and_set(AL+2, 1) != 0) abort ();

  /* fetch-and-op: the OLD value is returned. */
  if (__sync_fetch_and_add(AL+4, 1) != 0) abort ();
  if (__sync_fetch_and_add(AL+5, 4) != 0) abort ();
  if (__sync_fetch_and_add(AL+6, 22) != 0) abort ();
  if (__sync_fetch_and_sub(AL+7, 12) != 0) abort ();
  if (__sync_fetch_and_and(AL+8, 7) != -1) abort ();
  if (__sync_fetch_and_or(AL+9, 8) != 0) abort ();
  if (__sync_fetch_and_xor(AL+10, 9) != 0) abort ();
  if (__sync_fetch_and_nand(AL+11, 7) != 0) abort ();

  /* op-and-fetch: the NEW value is returned. */
  if (__sync_add_and_fetch(AL+12, 1) != 1) abort ();
  if (__sync_sub_and_fetch(AL+13, 12) != -12) abort ();
  if (__sync_and_and_fetch(AL+14, 7) != 7) abort ();
  if (__sync_or_and_fetch(AL+15, 8) != 8) abort ();
  if (__sync_xor_and_fetch(AL+16, 9) != 9) abort ();
  if (__sync_nand_and_fetch(AL+17, 7) != 7) abort ();
}
/* Return nonzero if the timeout has occurred: 1 when it fired, 0 when it
   is still pending, -1 for an invalid/unused/unarmed timeout number. */
int timeout_passed(const int timeout)
{
    /* Reject out-of-range timeout numbers up front. */
    if (timeout < 0 || timeout >= TIMEOUTS)
        return -1;

    /* OR-with-zero: leaves the state word untouched but reads it
       atomically with a full barrier. */
    const int state = __sync_or_and_fetch(&timeout_state[timeout], 0);

    /* Slots that are unused or not yet armed are errors too. */
    if (!(state & TIMEOUT_USED))
        return -1;
    if (!(state & TIMEOUT_ARMED))
        return -1;

    /* 1 if the timeout already fired, 0 otherwise. */
    return (state & TIMEOUT_PASSED) ? 1 : 0;
}
/* First check they work in terms of what they do to memory.  Return values
   are ignored here; do_ret_di re-runs the same operations checking them. */
static void do_noret_di (void)
{
  /* Matching CAS / test-and-set / release. */
  __sync_val_compare_and_swap (AL+0, 0x100000002ll, 0x1234567890ll);
  __sync_bool_compare_and_swap (AL+1, 0x200000003ll, 0x1234567890ll);
  __sync_lock_test_and_set (AL+2, 1);
  __sync_lock_release (AL+3);

  /* The following tests should not change the value since the
     original does NOT match.  */
  __sync_val_compare_and_swap (AL+4, 0x000000002ll, 0x1234567890ll);
  __sync_val_compare_and_swap (AL+5, 0x100000000ll, 0x1234567890ll);
  __sync_bool_compare_and_swap (AL+6, 0x000000002ll, 0x1234567890ll);
  __sync_bool_compare_and_swap (AL+7, 0x100000000ll, 0x1234567890ll);

  /* fetch-and-op forms. */
  __sync_fetch_and_add (AL+8, 1);
  __sync_fetch_and_add (AL+9, 0xb000e0000000ll); /* + to both halves & carry.  */
  __sync_fetch_and_sub (AL+10, 22);
  __sync_fetch_and_sub (AL+11, 0xb000e0000000ll);
  __sync_fetch_and_and (AL+12, 0x300000007ll);
  __sync_fetch_and_or (AL+13, 0x500000009ll);
  __sync_fetch_and_xor (AL+14, 0xe00000001ll);
  __sync_fetch_and_nand (AL+15, 0xa00000007ll);

  /* These should be the same as the fetch_and_* cases except for
     return value.  */
  __sync_add_and_fetch (AL+16, 1);
  /* add to both halves & carry.  */
  __sync_add_and_fetch (AL+17, 0xb000e0000000ll);
  __sync_sub_and_fetch (AL+18, 22);
  __sync_sub_and_fetch (AL+19, 0xb000e0000000ll);
  __sync_and_and_fetch (AL+20, 0x300000007ll);
  __sync_or_and_fetch (AL+21, 0x500000009ll);
  __sync_xor_and_fetch (AL+22, 0xe00000001ll);
  __sync_nand_and_fetch (AL+23, 0xa00000007ll);
}
/* Atomically raise the given notify bit on the peer's notify byte (client
   or server side, depending on which end we are), then issue a memory
   barrier so the request is visible before the caller re-reads any ring
   indexes. */
static inline void request_notify(struct libxenvchan *ctrl, uint8_t bit)
{
	uint8_t *notify = ctrl->is_server ? &ctrl->ring->cli_notify
	                                  : &ctrl->ring->srv_notify;
	__sync_or_and_fetch(notify, bit);
	xen_mb(); /* post the request /before/ caller re-reads any indexes */
}
int audio_thread(void *param) { SND_EVENT evnt; int buffsize; int samples; int err; char *errstr; int active; if((err = CreateBuffer(snd_format|PCM_RING,0, &hBuff)) != 0) { errstr = "Cannot create sound buffer\n\r"; goto exit_whith_error; }; SetVolume(hBuff,-900,-900); if((err = GetBufferSize(hBuff, &buffsize)) != 0) { errstr = "Cannot get buffer size\n\r"; goto exit_whith_error; }; __sync_or_and_fetch(&threads_running,AUDIO_THREAD); resampler_size = buffsize = buffsize/2; samples = buffsize/4; while( player_state != CLOSED) { uint32_t offset; double event_stamp, wait_stamp; int too_late = 0; switch(sound_state) { case PREPARE: mutex_lock(&astream.lock); if(astream.count < buffsize*2) { memset(astream.buffer+astream.count, 0, buffsize*2-astream.count); astream.count = buffsize*2; }; SetBuffer(hBuff, astream.buffer, 0, buffsize*2); astream.count -= buffsize*2; if(astream.count) memcpy(astream.buffer, astream.buffer+buffsize*2, astream.count); mutex_unlock(&astream.lock); SetTimeBase(hBuff, audio_base); case PAUSE_2_PLAY: GetTimeStamp(hBuff, &last_time_stamp); // printf("last audio time stamp %f\n", last_time_stamp); if((err = PlayBuffer(hBuff, 0)) !=0 ) { errstr = "Cannot play buffer\n\r"; goto exit_whith_error; }; active = 1; sync_audio(hBuff, buffsize); sound_state = PLAY; // printf("render: set audio latency to %f\n", audio_delta); /* breaktrough */ case PLAY: GetNotify(&evnt); if(evnt.code != 0xFF000001) { printf("invalid event code %d\n\r", evnt.code); continue; } if(evnt.stream != hBuff) { printf("invalid stream %x hBuff= %x\n\r", evnt.stream, hBuff); continue; }; offset = evnt.offset; mutex_lock(&astream.lock); if(astream.count < buffsize) { memset(astream.buffer+astream.count, 0, buffsize-astream.count); astream.count = buffsize; }; SetBuffer(hBuff, astream.buffer, offset, buffsize); { double val = 0; int16_t *src = (int16_t*)astream.buffer; int samples = buffsize/2; int i; for(i = 0, val = 0; i < samples/2; i++, src++) if(val < abs(*src)) val= abs(*src); // 
* *src; sound_level_0 = val; //sqrt(val / (samples/2)); for(i = 0, val = 0; i < samples/2; i++, src++) if(val < abs(*src)) val= abs(*src); // * *src; sound_level_1 = val; //sqrt(val / (samples/2)); // printf("%d\n", sound_level); }; samples_written+= buffsize/4; astream.count -= buffsize; if(astream.count) memcpy(astream.buffer, astream.buffer+buffsize, astream.count); mutex_unlock(&astream.lock); break; case PLAY_2_STOP: if( active ) { ResetBuffer(hBuff, SND_RESET_ALL); audio_base = -1.0; active = 0; } sound_state = STOP; break; case PLAY_2_PAUSE: if( active ) { StopBuffer(hBuff); }; sound_state = PAUSE; case PAUSE: case STOP: delay(1); }; } __sync_and_and_fetch(&threads_running,~AUDIO_THREAD); StopBuffer(hBuff); DestroyBuffer(hBuff); return 0; exit_whith_error: printf(errstr); return -1; };
/* Worker thread: atomically OR the bit pattern pointed to by `arg` into
   the shared word `or_and_fetch_data`, 10000 times, then exit. */
void *thread_or_and_fetch(void *arg)
{
	int iterations = 10000;
	while (iterations-- > 0)
		__sync_or_and_fetch((T*)&or_and_fetch_data, *(T*)arg);
	pthread_exit(0);
}
/* Stress test of the 64-bit __sync op-and-fetch builtins (emscripten):
   each section first checks the single-threaded result, then hammers a
   shared global from NUM_THREADS workers and checks the combined result.
   Thread sections run only when the runtime reports threading support. */
int main() {
  /* add */
  {
    T x = HILO(5, 3);
    T y = __sync_add_and_fetch(&x, DUP(1));
    assert(y == HILO(6, 4));
    assert(x == HILO(6, 4));
    volatile T n = HILO(2, 1);
    if (emscripten_has_threading_support()) {
      for(int i = 0; i < NUM_THREADS; ++i)
        pthread_create(&thread[i], NULL, thread_add_and_fetch, (void*)&n);
      for(int i = 0; i < NUM_THREADS; ++i)
        pthread_join(thread[i], NULL);
      printf("n: %llx\n", n);
      assert(n == HILO(NUM_THREADS*10000ULL+2ULL, NUM_THREADS*10000ULL+1ULL));
    }
  }
  /* sub */
  {
    T x = HILO(15, 13);
    T y = __sync_sub_and_fetch(&x, HILO(10, 10));
    assert(y == HILO(5, 3));
    assert(x == HILO(5, 3));
    volatile T n = HILO(NUM_THREADS*10000ULL+5ULL, NUM_THREADS*10000ULL+3ULL);
    if (emscripten_has_threading_support()) {
      for(int i = 0; i < NUM_THREADS; ++i)
        pthread_create(&thread[i], NULL, thread_sub_and_fetch, (void*)&n);
      for(int i = 0; i < NUM_THREADS; ++i)
        pthread_join(thread[i], NULL);
      printf("n: %llx\n", n);
      assert(n == HILO(5,3));
    }
  }
  /* or: each worker contributes one distinct bit */
  {
    T x = HILO(32768 + 5, 5);
    T y = __sync_or_and_fetch(&x, HILO(65536 + 9, 9));
    assert(y == HILO(32768 + 65536 + 13, 13));
    assert(x == HILO(32768 + 65536 + 13, 13));
    for(int x = 0; x < 100; ++x) // Test a few times for robustness, since this test is so short-lived.
    {
      or_and_fetch_data = HILO(65536 + (1<<NUM_THREADS), 1<<NUM_THREADS);
      if (emscripten_has_threading_support()) {
        for(int i = 0; i < NUM_THREADS; ++i) {
          threadArg[i] = DUP(1 << i);
          pthread_create(&thread[i], NULL, thread_or_and_fetch, (void*)&threadArg[i]);
        }
        for(int i = 0; i < NUM_THREADS; ++i)
          pthread_join(thread[i], NULL);
        assert(or_and_fetch_data == HILO(65536 + (1<<(NUM_THREADS+1))-1, (1<<(NUM_THREADS+1))-1));
      }
    }
  }
  /* and: each worker clears one distinct bit */
  {
    T x = HILO(32768 + 5, 5);
    T y = __sync_and_and_fetch(&x, HILO(32768 + 9, 9));
    assert(y == HILO(32768 + 1, 1));
    assert(x == HILO(32768 + 1, 1));
    if (emscripten_has_threading_support()) {
      for(int x = 0; x < 100; ++x) // Test a few times for robustness, since this test is so short-lived.
      {
        and_and_fetch_data = HILO(65536 + (1<<(NUM_THREADS+1))-1, (1<<(NUM_THREADS+1))-1);
        for(int i = 0; i < NUM_THREADS; ++i) {
          threadArg[i] = DUP(~(1UL<<i));
          pthread_create(&thread[i], NULL, thread_and_and_fetch, (void*)&threadArg[i]);
        }
        for(int i = 0; i < NUM_THREADS; ++i)
          pthread_join(thread[i], NULL);
        assert(and_and_fetch_data == HILO(65536 + (1<<NUM_THREADS), 1<<NUM_THREADS));
      }
    }
  }
  /* xor: each worker toggles one distinct bit */
  {
    T x = HILO(32768 + 5, 5);
    T y = __sync_xor_and_fetch(&x, HILO(16384 + 9, 9));
    assert(y == HILO(32768 + 16384 + 12, 12));
    assert(x == HILO(32768 + 16384 + 12, 12));
    if (emscripten_has_threading_support()) {
      for(int x = 0; x < 100; ++x) // Test a few times for robustness, since this test is so short-lived.
      {
        xor_and_fetch_data = HILO(32768 + (1<<NUM_THREADS), 1<<NUM_THREADS);
        for(int i = 0; i < NUM_THREADS; ++i) {
          threadArg[i] = DUP(~(1UL<<i));
          pthread_create(&thread[i], NULL, thread_xor_and_fetch, (void*)&threadArg[i]);
        }
        for(int i = 0; i < NUM_THREADS; ++i)
          pthread_join(thread[i], NULL);
        assert(xor_and_fetch_data == HILO(32768 + ((1<<(NUM_THREADS+1))-1), (1<<(NUM_THREADS+1))-1));
      }
    }
  }
  // XXX NAND support does not exist in Atomics API.
#if 0
  {
    T x = 5;
    T y = __sync_nand_and_fetch(&x, 9);
    assert(y == 5);
    assert(x == -2);
    const int oddNThreads = NUM_THREADS-1;
    for(int x = 0; x < 100; ++x) // Test a few times for robustness, since this test is so short-lived.
    {
      nand_and_fetch_data = 0;
      for(int i = 0; i < oddNThreads; ++i)
        pthread_create(&thread[i], NULL, thread_nand_and_fetch, (void*)-1);
      for(int i = 0; i < oddNThreads; ++i)
        pthread_join(thread[i], NULL);
      assert(nand_and_fetch_data == -1);
    }
  }
#endif
#ifdef REPORT_RESULT
  REPORT_RESULT(0);
#endif
}
/**
 * This method returns the current value at the time of the
 * call. Since this can change at any time, this is really just
 * a snapshot of the value, and should not be considered a long-
 * term condition.
 */
uint8_t auint8_t::getValue() const
{
	// OR with zero leaves the stored byte untouched while the __sync
	// builtin performs the read atomically with a full barrier.
	volatile uint8_t *cell = const_cast<volatile uint8_t *>(&_value);
	return __sync_or_and_fetch(cell, 0x00);
}
/**
 * This casting operator takes the atomic bool and maps it's
 * value into a simple bool for all those times when you really
 * need that bool. This is again a snapshot as the value can change
 * immediately upon return, but it's as good as you'll get.
 */
abool::operator bool() const
{
	// Atomic snapshot of the byte (OR with 0 = barriered read), then
	// map it onto bool exactly as before: true only when it equals 1.
	uint8_t snapshot =
	    __sync_or_and_fetch(const_cast<volatile uint8_t *>(&_value), 0x00);
	return snapshot == 1;
}
/* Arm a one-shot timeout that fires `seconds` from now.  Claims a free
   slot in timeout_state[], records the absolute deadline, then reprograms
   the shared POSIX timer for whichever armed timeout expires soonest.
   Returns the slot number, or -1 on any failure. */
int timeout_set(const double seconds)
{
    struct timespec now, then;
    struct itimerspec when;
    double next;
    int timeout, i;

    /* Timeout must be in the future. */
    if (seconds <= 0.0)
        return -1;

    /* Get current time, */
    if (clock_gettime(CLOCK_REALTIME, &now))
        return -1;

    /* and calculate when the timeout should fire. */
    then = now;
    timespec_add(&then, seconds);

    /* Find an unused timeout: fetch_and_or atomically claims the slot,
       and its return value tells us whether it was already taken. */
    for (timeout = 0; timeout < TIMEOUTS; timeout++)
        if (!(__sync_fetch_and_or(&timeout_state[timeout], TIMEOUT_USED)
              & TIMEOUT_USED))
            break;

    /* No unused timeouts? */
    if (timeout >= TIMEOUTS)
        return -1;

    /* Clear all but TIMEOUT_USED from the state, */
    __sync_and_and_fetch(&timeout_state[timeout], TIMEOUT_USED);

    /* update the timeout details, */
    timeout_time[timeout] = then;

    /* and mark the timeout armable. */
    __sync_or_and_fetch(&timeout_state[timeout], TIMEOUT_ARMED);

    /* How long till the next timeout?  (fetch_and_or with 0 is an
       atomic read of each slot's state.) */
    next = seconds;
    for (i = 0; i < TIMEOUTS; i++)
        if ((__sync_fetch_and_or(&timeout_state[i], 0)
             & (TIMEOUT_USED | TIMEOUT_ARMED | TIMEOUT_PASSED))
            == (TIMEOUT_USED | TIMEOUT_ARMED)) {
            const double secs = timespec_diff(timeout_time[i], now);
            if (secs >= 0.0 && secs < next)
                next = secs;
        }

    /* Calculate duration when to fire the timeout next, */
    timespec_set(&when.it_value, next);
    when.it_interval.tv_sec = 0;
    when.it_interval.tv_nsec = 0L;

    /* and arm the timer. */
    if (timer_settime(timeout_timer, 0, &when, NULL)) {
        /* Failed: release the slot entirely. */
        __sync_and_and_fetch(&timeout_state[timeout], 0);
        return -1;
    }

    /* Return the timeout number. */
    return timeout;
}
/* Compiler codegen test: atomically OR 42 into *z and return the new
   value.  NOMIPS16 keeps the function out of MIPS16 mode so the target
   can emit its native atomic sequence directly. */
NOMIPS16 int f1 (int *z)
{
  return __sync_or_and_fetch (z, 42);
}
/* Atomically OR `value` into the wrapped _value (full barrier) and
   return the resulting value. */
T orAndFetch(T value)
{
	return __sync_or_and_fetch(&_value, value);
}
/* Atomically set the bits of `v` in the byte at `p` and return the
   byte's new value. */
uint8_t x1__atomic_set_uint8_t_bits(uint8_t *p, uint8_t v)
{
	uint8_t updated = __sync_or_and_fetch(p, v);
	return updated;
}
/* Atomically OR `mask` into the word at `addr`. */
static inline void vmbus_sync_set_bit(volatile uint32_t *addr, uint32_t mask)
{
	/* GCC __sync builtin: atomic OR with a full memory barrier. */
	__sync_or_and_fetch(addr, mask);
}
/* Atomically OR `val` into the 16-bit word at `ptr` and return the new
   16-bit value.
   NOTE(review): `val` is 32 bits wide but the target is 16-bit, so the
   operand is truncated — confirm callers never rely on bits above
   0xFFFF. */
UInt16 BitOrAtomic16(UInt32 val, UInt16* ptr)
{
	return __sync_or_and_fetch(ptr, val);
}
/* Atomically read the runner's timed_out flag: OR with 0 leaves the
   value unchanged but gives a barriered read. */
int ami_has_timed_out(audio_module_runner *amr)
{
	return __sync_or_and_fetch(&amr->timed_out, 0);
}
/* Atomically set the least-significant bit of the pointer value stored
   at *p.
   NOTE(review): this treats the stored node pointer's low bit as a
   "promote" tag; it presumes node objects are at least 2-byte aligned so
   bit 0 is always free — confirm. */
static inline void btsOnPromoteFlag(struct node** p)
{
	void** ptr = (void**) p;
	__sync_or_and_fetch(ptr,1);
	return;
}
/* Atomically OR `val` into the byte at `ptr` and return the new 8-bit
   value.
   NOTE(review): as with BitOrAtomic16, the 32-bit `val` is truncated to
   the target width — confirm callers only pass byte-sized masks. */
UInt8 BitOrAtomic8(UInt32 val, UInt8* ptr)
{
	return __sync_or_and_fetch(ptr, val);
}