/* Now check return values.
   Exercises every 64-bit __sync builtin on a distinct slot of the AL array and
   verifies the value each builtin RETURNS (old value for fetch_and_*, new value
   for *_and_fetch).  The expected constants encode the slots' initial contents,
   which are set up elsewhere in this test — TODO confirm against the init table.  */
static void do_ret_di (void)
{
  /* Matching compare-and-swap: the exchange happens and the old value/true is returned.  */
  if (__sync_val_compare_and_swap (AL+0, 0x100000002ll, 0x1234567890ll) != 0x100000002ll) abort ();
  if (__sync_bool_compare_and_swap (AL+1, 0x200000003ll, 0x1234567890ll) != 1) abort ();
  if (__sync_lock_test_and_set (AL+2, 1) != 0) abort ();
  __sync_lock_release (AL+3); /* no return value, but keep to match results.  */

  /* The following tests should not change the value since the original
     does NOT match — val CAS returns the (unchanged) current value,
     bool CAS returns false.  */
  if (__sync_val_compare_and_swap (AL+4, 0x000000002ll, 0x1234567890ll) != 0x100000002ll) abort ();
  if (__sync_val_compare_and_swap (AL+5, 0x100000000ll, 0x1234567890ll) != 0x100000002ll) abort ();
  if (__sync_bool_compare_and_swap (AL+6, 0x000000002ll, 0x1234567890ll) != 0) abort ();
  if (__sync_bool_compare_and_swap (AL+7, 0x100000000ll, 0x1234567890ll) != 0) abort ();

  /* fetch_and_* return the value BEFORE the operation.  The 0xb000e0000000ll
     operands force carries/borrows across the 32-bit halves of the 64-bit word.  */
  if (__sync_fetch_and_add (AL+8, 1) != 0) abort ();
  if (__sync_fetch_and_add (AL+9, 0xb000e0000000ll) != 0x1000e0de0000ll) abort ();
  if (__sync_fetch_and_sub (AL+10, 22) != 42) abort ();
  if (__sync_fetch_and_sub (AL+11, 0xb000e0000000ll) != 0xc001c0de0000ll) abort ();
  if (__sync_fetch_and_and (AL+12, 0x300000007ll) != -1ll) abort ();
  if (__sync_fetch_and_or (AL+13, 0x500000009ll) != 0) abort ();
  if (__sync_fetch_and_xor (AL+14, 0xe00000001ll) != 0xff00ff0000ll) abort ();
  if (__sync_fetch_and_nand (AL+15, 0xa00000007ll) != -1ll) abort ();

  /* These should be the same as the fetch_and_* cases except for return value:
     *_and_fetch return the value AFTER the operation.  */
  if (__sync_add_and_fetch (AL+16, 1) != 1) abort ();
  if (__sync_add_and_fetch (AL+17, 0xb000e0000000ll) != 0xc001c0de0000ll) abort ();
  if (__sync_sub_and_fetch (AL+18, 22) != 20) abort ();
  if (__sync_sub_and_fetch (AL+19, 0xb000e0000000ll) != 0x1000e0de0000ll) abort ();
  if (__sync_and_and_fetch (AL+20, 0x300000007ll) != 0x300000007ll) abort ();
  if (__sync_or_and_fetch (AL+21, 0x500000009ll) != 0x500000009ll) abort ();
  if (__sync_xor_and_fetch (AL+22, 0xe00000001ll) != 0xf100ff0001ll) abort ();
  if (__sync_nand_and_fetch (AL+23, 0xa00000007ll) != ~0xa00000007ll) abort ();
}
static void do_di (void) { if (__sync_val_compare_and_swap(AL+0, 0, 1) != 0) abort (); if (__sync_val_compare_and_swap(AL+0, 0, 1) != 1) abort (); if (__sync_bool_compare_and_swap(AL+1, 0, 1) != 1) abort (); if (__sync_bool_compare_and_swap(AL+1, 0, 1) != 0) abort (); if (__sync_lock_test_and_set(AL+2, 1) != 0) abort (); if (__sync_fetch_and_add(AL+4, 1) != 0) abort (); if (__sync_fetch_and_add(AL+5, 4) != 0) abort (); if (__sync_fetch_and_add(AL+6, 22) != 0) abort (); if (__sync_fetch_and_sub(AL+7, 12) != 0) abort (); if (__sync_fetch_and_and(AL+8, 7) != -1) abort (); if (__sync_fetch_and_or(AL+9, 8) != 0) abort (); if (__sync_fetch_and_xor(AL+10, 9) != 0) abort (); if (__sync_fetch_and_nand(AL+11, 7) != 0) abort (); if (__sync_add_and_fetch(AL+12, 1) != 1) abort (); if (__sync_sub_and_fetch(AL+13, 12) != -12) abort (); if (__sync_and_and_fetch(AL+14, 7) != 7) abort (); if (__sync_or_and_fetch(AL+15, 8) != 8) abort (); if (__sync_xor_and_fetch(AL+16, 9) != 9) abort (); if (__sync_nand_and_fetch(AL+17, 7) != 7) abort (); }
static int reserve_id(struct socket_server *ss) { int i; for (i=0;i<MAX_SOCKET;i++) { int id = __sync_add_and_fetch(&(ss->alloc_id), 1); if (id < 0) { id = __sync_and_and_fetch(&(ss->alloc_id), 0x7fffffff); } struct socket *s = &ss->slot[id % MAX_SOCKET]; if (s->type == SOCKET_TYPE_INVALID) { if (__sync_bool_compare_and_swap(&s->type, SOCKET_TYPE_INVALID, SOCKET_TYPE_RESERVE)) { return id; } else { // retry --i; } } } return -1; }
int socket_server::reserve_id() { for (int i = 0; i < MAX_SOCKET; i++) { int id = __sync_add_and_fetch(&alloc_id, 1); if (id < 0) { id = __sync_and_and_fetch(&alloc_id, 0x7fffffff); } struct socket *s = &slot[HASH_ID(id)]; if (s->type == SOCKET_TYPE_INVALID) { if (__sync_bool_compare_and_swap(&s->type, SOCKET_TYPE_INVALID, SOCKET_TYPE_RESERVE)) { s->id = id; s->fd = -1; return id; } else { --i; } } } return -1; }
/* First check they work in terms of what they do to memory. */ static void do_noret_di (void) { __sync_val_compare_and_swap (AL+0, 0x100000002ll, 0x1234567890ll); __sync_bool_compare_and_swap (AL+1, 0x200000003ll, 0x1234567890ll); __sync_lock_test_and_set (AL+2, 1); __sync_lock_release (AL+3); /* The following tests should not change the value since the original does NOT match. */ __sync_val_compare_and_swap (AL+4, 0x000000002ll, 0x1234567890ll); __sync_val_compare_and_swap (AL+5, 0x100000000ll, 0x1234567890ll); __sync_bool_compare_and_swap (AL+6, 0x000000002ll, 0x1234567890ll); __sync_bool_compare_and_swap (AL+7, 0x100000000ll, 0x1234567890ll); __sync_fetch_and_add (AL+8, 1); __sync_fetch_and_add (AL+9, 0xb000e0000000ll); /* + to both halves & carry. */ __sync_fetch_and_sub (AL+10, 22); __sync_fetch_and_sub (AL+11, 0xb000e0000000ll); __sync_fetch_and_and (AL+12, 0x300000007ll); __sync_fetch_and_or (AL+13, 0x500000009ll); __sync_fetch_and_xor (AL+14, 0xe00000001ll); __sync_fetch_and_nand (AL+15, 0xa00000007ll); /* These should be the same as the fetch_and_* cases except for return value. */ __sync_add_and_fetch (AL+16, 1); /* add to both halves & carry. */ __sync_add_and_fetch (AL+17, 0xb000e0000000ll); __sync_sub_and_fetch (AL+18, 22); __sync_sub_and_fetch (AL+19, 0xb000e0000000ll); __sync_and_and_fetch (AL+20, 0x300000007ll); __sync_or_and_fetch (AL+21, 0x500000009ll); __sync_xor_and_fetch (AL+22, 0xe00000001ll); __sync_nand_and_fetch (AL+23, 0xa00000007ll); }
// 从socket池中获取一个空的socket 并为其分配一个id 2^31-1 // 在socket池中的位置 池的大小是64K socket_id的范围远大与64K static int reserve_id(struct socket_server *ss) { int i; for (i=0;i<MAX_SOCKET;i++) { int id = __sync_add_and_fetch(&(ss->alloc_id), 1); // 原子的++ // 小于0 已经最大了 说明 从0开始 if (id < 0) { id = __sync_and_and_fetch(&(ss->alloc_id), 0x7fffffff); } struct socket *s = &ss->slot[id % MAX_SOCKET];// 从socket池中取出socket if (s->type == SOCKET_TYPE_INVALID) { // 如果相等就交换成 SOCKET_TYPE_RESERVE 设置为已用 // 这里由于没有加锁 可能多个线程操作 所以还需要判断一次 if (__sync_bool_compare_and_swap(&s->type, SOCKET_TYPE_INVALID, SOCKET_TYPE_RESERVE)) { return id; } else { --i;// retry } } } return -1; }
void test_op_and_fetch (void) { sc = __sync_add_and_fetch (&sc, uc); uc = __sync_add_and_fetch (&uc, uc); ss = __sync_add_and_fetch (&ss, uc); us = __sync_add_and_fetch (&us, uc); si = __sync_add_and_fetch (&si, uc); ui = __sync_add_and_fetch (&ui, uc); sc = __sync_sub_and_fetch (&sc, uc); uc = __sync_sub_and_fetch (&uc, uc); ss = __sync_sub_and_fetch (&ss, uc); us = __sync_sub_and_fetch (&us, uc); si = __sync_sub_and_fetch (&si, uc); ui = __sync_sub_and_fetch (&ui, uc); sc = __sync_or_and_fetch (&sc, uc); uc = __sync_or_and_fetch (&uc, uc); ss = __sync_or_and_fetch (&ss, uc); us = __sync_or_and_fetch (&us, uc); si = __sync_or_and_fetch (&si, uc); ui = __sync_or_and_fetch (&ui, uc); sc = __sync_xor_and_fetch (&sc, uc); uc = __sync_xor_and_fetch (&uc, uc); ss = __sync_xor_and_fetch (&ss, uc); us = __sync_xor_and_fetch (&us, uc); si = __sync_xor_and_fetch (&si, uc); ui = __sync_xor_and_fetch (&ui, uc); sc = __sync_and_and_fetch (&sc, uc); uc = __sync_and_and_fetch (&uc, uc); ss = __sync_and_and_fetch (&ss, uc); us = __sync_and_and_fetch (&us, uc); si = __sync_and_and_fetch (&si, uc); ui = __sync_and_and_fetch (&ui, uc); sc = __sync_nand_and_fetch (&sc, uc); uc = __sync_nand_and_fetch (&uc, uc); ss = __sync_nand_and_fetch (&ss, uc); us = __sync_nand_and_fetch (&us, uc); si = __sync_nand_and_fetch (&si, uc); ui = __sync_nand_and_fetch (&ui, uc); }
void *thread_and_and_fetch(void *arg) { for(int i = 0; i < 10000; ++i) __sync_and_and_fetch((T*)&and_and_fetch_data, *(T*)arg); pthread_exit(0); }
static void watchdog_reset(void) { __sync_and_and_fetch(&watchdog_ctx.ticks, 0); }
void jobmanager::threadDone(int tid) { uint64_t newmask = __sync_and_and_fetch(&_runningMask, ~(1 << tid)); if (newmask == 0) sem_post(&_sem); }
T andAndFetch(T value) { return __sync_and_and_fetch(&_value, value); }
int timeout_set(const double seconds) { struct timespec now, then; struct itimerspec when; double next; int timeout, i; /* Timeout must be in the future. */ if (seconds <= 0.0) return -1; /* Get current time, */ if (clock_gettime(CLOCK_REALTIME, &now)) return -1; /* and calculate when the timeout should fire. */ then = now; timespec_add(&then, seconds); /* Find an unused timeout. */ for (timeout = 0; timeout < TIMEOUTS; timeout++) if (!(__sync_fetch_and_or(&timeout_state[timeout], TIMEOUT_USED) & TIMEOUT_USED)) break; /* No unused timeouts? */ if (timeout >= TIMEOUTS) return -1; /* Clear all but TIMEOUT_USED from the state, */ __sync_and_and_fetch(&timeout_state[timeout], TIMEOUT_USED); /* update the timeout details, */ timeout_time[timeout] = then; /* and mark the timeout armable. */ __sync_or_and_fetch(&timeout_state[timeout], TIMEOUT_ARMED); /* How long till the next timeout? */ next = seconds; for (i = 0; i < TIMEOUTS; i++) if ((__sync_fetch_and_or(&timeout_state[i], 0) & (TIMEOUT_USED | TIMEOUT_ARMED | TIMEOUT_PASSED)) == (TIMEOUT_USED | TIMEOUT_ARMED)) { const double secs = timespec_diff(timeout_time[i], now); if (secs >= 0.0 && secs < next) next = secs; } /* Calculate duration when to fire the timeout next, */ timespec_set(&when.it_value, next); when.it_interval.tv_sec = 0; when.it_interval.tv_nsec = 0L; /* and arm the timer. */ if (timer_settime(timeout_timer, 0, &when, NULL)) { /* Failed. */ __sync_and_and_fetch(&timeout_state[timeout], 0); return -1; } /* Return the timeout number. */ return timeout; }
UInt16 BitAndAtomic16(UInt32 val, UInt16* ptr) { return __sync_and_and_fetch(ptr, val); }
UInt8 BitAndAtomic8(UInt32 val, UInt8* ptr) { return __sync_and_and_fetch(ptr, val); }
void reset() { push(__sync_and_and_fetch(&data->v, 0)); }
// Emscripten test driver for the 64-bit __sync_*_and_fetch builtins.
// T is a 64-bit integer type; HILO(hi, lo) packs two values into the high and
// low 32-bit halves and DUP(x) duplicates x into both halves (see the macro
// definitions above this chunk — TODO confirm).  Each section first checks the
// single-threaded return/memory semantics of one builtin, then (when threading
// is available) hammers a shared variable from NUM_THREADS threads and checks
// the combined result.  NAND is skipped: no NAND in the JS Atomics API.
int main() {
	{
		// add_and_fetch returns the NEW value; both halves must be bumped.
		T x = HILO(5, 3);
		T y = __sync_add_and_fetch(&x, DUP(1));
		assert(y == HILO(6, 4));
		assert(x == HILO(6, 4));
		// NUM_THREADS threads each add DUP(1) 10000 times (thread_add_and_fetch).
		volatile T n = HILO(2, 1);
		if (emscripten_has_threading_support()) {
			for(int i = 0; i < NUM_THREADS; ++i) pthread_create(&thread[i], NULL, thread_add_and_fetch, (void*)&n);
			for(int i = 0; i < NUM_THREADS; ++i) pthread_join(thread[i], NULL);
			printf("n: %llx\n", n);
			assert(n == HILO(NUM_THREADS*10000ULL+2ULL, NUM_THREADS*10000ULL+1ULL));
		}
	}
	{
		T x = HILO(15, 13);
		T y = __sync_sub_and_fetch(&x, HILO(10, 10));
		assert(y == HILO(5, 3));
		assert(x == HILO(5, 3));
		// Start high and let the threads subtract back down to HILO(5, 3).
		volatile T n = HILO(NUM_THREADS*10000ULL+5ULL, NUM_THREADS*10000ULL+3ULL);
		if (emscripten_has_threading_support()) {
			for(int i = 0; i < NUM_THREADS; ++i) pthread_create(&thread[i], NULL, thread_sub_and_fetch, (void*)&n);
			for(int i = 0; i < NUM_THREADS; ++i) pthread_join(thread[i], NULL);
			printf("n: %llx\n", n);
			assert(n == HILO(5,3));
		}
	}
	{
		T x = HILO(32768 + 5, 5);
		T y = __sync_or_and_fetch(&x, HILO(65536 + 9, 9));
		assert(y == HILO(32768 + 65536 + 13, 13));
		assert(x == HILO(32768 + 65536 + 13, 13));
		// Each thread ORs in its own bit (1 << i); the union must be all bits set.
		for(int x = 0; x < 100; ++x) // Test a few times for robustness, since this test is so short-lived.
		{
			or_and_fetch_data = HILO(65536 + (1<<NUM_THREADS), 1<<NUM_THREADS);
			if (emscripten_has_threading_support()) {
				for(int i = 0; i < NUM_THREADS; ++i) {
					threadArg[i] = DUP(1 << i);
					pthread_create(&thread[i], NULL, thread_or_and_fetch, (void*)&threadArg[i]);
				}
				for(int i = 0; i < NUM_THREADS; ++i) pthread_join(thread[i], NULL);
				assert(or_and_fetch_data == HILO(65536 + (1<<(NUM_THREADS+1))-1, (1<<(NUM_THREADS+1))-1));
			}
		}
	}
	{
		T x = HILO(32768 + 5, 5);
		T y = __sync_and_and_fetch(&x, HILO(32768 + 9, 9));
		assert(y == HILO(32768 + 1, 1));
		assert(x == HILO(32768 + 1, 1));
		if (emscripten_has_threading_support()) {
			// Each thread ANDs with ~(1 << i), clearing its own bit; only the
			// untouched high bits must survive.
			for(int x = 0; x < 100; ++x) // Test a few times for robustness, since this test is so short-lived.
			{
				and_and_fetch_data = HILO(65536 + (1<<(NUM_THREADS+1))-1, (1<<(NUM_THREADS+1))-1);
				for(int i = 0; i < NUM_THREADS; ++i) {
					threadArg[i] = DUP(~(1UL<<i));
					pthread_create(&thread[i], NULL, thread_and_and_fetch, (void*)&threadArg[i]);
				}
				for(int i = 0; i < NUM_THREADS; ++i) pthread_join(thread[i], NULL);
				assert(and_and_fetch_data == HILO(65536 + (1<<NUM_THREADS), 1<<NUM_THREADS));
			}
		}
	}
	{
		T x = HILO(32768 + 5, 5);
		T y = __sync_xor_and_fetch(&x, HILO(16384 + 9, 9));
		assert(y == HILO(32768 + 16384 + 12, 12));
		assert(x == HILO(32768 + 16384 + 12, 12));
		if (emscripten_has_threading_support()) {
			// XOR with ~(1 << i) from each thread; the expected final pattern
			// relies on each thread running an even number of XORs per bit
			// except its own — see thread_xor_and_fetch.
			for(int x = 0; x < 100; ++x) // Test a few times for robustness, since this test is so short-lived.
			{
				xor_and_fetch_data = HILO(32768 + (1<<NUM_THREADS), 1<<NUM_THREADS);
				for(int i = 0; i < NUM_THREADS; ++i) {
					threadArg[i] = DUP(~(1UL<<i));
					pthread_create(&thread[i], NULL, thread_xor_and_fetch, (void*)&threadArg[i]);
				}
				for(int i = 0; i < NUM_THREADS; ++i) pthread_join(thread[i], NULL);
				assert(xor_and_fetch_data == HILO(32768 + ((1<<(NUM_THREADS+1))-1), (1<<(NUM_THREADS+1))-1));
			}
		}
	}
	// XXX NAND support does not exist in Atomics API.
#if 0
	{
		T x = 5;
		T y = __sync_nand_and_fetch(&x, 9);
		assert(y == 5);
		assert(x == -2);
		const int oddNThreads = NUM_THREADS-1;
		for(int x = 0; x < 100; ++x) // Test a few times for robustness, since this test is so short-lived.
		{
			nand_and_fetch_data = 0;
			for(int i = 0; i < oddNThreads; ++i) pthread_create(&thread[i], NULL, thread_nand_and_fetch, (void*)-1);
			for(int i = 0; i < oddNThreads; ++i) pthread_join(thread[i], NULL);
			assert(nand_and_fetch_data == -1);
		}
	}
#endif

#ifdef REPORT_RESULT
	REPORT_RESULT(0);
#endif
}
/* Audio playback thread: creates a ring sound buffer, then runs the
   sound_state machine until player_state == CLOSED, feeding PCM data from the
   shared astream buffer into the hardware buffer on each driver notification.
   Sets/clears AUDIO_THREAD in threads_running to advertise its lifetime.
   Returns 0 on clean shutdown, -1 on a driver error.
   NOTE(review): several case labels intentionally fall through
   (PREPARE -> PAUSE_2_PLAY -> PLAY, and PLAY_2_PAUSE -> PAUSE); the original
   author marked one of them "breaktrough".  Also printf(errstr) passes a
   non-literal format string — safe today because every errstr is a constant
   without '%', but fputs/"%s" would be more robust.  */
int audio_thread(void *param)
{
    SND_EVENT evnt;

    int buffsize;
    int samples;
    int err;
    char *errstr;
    int active;

    if((err = CreateBuffer(snd_format|PCM_RING,0, &hBuff)) != 0)
    {
        errstr = "Cannot create sound buffer\n\r";
        goto exit_whith_error;
    };

    SetVolume(hBuff,-900,-900);

    if((err = GetBufferSize(hBuff, &buffsize)) != 0)
    {
        errstr = "Cannot get buffer size\n\r";
        goto exit_whith_error;
    };

    /* Advertise that the audio thread is up. */
    __sync_or_and_fetch(&threads_running,AUDIO_THREAD);

    /* The hardware ring is double-buffered: we refill half of it per event. */
    resampler_size = buffsize = buffsize/2;

    samples = buffsize/4;

    while( player_state != CLOSED)
    {
        uint32_t offset;
        double event_stamp, wait_stamp;
        int too_late = 0;

        switch(sound_state)
        {
            case PREPARE:
                /* Prime the full ring (both halves) from astream,
                   zero-padding if not enough data is buffered yet. */
                mutex_lock(&astream.lock);
                if(astream.count < buffsize*2)
                {
                    memset(astream.buffer+astream.count, 0, buffsize*2-astream.count);
                    astream.count = buffsize*2;
                };
                SetBuffer(hBuff, astream.buffer, 0, buffsize*2);
                astream.count -= buffsize*2;
                if(astream.count)
                    memcpy(astream.buffer, astream.buffer+buffsize*2, astream.count);
                mutex_unlock(&astream.lock);
                SetTimeBase(hBuff, audio_base);
                /* fallthrough: freshly prepared stream starts playing */

            case PAUSE_2_PLAY:
                GetTimeStamp(hBuff, &last_time_stamp);
//                printf("last audio time stamp %f\n", last_time_stamp);

                if((err = PlayBuffer(hBuff, 0)) !=0 )
                {
                    errstr = "Cannot play buffer\n\r";
                    goto exit_whith_error;
                };
                active = 1;
                sync_audio(hBuff, buffsize);
                sound_state = PLAY;
//                printf("render: set audio latency to %f\n", audio_delta);

                /* breaktrough */

            case PLAY:
                /* Block for the driver's half-buffer notification, validate
                   it, then refill the half that just finished playing. */
                GetNotify(&evnt);

                if(evnt.code != 0xFF000001)
                {
                    printf("invalid event code %d\n\r", evnt.code);
                    continue;
                }

                if(evnt.stream != hBuff)
                {
                    printf("invalid stream %x hBuff= %x\n\r", evnt.stream, hBuff);
                    continue;
                };

                offset = evnt.offset;

                mutex_lock(&astream.lock);
                if(astream.count < buffsize)
                {
                    memset(astream.buffer+astream.count, 0, buffsize-astream.count);
                    astream.count = buffsize;
                };
                SetBuffer(hBuff, astream.buffer, offset, buffsize);

                {
                    /* Peak-level meter: scan the 16-bit samples of each
                       channel for the largest magnitude. */
                    double val = 0;
                    int16_t *src = (int16_t*)astream.buffer;
                    int samples = buffsize/2;
                    int i;

                    for(i = 0, val = 0; i < samples/2; i++, src++)
                        if(val < abs(*src)) val= abs(*src); // * *src;
                    sound_level_0 = val; //sqrt(val / (samples/2));

                    for(i = 0, val = 0; i < samples/2; i++, src++)
                        if(val < abs(*src)) val= abs(*src); // * *src;
                    sound_level_1 = val; //sqrt(val / (samples/2));
//                    printf("%d\n", sound_level);
                };

                samples_written+= buffsize/4;

                /* Consume the chunk we just queued and compact the stream. */
                astream.count -= buffsize;
                if(astream.count)
                    memcpy(astream.buffer, astream.buffer+buffsize, astream.count);
                mutex_unlock(&astream.lock);
                break;

            case PLAY_2_STOP:
                if( active )
                {
                    ResetBuffer(hBuff, SND_RESET_ALL);
                    audio_base = -1.0;
                    active = 0;
                }
                sound_state = STOP;
                break;

            case PLAY_2_PAUSE:
                if( active )
                {
                    StopBuffer(hBuff);
                };
                sound_state = PAUSE;
                /* fallthrough into the idle states */

            case PAUSE:
            case STOP:
                delay(1);
        };
    }

    /* Advertise shutdown before releasing the hardware buffer. */
    __sync_and_and_fetch(&threads_running,~AUDIO_THREAD);

    StopBuffer(hBuff);
    DestroyBuffer(hBuff);
    return 0;

exit_whith_error:
    printf(errstr);
    return -1;
};
void TASunlock(){ //TASstate = 0; __sync_and_and_fetch(&TASlockt, 0); // "set(false)" }
uint8_t x1__atomic_clear_uint8_t_bits(uint8_t *p, uint8_t v) { return __sync_and_and_fetch(p, ~v); }
/* Implement an OpenMP 'atomic' update for the Nanos runtime.
   op encodes the operation (1 +=, 2 -=, 3 *=, 4 /=, 5 &=, 6 |=, 7 ^=,
   10 pre/post-increment, 11 pre/post-decrement); type encodes the operand
   type (0 int, 1 float, 2 double).  `variable` points at the object to
   update and `operand` at the right-hand side.  Integer bitwise/additive
   ops map directly onto __sync builtins; double ops are emulated with a
   compare-and-swap loop; float is not yet supported (aborts).

   BUG FIX: the double path computed `sizeof(variable)` — the size of the
   *pointer*, not of the pointed-to double — so 32-bit builds selected the
   4-byte CAS and silently corrupted the value (and on unknown sizes the
   loop exited without storing at all).  The old `*(unsigned long *)`
   punning also read only 4 of the 8 bytes on ILP32 and violated strict
   aliasing.  We now always use an 8-byte CAS with memcpy bit-punning.  */
void NANOS_atomic ( int op, int type, void * variable, void * operand )
{
  if ( ( type == 0 ) && ( op == 1 || op == 2 || op == 5 || op == 6 || op == 7) )
  {
    /* variable has integer type and the operation is one of the compound
       assignments: plus, minus, and, ior, xor.  */
    printf("info: 'atomic' construct implemented using atomic builtins.\n");

    int tmp = *((int *) operand);
    switch (op)
    {
      case 1: __sync_add_and_fetch((int *) variable, tmp); break;
      case 2: __sync_sub_and_fetch((int *) variable, tmp); break;
      case 5: __sync_and_and_fetch((int *) variable, tmp); break;
      case 6: __sync_or_and_fetch((int *) variable, tmp); break;
      case 7: __sync_xor_and_fetch((int *) variable, tmp); break;
    };
  }
  else if ( ( type == 0 ) && ( op == 10 || op == 11) )
  {
    /* variable has integer type and the operation is a pre-/post-
       incr-/decr- ement.  */
    printf("info: 'atomic' construct implemented using atomic builtins.\n");
    if (op == 10)
      __sync_add_and_fetch((int *) variable, 1);
    else
      __sync_sub_and_fetch((int *) variable, 1);
  }
  else
  {
    /* any other case */
    printf("info: 'atomic' construct implemented using compare and exchange.\n");
    if (type == 1)
    {
      /* Float type */
      printf("Nanos support for Atomic access to floats is not yet implemented\n");
      abort();
    }
    else if (type == 2)
    {
      /* Double type: classic CAS loop — recompute from a fresh snapshot
         until the 8-byte compare-and-swap succeeds.  */
      double tmp = *((double *) operand);
      double oldval, newval;
      uint64_t oldbits, newbits;
      do
      {
        oldval = *((double *) variable);
        switch (op)
        {
          case 1: newval = oldval + tmp; break;
          case 2: newval = oldval - tmp; break;
          case 3: newval = oldval * tmp; break;
          case 4: newval = oldval / tmp; break;
          case 10: newval = oldval + 1; break;
          case 11: newval = oldval - 1; break;
          default:
            printf("Unhandled operation type while generating Nanos code for OpenMP atomic contruct.");
            abort();
        }
        /* Bit-copy through memcpy instead of pointer punning: correct on
           both ILP32 and LP64, and no strict-aliasing violation.  */
        memcpy(&oldbits, &oldval, sizeof oldbits);
        memcpy(&newbits, &newval, sizeof newbits);
        __sync_synchronize();
      } while ( !__sync_bool_compare_and_swap((uint64_t *) variable, oldbits, newbits) );
    }
    else
    {
      printf("Unhandled variable type while generating Nanos code for OpenMP atomic contruct.");
      abort( );
    }
  }
}