void
test_sub ()
{
  v = res = 20;
  count = 0;

  __atomic_sub_fetch (&v, count + 1, __ATOMIC_RELAXED);
  if (v != --res)
    abort ();

  __atomic_fetch_sub (&v, count + 1, __ATOMIC_CONSUME);
  if (v != --res)
    abort ();

  __atomic_sub_fetch (&v, 1, __ATOMIC_ACQUIRE);
  if (v != --res)
    abort ();

  __atomic_fetch_sub (&v, 1, __ATOMIC_RELEASE);
  if (v != --res)
    abort ();

  __atomic_sub_fetch (&v, count + 1, __ATOMIC_ACQ_REL);
  if (v != --res)
    abort ();

  __atomic_fetch_sub (&v, count + 1, __ATOMIC_SEQ_CST);
  if (v != --res)
    abort ();
}
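/* Hedged aside (not part of the testsuite): test_sub compares against --res
   because __atomic_sub_fetch returns the NEW (post-subtraction) value, while
   test_fetch_sub further below compares against res-- because
   __atomic_fetch_sub returns the PRIOR value.  A minimal standalone check: */
static void
sub_fetch_vs_fetch_sub (void)
{
  int x = 5;
  if (__atomic_sub_fetch (&x, 1, __ATOMIC_RELAXED) != 4) /* new value */
    __builtin_abort ();
  x = 5;
  if (__atomic_fetch_sub (&x, 1, __ATOMIC_RELAXED) != 5) /* old value */
    __builtin_abort ();
}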
void
test_atomic_bool (_Atomic _Bool *a)
{
  enum { SEQ_CST = __ATOMIC_SEQ_CST };

  __atomic_fetch_add (a, 1, SEQ_CST); /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_fetch_add." } */
  __atomic_fetch_sub (a, 1, SEQ_CST); /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_fetch_sub." } */
  __atomic_fetch_and (a, 1, SEQ_CST); /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_fetch_and." } */
  __atomic_fetch_xor (a, 1, SEQ_CST); /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_fetch_xor." } */
  __atomic_fetch_or (a, 1, SEQ_CST); /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_fetch_or." } */
  __atomic_fetch_nand (a, 1, SEQ_CST); /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_fetch_nand." } */

  __atomic_add_fetch (a, 1, SEQ_CST); /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_add_fetch." } */
  __atomic_sub_fetch (a, 1, SEQ_CST); /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_sub_fetch." } */
  __atomic_and_fetch (a, 1, SEQ_CST); /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_and_fetch." } */
  __atomic_xor_fetch (a, 1, SEQ_CST); /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_xor_fetch." } */
  __atomic_or_fetch (a, 1, SEQ_CST); /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_or_fetch." } */
  __atomic_nand_fetch (a, 1, SEQ_CST); /* { dg-error "operand type ._Atomic _Bool \\*. is incompatible with argument 1 of .__atomic_nand_fetch." } */

  /* The following are valid and must be accepted.  */
  _Bool val = 0, ret = 0;
  __atomic_exchange (a, &val, &ret, SEQ_CST);
  __atomic_exchange_n (a, val, SEQ_CST);
  __atomic_compare_exchange (a, &val, &ret, !1, SEQ_CST, SEQ_CST);
  __atomic_compare_exchange_n (a, &val, ret, !1, SEQ_CST, SEQ_CST);
  __atomic_test_and_set (a, SEQ_CST);
  __atomic_clear (a, SEQ_CST);
}
template <typename T>
inline T Atomic<T>::fetchAndSub ( const T& val )
{
#ifdef HAVE_NEW_GCC_ATOMIC_OPS
   // Both intrinsics return the value held before the subtraction.
   return __atomic_fetch_sub( &_value, val, __ATOMIC_ACQ_REL );
#else
   return __sync_fetch_and_sub( &_value, val );
#endif
}
/**
 * @see mpscifo.h
 */
Msg_t *rmv_raw(MpscFifo_t *pQ) {
  Msg_t *pResult = pQ->pTail;
  Msg_t *pNext = __atomic_load_n(&pResult->pNext, __ATOMIC_SEQ_CST); // or __ATOMIC_ACQUIRE
  if (pNext != NULL) {
    __atomic_fetch_sub(&pQ->count, 1, __ATOMIC_SEQ_CST);
    __atomic_store_n(&pQ->pTail, pNext, __ATOMIC_SEQ_CST); // or __ATOMIC_RELEASE
  } else {
    pResult = NULL;
  }
  return pResult;
}
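/* Hedged sketch (not from mpscifo.c): the consumer above pairs with a
   Vyukov-style multi-producer push.  The pHead field and the add_raw name
   are assumptions here; only pTail/pNext/count appear in the original.
   It also shows why rmv_raw can observe pNext == NULL while count > 0:
   a producer can be preempted between the exchange and the pNext store. */
void add_raw(MpscFifo_t *pQ, Msg_t *pMsg) {
  __atomic_store_n(&pMsg->pNext, NULL, __ATOMIC_SEQ_CST);
  // Swing head to the new node; the previous head briefly has no successor.
  Msg_t *pPrev = __atomic_exchange_n(&pQ->pHead, pMsg, __ATOMIC_SEQ_CST);
  __atomic_store_n(&pPrev->pNext, pMsg, __ATOMIC_SEQ_CST);
  __atomic_fetch_add(&pQ->count, 1, __ATOMIC_SEQ_CST);
}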
uint64 host_atomic_sub(uint64* value, const uint64 op)
{
#if defined(__GNUC__)
    return __atomic_fetch_sub( value, op, __ATOMIC_RELAXED );
#else
    // The mutex must be shared across callers: a fresh function-local
    // Mutex on every call would serialize nothing.
    static Mutex mutex;
    ScopedLock lock( &mutex );

    const uint64 old = *value;
    *value -= op;
    return old;
#endif
}
/**
 * Remove a tcb from the list; we assume the list will
 * NEVER be empty as idle will always be present.
 *
 * @param pcur is the tcb to be removed
 *
 * @return pnext if successful, else AC_NULL
 */
STATIC tcb_x86* remove_tcb_intr_disabled(tcb_x86* pcur) {
  tcb_x86* pnext_tcb = pcur->pnext_tcb;
  if (pnext_tcb != AC_NULL) {
#ifdef SUPPORT_READY_LENGTH
    __atomic_fetch_sub(&ready_length, 1, __ATOMIC_RELAXED);
#endif
    tcb_x86* pprev_tcb = pcur->pprev_tcb;
    //ac_printf("remove_tcb_intr_disabled: pcur=%x pnext_tcb=%x pprev_tcb=%x\n",
    //    pcur, pnext_tcb, pprev_tcb);
    pprev_tcb->pnext_tcb = pnext_tcb;
    pnext_tcb->pprev_tcb = pprev_tcb;
    pcur->pnext_tcb = AC_NULL;
    pcur->pprev_tcb = AC_NULL;
  }
  return pnext_tcb;
}
__host__ __device__
typename enable_if<
  sizeof(Integer64) == 8,
  Integer64
>::type
atomic_fetch_sub(Integer64 *x, Integer64 y)
{
#if defined(__CUDA_ARCH__)
  return atomicSub(x, y);
#elif defined(__GNUC__)
  return __atomic_fetch_sub(x, y, __ATOMIC_SEQ_CST);
#elif defined(_MSC_VER)
  // Fetch-and-add of -y is fetch-and-sub; returns the prior value.
  return InterlockedExchangeAdd64(x, -y);
#elif defined(__clang__)
  // Note: clang also defines __GNUC__, so the __GNUC__ branch above is
  // taken first and this one is effectively unreachable.
  return __c11_atomic_fetch_sub(x, y, __ATOMIC_SEQ_CST);
#else
#error "No atomic_fetch_sub implementation."
#endif
}
void hs_device_unref(hs_device *dev)
{
    if (dev) {
#ifdef _MSC_VER
        if (InterlockedDecrement(&dev->refcount))
            return;
#else
        // Decrement with release; the acquire fence on the final reference
        // orders all prior uses of the object before its destruction.
        if (__atomic_fetch_sub(&dev->refcount, 1, __ATOMIC_RELEASE) > 1)
            return;
        __atomic_thread_fence(__ATOMIC_ACQUIRE);
#endif
        free(dev->key);
        free(dev->location);
        free(dev->path);
        free(dev->manufacturer);
        free(dev->product);
        free(dev->serial);
    }

    free(dev);
}
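/* Hedged sketch (hypothetical, not quoted from libhs): the matching
   increment in this release/acquire refcount scheme can be relaxed,
   because merely taking a reference imposes no ordering by itself. */
void hs_device_ref_sketch(hs_device *dev)
{
#ifdef _MSC_VER
    InterlockedIncrement(&dev->refcount);
#else
    __atomic_fetch_add(&dev->refcount, 1, __ATOMIC_RELAXED);
#endif
}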
/**
 * @see mpscifo.h
 */
Msg_t *rmv(MpscFifo_t *pQ) {
#if 0
  Msg_t *pResult = pQ->pTail;
  Msg_t** ptr_next = &pResult->pNext;
  Msg_t *pNext = __atomic_load_n(ptr_next, __ATOMIC_SEQ_CST); // or __ATOMIC_ACQUIRE
  if (pNext != NULL) {
    // TODO: Support "blocking" which means use condition variable
    int32_t* ptr_count = &pQ->count;
    __atomic_fetch_sub(ptr_count, 1, __ATOMIC_SEQ_CST);
    pQ->pTail = pNext;
    pResult->pNext = NULL;
    // Return the old tail's storage but carry the next node's payload
    // (stub rotation).
    pResult->pRspq = pNext->pRspq;
    pResult->pExtra = pNext->pExtra;
    pResult->cmd = pNext->cmd;
    pResult->arg = pNext->arg;
  } else {
    pResult = NULL;
  }
  return pResult;
#else
  Msg_t* pResult = pQ->pTail;
  Msg_t* pNext = pResult->pNext;
  if (pNext != NULL) {
    pQ->pTail = pNext;
    pQ->count -= 1;
    pResult->pRspq = pNext->pRspq;
    pResult->pExtra = pNext->pExtra;
    pResult->cmd = pNext->cmd;
    pResult->arg = pNext->arg;
  } else {
    pResult = NULL;
  }
  return pResult;
#endif
}
void
test_fetch_sub ()
{
  v = res = 20;
  count = 0;

  if (__atomic_fetch_sub (&v, count + 1, __ATOMIC_RELAXED) != res--)
    abort ();

  if (__atomic_fetch_sub (&v, 1, __ATOMIC_CONSUME) != res--)
    abort ();

  if (__atomic_fetch_sub (&v, count + 1, __ATOMIC_ACQUIRE) != res--)
    abort ();

  if (__atomic_fetch_sub (&v, 1, __ATOMIC_RELEASE) != res--)
    abort ();

  if (__atomic_fetch_sub (&v, count + 1, __ATOMIC_ACQ_REL) != res--)
    abort ();

  if (__atomic_fetch_sub (&v, 1, __ATOMIC_SEQ_CST) != res--)
    abort ();
}
/**
 * @see mpscifo.h
 */
Msg_t *rmv_raw(MpscFifo_t *pQ) {
#if 0
  // assert(pQ != NULL);
  int32_t* ptr_count = &pQ->count;
  int32_t initial_count = __atomic_load_n(ptr_count, __ATOMIC_SEQ_CST);
  Msg_t *pResult = pQ->pTail;
  Msg_t** ptr_next = &pResult->pNext;
  Msg_t *pNext = __atomic_load_n(ptr_next, __ATOMIC_SEQ_CST); // or __ATOMIC_ACQUIRE
  if (pNext != NULL) {
#if 1
    Msg_t** ptr_tail = &pQ->pTail;
    __atomic_store_n(ptr_tail, pNext, __ATOMIC_SEQ_CST); // or __ATOMIC_RELEASE
#elif 0
    __atomic_store_n(&pQ->pTail, pNext, __ATOMIC_SEQ_CST); // or __ATOMIC_RELEASE
#else
    pQ->pTail = pNext;
#endif
    int32_t* ptr_count = &pQ->count;
    __atomic_fetch_sub(ptr_count, 1, __ATOMIC_SEQ_CST);
  } else {
#if 1
    uint32_t yield_count = 0;
    uint32_t cur_count = initial_count;
    while ((pNext == NULL) && (cur_count > 0)) {
      yield_count += 1;
      sched_yield();
      pNext = __atomic_load_n(ptr_next, __ATOMIC_SEQ_CST); // or __ATOMIC_ACQUIRE
      cur_count = __atomic_load_n(ptr_count, __ATOMIC_SEQ_CST);
    }
    if (pNext != NULL) {
      Msg_t** ptr_tail = &pQ->pTail;
      __atomic_store_n(ptr_tail, pNext, __ATOMIC_SEQ_CST); // or __ATOMIC_RELEASE
      __atomic_fetch_sub(ptr_count, 1, __ATOMIC_SEQ_CST);
      printf("rmv_raw fixed initial_count=%d pNext=%p pQ->count=%d yield_count=%d\n",
             initial_count, pNext, pQ->count, yield_count);
    } else {
      pResult = NULL;
      printf("rmv_raw failed initial_count=%d pNext=%p pQ->count=%d yield_count=%d\n",
             initial_count, pNext, pQ->count, yield_count);
    }
#else
    Msg_t *pNext_retry = __atomic_load_n(ptr_next, __ATOMIC_SEQ_CST);
    printf("rmv_raw 1 initial_count=%d pNext=%p pNext_retry=%p pQ->count=%d\n",
           initial_count, pNext, pNext_retry, pQ->count);
    sched_yield();
    pNext_retry = __atomic_load_n(ptr_next, __ATOMIC_SEQ_CST);
    printf("rmv_raw 2 initial_count=%d pNext=%p pNext_retry=%p pQ->count=%d\n",
           initial_count, pNext, pNext_retry, pQ->count);
    //*((uint8_t*)0) = 0; // Crash
    pResult = NULL;
#endif
  }
  return pResult;
#else
  //_Atomic(Msg_t*) pResult = pQ->pTail;
  //_Atomic(Msg_t*) pNext = pResult->pNext;
  Msg_t* pResult = pQ->pTail;
  Msg_t* pNext = pResult->pNext;
  if (pNext != NULL) {
    pQ->pTail = pNext;
    pQ->count -= 1;
  } else {
    pResult = NULL;
  }
  return pResult;
#endif
}
void rsc_start_client(void *client_sched)
{
	bool redo;
	uint32_t i;
	t_email email;
	t_session session = { 0 };

	redo = true;
	email = *config->email;
	session.email = &email;
	session.helo = config->session.helo;
	session.read = buffer_create(NULL);
	if (session.read == NULL) {
		goto send_error;
	}
	session.write = buffer_create(NULL);
	if (session.write == NULL) {
		goto send_error;
	}
	rinoo_email_session_set(&email, &session);
	while (redo) {
		session.socket = rinoo_tcp_client(client_sched, &config->connection.ip,
						  config->connection.port,
						  config->connection.timeout * 1000);
		if (session.socket == NULL) {
			rinoo_log("Error: %s", strerror(errno));
			config->stats.thread[rinoo_sched_self()->id].failed++;
			rsc_log(config, RSC_LOG_ERROR, "Couldn't create socket to %s:%d",
				config->connection.server, config->connection.port);
			goto send_error;
		}
		config->stats.thread[rinoo_sched_self()->id].sessions++;
		if (rinoo_smtp_start(&email) != 0) {
			goto send_error;
		}
		for (i = 0; i < config->session.nbmsg; i++) {
			if (rinoo_smtp_send(&email) != 0) {
				goto send_error;
			}
			config->stats.thread[rinoo_sched_self()->id].sent++;
		}
		if (rinoo_smtp_end(&email) != 0) {
			goto send_error;
		}
		rinoo_socket_destroy(session.socket);
		session.socket = NULL;
		config->stats.thread[rinoo_sched_self()->id].sessions--;
		if (config->mode == RSC_MODE_SECOND) {
			if (rinoo_task_wait(client_sched, 1000) != 0) {
				goto send_error;
			}
		} else if (config->mode != RSC_MODE_FLOOD) {
			redo = false;
		}
	}
send_error:
	if (session.read != NULL) {
		buffer_destroy(session.read);
	}
	if (session.write != NULL) {
		buffer_destroy(session.write);
	}
	if (session.socket != NULL) {
		config->stats.thread[rinoo_sched_self()->id].sessions--;
		config->stats.thread[rinoo_sched_self()->id].failed++;
		rinoo_socket_destroy(session.socket);
	}
	/* The last client to exit stops the scheduler. */
	if (__atomic_fetch_sub(&thread_counter, 1, __ATOMIC_SEQ_CST) == 1) {
		sched->stop = true;
	}
}
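/* Hedged sketch (hypothetical; rsc_spawn_client is not in the original):
   the SEQ_CST decrement above only works as a "last client out" test if
   every client is counted in before it starts, e.g. by whatever spawns
   the client tasks: */
static void rsc_spawn_client(void *client_sched)
{
	/* Pair each start with the decrement performed at client exit. */
	__atomic_fetch_add(&thread_counter, 1, __ATOMIC_SEQ_CST);
	/* ... schedule rsc_start_client(client_sched) here ... */
}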
short
short_fetch_sub_consume (short *ptr, int value)
{
  return __atomic_fetch_sub (ptr, value, __ATOMIC_CONSUME);
}

char
char_fetch_sub_consume (char *ptr, int value)
{
  return __atomic_fetch_sub (ptr, value, __ATOMIC_CONSUME);
}

__int128_t
quad_fetch_sub_consume (__int128_t *ptr, __int128_t value)
{
  return __atomic_fetch_sub (ptr, value, __ATOMIC_CONSUME);
}

long
long_fetch_sub_consume (long *ptr, long value)
{
  return __atomic_fetch_sub (ptr, value, __ATOMIC_CONSUME);
}

int
atomic_fetch_sub_ACQUIRE ()
{
  return __atomic_fetch_sub (&v, 4096, __ATOMIC_ACQUIRE);
}
ssize_t local_chunk_manager_gen_chunk(struct local_chunk_manager *lcm, off_t offset, size_t length,
                                      struct local_chunk **ret_ch, off_t *chunk_offset)
{
    //LOG(INFO, "local_chunk_manager_gen_chunk called\n");
    struct local_chunk *thread_chunk_pool = lcm->lchunk_pool[GET_THREAD_INDEX()];
    int cur_lchunk_index;
    int cache_miss = 0;
    int old_lchunk_index = lcm->cur_lchunk_index_prev[GET_THREAD_INDEX()];
    int end_lchunk_index = (old_lchunk_index - 1 + LOCAL_POOL_SIZE) % LOCAL_POOL_SIZE;

    for (cur_lchunk_index = old_lchunk_index;
         cur_lchunk_index != end_lchunk_index;
         cur_lchunk_index = (cur_lchunk_index + 1) % LOCAL_POOL_SIZE) {
        struct local_chunk *local_cur_ch = &thread_chunk_pool[cur_lchunk_index];
        struct chunk *cur_ch = local_cur_ch->chunk;
        size_t ch_length = cur_ch ? cur_ch->length : -1;
        off_t ch_offset = cur_ch ? cur_ch->offset : -1;

        // Search for a good chunk (simple algorithm for now)
        if (cur_ch && offset >= ch_offset && offset + length <= ch_length + ch_offset) {
            //LOG(INFO, "Lookup succeeded\n");
            if (cache_miss) { LOG(INFO, "Cache miss, %d\n", cache_miss); }
            *ret_ch = local_cur_ch;
            *chunk_offset = offset - ch_offset;
            if (cache_miss)
                lcm->cur_lchunk_index_prev[GET_THREAD_INDEX()] = cur_lchunk_index;
            return ch_length - offset + ch_offset;
        }
        LOG(DEBUG, "Called with size %ld, offset %ld\n", length, offset);
        LOG(DEBUG, "Missed chunk with offset %ld, size %ld\n", ch_offset, ch_length);
        cache_miss++;
    }

    //LOG(INFO, "Lookup failed\n");
    // Chunk was not found - we need to generate a new one
    old_lchunk_index = lcm->cur_lchunk_index_free[GET_THREAD_INDEX()];
    end_lchunk_index = (old_lchunk_index - 1 + LOCAL_POOL_SIZE) % LOCAL_POOL_SIZE;
    for (cur_lchunk_index = old_lchunk_index;
         cur_lchunk_index != end_lchunk_index;
         cur_lchunk_index = (cur_lchunk_index + 1) % LOCAL_POOL_SIZE) {
        struct local_chunk *local_cur_ch = &thread_chunk_pool[cur_lchunk_index];
        LOG(DEBUG, "Trying to free chunk %d with ref_cnt %d\n",
            cur_lchunk_index, local_cur_ch->local_ref_cnt);
        if (local_cur_ch->local_ref_cnt == 0) {
            struct chunk *cur_ch = local_cur_ch->chunk;
            lcm->cur_lchunk_index_free[GET_THREAD_INDEX()] = (cur_lchunk_index + 1) % LOCAL_POOL_SIZE;
            // Drop our reference on the evicted chunk; the order argument was
            // a bare 0, which is __ATOMIC_RELAXED, spelled out here.
            if (cur_ch)
                __atomic_fetch_sub(&cur_ch->trc.ref_cnt, 1, __ATOMIC_RELAXED);

            struct chunk *new_chunk = NULL;
            off_t ch_offset = 0;
            // Signed so the -1 error sentinel compares without conversion.
            ssize_t ch_length = chunk_manager_gen_chunk(&lcm->cm, offset, length,
                                                        &new_chunk, &ch_offset);
            if (ch_length == -1)
                return -1;
            LOG(DEBUG, "Got new chunk with size %ld, offset %ld\n", ch_length, ch_offset);
            lcm->cur_lchunk_index_prev[GET_THREAD_INDEX()] = cur_lchunk_index;
            local_cur_ch->chunk = new_chunk;
            *ret_ch = local_cur_ch;
            *chunk_offset = offset - new_chunk->offset;
            return new_chunk->length - offset + new_chunk->offset;
        }
    }
    return -1;
}
int
atomic_fetch_sub_ACQUIRE (int a)
{
  return __atomic_fetch_sub (&v, a, __ATOMIC_ACQUIRE);
}
P_LIB_API pboolean
p_atomic_int_dec_and_test (volatile pint *atomic)
{
	return (__atomic_fetch_sub (atomic, 1, __ATOMIC_SEQ_CST) == 1) ? TRUE : FALSE;
}
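/* Hedged usage sketch (demo_object and demo_object_unref are hypothetical,
   not plibsys API): dec-and-test returns TRUE only for whichever thread
   drops the last reference, so exactly one thread runs the cleanup. */
#include <stdlib.h>

typedef struct {
	volatile pint	ref_count;
	/* ... payload ... */
} demo_object;

static void
demo_object_unref (demo_object *obj)
{
	if (p_atomic_int_dec_and_test (&obj->ref_count))
		free (obj);
}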
void
hle_sub (int *p, int v)
{
  __atomic_fetch_sub (p, v, __ATOMIC_ACQUIRE | __ATOMIC_HLE_ACQUIRE);
}
void
hle_sub (int *p, int v)
{
  __atomic_fetch_sub (p, v, __ATOMIC_RELEASE | __ATOMIC_HLE_RELEASE);
}
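/* Hedged sketch of how the two HLE flags above are normally paired,
   following the elided-spinlock pattern in GCC's documentation of
   __ATOMIC_HLE_ACQUIRE/__ATOMIC_HLE_RELEASE; the lock variable and
   function names are ours: */
#include <immintrin.h>  /* _mm_pause */

static int hle_lock;

static void
hle_lock_acquire (void)
{
  /* HLE_ACQUIRE on the RMW that takes the lock starts lock elision.  */
  while (__atomic_exchange_n (&hle_lock, 1,
                              __ATOMIC_ACQUIRE | __ATOMIC_HLE_ACQUIRE))
    _mm_pause ();  /* spin politely while the lock is held */
}

static void
hle_lock_release (void)
{
  /* HLE_RELEASE on the store that drops the lock ends the elided region.  */
  __atomic_store_n (&hle_lock, 0, __ATOMIC_RELEASE | __ATOMIC_HLE_RELEASE);
}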
int
atomic_fetch_sub_negative_ACQUIRE ()
{
  return __atomic_fetch_sub (&v, -4096, __ATOMIC_ACQUIRE);
}
long
atomic_fetch_sub_RELAXED (long a)
{
  return __atomic_fetch_sub (&v, a, __ATOMIC_RELAXED);
}
void test_presence(void)
{
  // CHECK-LABEL: @test_presence
  // CHECK: atomicrmw add i32* {{.*}} seq_cst
  __atomic_fetch_add(&i, 1, memory_order_seq_cst);
  // CHECK: atomicrmw sub i32* {{.*}} seq_cst
  __atomic_fetch_sub(&i, 1, memory_order_seq_cst);
  // CHECK: load atomic i32, i32* {{.*}} seq_cst
  int r;
  __atomic_load(&i, &r, memory_order_seq_cst);
  // CHECK: store atomic i32 {{.*}} seq_cst
  r = 0;
  __atomic_store(&i, &r, memory_order_seq_cst);

  // CHECK: __atomic_fetch_add_8
  __atomic_fetch_add(&l, 1, memory_order_seq_cst);
  // CHECK: __atomic_fetch_sub_8
  __atomic_fetch_sub(&l, 1, memory_order_seq_cst);
  // CHECK: __atomic_load_8
  long long rl;
  __atomic_load(&l, &rl, memory_order_seq_cst);
  // CHECK: __atomic_store_8
  rl = 0;
  __atomic_store(&l, &rl, memory_order_seq_cst);
}