void ijkmp_inc_ref(IjkMediaPlayer *mp)
{
    assert(mp);
    __sync_fetch_and_add(&mp->ref_count, 1);
}
void __cilkrts_obj_metadata_add_task( __cilkrts_pending_frame *t,
                                      __cilkrts_obj_metadata *meta,
                                      __cilkrts_task_list_node *tags,
                                      int g )
{
    // Set pointer to task in argument's tags storage
    tags->st_task_and_last = t;

    // Full mutual exclusion to avoid races
    spin_mutex_lock( &meta->mutex );

    // Optimized version. This is called only if the task is already running,
    // by stealing the parent of an un-issued ready task. In this case,
    // joins=1, pushg=0 and ready=1, so we only need to set num_gens=1,
    // youngest_group=g and oldest_num_tasks++ without further ado.
    if( !t ) {
        meta->num_gens = 1;
        meta->oldest_num_tasks++;
        meta->youngest_group = g;
        spin_mutex_unlock( &meta->mutex );
        return;
    }

    int joins = ( meta->youngest_group
                  & ((g | CILK_OBJ_GROUP_EMPTY) & CILK_OBJ_GROUP_NOT_WRITE) ) != 0;
    int pushg = ( g & ( meta->youngest_group & CILK_OBJ_GROUP_NOT_WRITE ) ) == 0;
    int ready = joins & ( meta->num_gens <= 1 );

    // push_generation( pushg );
    // TODO: inside the critical section, so a non-atomic update suffices?
    // __sync_fetch_and_add( &meta->num_gens, (uint32_t)pushg );
    meta->num_gens += (uint32_t)pushg;
    meta->oldest_num_tasks += ready;
    meta->youngest_group = g;

    if( !ready ) {
        // t->add_incoming();
        __sync_fetch_and_add( &t->incoming_count, 1 );

        // We avoid branches by using a sentinel node in tasks (the pointer
        // retrieved is always meaningful) and by unconditionally storing
        // a value.
        __cilkrts_task_list_node * old_tail = meta->tasks.tail;
        tags->it_next = 0;
        old_tail->it_next = tags;
        // old_tail->set_last_in_generation( pushg );
        // TODO: should the bit be set at all? If not, the "& ~1" part can go.
        old_tail->st_task_and_last = (__cilkrts_pending_frame *)
            ( ( (uintptr_t)old_tail->st_task_and_last & ~(uintptr_t)1 )
              | (uintptr_t)pushg );
        meta->tasks.tail = tags;
    }

    __CILKRTS_ASSERT( (meta->num_gens <= 1) == (meta->tasks.head.it_next == 0) );
    __CILKRTS_ASSERT( meta->num_gens > 0 );

    spin_mutex_unlock( &meta->mutex );
}
__CILKRTS_INLINE
void __cilkrts_obj_payload_del_ref( __cilkrts_obj_payload *pl )
{
    // __sync_fetch_and_add returns the old value; if it was 1,
    // this call dropped the last reference.
    if( __sync_fetch_and_add( &pl->refcnt, -1 ) == 1 )
        __cilkrts_obj_payload_destroy( pl );
}
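/*
 * A minimal sketch of the same reference-counting idiom in isolation,
 * with a hypothetical refcounted type obj_t (not part of the Cilk
 * runtime above). Because __sync_fetch_and_add returns the value held
 * *before* the update, comparing the decrement's result against 1
 * detects the final release exactly once, even under contention.
 */
typedef struct obj {
    int refcnt;                     /* starts at 1 for the creator */
    void (*destroy)(struct obj *);
} obj_t;

static void obj_acquire(obj_t *o)
{
    __sync_fetch_and_add(&o->refcnt, 1);
}

static void obj_release(obj_t *o)
{
    if (__sync_fetch_and_add(&o->refcnt, -1) == 1)
        o->destroy(o);              /* last reference dropped */
}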
static int globalHookHandler(TSCont contp, TSEvent event ATS_UNUSED, void *edata)
{
    TSHttpTxn txnp = (TSHttpTxn)edata;
    TSMBuffer bufp;
    TSMLoc hdr_loc;
    TSMLoc url_loc;
    int ret;
    uint64_t req_id;
    TSCont txn_contp;

    lua_State *l;

    ts_lua_main_ctx *main_ctx;
    ts_lua_http_ctx *http_ctx;
    ts_lua_cont_info *ci;

    ts_lua_instance_conf *conf = (ts_lua_instance_conf *)TSContDataGet(contp);

    req_id = __sync_fetch_and_add(&ts_lua_g_http_next_id, 1);

    main_ctx = &ts_lua_g_main_ctx_array[req_id % TS_LUA_MAX_STATE_COUNT];

    TSDebug(TS_LUA_DEBUG_TAG, "[%s] req_id: %" PRId64, __FUNCTION__, req_id);
    TSMutexLock(main_ctx->mutexp);

    http_ctx           = ts_lua_create_http_ctx(main_ctx, conf);
    http_ctx->txnp     = txnp;
    http_ctx->rri      = NULL;
    http_ctx->has_hook = 0;

    if (!http_ctx->client_request_bufp) {
        if (TSHttpTxnClientReqGet(txnp, &bufp, &hdr_loc) == TS_SUCCESS) {
            http_ctx->client_request_bufp = bufp;
            http_ctx->client_request_hdrp = hdr_loc;

            if (TSHttpHdrUrlGet(bufp, hdr_loc, &url_loc) == TS_SUCCESS) {
                http_ctx->client_request_url = url_loc;
            }
        }
    }

    if (!http_ctx->client_request_hdrp) {
        ts_lua_destroy_http_ctx(http_ctx);
        TSMutexUnlock(main_ctx->mutexp);

        TSHttpTxnReenable(txnp, TS_EVENT_HTTP_CONTINUE);
        return 0;
    }

    txn_contp = TSContCreate(ts_lua_http_cont_handler, NULL);
    TSContDataSet(txn_contp, http_ctx);

    ci        = &http_ctx->cinfo;
    ci->contp = txn_contp;
    ci->mutex = TSContMutexGet((TSCont)txnp);

    l = ci->routine.lua;

    switch (event) {
    case TS_EVENT_HTTP_READ_REQUEST_HDR:
        lua_getglobal(l, TS_LUA_FUNCTION_G_READ_REQUEST);
        break;

    case TS_EVENT_HTTP_SEND_REQUEST_HDR:
        lua_getglobal(l, TS_LUA_FUNCTION_G_SEND_REQUEST);
        break;

    case TS_EVENT_HTTP_READ_RESPONSE_HDR:
        lua_getglobal(l, TS_LUA_FUNCTION_G_READ_RESPONSE);
        break;

    case TS_EVENT_HTTP_SEND_RESPONSE_HDR:
        // The client response can be changed within a transaction
        // (e.g. due to the follow-redirect feature), so clear the
        // pointers to allow the APIs to fetch them again when the
        // hook is re-entered.
        if (http_ctx->client_response_hdrp != NULL) {
            TSHandleMLocRelease(http_ctx->client_response_bufp, TS_NULL_MLOC,
                                http_ctx->client_response_hdrp);
            http_ctx->client_response_hdrp = NULL;
        }
        lua_getglobal(l, TS_LUA_FUNCTION_G_SEND_RESPONSE);
        break;

    case TS_EVENT_HTTP_CACHE_LOOKUP_COMPLETE:
        lua_getglobal(l, TS_LUA_FUNCTION_G_CACHE_LOOKUP_COMPLETE);
        break;

    case TS_EVENT_HTTP_TXN_START:
        lua_getglobal(l, TS_LUA_FUNCTION_G_TXN_START);
        break;

    case TS_EVENT_HTTP_PRE_REMAP:
        lua_getglobal(l, TS_LUA_FUNCTION_G_PRE_REMAP);
        break;

    case TS_EVENT_HTTP_POST_REMAP:
        lua_getglobal(l, TS_LUA_FUNCTION_G_POST_REMAP);
        break;

    case TS_EVENT_HTTP_SELECT_ALT:
        lua_getglobal(l, TS_LUA_FUNCTION_G_SELECT_ALT);
        break;

    case TS_EVENT_HTTP_OS_DNS:
        lua_getglobal(l, TS_LUA_FUNCTION_G_OS_DNS);
        break;

    case TS_EVENT_HTTP_READ_CACHE_HDR:
        lua_getglobal(l, TS_LUA_FUNCTION_G_READ_CACHE);
        break;

    case TS_EVENT_HTTP_TXN_CLOSE:
        lua_getglobal(l, TS_LUA_FUNCTION_G_TXN_CLOSE);
        break;

    default:
        ts_lua_destroy_http_ctx(http_ctx);
        TSMutexUnlock(main_ctx->mutexp);
        TSHttpTxnReenable(txnp, TS_EVENT_HTTP_CONTINUE);
        return 0;
    }

    if (lua_type(l, -1) != LUA_TFUNCTION) {
        lua_pop(l, 1);
        ts_lua_destroy_http_ctx(http_ctx);
        TSMutexUnlock(main_ctx->mutexp);

        TSHttpTxnReenable(txnp, TS_EVENT_HTTP_CONTINUE);
        return 0;
    }

    ts_lua_set_cont_info(l, NULL);

    if (lua_pcall(l, 0, 1, 0) != 0) {
        TSError("[ts_lua] lua_pcall failed: %s", lua_tostring(l, -1));
    }

    ret = lua_tointeger(l, -1);
    lua_pop(l, 1);

    if (http_ctx->has_hook) {
        // add a hook to release resources for the context
        TSDebug(TS_LUA_DEBUG_TAG, "[%s] has txn hook -> adding txn close hook handler to release resources", __FUNCTION__);
        TSHttpTxnHookAdd(txnp, TS_HTTP_TXN_CLOSE_HOOK, txn_contp);
    } else {
        TSDebug(TS_LUA_DEBUG_TAG, "[%s] no txn hook -> release resources now", __FUNCTION__);
        ts_lua_destroy_http_ctx(http_ctx);
    }

    TSMutexUnlock(main_ctx->mutexp);

    if (ret) {
        TSHttpTxnReenable(txnp, TS_EVENT_HTTP_ERROR);
    } else {
        TSHttpTxnReenable(txnp, TS_EVENT_HTTP_CONTINUE);
    }

    return 0;
}
void ia_doer_destroy(iadoer *doer)
{
    __sync_fetch_and_add(&ioarena.doers_count, -1);
    ia_histogram_destroy(&doer->hg);
    ia_kvpool_destory(&doer->pool);
}
void *Allocate(void *arg)
{
    x = (char *)malloc(10);
    __sync_fetch_and_add(&state, 1);
    // spin until the shared state counter reaches 3
    while (__sync_fetch_and_add(&state, 0) != 3) {}
    return NULL;
}
static void *Worker( void *arg )
{
    TYPE id = (size_t)arg;
    uint64_t entry;
    int other = inv( id );                      // int is better than TYPE

#ifdef FAST
    unsigned int cnt = 0, oid = id;
#endif // FAST

    for ( int r = 0; r < RUNS; r += 1 ) {
        entry = 0;
        while ( stop == 0 ) {
            for ( ;; ) {
#ifdef FLICKER
                for ( int i = 0; i < 100; i += 1 ) intents[id] = i % 2; // flicker
#endif // FLICKER
                intents[id] = WantIn;           // declare intent
                // Necessary to prevent the read of intents[other] from floating above the
                // assignment intents[id] = WantIn, when the hardware determines the two
                // subscripts are different.
                Fence();                        // force store before more loads
                if ( FASTPATH( intents[other] == DontWantIn ) ) break;
                if ( last == id ) {
#ifdef FLICKER
                    for ( int i = 0; i < 100; i += 1 ) intents[id] = i % 2; // flicker
#endif // FLICKER
                    intents[id] = DontWantIn;
                    // Optional fence to prevent the load of "last" from being lifted above
                    // the store intents[id] = DontWantIn. Because a thread only writes its
                    // own id into "last", and because of eventual consistency (writes
                    // eventually become visible), the fence is conservative.
                    //Fence();                  // force store before more loads
                    await( last != id );        // low priority busy wait
                } // if
            } // for
            CriticalSection( id );
#ifdef FLICKER
            for ( int i = id; i < 100; i += 1 ) last = i % 2; // flicker
#endif // FLICKER
            last = id;                          // exit protocol
#ifdef FLICKER
            for ( int i = 0; i < 100; i += 1 ) intents[id] = i % 2; // flicker
#endif // FLICKER
            intents[id] = DontWantIn;
#ifdef FAST
            id = startpoint( cnt );             // different starting point each experiment
            other = inv( id );
            cnt = cycleUp( cnt, NoStartPoints );
#endif // FAST
            entry += 1;
        } // while
#ifdef FAST
        id = oid;
        other = inv( id );
#endif // FAST
        entries[r][id] = entry;
        __sync_fetch_and_add( &Arrived, 1 );
        while ( stop != 0 ) Pause();
        __sync_fetch_and_add( &Arrived, -1 );
    } // for
    return NULL;
} // Worker
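/*
 * A minimal sketch of the Fence()/Pause() primitives the Worker above
 * relies on, assuming GCC-style builtins; the benchmark harness's own
 * definitions (like await()) are not shown here and may differ. Fence()
 * must be a full memory barrier so the store to intents[id] becomes
 * visible before intents[other] is loaded.
 */
static inline void Fence( void )
{
    __sync_synchronize();               /* full hardware memory fence */
}

static inline void Pause( void )
{
#if defined(__x86_64__) || defined(__i386__)
    __asm__ __volatile__ ( "pause" );   /* reduce spin-wait bus pressure */
#endif
}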
/* command from agent */
static int
vr_flow_set(struct vrouter *router, vr_flow_req *req)
{
    int ret;
    unsigned int fe_index;
    struct vr_flow_entry *fe = NULL;
    struct vr_flow_table_info *infop = router->vr_flow_table_info;

    router = vrouter_get(req->fr_rid);
    if (!router)
        return -EINVAL;

    fe = vr_get_flow_entry(router, req->fr_index);
    if ((ret = vr_flow_req_is_invalid(router, req, fe)))
        return ret;

    if (fe && (fe->fe_action == VR_FLOW_ACTION_HOLD) &&
            ((req->fr_action != fe->fe_action) ||
             !(req->fr_flags & VR_FLOW_FLAG_ACTIVE)))
        __sync_fetch_and_add(&infop->vfti_action_count, 1);

    /*
     * for delete, absence of the requested flow entry is an error, so
     * handle that case first
     */
    if (!(req->fr_flags & VR_FLOW_FLAG_ACTIVE)) {
        if (!fe)
            return -EINVAL;
        return vr_flow_delete(router, req, fe);
    }

    /*
     * for non-delete cases, absence of a flow entry means addition of a
     * new flow entry with the key specified in the request
     */
    if (!fe) {
        fe = vr_add_flow_req(req, &fe_index);
        if (!fe)
            return -ENOSPC;
    }

    vr_flow_set_mirror(router, req, fe);

    if (req->fr_flags & VR_RFLOW_VALID) {
        fe->fe_rflow = req->fr_rindex;
    } else {
        if (fe->fe_rflow >= 0)
            fe->fe_rflow = -1;
    }

    fe->fe_vrf = req->fr_flow_vrf;
    if (req->fr_flags & VR_FLOW_FLAG_VRFT)
        fe->fe_dvrf = req->fr_flow_dvrf;

    fe->fe_ecmp_nh_index = req->fr_ecmp_nh_index;
    fe->fe_src_nh_index = req->fr_src_nh_index;
    fe->fe_action = req->fr_action;
    fe->fe_flags = req->fr_flags;

    return vr_flow_schedule_transition(router, req, fe);
}
inline void atomic_increment( sp_int32_t * pw ) { __sync_fetch_and_add( pw, 1 ); }
void *realloc(void *ptr, size_t size)
{
    struct log_malloc_s *mem;
    sig_atomic_t memuse = 0;
    sig_atomic_t memruse = 0;
    sig_atomic_t memchange = 0;
#ifdef HAVE_MALLOC_USABLE_SIZE
    size_t rsize = 0;
    sig_atomic_t memrchange = 0;
#endif

    if(!DL_RESOLVE_CHECK(realloc))
        return NULL;

    mem = (ptr != NULL) ? MEM_HEAD(ptr) : NULL;

    //FIXME: not handling foreign memory here (seems not needed)
    if(mem && (mem->size != ~mem->cb))
    {
        assert(mem->size == ~mem->cb); /* trap the sentinel mismatch in debug builds */
        return NULL;
    }

    if((mem = real_realloc(mem, size + MEM_OFF)) != NULL)
    {
        memchange = (ptr) ? size - mem->size : size;
        memuse = __sync_add_and_fetch(&g_ctx.mem_used, memchange);
#ifdef HAVE_MALLOC_USABLE_SIZE
        rsize = malloc_usable_size(mem);
        memrchange = (ptr) ? rsize - mem->rsize : rsize;
        memruse = __sync_add_and_fetch(&g_ctx.mem_rused, memrchange);
#endif
    }
#ifndef DISABLE_CALL_COUNTS
    (void)__sync_fetch_and_add(&g_ctx.stat.realloc, 1);
    g_ctx.stat.unrel_sum++;
#endif

    if(!g_ctx.memlog_disabled)
    {
        int s;
        char buf[LOG_BUFSIZE];

        s = snprintf(buf, sizeof(buf), "+ realloc %d %p %p (%zu %zu) [%u:%u]\n",
                     memchange, ptr, MEM_PTR(mem),
                     (mem ? mem->size : 0), size, memuse, memruse);
        log_trace(buf, s, sizeof(buf), 1);
    }

    /* now we can update */
    if(mem != NULL)
    {
        mem->size = size;
        mem->cb = ~mem->size;
#ifdef HAVE_MALLOC_USABLE_SIZE
        mem->rsize = rsize;
#endif
    }

    return MEM_PTR(mem);
}
// Bump allocator: atomically advance gNextAlloc by the size rounded up to a
// 64-byte boundary. __sync_fetch_and_add returns the old value, i.e. the
// start of the newly reserved region.
void *allocMem(unsigned int size)
{
    return (void *)__sync_fetch_and_add(&gNextAlloc, (size + 63) & ~63);
}
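/*
 * Purely illustrative usage of the bump allocator above; the example
 * function and the single-threaded assumption are hypothetical, not
 * from the original source. Each call reserves a disjoint region that
 * is a multiple of 64 bytes, so concurrent callers never overlap.
 */
#include <assert.h>

static void allocMem_example(void)
{
    char *a = allocMem(10);   /* advances gNextAlloc by 64  */
    char *b = allocMem(100);  /* advances gNextAlloc by 128 */
    assert(b - a == 64);      /* holds when no other thread interleaves */
}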
VOID_TASK_IMPL_2(sylvan_skiplist_assign_next, sylvan_skiplist_t, l, MTBDD, dd)
{
    if (dd == mtbdd_false || dd == mtbdd_true) return;

    uint32_t trace[SL_DEPTH];
    uint32_t loc = 0, loc_next = 0, k = SL_DEPTH-1;
    for (;;) {
        /* invariant: [loc].dd < dd */
        /* note: this is always true for loc==0 */
        sl_bucket *e = l->buckets + loc;
        loc_next = (*(volatile uint32_t*)&e->next[k]) & 0x7fffffff;
        if (loc_next != 0 && l->buckets[loc_next].dd == dd) {
            /* found */
            return;
        } else if (loc_next != 0 && l->buckets[loc_next].dd < dd) {
            /* go right */
            loc = loc_next;
        } else if (k > 0) {
            /* go down */
            trace[k] = loc;
            k--;
        } else if (!(e->next[0] & 0x80000000) &&
                   cas(&e->next[0], loc_next, loc_next|0x80000000)) {
            /* locked */
            break;
        }
    }

    /* claim next item */
    const uint64_t next = __sync_fetch_and_add(&l->next, 1);
    if (next >= l->size) {
        fprintf(stderr, "Out of cheese exception, no more blocks available\n");
        exit(1);
    }

    /* fill next item */
    sl_bucket *a = l->buckets + next;
    a->dd = dd;
    a->next[0] = loc_next;
    compiler_barrier();
    l->buckets[loc].next[0] = next;

    /* determine height */
    uint64_t h = 1 + __builtin_clz(LACE_TRNG) / 2;
    if (h > SL_DEPTH) h = SL_DEPTH;

    /* go up and create links */
    for (k=1; k<h; k++) {
        loc = trace[k];
        for (;;) {
            sl_bucket *e = l->buckets + loc;
            /* note, at k>0, no locks on edges */
            uint32_t loc_next = *(volatile uint32_t*)&e->next[k];
            if (loc_next != 0 && l->buckets[loc_next].dd < dd) {
                loc = loc_next;
            } else {
                a->next[k] = loc_next;
                if (cas(&e->next[k], loc_next, next)) break;
            }
        }
    }
}
int dummy;

// Emulate a full memory fence with an atomic read-modify-write on a dummy
// variable: __sync_fetch_and_add implies a full barrier.
void MFENCE() { __sync_fetch_and_add(&dummy, 0); }
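/*
 * A more direct formulation, shown for comparison only (the name
 * MFENCE_alt is not from the original source): GCC's
 * __sync_synchronize() issues a full memory barrier without the
 * artificial atomic add on a shared dummy word.
 */
void MFENCE_alt() { __sync_synchronize(); }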
// reap completed writes
// if a write failed, remove the data from the cache.
// NOTE: we assume that only one thread calls this at a time, for a given cache
// always succeeds
void md_cache_complete_writes( struct md_syndicate_cache* cache, md_cache_lru_t* write_lru )
{
    md_cache_completion_buffer_t* completed = NULL;

    // get the current completed buffer, and switch to the other
    md_cache_completed_wlock( cache );

    completed = cache->completed;
    if( cache->completed == cache->completed_1 ) {
        cache->completed = cache->completed_2;
    }
    else {
        cache->completed = cache->completed_1;
    }

    md_cache_completed_unlock( cache );

    // safe to use completed as long as no one else performs the above swap

    int write_count = 0;

    // reap completed writes
    for( md_cache_completion_buffer_t::iterator itr = completed->begin(); itr != completed->end(); itr++ ) {
        struct md_cache_block_future* f = *itr;
        struct md_cache_entry_key* c = &f->key;

        // finished an aio write
        md_cache_ongoing_writes_wlock( cache );
        md_cache_remove_ongoing( cache, f );
        md_cache_ongoing_writes_unlock( cache );

        if( f->aio_rc != 0 ) {
            SG_error("WARN: write aio %" PRIX64 ".%" PRId64 "[%" PRIu64 ".%" PRId64 "] rc = %d\n",
                     c->file_id, c->file_version, c->block_id, c->block_version, f->aio_rc );

            // clean up
            md_cache_evict_block_internal( cache, c->file_id, c->file_version, c->block_id, c->block_version );
        }
        else if( f->write_rc < 0 ) {
            SG_error("WARN: write %" PRIX64 ".%" PRId64 "[%" PRIu64 ".%" PRId64 "] rc = %d\n",
                     c->file_id, c->file_version, c->block_id, c->block_version, f->write_rc );

            // clean up
            md_cache_evict_block_internal( cache, c->file_id, c->file_version, c->block_id, c->block_version );
        }
        else {
            // finished!
            if( write_lru ) {
                // log this as written
                write_lru->push_back( *c );
            }
            write_count++;
        }

        // finalized!
        f->finalized = true;
        bool detached = f->detached;

        // wake up anyone waiting on this
        sem_post( &f->sem_ongoing );

        // are we supposed to reap it?
        if( detached || !cache->running ) {
            md_cache_block_future_free( f );
        }
    }

    // successfully cached blocks
    __sync_fetch_and_add( &cache->num_blocks_written, write_count );

    if( write_count != 0 )
        SG_debug("Cache now has %d blocks\n", cache->num_blocks_written );

    completed->clear();
}
operator long() const { return __sync_fetch_and_add( &value_, 0 ); }
inline sp_int32_t atomic_decrement( sp_int32_t * pw ) { return __sync_fetch_and_add( pw, -1 ); }
static int transactionStartHookHandler(TSCont contp, TSEvent event ATS_UNUSED, void *edata)
{
    TSHttpTxn txnp = (TSHttpTxn)edata;

    uint64_t req_id;
    TSCont txn_contp;
    TSCont global_contp;

    ts_lua_main_ctx *main_ctx;
    ts_lua_http_ctx *http_ctx;

    ts_lua_instance_conf *conf = (ts_lua_instance_conf *)TSContDataGet(contp);

    req_id = __sync_fetch_and_add(&ts_lua_g_http_next_id, 1);

    main_ctx = &ts_lua_g_main_ctx_array[req_id % TS_LUA_MAX_STATE_COUNT];

    TSDebug(TS_LUA_DEBUG_TAG, "[%s] req_id: %" PRId64, __FUNCTION__, req_id);
    TSMutexLock(main_ctx->mutexp);

    http_ctx = ts_lua_create_http_ctx(main_ctx, conf);
    http_ctx->txnp = txnp;
    http_ctx->remap = 0;

    txn_contp = TSContCreate(ts_lua_http_cont_handler, NULL);
    TSContDataSet(txn_contp, http_ctx);
    http_ctx->main_contp = txn_contp;

    global_contp = TSContCreate(globalHookHandler, NULL);
    TSContDataSet(global_contp, http_ctx);

    // add hooks based on whether the Lua global function exists
    lua_State *l = http_ctx->lua;

    lua_getglobal(l, TS_LUA_FUNCTION_G_SEND_REQUEST);
    if (lua_type(l, -1) == LUA_TFUNCTION) {
        TSHttpTxnHookAdd(txnp, TS_HTTP_SEND_REQUEST_HDR_HOOK, global_contp);
        TSDebug(TS_LUA_DEBUG_TAG, "send_request_hdr_hook added");
    }
    lua_pop(l, 1);

    lua_getglobal(l, TS_LUA_FUNCTION_G_READ_RESPONSE);
    if (lua_type(l, -1) == LUA_TFUNCTION) {
        TSHttpTxnHookAdd(txnp, TS_HTTP_READ_RESPONSE_HDR_HOOK, global_contp);
        TSDebug(TS_LUA_DEBUG_TAG, "read_response_hdr_hook added");
    }
    lua_pop(l, 1);

    lua_getglobal(l, TS_LUA_FUNCTION_G_SEND_RESPONSE);
    if (lua_type(l, -1) == LUA_TFUNCTION) {
        TSHttpTxnHookAdd(txnp, TS_HTTP_SEND_RESPONSE_HDR_HOOK, global_contp);
        TSDebug(TS_LUA_DEBUG_TAG, "send_response_hdr_hook added");
    }
    lua_pop(l, 1);

    lua_getglobal(l, TS_LUA_FUNCTION_G_CACHE_LOOKUP_COMPLETE);
    if (lua_type(l, -1) == LUA_TFUNCTION) {
        TSHttpTxnHookAdd(txnp, TS_HTTP_CACHE_LOOKUP_COMPLETE_HOOK, global_contp);
        TSDebug(TS_LUA_DEBUG_TAG, "cache_lookup_complete_hook added");
    }
    lua_pop(l, 1);

    lua_getglobal(l, TS_LUA_FUNCTION_G_READ_REQUEST);
    if (lua_type(l, -1) == LUA_TFUNCTION) {
        TSHttpTxnHookAdd(txnp, TS_HTTP_READ_REQUEST_HDR_HOOK, global_contp);
        TSDebug(TS_LUA_DEBUG_TAG, "read_request_hdr_hook added");
    }
    lua_pop(l, 1);

    lua_getglobal(l, TS_LUA_FUNCTION_G_TXN_START);
    if (lua_type(l, -1) == LUA_TFUNCTION) {
        TSHttpTxnHookAdd(txnp, TS_HTTP_TXN_START_HOOK, global_contp);
        TSDebug(TS_LUA_DEBUG_TAG, "txn_start_hook added");
    }
    lua_pop(l, 1);

    lua_getglobal(l, TS_LUA_FUNCTION_G_PRE_REMAP);
    if (lua_type(l, -1) == LUA_TFUNCTION) {
        TSHttpTxnHookAdd(txnp, TS_HTTP_PRE_REMAP_HOOK, global_contp);
        TSDebug(TS_LUA_DEBUG_TAG, "pre_remap_hook added");
    }
    lua_pop(l, 1);

    lua_getglobal(l, TS_LUA_FUNCTION_G_POST_REMAP);
    if (lua_type(l, -1) == LUA_TFUNCTION) {
        TSHttpTxnHookAdd(txnp, TS_HTTP_POST_REMAP_HOOK, global_contp);
        TSDebug(TS_LUA_DEBUG_TAG, "post_remap_hook added");
    }
    lua_pop(l, 1);

    lua_getglobal(l, TS_LUA_FUNCTION_G_SELECT_ALT);
    if (lua_type(l, -1) == LUA_TFUNCTION) {
        TSHttpTxnHookAdd(txnp, TS_HTTP_SELECT_ALT_HOOK, global_contp);
        TSDebug(TS_LUA_DEBUG_TAG, "select_alt_hook added");
    }
    lua_pop(l, 1);

    lua_getglobal(l, TS_LUA_FUNCTION_G_OS_DNS);
    if (lua_type(l, -1) == LUA_TFUNCTION) {
        TSHttpTxnHookAdd(txnp, TS_HTTP_OS_DNS_HOOK, global_contp);
        TSDebug(TS_LUA_DEBUG_TAG, "os_dns_hook added");
    }
    lua_pop(l, 1);

    lua_getglobal(l, TS_LUA_FUNCTION_G_READ_CACHE);
    if (lua_type(l, -1) == LUA_TFUNCTION) {
        TSHttpTxnHookAdd(txnp, TS_HTTP_READ_CACHE_HDR_HOOK, global_contp);
        TSDebug(TS_LUA_DEBUG_TAG, "read_cache_hdr_hook added");
    }
    lua_pop(l, 1);

    lua_getglobal(l, TS_LUA_FUNCTION_G_TXN_CLOSE);
    if (lua_type(l, -1) == LUA_TFUNCTION) {
        TSHttpTxnHookAdd(txnp, TS_HTTP_TXN_CLOSE_HOOK, global_contp);
        TSDebug(TS_LUA_DEBUG_TAG, "txn_close_hook added");
    }
    lua_pop(l, 1);

    // add a hook to release resources for context
    TSHttpTxnHookAdd(txnp, TS_HTTP_TXN_CLOSE_HOOK, txn_contp);

    TSMutexUnlock(main_ctx->mutexp);

    TSHttpTxnReenable(txnp, TS_EVENT_HTTP_CONTINUE);
    return 0;
}
int32_t cxAtomicAddInt32(int32_t *p, int32_t x) { return __sync_fetch_and_add(p,x); }
void *Deallocate(void *arg)
{
    free(x);
    __sync_fetch_and_add(&state, 1);
    // spin until the shared state counter reaches 3
    while (__sync_fetch_and_add(&state, 0) != 3) {}
    return NULL;
}
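/*
 * A minimal driver sketch for the Allocate/Deallocate pair above,
 * assuming `x` and `state` are the shared globals declared here (their
 * original declarations are not included in this excerpt), and that
 * the main thread is the third participant. Each participant bumps
 * `state` once, so the workers' spin loops exit when it reaches 3.
 * Note the pair is intentionally unsynchronized with respect to `x`
 * itself -- Deallocate may free `x` before Allocate assigns it -- which
 * is typical of a race-detector test case.
 */
#include <pthread.h>
#include <stdlib.h>

char *x;
int state; /* assumed to start at 0 */

int main(void)
{
    pthread_t t1, t2;
    pthread_create(&t1, NULL, Allocate, NULL);
    pthread_create(&t2, NULL, Deallocate, NULL);
    __sync_fetch_and_add(&state, 1);  /* third check-in releases the spinners */
    pthread_join(t1, NULL);
    pthread_join(t2, NULL);
    return 0;
}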
void startParallel()
{
    startAllThreads();
    __sync_fetch_and_add(&gActiveThreadCount, 1);
}
static TSRemapStatus ts_lua_remap_plugin_init(void *ih, TSHttpTxn rh, TSRemapRequestInfo *rri)
{
    int ret;
    uint64_t req_id;
    TSCont contp;
    lua_State *L;

    ts_lua_main_ctx *main_ctx;
    ts_lua_http_ctx *http_ctx;
    ts_lua_cont_info *ci;

    ts_lua_instance_conf *instance_conf;

    int remap = (rri == NULL ? 0 : 1);
    instance_conf = (ts_lua_instance_conf *)ih;

    req_id = __sync_fetch_and_add(&ts_lua_http_next_id, 1);

    main_ctx = &ts_lua_main_ctx_array[req_id % TS_LUA_MAX_STATE_COUNT];

    TSMutexLock(main_ctx->mutexp);

    http_ctx = ts_lua_create_http_ctx(main_ctx, instance_conf);

    http_ctx->txnp     = rh;
    http_ctx->has_hook = 0;
    http_ctx->rri      = rri;
    if (rri != NULL) {
        http_ctx->client_request_bufp = rri->requestBufp;
        http_ctx->client_request_hdrp = rri->requestHdrp;
        http_ctx->client_request_url  = rri->requestUrl;
    }

    ci = &http_ctx->cinfo;
    L  = ci->routine.lua;

    contp = TSContCreate(ts_lua_http_cont_handler, NULL);
    TSContDataSet(contp, http_ctx);

    ci->contp = contp;
    ci->mutex = TSContMutexGet((TSCont)rh);

    lua_getglobal(L, (remap ? TS_LUA_FUNCTION_REMAP : TS_LUA_FUNCTION_OS_RESPONSE));
    if (lua_type(L, -1) != LUA_TFUNCTION) {
        TSMutexUnlock(main_ctx->mutexp);
        return TSREMAP_NO_REMAP;
    }

    ts_lua_set_cont_info(L, NULL);

    if (lua_pcall(L, 0, 1, 0) != 0) {
        TSError("[ts_lua] lua_pcall failed: %s", lua_tostring(L, -1));
        ret = TSREMAP_NO_REMAP;
    } else {
        ret = lua_tointeger(L, -1);
    }

    lua_pop(L, 1);

    if (http_ctx->has_hook) {
        TSDebug(TS_LUA_DEBUG_TAG, "[%s] has txn hook -> adding txn close hook handler to release resources", __FUNCTION__);
        TSHttpTxnHookAdd(rh, TS_HTTP_TXN_CLOSE_HOOK, contp);
    } else {
        TSDebug(TS_LUA_DEBUG_TAG, "[%s] no txn hook -> release resources now", __FUNCTION__);
        ts_lua_destroy_http_ctx(http_ctx);
    }

    TSMutexUnlock(main_ctx->mutexp);

    return ret;
}
valtype Inc(volatile valtype &value) { return __sync_fetch_and_add(&value, 1); }
void* get(Ref* ref) { return __sync_fetch_and_add(&ref->value, 0); }
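/*
 * Sketch of a companion store for the Ref type above; the name set()
 * is an assumption, not from the original source. Adding 0 with
 * __sync_fetch_and_add (as in get()) is a common pre-C11 idiom for an
 * atomic read with a full barrier; the analogous atomic write can be
 * expressed as a CAS loop.
 */
void set(Ref *ref, void *v)
{
    void *old;
    do {
        old = ref->value;
    } while (!__sync_bool_compare_and_swap(&ref->value, old, v));
}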
valtype Add(volatile valtype &value, valtype amt) { return __sync_fetch_and_add(&value, amt); }
static int32_t mccdaq_callback(uint16_t * d, int32_t max_d)
{
    #define MAX_DATA 1000000

    static uint16_t data[MAX_DATA];
    static int32_t  max_data;
    static int32_t  idx;
    static uint64_t time_last;
    static uint16_t pulse_threshold = PULSE_THRESHOLD_NOT_SET;

    DEBUG("max_data=%d\n", max_data);

    // keep track of total samples received
    total_samples_rcvd += max_d;

    // if mode is PLOT then plot the value;
    // mode_arg is the number of samples to plot, when this
    // becomes zero then set mode to IDLE
    if (mode == PLOT) {
        int32_t i;
        for (i = 0; i < max_d; i++) {
            if (mode_arg <= 0) {
                mode_arg = 0;
                mode = IDLE;
                break;
            }
            print_plot_str(d[i], PULSE_THRESHOLD_NOT_SET);
            mode_arg--;
        }
    }

    // copy received data to array large enough for one second of data
    if (max_data + max_d > MAX_DATA) {
        FATAL("too much data %d+%d > %d\n", max_data, max_d, MAX_DATA);
    }
    memcpy(data+max_data, d, max_d*sizeof(uint16_t));
    max_data += max_d;

    // determine pulse threshold ...
    //
    // if there are fewer than 1000 samples of data available for this second then
    //   return, because we want 1000 or more samples to determine the pulse_threshold
    // else if pulse_threshold is not set then
    //   determine the minimum he3 adc value for the 1000 samples, and
    //   set pulse_threshold to that baseline value plus 8
    // endif
    if (max_data < 1000) {
        return 0;
    } else if (pulse_threshold == PULSE_THRESHOLD_NOT_SET) {
        int32_t i;
        int32_t baseline = 1000000000;
        for (i = 0; i < 1000; i++) {
            if (data[i] < baseline) {
                baseline = data[i];
            }
        }
        pulse_threshold = baseline + 8;
        DEBUG("pulse_threshold-2048=%d\n", pulse_threshold-2048);
    }

    // search for pulses in the data
    // - when a pulse is found determine
    //   . pulse_start_idx, pulse_end_idx
    //   . pulse_height
    //   . pulse_chan
    //   . pulse_count[MAX_CHANNEL]
    // - if mode is PULSEMON then
    //   plot the pulse and print pulse height
    // endif
    bool in_a_pulse = false;
    int32_t pulse_start_idx = -1;
    int32_t pulse_end_idx = -1;

    while (true) {
        // terminate this loop when
        // - near the end of data and not processing a pulse OR
        // - at the end of data
        if ((!in_a_pulse && idx >= max_data-10) || (idx == max_data)) {
            break;
        }

        // print warning if data out of range
        if (data[idx] > 4095) {
            WARN("data[%d] = %u, is out of range\n", idx, data[idx]);
            data[idx] = 2048;
        }

        // determine the pulse_start_idx and pulse_end_idx
        if (data[idx] >= pulse_threshold && !in_a_pulse) {
            in_a_pulse = true;
            pulse_start_idx = idx;
        } else if (data[idx] < pulse_threshold && in_a_pulse) {
            in_a_pulse = false;
            pulse_end_idx = idx - 1;
        }

        // if a pulse has been located ...
        if (pulse_end_idx != -1) {
            int32_t pulse_height, pulse_channel;
            int32_t i;

            // scan from start to end of pulse to determine pulse_height
            pulse_height = -1;
            for (i = pulse_start_idx; i <= pulse_end_idx; i++) {
                if (data[i] - pulse_threshold > pulse_height) {
                    pulse_height = data[i] - pulse_threshold;
                }
            }

            // determine pulse_channel from pulse_height
            pulse_channel = pulse_height / ((4096 - pulse_threshold) / MAX_CHANNEL);
            if (pulse_channel >= MAX_CHANNEL) {
                WARN("chan being reduced from %d to %d\n", pulse_channel, MAX_CHANNEL-1);
                pulse_channel = MAX_CHANNEL-1;
            }

            // keep track of pulse_count
            __sync_fetch_and_add(&pulse_count[pulse_channel], 1);

            // if mode is PULSEMON then plot this pulse
            if (mode == PULSEMON) {
                char time_str[MAX_TIME_STR];
                printf("%s: pulse: height=%d channel=%d threshold-2048=%d\n",
                       time2str(time_str, get_real_time_us(), false, true, true),
                       pulse_height, pulse_channel, pulse_threshold-2048);
                for (i = pulse_start_idx; i <= pulse_end_idx; i++) {
                    print_plot_str(data[i], pulse_threshold);
                }
                printf("\n");
            }

            // done with this pulse
            pulse_start_idx = -1;
            pulse_end_idx = -1;
        }

        // move to next data
        idx++;
    }

    // if time has incremented from last time this code block was run then ...
    uint64_t time_now = time(NULL);
    if (time_now > time_last) {
        int32_t chan;

        // if mode equals INFO then print stats
        if (mode == INFO) {
            int64_t duration_secs = time_now - pulse_time_start;
            printf("time=%d samples=%d mccdaq_restarts=%d pulse_threshold-2048=%d\n",
                   (int32_t)duration_secs, max_data, mccdaq_get_restart_count(), pulse_threshold-2048);
            printf("  counts = ");
            for (chan = 0; chan < MAX_CHANNEL; chan++) {
                printf("%8d ", pulse_count[chan]);
            }
            printf("\n");
            printf("  cpm    = ");
            for (chan = 0; chan < MAX_CHANNEL; chan++) {
                printf("%8.2f ", pulse_count[chan] / (duration_secs / 60.0));
            }
            printf("\n");
            printf("\n");
        }

        // reset for next second of data
        time_last = time_now;
        max_data = 0;
        idx = 0;
        pulse_threshold = PULSE_THRESHOLD_NOT_SET;
    }

    // if sigint then set mode to IDLE
    if (sigint) {
        sigint = false;
        mode = IDLE;
        mode_arg = 0;
    }

    // return 0, so scanning continues
    return 0;
}
static bool arch_sanCovParseRaw(honggfuzz_t * hfuzz, fuzzer_t * fuzzer)
{
    int dataFd = -1;
    uint8_t *dataBuf = NULL;
    off_t dataFileSz = 0, pos = 0;
    bool is32bit = true, ret = false, isSeedFirstRun = false;
    char covFile[PATH_MAX] = { 0 };

    /* Fuzzer local runtime data structs - need free() before exit */
    uint64_t *startMapsIndex = NULL;
    memMap_t *mapsBuf = NULL;

    /* Local counters */
    uint64_t nBBs = 0;          /* Total BB hits found in raw file */
    uint64_t nZeroBBs = 0;      /* Number of non-hit instrumented BBs */
    uint64_t mapsNum = 0;       /* Total number of entries in map file */
    uint64_t noCovMapsNum = 0;  /* Loaded DSOs not compiled with coverage */

    /* File line-by-line read help buffers */
    char *pLine = NULL;
    size_t lineSz = 0;

    /* Coverage data analysis starts by parsing map file listing */
    snprintf(covFile, sizeof(covFile), "%s/%s/%d.sancov.map", hfuzz->workDir,
             _HF_SANCOV_DIR, fuzzer->pid);
    if (!files_exists(covFile)) {
        LOG_D("sancov map file not found");
        return false;
    }

    FILE *fCovMap = fopen(covFile, "rb");
    if (fCovMap == NULL) {
        PLOG_E("Couldn't open '%s' - R/O mode", covFile);
        goto bail;
    }

    /* First line contains PC length (32/64-bit) */
    if (getline(&pLine, &lineSz, fCovMap) == -1) {
        LOG_E("Invalid map file '%s'", covFile);
        fclose(fCovMap);
        goto bail;
    }

    int pcLen = atoi(pLine);
    if (pcLen == 32) {
        is32bit = true;
    } else if (pcLen == 64) {
        is32bit = false;
    } else {
        LOG_E("Invalid PC length (%d) in map file '%s'", pcLen, covFile);
    }

    /* Interaction with global Trie should mutex wrap to avoid threads races */
    MX_LOCK(&hfuzz->sanCov_mutex);
    {
        /* If runtime data destroy flag, new seed has been picked so destroy old & create new Trie */
        if (hfuzz->clearCovMetadata == true) {
            /* Since this path is invoked on first run too, destroy old Trie only if exists */
            if (hfuzz->covMetadata != NULL) {
                arch_trieDestroy(hfuzz->covMetadata);
            }
            arch_trieCreate(&hfuzz->covMetadata);
            hfuzz->clearCovMetadata = false;
            isSeedFirstRun = true;
        }
    }
    MX_UNLOCK(&hfuzz->sanCov_mutex);

    /* See if #maps is available from previous run to avoid realloc inside loop */
    uint64_t prevMapsNum = __sync_fetch_and_add(&hfuzz->sanCovCnts.dsoCnt, 0UL);
    if (prevMapsNum > 0) {
        if ((mapsBuf = malloc(prevMapsNum * sizeof(memMap_t))) == NULL) {
            PLOG_E("malloc failed (sz=%" PRIu64 ")", prevMapsNum * sizeof(memMap_t));
            /* This will be picked-up later from realloc branch */
            prevMapsNum = 0;
        }
    }

    /* Iterate map entries */
    for (;;) {
        if (getline(&pLine, &lineSz, fCovMap) == -1) {
            break;
        }

        /* Trim trailing whitespaces, not sure if needed copied from upstream sancov.py */
        char *lineEnd = pLine + strlen(pLine) - 1;
        while (lineEnd > pLine && isspace(*lineEnd)) {
            lineEnd--;
        }
        *(lineEnd + 1) = 0;

        /*
         * Each line has following format:
         * Start    End      Base     bin/DSO name
         * b5843000 b584e6ac b5843000 liblog.so
         */
        memMap_t mapData = {.start = 0 };
        char *savePtr = NULL;
        mapData.start = strtoull(strtok_r(pLine, " ", &savePtr), NULL, 16);
        mapData.end = strtoull(strtok_r(NULL, " ", &savePtr), NULL, 16);
        mapData.base = strtoull(strtok_r(NULL, " ", &savePtr), NULL, 16);
        char *mapName = strtok_r(NULL, " ", &savePtr);
        memcpy(mapData.mapName, mapName, strlen(mapName));

        /* Interaction with global Trie should mutex wrap to avoid threads races */
        MX_LOCK(&hfuzz->sanCov_mutex);
        {
            /* Add entry to Trie with zero data if not already */
            if (!arch_trieSearch(hfuzz->covMetadata->children, mapData.mapName)) {
                arch_trieAdd(&hfuzz->covMetadata, mapData.mapName);
            }
        }
        MX_UNLOCK(&hfuzz->sanCov_mutex);

        /* If no DSO number history (first run) or new DSO loaded, realloc local maps metadata buf */
        if (prevMapsNum == 0 || prevMapsNum < mapsNum) {
            if ((mapsBuf = realloc(mapsBuf, (size_t)(mapsNum + 1) * sizeof(memMap_t))) == NULL) {
                PLOG_E("realloc failed (sz=%" PRIu64 ")", (mapsNum + 1) * sizeof(memMap_t));
                goto bail;
            }
        }

        /* Add entry to local maps metadata array */
        memcpy(&mapsBuf[mapsNum], &mapData, sizeof(memMap_t));

        /* Increase loaded maps counter (includes non-instrumented DSOs too) */
        mapsNum++;
    }

    /* Delete .sancov.map file */
    fclose(fCovMap);
    unlink(covFile);

    /* Create a quick index array with maps start addresses */
    startMapsIndex = malloc(mapsNum * sizeof(uint64_t));
    if (startMapsIndex == NULL) {
        PLOG_E("malloc failed (sz=%" PRIu64 ")", mapsNum * sizeof(uint64_t));
        goto bail;
    }

    /* Sort quick maps index */
    qsort(mapsBuf, mapsNum, sizeof(memMap_t), arch_qsortCmp);
    for (size_t i = 0; i < mapsNum; i++) {
        startMapsIndex[i] = mapsBuf[i].start;
    }

    /* mmap() .sancov.raw file */
    snprintf(covFile, sizeof(covFile), "%s/%s/%d.sancov.raw", hfuzz->workDir,
             _HF_SANCOV_DIR, fuzzer->pid);
    dataBuf = files_mapFile(covFile, &dataFileSz, &dataFd, false);
    if (dataBuf == NULL) {
        LOG_E("Couldn't open and map '%s' in R/O mode", covFile);
        goto bail;
    }

    /*
     * Avoid cost of size checks inside raw data read loop by defining the read function
     * & pivot size based on PC length.
     */
    uint64_t (*pReadRawBBAddrFunc)(const uint8_t *) = NULL;
    uint8_t pivot = 0;
    if (is32bit) {
        pReadRawBBAddrFunc = &util_getUINT32;
        pivot = 4;
    } else {
        pReadRawBBAddrFunc = &util_getUINT64;
        pivot = 8;
    }

    /*
     * Take advantage of data locality (next processed addr is very likely to belong
     * to same map) to avoid Trie node search for each read entry.
     */
    node_t *curMap = NULL;
    uint64_t prevIndex = 0;

    /* Iterate over data buffer containing list of hit BB addresses */
    while (pos < dataFileSz) {
        uint64_t bbAddr = pReadRawBBAddrFunc(dataBuf + pos);
        pos += pivot;

        /* Don't bother for zero BB addr (inserted checks without hit) */
        if (bbAddr == 0x0) {
            nZeroBBs++;
            continue;
        } else {
            /* Find best hit based on start addr & verify range for errors */
            uint64_t bestFit = arch_interpSearch(startMapsIndex, mapsNum, bbAddr);
            if (bbAddr >= mapsBuf[bestFit].start && bbAddr < mapsBuf[bestFit].end) {
                /* Increase exe/DSO total BB counter */
                mapsBuf[bestFit].bbCnt++;

                /* Update current Trie node if map changed */
                if (curMap == NULL || (prevIndex != bestFit)) {
                    prevIndex = bestFit;

                    /* Interaction with global Trie should mutex wrap to avoid threads races */
                    MX_LOCK(&hfuzz->sanCov_mutex);
                    {
                        curMap = arch_trieSearch(hfuzz->covMetadata->children,
                                                 mapsBuf[bestFit].mapName);
                        if (curMap == NULL) {
                            LOG_E("Corrupted Trie - '%s' not found", mapsBuf[bestFit].mapName);
                            MX_UNLOCK(&hfuzz->sanCov_mutex);
                            continue;
                        }

                        /* Maintain bitmaps only for exec/DSOs with coverage enabled - allocate on first use */
                        if (curMap->data.pBM == NULL) {
                            LOG_D("Allocating bitmap for map '%s'", mapsBuf[bestFit].mapName);
                            curMap->data.pBM = arch_newBitmap(_HF_BITMAP_SIZE);

                            /*
                             * If bitmap allocation failed, unset cached Trie node ptr
                             * to execute this selection branch again.
                             */
                            if (curMap->data.pBM == NULL) {
                                curMap = NULL;
                                MX_UNLOCK(&hfuzz->sanCov_mutex);
                                continue;
                            }
                        }
                    }
                    MX_UNLOCK(&hfuzz->sanCov_mutex);
                }

                /* If new relative BB addr, update DSO's bitmap */
                uint32_t relAddr = (uint32_t)(bbAddr - mapsBuf[bestFit].base);
                if (!arch_queryBitmap(curMap->data.pBM, relAddr)) {
                    /* Interaction with global Trie should mutex wrap to avoid threads races */
                    MX_LOCK(&hfuzz->sanCov_mutex);
                    {
                        arch_setBitmap(curMap->data.pBM, relAddr);
                    }
                    MX_UNLOCK(&hfuzz->sanCov_mutex);

                    /* Also increase new BBs counter at worker's thread runtime data */
                    mapsBuf[bestFit].newBBCnt++;
                }
            } else {
                /*
                 * Normally this should never get executed. If hit, sanitizer
                 * coverage data collection came across some kind of bug.
                 */
                LOG_E("Invalid BB addr (%" PRIx64 ") at offset %ld", bbAddr, pos);
            }
        }
        nBBs++;
    }

    /* Finally iterate over all instrumented maps to sum-up the number of newly met BB addresses */
    for (uint64_t i = 0; i < mapsNum; i++) {
        if (mapsBuf[i].bbCnt > 0 && !isSeedFirstRun) {
            fuzzer->sanCovCnts.newBBCnt += mapsBuf[i].newBBCnt;
        } else {
            noCovMapsNum++;
        }
    }

    /* Successful parsing - update fuzzer worker's counters */
    fuzzer->sanCovCnts.hitBBCnt = nBBs;
    fuzzer->sanCovCnts.totalBBCnt = nBBs + nZeroBBs;
    fuzzer->sanCovCnts.dsoCnt = mapsNum;
    fuzzer->sanCovCnts.iDsoCnt = mapsNum - noCovMapsNum;  /* Instrumented DSOs */
    ret = true;

 bail:
    unlink(covFile);

    if (dataBuf) {
        munmap(dataBuf, dataFileSz);
    }
    if (dataFd != -1) {
        close(dataFd);
    }
    if (mapsBuf) {
        free(mapsBuf);
    }
    if (startMapsIndex) {
        free(startMapsIndex);
    }
    if (pLine) {
        free(pLine);
    }
    return ret;
}

static bool arch_sanCovParse(honggfuzz_t * hfuzz, fuzzer_t * fuzzer)
{
    int dataFd = -1;
    uint8_t *dataBuf = NULL;
    off_t dataFileSz = 0, pos = 0;
    bool is32bit = true;
    char covFile[PATH_MAX] = { 0 };
    DIR *pSanCovDir = NULL;
    bool ret = false;

    snprintf(covFile, sizeof(covFile), "%s/%s/%s.%d.sancov", hfuzz->workDir,
             _HF_SANCOV_DIR, files_basename(hfuzz->cmdline[0]), fuzzer->pid);
    if (!files_exists(covFile)) {
        LOG_D("Target sancov file not found");
        return false;
    }

    /* Local cache file suffix to use for file search of worker pid data */
    char pidFSuffix[13] = { 0 };
    snprintf(pidFSuffix, sizeof(pidFSuffix), "%d.sancov", fuzzer->pid);

    /* Total BBs counter summarizes all DSOs */
    uint64_t nBBs = 0;

    /* Iterate sancov dir for files generated against fuzzer pid */
    snprintf(covFile, sizeof(covFile), "%s/%s", hfuzz->workDir, _HF_SANCOV_DIR);
    pSanCovDir = opendir(covFile);

    struct dirent *pDir = NULL;
    while ((pDir = readdir(pSanCovDir)) != NULL) {
        /* Parse files with worker's PID */
        if (strstr(pDir->d_name, pidFSuffix)) {
            snprintf(covFile, sizeof(covFile), "%s/%s/%s", hfuzz->workDir,
                     _HF_SANCOV_DIR, pDir->d_name);
            dataBuf = files_mapFile(covFile, &dataFileSz, &dataFd, false);
            if (dataBuf == NULL) {
                LOG_E("Couldn't open and map '%s' in R/O mode", covFile);
                goto bail;
            }

            if (dataFileSz < 8) {
                LOG_E("Coverage data file too short");
                goto bail;
            }

            /* Check magic values & derive PC length */
            uint64_t magic = util_getUINT64(dataBuf);
            if (magic == kMagic32) {
                is32bit = true;
            } else if (magic == kMagic64) {
                is32bit = false;
            } else {
                LOG_E("Invalid coverage data file");
                goto bail;
            }
            pos += 8;

            /*
             * Avoid cost of size checks inside raw data read loop by defining the read function
             * & pivot size based on PC length.
             */
            uint64_t (*pReadRawBBAddrFunc)(const uint8_t *) = NULL;
            uint8_t pivot = 0;
            if (is32bit) {
                pReadRawBBAddrFunc = &util_getUINT32;
                pivot = 4;
            } else {
                pReadRawBBAddrFunc = &util_getUINT64;
                pivot = 8;
            }

            while (pos < dataFileSz) {
                /* note: a 64-bit read must not be truncated to 32 bits */
                uint64_t bbAddr = pReadRawBBAddrFunc(dataBuf + pos);
                pos += pivot;
                if (bbAddr == 0x0) {
                    continue;
                }
                nBBs++;
            }
        }
    }

    /* Successful parsing - update fuzzer worker counters */
    fuzzer->sanCovCnts.hitBBCnt = nBBs;
    ret = true;

 bail:
    unlink(covFile);

    if (dataBuf) {
        munmap(dataBuf, dataFileSz);
    }
    if (dataFd != -1) {
        close(dataFd);
    }
    if (pSanCovDir) {
        closedir(pSanCovDir);
    }
    return ret;
}
__CILKRTS_INLINE
void __cilkrts_obj_payload_add_ref( __cilkrts_obj_payload *pl )
{
    __sync_fetch_and_add( &pl->refcnt, 1 );
}
static uint64_t gen_counter(struct GenInfo * const gi)
{
    const uint64_t v = __sync_fetch_and_add(&(gi->gen.counter.counter), 1lu);
    return v;
}
/* __CILKRTS_INLINE */
void __cilkrts_obj_version_del_ref( __cilkrts_obj_version *v )
{
    // the old value 1 means this call dropped the last reference
    if( __sync_fetch_and_add( &v->refcnt, -1 ) == 1 )
        __cilkrts_obj_version_destroy( v );
}
static uint64_t db_aquire_mtid(struct DB *const db)
{
    return __sync_fetch_and_add(&(db->next_mtid), 1);
}
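/*
 * Illustrative only: because the counter is advanced with an atomic
 * fetch-and-add, concurrent callers each observe a unique, strictly
 * increasing id without any lock. The worker function below is a
 * hypothetical sketch, not part of the original source.
 */
static void *mt_worker(void *arg)
{
    struct DB *db = arg;
    uint64_t mtid = db_aquire_mtid(db);  /* unique per caller */
    (void)mtid;
    return NULL;
}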