/*
 * Reader completion callback: the reader whose atomic decrement takes
 * r_count to zero stops the test.  ctxt is unused.
 *
 * NOTE(review): the inner `r_count == 0` is a plain (non-atomic) re-read
 * performed after dispatch_atomic_dec() already returned 0.  It is either
 * redundant, or it deliberately guards against a writer re-arming r_count
 * between the decrement and the check — confirm intent against the
 * writer side before simplifying.
 */
static void reader(void *ctxt) {
	if (dispatch_atomic_dec(&r_count) == 0) {
		if (r_count == 0) {
			test_stop();
		}
	}
}
/*
 * Hop this work item one queue further down the cascade: advance the
 * per-item queue index stored behind `context` and re-enqueue onto the
 * next queue, then retire one global iteration.  The item whose
 * decrement takes `iterations` to zero sets `done`, prints the
 * histogram, and passes the test.  Items arriving after `done` is set
 * bail out immediately.
 */
static void cascade(void *context)
{
	uintptr_t *slot = (uintptr_t *)context;

	if (done) {
		return;
	}

	uintptr_t next = *slot + 1;
	if (next < QUEUES) {
		*slot = next;
		dispatch_async_f(queues[next], context, cascade);
	}

	if (dispatch_atomic_dec(&iterations) == 0) {
		done = 1;
		histogram();
		MU_PASS("Please check histogram to be sure");
	}
}
static void _dispatch_apply2(void *_ctxt) { struct dispatch_apply_s *da = _ctxt; size_t const iter = da->da_iterations; dispatch_function_apply_t func = da->da_func; void *const ctxt = da->da_ctxt; size_t idx; _dispatch_workitem_dec(); // this unit executes many items // Striding is the responsibility of the caller. while (fastpath((idx = dispatch_atomic_inc((intptr_t*)&da->da_index) - 1) < iter)) { func(ctxt, idx); _dispatch_workitem_inc(); } if (dispatch_atomic_dec((intptr_t*)&da->da_thr_cnt) == 0) { dispatch_semaphore_signal(da->da_sema); } }
void cpubusy(void* context) { intptr_t *count = context; intptr_t iterdone; long idx; for (idx = 0; idx < LOOP_COUNT; ++idx) { if (done) break; } if ((iterdone = dispatch_atomic_dec(&iterations)) == 0) { dispatch_atomic_inc(&done); dispatch_atomic_inc(count); histogram(); test_stop(); exit(0); } else if (iterdone > 0) { dispatch_atomic_inc(count); } }
// Drop one reference atomically; returns true only for the caller
// that released the final reference (decrement reached zero).
bool iteration_wrap::deref()
{
	return 0 == dispatch_atomic_dec(&ref);
}