/*
 * Decrement the semaphore count and return immediately when a unit was
 * available; otherwise defer to the slow (potentially blocking) path,
 * which honors the caller-supplied timeout.
 *
 * Returns 0 on successful fast-path acquisition, otherwise whatever
 * _dispatch_semaphore_wait_slow() returns.
 */
long
dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout)
{
	// Atomically decrement dsema_value; the acquire barrier orders
	// subsequent loads after the decrement.
	long new_value = dispatch_atomic_dec2o(dsema, dsema_value);
	dispatch_atomic_acquire_barrier();

	if (slowpath(new_value < 0)) {
		// Count went negative: no unit was available, so this caller
		// must wait (or time out) on the slow path.
		return _dispatch_semaphore_wait_slow(dsema, timeout);
	}
	// Non-negative result after the decrement: acquired without blocking.
	return 0;
}
/*
 * dispatch_apply_f: submit func(ctxt, i) for every index i in
 * [0, iterations) to queue dq, choosing between a serial, redirected,
 * or in-place concurrent execution strategy based on the queue shape
 * and the current thread's state.
 *
 * iterations == 0 is a no-op. The iteration state is carried in a
 * dispatch_apply_t continuation allocated here; it is presumably
 * released by the apply callbacks (_dispatch_apply_serial /
 * _dispatch_apply_redirect / _dispatch_apply2) — not visible in this
 * chunk, so confirm ownership against those functions.
 */
DISPATCH_NOINLINE
void
dispatch_apply_f(size_t iterations, dispatch_queue_t dq, void *ctxt,
		void (*func)(void *, size_t))
{
	// Nothing to do for an empty iteration space.
	if (slowpath(iterations == 0)) {
		return;
	}

	// Allocate and populate the per-apply continuation/state record.
	dispatch_apply_t da = (typeof(da))_dispatch_continuation_alloc();
	da->da_func = func;
	da->da_ctxt = ctxt;
	da->da_iterations = iterations;
	da->da_index = 0;
	// cc_max_active: hardware concurrency figure from _dispatch_hw_config
	// (presumably the number of active CPUs — confirm where it is set).
	da->da_thr_cnt = _dispatch_hw_config.cc_max_active;
	da->da_done = 0;
	da->da_queue = NULL;

	// Cap worker count at the compile-time maximum, then clamp it so we
	// never use more workers than there are iterations.
	if (da->da_thr_cnt > DISPATCH_APPLY_MAX_CPUS) {
		da->da_thr_cnt = DISPATCH_APPLY_MAX_CPUS;
	}
	if (iterations < da->da_thr_cnt) {
		da->da_thr_cnt = (uint32_t)iterations;
	}

	// Fall back to fully serial execution when: the target queue is
	// (effectively) serial (dq_width <= 2), only one worker would run
	// anyway, or this thread is already inside a dispatch_apply
	// (dispatch_apply_key set) — avoids nested-apply recursion.
	if (slowpath(dq->dq_width <= 2) || slowpath(da->da_thr_cnt <= 1) ||
			slowpath(_dispatch_thread_getspecific(dispatch_apply_key))) {
		return dispatch_sync_f(dq, da, _dispatch_apply_serial);
	}

	// Queue this thread is currently draining, if any.
	dispatch_queue_t old_dq = (dispatch_queue_t)
			_dispatch_thread_getspecific(dispatch_queue_key);

	if (slowpath(dq->do_targetq)) {
		if (slowpath(dq == old_dq)) {
			// Already on dq: a redirect would deadlock-prone re-enter
			// the same queue, so run serially instead.
			return dispatch_sync_f(dq, da, _dispatch_apply_serial);
		} else {
			// dq forwards to a target queue: remember dq in the state
			// and let the redirect path dispatch through it.
			da->da_queue = dq;
			return dispatch_sync_f(dq, da, _dispatch_apply_redirect);
		}
	}

	// Root-queue case: run the concurrent apply on this thread. Swap the
	// thread's current-queue key to dq for the duration so code invoked
	// by the apply sees dq as its queue, then restore the previous value.
	dispatch_atomic_acquire_barrier();
	_dispatch_thread_setspecific(dispatch_queue_key, dq);
	_dispatch_apply_f2(dq, da, _dispatch_apply2);
	_dispatch_thread_setspecific(dispatch_queue_key, old_dq);
}