// Redirect a dispatch_apply submitted to a non-root queue: reserve execution
// "width" on every queue along the target-queue chain, then run the apply on
// the root queue at the bottom of the chain. If any queue in the chain is
// already saturated, the requested concurrency is trimmed (and the apply
// degrades to fully serial when no width is left). The reservations are
// released again after _dispatch_apply_f2 returns.
static void
_dispatch_apply_redirect(void *ctxt)
{
	dispatch_apply_t da = (dispatch_apply_t)ctxt;
	// Width to reserve on each queue for the additional worker threads.
	// NOTE(review): the factor of 2 presumably matches the units in which
	// dq_running is counted elsewhere (e.g. barrier accounting) — confirm
	// against the rest of the queue implementation.
	uint32_t da_width = 2 * (da->da_thr_cnt - 1);
	dispatch_queue_t dq = da->da_queue, rq = dq, tq;

	// Walk from the apply's queue down its target-queue chain, reserving
	// da_width on each queue's dq_running counter.
	do {
		uint32_t running = dispatch_atomic_add2o(rq, dq_running, da_width);
		uint32_t width = rq->dq_width;
		if (slowpath(running > width)) {
			// This queue cannot absorb the full reservation.
			// For a serial queue (width <= 1) back out the entire
			// reservation; otherwise back out only the overshoot.
			uint32_t excess = width > 1 ? running - width : da_width;
			// Undo the excess on every queue reserved so far,
			// from the head of the chain (dq) through rq inclusive.
			for (tq = dq; 1; tq = tq->do_targetq) {
				(void)dispatch_atomic_sub2o(tq, dq_running,
						excess);
				if (tq == rq) {
					break;
				}
			}
			da_width -= excess;
			if (slowpath(!da_width)) {
				// No concurrency left at all: run the apply
				// serially on the calling thread.
				return _dispatch_apply_serial(da);
			}
			// Reduce the thread count to match the width actually
			// reserved (excess is in dq_running units, i.e. 2 per
			// thread — see da_width computation above).
			da->da_thr_cnt -= excess / 2;
		}
		rq = rq->do_targetq;
	} while (slowpath(rq->do_targetq));

	// rq is now the root queue (no further target); run the apply there.
	_dispatch_apply_f2(rq, da, _dispatch_apply3);

	// Release the width reservations taken above on every non-root queue
	// in the chain.
	do {
		(void)dispatch_atomic_sub2o(dq, dq_running, da_width);
		dq = dq->do_targetq;
	} while (slowpath(dq->do_targetq));
}
// Per-thread worker body for dispatch_apply. Every participating thread runs
// this function and the threads race to claim iteration indices from the
// shared da_index counter, so the work is dynamically load-balanced rather
// than statically partitioned. The thread that completes the last iteration
// signals the semaphore the caller of dispatch_apply may be waiting on, and
// the last thread to exit frees the dispatch_apply_t continuation.
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_apply_invoke(void *ctxt)
{
	dispatch_apply_t da = (dispatch_apply_t)ctxt;
	size_t const iter = da->da_iterations;
	typeof(da->da_func) const func = da->da_func;
	void *const da_ctxt = da->da_ctxt;
	size_t idx, done = 0;

	_dispatch_workitem_dec(); // this unit executes many items

	// Make nested dispatch_apply fall into serial case rdar://problem/9294578
	_dispatch_thread_setspecific(dispatch_apply_key, (void*)~0ul);

	// Striding is the responsibility of the caller.
	// Atomically claim the next unclaimed index; stop once all iterations
	// have been handed out (other threads may still be running theirs).
	while (fastpath((idx = dispatch_atomic_inc2o(da, da_index) - 1) < iter)) {
		_dispatch_client_callout2(da_ctxt, idx, func);
		_dispatch_workitem_inc();
		done++; // local tally, published once below to reduce contention
	}
	_dispatch_thread_setspecific(dispatch_apply_key, NULL);

	// Release barrier so all callout side effects are visible before the
	// done count is published / the waiter is woken.
	dispatch_atomic_release_barrier();

	// The thread that finished the last workitem wakes up the (possibly waiting)
	// thread that called dispatch_apply. They could be one and the same.
	if (done && (dispatch_atomic_add2o(da, da_done, done) == iter)) {
		_dispatch_thread_semaphore_signal(da->da_sema);
	}

	// Last thread out frees the continuation carrying the apply state.
	if (dispatch_atomic_dec2o(da, da_thr_cnt) == 0) {
		_dispatch_continuation_free((dispatch_continuation_t)da);
	}
}
// Suspend a dispatch object: bump its suspend count by one interval and take
// an internal reference so the object stays alive while suspended.
void
dispatch_suspend(dispatch_object_t dou)
{
	DISPATCH_OBJECT_TFB(_dispatch_objc_suspend, dou);

	// Globally-refcounted objects are immortal and ignore suspension.
	if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) {
		return;
	}
	// Root queues can never be suspended either.
	if (slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) {
		return;
	}

	// rdar://8181908 explains why we need to do an internal retain at every
	// suspension.
	(void)dispatch_atomic_add2o(dou._do, do_suspend_cnt,
			DISPATCH_OBJECT_SUSPEND_INTERVAL, relaxed);
	_dispatch_retain(dou._do);
}