// Worker body for dispatch_apply: every participating thread runs this and
// repeatedly claims the next unclaimed iteration index via an atomic
// increment of da->da_index, invoking the client callback for each index it
// wins, until all da_iterations have been claimed.
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_apply_invoke(void *ctxt)
{
	dispatch_apply_t da = (dispatch_apply_t)ctxt;
	// Snapshot the immutable fields once; only da_index/da_done/da_thr_cnt
	// are mutated concurrently below.
	size_t const iter = da->da_iterations;
	typeof(da->da_func) const func = da->da_func;
	void *const da_ctxt = da->da_ctxt;
	size_t idx, done = 0;

	_dispatch_workitem_dec(); // this unit executes many items

	// Make nested dispatch_apply fall into serial case rdar://problem/9294578
	_dispatch_thread_setspecific(dispatch_apply_key, (void*)~0ul);
	// Striding is the responsibility of the caller.
	// dispatch_atomic_inc2o returns the post-increment value, so subtract 1
	// to get the zero-based index this thread has claimed.
	while (fastpath((idx = dispatch_atomic_inc2o(da, da_index) - 1) < iter)) {
		_dispatch_client_callout2(da_ctxt, idx, func);
		_dispatch_workitem_inc();
		done++;
	}
	_dispatch_thread_setspecific(dispatch_apply_key, NULL);

	// Publish this thread's callback side effects before the da_done add
	// below can make the waiter observe completion.
	dispatch_atomic_release_barrier();

	// The thread that finished the last workitem wakes up the (possibly waiting)
	// thread that called dispatch_apply. They could be one and the same.
	if (done && (dispatch_atomic_add2o(da, da_done, done) == iter)) {
		_dispatch_thread_semaphore_signal(da->da_sema);
	}
	// Last thread out of the apply frees the continuation backing `da`.
	if (dispatch_atomic_dec2o(da, da_thr_cnt) == 0) {
		_dispatch_continuation_free((dispatch_continuation_t)da);
	}
}
static void _dispatch_apply3(void *ctxt) { dispatch_apply_t da = (dispatch_apply_t)ctxt; dispatch_queue_t old_dq = (dispatch_queue_t) _dispatch_thread_getspecific(dispatch_queue_key); _dispatch_thread_setspecific(dispatch_queue_key, da->da_queue); _dispatch_apply_invoke(ctxt); _dispatch_thread_setspecific(dispatch_queue_key, old_dq); }
// Public entry point: run func(ctxt, i) for i in [0, iterations), exploiting
// queue concurrency where possible. Falls back to a serial in-place loop for
// narrow queues, single-thread applies, and nested applies; redirects through
// dispatch_sync_f when the queue has a target queue.
DISPATCH_NOINLINE
void
dispatch_apply_f(size_t iterations, dispatch_queue_t dq, void *ctxt,
		void (*func)(void *, size_t))
{
	if (slowpath(iterations == 0)) {
		return;
	}
	// The apply state rides in a dispatch continuation; it is released by
	// the last worker thread (see _dispatch_apply_invoke).
	dispatch_apply_t da = (typeof(da))_dispatch_continuation_alloc();

	da->da_func = func;
	da->da_ctxt = ctxt;
	da->da_iterations = iterations;
	da->da_index = 0;
	// Start from the number of active CPUs, then clamp below.
	da->da_thr_cnt = _dispatch_hw_config.cc_max_active;
	da->da_done = 0;
	da->da_queue = NULL;

	if (da->da_thr_cnt > DISPATCH_APPLY_MAX_CPUS) {
		da->da_thr_cnt = DISPATCH_APPLY_MAX_CPUS;
	}
	// Never use more threads than there are iterations.
	if (iterations < da->da_thr_cnt) {
		da->da_thr_cnt = (uint32_t)iterations;
	}
	// Serial fallback: queue too narrow to parallelize, only one worker
	// would run, or we are already inside a dispatch_apply (the apply key
	// is set by _dispatch_apply_invoke; see rdar://problem/9294578).
	if (slowpath(dq->dq_width <= 2) || slowpath(da->da_thr_cnt <= 1) ||
			slowpath(_dispatch_thread_getspecific(dispatch_apply_key))) {
		return dispatch_sync_f(dq, da, _dispatch_apply_serial);
	}
	dispatch_queue_t old_dq = (dispatch_queue_t)
			_dispatch_thread_getspecific(dispatch_queue_key);
	if (slowpath(dq->do_targetq)) {
		if (slowpath(dq == old_dq)) {
			// Applying to the queue we are already on: go serial to
			// avoid deadlocking against ourselves.
			return dispatch_sync_f(dq, da, _dispatch_apply_serial);
		} else {
			// Record the queue so the redirect path can install it in
			// TSD around the iterations (see _dispatch_apply3).
			da->da_queue = dq;
			return dispatch_sync_f(dq, da, _dispatch_apply_redirect);
		}
	}
	// Parallel root-queue path: pair with the release barrier in
	// _dispatch_apply_invoke, run iterations across the queue's workers,
	// then restore this thread's previous queue.
	dispatch_atomic_acquire_barrier();
	_dispatch_thread_setspecific(dispatch_queue_key, dq);
	_dispatch_apply_f2(dq, da, _dispatch_apply2);
	_dispatch_thread_setspecific(dispatch_queue_key, old_dq);
}
// Return a thread semaphore to the per-thread one-slot cache in TSD.
// The new semaphore always takes the slot; if one was already cached there,
// the displaced semaphore is destroyed (the cache never holds more than one).
void
_dispatch_put_thread_semaphore(_dispatch_thread_semaphore_t sema)
{
	_dispatch_thread_semaphore_t displaced;

	displaced = (_dispatch_thread_semaphore_t)
			_dispatch_thread_getspecific(dispatch_sema4_key);
	_dispatch_thread_setspecific(dispatch_sema4_key, (void*)sema);
	if (slowpath(displaced)) {
		_dispatch_thread_semaphore_dispose(displaced);
	}
}
static void _dispatch_introspection_thread_remove(void *ctxt) { dispatch_introspection_thread_t dit = ctxt; OSSpinLockLock(&_dispatch_introspection_threads_lock); TAILQ_REMOVE(&_dispatch_introspection_threads, dit, dit_list); OSSpinLockUnlock(&_dispatch_introspection_threads_lock); _dispatch_continuation_free((void*)dit); _dispatch_thread_setspecific(dispatch_introspection_key, NULL); }
_dispatch_thread_semaphore_t _dispatch_get_thread_semaphore(void) { _dispatch_thread_semaphore_t sema = (_dispatch_thread_semaphore_t) _dispatch_thread_getspecific(dispatch_sema4_key); if (slowpath(!sema)) { return _dispatch_thread_semaphore_create(); } _dispatch_thread_setspecific(dispatch_sema4_key, NULL); return sema; }
void _dispatch_introspection_thread_add(void) { if (_dispatch_thread_getspecific(dispatch_introspection_key)) { return; } uintptr_t thread = _dispatch_thread_self(); dispatch_introspection_thread_t dit = (void*)_dispatch_continuation_alloc(); dit->dit_isa = (void*)0x41; dit->thread = (void*)thread; dit->queue = !_dispatch_introspection_thread_queue_offset ? NULL : (void*)thread + _dispatch_introspection_thread_queue_offset; _dispatch_thread_setspecific(dispatch_introspection_key, dit); OSSpinLockLock(&_dispatch_introspection_threads_lock); TAILQ_INSERT_TAIL(&_dispatch_introspection_threads, dit, dit_list); OSSpinLockUnlock(&_dispatch_introspection_threads_lock); }