DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_apply_invoke(void *ctxt)
{
	dispatch_apply_t da = (dispatch_apply_t)ctxt;
	size_t const iter = da->da_iterations;
	typeof(da->da_func) const func = da->da_func;
	void *const da_ctxt = da->da_ctxt;
	size_t idx, done = 0;

	_dispatch_workitem_dec(); // this unit executes many items

	// Make nested dispatch_apply fall into serial case rdar://problem/9294578
	_dispatch_thread_setspecific(dispatch_apply_key, (void*)~0ul);
	// Striding is the responsibility of the caller.
	while (fastpath((idx = dispatch_atomic_inc2o(da, da_index) - 1) < iter)) {
		_dispatch_client_callout2(da_ctxt, idx, func);
		_dispatch_workitem_inc();
		done++;
	}
	_dispatch_thread_setspecific(dispatch_apply_key, NULL);

	dispatch_atomic_release_barrier();

	// The thread that finished the last workitem wakes up the (possibly
	// waiting) thread that called dispatch_apply. They could be one and
	// the same.
	if (done && (dispatch_atomic_add2o(da, da_done, done) == iter)) {
		_dispatch_thread_semaphore_signal(da->da_sema);
	}

	if (dispatch_atomic_dec2o(da, da_thr_cnt) == 0) {
		_dispatch_continuation_free((dispatch_continuation_t)da);
	}
}
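// Illustrative client-side sketch (not part of the libdispatch sources): each
// iteration index claimed by the dispatch_atomic_inc2o(da, da_index) loop
// above ends up invoking the caller-supplied work function exactly once. The
// identifiers below (square_element, values, COUNT, apply_example) are
// hypothetical.
#include <dispatch/dispatch.h>

#define COUNT 8

static long values[COUNT];

static void
square_element(void *ctxt, size_t idx)
{
	long *array = (long *)ctxt;
	array[idx] = (long)(idx * idx); // runs once per index, possibly concurrently
}

static void
apply_example(void)
{
	dispatch_queue_t q =
			dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
	// dispatch_apply_f() blocks until all COUNT invocations have finished,
	// which corresponds to the da_done/da_sema handshake above.
	dispatch_apply_f(COUNT, q, values, square_element);
}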
DISPATCH_NOINLINE
long
_dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema)
{
	// Before dsema_sent_ksignals is incremented we can rely on the reference
	// held by the waiter. However, once this value is incremented the waiter
	// may return between the atomic increment and the semaphore_signal(),
	// therefore an explicit reference must be held in order to safely access
	// dsema after the atomic increment.
	_dispatch_retain(dsema);

	(void)dispatch_atomic_inc2o(dsema, dsema_sent_ksignals);

#if USE_MACH_SEM
	_dispatch_semaphore_create_port(&dsema->dsema_port);
	kern_return_t kr = semaphore_signal(dsema->dsema_port);
	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
#elif USE_POSIX_SEM
	int ret = sem_post(&dsema->dsema_sem);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
#endif

	_dispatch_release(dsema);
	return 1;
}
void
dispatch_group_leave(dispatch_group_t dg)
{
	dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg;
	dispatch_atomic_release_barrier();
	long value = dispatch_atomic_inc2o(dsema, dsema_value);
	if (slowpath(value == LONG_MIN)) {
		DISPATCH_CLIENT_CRASH("Unbalanced call to dispatch_group_leave()");
	}
	if (slowpath(value == dsema->dsema_orig)) {
		(void)_dispatch_group_wake(dsema);
	}
}
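// Illustrative client-side sketch (not part of the libdispatch sources):
// dispatch_group_leave() above is the counterpart of dispatch_group_enter();
// when the incremented value returns to dsema_orig the last leave wakes any
// waiters via _dispatch_group_wake(). The identifiers below (work_item,
// group_example) are hypothetical.
#include <dispatch/dispatch.h>
#include <stdio.h>

static void
work_item(void *ctxt)
{
	dispatch_group_t group = (dispatch_group_t)ctxt;
	puts("doing work");
	dispatch_group_leave(group); // the last leave triggers the wake path above
}

static void
group_example(void)
{
	dispatch_group_t group = dispatch_group_create();
	dispatch_queue_t q =
			dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);

	dispatch_group_enter(group); // must be balanced by exactly one leave
	dispatch_async_f(q, group, work_item);

	dispatch_group_wait(group, DISPATCH_TIME_FOREVER);
	dispatch_release(group);
}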
long
dispatch_semaphore_signal(dispatch_semaphore_t dsema)
{
	dispatch_atomic_release_barrier();
	long value = dispatch_atomic_inc2o(dsema, dsema_value);
	if (fastpath(value > 0)) {
		return 0;
	}
	if (slowpath(value == LONG_MIN)) {
		DISPATCH_CLIENT_CRASH("Unbalanced call to dispatch_semaphore_signal()");
	}
	return _dispatch_semaphore_signal_slow(dsema);
}
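// Illustrative client-side sketch (not part of the libdispatch sources):
// dispatch_semaphore_signal() above only takes the slow (kernel) path when the
// incremented value is still <= 0, i.e. when some thread is blocked in
// dispatch_semaphore_wait(). The producer/consumer shape below is hypothetical.
#include <dispatch/dispatch.h>

static void
producer(void *ctxt)
{
	dispatch_semaphore_t sema = (dispatch_semaphore_t)ctxt;
	// ... produce the resource ...
	dispatch_semaphore_signal(sema); // returns nonzero if a waiter was woken
}

static void
semaphore_example(void)
{
	// Counting semaphore with no resources initially available.
	dispatch_semaphore_t sema = dispatch_semaphore_create(0);
	dispatch_queue_t q =
			dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);

	dispatch_async_f(q, sema, producer);

	// Blocks (the value goes negative), so the matching signal above hits the
	// _dispatch_semaphore_signal_slow() path.
	dispatch_semaphore_wait(sema, DISPATCH_TIME_FOREVER);
	dispatch_release(sema);
}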
DISPATCH_NOINLINE
_os_object_t
_os_object_retain(_os_object_t obj)
{
	int xref_cnt = obj->os_obj_xref_cnt;
	if (slowpath(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) {
		return obj; // global object
	}
	xref_cnt = dispatch_atomic_inc2o(obj, os_obj_xref_cnt, relaxed);
	if (slowpath(xref_cnt <= 0)) {
		_OS_OBJECT_CLIENT_CRASH("Resurrection of an object");
	}
	return obj;
}
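// Illustrative client-side sketch (not part of the libdispatch sources): the
// external (xref) count bumped by _os_object_retain() above is what plain-C
// dispatch_retain()/dispatch_release() adjust for dispatch objects; retaining
// an object whose count has already dropped to zero is what trips the
// "Resurrection of an object" crash. The queue label below is hypothetical.
#include <dispatch/dispatch.h>

static void
refcount_example(void)
{
	dispatch_queue_t q = dispatch_queue_create("com.example.queue", NULL);
	dispatch_retain(q);  // external reference count: 2
	dispatch_release(q); // back to 1
	dispatch_release(q); // final release; any further retain or release is unbalanced
}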
DISPATCH_NOINLINE
static long
_dispatch_group_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeout)
{
	long orig;

again:
	// check before we cause another signal to be sent by incrementing
	// dsema->dsema_group_waiters
	if (dsema->dsema_value == dsema->dsema_orig) {
		return _dispatch_group_wake(dsema);
	}
	// Mach semaphores appear to sometimes spuriously wake up. Therefore,
	// we keep a parallel count of the number of times a Mach semaphore is
	// signaled (6880961).
	(void)dispatch_atomic_inc2o(dsema, dsema_group_waiters);
	// check the values again in case we need to wake any threads
	if (dsema->dsema_value == dsema->dsema_orig) {
		return _dispatch_group_wake(dsema);
	}

#if USE_MACH_SEM
	mach_timespec_t _timeout;
	kern_return_t kr;

	_dispatch_semaphore_create_port(&dsema->dsema_waiter_port);

	// From xnu/osfmk/kern/sync_sema.c:
	// wait_semaphore->count = -1; /* we don't keep an actual count */
	//
	// The code above does not match the documentation, and that fact is
	// not surprising. The documented semantics are clumsy to use in any
	// practical way. The above hack effectively tricks the rest of the
	// Mach semaphore logic to behave like the libdispatch algorithm.

	switch (timeout) {
	default:
		do {
			uint64_t nsec = _dispatch_timeout(timeout);
			_timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
			_timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
			kr = slowpath(semaphore_timedwait(dsema->dsema_waiter_port,
					_timeout));
		} while (kr == KERN_ABORTED);

		if (kr != KERN_OPERATION_TIMED_OUT) {
			DISPATCH_SEMAPHORE_VERIFY_KR(kr);
			break;
		}
		// Fall through and try to undo the earlier change to
		// dsema->dsema_group_waiters
	case DISPATCH_TIME_NOW:
		while ((orig = dsema->dsema_group_waiters)) {
			if (dispatch_atomic_cmpxchg2o(dsema, dsema_group_waiters, orig,
					orig - 1)) {
				return KERN_OPERATION_TIMED_OUT;
			}
		}
		// Another thread called semaphore_signal().
		// Fall through and drain the wakeup.
	case DISPATCH_TIME_FOREVER:
		do {
			kr = semaphore_wait(dsema->dsema_waiter_port);
		} while (kr == KERN_ABORTED);
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
		break;
	}
#elif USE_POSIX_SEM
	struct timespec _timeout;
	int ret;

	switch (timeout) {
	default:
		do {
			_timeout = _dispatch_timeout_ts(timeout);
			ret = slowpath(sem_timedwait(&dsema->dsema_sem, &_timeout));
		} while (ret == -1 && errno == EINTR);

		if (!(ret == -1 && errno == ETIMEDOUT)) {
			DISPATCH_SEMAPHORE_VERIFY_RET(ret);
			break;
		}
		// Fall through and try to undo the earlier change to
		// dsema->dsema_group_waiters
	case DISPATCH_TIME_NOW:
		while ((orig = dsema->dsema_group_waiters)) {
			if (dispatch_atomic_cmpxchg2o(dsema, dsema_group_waiters, orig,
					orig - 1)) {
				errno = ETIMEDOUT;
				return -1;
			}
		}
		// Another thread called semaphore_signal().
		// Fall through and drain the wakeup.
	case DISPATCH_TIME_FOREVER:
		do {
			ret = sem_wait(&dsema->dsema_sem);
		} while (ret == -1 && errno == EINTR);
		DISPATCH_SEMAPHORE_VERIFY_RET(ret);
		break;
	}
#endif

	goto again;
}
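// Illustrative client-side sketch (not part of the libdispatch sources): the
// timeout handling in _dispatch_group_wait_slow() above is what makes the
// public dispatch_group_wait() return nonzero when the group still has
// outstanding work at the deadline. The identifiers below (slow_work,
// group_timeout_example) are hypothetical.
#include <dispatch/dispatch.h>
#include <stdio.h>
#include <unistd.h>

static void
slow_work(void *ctxt)
{
	(void)ctxt;
	sleep(2); // deliberately outlives the 1-second wait below
}

static void
group_timeout_example(void)
{
	dispatch_group_t group = dispatch_group_create();
	dispatch_queue_t q =
			dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);

	dispatch_group_async_f(group, q, NULL, slow_work);

	dispatch_time_t deadline = dispatch_time(DISPATCH_TIME_NOW, 1 * NSEC_PER_SEC);
	if (dispatch_group_wait(group, deadline) != 0) {
		puts("timed out; group still has outstanding work");
	}
	dispatch_group_wait(group, DISPATCH_TIME_FOREVER); // drain before releasing
	dispatch_release(group);
}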