// Tear down a semaphore/group object's kernel resources at deallocation
// time. Deliberately crashes the client if the semaphore's current value is
// below its original value, since that means waiters are still outstanding.
void
_dispatch_semaphore_dispose(dispatch_object_t dou)
{
	dispatch_semaphore_t dsema = dou._dsema;

	if (dsema->dsema_value < dsema->dsema_orig) {
		DISPATCH_CLIENT_CRASH(
				"Semaphore/group object deallocated while in use");
	}

#if USE_MACH_SEM
	kern_return_t rc;
	// Both ports are lazily created, so either may still be MACH_PORT_NULL.
	if (dsema->dsema_port) {
		rc = semaphore_destroy(mach_task_self(), dsema->dsema_port);
		DISPATCH_SEMAPHORE_VERIFY_KR(rc);
	}
	if (dsema->dsema_waiter_port) {
		rc = semaphore_destroy(mach_task_self(), dsema->dsema_waiter_port);
		DISPATCH_SEMAPHORE_VERIFY_KR(rc);
	}
#elif USE_POSIX_SEM
	int rc = sem_destroy(&dsema->dsema_sem);
	DISPATCH_SEMAPHORE_VERIFY_RET(rc);
#endif
}
// Slow path of semaphore creation: obtain a Mach semaphore (from the
// semaphore cache when available, otherwise via semaphore_create()) and
// publish it into *s4 with an atomic compare-and-swap, racing against other
// threads attempting the same initialization.
void
_dispatch_sema4_create_slow(_dispatch_sema4_t *s4, int policy)
{
	semaphore_t tmp = MACH_PORT_NULL;

	_dispatch_fork_becomes_unsafe();

	// lazily allocate the semaphore port

	// Someday:
	// 1) Switch to a doubly-linked FIFO in user-space.
	// 2) User-space timers for the timeout.

#if DISPATCH_USE_OS_SEMAPHORE_CACHE
	if (policy == _DSEMA4_POLICY_FIFO) {
		// Reuse a recycled semaphore instead of paying for a fresh
		// semaphore_create() call.
		tmp = (_dispatch_sema4_t)os_get_cached_semaphore();
		if (!os_atomic_cmpxchg(s4, MACH_PORT_NULL, tmp, relaxed)) {
			// Lost the publication race: hand the semaphore back to the
			// cache rather than destroying it.
			os_put_cached_semaphore((os_semaphore_t)tmp);
		}
		return;
	}
#endif

	kern_return_t kr = semaphore_create(mach_task_self(), &tmp, policy, 0);
	DISPATCH_SEMAPHORE_VERIFY_KR(kr);

	if (!os_atomic_cmpxchg(s4, MACH_PORT_NULL, tmp, relaxed)) {
		// Another thread installed its port first; destroy the extra one.
		kr = semaphore_destroy(mach_task_self(), tmp);
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
	}
}
// Lazily create the Mach semaphore port backing *s4, publishing it with an
// atomic compare-and-swap so concurrent callers agree on a single port.
static void
_dispatch_semaphore_create_port(semaphore_t *s4)
{
	kern_return_t rc;
	semaphore_t port;

	if (*s4) {
		return; // already initialized
	}
	_dispatch_safe_fork = false;

	// lazily allocate the semaphore port
	// Someday:
	// 1) Switch to a doubly-linked FIFO in user-space.
	// 2) User-space timers for the timeout.
	// 3) Use the per-thread semaphore port.
	while ((rc = semaphore_create(mach_task_self(), &port,
			SYNC_POLICY_FIFO, 0))) {
		// Creation failed (e.g. resource shortage): report and retry
		// after backing off for a second.
		DISPATCH_VERIFY_MIG(rc);
		sleep(1);
	}

	if (!dispatch_atomic_cmpxchg(s4, 0, port)) {
		// Lost the race to publish; destroy our redundant port.
		rc = semaphore_destroy(mach_task_self(), port);
		DISPATCH_SEMAPHORE_VERIFY_KR(rc);
	}
}
// Slow path of dispatch_semaphore_signal(): performs the kernel-level wakeup
// of a blocked waiter. Always returns 1 (a waiter was signaled).
DISPATCH_NOINLINE
long
_dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema)
{
	// Before dsema_sent_ksignals is incremented we can rely on the reference
	// held by the waiter. However, once this value is incremented the waiter
	// may return between the atomic increment and the semaphore_signal(),
	// therefore an explicit reference must be held in order to safely access
	// dsema after the atomic increment.
	_dispatch_retain(dsema);

	(void)dispatch_atomic_inc2o(dsema, dsema_sent_ksignals);

#if USE_MACH_SEM
	// The port is created lazily; ensure it exists before signaling.
	_dispatch_semaphore_create_port(&dsema->dsema_port);
	kern_return_t kr = semaphore_signal(dsema->dsema_port);
	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
#elif USE_POSIX_SEM
	int ret = sem_post(&dsema->dsema_sem);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
#endif

	_dispatch_release(dsema);
	return 1;
}
// Signal the Mach semaphore `count` times, waking up to `count` waiters.
//
// Fix: the previous `do { ... } while (--count)` form signaled once and
// then kept looping through a signed-underflowing counter when called with
// count <= 0. Guarding the loop condition makes a non-positive count a
// no-op while leaving the normal count >= 1 case byte-for-byte equivalent
// in the number of semaphore_signal() calls performed.
void
_dispatch_sema4_signal(_dispatch_sema4_t *sema, long count)
{
	while (count-- > 0) {
		kern_return_t kr = semaphore_signal(*sema);
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
	}
}
// Block on *sema until signaled. Waits interrupted by the kernel
// (KERN_ABORTED) are transparently retried; any other failure is checked
// by the VERIFY macro.
void
_dispatch_sema4_wait(_dispatch_sema4_t *sema)
{
	for (;;) {
		kern_return_t kr = semaphore_wait(*sema);
		if (kr != KERN_ABORTED) {
			DISPATCH_SEMAPHORE_VERIFY_KR(kr);
			return;
		}
		// interrupted wait: retry
	}
}
// Signal a per-thread semaphore once (Mach or POSIX backend, selected at
// compile time).
void
_dispatch_thread_semaphore_signal(_dispatch_thread_semaphore_t sema)
{
#if USE_MACH_SEM
	kern_return_t rc = semaphore_signal((semaphore_t)sema);
	DISPATCH_SEMAPHORE_VERIFY_KR(rc);
#elif USE_POSIX_SEM
	int rc = sem_post((sem_t *)sema);
	DISPATCH_SEMAPHORE_VERIFY_RET(rc);
#endif
}
// Slow path of semaphore disposal: poison *sema with MACH_PORT_DEAD, then
// either return the port to the semaphore cache (FIFO policy, when the
// cache is compiled in) or destroy it outright.
void
_dispatch_sema4_dispose_slow(_dispatch_sema4_t *sema, int policy)
{
	semaphore_t port = *sema;
	*sema = MACH_PORT_DEAD; // mark as disposed

#if DISPATCH_USE_OS_SEMAPHORE_CACHE
	if (policy == _DSEMA4_POLICY_FIFO) {
		// Recycle instead of destroying.
		return os_put_cached_semaphore((os_semaphore_t)port);
	}
#endif

	kern_return_t rc = semaphore_destroy(mach_task_self(), port);
	DISPATCH_SEMAPHORE_VERIFY_KR(rc);
}
// Destroy a per-thread semaphore and (POSIX backend) release its storage.
DISPATCH_NOINLINE
void
_dispatch_thread_semaphore_dispose(_dispatch_thread_semaphore_t sema)
{
#if USE_MACH_SEM
	semaphore_t s4 = (semaphore_t)sema;
	kern_return_t kr = semaphore_destroy(mach_task_self(), s4);
	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
#elif USE_POSIX_SEM
	int ret = sem_destroy((sem_t *)sema);
	// Fix: verify the destroy result *before* freeing the storage, so that
	// if VERIFY aborts the sem_t is still intact for post-mortem inspection.
	// This also matches the VERIFY-immediately-after-call pattern used
	// everywhere else in this file (the free previously sat between the
	// call and its check).
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
	free((sem_t *)sema);
#endif
}
// Wake everything attached to an emptied group: kernel-blocked waiters and
// queued dispatch_group_notify() callbacks. Always returns 0.
DISPATCH_NOINLINE
static long
_dispatch_group_wake(dispatch_semaphore_t dsema)
{
	struct dispatch_sema_notify_s *next, *head, *tail = NULL;
	long rval;

	// Atomically detach the notify list head so no other waker processes it.
	head = dispatch_atomic_xchg2o(dsema, dsema_notify_head, NULL);
	if (head) {
		// snapshot before anything is notified/woken <rdar://problem/8554546>
		tail = dispatch_atomic_xchg2o(dsema, dsema_notify_tail, NULL);
	}
	// Claim all pending kernel-level waiters in one shot.
	rval = dispatch_atomic_xchg2o(dsema, dsema_group_waiters, 0);
	if (rval) {
		// wake group waiters
#if USE_MACH_SEM
		_dispatch_semaphore_create_port(&dsema->dsema_waiter_port);
		do {
			kern_return_t kr = semaphore_signal(dsema->dsema_waiter_port);
			DISPATCH_SEMAPHORE_VERIFY_KR(kr);
		} while (--rval);
#elif USE_POSIX_SEM
		do {
			int ret = sem_post(&dsema->dsema_sem);
			DISPATCH_SEMAPHORE_VERIFY_RET(ret);
		} while (--rval);
#endif
	}
	if (head) {
		// async group notify blocks
		do {
			dispatch_async_f(head->dsn_queue, head->dsn_ctxt, head->dsn_func);
			_dispatch_release(head->dsn_queue);
			next = fastpath(head->dsn_next);
			if (!next && head != tail) {
				// A concurrent enqueuer has published this node but has not
				// yet stored its dsn_next link (we know more nodes exist
				// because head != the snapshotted tail); spin until the
				// link becomes visible.
				while (!(next = fastpath(head->dsn_next))) {
					_dispatch_hardware_pause();
				}
			}
			free(head);
		} while ((head = next));
		// Drop the reference held on behalf of the notify list.
		_dispatch_release(dsema);
	}
	return 0;
}
// Block on a per-thread semaphore until signaled, retrying waits that were
// merely interrupted.
//
// Fix (POSIX branch): the retry condition was `ret != 0`, which retries on
// *every* failure — a persistent error such as EINVAL would spin forever
// and DISPATCH_SEMAPHORE_VERIFY_RET could never report it. Retry only on
// EINTR, exactly as the sem_wait/sem_timedwait loops elsewhere in this
// file do (`ret == -1 && errno == EINTR`).
void
_dispatch_thread_semaphore_wait(_dispatch_thread_semaphore_t sema)
{
#if USE_MACH_SEM
	semaphore_t s4 = (semaphore_t)sema;
	kern_return_t kr;
	do {
		kr = semaphore_wait(s4);
	} while (slowpath(kr == KERN_ABORTED));
	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
#elif USE_POSIX_SEM
	int ret;
	do {
		ret = sem_wait((sem_t *)sema);
	} while (slowpath(ret == -1 && errno == EINTR));
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
#endif
}
// Wait on *sema with a deadline. Returns true if the wait timed out, false
// if the semaphore was signaled. Interrupted waits (KERN_ABORTED) are
// retried with a freshly recomputed remaining timeout.
bool
_dispatch_sema4_timedwait(_dispatch_sema4_t *sema, dispatch_time_t timeout)
{
	mach_timespec_t ts;
	kern_return_t rc;

	do {
		// Recompute the remaining time on each retry so interruptions
		// don't extend the overall deadline.
		uint64_t nsec_left = _dispatch_timeout(timeout);
		ts.tv_sec = (typeof(ts.tv_sec))(nsec_left / NSEC_PER_SEC);
		ts.tv_nsec = (typeof(ts.tv_nsec))(nsec_left % NSEC_PER_SEC);
		rc = slowpath(semaphore_timedwait(*sema, ts));
	} while (rc == KERN_ABORTED);

	if (rc == KERN_OPERATION_TIMED_OUT) {
		return true;
	}
	DISPATCH_SEMAPHORE_VERIFY_KR(rc);
	return false;
}
// Slow path for dispatch_group_wait(): park the calling thread on the
// group's waiter semaphore until the group empties or the timeout fires.
// Note: this function deliberately uses a fall-through switch; the
// `default:` (finite timeout) case falls into DISPATCH_TIME_NOW to undo
// its waiter registration on timeout, and DISPATCH_TIME_NOW falls into
// DISPATCH_TIME_FOREVER to drain a wakeup it lost the undo race to.
DISPATCH_NOINLINE
static long
_dispatch_group_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeout)
{
	long orig;

again:
	// check before we cause another signal to be sent by incrementing
	// dsema->dsema_group_waiters
	if (dsema->dsema_value == dsema->dsema_orig) {
		return _dispatch_group_wake(dsema);
	}
	// Mach semaphores appear to sometimes spuriously wake up. Therefore,
	// we keep a parallel count of the number of times a Mach semaphore is
	// signaled (6880961).
	(void)dispatch_atomic_inc2o(dsema, dsema_group_waiters);
	// check the values again in case we need to wake any threads
	if (dsema->dsema_value == dsema->dsema_orig) {
		return _dispatch_group_wake(dsema);
	}

#if USE_MACH_SEM
	mach_timespec_t _timeout;
	kern_return_t kr;

	_dispatch_semaphore_create_port(&dsema->dsema_waiter_port);

	// From xnu/osfmk/kern/sync_sema.c:
	// wait_semaphore->count = -1; /* we don't keep an actual count */
	//
	// The code above does not match the documentation, and that fact is
	// not surprising. The documented semantics are clumsy to use in any
	// practical way. The above hack effectively tricks the rest of the
	// Mach semaphore logic to behave like the libdispatch algorithm.

	switch (timeout) {
	default:
		// Finite deadline: recompute the remaining time on each retry so
		// KERN_ABORTED interruptions don't extend the overall deadline.
		do {
			uint64_t nsec = _dispatch_timeout(timeout);
			_timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
			_timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
			kr = slowpath(semaphore_timedwait(dsema->dsema_waiter_port,
					_timeout));
		} while (kr == KERN_ABORTED);
		if (kr != KERN_OPERATION_TIMED_OUT) {
			DISPATCH_SEMAPHORE_VERIFY_KR(kr);
			break;
		}
		// Fall through and try to undo the earlier change to
		// dsema->dsema_group_waiters
	case DISPATCH_TIME_NOW:
		// Try to deregister ourselves as a waiter; if we succeed before a
		// waker consumed our registration, report a timeout.
		while ((orig = dsema->dsema_group_waiters)) {
			if (dispatch_atomic_cmpxchg2o(dsema, dsema_group_waiters, orig,
					orig - 1)) {
				return KERN_OPERATION_TIMED_OUT;
			}
		}
		// Another thread called semaphore_signal().
		// Fall through and drain the wakeup.
	case DISPATCH_TIME_FOREVER:
		do {
			kr = semaphore_wait(dsema->dsema_waiter_port);
		} while (kr == KERN_ABORTED);
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
		break;
	}
#elif USE_POSIX_SEM
	struct timespec _timeout;
	int ret;

	switch (timeout) {
	default:
		// Finite deadline: recompute the absolute timeout on each EINTR
		// retry.
		do {
			_timeout = _dispatch_timeout_ts(timeout);
			ret = slowpath(sem_timedwait(&dsema->dsema_sem, &_timeout));
		} while (ret == -1 && errno == EINTR);
		if (!(ret == -1 && errno == ETIMEDOUT)) {
			DISPATCH_SEMAPHORE_VERIFY_RET(ret);
			break;
		}
		// Fall through and try to undo the earlier change to
		// dsema->dsema_group_waiters
	case DISPATCH_TIME_NOW:
		// Try to deregister ourselves as a waiter; on success report a
		// timeout via errno, mirroring the sem_timedwait convention.
		while ((orig = dsema->dsema_group_waiters)) {
			if (dispatch_atomic_cmpxchg2o(dsema, dsema_group_waiters, orig,
					orig - 1)) {
				errno = ETIMEDOUT;
				return -1;
			}
		}
		// Another thread called semaphore_signal().
		// Fall through and drain the wakeup.
	case DISPATCH_TIME_FOREVER:
		do {
			ret = sem_wait(&dsema->dsema_sem);
		} while (ret == -1 && errno == EINTR);
		DISPATCH_SEMAPHORE_VERIFY_RET(ret);
		break;
	}
#endif

	// Woken up: loop back to re-check whether the group is actually empty
	// (wakeups may be spurious or stale).
	goto again;
}