// Destructor for semaphore and group objects: release the kernel-level
// synchronization resources. Crashes the client if the object is torn down
// while still in use (current value below the creation value means there are
// outstanding waits that were never balanced by signals).
void _dispatch_semaphore_dispose(dispatch_object_t dou) {
	dispatch_semaphore_t dsema = dou._dsema;

	if (dsema->dsema_value < dsema->dsema_orig) {
		DISPATCH_CLIENT_CRASH(
				"Semaphore/group object deallocated while in use");
	}

#if USE_MACH_SEM
	kern_return_t kr;
	// Both ports are created lazily, so either may still be 0 here.
	if (dsema->dsema_port) {
		kr = semaphore_destroy(mach_task_self(), dsema->dsema_port);
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
	}
	if (dsema->dsema_waiter_port) {
		kr = semaphore_destroy(mach_task_self(), dsema->dsema_waiter_port);
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
	}
#elif USE_POSIX_SEM
	int ret = sem_destroy(&dsema->dsema_sem);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
#endif
}
// Slow path of semaphore signal: deliver one kernel-level wakeup to a
// blocked waiter. Always returns 1 (a wakeup was posted).
DISPATCH_NOINLINE
long _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema) {
	// Before dsema_sent_ksignals is incremented we can rely on the reference
	// held by the waiter. However, once this value is incremented the waiter
	// may return between the atomic increment and the semaphore_signal(),
	// therefore an explicit reference must be held in order to safely access
	// dsema after the atomic increment.
	_dispatch_retain(dsema);

	(void)dispatch_atomic_inc2o(dsema, dsema_sent_ksignals);

#if USE_MACH_SEM
	// The port is created lazily on first slow-path use.
	_dispatch_semaphore_create_port(&dsema->dsema_port);
	kern_return_t kr = semaphore_signal(dsema->dsema_port);
	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
#elif USE_POSIX_SEM
	int ret = sem_post(&dsema->dsema_sem);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
#endif

	_dispatch_release(dsema);
	return 1;
}
// Post `count` wakeups to the semaphore.
//
// Fixed: the previous `do { ... } while (--count);` unconditionally posted
// once even for count == 0, and then kept looping on the wrapped negative
// counter (signed underflow) until it happened to reach zero again. The
// pre-test loop posts exactly max(count, 0) times.
void _dispatch_sema4_signal(_dispatch_sema4_t *sema, long count) {
	while (count-- > 0) {
		int ret = sem_post(sema);
		DISPATCH_SEMAPHORE_VERIFY_RET(ret);
	}
}
// Post one wakeup on a per-thread semaphore handle.
void _dispatch_thread_semaphore_signal(_dispatch_thread_semaphore_t sema) {
#if USE_MACH_SEM
	kern_return_t kr = semaphore_signal((semaphore_t)sema);
	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
#elif USE_POSIX_SEM
	sem_t *s = (sem_t *)sema;
	int rc = sem_post(s);
	DISPATCH_SEMAPHORE_VERIFY_RET(rc);
#endif
}
// Destroy a per-thread semaphore handle and release its storage.
// (POSIX: the sem_t was heap-allocated by _dispatch_thread_semaphore_create;
// the saved return code is verified after free, which only reads the int.)
DISPATCH_NOINLINE
void _dispatch_thread_semaphore_dispose(_dispatch_thread_semaphore_t sema) {
#if USE_MACH_SEM
	kern_return_t kr = semaphore_destroy(mach_task_self(), (semaphore_t)sema);
	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
#elif USE_POSIX_SEM
	sem_t *s = (sem_t *)sema;
	int rc = sem_destroy(s);
	free(s);
	DISPATCH_SEMAPHORE_VERIFY_RET(rc);
#endif
}
// Common initialization for semaphore and group objects.
static void _dispatch_semaphore_init(long value, dispatch_object_t dou) {
	dispatch_semaphore_t dsema = dou._dsema;

	dsema->do_next = DISPATCH_OBJECT_LISTLESS;
	dsema->do_targetq = dispatch_get_global_queue(
			DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
	dsema->dsema_value = value;
	// dsema_orig keeps the creation value; dispose compares it against
	// dsema_value to detect objects deallocated while in use.
	dsema->dsema_orig = value;
#if USE_POSIX_SEM
	// The kernel semaphore tracks pending wakeups only, not the logical
	// value, so it deliberately starts at 0 regardless of `value`.
	int ret = sem_init(&dsema->dsema_sem, 0, 0);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
#endif
}
// Wake everything attached to a group: post one kernel wakeup per blocked
// group waiter, then submit all queued notify callbacks. Always returns 0.
DISPATCH_NOINLINE
static long _dispatch_group_wake(dispatch_semaphore_t dsema) {
	struct dispatch_sema_notify_s *next, *head, *tail = NULL;
	long rval;

	// Atomically detach the notify list (head first, then tail).
	head = dispatch_atomic_xchg2o(dsema, dsema_notify_head, NULL);
	if (head) {
		// snapshot before anything is notified/woken <rdar://problem/8554546>
		tail = dispatch_atomic_xchg2o(dsema, dsema_notify_tail, NULL);
	}
	// Claim all pending waiters in one exchange; post one wakeup for each.
	rval = dispatch_atomic_xchg2o(dsema, dsema_group_waiters, 0);
	if (rval) {
		// wake group waiters
#if USE_MACH_SEM
		_dispatch_semaphore_create_port(&dsema->dsema_waiter_port);
		do {
			kern_return_t kr = semaphore_signal(dsema->dsema_waiter_port);
			DISPATCH_SEMAPHORE_VERIFY_KR(kr);
		} while (--rval);
#elif USE_POSIX_SEM
		do {
			int ret = sem_post(&dsema->dsema_sem);
			DISPATCH_SEMAPHORE_VERIFY_RET(ret);
		} while (--rval);
#endif
	}
	if (head) {
		// async group notify blocks
		do {
			dispatch_async_f(head->dsn_queue, head->dsn_ctxt, head->dsn_func);
			_dispatch_release(head->dsn_queue);
			next = fastpath(head->dsn_next);
			if (!next && head != tail) {
				// A concurrent enqueuer may have published this node before
				// linking dsn_next; spin until the link becomes visible.
				// (tail snapshot above guarantees a successor exists.)
				while (!(next = fastpath(head->dsn_next))) {
					_dispatch_hardware_pause();
				}
			}
			free(head);
		} while ((head = next));
		// Drop the reference the notify list held on the group.
		_dispatch_release(dsema);
	}
	return 0;
}
// Block on a per-thread semaphore handle until it is signaled.
//
// Fixed: the POSIX branch previously retried on ANY nonzero return
// (`while (ret != 0)`), so a persistent error such as EINVAL would spin
// forever and DISPATCH_SEMAPHORE_VERIFY_RET could never fire. Retry only on
// EINTR, matching the EINTR/KERN_ABORTED handling used by every other wait
// in this file.
void _dispatch_thread_semaphore_wait(_dispatch_thread_semaphore_t sema) {
#if USE_MACH_SEM
	semaphore_t s4 = (semaphore_t)sema;
	kern_return_t kr;
	do {
		kr = semaphore_wait(s4);
	} while (slowpath(kr == KERN_ABORTED));
	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
#elif USE_POSIX_SEM
	int ret;
	do {
		ret = sem_wait((sem_t *)sema);
	} while (slowpath(ret == -1 && errno == EINTR));
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
#endif
}
// Wait on the semaphore until it is signaled or `timeout` passes.
// Returns true on timeout, false once the wait succeeds.
bool _dispatch_sema4_timedwait(_dispatch_sema4_t *sema, dispatch_time_t timeout) {
	struct timespec deadline;
	int rc;

	do {
		// Recompute the absolute deadline on every retry: sem_timedwait
		// takes a wall-clock timespec and may be interrupted by signals.
		uint64_t nsec = _dispatch_time_nanoseconds_since_epoch(timeout);
		deadline.tv_sec = (typeof(deadline.tv_sec))(nsec / NSEC_PER_SEC);
		deadline.tv_nsec = (typeof(deadline.tv_nsec))(nsec % NSEC_PER_SEC);
		rc = slowpath(sem_timedwait(sema, &deadline));
	} while (rc == -1 && errno == EINTR);

	if (rc == -1 && errno == ETIMEDOUT) {
		return true; // deadline expired without a signal
	}
	DISPATCH_SEMAPHORE_VERIFY_RET(rc);
	return false;
}
// Create a fresh per-thread semaphore handle.
//
// Fixed: the POSIX branch passed an unchecked malloc() result straight to
// sem_init(), a NULL dereference (UB) under memory pressure. Retry the
// allocation with a back-off sleep, mirroring the Mach branch's
// semaphore_create retry loop.
DISPATCH_NOINLINE
static _dispatch_thread_semaphore_t _dispatch_thread_semaphore_create(void) {
	_dispatch_safe_fork = false;
#if USE_MACH_SEM
	semaphore_t s4;
	kern_return_t kr;
	while (slowpath(kr = semaphore_create(mach_task_self(), &s4,
			SYNC_POLICY_FIFO, 0))) {
		DISPATCH_VERIFY_MIG(kr);
		sleep(1);
	}
	return s4;
#elif USE_POSIX_SEM
	sem_t *s4;
	while (slowpath(!(s4 = malloc(sizeof(*s4))))) {
		sleep(1);
	}
	int ret = sem_init(s4, 0, 0);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
	return (_dispatch_thread_semaphore_t)s4;
#endif
}
// Slow path of dispatch_group_wait(): block until the group's value returns
// to its original count or `timeout` passes. On success the woken thread
// calls _dispatch_group_wake() and returns its result (0); on timeout
// returns KERN_OPERATION_TIMED_OUT (Mach) or -1 with errno = ETIMEDOUT
// (POSIX).
DISPATCH_NOINLINE
static long _dispatch_group_wait_slow(dispatch_semaphore_t dsema,
		dispatch_time_t timeout) {
	long orig;

again:
	// check before we cause another signal to be sent by incrementing
	// dsema->dsema_group_waiters
	if (dsema->dsema_value == dsema->dsema_orig) {
		return _dispatch_group_wake(dsema);
	}
	// Mach semaphores appear to sometimes spuriously wake up. Therefore,
	// we keep a parallel count of the number of times a Mach semaphore is
	// signaled (6880961).
	(void)dispatch_atomic_inc2o(dsema, dsema_group_waiters);
	// check the values again in case we need to wake any threads
	if (dsema->dsema_value == dsema->dsema_orig) {
		return _dispatch_group_wake(dsema);
	}

#if USE_MACH_SEM
	mach_timespec_t _timeout;
	kern_return_t kr;

	_dispatch_semaphore_create_port(&dsema->dsema_waiter_port);

	// From xnu/osfmk/kern/sync_sema.c:
	// wait_semaphore->count = -1; /* we don't keep an actual count */
	//
	// The code above does not match the documentation, and that fact is
	// not surprising. The documented semantics are clumsy to use in any
	// practical way. The above hack effectively tricks the rest of the
	// Mach semaphore logic to behave like the libdispatch algorithm.

	switch (timeout) {
	default:
		// Finite deadline: recompute remaining time on each KERN_ABORTED.
		do {
			uint64_t nsec = _dispatch_timeout(timeout);
			_timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
			_timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
			kr = slowpath(semaphore_timedwait(dsema->dsema_waiter_port,
					_timeout));
		} while (kr == KERN_ABORTED);
		if (kr != KERN_OPERATION_TIMED_OUT) {
			DISPATCH_SEMAPHORE_VERIFY_KR(kr);
			break;
		}
		// Fall through and try to undo the earlier change to
		// dsema->dsema_group_waiters
	case DISPATCH_TIME_NOW:
		// Try to retract our waiter registration via cmpxchg; if it drops
		// to 0 first, a signal is already in flight and must be drained.
		while ((orig = dsema->dsema_group_waiters)) {
			if (dispatch_atomic_cmpxchg2o(dsema, dsema_group_waiters, orig,
					orig - 1)) {
				return KERN_OPERATION_TIMED_OUT;
			}
		}
		// Another thread called semaphore_signal().
		// Fall through and drain the wakeup.
	case DISPATCH_TIME_FOREVER:
		do {
			kr = semaphore_wait(dsema->dsema_waiter_port);
		} while (kr == KERN_ABORTED);
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
		break;
	}
#elif USE_POSIX_SEM
	struct timespec _timeout;
	int ret;

	switch (timeout) {
	default:
		// Finite deadline: recompute remaining time on each EINTR.
		do {
			_timeout = _dispatch_timeout_ts(timeout);
			ret = slowpath(sem_timedwait(&dsema->dsema_sem, &_timeout));
		} while (ret == -1 && errno == EINTR);
		if (!(ret == -1 && errno == ETIMEDOUT)) {
			DISPATCH_SEMAPHORE_VERIFY_RET(ret);
			break;
		}
		// Fall through and try to undo the earlier change to
		// dsema->dsema_group_waiters
	case DISPATCH_TIME_NOW:
		// Try to retract our waiter registration via cmpxchg; if it drops
		// to 0 first, a signal is already in flight and must be drained.
		while ((orig = dsema->dsema_group_waiters)) {
			if (dispatch_atomic_cmpxchg2o(dsema, dsema_group_waiters, orig,
					orig - 1)) {
				errno = ETIMEDOUT;
				return -1;
			}
		}
		// Another thread called semaphore_signal().
		// Fall through and drain the wakeup.
	case DISPATCH_TIME_FOREVER:
		do {
			ret = sem_wait(&dsema->dsema_sem);
		} while (ret == -1 && errno == EINTR);
		DISPATCH_SEMAPHORE_VERIFY_RET(ret);
		break;
	}
#endif
	// Woken (possibly spuriously): re-run the value check from the top.
	goto again;
}
// Block on the semaphore until it is signaled.
//
// Fixed: sem_wait() can fail with EINTR when interrupted by a signal
// handler (POSIX); the original treated that as a hard error. Retry on
// EINTR, matching _dispatch_sema4_timedwait and the rest of this file.
void _dispatch_sema4_wait(_dispatch_sema4_t *sema) {
	int ret;
	do {
		ret = sem_wait(sema);
	} while (ret == -1 && errno == EINTR);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
}
// Release the kernel resources backing the semaphore (POSIX: sem_destroy).
// The policy argument is unused on this backend.
void _dispatch_sema4_dispose_slow(_dispatch_sema4_t *sema,
		int policy DISPATCH_UNUSED) {
	int ret = sem_destroy(sema);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
}
// Initialize the semaphore with a count of 0 (process-private).
// The policy argument is unused on this backend.
void _dispatch_sema4_init(_dispatch_sema4_t *sema,
		int policy DISPATCH_UNUSED) {
	int ret = sem_init(sema, /*pshared=*/0, /*value=*/0);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
}