// Decrement the object's suspend count by one suspend interval; wakes the
// object (via the slow path) when the count drops back to zero, and traps
// on over-resume.
void dispatch_resume(dispatch_object_t dou) {
	DISPATCH_OBJECT_TFB(_dispatch_objc_resume, dou);
	// Global objects cannot be suspended or resumed. This also has the
	// side effect of saturating the suspend count of an object and
	// guarding against resuming due to overflow.
	if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) ||
			slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) {
		return;
	}
	// Check the previous value of the suspend count. If the previous
	// value was a single suspend interval, the object should be resumed.
	// If the previous value was less than the suspend interval, the object
	// has been over-resumed.
	unsigned int suspend_cnt = dispatch_atomic_sub_orig2o(dou._do,
			do_suspend_cnt, DISPATCH_OBJECT_SUSPEND_INTERVAL, relaxed);
	if (fastpath(suspend_cnt > DISPATCH_OBJECT_SUSPEND_INTERVAL)) {
		// Still suspended after this resume: just drop the extra reference.
		// Balancing the retain() done in suspend() for rdar://8181908
		return _dispatch_release(dou._do);
	}
	if (fastpath(suspend_cnt == DISPATCH_OBJECT_SUSPEND_INTERVAL)) {
		// Count hit zero: wake the object (slow path also drops the
		// suspend() reference).
		return _dispatch_resume_slow(dou);
	}
	// Previous count was below one interval: resumed more times than
	// suspended.
	DISPATCH_CLIENT_CRASH("Over-resume of an object");
}
// Slow path of dispatch_semaphore_signal(): post the underlying kernel
// semaphore to wake one waiter. Always returns 1 (a waiter was signaled).
DISPATCH_NOINLINE
long _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema) {
	// Before dsema_sent_ksignals is incremented we can rely on the reference
	// held by the waiter. However, once this value is incremented the waiter
	// may return between the atomic increment and the semaphore_signal(),
	// therefore an explicit reference must be held in order to safely access
	// dsema after the atomic increment.
	_dispatch_retain(dsema);
	(void)dispatch_atomic_inc2o(dsema, dsema_sent_ksignals);
#if USE_MACH_SEM
	// Lazily create the Mach port backing this semaphore, then signal it.
	_dispatch_semaphore_create_port(&dsema->dsema_port);
	kern_return_t kr = semaphore_signal(dsema->dsema_port);
	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
#elif USE_POSIX_SEM
	int ret = sem_post(&dsema->dsema_sem);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
#endif
	// Drop the temporary reference taken above.
	_dispatch_release(dsema);
	return 1;
}
// Slow path of dispatch_resume(): the suspend count reached zero, so wake
// the object and drop the reference that suspend() took.
DISPATCH_NOINLINE
static void _dispatch_resume_slow(dispatch_object_t dou) {
	_dispatch_wakeup(dou._do);
	// Balancing the retain() done in suspend() for rdar://8181908
	_dispatch_release(dou._do);
}
// Wake everything waiting on a dispatch group whose count reached zero:
// signal all synchronous dispatch_group_wait() waiters and submit all
// queued dispatch_group_notify() blocks. Returns 0.
DISPATCH_NOINLINE
static long _dispatch_group_wake(dispatch_semaphore_t dsema) {
	struct dispatch_sema_notify_s *next, *head, *tail = NULL;
	long rval;
	// Atomically detach the notify list (head first, then tail).
	head = dispatch_atomic_xchg2o(dsema, dsema_notify_head, NULL);
	if (head) {
		// snapshot before anything is notified/woken <rdar://problem/8554546>
		tail = dispatch_atomic_xchg2o(dsema, dsema_notify_tail, NULL);
	}
	// Claim the count of blocked dispatch_group_wait() callers.
	rval = dispatch_atomic_xchg2o(dsema, dsema_group_waiters, 0);
	if (rval) {
		// wake group waiters
#if USE_MACH_SEM
		_dispatch_semaphore_create_port(&dsema->dsema_waiter_port);
		do {
			kern_return_t kr = semaphore_signal(dsema->dsema_waiter_port);
			DISPATCH_SEMAPHORE_VERIFY_KR(kr);
		} while (--rval);
#elif USE_POSIX_SEM
		do {
			int ret = sem_post(&dsema->dsema_sem);
			DISPATCH_SEMAPHORE_VERIFY_RET(ret);
		} while (--rval);
#endif
	}
	if (head) {
		// async group notify blocks
		do {
			dispatch_async_f(head->dsn_queue, head->dsn_ctxt, head->dsn_func);
			_dispatch_release(head->dsn_queue);
			next = fastpath(head->dsn_next);
			if (!next && head != tail) {
				// An enqueuer has published the tail but not yet linked
				// this node's next pointer; spin until the link appears.
				while (!(next = fastpath(head->dsn_next))) {
					_dispatch_hardware_pause();
				}
			}
			free(head);
		} while ((head = next));
		// Drop the reference held on behalf of the notify list.
		_dispatch_release(dsema);
	}
	return 0;
}
// Final teardown of a dispatch object: capture the fields needed after
// destruction, free the object's storage, then run the finalizer (if both
// finalizer and context are set) on the former target queue, and finally
// drop the reference held on that queue.
static void _dispatch_dealloc(dispatch_object_t dou) {
	// Read everything we need before the storage is released.
	dispatch_queue_t target_queue = dou._do->do_targetq;
	dispatch_function_t finalizer = dou._do->do_finalizer;
	void *context = dou._do->do_ctxt;

	_os_object_dealloc(dou._os_obj);

	if (finalizer != NULL && context != NULL) {
		dispatch_async_f(target_queue, context, finalizer);
	}
	_dispatch_release(target_queue);
}
// Called when the last external (client-visible) reference to an object is
// released. Traps if the object is still suspended, runs type-specific
// xref-dispose hooks, then drops the internal reference.
void _dispatch_xref_dispose(dispatch_object_t dou) {
	if (slowpath(DISPATCH_OBJECT_SUSPENDED(dou._do))) {
		// Arguments for and against this assert are within 6705399
		DISPATCH_CLIENT_CRASH("Release of a suspended object");
	}
#if !USE_OBJC
	// In the non-ObjC build, dispatch (rather than the ObjC runtime)
	// performs type-specific teardown and the internal release here.
	if (dx_type(dou._do) == DISPATCH_SOURCE_KEVENT_TYPE) {
		_dispatch_source_xref_dispose(dou._ds);
	} else if (dou._dq->do_vtable == DISPATCH_VTABLE(queue_runloop)) {
		_dispatch_runloop_queue_xref_dispose(dou._dq);
	}
	return _dispatch_release(dou._os_obj);
#endif
}
// Asynchronously cancel a dispatch source: set the canceled flag and wake
// the source so its cancellation handler can run.
void dispatch_source_cancel(dispatch_source_t ds) {
#if DISPATCH_DEBUG
	dispatch_debug(as_do(ds), __FUNCTION__);
#endif
	// Right after we set the cancel flag, someone else
	// could potentially invoke the source, do the cancelation,
	// unregister the source, and deallocate it. We would
	// need to therefore retain/release before setting the bit
	_dispatch_retain(as_do(ds));
	dispatch_atomic_or(&ds->ds_atomic_flags, DSF_CANCELED);
	_dispatch_wakeup(as_do(ds));
	_dispatch_release(as_do(ds));
}
// Called on the last external release of a dispatch source. For legacy
// sources, implicitly cancels (unless it is a one-shot timer) and clears
// any leftover suspend count; otherwise traps if released while suspended.
// Finally wakes the source and drops the internal reference.
DISPATCH_NOINLINE
void _dispatch_source_xref_release(dispatch_source_t ds) {
#ifndef DISPATCH_NO_LEGACY
	if (ds->ds_is_legacy) {
		// Cancel unless this is a one-shot timer.
		// FIX: the original expression read
		//     flags & DISPATCH_TIMER_TYPE_MASK == DISPATCH_TIMER_ONESHOT
		// but `==` binds tighter than `&`, so it computed
		// flags & (MASK == ONESHOT) instead of comparing the masked type.
		if ((ds->ds_timer.flags & DISPATCH_TIMER_TYPE_MASK) !=
				DISPATCH_TIMER_ONESHOT) {
			dispatch_source_cancel(ds);
		}
		// Clients often leave sources suspended at the last release;
		// clear the suspend count (preserving only the lock bit) so the
		// source can still drain and be freed.
		dispatch_atomic_and(&ds->do_suspend_cnt,
				DISPATCH_OBJECT_SUSPEND_LOCK);
	} else
#endif
	if (slowpath(DISPATCH_OBJECT_SUSPENDED(ds))) {
		// Arguments for and against this assert are within 6705399
		DISPATCH_CLIENT_CRASH("Release of a suspended object");
	}
	_dispatch_wakeup(as_do(ds));
	_dispatch_release(as_do(ds));
}