DISPATCH_NOINLINE
void
dispatch_group_notify_f(dispatch_group_t dg, dispatch_queue_t dq, void *ctxt,
		void (*func)(void *))
{
	dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg;
	struct dispatch_sema_notify_s *dsn, *prev;

	// FIXME -- this should be updated to use the continuation cache
	while (!(dsn = calloc(1, sizeof(*dsn)))) {
		sleep(1);
	}

	dsn->dsn_queue = dq;
	dsn->dsn_ctxt = ctxt;
	dsn->dsn_func = func;
	_dispatch_retain(dq);
	dispatch_atomic_store_barrier();
	prev = dispatch_atomic_xchg2o(dsema, dsema_notify_tail, dsn);
	if (fastpath(prev)) {
		prev->dsn_next = dsn;
	} else {
		_dispatch_retain(dg);
		(void)dispatch_atomic_xchg2o(dsema, dsema_notify_head, dsn);
		if (dsema->dsema_value == dsema->dsema_orig) {
			_dispatch_group_wake(dsema);
		}
	}
}
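/*
 * A minimal usage sketch against the public GCD API showing how callers
 * reach the notify path above. The helper names (do_work, all_done,
 * example_group_notify) are illustrative, not part of libdispatch.
 */
#include <dispatch/dispatch.h>
#include <stdio.h>

static void do_work(void *ctxt)
{
	(void)ctxt; // a unit of work tracked by the group
}

static void all_done(void *ctxt)
{
	printf("%s\n", (const char *)ctxt); // runs once the group drains
}

int example_group_notify(void)
{
	dispatch_group_t group = dispatch_group_create();
	dispatch_queue_t queue =
			dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);

	dispatch_group_async_f(group, queue, NULL, do_work);
	// If the group is already empty (dsema_value == dsema_orig above),
	// the notification fires immediately; otherwise it is queued on the
	// notify list and fired by _dispatch_group_wake().
	dispatch_group_notify_f(group, queue, (void *)"done", all_done);

	dispatch_release(group);
	return 0;
}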
DISPATCH_NOINLINE
long
_dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema)
{
	// Before dsema_sent_ksignals is incremented we can rely on the reference
	// held by the waiter. However, once this value is incremented the waiter
	// may return between the atomic increment and the semaphore_signal(),
	// therefore an explicit reference must be held in order to safely access
	// dsema after the atomic increment.
	_dispatch_retain(dsema);

	(void)dispatch_atomic_inc2o(dsema, dsema_sent_ksignals);

#if USE_MACH_SEM
	_dispatch_semaphore_create_port(&dsema->dsema_port);
	kern_return_t kr = semaphore_signal(dsema->dsema_port);
	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
#elif USE_POSIX_SEM
	int ret = sem_post(&dsema->dsema_sem);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
#endif

	_dispatch_release(dsema);
	return 1;
}
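/*
 * A sketch of the public-API pattern that exercises the slow path above:
 * dispatch_semaphore_signal() only falls through to it when the increment
 * leaves a waiter that must be woken via the kernel semaphore. The names
 * producer and example_semaphore are illustrative.
 */
#include <dispatch/dispatch.h>

static void producer(void *ctxt)
{
	dispatch_semaphore_t sema = (dispatch_semaphore_t)ctxt;
	// With a waiter blocked in dispatch_semaphore_wait(), this signal
	// takes the slow path and posts the Mach/POSIX semaphore. The
	// _dispatch_retain() above keeps sema alive even if the waiter
	// returns and releases its reference mid-signal.
	dispatch_semaphore_signal(sema);
}

int example_semaphore(void)
{
	dispatch_semaphore_t sema = dispatch_semaphore_create(0);
	dispatch_queue_t queue =
			dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);

	dispatch_async_f(queue, sema, producer);
	dispatch_semaphore_wait(sema, DISPATCH_TIME_FOREVER);
	dispatch_release(sema);
	return 0;
}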
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_data_init(dispatch_data_t data, const void *buffer, size_t size,
		dispatch_queue_t queue, dispatch_block_t destructor)
{
	data->buf = buffer;
	data->size = size;
	data->destructor = destructor;
	if (queue) {
		_dispatch_retain(queue);
		data->do_targetq = queue;
	}
}
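/*
 * A sketch of the public dispatch_data_create() path that ends up in
 * _dispatch_data_init above. DISPATCH_DATA_DESTRUCTOR_FREE hands
 * ownership of a malloc(3)ed buffer to the data object; the queue is
 * retained and becomes do_targetq, where the destructor later runs.
 * The function name example_data is illustrative.
 */
#include <dispatch/dispatch.h>
#include <stdlib.h>

dispatch_data_t example_data(void)
{
	size_t size = 64;
	char *buffer = calloc(1, size);
	if (!buffer) {
		return NULL;
	}

	dispatch_queue_t queue =
			dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);

	// No copy is made; the data object frees buffer when released.
	return dispatch_data_create(buffer, size, queue,
			DISPATCH_DATA_DESTRUCTOR_FREE);
}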
void
dispatch_suspend(dispatch_object_t dou)
{
	DISPATCH_OBJECT_TFB(_dispatch_objc_suspend, dou);
	if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) ||
			slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) {
		return;
	}
	// rdar://8181908 explains why we need to do an internal retain at every
	// suspension.
	(void)dispatch_atomic_add2o(dou._do, do_suspend_cnt,
			DISPATCH_OBJECT_SUSPEND_INTERVAL, relaxed);
	_dispatch_retain(dou._do);
}
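/*
 * A sketch of balanced suspend/resume on a user-created queue. Each
 * suspend bumps do_suspend_cnt by DISPATCH_OBJECT_SUSPEND_INTERVAL and
 * takes an internal retain; dispatch_resume() undoes both. Global root
 * queues are exempt, per the early return above. The names queued_work
 * and example_suspend are illustrative.
 */
#include <dispatch/dispatch.h>

static void queued_work(void *ctxt)
{
	(void)ctxt; // does not run while the queue is suspended
}

void example_suspend(void)
{
	dispatch_queue_t queue = dispatch_queue_create("com.example.q", NULL);

	dispatch_suspend(queue);
	dispatch_async_f(queue, NULL, queued_work); // held until resume
	dispatch_resume(queue);                     // work may now run

	dispatch_release(queue);
}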
void
dispatch_source_cancel(dispatch_source_t ds)
{
#if DISPATCH_DEBUG
	dispatch_debug(as_do(ds), __FUNCTION__);
#endif
	// Right after we set the cancel flag, someone else could potentially
	// invoke the source, do the cancellation, unregister the source, and
	// deallocate it. We therefore need to retain/release around setting
	// the bit.
	_dispatch_retain(as_do(ds));
	dispatch_atomic_or(&ds->ds_atomic_flags, DSF_CANCELED);
	_dispatch_wakeup(as_do(ds));
	_dispatch_release(as_do(ds));
}
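/*
 * A sketch of cancelling a timer source through the path above. The
 * cancellation handler is the safe place to tear down resources tied to
 * the source, since it runs only once the source can no longer fire.
 * The names on_timer, on_cancel, and example_cancel are illustrative.
 */
#include <dispatch/dispatch.h>
#include <stdio.h>

static void on_timer(void *ctxt) { (void)ctxt; printf("tick\n"); }
static void on_cancel(void *ctxt) { (void)ctxt; printf("cancelled\n"); }

void example_cancel(void)
{
	dispatch_queue_t queue =
			dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
	dispatch_source_t timer = dispatch_source_create(
			DISPATCH_SOURCE_TYPE_TIMER, 0, 0, queue);

	dispatch_source_set_event_handler_f(timer, on_timer);
	dispatch_source_set_cancel_handler_f(timer, on_cancel);
	dispatch_source_set_timer(timer, DISPATCH_TIME_NOW, NSEC_PER_SEC, 0);
	dispatch_resume(timer); // sources are created suspended

	dispatch_source_cancel(timer); // sets DSF_CANCELED, wakes the source
	dispatch_release(timer);
}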
dispatch_source_t
dispatch_source_create(dispatch_source_type_t type, uintptr_t handle,
		uintptr_t mask, dispatch_queue_t q)
{
	dispatch_source_t ds = NULL;
	static char source_label[sizeof(ds->dq_label)] = "source";

	// input validation
	if (type == NULL || (mask & ~type->mask)) {
		goto out_bad;
	}

	ds = calloc(1ul, sizeof(struct dispatch_source_s));
	if (slowpath(!ds)) {
		goto out_bad;
	}

	// Initialize as a queue first, then override some settings below.
	_dispatch_queue_init((dispatch_queue_t)ds);
	memcpy(ds->dq_label, source_label, sizeof(source_label));

	// Dispatch Object
	ds->do_vtable = &_dispatch_source_kevent_vtable;
	ds->do_ref_cnt++; // the reference the manager queue holds
	ds->do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_INTERVAL;
	// do_targetq will be retained below, past the point of no return
	ds->do_targetq = q;

	if (slowpath(!type->init(ds, type, handle, mask, q))) {
		goto out_bad;
	}

	dispatch_assert(!(ds->ds_is_level && ds->ds_is_adder));

#if DISPATCH_DEBUG
	dispatch_debug(as_do(ds), __FUNCTION__);
#endif

	_dispatch_retain(as_do(ds->do_targetq));
	return ds;

out_bad:
	free(ds);
	return NULL;
}
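/*
 * A sketch of the public entry into dispatch_source_create above: a read
 * source monitoring a file descriptor. The mask must be a subset of
 * type->mask (0 for DISPATCH_SOURCE_TYPE_READ) or creation fails. The
 * names on_readable and example_read_source are illustrative.
 */
#include <dispatch/dispatch.h>
#include <unistd.h>

static void on_readable(void *ctxt)
{
	dispatch_source_t source = (dispatch_source_t)ctxt;
	char buf[256];
	// dispatch_source_get_handle() returns the fd passed at creation.
	(void)read((int)dispatch_source_get_handle(source), buf, sizeof(buf));
}

dispatch_source_t example_read_source(int fd)
{
	dispatch_queue_t queue =
			dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
	dispatch_source_t source = dispatch_source_create(
			DISPATCH_SOURCE_TYPE_READ, (uintptr_t)fd, 0, queue);
	if (!source) {
		return NULL; // NULL type or invalid mask, per the checks above
	}
	dispatch_set_context(source, source);
	dispatch_source_set_event_handler_f(source, on_readable);
	dispatch_resume(source); // created with do_suspend_cnt set
	return source;
}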