/*
 * Block-based front end for dispatch_group_notify_f(): heap-copy the
 * caller's block, then register it via the function-based variant with a
 * trampoline that invokes the copy and releases it afterwards.
 */
void
dispatch_group_notify(dispatch_group_t dg, dispatch_queue_t dq,
		dispatch_block_t db)
{
	dispatch_block_t db_copy = _dispatch_Block_copy(db);
	dispatch_group_notify_f(dg, dq, db_copy, _dispatch_call_block_and_release);
}
/*
 * Install a block-based event handler on a source. The handler is
 * heap-copied and the actual installation is deferred to a barrier on the
 * source's queue (_dispatch_source_set_event_handler2 consumes the copy).
 * Legacy sources are rejected up front.
 */
void
dispatch_source_set_event_handler(dispatch_source_t ds,
		dispatch_block_t handler)
{
	dispatch_assert(!ds->ds_is_legacy);
	dispatch_queue_t dq = (dispatch_queue_t)ds;
	dispatch_barrier_async_f(dq, _dispatch_Block_copy(handler),
			_dispatch_source_set_event_handler2);
}
/*
 * Create a dispatch data object wrapping `size` bytes at `buffer`.
 *
 * Ownership of the bytes depends on `destructor`:
 *  - DISPATCH_DATA_DESTRUCTOR_DEFAULT: the bytes are copied into a
 *    malloc'd buffer owned by the object (released later with the FREE
 *    destructor).
 *  - DISPATCH_DATA_DESTRUCTOR_INLINE: the bytes are copied into storage
 *    allocated directly after the object header; no destructor runs.
 *  - any other block: kept as a custom destructor; it is heap-copied here
 *    and handed to _dispatch_data_init together with `queue` —
 *    NOTE(review): exact invocation semantics (which queue runs it) are
 *    established in _dispatch_data_init, not visible here.
 *
 * Empty input (`buffer == NULL` or `size == 0`) returns the shared
 * dispatch_data_empty singleton; any caller-provided destructor is run
 * immediately so associated storage is not leaked.
 */
dispatch_data_t
dispatch_data_create(const void* buffer, size_t size, dispatch_queue_t queue,
		dispatch_block_t destructor)
{
	dispatch_data_t data;
	void *data_buf = NULL;
	if (!buffer || !size) {
		// Empty data requested so return the singleton empty object. Call
		// destructor immediately in this case to ensure any unused associated
		// storage is released.
		if (destructor) {
			_dispatch_data_destroy_buffer(buffer, size, queue,
					_dispatch_Block_copy(destructor));
		}
		return dispatch_data_empty;
	}
	if (destructor == DISPATCH_DATA_DESTRUCTOR_DEFAULT) {
		// The default destructor was provided, indicating the data should be
		// copied.
		data_buf = malloc(size);
		if (slowpath(!data_buf)) {
			return DISPATCH_OUT_OF_MEMORY;
		}
		// Rebind `buffer` to the private copy; the object now owns it.
		buffer = memcpy(data_buf, buffer, size);
		data = _dispatch_data_alloc(0, 0);
		destructor = DISPATCH_DATA_DESTRUCTOR_FREE;
	} else if (destructor == DISPATCH_DATA_DESTRUCTOR_INLINE) {
		// Allocate the object with `size` extra trailing bytes and copy the
		// payload right after the header. NOTE(review): arithmetic on void*
		// relies on the GCC/Clang extension (void* treated as char*).
		data = _dispatch_data_alloc(0, size);
		buffer = memcpy((void*)data + sizeof(struct dispatch_data_s), buffer,
				size);
		destructor = DISPATCH_DATA_DESTRUCTOR_NONE;
	} else {
		// Custom destructor block: retain it on the heap so it survives
		// until the data object is destroyed.
		data = _dispatch_data_alloc(0, 0);
		destructor = _dispatch_Block_copy(destructor);
	}
	_dispatch_data_init(data, buffer, size, queue, destructor);
	return data;
}
// Copy constructor: duplicate a block private-data object, taking retained
// references for the fields the copy owns (voucher, block). The group,
// queue, and thread fields start empty; a fresh entered group is created
// only for copies of fully-initialized (magic-stamped) originals.
inline DISPATCH_ALWAYS_INLINE
dispatch_block_private_data_s(dispatch_block_private_data_s const &o) noexcept
		: dbpd_magic(DISPATCH_BLOCK_PRIVATE_DATA_MAGIC),
		dbpd_flags(o.dbpd_flags), dbpd_atomic_flags(), dbpd_performed(),
		dbpd_priority(o.dbpd_priority), dbpd_voucher(o.dbpd_voucher),
		dbpd_block(), dbpd_group(), dbpd_queue(), dbpd_thread()
{
	// copy constructor, create copy with retained references
	if (dbpd_voucher) voucher_retain(dbpd_voucher);
	// Heap-copy the source's block rather than sharing the pointer.
	if (o.dbpd_block) dbpd_block = _dispatch_Block_copy(o.dbpd_block);
	_dispatch_block_private_data_debug("copy from %p, block: %p from %p",
			&o, dbpd_block, o.dbpd_block);
	// No group in initial copy of stack object
	if (!o.dbpd_magic) return;
	dbpd_group = _dispatch_group_create_and_enter();
}
/*
 * Initialize an already-allocated dispatch data object with the given
 * bytes and destructor (no owning queue). Empty input collapses to a
 * NULL/0 payload with no destructor; a caller-supplied destructor is then
 * run immediately so any associated storage is released.
 */
void
_dispatch_data_init_with_bytes(dispatch_data_t data, const void *buffer,
		size_t size, dispatch_block_t destructor)
{
	if (buffer && size) {
		_dispatch_data_init(data, buffer, size, NULL, destructor);
		return;
	}
	if (destructor) {
		_dispatch_data_destroy_buffer(buffer, size, NULL,
				_dispatch_Block_copy(destructor));
	}
	_dispatch_data_init(data, NULL, 0, NULL, DISPATCH_DATA_DESTRUCTOR_NONE);
}
		}
	}
	// Tail of an enclosing function whose head is outside this view:
	// publish prior writes, run the apply body on this thread with the
	// target queue bound to TSD, then restore the previous queue binding.
	dispatch_atomic_acquire_barrier();
	_dispatch_thread_setspecific(dispatch_queue_key, dq);
	_dispatch_apply_f2(dq, da, _dispatch_apply2);
	_dispatch_thread_setspecific(dispatch_queue_key, old_dq);
}
#ifdef __BLOCKS__
#if DISPATCH_COCOA_COMPAT
// GC-safe path for dispatch_apply: heap-copy the block and drive it through
// the function-based dispatch_apply_f via its Block_invoke entry point,
// releasing the copy once all iterations have completed.
DISPATCH_NOINLINE
static void
_dispatch_apply_slow(size_t iterations, dispatch_queue_t dq,
		void (^work)(size_t))
{
	struct Block_basic *bb =
			(struct Block_basic *)_dispatch_Block_copy((void *)work);
	dispatch_apply_f(iterations, dq, bb,
			(dispatch_apply_function_t)bb->Block_invoke);
	Block_release(bb);
}
#endif
// Block-based dispatch_apply entry point (continues past this view).
void
dispatch_apply(size_t iterations, dispatch_queue_t dq, void (^work)(size_t))
{
#if DISPATCH_COCOA_COMPAT
	// Under GC, blocks transferred to other threads must be Block_copy()ed
	// rdar://problem/7455071
	if (dispatch_begin_thread_4GC) {
		return _dispatch_apply_slow(iterations, dq, work);
	}