static void gcdpoll_watch_free(AvahiWatch *w)
{
    AvahiWatch *prev;
    AvahiWatch *cur;

    if (w->w_read) {
        dispatch_source_cancel(w->w_read);
        dispatch_release(w->w_read);
    }
    if (w->w_write) {
        dispatch_source_cancel(w->w_write);
        dispatch_release(w->w_write);
    }

    prev = NULL;
    for (cur = all_w; cur; prev = cur, cur = cur->next) {
        if (cur != w)
            continue;

        if (prev == NULL)
            all_w = w->next;
        else
            prev->next = w->next;

        break;
    }

    free(w);
}

static void CFSocketFinalize (CFTypeRef cf)
{
    CFSocketRef s = (CFSocketRef)cf;
#if HAVE_LIBDISPATCH
    dispatch_queue_t q = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
    dispatch_source_cancel(s->_readSource);
    dispatch_source_cancel(s->_writeSource);

    // Wait for source handlers to complete
    // before we proceed to destruction.
    dispatch_barrier_sync_f(q, NULL, DummyBarrier);

    if (s->_source != NULL)
        CFRelease(s->_source);
#endif
    if (s->_socket != -1) {
        GSMutexLock (&_kCFSocketObjectsLock);
        CFDictionaryRemoveValue(_kCFSocketObjects, (void*)(uintptr_t) s->_socket);
        closesocket (s->_socket);
        GSMutexUnlock (&_kCFSocketObjectsLock);
    }

    if (s->_address)
        CFRelease (s->_address);
    if (s->_peerAddress)
        CFRelease (s->_peerAddress);
}

void destroy_socket_context_s(socket_context_t *context)
{
    if (context->read_source)
        dispatch_source_cancel(context->read_source);
    if (context->write_source)
        dispatch_source_cancel(context->write_source);

    free(context);
}

static void mdns_deinit_task(void *arg)
{
    struct mdns_group_entry *ge;
    struct mdns_browser *mb;
    AvahiWatch *w;
    AvahiTimeout *t;

    if (mdns_client)
        avahi_client_free(mdns_client);

    for (t = all_t; t; t = t->next) {
        if (t->timer) {
            dispatch_source_cancel(t->timer);
            dispatch_release(t->timer);
            t->timer = NULL;
        }
    }

    for (w = all_w; w; w = w->next) {
        if (w->w_read) {
            dispatch_source_cancel(w->w_read);
            dispatch_release(w->w_read);
        }
        if (w->w_write) {
            dispatch_source_cancel(w->w_write);
            dispatch_release(w->w_write);
        }
    }

    for (ge = group_entries; group_entries; ge = group_entries) {
        group_entries = ge->next;
        free(ge->name);
        free(ge->type);
        avahi_string_list_free(ge->txt);
        free(ge);
    }

    for (mb = browser_list; browser_list; mb = browser_list) {
        browser_list = mb->next;
        free(mb->type);
        free(mb);
    }
}

void destroy_curl_context(curl_context_t *context)
{
    if (context->read_source)
        dispatch_source_cancel(context->read_source);
    if (context->write_source)
        dispatch_source_cancel(context->write_source);

    log_detail("destroyed context %p\n", context);
    free(context);
}

static void gcdpoll_timeout_free(AvahiTimeout *t)
{
    AvahiTimeout *prev;
    AvahiTimeout *cur;

    if (t->timer) {
        dispatch_source_cancel(t->timer);
        dispatch_release(t->timer);
        t->timer = NULL;
    }

    prev = NULL;
    for (cur = all_t; cur; prev = cur, cur = cur->next) {
        if (cur != t)
            continue;

        if (prev == NULL)
            all_t = t->next;
        else
            prev->next = t->next;

        break;
    }

    free(t);
}

static bool dispatch_test_check_evfilt_vm(void)
{
    int kq = kqueue();
    assert(kq != -1);
    struct kevent ke = {
        .filter = EVFILT_VM,
        .flags = EV_ADD|EV_ENABLE|EV_RECEIPT,
        .fflags = NOTE_VM_PRESSURE,
    };
    int r = kevent(kq, &ke, 1, &ke, 1, NULL);
    close(kq);
    return !(r > 0 && ke.flags & EV_ERROR && ke.data == ENOTSUP);
}

static void cleanup(void)
{
    dispatch_source_cancel(vm_source);
    dispatch_release(vm_source);
    dispatch_release(vm_queue);

    int32_t pc = 0, i;
    for (i = 0; i < max_page_count; ++i) {
        if (pages[i]) {
            pc++;
            free(pages[i]);
        }
    }
    if (pc) {
        log_msg("Freed %ldMB\n", pg2mb(pc));
    }
    free(pages);
    test_stop();
}

void _notify_lib_port_proc_release(notify_state_t *ns, mach_port_t port, pid_t proc)
{
    portproc_data_t *pdata = NULL;

    if (ns == NULL)
        return;
    if ((proc == 0) && (port == MACH_PORT_NULL))
        return;

    if (ns->lock != NULL)
        pthread_mutex_lock(ns->lock);

    if (proc == 0)
        pdata = _nc_table_find_n(ns->port_table, port);
    else
        pdata = _nc_table_find_n(ns->proc_table, proc);

    if (pdata != NULL) {
        if (pdata->refcount > 0)
            pdata->refcount--;
        if (pdata->refcount == 0) {
            if (proc == 0)
                _nc_table_delete_n(ns->port_table, port);
            else
                _nc_table_delete_n(ns->proc_table, proc);

            dispatch_source_cancel(pdata->src);
            dispatch_release(pdata->src);
            free(pdata);
        }
    }

    if (ns->lock != NULL)
        pthread_mutex_unlock(ns->lock);
}

static void test_io_close(int with_timer, bool from_path)
{
#define chunks 4
#define READSIZE (512*1024)
    unsigned int i;
    const char *path = LARGE_FILE;
    int fd = open(path, O_RDONLY);
    if (fd == -1) {
        if (errno == ENOENT) {
            test_skip("Large file not found");
            return;
        }
        test_errno("open", errno, 0);
        test_stop();
    }
#ifdef F_GLOBAL_NOCACHE
    if (fcntl(fd, F_GLOBAL_NOCACHE, 1) == -1) {
        test_errno("fcntl F_GLOBAL_NOCACHE", errno, 0);
        test_stop();
    }
#endif
    struct stat sb;
    if (fstat(fd, &sb)) {
        test_errno("fstat", errno, 0);
        test_stop();
    }
    const size_t size = (size_t)sb.st_size / chunks;
    const int expected_error = with_timer ? ECANCELED : 0;
    dispatch_source_t t = NULL;
    dispatch_group_t g = dispatch_group_create();
    dispatch_group_enter(g);
    void (^cleanup_handler)(int error) = ^(int error) {
        test_errno("create error", error, 0);
        dispatch_group_leave(g);
        close(fd);
    };
    dispatch_io_t io;
    if (!from_path) {
        io = dispatch_io_create(DISPATCH_IO_RANDOM, fd,
                dispatch_get_global_queue(0, 0), cleanup_handler);
    } else {
#if DISPATCHTEST_IO_PATH
        io = dispatch_io_create_with_path(DISPATCH_IO_RANDOM, path, O_RDONLY, 0,
                dispatch_get_global_queue(0, 0), cleanup_handler);
#endif
    }
    dispatch_io_set_high_water(io, READSIZE);
    if (with_timer == 1) {
        dispatch_io_set_low_water(io, READSIZE);
        dispatch_io_set_interval(io, 2 * NSEC_PER_SEC, DISPATCH_IO_STRICT_INTERVAL);
    } else if (with_timer == 2) {
        t = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0,
                dispatch_get_global_queue(0, 0));
        dispatch_retain(io);
        dispatch_source_set_event_handler(t, ^{
            dispatch_io_close(io, DISPATCH_IO_STOP);
            dispatch_source_cancel(t);
        });

static void cancelRepeatingTimer(struct nodeInstanceData *ctx)
{
    dispatch_source_cancel(ctx->timer);
    dispatch_release(ctx->timer);
    ctx->timer = NULL;
    dispatch_semaphore_wait(ctx->timerCanceled, DISPATCH_TIME_FOREVER);
}

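/*
 * Sketch (not from the original project): one way the timer used by
 * cancelRepeatingTimer() above could be created so that the
 * dispatch_semaphore_wait() there returns only after the event handler has
 * finished.  The ctx fields (timer, timerCanceled) come from the snippet;
 * setupRepeatingTimer(), the queue argument and the one-second interval are
 * assumptions for illustration.
 */
static void setupRepeatingTimer(struct nodeInstanceData *ctx, dispatch_queue_t queue)
{
    ctx->timerCanceled = dispatch_semaphore_create(0);
    ctx->timer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, queue);

    dispatch_source_set_timer(ctx->timer,
            dispatch_time(DISPATCH_TIME_NOW, NSEC_PER_SEC),
            NSEC_PER_SEC, NSEC_PER_MSEC);

    dispatch_source_set_event_handler(ctx->timer, ^{
        /* periodic work goes here */
    });

    /* The cancellation handler runs only after any in-flight event handler
     * has returned, so signaling here lets cancelRepeatingTimer() block
     * until the source is truly quiescent. */
    dispatch_source_set_cancel_handler(ctx->timer, ^{
        dispatch_semaphore_signal(ctx->timerCanceled);
    });

    dispatch_resume(ctx->timer);
}
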
static void read_from_source(void *_source)
{
    dispatch_source_t source = (dispatch_source_t)_source;
    int descriptor = dispatch_source_get_handle(source);
    if (_read_redirect(descriptor, 0) == EOF)
        dispatch_source_cancel(source);
}

void RequestTimer::setTimeout(int seconds) {
    m_timeoutSeconds = seconds > 0 ? seconds : 0;

    cancelTimerSource();
    if (!m_timeoutSeconds) {
        return;
    }

    dispatch_queue_t q =
        dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
    m_timerSource = dispatch_source_create(
        DISPATCH_SOURCE_TYPE_TIMER, 0, DISPATCH_TIMER_STRICT, q);

    dispatch_time_t t =
        dispatch_time(DISPATCH_TIME_NOW, m_timeoutSeconds * NSEC_PER_SEC);
    dispatch_source_set_timer(m_timerSource, t, DISPATCH_TIME_FOREVER, 0);

    // Use the timer group as a semaphore. When the source is cancelled,
    // libdispatch will make sure all pending event handlers have finished before
    // invoking the cancel handler. This means that if we cancel the source and
    // then wait on the timer group, when we are done waiting, we know the source
    // is completely done and it's safe to free memory (e.g., in the destructor).
    // See cancelTimerSource() above.
    dispatch_group_enter(m_timerGroup);
    dispatch_source_set_event_handler(m_timerSource, ^{
        onTimeout();

        // Cancelling ourselves isn't needed for correctness, but we can go ahead
        // and do it now instead of waiting on it later, so why not. (Also,
        // getRemainingTime does use this opportunistically, but it's best effort.)
        dispatch_source_cancel(m_timerSource);
    });

static void asynchttp_complete(asynchttp_t *http)
{
    secdebug("http", "http: %p", http);

    /* Shutdown streams and timer, we're about to invoke our client callback. */
    if (http->stream) {
        CFReadStreamSetClient(http->stream, kCFStreamEventNone, NULL, NULL);
        CFReadStreamSetDispatchQueue(http->stream, NULL);
        CFReadStreamClose(http->stream);
        CFReleaseNull(http->stream);
    }
    if (http->timer) {
        dispatch_source_cancel(http->timer);
        dispatch_release_null(http->timer);
    }

    if (http->completed) {
        /* This should probably move to our clients. */
        CFTimeInterval maxAge = NULL_TIME;
        if (http->response) {
            CFStringRef cacheControl = CFHTTPMessageCopyHeaderFieldValue(http->response,
                    CFSTR("cache-control"));
            if (cacheControl) {
                CFStringRef maxAgeValue = copyParseMaxAge(cacheControl);
                CFRelease(cacheControl);
                if (maxAgeValue) {
                    secdebug("http", "http header max-age: %@", maxAgeValue);
                    maxAge = CFStringGetDoubleValue(maxAgeValue);
                    CFRelease(maxAgeValue);
                }
            }
        }
        http->completed(http, maxAge);
    }
}

static void closeSocketClient(SocketClientRef client)
{
    /* The source may be suspended at this point; resume it first so it can be
     * cancelled and released safely (a suspended source must not be released,
     * and its cancellation handler will not run while it is suspended). */
    dispatch_resume(client->source);
    dispatch_source_cancel(client->source);
    dispatch_release(client->source);
    close(client->fd);
    free(client);
}

void net_socket_destroy(net_socket_t s)
{
    mNetworkLog("Closing socket");

    // Cancel read dispatch_source
    if (s->readDispatchSource) {
        dispatch_source_cancel(s->readDispatchSource);
    }

    // Cancel write dispatch_source
    if (s->writeDispatchSource) {
        // MUST resume the write dispatch source first so that its cancel handler is called
        net_socket_resume_write(s);
        dispatch_source_cancel(s->writeDispatchSource);
    }
}

static void gcdpoll_watch_update(AvahiWatch *w, AvahiWatchEvent a_events)
{
    if (w->w_read && !(a_events & AVAHI_WATCH_IN)) {
        dispatch_source_cancel(w->w_read);
        dispatch_release(w->w_read);
        w->w_read = NULL;
    }

    if (w->w_write && !(a_events & AVAHI_WATCH_OUT)) {
        dispatch_source_cancel(w->w_write);
        dispatch_release(w->w_write);
        w->w_write = NULL;
    }

    _gcdpoll_watch_add(w, a_events);
}

void timer_close(timer_t *t)
{
    if (t == NULL)
        return;

    if (t->t_src != NULL)
        dispatch_source_cancel(t->t_src);

    /*
     * We need to make sure that the source's event handler isn't currently running
     * before we free the timer.  We let the source's queue do the actual free.
     */
    dispatch_async(t->t_queue, ^{
        timer_free(t);
    });
}

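/*
 * Sketch (assumption, not from the original source): for the deferred free in
 * timer_close() above to be safe, the timer's source must deliver its events
 * on the same serial queue (t_queue) that later runs the freeing block, so
 * the block queued by timer_close() cannot start until any event handler
 * already running has returned.  The timer_t fields (t_src, t_queue) come
 * from the snippet; timer_create_on_queue() and its parameters are
 * hypothetical, and the queue is assumed to be serial and to outlive the timer.
 */
timer_t *timer_create_on_queue(uint64_t interval_ns, dispatch_queue_t serial_q,
        dispatch_block_t handler)
{
    timer_t *t = calloc(1, sizeof(*t));
    if (t == NULL)
        return NULL;

    t->t_queue = serial_q;
    t->t_src = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, serial_q);

    dispatch_source_set_timer(t->t_src,
            dispatch_time(DISPATCH_TIME_NOW, (int64_t)interval_ns),
            interval_ns, 0);
    dispatch_source_set_event_handler(t->t_src, handler);
    dispatch_resume(t->t_src);

    return t;
}
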
static void gcdpoll_timeout_update(AvahiTimeout *t, const struct timeval *tv)
{
    if (tv) {
        _gcdpoll_timeout_add(t, tv);
    } else if (t->timer) {
        dispatch_source_cancel(t->timer);
        dispatch_release(t->timer);
        t->timer = NULL;
    }
}

int main(void)
{
    const char *path = "/usr/share/dict/words";
    struct stat sb;

    dispatch_test_start("Dispatch Source Read");

    int infd = open(path, O_RDONLY);
    if (infd == -1) {
        perror(path);
        exit(EXIT_FAILURE);
    }
    if (fstat(infd, &sb) == -1) {
        perror(path);
        exit(EXIT_FAILURE);
    }
    bytes_total = sb.st_size;

    if (fcntl(infd, F_SETFL, O_NONBLOCK) != 0) {
        perror(path);
        exit(EXIT_FAILURE);
    }

    if (!dispatch_test_check_evfilt_read_for_fd(infd)) {
        test_skip("EVFILT_READ kevent not firing for test file");
        test_fin(NULL);
    }

    dispatch_queue_t main_q = dispatch_get_main_queue();
    test_ptr_notnull("dispatch_get_main_queue", main_q);

    dispatch_source_t reader = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ,
            infd, 0, main_q);
    test_ptr_notnull("dispatch_source_create", reader);
    assert(reader);

    dispatch_source_set_event_handler(reader, ^{
        size_t estimated = dispatch_source_get_data(reader);
        fprintf(stderr, "bytes available: %zu\n", estimated);
        test_double_less_than_or_equal("estimated", estimated, bytes_total - bytes_read);
        const ssize_t bufsiz = 1024*500; // 500 KB buffer
        static char buffer[1024*500];    // 500 KB buffer
        ssize_t actual = read(infd, buffer, sizeof(buffer));
        bytes_read += actual;
        printf("bytes read: %zd\n", actual);
        if (actual < bufsiz) {
            actual = read(infd, buffer, sizeof(buffer));
            bytes_read += actual;
            // confirm EOF condition
            test_long("EOF", actual, 0);
            dispatch_source_cancel(reader);
        }
    });

static void event_handler(void* context)
{
    UNREFERENCED_PARAMETER(context);
    ++i;
    fprintf(stderr, "%d\n", i);

    if (i >= 7) {
        dispatch_source_cancel(timer);
    } else if (i == 1) {
        dispatch_source_set_timer(timer, 0, 100, 0);
    }
}

void test_proc(pid_t bad_pid)
{
    dispatch_source_t proc_s[PID_CNT], proc;
    int res;
    pid_t pid, monitor_pid;

    event_cnt = 0;

    // Create a process and register multiple observers.  Send a signal,
    // exit the process, etc., and verify all observers were notified.
    posix_spawnattr_t attr;
    res = posix_spawnattr_init(&attr);
    assert(res == 0);
#if HAVE_DECL_POSIX_SPAWN_START_SUSPENDED
    res = posix_spawnattr_setflags(&attr, POSIX_SPAWN_START_SUSPENDED);
    assert(res == 0);
#endif
    char* args[] = { "/bin/sleep", "2", NULL };
    res = posix_spawnp(&pid, args[0], NULL, &attr, args, NULL);
    if (res < 0) {
        perror(args[0]);
        exit(127);
    }
    res = posix_spawnattr_destroy(&attr);
    assert(res == 0);

    dispatch_group_t group = dispatch_group_create();

    assert(pid > 0);
    monitor_pid = bad_pid ? bad_pid : pid; // rdar://problem/8090801

    int i;
    for (i = 0; i < PID_CNT; ++i) {
        dispatch_group_enter(group);
        proc = proc_s[i] = dispatch_source_create(DISPATCH_SOURCE_TYPE_PROC,
                monitor_pid, DISPATCH_PROC_EXIT, dispatch_get_global_queue(0, 0));
        test_ptr_notnull("dispatch_source_proc_create", proc);
        dispatch_source_set_event_handler(proc, ^{
            long flags = dispatch_source_get_data(proc);
            test_long("DISPATCH_PROC_EXIT", flags, DISPATCH_PROC_EXIT);
            event_cnt++;
            dispatch_source_cancel(proc);
        });
        dispatch_source_set_cancel_handler(proc, ^{
            dispatch_group_leave(group);
        });

void RequestTimer::cancelTimerSource() {
    if (m_timerSource) {
        dispatch_source_cancel(m_timerSource);
        dispatch_group_wait(m_timerGroup, DISPATCH_TIME_FOREVER);

        // At this point it is safe to free memory, the source or even ourselves (if
        // this is part of the destructor). See the way we set up the timer group
        // and cancellation handler in setTimeout() below.
        dispatch_release(m_timerSource);
        m_timerSource = nullptr;
    }
}

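/*
 * Sketch (illustration only, not HHVM code): the "group as semaphore" pattern
 * that the RequestTimer comments above describe, reduced to a self-contained
 * plain-C example.  All names here are hypothetical.
 */
#include <dispatch/dispatch.h>

struct one_shot_timer {
    dispatch_source_t source;
    dispatch_group_t group;
};

static void one_shot_timer_start(struct one_shot_timer *t, dispatch_queue_t q,
        uint64_t delay_ns, dispatch_block_t fire)
{
    t->group = dispatch_group_create();
    t->source = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, q);
    dispatch_source_set_timer(t->source,
            dispatch_time(DISPATCH_TIME_NOW, (int64_t)delay_ns),
            DISPATCH_TIME_FOREVER, 0);

    dispatch_group_enter(t->group);
    dispatch_source_set_event_handler(t->source, fire);
    /* The cancellation handler only runs once no event handler is in flight,
     * so it is the right place to balance the enter above. */
    dispatch_source_set_cancel_handler(t->source, ^{
        dispatch_group_leave(t->group);
    });
    dispatch_resume(t->source);
}

static void one_shot_timer_stop(struct one_shot_timer *t)
{
    dispatch_source_cancel(t->source);
    /* Blocks until the cancellation handler has run; after that it is safe
     * to release the source and free any state the handlers touched. */
    dispatch_group_wait(t->group, DISPATCH_TIME_FOREVER);
    dispatch_release(t->source);
    dispatch_release(t->group);
}
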
/******************************************************************************
 * _kextmanager_lock_volume tries to lock volumes for clients (kextutil)
 *****************************************************************************/
static void removeKextutilLock(void)
{
    if (_gKextutilLock) {
        dispatch_source_cancel(_gKextutilLock);
    }

    if (gKernelRequestsPending) {
        kextd_process_kernel_requests();
    }

    CFRunLoopWakeUp(CFRunLoopGetCurrent());
    return;
}

static void __RSFileMonitorClassDeallocate(RSTypeRef rs)
{
    RSFileMonitorRef monitor = (RSFileMonitorRef)rs;

    __RSFileMonitorLock(monitor);
    if (monitor->_filePath)
        RSRelease(monitor->_filePath);
    if (monitor->_source) {
        dispatch_source_cancel(monitor->_source);
        close(monitor->_fd);
        monitor->_fd = 0;
        dispatch_release(monitor->_source);
        monitor->_source = nil;
    }
    __RSFileMonitorUnlock(monitor);
}

static void redirect_atexit(void)
{
    int i;

    /* stdout is linebuffered, so flush the buffer */
    if (redirect_descriptors[STDOUT_FILENO].buf)
        fflush(stdout);

    /* Cancel all of our dispatch sources, so they flush to ASL */
    for (i = 0; i < n_redirect_descriptors; i++)
        if (redirect_descriptors[i].read_source)
            dispatch_source_cancel(redirect_descriptors[i].read_source);

    /* Wait up to three seconds for our sources to flush to ASL */
    dispatch_group_wait(read_source_group,
            dispatch_time(DISPATCH_TIME_NOW, 3LL * NSEC_PER_SEC));
}

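/*
 * Sketch (assumption about code not shown above): for the bounded
 * dispatch_group_wait() in redirect_atexit() to observe the flush, each
 * descriptor's read source would typically be tied to read_source_group when
 * it is created, roughly as below.  Only read_source_group and the
 * read_source field come from the snippet; the helper name and the extern
 * declaration are hypothetical.
 */
extern dispatch_group_t read_source_group;

static void redirect_source_register(dispatch_source_t src)
{
    dispatch_group_enter(read_source_group);
    dispatch_source_set_cancel_handler(src, ^{
        /* A final flush to ASL would happen here, then the group is left so
         * redirect_atexit() stops waiting. */
        dispatch_group_leave(read_source_group);
    });
    dispatch_resume(src);
}
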
static int _gcdpoll_watch_add(AvahiWatch *w, AvahiWatchEvent a_events)
{
    dispatch_source_t sr = NULL;
    dispatch_source_t sw = NULL;

    if ((a_events & AVAHI_WATCH_IN) && !w->w_read) {
        sr = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, w->fd, 0, mdns_sq);
        if (!sr)
            return -1;

        dispatch_set_context(sr, w);
        dispatch_source_set_event_handler_f(sr, gcdpollcb_watch_read);
    }

    if ((a_events & AVAHI_WATCH_OUT) && !w->w_write) {
        sw = dispatch_source_create(DISPATCH_SOURCE_TYPE_WRITE, w->fd, 0, mdns_sq);
        if (!sw) {
            if (sr) {
                dispatch_source_cancel(sr);
                dispatch_release(sr);
            }
            return -1;
        }

        dispatch_set_context(sw, w);
        dispatch_source_set_event_handler_f(sw, gcdpollcb_watch_write);
    }

    if (sr) {
        w->w_read = sr;
        dispatch_resume(sr);
    }
    if (sw) {
        w->w_write = sw;
        dispatch_resume(sw);
    }

    return 0;
}

/* timer cb (cancel + release) */
static void gcdpollcb_timer(void *arg)
{
    AvahiTimeout *t;

    t = (AvahiTimeout *)arg;

    if (t->timer) {
        dispatch_source_cancel(t->timer);
        dispatch_release(t->timer);
        t->timer = NULL;
    }

    t->cb(t, t->userdata);
}

/*
 * Free a vnode_t and cancel/release its dispatch source.
 */
static void _vnode_free(vnode_t *vnode)
{
    dispatch_source_cancel(vnode->src);

    /*
     * Actually free the vnode on the pathwatch queue.  This allows any
     * enqueued _vnode_event operations to complete before the vnode disappears.
     * _vnode_event() quietly returns if the source has been cancelled.
     */
    dispatch_async(_global.pathwatch_queue, ^{
        dispatch_release(vnode->src);
        free(vnode->path);
        free(vnode->path_node);
        free(vnode);
    });
}

void test_fin(void *cxt)
{
    fprintf(stderr, "Called back every %llu us on average\n",
            (delay/count)/NSEC_PER_USEC);
    test_long_less_than("Frequency", 1, ceil((double)delay/(count*interval)));

    int i;
    for (i = 0; i < N; i++) {
        dispatch_source_cancel(t[i]);
        dispatch_release(t[i]);
    }

    dispatch_resume(q);
    dispatch_release(q);
    dispatch_release(g);
    test_ptr("finalizer ran", cxt, cxt);
    test_stop();
}

void WorkQueue::unregisterMachPortEventHandler(mach_port_t machPort)
{
    ASSERT(machPort != MACH_PORT_NULL);

    MutexLocker locker(m_eventSourcesMutex);

    HashMap<mach_port_t, EventSource*>::iterator it = m_eventSources.find(machPort);
    ASSERT(it != m_eventSources.end());
    ASSERT(m_eventSources.contains(machPort));

    EventSource* eventSource = it->second;

    // Cancel and release the source. It will be deleted in its finalize handler.
    dispatch_source_cancel(eventSource->dispatchSource());
    dispatch_release(eventSource->dispatchSource());

    m_eventSources.remove(it);
}

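/*
 * Sketch (illustration in plain C rather than WebKit's C++): the "deleted in
 * its finalize handler" arrangement mentioned above usually looks like this:
 * a context object is attached to the source, and a finalizer registered with
 * dispatch_set_finalizer_f() frees it once the cancelled source's last
 * reference is dropped and its handlers have completed.  All names here are
 * hypothetical.
 */
#include <dispatch/dispatch.h>
#include <mach/mach.h>
#include <stdlib.h>

struct event_source_ctx {
    mach_port_t port;
    /* ... whatever the event handler needs ... */
};

static void event_source_finalize(void *context)
{
    /* Runs after the source has been released and all of its handlers have
     * finished, so nothing can still be using the context. */
    free(context);
}

static dispatch_source_t make_port_source(mach_port_t port, dispatch_queue_t q)
{
    struct event_source_ctx *ctx = calloc(1, sizeof(*ctx));
    if (ctx == NULL)
        return NULL;
    ctx->port = port;

    dispatch_source_t src = dispatch_source_create(DISPATCH_SOURCE_TYPE_MACH_RECV,
            port, 0, q);
    if (src == NULL) {
        free(ctx);
        return NULL;
    }
    dispatch_set_context(src, ctx);
    dispatch_set_finalizer_f(src, event_source_finalize);
    return src;
}
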