int uv_fs_event_start(uv_fs_event_t* handle,
                      uv_fs_event_cb cb,
                      const char* filename,
                      unsigned int flags) {
  int portfd;
  int first_run;
  int err;

  if (uv__is_active(handle))
    return -EINVAL;

  first_run = 0;
  if (handle->loop->fs_fd == -1) {
    portfd = port_create();
    if (portfd == -1)
      return -errno;
    handle->loop->fs_fd = portfd;
    first_run = 1;
  }

  uv__handle_start(handle);
  handle->filename = strdup(filename);
  handle->fd = PORT_UNUSED;
  handle->cb = cb;

  memset(&handle->fo, 0, sizeof handle->fo);
  handle->fo.fo_name = handle->filename;

  err = uv__fs_event_rearm(handle);
  if (err != 0)
    return err;

  if (first_run) {
    uv__io_init(&handle->loop->fs_event_watcher, uv__fs_event_read, portfd);
    uv__io_start(handle->loop, &handle->loop->fs_event_watcher, UV__POLLIN);
  }

  return 0;
}
int uv_fs_poll_start(uv_fs_poll_t* handle,
                     uv_fs_poll_cb cb,
                     const char* path,
                     unsigned int interval) {
  struct poll_ctx* ctx;
  uv_loop_t* loop;
  size_t len;

  if (uv__is_active(handle))
    return 0;

  loop = handle->loop;
  len = strlen(path);
  ctx = calloc(1, sizeof(*ctx) + len);

  if (ctx == NULL)
    return uv__set_artificial_error(loop, UV_ENOMEM);

  ctx->loop = loop;
  ctx->poll_cb = cb;
  ctx->interval = interval ? interval : 1;
  ctx->start_time = uv_now(loop);
  ctx->parent_handle = handle;
  memcpy(ctx->path, path, len + 1);

  if (uv_timer_init(loop, &ctx->timer_handle))
    abort();

  ctx->timer_handle.flags |= UV__HANDLE_INTERNAL;
  uv__handle_unref(&ctx->timer_handle);

  if (uv_fs_stat(loop, &ctx->fs_req, ctx->path, poll_cb))
    abort();

  handle->poll_ctx = ctx;
  uv__handle_start(handle);

  return 0;
}
int uv_fs_poll_start(uv_fs_poll_t* handle,
                     uv_fs_poll_cb cb,
                     const char* path,
                     unsigned int interval) {
  uv_fs_t* req;
  size_t len;

  if (uv__is_active(handle))
    return 0;

  len = strlen(path) + 1;
  req = malloc(sizeof(*req) + len);

  if (req == NULL)
    return uv__set_artificial_error(handle->loop, UV_ENOMEM);

  req->data = handle;
  handle->path = memcpy(req + 1, path, len);
  handle->fs_req = req;
  handle->poll_cb = cb;
  handle->interval = interval ? interval : 1;
  handle->start_time = uv_now(handle->loop);
  handle->busy_polling = 0;
  memset(&handle->statbuf, 0, sizeof(handle->statbuf));

  if (uv_fs_stat(handle->loop, handle->fs_req, handle->path, poll_cb))
    abort();

  uv__handle_start(handle);

  return 0;
}
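/* A minimal usage sketch for the fs-poll start paths above, assuming the
 * public libuv entry points (uv_fs_poll_init/uv_fs_poll_start) and the
 * modern four-argument callback signature; "on_change" and the watched
 * filename are hypothetical. */
#include <stdio.h>
#include <uv.h>

static void on_change(uv_fs_poll_t* handle,
                      int status,
                      const uv_stat_t* prev,
                      const uv_stat_t* curr) {
  if (status == 0)
    printf("change detected\n");
}

int main(void) {
  uv_fs_poll_t poller;
  uv_fs_poll_init(uv_default_loop(), &poller);
  /* Stat the file every 2000 ms; an interval of 0 is coerced to 1 above. */
  uv_fs_poll_start(&poller, on_change, "watched.txt", 2000);
  return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
}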
int uv_fs_event_stop(uv_fs_event_t* handle) {
  if (!uv__is_active(handle))
    return 0;

  uv__handle_stop(handle);

#if defined(__APPLE__)
  if (uv__fsevents_close(handle))
#endif /* defined(__APPLE__) */
  {
    uv__io_close(handle->loop, &handle->event_watcher);
  }

  uv__free(handle->path);
  handle->path = NULL;

  if (handle->event_watcher.fd != -1) {
    /* When FSEvents is used, we don't use the event_watcher's fd under
     * certain conditions (see uv_fs_event_start). */
    uv__close(handle->event_watcher.fd);
    handle->event_watcher.fd = -1;
  }

  return 0;
}
int uv_fs_event_stop(uv_fs_event_t* handle) {
  if (!uv__is_active(handle))
    return 0;

  if (handle->dir_handle != INVALID_HANDLE_VALUE) {
    CloseHandle(handle->dir_handle);
    handle->dir_handle = INVALID_HANDLE_VALUE;
  }

  uv__handle_stop(handle);

  if (handle->filew) {
    uv__free(handle->filew);
    handle->filew = NULL;
  }

  if (handle->short_filew) {
    uv__free(handle->short_filew);
    handle->short_filew = NULL;
  }

  if (handle->path) {
    uv__free(handle->path);
    handle->path = NULL;
  }

  if (handle->dirw) {
    uv__free(handle->dirw);
    handle->dirw = NULL;
  }

  return 0;
}
int uv_timer_start(uv_timer_t* handle,
                   uv_timer_cb cb,
                   uint64_t timeout,
                   uint64_t repeat) {
  uint64_t clamped_timeout;

  if (cb == NULL)
    return UV_EINVAL;

  if (uv__is_active(handle))
    uv_timer_stop(handle);

  clamped_timeout = handle->loop->time + timeout;
  if (clamped_timeout < timeout)
    clamped_timeout = (uint64_t) -1;

  handle->timer_cb = cb;
  handle->timeout = clamped_timeout;
  handle->repeat = repeat;
  /* start_id is the second index to be compared in uv__timer_cmp() */
  handle->start_id = handle->loop->timer_counter++;

  heap_insert(timer_heap(handle->loop),
              (struct heap_node*) &handle->heap_node,
              timer_less_than);
  uv__handle_start(handle);

  return 0;
}
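/* A minimal sketch of driving the timer implementation above through the
 * public API, assuming the modern single-argument uv_timer_cb signature
 * (older variants in this file pass an extra status parameter); "on_tick"
 * is a hypothetical callback. */
#include <stdio.h>
#include <uv.h>

static void on_tick(uv_timer_t* handle) {
  printf("tick\n");
  uv_timer_stop(handle);  /* One-shot: stop after the first expiry. */
}

int main(void) {
  uv_timer_t timer;
  uv_timer_init(uv_default_loop(), &timer);
  /* Fire once after 1000 ms; a nonzero repeat would rearm automatically. */
  uv_timer_start(&timer, on_tick, 1000, 0);
  return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
}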
int uv_timer_stop(uv_timer_t* handle) {
  if (!uv__is_active(handle))
    return 0;

  RB_REMOVE(uv__timers, &handle->loop->timer_handles, handle);
  uv__handle_stop(handle);

  return 0;
}
int uv_timer_again(uv_timer_t* timer) {
  if (!uv__is_active(timer)) {
    uv__set_artificial_error(timer->loop, UV_EINVAL);
    return -1;
  }

  assert(uv__timer_repeating(timer));
  ev_timer_again(timer->loop->ev, &timer->timer_watcher);

  return 0;
}
int uv_timer_stop(uv_timer_t* handle) {
  if (!uv__is_active(handle))
    return 0;

  heap_remove(timer_heap(handle->loop),
              (struct heap_node*) &handle->heap_node,
              timer_less_than);
  uv__handle_stop(handle);

  return 0;
}
static void timer_cb(uv_timer_t* timer, int status) {
  uv_fs_poll_t* handle;

  handle = container_of(timer, uv_fs_poll_t, timer_handle);
  handle->start_time = uv_now(handle->loop);
  handle->fs_req->data = handle;

  if (uv_fs_stat(handle->loop, handle->fs_req, handle->path, poll_cb))
    abort();

  assert(uv__is_active(handle));
}
int uv_fs_poll_stop(uv_fs_poll_t* handle) {
  struct poll_ctx* ctx;

  if (!uv__is_active(handle))
    return 0;

  ctx = handle->poll_ctx;
  assert(ctx != NULL);
  assert(ctx->parent_handle != NULL);

  ctx->parent_handle = NULL;
  handle->poll_ctx = NULL;

  /* Close the timer if it's active. If it's inactive, there's a stat request
   * in progress and poll_cb will take care of the cleanup.
   */
  if (uv__is_active(&ctx->timer_handle))
    uv_close((uv_handle_t*) &ctx->timer_handle, timer_close_cb);

  uv__handle_stop(handle);

  return 0;
}
static void uv__timer_cb(EV_P_ ev_timer* w, int revents) {
  uv_timer_t* timer = container_of(w, uv_timer_t, timer_watcher);

  if (!uv__is_active(timer))
    return;

  if (!uv__timer_repeating(timer))
    uv__handle_stop(timer);

  if (timer->timer_cb)
    timer->timer_cb(timer, 0);
}
int uv_fs_event_start(uv_fs_event_t* handle,
                      uv_fs_event_cb cb,
                      const char* path,
                      unsigned int flags) {
  struct watcher_list* w;
  int events;
  int err;
  int wd;

  if (uv__is_active(handle))
    return -EINVAL;

  err = init_inotify(handle->loop);
  if (err)
    return err;

  events = UV__IN_ATTRIB
         | UV__IN_CREATE
         | UV__IN_MODIFY
         | UV__IN_DELETE
         | UV__IN_DELETE_SELF
         | UV__IN_MOVE_SELF
         | UV__IN_MOVED_FROM
         | UV__IN_MOVED_TO;

  wd = uv__inotify_add_watch(handle->loop->inotify_fd, path, events);
  if (wd == -1)
    return -errno;

  w = find_watcher(handle->loop, wd);
  if (w)
    goto no_insert;

  w = uv__malloc(sizeof(*w) + strlen(path) + 1);
  if (w == NULL)
    return -ENOMEM;

  w->wd = wd;
  w->path = strcpy((char*) (w + 1), path);
  QUEUE_INIT(&w->watchers);
  w->iterating = 0;
  RB_INSERT(watcher_root, CAST(&handle->loop->inotify_watchers), w);

no_insert:
  uv__handle_start(handle);
  QUEUE_INSERT_TAIL(&w->watchers, &handle->watchers);
  handle->path = w->path;
  handle->cb = cb;
  handle->wd = wd;

  return 0;
}
int uv_fs_event_start(uv_fs_event_t* handle,
                      uv_fs_event_cb cb,
                      const char* path,
                      unsigned int flags) {
#if defined(__APPLE__)
  struct stat statbuf;
#endif /* defined(__APPLE__) */
  int fd;

  if (uv__is_active(handle))
    return -EINVAL;

  /* TODO open asynchronously - but how do we report back errors? */
  fd = open(path, O_RDONLY);
  if (fd == -1)
    return -errno;

  uv__handle_start(handle);
  uv__io_init(&handle->event_watcher, uv__fs_event, fd);
  handle->path = uv__strdup(path);
  handle->cb = cb;

#if defined(__APPLE__)
  /* Nullify field to perform checks later */
  handle->cf_cb = NULL;
  handle->realpath = NULL;
  handle->realpath_len = 0;
  handle->cf_flags = flags;

  if (fstat(fd, &statbuf))
    goto fallback;
  /* FSEvents works only with directories */
  if (!(statbuf.st_mode & S_IFDIR))
    goto fallback;

  /* The fallback fd is no longer needed */
  uv__close(fd);
  handle->event_watcher.fd = -1;

  return uv__fsevents_init(handle);

fallback:
#endif /* defined(__APPLE__) */

  uv__io_start(handle->loop, &handle->event_watcher, POLLIN);

  return 0;
}
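/* A minimal usage sketch for the event-based watchers above, assuming the
 * public uv_fs_event API and the UV_RENAME/UV_CHANGE event bits; the watched
 * directory and the "on_fs_event" name are hypothetical. */
#include <stdio.h>
#include <uv.h>

static void on_fs_event(uv_fs_event_t* handle,
                        const char* filename,
                        int events,
                        int status) {
  if (status < 0)
    return;  /* e.g. the watched path disappeared */
  printf("%s: %s\n",
         (events & UV_RENAME) ? "rename" : "change",
         filename != NULL ? filename : "(unknown)");
}

int main(void) {
  uv_fs_event_t watcher;
  uv_fs_event_init(uv_default_loop(), &watcher);
  uv_fs_event_start(&watcher, on_fs_event, ".", 0);
  return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
}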
void uv__finish_close(uv_handle_t* handle) {
  assert(!uv__is_active(handle));
  assert(handle->flags & UV_CLOSING);
  assert(!(handle->flags & UV_CLOSED));
  handle->flags |= UV_CLOSED;

  switch (handle->type) {
    case UV_PREPARE:
    case UV_CHECK:
    case UV_IDLE:
    case UV_ASYNC:
    case UV_TIMER:
    case UV_PROCESS:
      break;

    case UV_NAMED_PIPE:
    case UV_TCP:
    case UV_TTY:
      assert(!ev_is_active(&((uv_stream_t*) handle)->read_watcher));
      assert(!ev_is_active(&((uv_stream_t*) handle)->write_watcher));
      assert(((uv_stream_t*) handle)->fd == -1);
      uv__stream_destroy((uv_stream_t*) handle);
      break;

    case UV_UDP:
      uv__udp_finish_close((uv_udp_t*) handle);
      break;

    case UV_FS_EVENT:
      break;

    case UV_POLL:
      break;

    default:
      assert(0);
      break;
  }

  if (handle->close_cb) {
    handle->close_cb(handle);
  }

  uv__handle_unref(handle);
}
int uv_fs_event_stop(uv_fs_event_t* handle) {
  struct watcher_list* w;

  if (!uv__is_active(handle))
    return 0;

  w = find_watcher(handle->loop, handle->wd);
  assert(w != NULL);

  handle->wd = -1;
  handle->path = NULL;
  uv__handle_stop(handle);
  QUEUE_REMOVE(&handle->watchers);

  maybe_free_watcher_list(w, handle->loop);

  return 0;
}
int uv_timer_start(uv_timer_t* handle,
                   uv_timer_cb cb,
                   uint64_t timeout,
                   uint64_t repeat) {
  if (uv__is_active(handle))
    uv_timer_stop(handle);

  handle->timer_cb = cb;
  handle->timeout = handle->loop->time + timeout;
  handle->repeat = repeat;
  /* start_id is the second index to be compared in uv__timer_cmp() */
  handle->start_id = handle->loop->timer_counter++;

  RB_INSERT(uv__timers, &handle->loop->timer_handles, handle);
  uv__handle_start(handle);

  return 0;
}
int uv_fs_poll_stop(uv_fs_poll_t* handle) {
  struct poll_ctx* ctx;

  if (!uv__is_active(handle))
    return 0;

  ctx = handle->poll_ctx;
  assert(ctx != NULL);
  assert(ctx->parent_handle != NULL);

  ctx->parent_handle = NULL;
  uv_timer_stop(&ctx->timer_handle);

  handle->poll_ctx = NULL;
  uv__handle_stop(handle);

  return 0;
}
int uv_fs_event_stop(uv_fs_event_t* handle) {
  if (!uv__is_active(handle))
    return -EINVAL;

  if (handle->fd == PORT_FIRED || handle->fd == PORT_LOADED) {
    port_dissociate(handle->loop->fs_fd,
                    PORT_SOURCE_FILE,
                    (uintptr_t) &handle->fo);
  }

  handle->fd = PORT_DELETED;
  free(handle->filename);
  handle->filename = NULL;
  handle->fo.fo_name = NULL;
  uv__handle_stop(handle);

  return 0;
}
int uv_timer_start(uv_timer_t* handle,
                   uv_timer_cb cb,
                   int64_t timeout,
                   int64_t repeat) {
  assert(timeout >= 0);
  assert(repeat >= 0);

  if (uv__is_active(handle))
    uv_timer_stop(handle);

  handle->timer_cb = cb;
  handle->timeout = handle->loop->time + timeout;
  handle->repeat = repeat;

  RB_INSERT(uv__timers, &handle->loop->timer_handles, handle);
  uv__handle_start(handle);

  return 0;
}
int uv_fs_event_stop(uv_fs_event_t* handle) {
  if (!uv__is_active(handle))
    return 0;

  uv__io_close(handle->loop, &handle->event_watcher);
  uv__handle_stop(handle);

  if (uv__path_is_a_directory(handle->path) == 0) {
    uv__free(handle->dir_filename);
    handle->dir_filename = NULL;
  }

  uv__free(handle->path);
  handle->path = NULL;
  uv__close(handle->event_watcher.fd);
  handle->event_watcher.fd = -1;

  return 0;
}
int uv_timer_start(uv_timer_t* timer,
                   uv_timer_cb cb,
                   int64_t timeout,
                   int64_t repeat) {
  if (uv__is_active(timer))
    uv_timer_stop(timer);

  timer->timer_cb = cb;

  if (repeat)
    timer->flags |= UV_TIMER_REPEAT;
  else
    timer->flags &= ~UV_TIMER_REPEAT;

  ev_timer_set(&timer->timer_watcher, timeout / 1000.0, repeat / 1000.0);
  ev_timer_start(timer->loop->ev, &timer->timer_watcher);
  uv__handle_start(timer);

  return 0;
}
static void uv__finish_close(uv_handle_t* handle) {
  assert(!uv__is_active(handle));
  assert(handle->flags & UV_CLOSING);
  assert(!(handle->flags & UV_CLOSED));
  handle->flags |= UV_CLOSED;

  switch (handle->type) {
    case UV_PREPARE:
    case UV_CHECK:
    case UV_IDLE:
    case UV_ASYNC:
    case UV_TIMER:
    case UV_PROCESS:
    case UV_FS_EVENT:
    case UV_FS_POLL:
    case UV_POLL:
    case UV_SIGNAL:
      break;

    case UV_NAMED_PIPE:
    case UV_TCP:
    case UV_TTY:
      uv__stream_destroy((uv_stream_t*) handle);
      break;

    case UV_UDP:
      uv__udp_finish_close((uv_udp_t*) handle);
      break;

    default:
      assert(0);
      break;
  }

  uv__handle_unref(handle);
  ngx_queue_remove(&handle->handle_queue);

  if (handle->close_cb) {
    handle->close_cb(handle);
  }
}
int uv_fs_event_stop(uv_fs_event_t* handle) {
  if (!uv__is_active(handle))
    return -EINVAL;

  uv__handle_stop(handle);

#if defined(__APPLE__)
  if (uv__fsevents_close(handle))
#endif /* defined(__APPLE__) */
  {
    uv__io_close(handle->loop, &handle->event_watcher);
  }

  free(handle->path);
  handle->path = NULL;

  uv__close(handle->event_watcher.fd);
  handle->event_watcher.fd = -1;

  return 0;
}
int uv_fs_event_start(uv_fs_event_t* handle,
                      uv_fs_event_cb cb,
                      const char* path,
                      unsigned int flags) {
  int portfd;
  int first_run;
  int err;

  if (uv__is_active(handle))
    return -EINVAL;

  first_run = 0;
  if (handle->loop->fs_fd == -1) {
    portfd = port_create();
    if (portfd == -1)
      return -errno;
    handle->loop->fs_fd = portfd;
    first_run = 1;
  }

  uv__handle_start(handle);
  handle->path = uv__strdup(path);
  handle->fd = PORT_UNUSED;
  handle->cb = cb;

  memset(&handle->fo, 0, sizeof handle->fo);
  handle->fo.fo_name = handle->path;

  err = uv__fs_event_rearm(handle);
  if (err != 0)
    return err;

  if (first_run) {
    uv__io_init(&handle->loop->fs_event_watcher, uv__fs_event_read, portfd);
    uv__io_start(handle->loop, &handle->loop->fs_event_watcher, UV__POLLIN);
  }

  return 0;
}
int uv_fs_event_stop(uv_fs_event_t* handle) {
  struct watcher_list* w;

  if (!uv__is_active(handle))
    return 0;

  w = find_watcher(handle->loop, handle->wd);
  assert(w != NULL);

  handle->wd = -1;
  handle->path = NULL;
  uv__handle_stop(handle);
  QUEUE_REMOVE(&handle->watchers);

  if (QUEUE_EMPTY(&w->watchers)) {
    /* No watchers left for this path. Clean up. */
    RB_REMOVE(watcher_root, CAST(&handle->loop->inotify_watchers), w);
    uv__inotify_rm_watch(handle->loop->inotify_fd, w->wd);
    uv__free(w);
  }

  return 0;
}
int uv_fs_poll_getpath(uv_fs_poll_t* handle, char* buf, size_t* len) {
  struct poll_ctx* ctx;
  size_t required_len;

  if (!uv__is_active(handle)) {
    *len = 0;
    return UV_EINVAL;
  }

  ctx = handle->poll_ctx;
  assert(ctx != NULL);

  required_len = strlen(ctx->path) + 1;
  if (required_len > *len) {
    *len = required_len;
    return UV_ENOBUFS;
  }

  memcpy(buf, ctx->path, required_len);
  *len = required_len;

  return 0;
}
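/* A sketch of the two-call pattern uv_fs_poll_getpath supports: probe with a
 * fixed buffer, then retry with the size reported via UV_ENOBUFS. Note this
 * variant counts the terminating NUL in the required length. The "poller"
 * argument is assumed to be an active uv_fs_poll_t; the function name is
 * hypothetical. */
#include <stdio.h>
#include <stdlib.h>
#include <uv.h>

static void print_watched_path(uv_fs_poll_t* poller) {
  char small[8];
  size_t len = sizeof(small);

  if (uv_fs_poll_getpath(poller, small, &len) == 0) {
    printf("watching: %s\n", small);
    return;
  }

  /* On UV_ENOBUFS, len now holds the required size, NUL included. */
  char* big = malloc(len);
  if (big != NULL && uv_fs_poll_getpath(poller, big, &len) == 0)
    printf("watching: %s\n", big);
  free(big);
}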
int uv_fs_event_stop(uv_fs_event_t* handle) {
#ifdef HAVE_SYS_AHAFS_EVPRODS_H
  if (!uv__is_active(handle))
    return 0;

  uv__io_close(handle->loop, &handle->event_watcher);
  uv__handle_stop(handle);

  if (uv__path_is_a_directory(handle->path) == 0) {
    uv__free(handle->dir_filename);
    handle->dir_filename = NULL;
  }

  uv__free(handle->path);
  handle->path = NULL;
  uv__close(handle->event_watcher.fd);
  handle->event_watcher.fd = -1;

  return 0;
#else
  return -ENOSYS;
#endif
}
int uv_fs_poll_stop(uv_fs_poll_t* handle) {
  if (!uv__is_active(handle))
    return 0;

  /* Don't free the fs req if it's active. Signal poll_cb that it needs to free
   * the req by removing the handle backlink.
   *
   * TODO(bnoordhuis) Have uv-unix postpone the close callback until the req
   * finishes so we don't need this pointer / lifecycle hackery. The callback
   * always runs on the next tick now.
   */
  if (handle->fs_req->data)
    handle->fs_req->data = NULL;
  else
    free(handle->fs_req);

  handle->fs_req = NULL;
  handle->path = NULL;

  uv_timer_stop(&handle->timer_handle);
  uv__handle_stop(handle);

  return 0;
}
int uv_is_active(const uv_handle_t* handle) {
  return uv__is_active(handle);
}
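/* uv_is_active is the public wrapper around the internal flag check used
 * throughout this file. A small sketch of the start/stop lifecycle it
 * observes, using a timer handle and a hypothetical no-op callback. */
#include <assert.h>
#include <uv.h>

static void noop_cb(uv_timer_t* handle) { (void) handle; }

int main(void) {
  uv_timer_t timer;
  uv_timer_init(uv_default_loop(), &timer);
  assert(!uv_is_active((uv_handle_t*) &timer));  /* initialized, not started */

  uv_timer_start(&timer, noop_cb, 100, 0);
  assert(uv_is_active((uv_handle_t*) &timer));   /* started => active */

  uv_timer_stop(&timer);
  assert(!uv_is_active((uv_handle_t*) &timer));  /* stopped => inactive */
  return 0;
}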