void rxvt_init ()
{
  ptytty::init ();

  if (!ev_default_loop (0))
    rxvt_fatal ("cannot initialise libev (bad value for LIBEV_METHODS?)\n");

  rxvt_environ = environ;

  signal (SIGHUP,  SIG_IGN);
  signal (SIGPIPE, SIG_IGN);

  sig_handlers.sw_term.start (SIGTERM); ev_unref ();
  sig_handlers.sw_int.start  (SIGINT);  ev_unref ();

  /* need to trap SIGURG for SVR4 (Unixware) rlogin */
  /* signal (SIGURG, SIG_DFL); */

  old_xerror_handler = XSetErrorHandler ((XErrorHandler) rxvt_xerror_handler);
  // TODO: handle this with exceptions and tolerate the memory loss
  XSetIOErrorHandler (rxvt_xioerror_handler);

  XrmInitialize ();
}
int Zipper::EIO_AfterAddFile(eio_req *req)
{
    HandleScope scope;
    closure_t *closure = static_cast<closure_t *>(req->data);
    ev_unref(EV_DEFAULT_UC);

    TryCatch try_catch;

    if (closure->error) {
        Local<Value> argv[1] = { Exception::Error(String::New(closure->error_name.c_str())) };
        closure->cb->Call(Context::GetCurrent()->Global(), 1, argv);
    } else {
        Local<Value> argv[1] = { Local<Value>::New(Null()) };
        closure->cb->Call(Context::GetCurrent()->Global(), 1, argv);
    }

    if (try_catch.HasCaught()) {
        FatalException(try_catch);
        //try_catch.ReThrow();
    }

    closure->zf->Unref();
    closure->cb.Dispose();
    delete closure;
    return 0;
}
void li_event_loop_end(liEventLoop *loop) {
    if (loop->end) return;
    loop->end = TRUE;

    ev_unref(loop->loop);
    li_event_loop_force_close_sockets(loop);
}
int ZipFile::EIO_AfterRead(eio_req *req)
{
    read_closure_t *closure = static_cast<read_closure_t *>(req->data);

    Handle<Value> argv[2];

    if (closure->read < 0) {
        std::stringstream s;
        s << "Error while reading zip file: " << zip_file_strerror(closure->zf->file) << "\n";
        argv[0] = Exception::Error(String::New(s.str().c_str()));
        argv[1] = Undefined();
    } else {
        argv[0] = Undefined();
        argv[1] = Integer::New(closure->read);
    }

    closure->cb->Call(Context::GetCurrent()->Global(), 2, argv);

    closure->zf->Unref();
    closure->cb.Dispose();
    delete closure;

    ev_unref(EV_DEFAULT_UC);
    return 0;
}
int uv_fs_event_init(uv_loop_t* loop,
                     uv_fs_event_t* handle,
                     const char* filename,
                     uv_fs_event_cb cb,
                     int flags) {
  int portfd;

  loop->counters.fs_event_init++;

  /* We don't support any flags yet. */
  assert(!flags);

  if ((portfd = port_create()) == -1) {
    uv__set_sys_error(loop, errno);
    return -1;
  }

  uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
  handle->filename = strdup(filename);
  handle->fd = portfd;
  handle->cb = cb;

  memset(&handle->fo, 0, sizeof handle->fo);
  handle->fo.fo_name = handle->filename;
  uv__fs_event_rearm(handle);

  ev_io_init(&handle->event_watcher, uv__fs_event_read, portfd, EV_READ);
  ev_io_start(loop->ev, &handle->event_watcher);
  ev_unref(loop->ev);

  return 0;
}
static void uv__poll(uv_loop_t* loop, int block) {
  /* bump the loop's refcount, otherwise libev does
   * a zero timeout poll and we end up busy looping
   */
  ev_ref(loop->ev);
  ev_run(loop->ev, block ? EVRUN_ONCE : EVRUN_NOWAIT);
  ev_unref(loop->ev);
}
/*****************************************************************************
 * Signal Handler
 *****************************************************************************/

static void signal_watcher_init(struct ev_signal *w,
                                struct ev_loop *loop,
                                void (*cb)(struct ev_loop*, struct ev_signal*, int),
                                int signum)
{
    ev_signal_init(w, cb, signum);
    ev_signal_start(loop, w);
    ev_unref(loop);
}
static void uv__fs_event_start(uv_fs_event_t* handle) {
  ev_io_init(&handle->event_watcher,
             uv__fs_event,
             handle->fd,
             EV_LIBUV_KQUEUE_HACK);
  ev_io_start(handle->loop->ev, &handle->event_watcher);
  ev_unref(handle->loop->ev);
}
void uv__finish_close(uv_handle_t* handle) {
  uv_loop_t* loop = handle->loop;

  assert(handle->flags & UV_CLOSING);
  assert(!(handle->flags & UV_CLOSED));
  handle->flags |= UV_CLOSED;

  switch (handle->type) {
    case UV_PREPARE:
      assert(!ev_is_active(&((uv_prepare_t*)handle)->prepare_watcher));
      break;

    case UV_CHECK:
      assert(!ev_is_active(&((uv_check_t*)handle)->check_watcher));
      break;

    case UV_IDLE:
      assert(!ev_is_active(&((uv_idle_t*)handle)->idle_watcher));
      break;

    case UV_ASYNC:
      assert(!ev_is_active(&((uv_async_t*)handle)->async_watcher));
      break;

    case UV_TIMER:
      assert(!ev_is_active(&((uv_timer_t*)handle)->timer_watcher));
      break;

    case UV_NAMED_PIPE:
    case UV_TCP:
      assert(!ev_is_active(&((uv_stream_t*)handle)->read_watcher));
      assert(!ev_is_active(&((uv_stream_t*)handle)->write_watcher));
      break;

    case UV_UDP:
      assert(!ev_is_active(&((uv_udp_t*)handle)->read_watcher));
      assert(!ev_is_active(&((uv_udp_t*)handle)->write_watcher));
      assert(((uv_udp_t*)handle)->fd == -1);
      uv__udp_destroy((uv_udp_t*)handle);
      break;

    case UV_PROCESS:
      assert(!ev_is_active(&((uv_process_t*)handle)->child_watcher));
      break;

    default:
      assert(0);
      break;
  }

  ev_idle_stop(loop->ev, &handle->next_watcher);

  if (handle->close_cb) {
    handle->close_cb(handle);
  }

  ev_unref(loop->ev);
}
void uv__finish_close(uv_handle_t* handle) {
  assert(uv_flag_is_set(handle, UV_CLOSING));
  assert(!uv_flag_is_set(handle, UV_CLOSED));
  uv_flag_set(handle, UV_CLOSED);

  switch (handle->type) {
    case UV_TCP:
      /* XXX Is it necessary to stop these watchers here? weren't they
       * supposed to be stopped in uv_close()?
       */
      ev_io_stop(EV_DEFAULT_ &handle->write_watcher);
      ev_io_stop(EV_DEFAULT_ &handle->read_watcher);

      assert(!ev_is_active(&handle->read_watcher));
      assert(!ev_is_active(&handle->write_watcher));

      close(handle->fd);
      handle->fd = -1;

      if (handle->accepted_fd >= 0) {
        close(handle->accepted_fd);
        handle->accepted_fd = -1;
      }
      break;

    case UV_PREPARE:
      assert(!ev_is_active(&handle->prepare_watcher));
      break;

    case UV_CHECK:
      assert(!ev_is_active(&handle->check_watcher));
      break;

    case UV_IDLE:
      assert(!ev_is_active(&handle->idle_watcher));
      break;

    case UV_ASYNC:
      assert(!ev_is_active(&handle->async_watcher));
      break;

    case UV_TIMER:
      assert(!ev_is_active(&handle->timer_watcher));
      break;

    default:
      assert(0);
      break;
  }

  ev_idle_stop(EV_DEFAULT_ &handle->next_watcher);

  if (handle->close_cb) {
    handle->close_cb(handle, 0);
  }

  ev_unref(EV_DEFAULT_UC);
}
static int signal_child_watch(lua_State *T)
{
    if (!signal_child_active()) {
        ev_child_start(LEM_ &signal_child_watcher);
        ev_unref(LEM); /* watcher shouldn't keep loop alive */
    }

    lua_pushboolean(T, 1);
    return 1;
}
~DecodeBaton() {
    (*image).buffer.Dispose();

#if NODE_MAJOR_VERSION == 0 && NODE_MINOR_VERSION <= 4
    ev_unref(EV_DEFAULT_UC);
#endif

    // Note: The result buffer is freed by the node Buffer's free callback
    callback.Dispose();
}
static void uv__udp_start_watcher(uv_udp_t* handle,
                                  ev_io* w,
                                  void (*cb)(EV_P_ ev_io*, int),
                                  int flags) {
  if (ev_is_active(w)) return;

  ev_set_cb(w, cb);
  ev_io_set(w, handle->fd, flags);
  ev_io_start(handle->loop->ev, w);
  ev_unref(handle->loop->ev);
}
static void se_thread_free(lua_State *L, struct se_thread *thread)
{
    ev_unref(EV_DEFAULT);
    list_del(&thread->node);
    se_assert(L, !thread->buf);
    luaL_unref(L, LUA_REGISTRYINDEX, thread->lref);
    se_assert(L, !ev_is_active(&thread->timer));
    se_assert(L, !ev_is_active(&thread->io));
    se_free(thread);
}
int uv_idle_start(uv_idle_t* idle, uv_idle_cb cb) {
  int was_active = ev_is_active(&idle->idle_watcher);

  idle->idle_cb = cb;
  ev_idle_start(idle->loop->ev, &idle->idle_watcher);

  if (!was_active) {
    ev_unref(idle->loop->ev);
  }

  return 0;
}
~BlendBaton() {
    for (Images::iterator cur = images.begin(); cur != images.end(); cur++) {
        (*cur)->buffer.Dispose();
    }

#if NODE_MAJOR_VERSION == 0 && NODE_MINOR_VERSION <= 4
    ev_unref(EV_DEFAULT_UC);
#endif

    // Note: The result buffer is freed by the node Buffer's free callback
    callback.Dispose();
}
int uv_timer_start(uv_handle_t* handle, uv_loop_cb cb, int64_t timeout,
    int64_t repeat) {
  if (ev_is_active(&handle->timer_watcher)) {
    return -1;
  }

  handle->timer_cb = cb;
  ev_timer_set(&handle->timer_watcher, timeout / 1000.0, repeat / 1000.0);
  ev_timer_start(EV_DEFAULT_UC_ &handle->timer_watcher);
  ev_unref(EV_DEFAULT_UC);
  return 0;
}
int uv_timer_start(uv_timer_t* timer, uv_timer_cb cb, int64_t timeout,
    int64_t repeat) {
  if (ev_is_active(&timer->timer_watcher)) {
    return -1;
  }

  timer->timer_cb = cb;
  ev_timer_set(&timer->timer_watcher, timeout / 1000.0, repeat / 1000.0);
  ev_timer_start(timer->loop->ev, &timer->timer_watcher);
  ev_unref(timer->loop->ev);
  return 0;
}
int uv_idle_start(uv_handle_t* handle, uv_loop_cb cb) {
  int was_active = ev_is_active(&handle->idle_watcher);

  handle->idle_cb = cb;
  ev_idle_start(EV_DEFAULT_UC_ &handle->idle_watcher);

  if (!was_active) {
    ev_unref(EV_DEFAULT_UC);
  }

  return 0;
}
int uv_timer_start(uv_timer_t* timer, uv_timer_cb cb, int64_t timeout,
    int64_t repeat) {
  if (ev_is_active(&timer->timer_watcher)) {
    return -1;
  }

  timer->timer_cb = cb;
  ev_timer_set(&timer->timer_watcher, timeout / 1000.0, repeat / 1000.0);
  ev_timer_start(EV_DEFAULT_UC_ &timer->timer_watcher);
  ev_unref(EV_DEFAULT_UC);
  return 0;
}
int uv_prepare_start(uv_prepare_t* prepare, uv_prepare_cb cb) {
  int was_active = ev_is_active(&prepare->prepare_watcher);

  prepare->prepare_cb = cb;
  ev_prepare_start(EV_DEFAULT_UC_ &prepare->prepare_watcher);

  if (!was_active) {
    ev_unref(EV_DEFAULT_UC);
  }

  return 0;
}
int uv_check_start(uv_check_t* check, uv_check_cb cb) {
  int was_active = ev_is_active(&check->check_watcher);

  check->check_cb = cb;
  ev_check_start(EV_DEFAULT_UC_ &check->check_watcher);

  if (!was_active) {
    ev_unref(EV_DEFAULT_UC);
  }

  return 0;
}
static int se_schedule(lua_State *L)
{
    ev_prepare prepare;

    prepare.data = L;
    ev_prepare_init(&prepare, se_on_schedule);
    ev_prepare_start(EV_DEFAULT_ &prepare);
    ev_unref(EV_DEFAULT);
    ev_run(EV_DEFAULT_ 0);
    return 0;
}
int uv_async_init(uv_loop_t* loop, uv_async_t* async, uv_async_cb async_cb) {
  uv__handle_init(loop, (uv_handle_t*)async, UV_ASYNC);
  loop->counters.async_init++;

  ev_async_init(&async->async_watcher, uv__async);
  async->async_cb = async_cb;

  /* Note: This does not have symmetry with the other libev wrappers. */
  ev_async_start(loop->ev, &async->async_watcher);
  ev_unref(loop->ev);

  return 0;
}
int uv_check_start(uv_check_t* check, uv_check_cb cb) {
  int was_active = ev_is_active(&check->check_watcher);

  check->check_cb = cb;
  ev_check_start(check->loop->ev, &check->check_watcher);

  if (!was_active) {
    ev_unref(check->loop->ev);
  }

  return 0;
}
int uv_prepare_start(uv_prepare_t* prepare, uv_prepare_cb cb) {
  int was_active = ev_is_active(&prepare->prepare_watcher);

  prepare->prepare_cb = cb;
  ev_prepare_start(prepare->loop->ev, &prepare->prepare_watcher);

  if (!was_active) {
    ev_unref(prepare->loop->ev);
  }

  return 0;
}
int uv_async_init(uv_async_t* async, uv_async_cb async_cb) {
  uv__handle_init((uv_handle_t*)async, UV_ASYNC);
  uv_counters()->async_init++;

  ev_async_init(&async->async_watcher, uv__async);
  async->async_watcher.data = async;

  async->async_cb = async_cb;

  /* Note: This does not have symmetry with the other libev wrappers. */
  ev_async_start(EV_DEFAULT_UC_ &async->async_watcher);
  ev_unref(EV_DEFAULT_UC);

  return 0;
}
/**
 * Ref/unref can be used to add or remove a reference count on the event loop:
 * Every watcher keeps one reference, and as long as the reference count is nonzero,
 * ev_run will not return on its own.
 *
 * This is useful when you have a watcher that you never intend to unregister,
 * but that nevertheless should not keep ev_run from returning. In such a case,
 * call EventLoop::unref after starting, and EventLoop::ref before stopping it.
 *
 * As an example, libev itself uses this for its internal signal pipe:
 * It is not visible to the libev user and should not keep ev_run from exiting if
 * no event watchers registered by it are active. It is also an excellent way to do
 * this for generic recurring timers or from within third-party libraries. Just remember
 * to unref after start and ref before stop (but only if the watcher wasn't active before,
 * or was active before, respectively. Note also that libev might stop watchers itself
 * (e.g. non-repeating timers) in which case you have to EventLoop::ref() in the callback).
 *
 * Example: Create a signal watcher, but keep it from keeping ev_run running when nothing
 * else is active:
 * <code>
 * $sig = new libev\SignalEvent(libev\SignalEvent::SIGINT, function()
 * {
 *     // Do something
 * });
 *
 * $loop->add($sig);
 * $loop->unref();
 *
 * // For some weird reason we want to unregister the above handler
 * $loop->ref();
 * $sig->stop(); // or $loop->remove($sig);
 * </code>
 *
 * @return boolean
 */
PHP_METHOD(EventLoop, unref)
{
    event_loop_object *obj = (event_loop_object *)zend_object_store_get_object(getThis() TSRMLS_CC);

    assert(obj->loop);

    if(obj->loop)
    {
        ev_unref(obj->loop);

        RETURN_BOOL(1);
    }

    RETURN_BOOL(0);
}
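The doc comment above spells out the pattern the other snippets follow: ev_unref after starting a watcher that should not keep the loop alive, and ev_ref before stopping it again. A minimal standalone C sketch of that pattern against libev's public API follows; the "heartbeat" timer and its callback are hypothetical names chosen for illustration, not taken from any of the projects quoted here.

#include <ev.h>
#include <stdio.h>

/* Hypothetical recurring timer that should not, by itself,
 * keep ev_run() from returning once all other watchers are done. */
static void heartbeat_cb(EV_P_ ev_timer *w, int revents)
{
    (void) w;
    (void) revents;
    printf("heartbeat\n");
}

int main(void)
{
    struct ev_loop *loop = EV_DEFAULT;
    ev_timer heartbeat;

    ev_timer_init(&heartbeat, heartbeat_cb, 1.0, 1.0);
    ev_timer_start(loop, &heartbeat);
    ev_unref(loop);  /* unref after start: the timer no longer pins the loop */

    /* With no other ref-counted watchers active, ev_run() returns right away;
     * if other watchers were registered, the heartbeat would keep firing
     * until they finished. */
    ev_run(loop, 0);

    ev_ref(loop);    /* ref before stop: restore the reference we dropped */
    ev_timer_stop(loop, &heartbeat);
    return 0;
}

This mirrors the libuv wrappers above (e.g. uv_timer_start followed by ev_unref), which use the same trick so that internal bookkeeping watchers do not prevent the loop from exiting.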
int uv_async_init(uv_handle_t* handle, uv_async_cb async_cb,
    uv_close_cb close_cb, void* data) {
  uv__handle_init(handle, UV_ASYNC, close_cb, data);

  ev_async_init(&handle->async_watcher, uv__async);
  handle->async_watcher.data = handle;

  handle->async_cb = async_cb;

  /* Note: This does not have symmetry with the other libev wrappers. */
  ev_async_start(EV_DEFAULT_UC_ &handle->async_watcher);
  ev_unref(EV_DEFAULT_UC);

  return 0;
}
int uv_fs_event_init(uv_loop_t* loop,
                     uv_fs_event_t* handle,
                     const char* filename,
                     uv_fs_event_cb cb,
                     int flags) {
  int events;
  int fd;

  loop->counters.fs_event_init++;

  /* We don't support any flags yet. */
  assert(!flags);

  /*
   * TODO share a single inotify fd across the event loop?
   * We'll run into fs.inotify.max_user_instances if we
   * keep creating new inotify fds.
   */
  if ((fd = new_inotify_fd()) == -1) {
    uv__set_sys_error(loop, errno);
    return -1;
  }

  events = IN_ATTRIB
         | IN_CREATE
         | IN_MODIFY
         | IN_DELETE
         | IN_DELETE_SELF
         | IN_MOVED_FROM
         | IN_MOVED_TO;

  if (inotify_add_watch(fd, filename, events) == -1) {
    uv__set_sys_error(loop, errno);
    uv__close(fd);
    return -1;
  }

  uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
  handle->filename = strdup(filename); /* this should go! */
  handle->cb = cb;
  handle->fd = fd;

  ev_io_init(&handle->read_watcher, uv__inotify_read, fd, EV_READ);
  ev_io_start(loop->ev, &handle->read_watcher);
  ev_unref(loop->ev);

  return 0;
}