/*
 * Drop one reference on a perf stream; when the count reaches zero,
 * release all OS resources for the stream's type and free the struct.
 *
 * GPUTOP_STREAM_PERF: stop the fd poller, munmap the perf ring
 * (length buffer_size + page_size), free the header-offset buffer and
 * close the perf fd.
 * GPUTOP_STREAM_I915_PERF: stop the fd poller, free both OA buffers
 * and close the i915 perf fd.
 * In both cases the user destroy_cb (if set) runs before free(stream).
 *
 * NOTE(review): the guard is `fd > 0`, so a stream holding descriptor 0
 * (a valid fd) would skip cleanup — confirm fds here are always > 0.
 */
void gputop_perf_stream_unref(struct gputop_perf_stream *stream)
{
    if (--(stream->ref_count) == 0) {
        switch(stream->type) {
        case GPUTOP_STREAM_PERF:
            if (stream->fd > 0) {
                uv_poll_stop(&stream->fd_poll);

                if (stream->perf.mmap_page) {
                    munmap(stream->perf.mmap_page,
                           stream->perf.buffer_size + page_size);
                    stream->perf.mmap_page = NULL;
                    stream->perf.buffer = NULL;
                    stream->perf.buffer_size = 0;
                }

                if (stream->perf.header_buf.offsets) {
                    free(stream->perf.header_buf.offsets);
                    stream->perf.header_buf.offsets = NULL;
                }

                close(stream->fd);
                stream->fd = -1;

                fprintf(stderr, "closed perf stream\n");
            }
            break;
        case GPUTOP_STREAM_I915_PERF:
            if (stream->fd > 0) {
                uv_poll_stop(&stream->fd_poll);

                if (stream->oa.bufs[0])
                    free(stream->oa.bufs[0]);
                if (stream->oa.bufs[1])
                    free(stream->oa.bufs[1]);

                close(stream->fd);
                stream->fd = -1;

                fprintf(stderr, "closed i915_oa perf stream\n");
            }
            break;
        }

        /* user hook runs before the stream memory disappears */
        if (stream->user.destroy_cb)
            stream->user.destroy_cb(stream);

        free(stream);
        fprintf(stderr, "freed gputop-perf stream\n");
    }
}
/*
 * libcurl CURLMOPT_SOCKETFUNCTION callback bridging curl socket interest
 * to libuv polling.  For IN/OUT/INOUT a per-socket context is (re)used
 * and the uv poll mask is derived from the action; for REMOVE the
 * watcher is stopped and the context destroyed.  Always returns 0.
 */
static int handle_socket(CURL *easy, curl_socket_t s, int action,
                         void *userp, void *socketp)
{
    curl_context_t *context;
    int events;

    switch(action) {
    case CURL_POLL_IN:
    case CURL_POLL_OUT:
    case CURL_POLL_INOUT:
        /* reuse the context libcurl handed back, or make a new one */
        context = socketp ? (curl_context_t *) socketp
                          : create_curl_context(s);
        curl_multi_assign(curl_handle, s, (void *) context);

        /* IN -> readable, OUT -> writable, INOUT -> both */
        events = 0;
        if (action != CURL_POLL_IN)
            events |= UV_WRITABLE;
        if (action != CURL_POLL_OUT)
            events |= UV_READABLE;

        uv_poll_start(&context->poll_handle, events, curl_perform);
        break;
    case CURL_POLL_REMOVE:
        if (socketp) {
            curl_context_t *old = (curl_context_t *) socketp;

            uv_poll_stop(&old->poll_handle);
            destroy_curl_context(old);
            curl_multi_assign(curl_handle, s, NULL);
        }
        break;
    default:
        abort();
    }

    return 0;
}
/*
 * libcurl CURLMOPT_SOCKETFUNCTION callback bridging curl socket interest
 * to libuv polling.
 *
 * FIX: the original only handled CURL_POLL_IN and CURL_POLL_OUT, so a
 * CURL_POLL_INOUT action (which libcurl legitimately issues when it
 * wants both directions) fell through to default: abort().  INOUT is
 * now handled by polling for UV_READABLE | UV_WRITABLE, and the context
 * setup covers it too (previously curl_context would also have been
 * used uninitialized had INOUT reached the switch).
 */
int handle_socket(CURL *easy, curl_socket_t s, int action, void *userp,
                  void *socketp)
{
    curl_context_t *curl_context;

    if (action == CURL_POLL_IN || action == CURL_POLL_OUT ||
        action == CURL_POLL_INOUT) {
        /* reuse libcurl's per-socket context, or create one */
        if (socketp) {
            curl_context = (curl_context_t*) socketp;
        }
        else {
            curl_context = create_curl_context(s);
        }
        curl_multi_assign(curl_handle, s, (void *) curl_context);
    }

    switch (action) {
    case CURL_POLL_IN:
        uv_poll_start(&curl_context->poll_handle, UV_READABLE, curl_perform);
        break;
    case CURL_POLL_OUT:
        uv_poll_start(&curl_context->poll_handle, UV_WRITABLE, curl_perform);
        break;
    case CURL_POLL_INOUT:
        uv_poll_start(&curl_context->poll_handle,
                      UV_READABLE | UV_WRITABLE, curl_perform);
        break;
    case CURL_POLL_REMOVE:
        if (socketp) {
            uv_poll_stop(&((curl_context_t*)socketp)->poll_handle);
            destroy_curl_context((curl_context_t*) socketp);
            curl_multi_assign(curl_handle, s, NULL);
        }
        break;
    default:
        abort();
    }

    return 0;
}
// Stop watching the descriptor; on libuv failure surface the error to
// JS as a TypeError carrying uv_strerror()'s message.
void Poller::stop() {
  const int rc = uv_poll_stop(poll_handle);
  if (rc != 0) {
    Nan::ThrowTypeError(uv_strerror(rc));
  }
}
/*
 * Request close of a custom-polled handle.  If pending output remains
 * in send_iobuf, flush it first and let the write path finish the
 * close later; otherwise run the close callback and stop polling.
 * Returns -1 if the handle is already closing, 0 otherwise.
 *
 * FIX: the original tested custom_poll_data for NULL but then
 * unconditionally dereferenced it (&custom_poll_data->send_iobuf,
 * ->doclose), so a NULL ->data crashed.  We now bail out early on
 * NULL, stopping the poll since there is no state to flush.
 */
int uv_custom_close(uv_poll_t *req) {
	struct uv_custom_poll_t *custom_poll_data = req->data;
	struct iobuf_t *send_io = NULL;

	if(uv_is_closing((uv_handle_t *)req)) {
		return -1;
	}

	if(custom_poll_data == NULL) {
		/* no per-handle state: nothing to flush, just stop polling */
		uv_poll_stop(req);
		return 0;
	}

	custom_poll_data->doclose = 1;

	send_io = &custom_poll_data->send_iobuf;
	if(custom_poll_data->doclose == 1 && send_io->len == 0) {
		/* nothing queued: complete the close now */
		custom_poll_data->doclose = 2;
		if(custom_poll_data->close_cb != NULL) {
			custom_poll_data->close_cb(req);
		}
		if(!uv_is_closing((uv_handle_t *)req)) {
			uv_poll_stop(req);
		}
	} else if(send_io->len > 0) {
		/* drain pending output first; close completes after the flush */
		uv_custom_write(req);
	}
	return 0;
}
/* Lua binding for uv_poll_stop(): poll.stop(handle).
 * Raises a luv error on failure, otherwise pushes the (zero) libuv
 * return code and returns one result. */
static int luv_poll_stop(lua_State* L) {
  uv_poll_t* handle = luv_check_poll(L, 1);
  int rc = uv_poll_stop(handle);

  if (rc < 0) {
    return luv_error(L, rc);
  }

  lua_pushinteger(L, rc);
  return 1;
}
/*
 * libuv poll callback that wakes the Python signal machinery.
 * Runs under the GIL (acquired here), drains the watched fd, then asks
 * CPython to deliver any pending signals; an exception raised by a
 * signal handler is routed to handle_uncaught_exception().
 */
static void pyuv__check_signals(uv_poll_t *handle, int status, int events)
{
    PyGILState_STATE gstate = PyGILState_Ensure();
    SignalChecker *self;

    ASSERT(handle);
    self = PYUV_CONTAINER_OF(handle, SignalChecker, poll_h);

    if (status == 0) {
        ASSERT(events == UV_READABLE);
    }

    /* Drain the fd */
    if (pyuv__drain_poll_fd(self->fd) != 0) {
        /* drain failed: stop watching rather than spin on a bad fd */
        uv_poll_stop(handle);
    }

    /* Check for signals */
    PyErr_CheckSignals();
    if (PyErr_Occurred()) {
        handle_uncaught_exception(HANDLE(self)->loop);
    }

    /* NOTE(review): drops a ref each callback — presumably paired with
     * an INCREF taken when the poll was armed; confirm in the caller. */
    Py_DECREF(self);
    PyGILState_Release(gstate);
}
/*
 * libcurl CURLMOPT_SOCKETFUNCTION callback for a bear_stream_t that
 * owns a single uv poll handle.
 *
 * FIX: the original only handled CURL_POLL_IN and CURL_POLL_OUT, so a
 * CURL_POLL_INOUT action (issued by libcurl when it needs both
 * directions) hit default: abort().  INOUT now starts the poll with
 * UV_READABLE | UV_WRITABLE and also participates in context creation.
 */
int handle_socket(CURL *easy, curl_socket_t s, int action, void *userp,
                  void *socketp)
{
	(void)easy; //unused
	bear_stream_t *stream = (bear_stream_t*) userp;

	if(action == CURL_POLL_IN || action == CURL_POLL_OUT ||
	   action == CURL_POLL_INOUT) {
		/* first notification for this socket: attach a context */
		if(socketp) {
		}
		else {
			create_curl_context(stream,s);
		}
	}

	switch(action) {
	case CURL_POLL_IN:
		uv_poll_start(&stream->poll_handle, UV_READABLE, curl_perform);
		break;
	case CURL_POLL_OUT:
		uv_poll_start(&stream->poll_handle, UV_WRITABLE, curl_perform);
		break;
	case CURL_POLL_INOUT:
		uv_poll_start(&stream->poll_handle,
		              UV_READABLE | UV_WRITABLE, curl_perform);
		break;
	case CURL_POLL_REMOVE:
		if(socketp) {
			uv_poll_stop(&stream->poll_handle);
		}
		break;
	default:
		abort();
	}
	return 0;
}
void stop() { assert(poll.data); uv_poll_stop(&poll); uv_close((uv_handle_t *)&poll, [](uv_handle_t *handle) { assert(handle->data); delete reinterpret_cast<Socket *>(handle->data); }); }
/*
 * Shut down the UDP relay for one server context: stop the libuv poll
 * watcher, close the UDP socket, then purge all cache entries for this
 * loop while holding the shared cache mutex.
 */
void udprelay_close(struct server_context *server)
{
    uv_poll_stop(&server->watcher);
    close(server->udp_fd);

    /* cache is shared; removal must be serialized */
    uv_mutex_lock(&mutex);
    cache_removeall(cache, server->udp.loop, select_cb);
    uv_mutex_unlock(&mutex);
}
Poller::~Poller() {
  if (!uv_poll_init_success) {
    // uv_poll_init failed, so the handle was never registered with the
    // loop; uv_poll_stop/uv_close on it would segfault. Just free it.
    delete poll_handle;
    return;
  }

  uv_poll_stop(poll_handle);
  uv_close(reinterpret_cast<uv_handle_t*>(poll_handle), Poller::onClose);
}
/*
 * Glue-layer hook: remove an I/O watcher and release its io_t.
 *
 * NOTE(review): the embedded uv_poll_t is stopped but never passed to
 * uv_close() before iot_free(); libuv keeps even stopped handles on
 * the loop's handle list, so freeing the containing io_t here may be a
 * use-after-free unless the loop is already being torn down — confirm
 * against the glue layer's lifecycle.
 */
static void del_io(void *glue_data, void *id)
{
    io_t *io = (io_t *)id;

    IOT_UNUSED(glue_data);

    uv_poll_stop(&io->uv_poll);
    iot_free(io);
}
/*
 * getdns extension cleanup hook: stop the poll watcher, hand the handle
 * to uv_close() for asynchronous teardown, and free the wrapper struct.
 * The poll handle itself (pointed to by uv_data->poll_handle) outlives
 * this call and is released in getdns_libuv_close_cb.
 */
static getdns_return_t
getdns_libuv_cleanup(struct getdns_context* context, void* data)
{
    struct getdns_libuv_data *uv_data = (struct getdns_libuv_data*) data;

    uv_poll_stop(uv_data->poll_handle);
    uv_close((uv_handle_t*) uv_data->poll_handle, getdns_libuv_close_cb);
    /* handle itself gets cleaned up in close_cb */
    free(uv_data);
    return GETDNS_RETURN_GOOD;
}
/*
 * Idempotently shut down an HTTPS poll watcher: stop polling and close
 * the handle asynchronously.  The `closing` flag guards against a
 * second invocation racing the asynchronous uv_close; the https_data
 * back-pointer is stashed in poll_handle.data for the close callback.
 */
void iotjs_https_poll_close(iotjs_https_poll_t* poll_data)
{
    IOTJS_VALIDATED_STRUCT_METHOD(iotjs_https_poll_t, poll_data);
    if (_this->closing == false) {
        _this->closing = true;
        uv_poll_stop(&_this->poll_handle);
        _this->poll_handle.data = _this->https_data;
        uv_close((uv_handle_t*)&_this->poll_handle,
                 iotjs_https_uv_close_callback);
    }
    return;
}
/*
 * Toggle fd polling based on the number of outstanding getdns requests:
 * arm the read watcher on the first request, disarm it when the count
 * returns to zero.  uv_data->polling tracks the armed state so the
 * watcher is never started or stopped twice.
 */
static void
request_count_changed(uint32_t request_count, struct getdns_libuv_data *uv_data)
{
    /* first outstanding request: begin watching for readability */
    if (request_count > 0 && uv_data->polling == 0) {
        uv_poll_start(uv_data->poll_handle, UV_READABLE, getdns_libuv_cb);
        uv_data->polling = 1;
        return;
    }

    /* last request completed: stop watching */
    if (request_count == 0 && uv_data->polling == 1) {
        uv_poll_stop(uv_data->poll_handle);
        uv_data->polling = 0;
    }
}
/*
 * Adjust read/write interest on a wsi's libuv poll watcher.
 *
 * flags combines one of LWS_EV_START/LWS_EV_STOP with at least one of
 * LWS_EV_READ/LWS_EV_WRITE; any other combination trips the assert.
 * The current mask is read back from libuv internals (field layout
 * differs between the Windows and Unix builds, hence the #if), the
 * requested bits are added or removed, and the watcher is restarted —
 * or stopped entirely when no events remain.
 */
void lws_libuv_io(struct lws *wsi, int flags)
{
	struct lws_context *context = lws_get_context(wsi);
	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
#if defined(WIN32) || defined(_WIN32)
	int current_events = wsi->w_read.uv_watcher.events &
			     (UV_READABLE | UV_WRITABLE);
#else
	int current_events = wsi->w_read.uv_watcher.io_watcher.pevents &
			     (UV_READABLE | UV_WRITABLE);
#endif
	struct lws_io_watcher *w = &wsi->w_read;

	if (!LWS_LIBUV_ENABLED(context))
		return;

	// lwsl_notice("%s: wsi: %p, flags:0x%x\n", __func__, wsi, flags);

	// w->context is set after the loop is initialized
	if (!pt->io_loop_uv || !w->context) {
		lwsl_info("%s: no io loop yet\n", __func__);
		return;
	}

	/* caller must request start-or-stop of at least one event */
	if (!((flags & (LWS_EV_START | LWS_EV_STOP)) &&
	      (flags & (LWS_EV_READ | LWS_EV_WRITE)))) {
		lwsl_err("%s: assert: flags %d", __func__, flags);
		assert(0);
	}

	if (flags & LWS_EV_START) {
		if (flags & LWS_EV_WRITE)
			current_events |= UV_WRITABLE;
		if (flags & LWS_EV_READ)
			current_events |= UV_READABLE;

		uv_poll_start(&w->uv_watcher, current_events, lws_io_cb);
	} else {
		if (flags & LWS_EV_WRITE)
			current_events &= ~UV_WRITABLE;
		if (flags & LWS_EV_READ)
			current_events &= ~UV_READABLE;

		/* nothing left to watch: stop rather than restart */
		if (!(current_events & (UV_READABLE | UV_WRITABLE)))
			uv_poll_stop(&w->uv_watcher);
		else
			uv_poll_start(&w->uv_watcher, current_events,
				      lws_io_cb);
	}
}
// Stop polling descriptor `desc` and asynchronously close its uv handle,
// then drop it from the registry.  Must be called on the loop thread
// (enforced by the first CHECK_EQ).
void EventServer::stop_poll(int desc) {
  CHECK_EQ(std::this_thread::get_id(), thread_id_);
  // NOTE(review): operator[] default-constructs an empty entry when
  // desc is absent, so es_handle would be null and the uv_poll_stop
  // below would dereference it — confirm callers only pass registered
  // descriptors.
  auto es_handle = poll_handles_[desc];
  CHECK_EQ(0, uv_poll_stop(&es_handle->uv_handle));
  uv_close(reinterpret_cast<uv_handle_t*>(&es_handle->uv_handle),
           &EventServer::uv_close_cb);
  // es_handle keeps the object alive until uv_close_cb runs; only the
  // map entry is removed here.
  poll_handles_.erase(desc);
}
/* Lua binding: poll:stop().  Stops the underlying uv poll handle,
 * releases the start-lock taken when polling began, and returns the
 * handle itself; raises a lluv error on libuv failure. */
static int lluv_poll_stop(lua_State *L){
  lluv_handle_t *handle = lluv_check_poll(L, 1, LLUV_FLAG_OPEN);
  int rc = uv_poll_stop(LLUV_H(handle, uv_poll_t));

  if (rc < 0)
    return lluv_fail(L, handle->flags, LLUV_ERR_UV, rc, NULL);

  lluv_handle_unlock(L, handle, LLUV_LOCK_START);

  /* return the handle (arg #1) */
  lua_settop(L, 1);
  return 1;
}
/*
 * Stop the tunnel device.  On non-Android builds the teardown runs
 * inline: close the socket handle, stop the fd watcher, close the tun
 * fd.  On Android the work is instead deferred through the context's
 * async handle (its callback performs the equivalent teardown).
 */
void tun_stop(struct tundev *tun)
{
#ifndef ANDROID
    struct tundev_context *ctx = tun->contexts;
    close_socket_handle(ctx);
    uv_poll_stop(&ctx->watcher);
    close_tunfd(ctx->tunfd);
#else
    /* wake the loop; teardown happens in the async handle's callback */
    struct tundev_context *ctx = tun->contexts;
    uv_async_send(&ctx->async_handle);
#endif
}
// Destructor: stop polling, detach the back-pointer, then hand the
// handle to libuv for asynchronous close and free the callback object.
~Impl() {
    uv_poll_stop( pollHandle );

    // the handle will still be alive for a short period after calling uv_close
    // set the data to NULL to avoid a dangling pointer
    pollHandle->data = NULL;
    uv_close(reinterpret_cast<uv_handle_t*> ( pollHandle ) , FreeHandleCallback );

    delete mCallback;
}
/*
 * Adjust read/write interest on a wsi's libuv poll watcher.
 *
 * Unlike the older variant, the previous mask is taken from the
 * cached w->actual_events (not libuv internals) and the updated mask is
 * written back there at the end.  flags must combine one of
 * LWS_EV_START/LWS_EV_STOP with at least one of LWS_EV_READ/
 * LWS_EV_WRITE.  When the resulting mask is empty the watcher is
 * stopped rather than restarted with no events.
 */
static void
elops_io_uv(struct lws *wsi, int flags)
{
	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
	struct lws_io_watcher *w = &wsi->w_read;
	int current_events = w->actual_events & (UV_READABLE | UV_WRITABLE);

	lwsl_debug("%s: %p: %d\n", __func__, wsi, flags);

	/* w->context is set after the loop is initialized */

	if (!pt->uv.io_loop || !w->context) {
		lwsl_info("%s: no io loop yet\n", __func__);
		return;
	}

	/* caller must request start-or-stop of at least one event */
	if (!((flags & (LWS_EV_START | LWS_EV_STOP)) &&
	      (flags & (LWS_EV_READ | LWS_EV_WRITE)))) {
		lwsl_err("%s: assert: flags %d", __func__, flags);
		assert(0);
	}

	/* watcher may already be gone if the loop told us it closed */
	if (!w->uv.pwatcher || wsi->told_event_loop_closed) {
		lwsl_err("%s: no watcher\n", __func__);
		return;
	}

	if (flags & LWS_EV_START) {
		if (flags & LWS_EV_WRITE)
			current_events |= UV_WRITABLE;
		if (flags & LWS_EV_READ)
			current_events |= UV_READABLE;

		uv_poll_start(w->uv.pwatcher, current_events, lws_io_cb);
	} else {
		if (flags & LWS_EV_WRITE)
			current_events &= ~UV_WRITABLE;
		if (flags & LWS_EV_READ)
			current_events &= ~UV_READABLE;

		/* nothing left to watch: stop rather than restart */
		if (!(current_events & (UV_READABLE | UV_WRITABLE)))
			uv_poll_stop(w->uv.pwatcher);
		else
			uv_poll_start(w->uv.pwatcher, current_events,
				      lws_io_cb);
	}

	w->actual_events = current_events;
}
/*
 * Clear a scheduled getdns event: stop and asynchronously close each
 * libuv handle that was armed for it (read poll, write poll, timeout
 * timer).  my_ev->to_close counts the uv_close callbacks still
 * outstanding, and each handle's ->data points back at my_ev so
 * getdns_libuv_close_cb can find it; presumably the poll_timer is
 * freed there once to_close drains — confirm in that callback.
 * el_ev->ev is detached before returning.
 */
static getdns_return_t
getdns_libuv_clear(getdns_eventloop *loop, getdns_eventloop_event *el_ev)
{
	poll_timer   *my_ev = (poll_timer *)el_ev->ev;
	uv_poll_t    *my_poll;
	uv_timer_t   *my_timer;

	assert(my_ev);

	DEBUG_UV("enter libuv_clear(el_ev = %p, my_ev = %p, to_close = %d)\n"
	        , el_ev, my_ev, my_ev->to_close);

	if (el_ev->read_cb) {
		my_poll = &my_ev->read;
		uv_poll_stop(my_poll);
		my_ev->to_close += 1;
		my_poll->data = my_ev;
		uv_close((uv_handle_t *)my_poll, getdns_libuv_close_cb);
	}
	if (el_ev->write_cb) {
		my_poll = &my_ev->write;
		uv_poll_stop(my_poll);
		my_ev->to_close += 1;
		my_poll->data = my_ev;
		uv_close((uv_handle_t *)my_poll, getdns_libuv_close_cb);
	}
	if (el_ev->timeout_cb) {
		my_timer = &my_ev->timer;
		uv_timer_stop(my_timer);
		my_ev->to_close += 1;
		my_timer->data = my_ev;
		uv_close((uv_handle_t *)my_timer, getdns_libuv_close_cb);
	}
	el_ev->ev = NULL;

	DEBUG_UV("exit libuv_clear(el_ev = %p, my_ev = %p, to_close = %d)\n"
	        , el_ev, my_ev, my_ev->to_close);
	return GETDNS_RETURN_GOOD;
}
// Idempotently close the CAN handle: stop polling, then let libuv close
// it asynchronously; the close callback drops the V8 reference that
// kept this wrapper alive.
void CANWrap::doClose()
{
    if (m_closed) {
        return;
    }
    m_closed = true;

    uv_poll_stop(&m_uvHandle);
    uv_close(reinterpret_cast<uv_handle_t*>(&m_uvHandle),
             [](uv_handle_t* handle) {
                 auto* self = static_cast<CANWrap*>(handle->data);
                 assert(!self->persistent().IsEmpty());
                 self->Unref();
             });
}
// (Re)apply the current poll-event mask: start polling when any events
// are requested, stop when the mask is empty.  Returns the libuv status
// code, or -1 if the handle has already been closed.
int CANWrap::doPoll()
{
    if (m_closed) {
        return -1;
    }

    return m_pollEvents
               ? uv_poll_start(&m_uvHandle, m_pollEvents, uvPollCallback)
               : uv_poll_stop(&m_uvHandle);
}
/*
 * This does not actually stop the event loop. The reason is we have to pass
 * libuv handle closures through its event loop. So this tries to close all
 * wsi, and set a flag; when all the wsi closures are finalized then we
 * actually stop the libuv event loops.
 */
static void
lws_libuv_stop(struct lws_context *context)
{
	struct lws_context_per_thread *pt;
	int n, m;

	lwsl_err("%s\n", __func__);

	/* make repeated stop requests a no-op */
	if (context->requested_kill) {
		lwsl_err("%s: ignoring\n", __func__);
		return;
	}

	context->requested_kill = 1;

	m = context->count_threads;
	context->being_destroyed = 1;

	/*
	 * Phase 1: start the close of every dynamic uv handle
	 */

	while (m--) {
		pt = &context->pt[m];

		/* each service thread's event pipe goes first */
		if (pt->pipe_wsi) {
			uv_poll_stop(pt->pipe_wsi->w_read.uv.pwatcher);
			lws_destroy_event_pipe(pt->pipe_wsi);
			pt->pipe_wsi = NULL;
		}

		for (n = 0; (unsigned int)n < context->pt[m].fds_count;
		     n++) {
			struct lws *wsi = wsi_from_fd(context, pt->fds[n].fd);

			if (!wsi)
				continue;

			lws_close_free_wsi(wsi,
				LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY,
				__func__ /* no protocol close */);
			/* presumably closing the wsi compacts pt->fds, so
			 * the same index must be revisited — confirm */
			n--;
		}
	}

	lwsl_info("%s: started closing all wsi\n", __func__);

	/* we cannot have completed... there are at least the cancel pipes */
}
/*
 * Async-handle callback performing tunnel teardown on the loop thread
 * (the Android counterpart of the inline path in tun_stop): close the
 * async handle itself, the socket handle, the fd watcher and the tun
 * fd, clear DNS state for non-global tunnels, then free the tundev.
 */
static void tun_close(uv_async_t *handle)
{
    struct tundev_context *ctx =
        container_of(handle, struct tundev_context, async_handle);
    struct tundev *tun = ctx->tun;

    uv_close((uv_handle_t *) &ctx->async_handle, NULL);
    close_socket_handle(ctx);
    uv_poll_stop(&ctx->watcher);
    close_tunfd(ctx->tunfd);
    if (!tun->global) {
        clear_dns_query();
    }
    /* tun_free last: ctx lives inside tun->contexts */
    tun_free(tun);
}
/*
 * SignalChecker.stop(): stop the underlying uv poll handle.
 * The RAISE_* macros return NULL early if the handle is uninitialized
 * or closed; a libuv failure raises UVError.  On success the extra
 * handle reference is dropped and None is returned.
 */
static PyObject *
SignalChecker_func_stop(SignalChecker *self)
{
    int err;

    RAISE_IF_HANDLE_NOT_INITIALIZED(self, NULL);
    RAISE_IF_HANDLE_CLOSED(self, PyExc_HandleClosedError, NULL);

    err = uv_poll_stop(&self->poll_h);
    if (err < 0) {
        RAISE_UV_EXCEPTION(err, PyExc_UVError);
        return NULL;
    }

    /* presumably pairs with a PYUV_HANDLE_INCREF taken on start —
     * confirm against the start path */
    PYUV_HANDLE_DECREF(self);

    Py_RETURN_NONE;
}
/*
 * uv poll callback for an HTTP client socket: translate libuv readiness
 * events into HTTP_SERVER_POLL_* flags, stop polling, and hand the
 * socket back to the HTTP server state machine.
 */
void http_perform(uv_poll_t *req, int status, int events)
{
    client_context *ctx = req->data;
    int flags = 0;

    fprintf(stderr, "perform (status=%d)\n", status);

    if (events & UV_READABLE)
        flags |= HTTP_SERVER_POLL_IN;
    if (events & UV_WRITABLE)
        flags |= HTTP_SERVER_POLL_OUT;

    /* the callback must have fired for at least one event */
    assert(flags);

    fprintf(stderr, "execute socket action %d\n", ctx->sockfd);

    /* polling is rearmed later by the server once it knows the next
       interest set */
    uv_poll_stop(req);
    http_server_socket_action(&srv, ctx->sockfd, flags);
}
/*
 * Adjust read/write interest on a wsi's libuv poll watcher (older lws
 * variant using lws_accept_cb).  The current mask is read back from
 * libuv's io_watcher internals, the requested LWS_EV_* bits are added
 * or removed, and the watcher is restarted — or stopped entirely when
 * no events remain.  Note the flags validity check is a plain assert()
 * and therefore vanishes under NDEBUG builds.
 */
void lws_libuv_io(struct lws *wsi, int flags)
{
	struct lws_context *context = lws_get_context(wsi);
	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
	int current_events = wsi->w_read.uv_watcher.io_watcher.pevents &
			     (UV_READABLE | UV_WRITABLE);
	struct lws_io_watcher *w = &wsi->w_read;

	if (!LWS_LIBUV_ENABLED(context))
		return;

	lwsl_debug("%s: wsi: %p, flags:%d\n", __func__, wsi, flags);

	if (!pt->io_loop_uv) {
		lwsl_info("%s: no io loop yet\n", __func__);
		return;
	}

	/* must combine start-or-stop with at least one of read/write */
	assert((flags & (LWS_EV_START | LWS_EV_STOP)) &&
	       (flags & (LWS_EV_READ | LWS_EV_WRITE)));

	if (flags & LWS_EV_START) {
		if (flags & LWS_EV_WRITE)
			current_events |= UV_WRITABLE;
		if (flags & LWS_EV_READ)
			current_events |= UV_READABLE;

		uv_poll_start(&w->uv_watcher, current_events, lws_accept_cb);
	} else {
		if (flags & LWS_EV_WRITE)
			current_events &= ~UV_WRITABLE;
		if (flags & LWS_EV_READ)
			current_events &= ~UV_READABLE;

		/* nothing left to watch: stop rather than restart */
		if (!(current_events & (UV_READABLE | UV_WRITABLE)))
			uv_poll_stop(&w->uv_watcher);
		else
			uv_poll_start(&w->uv_watcher, current_events,
				      lws_accept_cb);
	}
}
/*
 * Begin the logical close of a wsi under the libuv event backend.
 * Returns 0 (caller should finish the close synchronously) when the
 * socket is already invalid; otherwise stops the poll watcher for
 * listeners/event pipes, kicks off the asynchronous libuv handle close,
 * and returns 1 so the caller defers completion to the uv close cb.
 */
static int
elops_wsi_logical_close_uv(struct lws *wsi)
{
	if (!lws_socket_is_valid(wsi->desc.sockfd))
		return 0;

	if (wsi->listener || wsi->event_pipe) {
		lwsl_debug("%s: %p: %d %d stop listener / pipe poll\n",
			   __func__, wsi, wsi->listener, wsi->event_pipe);
		if (wsi->w_read.uv.pwatcher)
			uv_poll_stop(wsi->w_read.uv.pwatcher);
	}
	lwsl_debug("%s: lws_libuv_closehandle: wsi %p\n", __func__, wsi);

	/*
	 * libuv has to do his own close handle processing asynchronously
	 */
	lws_libuv_closehandle(wsi);

	return 1; /* do not complete the wsi close, uv close cb will do it */
}