static void (loop_stop)(void)
{
    uv_stop(loop);
    uv_idle_t *idle = s_malloc0(sizeof(*idle));
    uv_idle_init(loop, idle);
    uv_idle_start(idle, loop_stop_cb);
}

void App::close()
{
    m_network->stop();
    Workers::stop();

    uv_stop(uv_default_loop());
}

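/*
 * A minimal, self-contained sketch (not from either project above) of the
 * pattern most of the snippets in this collection share: uv_stop() only asks
 * the loop to return from uv_run(), and it should run on the loop's own
 * thread, so other threads typically signal a uv_async_t whose callback
 * performs the actual stop. The names stop_async and request_stop are
 * illustrative.
 */
#include <uv.h>

static uv_async_t stop_async;

static void stop_async_cb(uv_async_t *handle)
{
    /* Runs on the loop thread: make the enclosing uv_run() return. */
    uv_stop(handle->loop);
}

/* Safe to call from any thread once stop_async has been initialized. */
static void request_stop(void)
{
    uv_async_send(&stop_async);
}

int main(void)
{
    uv_loop_t *loop = uv_default_loop();

    uv_async_init(loop, &stop_async, stop_async_cb);
    /* ... start timers, sockets, or worker threads that call request_stop() ... */
    uv_run(loop, UV_RUN_DEFAULT);               /* returns after uv_stop() */

    uv_close((uv_handle_t *)&stop_async, NULL); /* release the async handle */
    uv_run(loop, UV_RUN_DEFAULT);               /* let the close callback run */
    return uv_loop_close(loop);
}
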
static int elops_destroy_context2_uv(struct lws_context *context)
{
    struct lws_context_per_thread *pt;
    int n, internal = 0;

    for (n = 0; n < context->count_threads; n++) {
        pt = &context->pt[n];

        /* only for internal loops... */

        if (!pt->event_loop_foreign && pt->uv.io_loop) {
            internal = 1;
            if (!context->finalize_destroy_after_internal_loops_stopped)
                uv_stop(pt->uv.io_loop);
            else {
#if UV_VERSION_MAJOR > 0
                uv_loop_close(pt->uv.io_loop);
#endif
                lws_free_set_NULL(pt->uv.io_loop);
            }
        }
    }

    return internal;
}

void lws_libuv_destroyloop(struct lws_context *context, int tsi)
{
    struct lws_context_per_thread *pt = &context->pt[tsi];
    int m;

    if (!(context->options & LWS_SERVER_OPTION_LIBUV))
        return;

    if (!pt->io_loop_uv)
        return;

    if (context->use_ev_sigint)
        uv_signal_stop(&pt->w_sigint.uv_watcher);
    for (m = 0; m < ARRAY_SIZE(sigs); m++)
        uv_signal_stop(&pt->signals[m]);

    if (!pt->ev_loop_foreign) {
        uv_stop(pt->io_loop_uv);
        uv_walk(pt->io_loop_uv, lws_uv_walk_cb, NULL);
        while (uv_run(pt->io_loop_uv, UV_RUN_NOWAIT))
            ;
        m = uv_loop_close(pt->io_loop_uv);
        if (m == UV_EBUSY)
            lwsl_debug("%s: uv_loop_close: UV_EBUSY\n", __func__);
        lws_free(pt->io_loop_uv);
    }
}

void thread_pool_async_cb(uv_async_t *async)
{
    ThreadPool *l = static_cast<ThreadPool *>((Loop *)(async->data));
    function<void()> fn;

    while (true) {
        l->mutex_.lock();
        if (l->queue_.size() == 0) {
            if (l->stop_) {
                if (l->count_ == 0) {
                    uv_stop(l->loop_);
                    uv_close((uv_handle_t *)&l->async_, NULL);
                }
            }
            l->mutex_.unlock();
            break;
        }
        fn = l->queue_.front();
        l->queue_.pop_front();
        l->mutex_.unlock();

        // post fn to the thread pool
        uv_work_t *w = new uv_work_t();
        w->data = new ThreadPool::work_data(l, fn);
        ++(l->count_);  // add to the counter; only happens in the loop thread
        uv_queue_work(l->loop_, w, work_cb, after_work); /* start work_cb; the finish function is after_work */
    }
}

static int unload_module(void)
{
    unsigned int i = 0;

    LNOTICE("unloaded module %s", module_name);

    for (i = 0; i < profile_size; i++) {
        close_socket(i);
        free_profile(i);
    }

#if UV_VERSION_MAJOR == 0
    uv_async_send(&async_handle);
    uv_loop_delete(loop);
#else
    if (uv_loop_alive(loop)) {
        uv_async_send(&async_handle);
    }
    uv_stop(loop);
    uv_loop_close(loop);
    free(loop);
#endif

    /* Close socket */
    return 0;
}

static void jl_signal_async_cb(uv_async_t *hdl)
{
    // This should abort the current loop and the julia code it returns to,
    // or the safepoint in the callers of `uv_run` should throw the exception.
    (void)hdl;
    uv_stop(jl_io_loop);
}

bool as_event_send_close_loop(as_event_loop *event_loop)
{
    // Send the stop command through the queue so it can be executed in the event loop thread.
    pthread_mutex_lock(&event_loop->lock);
    as_uv_command qcmd = {.type = AS_UV_EXIT_LOOP, .ptr = 0};
    bool queued = as_queue_push(&event_loop->queue, &qcmd);
    pthread_mutex_unlock(&event_loop->lock);

    if (queued) {
        uv_async_send(event_loop->wakeup);
    }
    return queued;
}

void as_event_close_loop(as_event_loop *event_loop)
{
    uv_close((uv_handle_t *)event_loop->wakeup, as_uv_wakeup_closed);

    // Only stop the event loop if the client created it.
    if (as_event_threads_created) {
        uv_stop(event_loop->loop);
    }

    // Clean up event loop resources.
    as_queue_destroy(&event_loop->queue);
    as_queue_destroy(&event_loop->pipe_cb_queue);
    pthread_mutex_unlock(&event_loop->lock);
    pthread_mutex_destroy(&event_loop->lock);
}

int wsn_client_start(wsn_client_ctx_t *client)
{
    int err = 0;

    if (wsn_is_pipe(client->conf)) {
        err = wsn_client_start_connect_(client);
        if (err) {
            uv_stop(client->loop);
        }
        return err;
    }

    struct addrinfo hints;
    memset(&hints, 0, sizeof(hints));
    hints.ai_family = AF_UNSPEC;
    hints.ai_socktype = SOCK_STREAM;
    hints.ai_protocol = IPPROTO_TCP;

    connect_timer_reset_(client);

    err = uv_getaddrinfo(client->loop, &client->getaddrinfo_req,
                         on_get_host_addrs_, client->conf->host, NULL, &hints);
    if (err) {
        wsn_report_err(WSN_ERR_GET_ADDR_INFO,
                       "Failed to start client to (\"%s\"), getaddrinfo error: %s",
                       client->conf->host, uv_strerror(err));
        err = WSN_ERR_GET_ADDR_INFO;
    }
    return err;
}

// =============================================================================
static void read_stdin(uv_stream_t *stream, ssize_t nread, uv_buf_t buf)
{
    UNUSED(stream);

    if (nread == -1) {
        if (uv_last_error(uv_default_loop()).code == UV_EOF) {
            uv_close((uv_handle_t *)&stdin_pipe, NULL);
        }
    } else if (nread > 0) {
        std::string cmd(buf.base, nread);
        if (HF::Application::Handle(cmd)) {
            transport.destroy();
            uv_stop(uv_default_loop());
        }
    }

    if (buf.base) {
        free(buf.base);
    }
}

static void timer_cb(uv_timer_t *handle)
{
    ASSERT(handle == &timer_handle);
    timer_called++;
    if (timer_called == 1)
        uv_stop(uv_default_loop());
    else if (timer_called == num_ticks)
        uv_timer_stop(handle);
}

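/*
 * A minimal sketch, assuming libuv 1.x, of what the test callback above
 * relies on: uv_stop() only makes the current uv_run() return early; the
 * loop and its handles stay valid, and uv_run() can simply be called again.
 * The names ticks and tick_cb are illustrative.
 */
#include <stdio.h>
#include <uv.h>

static int ticks;

static void tick_cb(uv_timer_t *timer)
{
    if (++ticks == 1)
        uv_stop(timer->loop);                  /* interrupt this uv_run() */
    else if (ticks == 5)
        uv_close((uv_handle_t *)timer, NULL);  /* fifth tick: shut down */
}

int main(void)
{
    uv_loop_t *loop = uv_default_loop();
    uv_timer_t timer;

    uv_timer_init(loop, &timer);
    uv_timer_start(&timer, tick_cb, 10, 10);

    uv_run(loop, UV_RUN_DEFAULT);   /* returns after the first tick */
    printf("interrupted after %d tick(s)\n", ticks);

    uv_run(loop, UV_RUN_DEFAULT);   /* resumes until the timer handle closes */
    printf("finished after %d tick(s)\n", ticks);
    return uv_loop_close(loop);
}
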
void echo_read(uv_stream_t *client, ssize_t nread, const uv_buf_t *buf)
{
    if (nread > 0) {
        // Copy the bytes read so they can be printed as a NUL-terminated string.
        char *str = malloc(nread + 1);
        strncpy(str, buf->base, nread);
        str[nread] = '\0';
        printf("%s\n", str);
        free(str);
        uv_stop(loop);
    } else if (nread < 0) {
        // Read error or EOF: stop using this stream.
        uv_close((uv_handle_t *)client, NULL);
    }

    // The read buffer is owned by this callback and must always be released.
    if (buf->base)
        free(buf->base);
}

static void lws_libuv_kill(const struct lws_context *context)
{
    int n;

    for (n = 0; n < context->count_threads; n++)
        if (context->pt[n].io_loop_uv && LWS_LIBUV_ENABLED(context))
            uv_stop(context->pt[n].io_loop_uv);
}

static void on_get_host_addrs_(uv_getaddrinfo_t *req, int status, struct addrinfo *addrs)
{
    wsn_client_ctx_t *client = CONTAINER_OF(req, wsn_client_ctx_t, getaddrinfo_req);

    uv_timer_stop(&client->timer_handle);

    if (status < 0) {
        wsn_report_err(WSN_ERR_GET_ADDR_INFO,
                       "Failed to getaddrinfo for client to (\"%s\"): %s",
                       client->conf->host, uv_strerror(status));
        uv_freeaddrinfo(addrs);
        return;
    }

    int ipv4_naddrs = 0;
    int ipv6_naddrs = 0;
    for (struct addrinfo *ai = addrs; ai != NULL; ai = ai->ai_next) {
        if (ai->ai_family == AF_INET) {
            ipv4_naddrs += 1;
        } else if (ai->ai_family == AF_INET6) {
            ipv6_naddrs += 1;
        }
    }

    if (ipv4_naddrs == 0 && ipv6_naddrs == 0) {
        wsn_report_err(WSN_ERR_GET_ADDR_INFO,
                       "Remote host (\"%s\") has no IPv4/6 addresses",
                       client->conf->host);
        uv_freeaddrinfo(addrs);
        return;
    }

    union {
        struct sockaddr addr;
        struct sockaddr_in addr4;
        struct sockaddr_in6 addr6;
    } s;

    for (struct addrinfo *ai = addrs; ai != NULL; ai = ai->ai_next) {
        if (ai->ai_family == AF_INET) {
            s.addr4 = *(const struct sockaddr_in *)ai->ai_addr;
            s.addr4.sin_port = htons(client->conf->port);
        } else if (ai->ai_family == AF_INET6) {
            s.addr6 = *(const struct sockaddr_in6 *)ai->ai_addr;
            s.addr6.sin6_port = htons(client->conf->port);
        } else {
            continue;
        }
        client->host_addr = s.addr;
        break;
    }

    uv_freeaddrinfo(addrs);

    if (wsn_client_start_connect_(client) != 0) {
        uv_stop(client->loop);
        return;
    }
}

void loop_on_put(MultiQueue *queue, void *data)
{
    Loop *loop = data;
    // Sometimes libuv will run pending callbacks (a timer, for example) before
    // blocking for a poll. If this happens and the callback pushes an event to
    // one of the queues, the event would only be processed after the poll
    // returns (when the user hits a key, for example). To avoid this scenario,
    // we call uv_stop when an event is enqueued.
    uv_stop(&loop->uv);
}

// Queue an event
void event_push(Event event, bool deferred)
{
    // Sometimes libuv will run pending callbacks (a timer, for example) before
    // blocking for a poll. If this happens and the callback pushes an event to
    // one of the queues, the event would only be processed after the poll
    // returns (when the user hits a key, for example). To avoid this scenario,
    // we call uv_stop when an event is enqueued.
    uv_stop(uv_default_loop());
    *kl_pushp(Event, deferred ? deferred_events : immediate_events) = event;
}

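/*
 * A minimal sketch (loosely following the two snippets above, with
 * illustrative names and a fixed-size queue) of why uv_stop() is called on
 * enqueue: if a libuv callback pushes an event during the current iteration,
 * uv_stop() makes uv_run() return before blocking in the poll, so the caller
 * can drain the queue immediately instead of waiting for the next I/O wakeup.
 */
#include <uv.h>

typedef void (*event_fn)(void);

static event_fn pending[64];
static int npending;

/* Called from handle callbacks running on the loop thread. */
static void event_push(uv_loop_t *loop, event_fn fn)
{
    pending[npending++] = fn;
    uv_stop(loop);              /* make the enclosing uv_run() return soon */
}

/* Typical driver: poll for I/O, then process queued events outside uv_run(). */
static void run_once_and_drain(uv_loop_t *loop)
{
    uv_run(loop, UV_RUN_ONCE);  /* returns early if event_push() ran */
    while (npending > 0)
        pending[--npending]();
}
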
void lws_libuv_destroyloop(struct lws_context *context, int tsi)
{
    struct lws_context_per_thread *pt = &context->pt[tsi];
    // struct lws_context *ctx;
    int m, budget = 100, ns;

    if (!lws_check_opt(context->options, LWS_SERVER_OPTION_LIBUV))
        return;

    if (!pt->io_loop_uv)
        return;

    lwsl_notice("%s: closing signals + timers context %p\n", __func__, context);

    if (context->use_ev_sigint) {
        uv_signal_stop(&pt->w_sigint.uv_watcher);

        ns = ARRAY_SIZE(sigs);
        if (lws_check_opt(context->options,
                          LWS_SERVER_OPTION_UV_NO_SIGSEGV_SIGFPE_SPIN))
            ns = 2;

        for (m = 0; m < ns; m++) {
            uv_signal_stop(&pt->signals[m]);
            uv_close((uv_handle_t *)&pt->signals[m], lws_uv_close_cb);
        }
    }

    uv_timer_stop(&pt->uv_timeout_watcher);
    uv_close((uv_handle_t *)&pt->uv_timeout_watcher, lws_uv_close_cb);

    uv_idle_stop(&pt->uv_idle);
    uv_close((uv_handle_t *)&pt->uv_idle, lws_uv_close_cb);

    if (pt->ev_loop_foreign)
        return;

    while (budget-- && uv_run(pt->io_loop_uv, UV_RUN_NOWAIT))
        ;

    lwsl_notice("%s: closing all loop handles context %p\n", __func__, context);

    uv_stop(pt->io_loop_uv);

    uv_walk(pt->io_loop_uv, lws_uv_walk_cb, NULL);

    while (uv_run(pt->io_loop_uv, UV_RUN_NOWAIT))
        ;

#if UV_VERSION_MAJOR > 0
    m = uv_loop_close(pt->io_loop_uv);
    if (m == UV_EBUSY)
        lwsl_err("%s: uv_loop_close: UV_EBUSY\n", __func__);
#endif
    lws_free(pt->io_loop_uv);
}

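/*
 * A minimal sketch of the teardown sequence the destroyloop functions above
 * build on: stop the loop, close every remaining handle via uv_walk(), drain
 * the loop so the close callbacks run, and only then call uv_loop_close(),
 * which would otherwise fail with UV_EBUSY. The helper names are
 * illustrative, not libwebsockets API.
 */
#include <uv.h>

static void close_walk_cb(uv_handle_t *handle, void *arg)
{
    (void)arg;
    if (!uv_is_closing(handle))
        uv_close(handle, NULL);
}

static int drain_and_close(uv_loop_t *loop)
{
    uv_stop(loop);                        /* make any nested uv_run() return */
    uv_walk(loop, close_walk_cb, NULL);   /* close every live handle */
    while (uv_run(loop, UV_RUN_NOWAIT))   /* run until close callbacks finish */
        ;
    return uv_loop_close(loop);           /* 0 on success, UV_EBUSY otherwise */
}
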
static void uv_finit(void)
{
    //uv_unref((uv_handle_t*)&g_timer);
    uv_timer_stop(&g_timer);
    uv_stop(&g_loop);
    uv_async_init(&g_loop, &g_async, NULL);
    uv_async_send(&g_async);
    uv_thread_join(&g_loop_thread);
    if (g_sys)
        delete g_sys;
    //if (g_remote) delete g_remote;
}

void Thread::stop()
{
    if (!this->is_started.exchange(false)) {
        LOG(INFO) << "Thread " << static_cast<void *>(this) << " already stopped";
        return;
    }
    uv_stop(&this->loop);
    uv_async_send(&this->async);
    uv_thread_join(&this->thread_id);
}

static void resume(WrenValue *method)
{
    WrenInterpretResult result = wrenCall(getVM(), method);

    // If a runtime error occurs in response to an async operation and nothing
    // catches the error in the fiber, then exit the CLI.
    if (result == WREN_RESULT_RUNTIME_ERROR) {
        uv_stop(getLoop());
        setExitCode(70);  // EX_SOFTWARE.
    }
}

static void lws_libuv_kill(const struct lws_context *context)
{
    int n;

    lwsl_notice("%s\n", __func__);

    for (n = 0; n < context->count_threads; n++)
        if (context->pt[n].io_loop_uv &&
            LWS_LIBUV_ENABLED(context)) //&&
            //!context->pt[n].ev_loop_foreign)
            uv_stop(context->pt[n].io_loop_uv);
}

JL_DLLEXPORT void jl_wakeup_thread(int16_t tid)
{
    jl_ptls_t ptls = jl_get_ptls_states();

    /* ensure thread tid is awake if necessary */
    if (ptls->tid != tid && !_threadedregion && tid != -1) {
        uv_mutex_lock(&sleep_lock);
        uv_cond_broadcast(&sleep_alarm); // TODO: make this uv_cond_signal / just wake up correct thread
        uv_mutex_unlock(&sleep_lock);
    }
    if (_threadedregion && jl_uv_mutex.owner != jl_thread_self())
        jl_wake_libuv();
    else
        uv_stop(jl_global_event_loop());
}

static void stop_server_handler(uv_signal_t *handle, int signum)
{
    const char *sig;
    if (signum == SIGINT) {
        sig = "SIGINT";
    } else if (signum == SIGTERM) {
        sig = "SIGTERM";
    } else {
        // Ignore unknown signals
        return;
    }

    log_warn("Received %s, gracefully terminating broker...\n", sig);

    Broker *broker = handle->loop->data;
    broker_stop(broker);
    uv_stop(handle->loop);
}

static void address_cb(uv_getaddrinfo_t *req, int status, struct addrinfo *addrs)
{
    struct addrinfo *ai;
    char addrbuf[INET6_ADDRSTRLEN + 1];
    const void *addrv;
    union {
        struct sockaddr addr;
        struct sockaddr_in addr4;
        struct sockaddr_in6 addr6;
    } s;

    if (status < 0) {
        printf("getaddrinfo was cancelled or failed\n");
        return;
    }

    for (ai = addrs; ai != NULL; ai = ai->ai_next) {
        printf("==> GET AN ADDRESS\n");
        printf(" FAMILY:%s\n", ai->ai_family == AF_INET ? "IPV4" : "IPV6");
        if (ai->ai_family == AF_INET) {
            s.addr4 = *(const struct sockaddr_in *)ai->ai_addr;
            addrv = &s.addr4.sin_addr;
        } else if (ai->ai_family == AF_INET6) {
            s.addr6 = *(const struct sockaddr_in6 *)ai->ai_addr;
            addrv = &s.addr6.sin6_addr;
        } else {
            printf("UNREACHABLE\n");
            return;
        }
        if (uv_inet_ntop(s.addr.sa_family, addrv, addrbuf, sizeof(addrbuf))) {
            printf("UNREACHABLE\n");
            return;
        }
        printf(" ADDRESS:%s\n", addrbuf);
    }

    uv_freeaddrinfo(addrs);
    uv_stop(uv_default_loop());
}

static void callResume(WrenValue *resumeMethod, WrenValue *fiber, const char *argTypes, ...)
{
    va_list args;
    va_start(args, argTypes);
    WrenInterpretResult result = wrenCallVarArgs(getVM(), resumeMethod, NULL, argTypes, args);
    va_end(args);

    wrenReleaseValue(getVM(), fiber);

    // If a runtime error occurs in response to an async operation and nothing
    // catches the error in the fiber, then exit the CLI.
    if (result == WREN_RESULT_RUNTIME_ERROR) {
        uv_stop(getLoop());
        setExitCode(70);  // EX_SOFTWARE.
    }
}

static void lws_uv_close_cb_sa(uv_handle_t *handle)
{
    struct lws_context *context = LWS_UV_REFCOUNT_STATIC_HANDLE_TO_CONTEXT(handle);
    int n;

    lwsl_info("%s: sa left %d: dyn left: %d\n", __func__,
              context->count_event_loop_static_asset_handles,
              context->count_wsi_allocated);

    /* any static assets left? */
    if (LWS_UV_REFCOUNT_STATIC_HANDLE_DESTROYED(handle) ||
        context->count_wsi_allocated)
        return;

    /*
     * That's it... all wsi were down, and now every
     * static asset lws had a UV handle for is down.
     *
     * Stop the loop so we can get out of here.
     */

    for (n = 0; n < context->count_threads; n++) {
        struct lws_context_per_thread *pt = &context->pt[n];

        if (pt->uv.io_loop && !pt->event_loop_foreign)
            uv_stop(pt->uv.io_loop);
    }

    if (!context->pt[0].event_loop_foreign) {
        lwsl_info("%s: calling lws_context_destroy2\n", __func__);
        lws_context_destroy2(context);
    }

    lwsl_info("%s: all done\n", __func__);
}

// The worker actively calls finish_session() on its own sessions.
int worker_stop(ls_worker_t *w)
{
    LOG(" worker_stop(%p)\n", w);

    // Stop all sessions in the worker.
    for (size_t i = 0; i < w->sessions->size(); ++i) {
        if (finish_session((*w->sessions)[i]) < 0) {
            LOG("ERROR failed to finish_session()\n");
            // return -1;  // make sure every session has run session_destroy()
        }
    }

    for (size_t i = 0; i < master.num_plugins; ++i) {
        ls_plugin_t *plugin = master.plugins + i;
        if (plugin->worker_terminate(w) != 0) {
            LOGE(" %s.worker_terminate() error\n", plugin->plugin_name);
            // return -1;
        }
    }

    uv_stop(w->worker_loop);
    return 0;
}

void async_cb(uv_async_t *async)
{
    Loop *l = (Loop *)(async->data);
    function<void()> fn;
    static int t = 0;
    // printf("async_cb time %d size %d\n", t, (int)l->queue_.size());

    while (true) {
        l->mutex_.lock();
        if (l->queue_.size() == 0) {
            if (l->stop_) {
                uv_stop(l->loop_);
                uv_close((uv_handle_t *)&l->async_, NULL);
            }
            l->mutex_.unlock();
            break;
        }
        fn = l->queue_.front();
        l->queue_.pop_front();
        l->mutex_.unlock();

        fn();
        // printf("async_cb time %d size %d\n", t, (int)l->queue_.size());
    }
    t++;
}

void EventBase_endLoop(struct EventBase *eventBase)
{
    struct EventBase_pvt *ctx = Identity_cast((struct EventBase_pvt *) eventBase);
    uv_stop(ctx->loop);
}

void on_close(uv_handle_t *handle)
{
    //free(handle);
    uv_stop(handle->loop);
}