static int lua_time_interval(lua_State *L) {
  int t = lua_type(L, 2);
  luatimer_t *tm = (luatimer_t *)luaL_checkudata(L, 1, UVLUA_TIMER_CLS);
  if (NULL != tm) {
    if (LUA_TNIL != t && LUA_TNONE != t) {
      int repeat = lua_tointeger(L, 2);
      if (repeat > 0) {
        int restart = 0;
        if (uv_is_active((uv_handle_t *)&tm->timer)) {
          uv_timer_stop(&tm->timer);
          restart = 1;
        }
        uv_timer_set_repeat(&tm->timer, repeat);
        if (restart)
          uv_timer_again(&tm->timer);
      }
    }
    lua_pushinteger(L, uv_timer_get_repeat(&tm->timer));
  } else {
    lua_pushinteger(L, 0);
  }
  return 1;
}
static void close_socket_handle(struct tundev_context *ctx) {
    if (uv_is_active(&ctx->inet_tcp.handle)) {
        uv_close(&ctx->inet_tcp.handle, NULL);
    }
    uv_close((uv_handle_t *) &ctx->timer, NULL);
}
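/* A note on the guard above: uv_is_active() == 0 does not make a handle safe
 * to skip; an initialized-but-idle handle still needs uv_close() before its
 * memory can be reclaimed. The actual precondition of uv_close() is "not
 * already closing", which uv_is_closing() reports. A minimal sketch of that
 * guard (close_once is an illustrative name, not from the source):
 */
#include <uv.h>

static void close_once(uv_handle_t *handle, uv_close_cb cb) {
  /* uv_is_closing() is true as soon as uv_close() has been called, even
   * before the close callback runs, so this is safe to call repeatedly. */
  if (!uv_is_closing(handle))
    uv_close(handle, cb);
}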
static int couv_is_active(lua_State *L) {
  uv_handle_t *handle;

  handle = couvL_checkudataclass(L, 1, COUV_HANDLE_MTBL_NAME);
  lua_pushboolean(L, uv_is_active(handle));
  return 1;
}
static int luv_is_active(lua_State* L) {
  uv_handle_t* handle = luv_check_handle(L, 1);
  int ret = uv_is_active(handle);
  if (ret < 0) return luv_error(L, ret);
  lua_pushboolean(L, ret);
  return 1;
}
int lws_libuv_check_watcher_active(struct lws *wsi)
{
	uv_handle_t *h = (void *)&wsi->w_read.uv_watcher;

	return uv_is_active(h);
}
static void once_close_cb(uv_handle_t* handle) {
  printf("ONCE_CLOSE_CB\n");

  ASSERT(handle != NULL);
  ASSERT(!uv_is_active(handle));

  once_close_cb_called++;
}
int luv_timer_get_active(lua_State* L) {
  uv_timer_t* timer = (uv_timer_t*)luv_checkudata(L, 1, "timer");
  int active = uv_is_active((uv_handle_t*)timer);
  lua_pushboolean(L, active);
  return 1;
}
static PyObject *
Handle_active_get(Handle *self, void *closure)
{
    UNUSED_ARG(closure);
    RAISE_IF_HANDLE_NOT_INITIALIZED(self, NULL);
    return PyBool_FromLong((long)uv_is_active(self->uv_handle));
}
static void repeat_cb(uv_timer_t* handle) {
  TUV_ASSERT(handle != NULL);
  TUV_ASSERT(1 == uv_is_active((uv_handle_t*) handle));

  repeat_cb_called++;

  if (repeat_cb_called == 5) {
    uv_close((uv_handle_t*)handle, repeat_close_cb);
  }
}
int lws_libuv_check_watcher_active(struct lws *wsi)
{
	uv_handle_t *h = (uv_handle_t *)wsi->w_read.uv.pwatcher;

	if (!h)
		return 0;

	return uv_is_active(h);
}
/* Callback from ares when a socket operation is started */
static void uv__ares_sockstate_cb(void* data,
                                  ares_socket_t sock,
                                  int read,
                                  int write) {
  uv_loop_t* loop = data;
  uv_ares_task_t* h;

  assert((uv_loop_t*)loop->timer.data == loop);

  h = uv_find_ares_handle(loop, sock);

  if (read || write) {
    if (!h) {
      /* New socket. */

      /* If this is the first socket then start the timer. */
      if (!uv_is_active((uv_handle_t*)&loop->timer)) {
        assert(uv_ares_handles_empty(loop));
        uv__ares_timer_start(loop);
      }

      h = uv__ares_task_create(loop, sock);
      uv_add_ares_handle(loop, h);
    }

    if (read) {
      ev_io_start(loop->ev, &h->read_watcher);
    } else {
      ev_io_stop(loop->ev, &h->read_watcher);
    }

    if (write) {
      ev_io_start(loop->ev, &h->write_watcher);
    } else {
      ev_io_stop(loop->ev, &h->write_watcher);
    }
  } else {
    /*
     * read == 0 and write == 0 is c-ares's way of notifying us that the
     * socket is now closed. We must free the data associated with the
     * socket.
     */
    assert(h && "When an ares socket is closed we should have a handle for it");

    ev_io_stop(loop->ev, &h->read_watcher);
    ev_io_stop(loop->ev, &h->write_watcher);

    uv_remove_ares_handle(h);
    free(h);

    if (uv_ares_handles_empty(loop)) {
      uv__ares_timer_stop(loop);
    }
  }
}
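/* The c-ares glue above uses uv_is_active() as an implicit refcount: the
 * shared timeout timer is started with the first watched socket and stopped
 * with the last. A minimal sketch of the same pattern against the modern
 * libuv API (the registry callbacks are illustrative placeholders, not from
 * the source):
 */
#include <uv.h>

static void on_shared_timeout(uv_timer_t *timer) {
  /* ...process timeouts for all registered sockets... */
  (void) timer;
}

static void watch_socket(uv_timer_t *shared_timer) {
  /* First socket: the timer is not yet active, so start it. */
  if (!uv_is_active((uv_handle_t *) shared_timer))
    uv_timer_start(shared_timer, on_shared_timeout, 1000, 1000);
  /* ...register the socket itself... */
}

static void unwatch_socket(uv_timer_t *shared_timer, int remaining) {
  /* Last socket gone: stop the timer so it no longer keeps the loop alive. */
  if (remaining == 0)
    uv_timer_stop(shared_timer);
}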
static void once_cb(uv_timer_t* handle) {
  TUV_ASSERT(handle != NULL);
  TUV_ASSERT(0 == uv_is_active((uv_handle_t*) handle));

  once_cb_called++;

  uv_close((uv_handle_t*)handle, once_close_cb);

  /* Just call this randomly for the code coverage. */
  uv_update_time(uv_default_loop());
}
static void poll_cb(uv_fs_poll_t* handle,
                    int status,
                    const uv_stat_t* prev,
                    const uv_stat_t* curr) {
  uv_stat_t zero_statbuf;

  memset(&zero_statbuf, 0, sizeof(zero_statbuf));

  ASSERT(handle == &poll_handle);
  ASSERT(1 == uv_is_active((uv_handle_t*) handle));
  ASSERT(prev != NULL);
  ASSERT(curr != NULL);

  switch (poll_cb_called++) {
  case 0:
    ASSERT(status == UV_ENOENT);
    ASSERT(0 == memcmp(prev, &zero_statbuf, sizeof(zero_statbuf)));
    ASSERT(0 == memcmp(curr, &zero_statbuf, sizeof(zero_statbuf)));
    touch_file(FIXTURE);
    break;

  case 1:
    ASSERT(status == 0);
    ASSERT(0 == memcmp(prev, &zero_statbuf, sizeof(zero_statbuf)));
    ASSERT(0 != memcmp(curr, &zero_statbuf, sizeof(zero_statbuf)));
    ASSERT(0 == uv_timer_start(&timer_handle, timer_cb, 20, 0));
    break;

  case 2:
    ASSERT(status == 0);
    ASSERT(0 != memcmp(prev, &zero_statbuf, sizeof(zero_statbuf)));
    ASSERT(0 != memcmp(curr, &zero_statbuf, sizeof(zero_statbuf)));
    ASSERT(0 == uv_timer_start(&timer_handle, timer_cb, 200, 0));
    break;

  case 3:
    ASSERT(status == 0);
    ASSERT(0 != memcmp(prev, &zero_statbuf, sizeof(zero_statbuf)));
    ASSERT(0 != memcmp(curr, &zero_statbuf, sizeof(zero_statbuf)));
    remove(FIXTURE);
    break;

  case 4:
    ASSERT(status == UV_ENOENT);
    ASSERT(0 != memcmp(prev, &zero_statbuf, sizeof(zero_statbuf)));
    ASSERT(0 == memcmp(curr, &zero_statbuf, sizeof(zero_statbuf)));
    uv_close((uv_handle_t*)handle, close_cb);
    break;

  default:
    ASSERT(0);
  }
}
static void repeat_cb(uv_timer_t* handle) {
  printf("REPEAT_CB\n");

  ASSERT(handle != NULL);
  ASSERT(1 == uv_is_active((uv_handle_t*) handle));

  repeat_cb_called++;

  if (repeat_cb_called == 5) {
    uv_close((uv_handle_t*)handle, repeat_close_cb);
  }
}
/**
 * Shut down uv and clean up; this should be the first thing we do when
 * exiting. It closes all current connections to uv. If we have multiple
 * instances of the client, that will close their io.
 */
void UVEventLoop::ForceStopAndClose() {
    DEBUG_OUT("UVEventLoop::~UVEventLoop()");
    StopUVRunner(true, true); // wait till we are definitely out of harm's way
    for (auto it : resolvers) {
        it->disposed = true;
    }
    DEBUG_OUT(readers.size() << " readers");
    for (auto it : readers) {
        uv_handle_t *h = (uv_handle_t*)it->client->socket;
        if (uv_is_active(h)) uv_close(h, nullptr);
        it->disposed = true;
    }
    DEBUG_OUT(writers.size() << " writers");
    for (auto it : writers) {
        uv_handle_t *h = (uv_handle_t*)it->client->socket;
        if (uv_is_active(h)) uv_close(h, nullptr);
        it->disposed = true;
    }
    DEBUG_OUT(closers.size() << " closers");
    for (auto it : closers) {
        uv_handle_t *h = (uv_handle_t*)it->client->socket;
        if (uv_is_active(h)) uv_close(h, nullptr);
        it->disposed = true;
    }
    for (auto it : scheduledTimers) { // won't have been called
        it->disposed = true;
    }
    DEBUG_OUT(activeTimers.size() << " timers");
    for (auto it : activeTimers) {
        uv_handle_t *h = (uv_handle_t*)&it->timer;
        if (uv_is_active(h)) uv_close(h, nullptr);
    }
    for (auto it : activeWorkers) { // doesn't get closed
        it->disposed = true;
    }
    uv_run(loop, UV_RUN_DEFAULT);
    HandleRunnerQueues(); // do the actual cleanup
}
static void once_cb(uv_timer_t* handle, int status) {
  printf("ONCE_CB %d\n", once_cb_called);

  ASSERT(handle != NULL);
  ASSERT(status == 0);
  ASSERT(!uv_is_active((uv_handle_t*)handle));

  once_cb_called++;

  uv_close((uv_handle_t*)handle, once_close_cb);

  /* Just call this randomly for the code coverage. */
  uv_update_time(uv_default_loop());
}
/**
 * Static wrapper around the user callback function
 * (typedef void (*uv_timer_cb)(uv_timer_t* handle)).
 */
void UVEventLoop::OnTick(uv_timer_t* timer) {
    // DEBUG_OUT("tick!!");
    UVTimer* otCBp = nullptr;
    if (timer) {
        otCBp = static_cast<UVTimer*>(timer->data);
        if (otCBp) {
            (*otCBp->tikCB)();
            if (!uv_is_active((uv_handle_t*)timer)) { // it's a one-shot timer!
                otCBp->disposed = true;
            }
        }
    }
}
CAMLprim value camluv_is_active(value handle)
{
  CAMLparam1(handle);
  CAMLlocal1(is_active);

  camluv_handle_t *camluv_handle = camluv_handle_struct_val(handle);
  if (uv_is_active(camluv_handle->uv_handle)) {
    is_active = Val_int(1);
    CAMLreturn(is_active);
  } else {
    is_active = Val_int(0);
    CAMLreturn(is_active);
  }
}
static void after_queue_work(uv_work_t* req, int status) {
  automem_t mem;
  webqueuework_t * qwork = container_of(req, webqueuework_t, work);

  automem_init(&mem, 256);

  if (qwork->conn->proto == WEB_PROTO_HTTP) {
    if (qwork->status == 101 && qwork->request->upgrade) {
      webheader_t k;
      rbnode_t * n;
      const char * ver = NULL, * key = NULL;

      k.key = "Sec-WebSocket-Version";
      if ((n = rb_find(&qwork->request->headers, &k.n)))
        ver = container_of(n, webheader_t, n)->val;

      k.key = "Sec-WebSocket-Key";
      if ((n = rb_find(&qwork->request->headers, &k.n)))
        key = container_of(n, webheader_t, n)->val;

      if (NULL != key && NULL != ver) {
        ws_do_handeshake(&mem, key, strlen(key));
        qwork->conn->proto = WEB_PROTO_WEBSOCKET;
        if (uv_is_active((uv_handle_t *)&qwork->conn->conn)) {
          wsparser_init(&qwork->conn->ws_parser, 13, 20480);
          /* take a reference so the request isn't dropped */
          qwork->conn->request = webrequest_get(qwork->request);
        }
        goto contents_prepare_final;
      }
    }

    rbnode_t * n = rb_first(&qwork->headers);
    automem_init_headers(&mem, qwork->status, qwork->flags);
    while (n) {
      webheader_t * h = container_of(n, webheader_t, n);
      automem_append_voidp(&mem, h->key, strlen(h->key));
      automem_append_voidp(&mem, ": ", 2);
      automem_append_voidp(&mem, h->val, strlen(h->val));
      automem_append_voidp(&mem, "\r\n", 2);
      n = rb_next(n);
    }
    automem_append_contents(&mem, qwork->mem.pdata, qwork->mem.size);
  } else if (qwork->conn->proto == WEB_PROTO_WEBSOCKET) {
    wsframe_make(&mem, WS_TEXT_FRAME, 0, qwork->mem.pdata, qwork->mem.size);
  }

contents_prepare_final:
  if (0 != webconn_sendmem(qwork->conn, &mem)) {
    automem_uninit(&mem);
  }
  webqueuework_free(qwork);
}
Peer::~Peer() {
  VLOG(5) << "{" << static_cast< void * >(this) << "}"
          << " flags=" << std::hex << getHandle()->flags
          << " UV_CLOSING "             << (getHandle()->flags & 0x00001)
          << " UV_CLOSED "              << (getHandle()->flags & 0x00002)
          << " UV_STREAM_READING "      << (getHandle()->flags & 0x00004)
          << " UV_STREAM_SHUTTING "     << (getHandle()->flags & 0x00008)
          << " UV_STREAM_SHUT "         << (getHandle()->flags & 0x00010)
          << " UV_STREAM_READABLE "     << (getHandle()->flags & 0x00020)
          << " UV_STREAM_WRITABLE "     << (getHandle()->flags & 0x00040)
          << " UV_STREAM_BLOCKING "     << (getHandle()->flags & 0x00080)
          << " UV_STREAM_READ_PARTIAL " << (getHandle()->flags & 0x00100)
          << " UV_STREAM_READ_EOF "     << (getHandle()->flags & 0x00200)
          << " UV_TCP_NODELAY "         << (getHandle()->flags & 0x00400)
          << " UV_TCP_KEEPALIVE "       << (getHandle()->flags & 0x00800)
          << " UV_TCP_SINGLE_ACCEPT "   << (getHandle()->flags & 0x01000)
          << " UV_HANDLE_IPV6 "         << (getHandle()->flags & 0x10000);

  assert(!uvRefCnt_);
  assert(!uv_is_active(reinterpret_cast< uv_handle_t * >(getHandle())));

  getLoopData()->down();

#ifdef COMMS_DEBUG_OBJECT_COUNT
  --counter;
#endif
}
static void repeat_2_cb(uv_timer_t* handle) {
  TUV_ASSERT(handle == &repeat_2);
  TUV_ASSERT(repeat_2_cb_allowed);

  repeat_2_cb_called++;

  if (uv_timer_get_repeat(&repeat_2) == 0) {
    TUV_ASSERT(0 == uv_is_active((uv_handle_t*) handle));
    uv_close((uv_handle_t*)handle, close_cb);
    return;
  }

  TUV_ASSERT(uv_timer_get_repeat(&repeat_2) == 100);

  /* This shouldn't take effect immediately. */
  uv_timer_set_repeat(&repeat_2, 0);
}
static void delay_timer_cb(uv_timer_t* timer) {
  connection_context_t* context = (connection_context_t*) timer->data;
  int r;

  /* Timer should auto stop. */
  ASSERT(0 == uv_is_active((uv_handle_t*) timer));

  /* Add the requested events to the poll mask. */
  ASSERT(context->delayed_events != 0);
  context->events |= context->delayed_events;
  context->delayed_events = 0;

  r = uv_poll_start(&context->poll_handle,
                    context->events,
                    connection_poll_cb);
  ASSERT(r == 0);
}
void tls__write_timeout_check_cb(uv_timer_t* t) {
    tr_uv_wi_t* wi = NULL;
    int cont = 0;
    time_t ct = time(0);

    GET_TLS(t); /* declares the `tls` and `tt` locals used below */

    wi = tls->should_retry;
    if (wi && wi->timeout != PC_WITHOUT_TIMEOUT && ct > wi->ts + wi->timeout) {
        if (TR_UV_WI_IS_NOTIFY(wi->type)) {
            pc_lib_log(PC_LOG_WARN,
                       "tls__write_timeout_check_cb - notify timeout, seq num: %u",
                       wi->seq_num);
            pc_trans_sent(tt->client, wi->seq_num, PC_RC_TIMEOUT);
        } else if (TR_UV_WI_IS_RESP(wi->type)) {
            pc_lib_log(PC_LOG_WARN,
                       "tls__write_timeout_check_cb - request timeout, req id: %u",
                       wi->req_id);
            pc_trans_resp(tt->client, wi->req_id, PC_RC_TIMEOUT, NULL);
        }

        /* if internal, just drop it. */
        pc_lib_free(wi->buf.base);
        wi->buf.base = NULL;
        wi->buf.len = 0;

        if (PC_IS_PRE_ALLOC(wi->type)) {
            PC_PRE_ALLOC_SET_IDLE(wi->type);
        } else {
            pc_lib_free(wi);
        }
        tls->should_retry = NULL;
    }

    pc_mutex_lock(&tt->wq_mutex);
    cont = tcp__check_queue_timeout(&tls->when_tcp_is_writing_queue, tt->client, cont);
    pc_mutex_unlock(&tt->wq_mutex);

    if (cont && !uv_is_active((uv_handle_t*) t)) {
        uv_timer_start(t, tt->write_check_timeout_cb,
                       PC_TIMEOUT_CHECK_INTERVAL * 1000, 0);
    }

    tcp__write_check_timeout_cb(t);
}
Status api_send (Server *server, uv_buf_t *buf, uv_write_cb callback)
{
  uv_pipe_t *client = (uv_pipe_t *) &server->api.client;
  uv_write_t *req = NULL;
  int rc = 0;

  if (!uv_is_active ((uv_handle_t *) client))
    goto error;

  /* uv_write() is asynchronous: the request must outlive this call, so it
   * is heap-allocated here and the write callback is expected to free it. */
  req = malloc (sizeof (*req));
  if (!req)
    goto error;

  rc = uv_write (req, (uv_stream_t *) client, buf, 1, callback);
  if (rc < 0) {
    free (req);
    goto error;
  }

  return G_OK;

error:
  if (rc)
    debug ("error: %s", uv_strerror (rc));
  return G_ERR;
}
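/* A fuller sketch of the ownership pattern api_send needs: both the
 * uv_write_t and the bytes it points at must stay valid until the write
 * callback fires, so bundle them and free them there. All names here
 * (write_ctx_t, on_write, send_copy) are illustrative, not from the source:
 */
#include <stdlib.h>
#include <string.h>
#include <uv.h>

typedef struct {
  uv_write_t req;  /* first member, so the callback can cast req back */
  uv_buf_t buf;    /* owned copy of the outgoing bytes */
} write_ctx_t;

static void on_write(uv_write_t *req, int status) {
  write_ctx_t *ctx = (write_ctx_t *) req;
  (void) status;
  free(ctx->buf.base);
  free(ctx);
}

static int send_copy(uv_stream_t *stream, const char *data, size_t len) {
  int rc;
  write_ctx_t *ctx = malloc(sizeof(*ctx));
  if (ctx == NULL)
    return UV_ENOMEM;

  ctx->buf = uv_buf_init(malloc(len), (unsigned int) len);
  if (ctx->buf.base == NULL) {
    free(ctx);
    return UV_ENOMEM;
  }
  memcpy(ctx->buf.base, data, len);

  rc = uv_write(&ctx->req, stream, &ctx->buf, 1, on_write);
  if (rc < 0) {
    free(ctx->buf.base);
    free(ctx);
  }
  return rc;
}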
static void repeat_2_cb(uv_timer_t* handle, int status) {
  ASSERT(handle == &repeat_2);
  ASSERT(status == 0);
  ASSERT(repeat_2_cb_allowed);

  LOGF("repeat_2_cb called after %ld ms\n",
       (long int)(uv_now() - start_time));

  repeat_2_cb_called++;

  if (uv_timer_get_repeat(&repeat_2) == 0) {
    ASSERT(!uv_is_active((uv_handle_t*)handle));
    uv_close((uv_handle_t*)handle, close_cb);
    return;
  }

  LOGF("uv_timer_get_repeat %ld ms\n",
       (long int)uv_timer_get_repeat(&repeat_2));
  ASSERT(uv_timer_get_repeat(&repeat_2) == 100);

  /* This shouldn't take effect immediately. */
  uv_timer_set_repeat(&repeat_2, 0);
}
void nub_loop_dispose(nub_loop_t* loop) {
  ASSERT(0 == uv_loop_alive(&loop->uvloop));
  ASSERT(1 == fuq_empty(&loop->blocking_queue_));
  ASSERT(0 == loop->ref_);
  ASSERT(NULL != loop->work_ping_);
  ASSERT(0 == uv_has_ref((uv_handle_t*) loop->work_ping_));
  ASSERT(1 == fuq_empty(&loop->thread_dispose_queue_));

  uv_close((uv_handle_t*) loop->work_ping_, nub__free_handle_cb);
  uv_close((uv_handle_t*) &loop->queue_processor_, NULL);
  ASSERT(0 == uv_is_active((uv_handle_t*) loop->work_ping_));

  fuq_dispose(&loop->thread_dispose_queue_);
  uv_mutex_destroy(&loop->thread_dispose_lock_);
  uv_sem_destroy(&loop->loop_lock_sem_);
  fuq_dispose(&loop->blocking_queue_);
  uv_mutex_destroy(&loop->queue_processor_lock_);
  fuq_dispose(&loop->work_queue_);
  uv_mutex_destroy(&loop->work_lock_);

  CHECK_EQ(0, uv_run(&loop->uvloop, UV_RUN_NOWAIT));
  CHECK_NE(UV_EBUSY, uv_loop_close(&loop->uvloop));
}
static void repeat_2_cb(uv_timer_t* handle) {
  ASSERT(handle == &repeat_2);
  ASSERT(repeat_2_cb_allowed);

  fprintf(stderr, "repeat_2_cb called after %ld ms\n",
          (long int)(uv_now(uv_default_loop()) - start_time));
  fflush(stderr);

  repeat_2_cb_called++;

  if (uv_timer_get_repeat(&repeat_2) == 0) {
    ASSERT(0 == uv_is_active((uv_handle_t*) handle));
    uv_close((uv_handle_t*)handle, close_cb);
    return;
  }

  fprintf(stderr, "uv_timer_get_repeat %ld ms\n",
          (long int)uv_timer_get_repeat(&repeat_2));
  fflush(stderr);
  ASSERT(uv_timer_get_repeat(&repeat_2) == 100);

  /* This shouldn't take effect immediately. */
  uv_timer_set_repeat(&repeat_2, 0);
}
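/* The repeat_2 tests above pin down a subtlety of the timer API:
 * uv_timer_set_repeat() does not reschedule the currently pending timeout;
 * the new repeat value is only picked up at the next expiry (or via
 * uv_timer_again()). A minimal standalone sketch of that behavior, assuming
 * the default loop; handle and callback names are illustrative:
 */
#include <uv.h>

static void tick(uv_timer_t *t) {
  uint64_t repeat = uv_timer_get_repeat(t);
  if (repeat > 10) {
    /* Takes effect from the *next* expiry, not the pending one. */
    uv_timer_set_repeat(t, repeat / 2);
  } else {
    uv_timer_stop(t);  /* after this, uv_is_active() reports 0 */
  }
}

int main(void) {
  uv_timer_t timer;
  uv_timer_init(uv_default_loop(), &timer);
  uv_timer_start(&timer, tick, 80, 80);  /* first timeout 80 ms, repeat 80 ms */
  return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
}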
void skyray_timer_cancel(skyray_timer_t *self) {
    if (uv_is_active((uv_handle_t *)&self->timer)) {
        uv_close((uv_handle_t *)&self->timer, close_cb);
    }
}
static void connection_poll_cb(uv_poll_t* handle, int status, int events) {
  connection_context_t* context = (connection_context_t*) handle->data;
  unsigned int new_events;
  int r;

  ASSERT(status == 0);
  ASSERT(events & context->events);
  ASSERT(!(events & ~context->events));

  new_events = context->events;

  if (events & UV_READABLE) {
    int action = rand() % 7;

    switch (action) {
    case 0:
    case 1: {
      /* Read a couple of bytes. */
      static char buffer[74];
      r = recv(context->sock, buffer, sizeof buffer, 0);
      ASSERT(r >= 0);

      if (r > 0) {
        context->read += r;
      } else {
        /* Got FIN. */
        context->got_fin = 1;
        new_events &= ~UV_READABLE;
      }
      break;
    }

    case 2:
    case 3: {
      /* Read until EAGAIN. */
      static char buffer[931];
      r = recv(context->sock, buffer, sizeof buffer, 0);
      ASSERT(r >= 0);

      while (r > 0) {
        context->read += r;
        r = recv(context->sock, buffer, sizeof buffer, 0);
      }

      if (r == 0) {
        /* Got FIN. */
        context->got_fin = 1;
        new_events &= ~UV_READABLE;
      } else {
        ASSERT(got_eagain());
      }
      break;
    }

    case 4:
      /* Ignore. */
      break;

    case 5:
      /* Stop reading for a while. Restart in timer callback. */
      new_events &= ~UV_READABLE;
      if (!uv_is_active((uv_handle_t*) &context->timer_handle)) {
        context->delayed_events = UV_READABLE;
        uv_timer_start(&context->timer_handle, delay_timer_cb, 10, 0);
      } else {
        context->delayed_events |= UV_READABLE;
      }
      break;

    case 6:
      /* Fudge with the event mask. */
      uv_poll_start(&context->poll_handle, UV_WRITABLE, connection_poll_cb);
      uv_poll_start(&context->poll_handle, UV_READABLE, connection_poll_cb);
      context->events = UV_READABLE;
      break;

    default:
      ASSERT(0);
    }
  }

  if (events & UV_WRITABLE) {
    if (context->sent < TRANSFER_BYTES &&
        !(test_mode == UNIDIRECTIONAL && context->is_server_connection)) {
      /* We have to send more bytes. */
      int action = rand() % 7;

      switch (action) {
      case 0:
      case 1: {
        /* Send a couple of bytes. */
        static char buffer[103];

        int send_bytes = MIN(TRANSFER_BYTES - context->sent, sizeof buffer);
        ASSERT(send_bytes > 0);

        r = send(context->sock, buffer, send_bytes, 0);

        if (r < 0) {
          ASSERT(got_eagain());
          spurious_writable_wakeups++;
          break;
        }

        ASSERT(r > 0);
        context->sent += r;
        valid_writable_wakeups++;
        break;
      }

      case 2:
      case 3: {
        /* Send until EAGAIN. */
        static char buffer[1234];

        int send_bytes = MIN(TRANSFER_BYTES - context->sent, sizeof buffer);
        ASSERT(send_bytes > 0);

        r = send(context->sock, buffer, send_bytes, 0);

        if (r < 0) {
          ASSERT(got_eagain());
          spurious_writable_wakeups++;
          break;
        }

        ASSERT(r > 0);
        valid_writable_wakeups++;
        context->sent += r;

        while (context->sent < TRANSFER_BYTES) {
          send_bytes = MIN(TRANSFER_BYTES - context->sent, sizeof buffer);
          ASSERT(send_bytes > 0);

          r = send(context->sock, buffer, send_bytes, 0);
          if (r <= 0)
            break;
          context->sent += r;
        }
        ASSERT(r > 0 || got_eagain());
        break;
      }

      case 4:
        /* Ignore. */
        break;

      case 5:
        /* Stop sending for a while. Restart in timer callback. */
        new_events &= ~UV_WRITABLE;
        if (!uv_is_active((uv_handle_t*) &context->timer_handle)) {
          context->delayed_events = UV_WRITABLE;
          uv_timer_start(&context->timer_handle, delay_timer_cb, 100, 0);
        } else {
          context->delayed_events |= UV_WRITABLE;
        }
        break;

      case 6:
        /* Fudge with the event mask. */
        uv_poll_start(&context->poll_handle, UV_READABLE, connection_poll_cb);
        uv_poll_start(&context->poll_handle, UV_WRITABLE, connection_poll_cb);
        context->events = UV_WRITABLE;
        break;

      default:
        ASSERT(0);
      }
    } else {
      /* Nothing more to write. Send FIN. */
      int r;
#ifdef _WIN32
      r = shutdown(context->sock, SD_SEND);
#else
      r = shutdown(context->sock, SHUT_WR);
#endif
      ASSERT(r == 0);
      context->sent_fin = 1;
      new_events &= ~UV_WRITABLE;
    }
  }

  if (events & UV_DISCONNECT) {
    context->got_disconnect = 1;
    ++disconnects;
    new_events &= ~UV_DISCONNECT;
  }

  if (context->got_fin && context->sent_fin && context->got_disconnect) {
    /* Sent and received FIN. Close and destroy context. */
    close_socket(context->sock);
    destroy_connection_context(context);
    context->events = 0;
  } else if (new_events != context->events) {
    /* Poll mask changed. Call uv_poll_start again. */
    context->events = new_events;
    uv_poll_start(handle, new_events, connection_poll_cb);
  }

  /* Assert that uv_is_active works correctly for poll handles. */
  if (context->events != 0) {
    ASSERT(1 == uv_is_active((uv_handle_t*) handle));
  } else {
    ASSERT(0 == uv_is_active((uv_handle_t*) handle));
  }
}
bool Handle::active() const { return _ptr && uv_is_active(_ptr) != 0; }
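/* Every binding above reduces to the same contract: uv_is_active() returns
 * non-zero while the handle has work scheduled (a started timer, a polled
 * fd, a reading stream) and 0 otherwise. A minimal standalone sketch of
 * that lifecycle, assuming the default loop; names are illustrative:
 */
#include <assert.h>
#include <uv.h>

static void noop_cb(uv_timer_t *t) { (void) t; }

int main(void) {
  uv_timer_t timer;
  uv_timer_init(uv_default_loop(), &timer);
  assert(!uv_is_active((uv_handle_t *) &timer));  /* initialized, not started */

  uv_timer_start(&timer, noop_cb, 1000, 0);
  assert(uv_is_active((uv_handle_t *) &timer));   /* scheduled => active */

  uv_timer_stop(&timer);
  assert(!uv_is_active((uv_handle_t *) &timer));  /* stopped => inactive */

  uv_close((uv_handle_t *) &timer, NULL);
  return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
}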