static void saturate_threadpool(void) { uv_work_t* req; ASSERT(0 == uv_cond_init(&signal_cond)); ASSERT(0 == uv_mutex_init(&signal_mutex)); ASSERT(0 == uv_mutex_init(&wait_mutex)); uv_mutex_lock(&signal_mutex); uv_mutex_lock(&wait_mutex); for (num_threads = 0; /* empty */; num_threads++) { req = malloc(sizeof(*req)); ASSERT(req != NULL); ASSERT(0 == uv_queue_work(uv_default_loop(), req, work_cb, done_cb)); /* Expect to get signalled within 350 ms, otherwise assume that * the thread pool is saturated. As with any timing dependent test, * this is obviously not ideal. */ if (uv_cond_timedwait(&signal_cond, &signal_mutex, (uint64_t)(350 * 1e6))) { ASSERT(0 == uv_cancel((uv_req_t*) req)); break; } } }
void FileRequestBaton::cancel() { canceled = true; // uv_cancel fails frequently when the request has already been started. // In that case, we have to let it complete and check the canceled bool // instead. uv_cancel((uv_req_t *)&req); }
/* OCaml stub: attempt to cancel a queued libuv work request.
 * Returns Val_long(1) when uv_cancel succeeded, Val_long(0) when the
 * wrapper is NULL or the request could not be cancelled (e.g. already
 * running). "na" = noalloc: never touches the OCaml heap. */
CAMLprim value
uwt_workreq_cancel_na(value o_req)
{
  struct req * wp = (void*)Field(o_req,0);

  if ( wp == NULL )
    return Val_long(0);
  if ( uv_cancel(wp->req) != 0 )
    return Val_long(0);
  return Val_long(1);
}
// Metamethod to allow storing anything in the userdata's environment static int luv_cancel(lua_State* L) { uv_req_t* req = (uv_req_t*)luv_check_req(L, 1); int ret = uv_cancel(req); if (ret < 0) return luv_error(L, ret); luv_cleanup_req(L, (luv_req_t*)req->data); req->data = NULL; lua_pushinteger(L, ret); return 1; }
/* Python method: Request.cancel() -> bool
 * True when the underlying libuv request existed and uv_cancel()
 * succeeded, False otherwise (no request, or already executing). */
static PyObject *
Request_func_cancel(Request *self)
{
    int cancelled = 0;

    if (self->req_ptr != NULL)
        cancelled = (uv_cancel(self->req_ptr) == 0);

    if (cancelled)
        Py_RETURN_TRUE;
    Py_RETURN_FALSE;
}
/* Timer callback: cancel every queued request in the batch (they must
 * all still be pending, hence the ASSERT), close the timer handle,
 * release the blocked threadpool and record the invocation. */
static void timer_cb(uv_timer_t* handle, int status) {
  struct cancel_info* ci =
      container_of(handle, struct cancel_info, timer_handle);
  char* base = (char*) ci->reqs;
  unsigned i;

  for (i = 0; i < ci->nreqs; i++)
    ASSERT(0 == uv_cancel((uv_req_t*) (base + i * ci->stride)));

  uv_close((uv_handle_t*) &ci->timer_handle, NULL);
  unblock_threadpool();
  timer_cb_called++;
}
void FSTask::kill() { uv_cancel(reinterpret_cast<uv_req_t*>(&fs_)); }
/* Tear down a stream's transport-specific state, mark it disconnected and
 * notify on-error listeners.
 *
 * NOTE(review): a stream that is already STREAM_TYPE_DISCONNECTED only
 * cancels any in-flight resolve/connect requests and returns early — the
 * error closures are NOT re-invoked in that case. */
void rig_pb_stream_disconnect(rig_pb_stream_t *stream)
{
    switch (stream->type) {
#ifdef USE_UV
    case STREAM_TYPE_FD:
        /* Stop reading before closing so no read callbacks fire on a
         * half-torn-down pipe. */
        uv_read_stop((uv_stream_t *)&stream->fd.uv_fd_pipe);
        uv_close((uv_handle_t *)&stream->fd.uv_fd_pipe,
                 NULL /* closed callback */);
        break;
    case STREAM_TYPE_TCP:
        uv_read_stop((uv_stream_t *)&stream->tcp.socket);
        uv_close((uv_handle_t *)&stream->tcp.socket,
                 NULL /* closed callback */);
        break;
#endif
    case STREAM_TYPE_BUFFER: {
        int i;

        /* Give all incoming write closures back to the other end
         * so they can be freed */
        for (i = 0; i < stream->buffer.incoming_write_closures->len; i++) {
            rig_pb_stream_write_closure_t *closure =
                c_array_index(stream->buffer.incoming_write_closures,
                              void *, i);
            c_array_append_val(
                stream->buffer.other_end->buffer.finished_write_closures,
                closure);
        }
        c_array_free(stream->buffer.incoming_write_closures,
                     true /* free storage */);
        stream->buffer.incoming_write_closures = NULL;

        /* Return our own already-finished closures before freeing the
         * list that held them. */
        drain_finished_write_closures(stream);
        c_array_free(stream->buffer.finished_write_closures,
                     true /* free storage */);
        stream->buffer.finished_write_closures = NULL;

        /* Unlink the peer in both directions so neither side follows a
         * pointer into a dead stream. */
        stream->buffer.other_end->buffer.other_end = NULL;
        stream->buffer.other_end = NULL;
    }
        if (stream->buffer.read_idle) {
            rut_poll_shell_remove_idle_FIXME(stream->shell,
                                             stream->buffer.read_idle);
            stream->buffer.read_idle = NULL;
        }
        break;
#ifdef __EMSCRIPTEN__
    case STREAM_TYPE_WORKER_IPC:
        stream->worker_ipc.worker = 0;
        break;
    case STREAM_TYPE_WEBSOCKET_CLIENT:
        if (stream->websocket_client.socket != -1) {
            close(stream->websocket_client.socket);
            stream->websocket_client.socket = -1;
        }
        break;
#endif
#ifdef USE_UV
    case STREAM_TYPE_WEBSOCKET_SERVER:
        /* Not owned by the stream; just drop the reference. */
        stream->websocket_server.ctx = NULL;
        break;
#endif
    case STREAM_TYPE_DISCONNECTED:
#ifdef USE_UV
        /* Best-effort: cancel async name-resolution / connection
         * requests that may still be pending from a connect attempt. */
        if (stream->resolving)
            uv_cancel((uv_req_t *)&stream->resolver);
        if (stream->connecting)
            uv_cancel((uv_req_t *)&stream->connection_request);
#endif
        return;
    }

    stream->type = STREAM_TYPE_DISCONNECTED;

    rut_closure_list_invoke(&stream->on_error_closures,
                            rig_pb_stream_callback_t, stream);
}
// Request cancellation of the worker's queued libuv work item.
// Best-effort: uv_cancel() fails if the item has already been handed to
// a threadpool thread; the return value is intentionally ignored,
// preserving the original fire-and-forget contract.
inline void PoolWorker<WorkReturn>::stop() {
    uv_cancel(reinterpret_cast<uv_req_t*>(_managedWorker));
}