void uv_async_close(uv_loop_t* loop, uv_async_t* handle) {
  if (!((uv_async_t*) handle)->async_sent) {
    uv_want_endgame(loop, (uv_handle_t*) handle);
  }

  uv__handle_closing(handle);
}
void uv_process_tcp_write_req(uv_loop_t* loop, uv_tcp_t* handle,
    uv_write_t* req) {
  assert(handle->type == UV_TCP);

  assert(handle->write_queue_size >= req->queued_bytes);
  handle->write_queue_size -= req->queued_bytes;

  if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
    if (req->wait_handle != INVALID_HANDLE_VALUE) {
      UnregisterWait(req->wait_handle);
    }
    if (req->event_handle) {
      CloseHandle(req->event_handle);
    }
  }

  if (req->cb) {
    uv__set_sys_error(loop, GET_REQ_SOCK_ERROR(req));
    ((uv_write_cb) req->cb)(req, loop->last_err.code == UV_OK ? 0 : -1);
  }

  handle->write_reqs_pending--;
  if (handle->flags & UV_HANDLE_SHUTTING &&
      handle->write_reqs_pending == 0) {
    uv_want_endgame(loop, (uv_handle_t*) handle);
  }

  DECREASE_PENDING_REQ_COUNT(handle);

  uv_unref(loop);
}
void uv_process_tcp_write_req(uv_loop_t* loop, uv_tcp_t* handle,
    uv_write_t* req) {
  int err;

  assert(handle->type == UV_TCP);

  assert(handle->write_queue_size >= req->queued_bytes);
  handle->write_queue_size -= req->queued_bytes;

  UNREGISTER_HANDLE_REQ(loop, handle, req);

  if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
    if (req->wait_handle != INVALID_HANDLE_VALUE) {
      UnregisterWait(req->wait_handle);
      req->wait_handle = INVALID_HANDLE_VALUE;
    }
    if (req->event_handle) {
      CloseHandle(req->event_handle);
      req->event_handle = NULL;
    }
  }

  if (req->cb) {
    err = GET_REQ_SOCK_ERROR(req);
    req->cb(req, uv_translate_sys_error(err));
  }

  handle->write_reqs_pending--;
  if (handle->shutdown_req != NULL &&
      handle->write_reqs_pending == 0) {
    uv_want_endgame(loop, (uv_handle_t*) handle);
  }

  DECREASE_PENDING_REQ_COUNT(handle);
}
static void uv__fast_poll_process_poll_req(uv_loop_t* loop, uv_poll_t* handle,
    uv_req_t* req) {
  unsigned char mask_events;
  AFD_POLL_INFO* afd_poll_info;

  if (req == &handle->poll_req_1) {
    afd_poll_info = &handle->afd_poll_info_1;
    handle->submitted_events_1 = 0;
    mask_events = handle->mask_events_1;
  } else if (req == &handle->poll_req_2) {
    afd_poll_info = &handle->afd_poll_info_2;
    handle->submitted_events_2 = 0;
    mask_events = handle->mask_events_2;
  } else {
    assert(0);
    return;
  }

  /* Report an error unless the select was just interrupted. */
  if (!REQ_SUCCESS(req)) {
    DWORD error = GET_REQ_SOCK_ERROR(req);
    if (error != WSAEINTR && handle->events != 0) {
      handle->events = 0; /* Stop the watcher */
      uv__set_sys_error(loop, error);
      handle->poll_cb(handle, -1, 0);
    }

  } else if (afd_poll_info->NumberOfHandles >= 1) {
    unsigned char events = 0;

    if ((afd_poll_info->Handles[0].Events & (AFD_POLL_RECEIVE |
        AFD_POLL_DISCONNECT | AFD_POLL_ACCEPT | AFD_POLL_ABORT)) != 0) {
      events |= UV_READABLE;
    }
    if ((afd_poll_info->Handles[0].Events & (AFD_POLL_SEND |
        AFD_POLL_CONNECT_FAIL)) != 0) {
      events |= UV_WRITABLE;
    }

    events &= handle->events & ~mask_events;

    if (afd_poll_info->Handles[0].Events & AFD_POLL_LOCAL_CLOSE) {
      /* Stop polling. */
      handle->events = 0;
      uv__handle_stop(handle);
    }

    if (events != 0) {
      handle->poll_cb(handle, 0, events);
    }
  }

  if ((handle->events & ~(handle->submitted_events_1 |
      handle->submitted_events_2)) != 0) {
    uv__fast_poll_submit_poll_req(loop, handle);
  } else if ((handle->flags & UV_HANDLE_CLOSING) &&
             handle->submitted_events_1 == 0 &&
             handle->submitted_events_2 == 0) {
    uv_want_endgame(loop, (uv_handle_t*) handle);
  }
}
int uv_shutdown(uv_shutdown_t* req, uv_stream_t* handle, uv_shutdown_cb cb) {
  uv_loop_t* loop = handle->loop;

  if (!(handle->flags & UV_HANDLE_WRITABLE)) {
    uv__set_artificial_error(loop, UV_EPIPE);
    return -1;
  }

  uv_req_init(loop, (uv_req_t*) req);
  req->type = UV_SHUTDOWN;
  req->handle = handle;
  req->cb = cb;

  handle->flags &= ~UV_HANDLE_WRITABLE;
  handle->shutdown_req = req;
  handle->reqs_pending++;
  REGISTER_HANDLE_REQ(loop, handle, req);

  uv_want_endgame(loop, (uv_handle_t*) handle);

  return 0;
}
void uv_signal_close(uv_loop_t* loop, uv_signal_t* handle) {
  uv_signal_stop(handle);
  uv__handle_closing(handle);

  if (handle->pending_signum == 0) {
    /* If a signal request is still pending, the endgame is queued later by */
    /* uv_process_signal_req once that request completes. */
    uv_want_endgame(loop, (uv_handle_t*) handle);
  }
}
static void uv__slow_poll_close(uv_loop_t* loop, uv_poll_t* handle) {
  handle->events = 0;

  if (handle->submitted_events_1 == 0 &&
      handle->submitted_events_2 == 0) {
    uv_want_endgame(loop, (uv_handle_t*) handle);
  }
}
void uv_fs_event_close(uv_loop_t* loop, uv_fs_event_t* handle) {
  uv_fs_event_stop(handle);

  uv__handle_closing(handle);

  if (!handle->req_pending) {
    uv_want_endgame(loop, (uv_handle_t*) handle);
  }
}
void uv_fs_event_close(uv_loop_t* loop, uv_fs_event_t* handle) {
  if (handle->dir_handle != INVALID_HANDLE_VALUE) {
    CloseHandle(handle->dir_handle);
    handle->dir_handle = INVALID_HANDLE_VALUE;
  }

  if (!handle->req_pending) {
    uv_want_endgame(loop, (uv_handle_t*) handle);
  }
}
void uv_udp_close(uv_loop_t* loop, uv_udp_t* handle) {
  uv_udp_recv_stop(handle);
  closesocket(handle->socket);

  uv__handle_closing(handle);

  if (handle->reqs_pending == 0) {
    uv_want_endgame(loop, (uv_handle_t*) handle);
  }
}
void uv_process_pipe_write_req(uv_loop_t* loop, uv_pipe_t* handle,
    uv_write_t* req) {
  assert(handle->type == UV_NAMED_PIPE);

  assert(handle->write_queue_size >= req->queued_bytes);
  handle->write_queue_size -= req->queued_bytes;

  if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
    if (req->wait_handle != INVALID_HANDLE_VALUE) {
      UnregisterWait(req->wait_handle);
      req->wait_handle = INVALID_HANDLE_VALUE;
    }
    if (req->event_handle) {
      CloseHandle(req->event_handle);
      req->event_handle = NULL;
    }
  }

  if (req->ipc_header) {
    if (req == &handle->ipc_header_write_req) {
      req->type = UV_UNKNOWN_REQ;
    } else {
      free(req);
    }
  } else {
    if (req->cb) {
      if (!REQ_SUCCESS(req)) {
        uv__set_sys_error(loop, GET_REQ_ERROR(req));
        ((uv_write_cb) req->cb)(req, -1);
      } else {
        ((uv_write_cb) req->cb)(req, 0);
      }
    }
  }

  handle->write_reqs_pending--;

  if (handle->flags & UV_HANDLE_NON_OVERLAPPED_PIPE &&
      handle->non_overlapped_writes_tail) {
    assert(handle->write_reqs_pending > 0);
    uv_queue_non_overlapped_write(handle);
  }

  if (handle->write_reqs_pending == 0) {
    uv_unref(loop);
  }

  if (handle->write_reqs_pending == 0 &&
      handle->flags & UV_HANDLE_SHUTTING) {
    uv_want_endgame(loop, (uv_handle_t*) handle);
  }

  DECREASE_PENDING_REQ_COUNT(handle);
}
static int uv__slow_poll_close(uv_loop_t* loop, uv_poll_t* handle) {
  handle->events = 0;
  uv__handle_closing(handle);

  if (handle->submitted_events_1 == 0 &&
      handle->submitted_events_2 == 0) {
    uv_want_endgame(loop, (uv_handle_t*) handle);
  }

  return 0;
}
void uv_udp_close(uv_loop_t* loop, uv_udp_t* handle) {
  uv_udp_recv_stop(handle);
  closesocket(handle->socket);
  handle->socket = INVALID_SOCKET;

  uv__handle_closing(handle);

  if (handle->reqs_pending == 0) {
    uv_want_endgame(loop, (uv_handle_t*) handle);
  }
}
void uv_process_close(uv_loop_t* loop, uv_process_t* handle) {
  if (handle->wait_handle != INVALID_HANDLE_VALUE) {
    handle->close_handle = CreateEvent(NULL, FALSE, FALSE, NULL);
    UnregisterWaitEx(handle->wait_handle, handle->close_handle);
    handle->wait_handle = NULL;

    RegisterWaitForSingleObject(&handle->wait_handle, handle->close_handle,
        close_wait_callback, (void*) handle, INFINITE,
        WT_EXECUTEINWAITTHREAD | WT_EXECUTEONLYONCE);
  } else {
    uv_want_endgame(loop, (uv_handle_t*) handle);
  }
}
static void uv__fast_poll_close(uv_loop_t* loop, uv_poll_t* handle) {
  handle->events = 0;
  uv__handle_closing(handle);

  if (handle->submitted_events_1 == 0 &&
      handle->submitted_events_2 == 0) {
    uv_want_endgame(loop, (uv_handle_t*) handle);
  } else {
    /* Cancel outstanding poll requests by executing another, unique poll */
    /* request that forces the outstanding ones to return. */
    uv__fast_poll_cancel_poll_req(loop, handle);
  }
}
void uv_process_async_wakeup_req(uv_loop_t* loop, uv_async_t* handle,
    uv_req_t* req) {
  assert(handle->type == UV_ASYNC);
  assert(req->type == UV_WAKEUP);

  handle->async_sent = 0;

  if (handle->flags & UV__HANDLE_CLOSING) {
    uv_want_endgame(loop, (uv_handle_t*) handle);
  } else if (handle->async_cb != NULL) {
    handle->async_cb(handle);
  }
}
void uv_process_pipe_write_req(uv_pipe_t* handle, uv_write_t* req) {
  assert(handle->type == UV_NAMED_PIPE);

  handle->write_queue_size -= req->queued_bytes;

  if (req->cb) {
    LOOP->last_error = req->error;
    ((uv_write_cb) req->cb)(req, LOOP->last_error.code == UV_OK ? 0 : -1);
  }

  handle->write_reqs_pending--;
  if (handle->write_reqs_pending == 0 &&
      handle->flags & UV_HANDLE_SHUTTING) {
    uv_want_endgame((uv_handle_t*) handle);
  }

  DECREASE_PENDING_REQ_COUNT(handle);
}
void uv_process_tcp_write_req(uv_tcp_t* handle, uv_write_t* req) {
  assert(handle->type == UV_TCP);

  handle->write_queue_size -= req->queued_bytes;

  if (req->cb) {
    LOOP->last_error = GET_REQ_UV_SOCK_ERROR(req);
    ((uv_write_cb) req->cb)(req, LOOP->last_error.code == UV_OK ? 0 : -1);
  }

  handle->write_reqs_pending--;
  if (handle->flags & UV_HANDLE_SHUTTING &&
      handle->write_reqs_pending == 0) {
    uv_want_endgame((uv_handle_t*) handle);
  }

  DECREASE_PENDING_REQ_COUNT(handle);
}
void uv_process_close(uv_loop_t* loop, uv_process_t* handle) {
  uv__handle_closing(handle);

  if (handle->wait_handle != INVALID_HANDLE_VALUE) {
    /* This blocks until either the wait was cancelled, or the callback has */
    /* completed. */
    BOOL r = UnregisterWaitEx(handle->wait_handle, INVALID_HANDLE_VALUE);
    if (!r) {
      /* This should never happen, and if it happens, we can't recover... */
      uv_fatal_error(GetLastError(), "UnregisterWaitEx");
    }

    handle->wait_handle = INVALID_HANDLE_VALUE;
  }

  if (!handle->exit_cb_pending) {
    uv_want_endgame(loop, (uv_handle_t*) handle);
  }
}
static void uv__fast_poll_close(uv_loop_t* loop, uv_poll_t* handle) {
  handle->events = 0;

  if (handle->submitted_events_1 == 0 &&
      handle->submitted_events_2 == 0) {
    uv_want_endgame(loop, (uv_handle_t*) handle);
  } else {
    /* Try to cancel outstanding poll requests. */
    if (pCancelIoEx) {
      /* Use CancelIoEx to cancel poll requests if available. */
      if (handle->submitted_events_1)
        pCancelIoEx((HANDLE) handle->socket, &handle->poll_req_1.overlapped);
      if (handle->submitted_events_2)
        pCancelIoEx((HANDLE) handle->socket, &handle->poll_req_2.overlapped);
    } else if (handle->submitted_events_1 | handle->submitted_events_2) {
      /* Execute another unique poll to force the others to return. */
      uv__fast_poll_cancel_poll_reqs(loop, handle);
    }
  }
}
void uv_pipe_close(uv_loop_t* loop, uv_pipe_t* handle) {
  if (handle->flags & UV_HANDLE_READING) {
    handle->flags &= ~UV_HANDLE_READING;
    DECREASE_ACTIVE_COUNT(loop, handle);
  }

  if (handle->flags & UV_HANDLE_LISTENING) {
    handle->flags &= ~UV_HANDLE_LISTENING;
    DECREASE_ACTIVE_COUNT(loop, handle);
  }

  uv_pipe_cleanup(loop, handle);

  if (handle->reqs_pending == 0) {
    uv_want_endgame(loop, (uv_handle_t*) handle);
  }

  handle->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
  uv__handle_closing(handle);
}
static void uv__slow_poll_process_poll_req(uv_loop_t* loop, uv_poll_t* handle,
    uv_req_t* req) {
  unsigned char mask_events;
  int err;

  if (req == &handle->poll_req_1) {
    handle->submitted_events_1 = 0;
    mask_events = handle->mask_events_1;
  } else if (req == &handle->poll_req_2) {
    handle->submitted_events_2 = 0;
    mask_events = handle->mask_events_2;
  } else {
    assert(0);
    return;
  }

  if (!REQ_SUCCESS(req)) {
    /* Error. */
    if (handle->events != 0) {
      err = GET_REQ_ERROR(req);
      handle->events = 0; /* Stop the watcher */
      handle->poll_cb(handle, uv_translate_sys_error(err), 0);
    }
  } else {
    /* Got some events. */
    int events = req->u.io.overlapped.InternalHigh & handle->events &
        ~mask_events;
    if (events != 0) {
      handle->poll_cb(handle, 0, events);
    }
  }

  if ((handle->events & ~(handle->submitted_events_1 |
      handle->submitted_events_2)) != 0) {
    uv__slow_poll_submit_poll_req(loop, handle);
  } else if ((handle->flags & UV__HANDLE_CLOSING) &&
             handle->submitted_events_1 == 0 &&
             handle->submitted_events_2 == 0) {
    uv_want_endgame(loop, (uv_handle_t*) handle);
  }
}
void uv_tcp_close(uv_tcp_t* tcp) {
  int non_ifs_lsp;
  int close_socket = 1;

  /*
   * In order for winsock to do a graceful close there must not be
   * any pending reads.
   */
  if (tcp->flags & UV_HANDLE_READ_PENDING) {
    /* Just do shutdown on non-shared sockets, which ensures graceful close. */
    if (!(tcp->flags & UV_HANDLE_SHARED_TCP_SOCKET)) {
      shutdown(tcp->socket, SD_SEND);
      tcp->flags |= UV_HANDLE_SHUT;
    } else {
      /* Check if we have any non-IFS LSPs stacked on top of TCP */
      non_ifs_lsp = (tcp->flags & UV_HANDLE_IPV6) ? uv_tcp_non_ifs_lsp_ipv6 :
          uv_tcp_non_ifs_lsp_ipv4;

      if (!non_ifs_lsp) {
        /*
         * Shared socket with no non-IFS LSPs, request to cancel pending I/O.
         * The socket will be closed inside endgame.
         */
        CancelIo((HANDLE) tcp->socket);
        close_socket = 0;
      }
    }
  }

  tcp->flags &= ~(UV_HANDLE_READING | UV_HANDLE_LISTENING);

  if (close_socket) {
    closesocket(tcp->socket);
    tcp->flags |= UV_HANDLE_TCP_SOCKET_CLOSED;
  }

  if (tcp->reqs_pending == 0) {
    uv_want_endgame(tcp->loop, (uv_handle_t*) tcp);
  }
}
void uv_process_pipe_write_req(uv_pipe_t* handle, uv_write_t* req) {
  assert(handle->type == UV_NAMED_PIPE);

  handle->write_queue_size -= req->queued_bytes;

  if (req->cb) {
    if (!REQ_SUCCESS(req)) {
      LOOP->last_error = GET_REQ_UV_ERROR(req);
      ((uv_write_cb) req->cb)(req, -1);
    } else {
      ((uv_write_cb) req->cb)(req, 0);
    }
  }

  handle->write_reqs_pending--;
  if (handle->write_reqs_pending == 0 &&
      handle->flags & UV_HANDLE_SHUTTING) {
    uv_want_endgame((uv_handle_t*) handle);
  }

  DECREASE_PENDING_REQ_COUNT(handle);
}
void uv_process_signal_req(uv_loop_t* loop, uv_signal_t* handle,
    uv_req_t* req) {
  unsigned long dispatched_signum;

  assert(handle->type == UV_SIGNAL);
  assert(req->type == UV_SIGNAL_REQ);

  dispatched_signum = InterlockedExchange(&handle->pending_signum, 0);
  assert(dispatched_signum != 0);

  /* Check if the pending signal equals the signum that we are watching for. */
  /* These can get out of sync when the handler is stopped and restarted */
  /* while the signal_req is pending. */
  if (dispatched_signum == handle->signum)
    handle->signal_cb(handle, dispatched_signum);

  if (handle->flags & UV_HANDLE_CLOSING) {
    /* When it is closing, it must be stopped at this point. */
    assert(handle->signum == 0);
    uv_want_endgame(loop, (uv_handle_t*) handle);
  }
}
/* Called on main thread after a child process has exited. */
void uv_process_proc_exit(uv_loop_t* loop, uv_process_t* handle) {
  DWORD exit_code;

  assert(handle->exit_cb_pending);
  handle->exit_cb_pending = 0;

  /* If we're closing, don't call the exit callback. Just schedule a close */
  /* callback now. */
  if (handle->flags & UV__HANDLE_CLOSING) {
    uv_want_endgame(loop, (uv_handle_t*) handle);
    return;
  }

  /* Unregister from process notification. */
  if (handle->wait_handle != INVALID_HANDLE_VALUE) {
#ifndef WINONECORE
    UnregisterWait(handle->wait_handle);
#endif
    handle->wait_handle = INVALID_HANDLE_VALUE;
  }

  /* Set the handle to inactive: no callbacks will be made after the exit */
  /* callback. */
  uv__handle_stop(handle);

  if (handle->spawn_error.code != UV_OK) {
    /* Spawning failed. */
    exit_code = (DWORD) -1;
  } else if (!GetExitCodeProcess(handle->process_handle, &exit_code)) {
    /* Unable to obtain the exit code. This should never happen. */
    exit_code = (DWORD) -1;
  }

  /* Fire the exit callback. */
  if (handle->exit_cb) {
    loop->last_err = handle->spawn_error;
    handle->exit_cb(handle, exit_code, handle->exit_signal);
  }
}
void uv_process_tcp_write_req(uv_loop_t* loop, uv_tcp_t* handle,
    uv_write_t* req) {
  int err;

  assert(handle->type == UV_TCP);

  assert(handle->write_queue_size >= req->u.io.queued_bytes);
  handle->write_queue_size -= req->u.io.queued_bytes;

  UNREGISTER_HANDLE_REQ(loop, handle, req);

  if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
    if (req->wait_handle != INVALID_HANDLE_VALUE) {
      UnregisterWait(req->wait_handle);
      req->wait_handle = INVALID_HANDLE_VALUE;
    }
    if (req->event_handle) {
      CloseHandle(req->event_handle);
      req->event_handle = NULL;
    }
  }

  if (req->cb) {
    err = uv_translate_sys_error(GET_REQ_SOCK_ERROR(req));
    if (err == UV_ECONNABORTED) {
      /* use UV_ECANCELED for consistency with Unix */
      err = UV_ECANCELED;
    }
    req->cb(req, err);
  }

  handle->stream.conn.write_reqs_pending--;
  if (handle->stream.conn.shutdown_req != NULL &&
      handle->stream.conn.write_reqs_pending == 0) {
    uv_want_endgame(loop, (uv_handle_t*) handle);
  }

  DECREASE_PENDING_REQ_COUNT(handle);
}
int uv_shutdown(uv_shutdown_t* req, uv_stream_t* handle, uv_shutdown_cb cb) {
  if (!(handle->flags & UV_HANDLE_CONNECTION)) {
    uv_set_sys_error(WSAEINVAL);
    return -1;
  }

  if (handle->flags & UV_HANDLE_SHUTTING) {
    uv_set_sys_error(WSAESHUTDOWN);
    return -1;
  }

  uv_req_init((uv_req_t*) req);
  req->type = UV_SHUTDOWN;
  req->handle = handle;
  req->cb = cb;

  handle->flags |= UV_HANDLE_SHUTTING;
  handle->shutdown_req = req;
  handle->reqs_pending++;

  uv_want_endgame((uv_handle_t*) handle);

  return 0;
}
/* Called on main thread after a child process has exited. */
void uv_process_proc_exit(uv_loop_t* loop, uv_process_t* handle) {
  int64_t exit_code;
  DWORD status;

  assert(handle->exit_cb_pending);
  handle->exit_cb_pending = 0;

  /* If we're closing, don't call the exit callback. Just schedule a close */
  /* callback now. */
  if (handle->flags & UV__HANDLE_CLOSING) {
    uv_want_endgame(loop, (uv_handle_t*) handle);
    return;
  }

  /* Unregister from process notification. */
  if (handle->wait_handle != INVALID_HANDLE_VALUE) {
    UnregisterWait(handle->wait_handle);
    handle->wait_handle = INVALID_HANDLE_VALUE;
  }

  /* Set the handle to inactive: no callbacks will be made after the exit */
  /* callback. */
  uv__handle_stop(handle);

  if (GetExitCodeProcess(handle->process_handle, &status)) {
    exit_code = status;
  } else {
    /* Unable to obtain the exit code. This should never happen. */
    exit_code = uv_translate_sys_error(GetLastError());
  }

  /* Fire the exit callback. */
  if (handle->exit_cb) {
    handle->exit_cb(handle, exit_code, handle->exit_signal);
  }
}
void uv_close(uv_handle_t* handle, uv_close_cb cb) {
  uv_loop_t* loop = handle->loop;

  if (handle->flags & UV__HANDLE_CLOSING) {
    assert(0);
    return;
  }

  handle->close_cb = cb;

  /* Handle-specific close actions */
  switch (handle->type) {
    case UV_TCP:
      uv_tcp_close(loop, (uv_tcp_t*) handle);
      return;

    case UV_NAMED_PIPE:
      uv_pipe_close(loop, (uv_pipe_t*) handle);
      return;

    case UV_TTY:
      uv_tty_close((uv_tty_t*) handle);
      return;

    case UV_UDP:
      uv_udp_close(loop, (uv_udp_t*) handle);
      return;

    case UV_POLL:
      uv_poll_close(loop, (uv_poll_t*) handle);
      return;

    case UV_TIMER:
      uv_timer_stop((uv_timer_t*) handle);
      uv__handle_closing(handle);
      uv_want_endgame(loop, handle);
      return;

    case UV_PREPARE:
      uv_prepare_stop((uv_prepare_t*) handle);
      uv__handle_closing(handle);
      uv_want_endgame(loop, handle);
      return;

    case UV_CHECK:
      uv_check_stop((uv_check_t*) handle);
      uv__handle_closing(handle);
      uv_want_endgame(loop, handle);
      return;

    case UV_IDLE:
      uv_idle_stop((uv_idle_t*) handle);
      uv__handle_closing(handle);
      uv_want_endgame(loop, handle);
      return;

    case UV_ASYNC:
      uv_async_close(loop, (uv_async_t*) handle);
      return;

    case UV_SIGNAL:
      uv_signal_close(loop, (uv_signal_t*) handle);
      return;

    case UV_PROCESS:
      uv_process_close(loop, (uv_process_t*) handle);
      return;

    case UV_FS_EVENT:
      uv_fs_event_close(loop, (uv_fs_event_t*) handle);
      return;

    case UV_FS_POLL:
      uv__fs_poll_close((uv_fs_poll_t*) handle);
      uv__handle_closing(handle);
      uv_want_endgame(loop, handle);
      return;

    default:
      /* Not supported */
      abort();
  }
}
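/*
 * For reference: every snippet above funnels into uv_want_endgame(), which
 * only queues the handle on the loop's endgame list; the actual teardown runs
 * later from uv_process_endgames() on the loop thread. The sketch below is a
 * hedged approximation of that helper, not a verbatim copy of any revision
 * shown here: the flag and field names (UV_HANDLE_ENDGAME_QUEUED,
 * endgame_next, loop->endgame_handles) are assumptions based on libuv's
 * Windows backend conventions and do not appear in this listing.
 */
static void uv_want_endgame(uv_loop_t* loop, uv_handle_t* handle) {
  if (!(handle->flags & UV_HANDLE_ENDGAME_QUEUED)) {
    handle->flags |= UV_HANDLE_ENDGAME_QUEUED;

    /* Push the handle onto the loop's singly-linked endgame list; queuing a */
    /* handle twice is harmless because of the flag check above. */
    handle->endgame_next = loop->endgame_handles;
    loop->endgame_handles = handle;
  }
}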