/* Dequeue a batch of completion packets with GetQueuedCompletionStatusEx
 * and queue the matching requests on the loop's pending list. Keeps the
 * loop clock honest: refreshes it after a successful wait and advances it
 * when the wait timed out slightly early.
 */
static void uv_poll_ex(uv_loop_t* loop, DWORD timeout) {
  OVERLAPPED_ENTRY entries[128];
  ULONG dequeued;
  ULONG idx;
  BOOL ok;

  ok = pGetQueuedCompletionStatusEx(loop->iocp,
                                    entries,
                                    ARRAY_SIZE(entries),
                                    &dequeued,
                                    timeout,
                                    FALSE);

  if (!ok) {
    if (GetLastError() != WAIT_TIMEOUT) {
      /* Serious error */
      uv_fatal_error(GetLastError(), "GetQueuedCompletionStatusEx");
    }
    if (timeout > 0) {
      /* GetQueuedCompletionStatus can occasionally return a little early.
       * Make sure that the desired timeout is reflected in the loop time. */
      uv__time_forward(loop, timeout);
    }
    return;
  }

  for (idx = 0; idx < dequeued; idx++) {
    /* Package was dequeued */
    uv_insert_pending_req(loop,
                          uv_overlapped_to_req(entries[idx].lpOverlapped));
  }

  /* Some time might have passed waiting for I/O,
   * so update the loop time here. */
  uv_update_time(loop);
}
/* TODO: add support for links. */
/* Stat the file at `path' without following symlinks (lstat semantics).
 * A trailing slash or backslash is stripped first because the Windows
 * stat implementation rejects such paths. Async when `cb' is supplied
 * (returns 0); otherwise runs synchronously and returns the request
 * result. Allocation failure is fatal, matching library policy.
 */
int uv_fs_lstat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  int len = strlen(path);
  char* path2 = NULL;
  /* Fix: the original computed this flag and then re-tested the raw
   * condition, and would index path[-1] for an empty string. */
  int has_backslash =
      len > 0 && (path[len - 1] == '\\' || path[len - 1] == '/');

  if (has_backslash) {
    path2 = strdup(path);
    if (!path2) {
      uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
    }
    path2[len - 1] = '\0';
  }

  if (cb) {
    uv_fs_req_init_async(loop, req, UV_FS_LSTAT, NULL, cb);
    if (path2) {
      req->path = path2;
    } else {
      req->path = strdup(path);
      if (!req->path) {
        /* Match the OOM policy used for the trimmed copy above. */
        uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
      }
    }
    QUEUE_FS_TP_JOB(loop, req);
  } else {
    uv_fs_req_init_sync(loop, req, UV_FS_LSTAT);
    fs__stat(req, path2 ? path2 : path);
    if (path2) {
      free(path2);
    }
    SET_UV_LAST_ERROR_FROM_REQ(req);
    return req->result;
  }

  return 0;
}
/* Blocking-wait step of the event loop: dequeue completion packets into
 * state->overlappeds and record how many arrived in state->count. Uses
 * the batched GetQueuedCompletionStatusEx when available, falling back to
 * single-packet GetQueuedCompletionStatus otherwise.
 *
 * Fixes: state->count was left stale in the fallback branch when nothing
 * was dequeued, and the fatal-error message named the wrong API in that
 * branch.
 */
void uv_run_blockingwork(uv_run_state* state) {
  BOOL success;

  // Check for pending events.
  if (pGetQueuedCompletionStatusEx) {
    success = pGetQueuedCompletionStatusEx(
        state->loop->iocp,
        state->overlappeds,
        128,
        &state->count,
        state->timeout,
        FALSE);
    if (!success)
      state->count = 0;

    if (!success && GetLastError() != WAIT_TIMEOUT) {
      // Serious error
      uv_fatal_error(GetLastError(), "GetQueuedCompletionStatusEx");
    }
  } else {
    success = GetQueuedCompletionStatus(
        state->loop->iocp,
        &state->overlappeds[0].dwNumberOfBytesTransferred,
        &state->overlappeds[0].lpCompletionKey,
        &state->overlappeds[0].lpOverlapped,
        state->timeout);
    // Always reflect reality: exactly one packet, or none at all.
    state->count = state->overlappeds[0].lpOverlapped ? 1 : 0;

    if (!success && GetLastError() != WAIT_TIMEOUT) {
      // Serious error; report the API that actually failed.
      uv_fatal_error(GetLastError(), "GetQueuedCompletionStatus");
    }
  }
}
/* Poll the completion port once. When `block' is nonzero, wait up to the
 * loop's computed poll timeout; otherwise return immediately. A dequeued
 * packet is converted back to its uv_req_t and queued as pending.
 */
static void uv_poll(uv_loop_t* loop, int block) {
  OVERLAPPED* overlapped;
  ULONG_PTR key;
  DWORD bytes;
  DWORD timeout;
  BOOL success;

  timeout = block ? uv_get_poll_timeout(loop) : 0;

  success = GetQueuedCompletionStatus(loop->iocp,
                                      &bytes,
                                      &key,
                                      &overlapped,
                                      timeout);

  if (overlapped != NULL) {
    /* Package was dequeued */
    uv_insert_pending_req(loop, uv_overlapped_to_req(overlapped));
    return;
  }

  if (GetLastError() != WAIT_TIMEOUT) {
    /* Serious error */
    uv_fatal_error(GetLastError(), "GetQueuedCompletionStatus");
  }
}
/* Initialize a uv_fs_event_t for watching `filename': set up the base
 * handle, the watcher-specific fields and the embedded request, then mark
 * the handle started. Aborts on OOM when copying the filename, matching
 * the library-wide policy.
 */
static void uv_fs_event_init_handle(uv_loop_t* loop, uv_fs_event_t* handle,
    const char* filename, uv_fs_event_cb cb) {
  uv_handle_init(loop, (uv_handle_t*) handle);
  handle->type = UV_FS_EVENT;
  handle->cb = cb;

  /* Watcher-specific state starts out empty. */
  handle->dir_handle = INVALID_HANDLE_VALUE;
  handle->buffer = NULL;
  handle->req_pending = 0;
  handle->filew = NULL;
  handle->short_filew = NULL;
  handle->dirw = NULL;

  /* The embedded request is what the change notifications post back. */
  uv_req_init(loop, (uv_req_t*) &handle->req);
  handle->req.type = UV_FS_EVENT_REQ;
  handle->req.data = (void*) handle;

  handle->filename = strdup(filename);
  if (handle->filename == NULL) {
    uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
  }

  uv__handle_start(handle);
  loop->counters.fs_event_init++;
}
/* Slow path of uv_once(): exactly one thread runs `callback', all others
 * block until it has finished. A manual-reset event is published into
 * guard->event with an interlocked compare-exchange; the thread whose
 * exchange succeeds runs the callback, every loser waits on the winner's
 * event. The winner's event handle intentionally stays stored in the
 * guard so later losers can still wait on it.
 */
static NOINLINE void uv__once_inner(uv_once_t* guard, void (*callback)(void)) {
  DWORD result;
  HANDLE existing_event, created_event;

  /* Manual-reset (bManualReset=1), initially non-signaled event. */
  created_event = CreateEvent(NULL, 1, 0, NULL);
  if (created_event == 0) {
    /* Could fail in a low-memory situation? */
    uv_fatal_error(GetLastError(), "CreateEvent");
  }

  existing_event = InterlockedCompareExchangePointer(&guard->event,
                                                     created_event,
                                                     NULL);

  if (existing_event == NULL) {
    /* We won the race */
    callback();

    /* Wake all waiters, then publish completion for the uv_once()
     * fast path. */
    result = SetEvent(created_event);
    assert(result);
    guard->ran = 1;

  } else {
    /* We lost the race. Destroy the event we created and wait for the */
    /* existing one to become signaled. */
    CloseHandle(created_event);
    result = WaitForSingleObject(existing_event, INFINITE);
    assert(result == WAIT_OBJECT_0);
  }
}
/* Single-packet poll of the IOCP with an explicit timeout. A dequeued
 * packet is queued as a pending request and the loop clock is refreshed;
 * a wait that returned slightly early is compensated with
 * uv__time_forward().
 */
static void uv_poll(uv_loop_t* loop, DWORD timeout) {
  OVERLAPPED* overlapped;
  ULONG_PTR key;
  DWORD bytes;

  GetQueuedCompletionStatus(loop->iocp, &bytes, &key, &overlapped, timeout);

  if (overlapped != NULL) {
    /* Package was dequeued */
    uv_insert_pending_req(loop, uv_overlapped_to_req(overlapped));

    /* Some time might have passed waiting for I/O,
     * so update the loop time here. */
    uv_update_time(loop);
    return;
  }

  if (GetLastError() != WAIT_TIMEOUT) {
    /* Serious error */
    uv_fatal_error(GetLastError(), "GetQueuedCompletionStatus");
  } else if (timeout > 0) {
    /* GetQueuedCompletionStatus can occasionally return a little early.
     * Make sure that the desired timeout is reflected in the loop time. */
    uv__time_forward(loop, timeout);
  }
}
static int uv__get_process_title() { WCHAR title_w[MAX_TITLE_LENGTH]; int length; if (!GetConsoleTitleW(title_w, sizeof(title_w) / sizeof(WCHAR))) { return -1; } /* Find out what the size of the buffer is that we need */ length = uv_utf16_to_utf8(title_w, -1, NULL, 0); if (!length) { return -1; } assert(!process_title); process_title = (char*)uv__malloc(length); if (!process_title) { uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc"); } /* Do utf16 -> utf8 conversion here */ if (!uv_utf16_to_utf8(title_w, -1, process_title, length)) { uv__free(process_title); return -1; } return 0; }
/* Stat the file at `path'. A trailing slash or backslash is stripped
 * first because the Windows stat implementation rejects such paths.
 * Async when `cb' is supplied, synchronous otherwise. Returns 0;
 * allocation failure is fatal, matching library policy.
 */
int uv_fs_stat(uv_fs_t* req, const char* path, uv_fs_cb cb) {
  int len = strlen(path);
  char* path2 = NULL;
  /* Fix: the original computed this flag and then re-tested the raw
   * condition, and would index path[-1] for an empty string. */
  int has_backslash =
      len > 0 && (path[len - 1] == '\\' || path[len - 1] == '/');

  if (has_backslash) {
    path2 = strdup(path);
    if (!path2) {
      uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
    }
    path2[len - 1] = '\0';
  }

  if (cb) {
    uv_fs_req_init_async(req, UV_FS_STAT, cb);
    if (path2) {
      /* The request takes ownership of the trimmed copy. */
      WRAP_REQ_ARGS1(req, path2);
      req->flags |= UV_FS_FREE_ARG0;
    } else {
      WRAP_REQ_ARGS1(req, path);
      STRDUP_ARG(req, 0);
    }
    QUEUE_FS_TP_JOB(req);
  } else {
    uv_fs_req_init_sync(req, UV_FS_STAT);
    fs__stat(req, path2 ? path2 : path);
    if (path2) {
      free(path2);
    }
  }

  return 0;
}
/* One-time initialization of the (global) loop: create the IOCP and reset
 * every piece of loop state to its empty value.
 */
static void uv_loop_init() {
  /* Create an I/O completion port */
  LOOP->iocp = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 1);
  if (LOOP->iocp == NULL)
    uv_fatal_error(GetLastError(), "CreateIoCompletionPort");

  LOOP->refs = 0;

  /* Take an initial clock reading. */
  uv_update_time();

  /* Empty request queue, endgame list, timer tree and watcher lists. */
  LOOP->pending_reqs_tail = NULL;
  LOOP->endgame_handles = NULL;
  RB_INIT(&LOOP->timers);
  LOOP->check_handles = NULL;
  LOOP->prepare_handles = NULL;
  LOOP->idle_handles = NULL;
  LOOP->next_prepare_handle = NULL;
  LOOP->next_check_handle = NULL;
  LOOP->next_idle_handle = NULL;

  /* No error recorded yet. */
  LOOP->last_error = uv_ok_;
  LOOP->err_str = NULL;
}
/* Batched poll of the IOCP via GetQueuedCompletionStatusEx. When `block'
 * is nonzero, wait for up to the loop's poll timeout; otherwise just
 * drain whatever is already queued. Each dequeued packet becomes a
 * pending request.
 */
static void uv_poll_ex(uv_loop_t* loop, int block) {
  OVERLAPPED_ENTRY entries[128];
  DWORD timeout;
  ULONG dequeued;
  ULONG idx;
  BOOL ok;

  timeout = block ? uv_get_poll_timeout(loop) : 0;

  assert(pGetQueuedCompletionStatusEx);
  ok = pGetQueuedCompletionStatusEx(loop->iocp,
                                    entries,
                                    ARRAY_SIZE(entries),
                                    &dequeued,
                                    timeout,
                                    FALSE);

  if (ok) {
    for (idx = 0; idx < dequeued; idx++) {
      /* Package was dequeued */
      uv_insert_pending_req(loop,
                            uv_overlapped_to_req(entries[idx].lpOverlapped));
    }
  } else if (GetLastError() != WAIT_TIMEOUT) {
    /* Serious error */
    uv_fatal_error(GetLastError(), "GetQueuedCompletionStatusEx");
  }
}
/* Start an overlapped WSASend of `bufs' on `handle'. The request is
 * registered with the handle and completion is delivered through the
 * loop's IOCP — or, for handles with emulated IOCP, through an event plus
 * RegisterWaitForSingleObject. Returns 0 when the write was started or
 * completed, -1 (with the loop error set) when WSASend failed outright.
 *
 * Fixes: req->overlapped was memset twice; the emulation event handle was
 * leaked when WSASend failed immediately.
 */
int uv_tcp_write(uv_loop_t* loop, uv_write_t* req, uv_tcp_t* handle,
    uv_buf_t bufs[], int bufcnt, uv_write_cb cb) {
  int result;
  DWORD bytes;

  uv_req_init(loop, (uv_req_t*) req);
  req->type = UV_WRITE;
  req->handle = (uv_stream_t*) handle;
  req->cb = cb;

  /* Prepare the overlapped structure (once is enough). */
  memset(&req->overlapped, 0, sizeof(req->overlapped));
  if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
    req->event_handle = CreateEvent(NULL, 0, 0, NULL);
    if (!req->event_handle) {
      uv_fatal_error(GetLastError(), "CreateEvent");
    }
    /* Setting the low bit of hEvent tells the kernel not to post a
     * completion packet to the IOCP for this operation. */
    req->overlapped.hEvent = (HANDLE) ((ULONG_PTR) req->event_handle | 1);
    req->wait_handle = INVALID_HANDLE_VALUE;
  }

  result = WSASend(handle->socket,
                   (WSABUF*) bufs,
                   bufcnt,
                   &bytes,
                   0,
                   &req->overlapped,
                   NULL);

  if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
    /* Request completed immediately. */
    req->queued_bytes = 0;
    handle->reqs_pending++;
    handle->write_reqs_pending++;
    REGISTER_HANDLE_REQ(loop, handle, req);
    uv_insert_pending_req(loop, (uv_req_t*) req);
  } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
    /* Request queued by the kernel. */
    req->queued_bytes = uv_count_bufs(bufs, bufcnt);
    handle->reqs_pending++;
    handle->write_reqs_pending++;
    REGISTER_HANDLE_REQ(loop, handle, req);
    handle->write_queue_size += req->queued_bytes;
    if (handle->flags & UV_HANDLE_EMULATE_IOCP &&
        !RegisterWaitForSingleObject(&req->wait_handle, req->event_handle,
            post_write_completion, (void*) req, INFINITE,
            WT_EXECUTEINWAITTHREAD | WT_EXECUTEONLYONCE)) {
      SET_REQ_ERROR(req, GetLastError());
      uv_insert_pending_req(loop, (uv_req_t*) req);
    }
  } else {
    /* Send failed due to an error. Don't leak the emulation event. */
    if (handle->flags & UV_HANDLE_EMULATE_IOCP && req->event_handle) {
      CloseHandle(req->event_handle);
      req->event_handle = NULL;
    }
    uv__set_sys_error(loop, WSAGetLastError());
    return -1;
  }

  return 0;
}
/* Poll the IOCP for completed I/O.
 *
 * `timeout' is the number of milliseconds the loop may block. Packets are
 * dequeued in batches of up to 128; an entry with a NULL lpOverlapped is
 * an empty wake-up packet and is skipped. Because the wait may return
 * slightly before the requested deadline, the call is retried until the
 * loop clock reaches `timeout_time', with an exponentially growing pad
 * from the second retry onward so a busy loop cannot occur.
 */
static void uv__poll(uv_loop_t* loop, DWORD timeout) {
  BOOL success;
  uv_req_t* req;
  OVERLAPPED_ENTRY overlappeds[128];
  ULONG count;
  ULONG i;
  int repeat;
  uint64_t timeout_time;

  /* Absolute deadline this poll is supposed to reach. */
  timeout_time = loop->time + timeout;

  for (repeat = 0; ; repeat++) {
    success = GetQueuedCompletionStatusEx(loop->iocp,
                                          overlappeds,
                                          ARRAY_SIZE(overlappeds),
                                          &count,
                                          timeout,
                                          FALSE);

    if (success) {
      for (i = 0; i < count; i++) {
        /* Package was dequeued, but see if it is not a empty package
         * meant only to wake us up. */
        if (overlappeds[i].lpOverlapped) {
          req = uv_overlapped_to_req(overlappeds[i].lpOverlapped);
          uv_insert_pending_req(loop, req);
        }
      }

      /* Some time might have passed waiting for I/O,
       * so update the loop time here. */
      uv_update_time(loop);
    } else if (GetLastError() != WAIT_TIMEOUT) {
      /* Serious error */
      uv_fatal_error(GetLastError(), "GetQueuedCompletionStatusEx");
    } else if (timeout > 0) {
      /* GetQueuedCompletionStatus can occasionally return a little early.
       * Make sure that the desired timeout target time is reached.
       */
      uv_update_time(loop);
      if (timeout_time > loop->time) {
        timeout = (DWORD)(timeout_time - loop->time);
        /* The first call to GetQueuedCompletionStatus should return very
         * close to the target time and the second should reach it, but
         * this is not stated in the documentation. To make sure a busy
         * loop cannot happen, the timeout is increased exponentially
         * starting on the third round. */
        timeout += repeat ? (1 << (repeat - 1)) : 0;
        continue;
      }
    }
    break;
  }
}
/* Dequeue the next queued non-overlapped write request (if any) and hand
 * it to a thread-pool worker; a failure to queue the work item is fatal.
 */
static void uv_queue_non_overlapped_write(uv_pipe_t* handle) {
  uv_write_t* pending = uv_remove_non_overlapped_write_req(handle);

  if (pending == NULL)
    return;

  if (!QueueUserWorkItem(&uv_pipe_writefile_thread_proc,
                         pending,
                         WT_EXECUTELONGFUNCTION)) {
    uv_fatal_error(GetLastError(), "QueueUserWorkItem");
  }
}
/* Set up the shared dummy OVERLAPPED used for operations whose completion
 * packets we want suppressed: the event handle's low bit is set, which
 * tells the kernel to skip posting to the IOCP.
 */
static void uv__init_overlapped_dummy(void) {
  HANDLE event;

  /* Manual-reset event, created already signaled. */
  event = CreateEvent(NULL, TRUE, TRUE, NULL);
  if (event == NULL)
    uv_fatal_error(GetLastError(), "CreateEvent");

  memset(&overlapped_dummy_, 0, sizeof overlapped_dummy_);
  overlapped_dummy_.hEvent = (HANDLE) ((uintptr_t) event | 1);
}
void uv_winapi_init() { HMODULE module; module = GetModuleHandleA("ntdll.dll"); if (module == NULL) { uv_fatal_error(GetLastError(), "GetModuleHandleA"); } pRtlNtStatusToDosError = (sRtlNtStatusToDosError) GetProcAddress(module, "RtlNtStatusToDosError"); if (pRtlNtStatusToDosError == NULL) { uv_fatal_error(GetLastError(), "GetProcAddress"); } pNtQueryInformationFile = (sNtQueryInformationFile) GetProcAddress(module, "NtQueryInformationFile"); if (pNtQueryInformationFile == NULL) { uv_fatal_error(GetLastError(), "GetProcAddress"); } }
/* Split `filename' into directory (*dir, may be NULL to skip) and file
 * (*file) parts, both freshly allocated. When no separator is found the
 * current directory is used as the directory part. Returns 0 on success,
 * -1 when the current directory cannot be read; OOM is fatal.
 */
static int uv_split_path(const WCHAR* filename, WCHAR** dir, WCHAR** file) {
  int len = wcslen(filename);
  int i = len;
  /* Scan backwards for the last path separator; note the loop both
   * decrements i and tests the decremented position. */
  while (i > 0 && filename[--i] != '\\' && filename[i] != '/');

  if (i == 0) {
    /* No separator found, or the separator is the very first character.
     * NOTE(review): a name like L"\\foo" lands here too, so *file keeps
     * its leading separator — confirm callers expect that. */
    if (dir) {
      *dir = (WCHAR*)uv__malloc((MAX_PATH + 1) * sizeof(WCHAR));
      if (!*dir) {
        uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
      }

      if (!GetCurrentDirectoryW(MAX_PATH, *dir)) {
        uv__free(*dir);
        *dir = NULL;
        return -1;
      }
    }

    *file = wcsdup(filename);
  } else {
    if (dir) {
      /* Directory part is filename[0..i], separator included. */
      *dir = (WCHAR*)uv__malloc((i + 2) * sizeof(WCHAR));
      if (!*dir) {
        uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
      }
      wcsncpy(*dir, filename, i + 1);
      (*dir)[i + 1] = L'\0';
    }

    /* File part is everything after the separator (len - i - 1 chars). */
    *file = (WCHAR*)uv__malloc((len - i) * sizeof(WCHAR));
    if (!*file) {
      uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
    }
    wcsncpy(*file, filename + i + 1, len - i - 1);
    (*file)[len - i - 1] = L'\0';
  }

  return 0;
}
static void uv__init_global_job_handle(void) { /* Create a job object and set it up to kill all contained processes when * it's closed. Since this handle is made non-inheritable and we're not * giving it to anyone, we're the only process holding a reference to it. * That means that if this process exits it is closed and all the processes * it contains are killed. All processes created with uv_spawn that are not * spawned with the UV_PROCESS_DETACHED flag are assigned to this job. * * We're setting the JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK flag so only the * processes that we explicitly add are affected, and *their* subprocesses * are not. This ensures that our child processes are not limited in their * ability to use job control on Windows versions that don't deal with * nested jobs (prior to Windows 8 / Server 2012). It also lets our child * processes created detached processes without explicitly breaking away * from job control (which uv_spawn doesn't, either). */ SECURITY_ATTRIBUTES attr; JOBOBJECT_EXTENDED_LIMIT_INFORMATION info; memset(&attr, 0, sizeof attr); attr.bInheritHandle = FALSE; memset(&info, 0, sizeof info); info.BasicLimitInformation.LimitFlags = JOB_OBJECT_LIMIT_BREAKAWAY_OK | JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK | JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION | JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE; uv_global_job_handle_ = CreateJobObjectW(&attr, NULL); if (uv_global_job_handle_ == NULL) uv_fatal_error(GetLastError(), "CreateJobObjectW"); if (!SetInformationJobObject(uv_global_job_handle_, JobObjectExtendedLimitInformation, &info, sizeof info)) uv_fatal_error(GetLastError(), "SetInformationJobObject"); }
/* Compute the path of `filename' relative to `dir' into a freshly
 * allocated, NUL-terminated buffer. Assumes `filename' lives under `dir'
 * separated by a backslash (as the directory-watching callers guarantee).
 * Returns 0; OOM is fatal.
 *
 * Fixes: the buffer is now sized from the actual relative length instead
 * of a fixed MAX_PATH (longer paths would have overflowed the buffer),
 * and an empty `dir' no longer reads dir[-1]. This matches the corrected
 * sizing used by the newer uv_relative_path variant.
 */
static int uv_relative_path(const WCHAR* filename, const WCHAR* dir,
    WCHAR** relpath) {
  size_t relpathlen;
  size_t filelen = wcslen(filename);
  size_t dirlen = wcslen(dir);

  /* A trailing backslash on the directory does not count. */
  if (dirlen > 0 && dir[dirlen - 1] == '\\')
    dirlen--;

  relpathlen = filelen - dirlen - 1;

  *relpath = uv__malloc((relpathlen + 1) * sizeof(WCHAR));
  if (!*relpath)
    uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");

  wcsncpy(*relpath, filename + dirlen + 1, relpathlen);
  (*relpath)[relpathlen] = L'\0';

  return 0;
}
/* Single-packet variant of the poller built on GetQueuedCompletionStatus.
 * NOTE(review): presumably selected when running under Wine (inferred
 * from the name) — confirm at the call site that picks the poll variant.
 *
 * Because the wait may return slightly before the requested deadline, the
 * call is retried until the loop clock reaches `timeout_time', with an
 * exponentially growing pad from the second retry onward so a busy loop
 * cannot occur.
 */
static void uv__poll_wine(uv_loop_t* loop, DWORD timeout) {
  DWORD bytes;
  ULONG_PTR key;
  OVERLAPPED* overlapped;
  uv_req_t* req;
  int repeat;
  uint64_t timeout_time;

  /* Absolute deadline this poll is supposed to reach. */
  timeout_time = loop->time + timeout;

  for (repeat = 0; ; repeat++) {
    GetQueuedCompletionStatus(loop->iocp,
                              &bytes,
                              &key,
                              &overlapped,
                              timeout);

    if (overlapped) {
      /* Package was dequeued */
      req = uv_overlapped_to_req(overlapped);
      uv_insert_pending_req(loop, req);

      /* Some time might have passed waiting for I/O,
       * so update the loop time here. */
      uv_update_time(loop);
    } else if (GetLastError() != WAIT_TIMEOUT) {
      /* Serious error */
      uv_fatal_error(GetLastError(), "GetQueuedCompletionStatus");
    } else if (timeout > 0) {
      /* GetQueuedCompletionStatus can occasionally return a little early.
       * Make sure that the desired timeout target time is reached.
       */
      uv_update_time(loop);
      if (timeout_time > loop->time) {
        timeout = (DWORD)(timeout_time - loop->time);
        /* The first call to GetQueuedCompletionStatus should return very
         * close to the target time and the second should reach it, but
         * this is not stated in the documentation. To make sure a busy
         * loop cannot happen, the timeout is increased exponentially
         * starting on the third round. */
        timeout += repeat ? (1 << (repeat - 1)) : 0;
        continue;
      }
    }
    break;
  }
}
/* Start listening for incoming TCP connections.
 *
 * Validates handle state (no earlier bind error, not already listening or
 * reading), performs an implicit bind to 0.0.0.0 when the socket is not
 * yet bound, calls listen(), then pre-posts uv_simultaneous_server_accepts
 * accept requests so connections can be accepted immediately. Returns 0
 * on success, -1 with the loop error set on failure; allocation failure
 * for the accept-request array is fatal.
 */
int uv_tcp_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb) {
  uv_loop_t* loop = handle->loop;
  unsigned int i;
  uv_tcp_accept_t* req;

  assert(backlog > 0);

  if (handle->flags & UV_HANDLE_BIND_ERROR) {
    /* Surface the error recorded by an earlier failed bind. */
    uv__set_sys_error(loop, handle->bind_error);
    return -1;
  }

  if (handle->flags & UV_HANDLE_LISTENING ||
      handle->flags & UV_HANDLE_READING) {
    /* Already listening. */
    uv__set_sys_error(loop, WSAEALREADY);
    return -1;
  }

  /* Implicit bind to the IPv4 wildcard address if not bound yet. */
  if (!(handle->flags & UV_HANDLE_BOUND) &&
      uv_tcp_bind(handle, uv_addr_ip4_any_) < 0)
    return -1;

  if (listen(handle->socket, backlog) == SOCKET_ERROR) {
    uv__set_sys_error(loop, WSAGetLastError());
    return -1;
  }

  handle->flags |= UV_HANDLE_LISTENING;
  handle->connection_cb = cb;

  assert(!handle->accept_reqs);
  handle->accept_reqs = (uv_tcp_accept_t*)
    malloc(uv_simultaneous_server_accepts * sizeof(uv_tcp_accept_t));
  if (!handle->accept_reqs) {
    uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
  }

  /* Initialize and immediately post every accept request. */
  for (i = 0; i < uv_simultaneous_server_accepts; i++) {
    req = &handle->accept_reqs[i];
    uv_req_init(loop, (uv_req_t*)req);
    req->type = UV_ACCEPT;
    req->accept_socket = INVALID_SOCKET;
    req->data = handle;
    uv_tcp_queue_accept(handle, req);
  }

  return 0;
}
uv_loop_t* uv_loop_new(void) { uv_loop_t* loop; /* Initialize libuv itself first */ uv_once(&uv_init_guard_, uv_init); loop = (uv_loop_t*)malloc(sizeof(uv_loop_t)); if (!loop) { uv_fatal_error(ERROR_OUTOFMEMORY, "malloc"); } uv_loop_init(loop); return loop; }
/* Compute the path of `filename' relative to `dir' into a freshly
 * allocated, NUL-terminated buffer. Assumes `filename' starts with `dir'
 * plus a separator — TODO confirm callers guarantee this, since a shorter
 * `filename' would underflow the size_t arithmetic. OOM is fatal.
 */
static void uv_relative_path(const WCHAR* filename, const WCHAR* dir,
    WCHAR** relpath) {
  size_t file_chars = wcslen(filename);
  size_t dir_chars = wcslen(dir);
  size_t rel_chars;

  /* A trailing backslash on the directory does not count. */
  if (dir_chars > 0 && dir[dir_chars - 1] == '\\')
    dir_chars--;

  rel_chars = file_chars - dir_chars - 1;

  *relpath = uv__malloc((rel_chars + 1) * sizeof(WCHAR));
  if (*relpath == NULL)
    uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");

  wcsncpy(*relpath, filename + dir_chars + 1, rel_chars);
  (*relpath)[rel_chars] = L'\0';
}
/* Wrap an inherited std handle in a freshly allocated uv_pipe_t. Returns
 * NULL when the handle cannot be attached; allocation failure is fatal.
 * UV_HANDLE_UV_ALLOCED records that libuv owns the pipe's memory.
 */
static uv_pipe_t* uv_make_pipe_for_std_handle(uv_loop_t* loop, HANDLE handle) {
  uv_pipe_t* result = (uv_pipe_t*) malloc(sizeof(uv_pipe_t));

  if (result == NULL)
    uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");

  if (uv_pipe_init_with_handle(loop, result, handle) != 0) {
    free(result);
    return NULL;
  }

  result->flags |= UV_HANDLE_UV_ALLOCED;
  return result;
}
int uv_set_process_title(const char* title) { int err; int length; WCHAR* title_w = NULL; uv__once_init(); /* Find out how big the buffer for the wide-char title must be */ length = uv_utf8_to_utf16(title, NULL, 0); if (!length) { err = GetLastError(); goto done; } /* Convert to wide-char string */ title_w = (WCHAR*)malloc(sizeof(WCHAR) * length); if (!title_w) { uv_fatal_error(ERROR_OUTOFMEMORY, "malloc"); } length = uv_utf8_to_utf16(title, title_w, length); if (!length) { err = GetLastError(); goto done; } /* If the title must be truncated insert a \0 terminator there */ if (length > MAX_TITLE_LENGTH) { title_w[MAX_TITLE_LENGTH - 1] = L'\0'; } if (!SetConsoleTitleW(title_w)) { err = GetLastError(); goto done; } EnterCriticalSection(&process_title_lock); free(process_title); process_title = strdup(title); LeaveCriticalSection(&process_title_lock); err = 0; done: free(title_w); return uv_translate_sys_error(err); }
/* Allocates and returns a new uv_ares_task_t */ static uv_ares_task_t* uv__ares_task_create(int fd) { uv_ares_task_t* h = malloc(sizeof(uv_ares_task_t)); if (h == NULL) { uv_fatal_error(ENOMEM, "malloc"); } h->sock = fd; ev_io_init(&h->read_watcher, uv__ares_io, fd, EV_READ); ev_io_init(&h->write_watcher, uv__ares_io, fd, EV_WRITE); h->read_watcher.data = h; h->write_watcher.data = h; return h; }
/* thread pool callback when socket is signalled */
/* Runs on a Windows thread-pool thread when the event registered for a
 * c-ares socket fires. Reads (and thereby resets) the pending network
 * events, packages readability/writability into a heap-allocated
 * uv_ares_action_t and posts it to the loop's IOCP so the actual c-ares
 * processing happens on the loop thread. NOTE(review): the action is
 * presumably freed by the loop-side handler — the consumer is not visible
 * here, confirm.
 */
static void CALLBACK uv_ares_socksignal_tp(void* parameter,
    BOOLEAN timerfired) {
  WSANETWORKEVENTS network_events;
  uv_ares_task_t* sockhandle;
  uv_ares_action_t* selhandle;
  uv_req_t* uv_ares_req;
  uv_loop_t* loop;

  assert(parameter != NULL);

  if (parameter != NULL) {
    sockhandle = (uv_ares_task_t*) parameter;
    loop = sockhandle->loop;

    /* clear socket status for this event */
    /* do not fail if error, thread may run after socket close */
    /* The code assumes that c-ares will write all pending data in the */
    /* callback, unless the socket would block. We can clear the state here */
    /* to avoid unnecessary signals. */
    WSAEnumNetworkEvents(sockhandle->sock,
                         sockhandle->h_event,
                         &network_events);

    /* setup new handle */
    selhandle = (uv_ares_action_t*)malloc(sizeof(uv_ares_action_t));
    if (selhandle == NULL) {
      uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
    }
    selhandle->type = UV_ARES_EVENT;
    selhandle->close_cb = NULL;
    selhandle->data = sockhandle->data;
    selhandle->sock = sockhandle->sock;
    /* FD_CONNECT counts toward both readability and writability. */
    selhandle->read =
        (network_events.lNetworkEvents & (FD_READ | FD_CONNECT)) ? 1 : 0;
    selhandle->write =
        (network_events.lNetworkEvents & (FD_WRITE | FD_CONNECT)) ? 1 : 0;

    uv_ares_req = &selhandle->ares_req;
    uv_req_init(loop, uv_ares_req);
    uv_ares_req->type = UV_ARES_EVENT_REQ;
    uv_ares_req->data = selhandle;

    /* post ares needs to called */
    POST_COMPLETION_FOR_REQ(loop, uv_ares_req);
  }
}
/* RegisterWaitForSingleObject callback for emulated-IOCP writes: forward
 * the completed write's overlapped to the owning loop's completion port
 * so it is processed on the loop thread. A post failure is fatal.
 */
static void CALLBACK post_write_completion(void* context, BOOLEAN timed_out) {
  uv_write_t* write_req = (uv_write_t*) context;
  uv_tcp_t* tcp;

  assert(write_req != NULL);
  tcp = (uv_tcp_t*) write_req->handle;
  assert(tcp != NULL);
  assert(!timed_out);

  if (!PostQueuedCompletionStatus(tcp->loop->iocp,
                                  write_req->overlapped.InternalHigh,
                                  0,
                                  &write_req->overlapped)) {
    uv_fatal_error(GetLastError(), "PostQueuedCompletionStatus");
  }
}
/* Worker-thread routine used when connecting to a busy named pipe:
 * CreateFile returned ERROR_PIPE_BUSY, so wait (in 30-second slices) for
 * a pipe instance to become available and retry the open. The outcome is
 * stored in req->error and reported back to the loop thread by posting
 * the request's overlapped to the IOCP.
 *
 * Fix: removed the unused local declared as `int errno;' — `errno' is a
 * reserved identifier (a macro from <errno.h>), so the declaration was
 * dead code at best and a compile error at worst.
 */
static DWORD WINAPI pipe_connect_thread_proc(void* parameter) {
  HANDLE pipeHandle = INVALID_HANDLE_VALUE;
  uv_pipe_t* handle;
  uv_connect_t* req;

  req = (uv_connect_t*) parameter;
  assert(req);
  handle = (uv_pipe_t*) req->handle;
  assert(handle);

  /* We're here because CreateFile on a pipe returned ERROR_PIPE_BUSY.
   * We wait for the pipe to become available with WaitNamedPipe. */
  while (WaitNamedPipeW(handle->name, 30000)) {
    /* The pipe is now available, try to connect. */
    pipeHandle = CreateFileW(handle->name,
                             GENERIC_READ | GENERIC_WRITE,
                             0,
                             NULL,
                             OPEN_EXISTING,
                             FILE_FLAG_OVERLAPPED,
                             NULL);

    if (pipeHandle != INVALID_HANDLE_VALUE) {
      break;
    }
  }

  if (pipeHandle != INVALID_HANDLE_VALUE &&
      !uv_set_pipe_handle(handle, pipeHandle)) {
    handle->handle = pipeHandle;
    req->error = uv_ok_;
  } else {
    req->error = uv_new_sys_error(GetLastError());
  }

  memset(&req->overlapped, 0, sizeof(req->overlapped));

  /* Post completed */
  if (!PostQueuedCompletionStatus(LOOP->iocp, 0, 0, &req->overlapped)) {
    uv_fatal_error(GetLastError(), "PostQueuedCompletionStatus");
  }

  return 0;
}
/* fstat() the descriptor `file' into a freshly allocated struct _stat
 * stored in req->ptr. On failure the buffer is released and req->ptr
 * reset to NULL; on success the request owns the buffer
 * (UV_FS_FREE_PTR). The _fstat return code is recorded on the request.
 */
void fs__fstat(uv_fs_t* req, uv_file file) {
  int rc;

  req->ptr = malloc(sizeof(struct _stat));
  if (req->ptr == NULL)
    uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");

  rc = _fstat(file, (struct _stat*) req->ptr);

  if (rc == -1) {
    free(req->ptr);
    req->ptr = NULL;
  } else {
    req->flags |= UV_FS_FREE_PTR;
  }

  SET_REQ_RESULT(req, rc);
}