/* Drain the loop's IOCP with GetQueuedCompletionStatusEx and queue every
 * dequeued request onto the loop's pending list.  When `block` is nonzero
 * the wait uses the loop's computed poll timeout; otherwise the call
 * returns immediately (zero timeout).
 */
static void uv_poll_ex(uv_loop_t* loop, int block) {
  OVERLAPPED_ENTRY entries[64];
  ULONG dequeued;
  ULONG idx;
  uv_req_t* req;
  DWORD wait_ms;
  BOOL ok;

  wait_ms = block ? uv_get_poll_timeout(loop) : 0;

  assert(pGetQueuedCompletionStatusEx);

  ok = pGetQueuedCompletionStatusEx(loop->iocp,
                                    entries,
                                    COUNTOF(entries),
                                    &dequeued,
                                    wait_ms,
                                    FALSE);

  if (!ok) {
    if (GetLastError() != WAIT_TIMEOUT) {
      /* Serious error */
      uv_fatal_error(GetLastError(), "GetQueuedCompletionStatusEx");
    }
    return;
  }

  for (idx = 0; idx < dequeued; idx++) {
    /* Package was dequeued */
    req = uv_overlapped_to_req(entries[idx].lpOverlapped);
    uv_insert_pending_req(loop, req);
  }
}
/* Poll the IOCP once with the given timeout and hand every dequeued
 * completion packet to the loop as a pending request.  Keeps the loop
 * clock honest: time is refreshed after a successful wait, and advanced
 * explicitly when the wait returned slightly before the requested
 * timeout elapsed.
 */
static void uv_poll_ex(uv_loop_t* loop, DWORD timeout) {
  OVERLAPPED_ENTRY entries[128];
  ULONG dequeued;
  ULONG idx;
  uv_req_t* req;
  BOOL ok;

  ok = pGetQueuedCompletionStatusEx(loop->iocp,
                                    entries,
                                    ARRAY_SIZE(entries),
                                    &dequeued,
                                    timeout,
                                    FALSE);

  if (!ok) {
    if (GetLastError() != WAIT_TIMEOUT) {
      /* Serious error */
      uv_fatal_error(GetLastError(), "GetQueuedCompletionStatusEx");
    } else if (timeout > 0) {
      /* GetQueuedCompletionStatus can occasionally return a little early.
       * Make sure that the desired timeout is reflected in the loop time. */
      uv__time_forward(loop, timeout);
    }
    return;
  }

  for (idx = 0; idx < dequeued; idx++) {
    /* Package was dequeued */
    req = uv_overlapped_to_req(entries[idx].lpOverlapped);
    uv_insert_pending_req(loop, req);
  }

  /* Some time might have passed waiting for I/O,
   * so update the loop time here. */
  uv_update_time(loop);
}
/* Dequeue pending IOCP completion packets for blocking-work processing.
 *
 * Uses GetQueuedCompletionStatusEx when available (batch dequeue of up to
 * 128 packets); otherwise falls back to dequeuing a single packet with
 * GetQueuedCompletionStatus.  On return, state->count holds the number of
 * packets stored in state->overlappeds (0 when nothing was dequeued, e.g.
 * on timeout).
 */
void uv_run_blockingwork(uv_run_state* state) {
  BOOL success;

  /* Always start from a known-empty result: the fallback path below only
   * sets count when a packet is dequeued, so without this it would leave
   * a stale count behind on timeout. */
  state->count = 0;

  // Check for pending events.
  if (pGetQueuedCompletionStatusEx) {
    /* NOTE(review): 128 is assumed to match the capacity of
     * state->overlappeds -- confirm against the uv_run_state declaration. */
    success = pGetQueuedCompletionStatusEx(
        state->loop->iocp,
        state->overlappeds,
        128,
        &state->count,
        state->timeout,
        FALSE);
    if (!success)
      state->count = 0;
  } else {
    success = GetQueuedCompletionStatus(
        state->loop->iocp,
        &state->overlappeds[0].dwNumberOfBytesTransferred,
        &state->overlappeds[0].lpCompletionKey,
        &state->overlappeds[0].lpOverlapped,
        state->timeout);
    if (state->overlappeds[0].lpOverlapped)
      state->count = 1;
  }

  /* GetQueuedCompletionStatus returns FALSE both when nothing could be
   * dequeued AND when it dequeues the packet of a *failed* I/O operation
   * (FALSE with a non-NULL lpOverlapped).  Only the former case -- no
   * packet and not a timeout -- is a fatal IOCP error; a dequeued failed
   * operation must be delivered to its handler, not aborted on. */
  if (!success && state->count == 0 && GetLastError() != WAIT_TIMEOUT) {
    // Serious error
    uv_fatal_error(GetLastError(), "GetQueuedCompletionStatusEx");
  }
}
/* Poll the IOCP with GetQueuedCompletionStatusEx, retrying until the
 * requested timeout has genuinely elapsed on the loop clock.
 *
 * The wait can return WAIT_TIMEOUT slightly before `timeout`
 * milliseconds have passed; in that case the remaining time is
 * recomputed against an absolute deadline and the wait is repeated,
 * with a growing pad to guarantee forward progress (see comment below).
 * Successfully dequeued packets are queued as pending requests on the
 * loop; any wait error other than WAIT_TIMEOUT is fatal.
 */
static void uv_poll_ex(uv_loop_t* loop, DWORD timeout) {
  BOOL success;
  uv_req_t* req;
  OVERLAPPED_ENTRY overlappeds[128];
  ULONG count;
  ULONG i;
  int repeat;
  uint64_t timeout_time;

  /* Absolute deadline (in loop-clock ms) that must be reached before the
   * timeout branch gives up and breaks out of the retry loop. */
  timeout_time = loop->time + timeout;

  for (repeat = 0; ; repeat++) {
    success = pGetQueuedCompletionStatusEx(loop->iocp,
                                           overlappeds,
                                           ARRAY_SIZE(overlappeds),
                                           &count,
                                           timeout,
                                           FALSE);

    if (success) {
      for (i = 0; i < count; i++) {
        /* Package was dequeued */
        req = uv_overlapped_to_req(overlappeds[i].lpOverlapped);
        uv_insert_pending_req(loop, req);
      }

      /* Some time might have passed waiting for I/O,
       * so update the loop time here. */
      uv_update_time(loop);
    } else if (GetLastError() != WAIT_TIMEOUT) {
      /* Serious error */
      uv_fatal_error(GetLastError(), "GetQueuedCompletionStatusEx");
    } else if (timeout > 0) {
      /* GetQueuedCompletionStatus can occasionally return a little early.
       * Make sure that the desired timeout target time is reached. */
      uv_update_time(loop);
      if (timeout_time > loop->time) {
        /* Deadline not yet reached: wait again for the remainder. */
        timeout = (DWORD)(timeout_time - loop->time);
        /* The first call to GetQueuedCompletionStatus should return very
         * close to the target time and the second should reach it, but
         * this is not stated in the documentation. To make sure a busy
         * loop cannot happen, the timeout is increased exponentially
         * starting on the third round. */
        timeout += repeat ? (1 << (repeat - 1)) : 0;
        continue;
      }
    }
    break;
  }
}
static void uv_poll_ex(int block) { BOOL success; DWORD timeout; uv_req_t* req; OVERLAPPED_ENTRY overlappeds[64]; ULONG count; ULONG i; if (block) { timeout = uv_get_poll_timeout(); } else { timeout = 0; } assert(pGetQueuedCompletionStatusEx); success = pGetQueuedCompletionStatusEx(LOOP->iocp, overlappeds, COUNTOF(overlappeds), &count, timeout, FALSE); if (success) { for (i = 0; i < count; i++) { /* Package was dequeued */ req = uv_overlapped_to_req(overlappeds[i].lpOverlapped); if (overlappeds[i].lpOverlapped->Internal != STATUS_SUCCESS) { req->error = uv_new_sys_error(pRtlNtStatusToDosError( overlappeds[i].lpOverlapped->Internal)); } uv_insert_pending_req(req); } } else if (GetLastError() != WAIT_TIMEOUT) { /* Serious error */ uv_fatal_error(GetLastError(), "GetQueuedCompletionStatusEx"); } }
/* Drain the event queue's IOCP, translating completed overlapped
 * operations into ready events.
 *
 * Returns the (possibly extended) singly-linked ready list; newly ready
 * events are pushed onto the front of `ev_ready` via ev->next_ready.
 * `now` is the current time in ms, or 0L meaning "not sampled yet" --
 * it is fetched lazily only if a timeout actually needs resetting.
 * Non-blocking: every wait uses a 0 timeout.
 */
static struct event *
win32iocp_process (struct event_queue *evq, struct event *ev_ready, msec_t now)
{
    const HANDLE iocph = evq->iocp.h;
    OVERLAPPED_ENTRY entries[NENTRY];
    ULONG nentries = 0;

    for (; ; ) {
        struct win32overlapped *ov;
        struct event *ev;
        BOOL status;
        int cancelled = 0;

        if (pGetQueuedCompletionStatusEx) {
            /* Batch path: refill `entries` only when the previous batch is
             * exhausted; a failed refill (including timeout) ends the drain. */
            if (!nentries && !pGetQueuedCompletionStatusEx(iocph, entries,
             NENTRY, &nentries, 0L, FALSE))
                break;
            {
                /* Consume the batch from the back (order does not matter
                 * for readiness).  Internal carries the operation's
                 * NTSTATUS: zero means success. */
                const OVERLAPPED_ENTRY *ove = &entries[--nentries];
                const DWORD err = (DWORD) ove->lpOverlapped->Internal;

                ov = (struct win32overlapped *) ove->lpOverlapped;
                status = !err;
                cancelled = (err == STATUS_CANCELLED);
            }
        } else {
            /* Single-packet fallback.  FALSE with a dequeued overlapped
             * means the *operation* failed; FALSE with WAIT_TIMEOUT means
             * the port is empty and the drain is done. */
            ULONG_PTR key;
            DWORD nr;

            status = GetQueuedCompletionStatus(iocph, &nr, &key,
             (OVERLAPPED **) &ov, 0L);
            if (!status) {
                const DWORD err = GetLastError();

                if (err == WAIT_TIMEOUT)
                    break;
                cancelled = (err == ERROR_OPERATION_ABORTED);
            }
        }

        if (!ov) {
            /* No overlapped dequeued: skip a bad batch entry, or stop on
             * a hard GetQueuedCompletionStatus failure. */
            if (pGetQueuedCompletionStatusEx)
                continue;
            break;  /* error */
        }

        ev = ov->ev;
        /* An overlapped with no owning event is treated as cancelled. */
        cancelled = ev ? cancelled : 1;
        win32iocp_del_overlapped(evq, ov);
        if (cancelled)
            continue;

        if (!status)
            ev->flags |= EVENT_EOF_RES;
        else if (ov == ev->w.iocp.rov) {
            ev->w.iocp.rov = NULL;
            ev->flags |= EVENT_READ_RES;
            ev->flags &= ~EVENT_RPENDING;  /* have to set IOCP read request */
        } else {
            ev->w.iocp.wov = NULL;
            ev->flags |= EVENT_WRITE_RES;
            ev->flags &= ~EVENT_WPENDING;  /* have to set IOCP write request */
        }

        /* Already on the ready list from an earlier completion this drain;
         * flags above were still updated. */
        if (ev->flags & EVENT_ACTIVE)
            continue;
        ev->flags |= EVENT_ACTIVE;

        if (ev->flags & EVENT_ONESHOT)
            evq_del(ev, 1);
        else if (ev->tq && !(ev->flags & EVENT_TIMEOUT_MANUAL)) {
            /* Sample the clock lazily, at most once per drain. */
            if (now == 0L) {
                now = evq->now = sys_milliseconds();
            }
            timeout_reset(ev, now);
        }

        /* Push onto the front of the ready list. */
        ev->next_ready = ev_ready;
        ev_ready = ev;
    }
    return ev_ready;
}