int uv_tcp_write(uv_loop_t* loop,
                 uv_write_t* req,
                 uv_tcp_t* handle,
                 const uv_buf_t bufs[],
                 unsigned int nbufs,
                 uv_write_cb cb) {
  int result;
  DWORD bytes;

  uv_req_init(loop, (uv_req_t*) req);
  req->type = UV_WRITE;
  req->handle = (uv_stream_t*) handle;
  req->cb = cb;

  /* Prepare the overlapped structure. */
  memset(&(req->u.io.overlapped), 0, sizeof(req->u.io.overlapped));
  if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
    req->event_handle = CreateEvent(NULL, 0, 0, NULL);
    if (!req->event_handle) {
      uv_fatal_error(GetLastError(), "CreateEvent");
    }
    req->u.io.overlapped.hEvent = (HANDLE) ((ULONG_PTR) req->event_handle | 1);
    req->wait_handle = INVALID_HANDLE_VALUE;
  }

  result = WSASend(handle->socket,
                   (WSABUF*) bufs,
                   nbufs,
                   &bytes,
                   0,
                   &req->u.io.overlapped,
                   NULL);

  if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
    /* Request completed immediately. */
    req->u.io.queued_bytes = 0;
    handle->reqs_pending++;
    handle->stream.conn.write_reqs_pending++;
    REGISTER_HANDLE_REQ(loop, handle, req);
    uv_insert_pending_req(loop, (uv_req_t*) req);
  } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
    /* Request queued by the kernel. */
    req->u.io.queued_bytes = uv__count_bufs(bufs, nbufs);
    handle->reqs_pending++;
    handle->stream.conn.write_reqs_pending++;
    REGISTER_HANDLE_REQ(loop, handle, req);
    handle->write_queue_size += req->u.io.queued_bytes;
    if (handle->flags & UV_HANDLE_EMULATE_IOCP &&
        !RegisterWaitForSingleObject(&req->wait_handle,
                                     req->event_handle,
                                     post_write_completion,
                                     (void*) req,
                                     INFINITE,
                                     WT_EXECUTEINWAITTHREAD |
                                         WT_EXECUTEONLYONCE)) {
      SET_REQ_ERROR(req, GetLastError());
      uv_insert_pending_req(loop, (uv_req_t*) req);
    }
  } else {
    /* Send failed due to an error; report it later. */
    req->u.io.queued_bytes = 0;
    handle->reqs_pending++;
    handle->stream.conn.write_reqs_pending++;
    REGISTER_HANDLE_REQ(loop, handle, req);
    SET_REQ_ERROR(req, WSAGetLastError());
    uv_insert_pending_req(loop, (uv_req_t*) req);
  }

  return 0;
}
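/*
 * Illustrative only, not part of libuv: a minimal caller-side sketch of how a
 * write reaches uv_tcp_write() through the public uv_write() API. Assumes an
 * already-connected uv_tcp_t and <stdlib.h>; the function names here are
 * hypothetical and error handling is abbreviated.
 */
static void example_write_cb(uv_write_t* req, int status) {
  /* status is 0 on success; nonzero/negative on error, depending on the
   * libuv version. */
  free(req);
}

static int example_send(uv_tcp_t* tcp) {
  uv_write_t* req = (uv_write_t*) malloc(sizeof(*req));
  uv_buf_t buf = uv_buf_init((char*) "hello", 5);
  /* For TCP streams, uv_write() dispatches to uv_tcp_write() above. */
  return uv_write(req, (uv_stream_t*) tcp, &buf, 1, example_write_cb);
}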
void uv_pipe_connect(uv_connect_t* req,
                     uv_pipe_t* handle,
                     const char* name,
                     uv_connect_cb cb) {
  uv_loop_t* loop = handle->loop;
  int errorno, nameSize;
  HANDLE pipeHandle = INVALID_HANDLE_VALUE;
  DWORD duplex_flags;

  uv_req_init(loop, (uv_req_t*) req);
  req->type = UV_CONNECT;
  req->handle = (uv_stream_t*) handle;
  req->cb = cb;

  /* Convert name to UTF16. */
  nameSize = uv_utf8_to_utf16(name, NULL, 0) * sizeof(WCHAR);
  handle->name = (WCHAR*) malloc(nameSize);
  if (!handle->name) {
    uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
  }

  if (!uv_utf8_to_utf16(name, handle->name, nameSize / sizeof(WCHAR))) {
    errorno = GetLastError();
    goto error;
  }

  pipeHandle = open_named_pipe(handle->name, &duplex_flags);
  if (pipeHandle == INVALID_HANDLE_VALUE) {
    if (GetLastError() == ERROR_PIPE_BUSY) {
      /* Wait for the server to make a pipe instance available. */
      if (!QueueUserWorkItem(&pipe_connect_thread_proc,
                             req,
                             WT_EXECUTELONGFUNCTION)) {
        errorno = GetLastError();
        goto error;
      }

      REGISTER_HANDLE_REQ(loop, handle, req);
      handle->reqs_pending++;

      return;
    }

    errorno = GetLastError();
    goto error;
  }

  assert(pipeHandle != INVALID_HANDLE_VALUE);

  if (uv_set_pipe_handle(loop,
                         (uv_pipe_t*) req->handle,
                         pipeHandle,
                         duplex_flags)) {
    errorno = GetLastError();
    goto error;
  }

  SET_REQ_SUCCESS(req);
  uv_insert_pending_req(loop, (uv_req_t*) req);
  handle->reqs_pending++;
  REGISTER_HANDLE_REQ(loop, handle, req);
  return;

error:
  if (handle->name) {
    free(handle->name);
    handle->name = NULL;
  }

  if (pipeHandle != INVALID_HANDLE_VALUE) {
    CloseHandle(pipeHandle);
  }

  /* Make this req pending, reporting an error. */
  SET_REQ_ERROR(req, errorno);
  uv_insert_pending_req(loop, (uv_req_t*) req);
  handle->reqs_pending++;
  REGISTER_HANDLE_REQ(loop, handle, req);
  return;
}
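/*
 * Illustrative only: connecting a client pipe. uv_pipe_connect() reports its
 * result through the connect callback, never via a return value, which is why
 * the error path above still queues the req as pending. The pipe name and
 * function names are hypothetical.
 */
static void example_connect_cb(uv_connect_t* req, int status) {
  /* status reflects whether open_named_pipe()/uv_set_pipe_handle()
   * succeeded above. */
}

static void example_pipe_connect(uv_loop_t* loop) {
  static uv_pipe_t client;
  static uv_connect_t connect_req;
  uv_pipe_init(loop, &client, 0);
  uv_pipe_connect(&connect_req,
                  &client,
                  "\\\\.\\pipe\\example",
                  example_connect_cb);
}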
static int uv_tcp_try_connect(uv_connect_t* req,
                              uv_tcp_t* handle,
                              const struct sockaddr* addr,
                              unsigned int addrlen,
                              uv_connect_cb cb) {
  uv_loop_t* loop = handle->loop;
  const struct sockaddr* bind_addr;
  BOOL success;
  DWORD bytes;
  int err;

  if (handle->delayed_error) {
    return handle->delayed_error;
  }

  if (!(handle->flags & UV_HANDLE_BOUND)) {
    /* ConnectEx requires a bound socket, so bind to an OS-assigned port on
     * the wildcard address first. */
    if (addrlen == sizeof(uv_addr_ip4_any_)) {
      bind_addr = (const struct sockaddr*) &uv_addr_ip4_any_;
    } else if (addrlen == sizeof(uv_addr_ip6_any_)) {
      bind_addr = (const struct sockaddr*) &uv_addr_ip6_any_;
    } else {
      abort();
    }
    err = uv_tcp_try_bind(handle, bind_addr, addrlen, 0);
    if (err)
      return err;
    if (handle->delayed_error)
      return handle->delayed_error;
  }

  if (!handle->tcp.conn.func_connectex) {
    if (!uv_get_connectex_function(handle->socket,
                                   &handle->tcp.conn.func_connectex)) {
      return WSAEAFNOSUPPORT;
    }
  }

  uv_req_init(loop, (uv_req_t*) req);
  req->type = UV_CONNECT;
  req->handle = (uv_stream_t*) handle;
  req->cb = cb;
  memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped));

  success = handle->tcp.conn.func_connectex(handle->socket,
                                            addr,
                                            addrlen,
                                            NULL,
                                            0,
                                            &bytes,
                                            &req->u.io.overlapped);

  if (UV_SUCCEEDED_WITHOUT_IOCP(success)) {
    /* Process the req without IOCP. */
    handle->reqs_pending++;
    REGISTER_HANDLE_REQ(loop, handle, req);
    uv_insert_pending_req(loop, (uv_req_t*) req);
  } else if (UV_SUCCEEDED_WITH_IOCP(success)) {
    /* The req will be processed with IOCP. */
    handle->reqs_pending++;
    REGISTER_HANDLE_REQ(loop, handle, req);
  } else {
    return WSAGetLastError();
  }

  return 0;
}
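/*
 * Illustrative only: how a caller reaches uv_tcp_try_connect() via the public
 * API. A sketch assuming the modern pointer-based uv_tcp_connect() signature
 * and a handle initialized with uv_tcp_init(); the address and port are
 * hypothetical.
 */
static int example_tcp_connect(uv_tcp_t* tcp,
                               uv_connect_t* req,
                               uv_connect_cb cb) {
  struct sockaddr_in addr;
  int err;

  err = uv_ip4_addr("127.0.0.1", 8000, &addr);
  if (err)
    return err;
  /* An unbound handle is implicitly bound to the wildcard address above
   * before ConnectEx is issued. */
  return uv_tcp_connect(req, tcp, (const struct sockaddr*) &addr, cb);
}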
static int uv_pipe_write_impl(uv_loop_t* loop,
                              uv_write_t* req,
                              uv_pipe_t* handle,
                              uv_buf_t bufs[],
                              int bufcnt,
                              uv_stream_t* send_handle,
                              uv_write_cb cb) {
  int result;
  uv_tcp_t* tcp_send_handle;
  uv_write_t* ipc_header_req;
  uv_ipc_frame_uv_stream ipc_frame;

  if (bufcnt != 1 && (bufcnt != 0 || !send_handle)) {
    uv__set_artificial_error(loop, UV_ENOTSUP);
    return -1;
  }

  /* Only TCP handles are supported for sharing. */
  if (send_handle &&
      ((send_handle->type != UV_TCP) ||
       (!(send_handle->flags & UV_HANDLE_BOUND) &&
        !(send_handle->flags & UV_HANDLE_CONNECTION)))) {
    uv__set_artificial_error(loop, UV_ENOTSUP);
    return -1;
  }

  assert(handle->handle != INVALID_HANDLE_VALUE);

  uv_req_init(loop, (uv_req_t*) req);
  req->type = UV_WRITE;
  req->handle = (uv_stream_t*) handle;
  req->cb = cb;
  req->ipc_header = 0;
  req->event_handle = NULL;
  req->wait_handle = INVALID_HANDLE_VALUE;
  memset(&req->overlapped, 0, sizeof(req->overlapped));

  if (handle->ipc) {
    assert(!(handle->flags & UV_HANDLE_NON_OVERLAPPED_PIPE));
    ipc_frame.header.flags = 0;

    /* Use the IPC framing protocol. */
    if (send_handle) {
      tcp_send_handle = (uv_tcp_t*) send_handle;

      if (uv_tcp_duplicate_socket(tcp_send_handle,
                                  handle->ipc_pid,
                                  &ipc_frame.socket_info)) {
        return -1;
      }
      ipc_frame.header.flags |= UV_IPC_TCP_SERVER;

      if (tcp_send_handle->flags & UV_HANDLE_CONNECTION) {
        ipc_frame.header.flags |= UV_IPC_TCP_CONNECTION;
      }
    }

    if (bufcnt == 1) {
      ipc_frame.header.flags |= UV_IPC_RAW_DATA;
      ipc_frame.header.raw_data_length = bufs[0].len;
    }

    /*
     * Use the provided req if we're only doing a single write.
     * If we're doing multiple writes, use ipc_header_write_req to do
     * the first write, and then use the provided req for the second write.
     */
    if (!(ipc_frame.header.flags & UV_IPC_RAW_DATA)) {
      ipc_header_req = req;
    } else {
      /*
       * Try to use the preallocated write req if it's available.
       * Otherwise allocate a new one.
       */
      if (handle->ipc_header_write_req.type != UV_WRITE) {
        ipc_header_req = (uv_write_t*) &handle->ipc_header_write_req;
      } else {
        ipc_header_req = (uv_write_t*) malloc(sizeof(uv_write_t));
        if (!ipc_header_req) {
          uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
        }
      }

      uv_req_init(loop, (uv_req_t*) ipc_header_req);
      ipc_header_req->type = UV_WRITE;
      ipc_header_req->handle = (uv_stream_t*) handle;
      ipc_header_req->cb = NULL;
      ipc_header_req->ipc_header = 1;
    }

    /* Write the header or the whole frame. */
    memset(&ipc_header_req->overlapped, 0, sizeof(ipc_header_req->overlapped));

    result = WriteFile(handle->handle,
                       &ipc_frame,
                       ipc_frame.header.flags & UV_IPC_TCP_SERVER ?
                           sizeof(ipc_frame) : sizeof(ipc_frame.header),
                       NULL,
                       &ipc_header_req->overlapped);
    if (!result && GetLastError() != ERROR_IO_PENDING) {
      uv__set_sys_error(loop, GetLastError());
      return -1;
    }

    if (result) {
      /* Request completed immediately. */
      ipc_header_req->queued_bytes = 0;
    } else {
      /* Request queued by the kernel. */
      ipc_header_req->queued_bytes =
          ipc_frame.header.flags & UV_IPC_TCP_SERVER ?
              sizeof(ipc_frame) : sizeof(ipc_frame.header);
      handle->write_queue_size += ipc_header_req->queued_bytes;
    }

    REGISTER_HANDLE_REQ(loop, handle, ipc_header_req);
    handle->reqs_pending++;
    handle->write_reqs_pending++;

    /* If we don't have any raw data to write - we're done. */
    if (!(ipc_frame.header.flags & UV_IPC_RAW_DATA)) {
      return 0;
    }
  }

  if (handle->flags & UV_HANDLE_NON_OVERLAPPED_PIPE) {
    req->write_buffer = bufs[0];
    uv_insert_non_overlapped_write_req(handle, req);
    if (handle->write_reqs_pending == 0) {
      uv_queue_non_overlapped_write(handle);
    }

    /* Request queued by the kernel. */
    req->queued_bytes = uv_count_bufs(bufs, bufcnt);
    handle->write_queue_size += req->queued_bytes;
  } else {
    if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
      /* Create the event that the overlapped write will signal. It must be
       * attached to the OVERLAPPED before WriteFile is issued; the low bit
       * is set so the completion is not also posted to the IOCP. */
      req->event_handle = CreateEvent(NULL, 0, 0, NULL);
      if (!req->event_handle) {
        uv_fatal_error(GetLastError(), "CreateEvent");
      }
      req->overlapped.hEvent = (HANDLE) ((ULONG_PTR) req->event_handle | 1);
    }

    result = WriteFile(handle->handle,
                       bufs[0].base,
                       bufs[0].len,
                       NULL,
                       &req->overlapped);

    if (!result && GetLastError() != ERROR_IO_PENDING) {
      uv__set_sys_error(loop, GetLastError());
      return -1;
    }

    if (result) {
      /* Request completed immediately. */
      req->queued_bytes = 0;
    } else {
      /* Request queued by the kernel. */
      req->queued_bytes = uv_count_bufs(bufs, bufcnt);
      handle->write_queue_size += req->queued_bytes;
    }

    if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
      /* Wait on the untagged event handle, not the tagged hEvent. */
      if (!RegisterWaitForSingleObject(&req->wait_handle,
                                       req->event_handle,
                                       post_completion_write_wait,
                                       (void*) req,
                                       INFINITE,
                                       WT_EXECUTEINWAITTHREAD)) {
        uv__set_sys_error(loop, GetLastError());
        return -1;
      }
    }
  }

  REGISTER_HANDLE_REQ(loop, handle, req);
  handle->reqs_pending++;
  handle->write_reqs_pending++;

  return 0;
}
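/*
 * Illustrative only: sending a TCP handle over an IPC pipe with uv_write2(),
 * which drives the UV_IPC_TCP_SERVER framing path above. A sketch assuming
 * `ipc_pipe` was initialized with uv_pipe_init(loop, pipe, 1) and connected;
 * buffer contents and function names are hypothetical. Note the
 * implementation above accepts exactly one buffer (or none when only a
 * handle is sent).
 */
static void example_write2_cb(uv_write_t* req, int status) {
  /* Fires when the raw-data write completes; the IPC header frame is
   * tracked by an internal req whose cb is NULL. */
}

static int example_send_tcp_handle(uv_pipe_t* ipc_pipe, uv_tcp_t* conn) {
  static uv_write_t req;
  uv_buf_t buf = uv_buf_init((char*) "hi", 2);
  return uv_write2(&req, (uv_stream_t*) ipc_pipe, &buf, 1,
                   (uv_stream_t*) conn, example_write2_cb);
}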