static size_t uv__write_req_size(uv_write_t* req) {
  size_t size;

  assert(req->bufs != NULL);
  size = uv__count_bufs(req->bufs + req->write_index,
                        req->nbufs - req->write_index);
  assert(req->handle->write_queue_size >= size);

  return size;
}
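
/* For reference, uv__count_bufs() simply sums the lengths of the supplied
 * buffers. A minimal sketch of that helper, matching its use above and in
 * the send paths below (the real definition lives in uv-common.c):
 */
size_t uv__count_bufs(const uv_buf_t bufs[], unsigned int nbufs) {
  unsigned int i;
  size_t bytes;

  bytes = 0;
  for (i = 0; i < nbufs; i++)
    bytes += bufs[i].len;

  return bytes;
}
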
int uv__udp_send(uv_udp_send_t* req,
                 uv_udp_t* handle,
                 const uv_buf_t bufs[],
                 unsigned int nbufs,
                 const struct sockaddr* addr,
                 unsigned int addrlen,
                 uv_udp_send_cb send_cb) {
  int err;
  int empty_queue;

  assert(nbufs > 0);

  err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0);
  if (err)
    return err;

  /* It's legal for send_queue_count > 0 even when the write_queue is empty;
   * it means there are error-state requests in the write_completed_queue that
   * will touch up send_queue_size/count later.
   */
  empty_queue = (handle->send_queue_count == 0);

  uv__req_init(handle->loop, req, UV_UDP_SEND);
  assert(addrlen <= sizeof(req->addr));
  memcpy(&req->addr, addr, addrlen);
  req->send_cb = send_cb;
  req->handle = handle;
  req->nbufs = nbufs;

  req->bufs = req->bufsml;
  if (nbufs > ARRAY_SIZE(req->bufsml))
    req->bufs = uv__malloc(nbufs * sizeof(bufs[0]));

  if (req->bufs == NULL) {
    /* uv__req_init() registered the request; unregister it again so the
     * loop's active request count stays balanced on this error path.
     */
    uv__req_unregister(handle->loop, req);
    return -ENOMEM;
  }

  memcpy(req->bufs, bufs, nbufs * sizeof(bufs[0]));
  handle->send_queue_size += uv__count_bufs(req->bufs, req->nbufs);
  handle->send_queue_count++;
  QUEUE_INSERT_TAIL(&handle->write_queue, &req->queue);
  uv__handle_start(handle);

  if (empty_queue && !(handle->flags & UV_UDP_PROCESSING)) {
    uv__udp_sendmsg(handle);
  } else {
    uv__io_start(handle->loop, &handle->io_watcher, UV__POLLOUT);
  }

  return 0;
}
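
/* A minimal caller sketch: the public uv_udp_send() derives addrlen from the
 * address family and forwards to uv__udp_send() above. The on_send callback
 * and send_datagram() helper below are illustrative names, not part of this
 * file; the handle is assumed to be initialized elsewhere.
 */
static void on_send(uv_udp_send_t* req, int status) {
  /* status is 0 on success or a negative errno value on failure,
   * delivered by uv__udp_run_completed() below.
   */
  (void) req;
  (void) status;
}

static int send_datagram(uv_udp_t* handle) {
  static uv_udp_send_t req;
  struct sockaddr_in dest;
  uv_buf_t buf;

  uv_ip4_addr("127.0.0.1", 9000, &dest);
  buf = uv_buf_init("hello", 5);  /* literal has static storage, safe to queue */
  return uv_udp_send(&req, handle, &buf, 1,
                     (const struct sockaddr*) &dest, on_send);
}
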
static int uv__send(uv_udp_send_t* req,
                    uv_udp_t* handle,
                    const uv_buf_t bufs[],
                    unsigned int nbufs,
                    const struct sockaddr* addr,
                    unsigned int addrlen,
                    uv_udp_send_cb cb) {
  uv_loop_t* loop = handle->loop;
  DWORD result, bytes;

  uv_req_init(loop, (uv_req_t*) req);
  req->type = UV_UDP_SEND;
  req->handle = handle;
  req->cb = cb;
  memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped));

  result = WSASendTo(handle->socket,
                     (WSABUF*) bufs,
                     nbufs,
                     &bytes,
                     0,
                     addr,
                     addrlen,
                     &req->u.io.overlapped,
                     NULL);

  if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
    /* Request completed immediately. */
    req->u.io.queued_bytes = 0;
    handle->reqs_pending++;
    handle->send_queue_size += req->u.io.queued_bytes;
    handle->send_queue_count++;
    REGISTER_HANDLE_REQ(loop, handle, req);
    uv_insert_pending_req(loop, (uv_req_t*) req);
  } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
    /* Request queued by the kernel. */
    req->u.io.queued_bytes = uv__count_bufs(bufs, nbufs);
    handle->reqs_pending++;
    handle->send_queue_size += req->u.io.queued_bytes;
    handle->send_queue_count++;
    REGISTER_HANDLE_REQ(loop, handle, req);
  } else {
    /* Send failed due to an error. */
    return WSAGetLastError();
  }

  return 0;
}
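
/* For context, the two success macros used above distinguish synchronous
 * completion from kernel-queued completion. A sketch of how libuv defines
 * them in win/req-inl.h; they expect a `handle` variable in scope, and the
 * exact definitions may differ between versions:
 */
#define UV_SUCCEEDED_WITHOUT_IOCP(result)                                     \
  ((result) && (handle->flags & UV_HANDLE_SYNC_BYPASS_IOCP))

#define UV_SUCCEEDED_WITH_IOCP(result)                                        \
  ((result) || (GetLastError() == ERROR_IO_PENDING))
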
int uv_try_write(uv_stream_t* stream,
                 const uv_buf_t bufs[],
                 unsigned int nbufs) {
  int r;
  int has_pollout;
  size_t written;
  size_t req_size;
  uv_write_t req;

  /* Connecting or already writing some data. */
  if (stream->connect_req != NULL || stream->write_queue_size != 0)
    return -EAGAIN;

  has_pollout = uv__io_active(&stream->io_watcher, UV__POLLOUT);

  r = uv_write(&req, stream, bufs, nbufs, uv_try_write_cb);
  if (r != 0)
    return r;

  /* Remove the bytes that were not written from the write queue size. */
  written = uv__count_bufs(bufs, nbufs);
  if (req.bufs != NULL)
    req_size = uv__write_req_size(&req);
  else
    req_size = 0;
  written -= req_size;
  stream->write_queue_size -= req_size;

  /* Unqueue the request, regardless of whether it completed immediately. */
  QUEUE_REMOVE(&req.queue);
  uv__req_unregister(stream->loop, &req);
  if (req.bufs != req.bufsml)
    free(req.bufs);
  req.bufs = NULL;

  /* Do not poll for writable if we weren't polling before this call. */
  if (!has_pollout) {
    uv__io_stop(stream->loop, &stream->io_watcher, UV__POLLOUT);
  }

  if (written == 0)
    return -EAGAIN;
  else
    return written;
}
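
/* A minimal usage sketch for the fast path above: attempt a synchronous
 * write, then fall back to a queued uv_write() for whatever could not be
 * written immediately. send_buf() and write_done() are illustrative names;
 * the caller must keep buf.base alive until write_done() runs.
 */
static void write_done(uv_write_t* req, int status) {
  /* Free per-request state here. */
  (void) req;
  (void) status;
}

static int send_buf(uv_stream_t* stream, uv_buf_t buf, uv_write_t* req) {
  int n;

  n = uv_try_write(stream, &buf, 1);
  if (n == -EAGAIN)
    n = 0;            /* nothing written, queue everything */
  else if (n < 0)
    return n;         /* hard error */

  if ((size_t) n == buf.len)
    return 0;         /* fully written, no request needed */

  buf.base += n;      /* queue the remainder */
  buf.len -= n;
  return uv_write(req, stream, &buf, 1, write_done);
}
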
static void uv__udp_run_completed(uv_udp_t* handle) {
  uv_udp_send_t* req;
  QUEUE* q;

  assert(!(handle->flags & UV_UDP_PROCESSING));
  handle->flags |= UV_UDP_PROCESSING;

  while (!QUEUE_EMPTY(&handle->write_completed_queue)) {
    q = QUEUE_HEAD(&handle->write_completed_queue);
    QUEUE_REMOVE(q);

    req = QUEUE_DATA(q, uv_udp_send_t, queue);
    uv__req_unregister(handle->loop, req);

    handle->send_queue_size -= uv__count_bufs(req->bufs, req->nbufs);
    handle->send_queue_count--;

    if (req->bufs != req->bufsml)
      uv__free(req->bufs);
    req->bufs = NULL;

    if (req->send_cb == NULL)
      continue;

    /* req->status >= 0 == bytes written
     * req->status <  0 == errno
     */
    if (req->status >= 0)
      req->send_cb(req, 0);
    else
      req->send_cb(req, req->status);
  }

  if (QUEUE_EMPTY(&handle->write_queue)) {
    /* Pending queue and completion queue empty, stop watcher. */
    uv__io_stop(handle->loop, &handle->io_watcher, UV__POLLOUT);
    if (!uv__io_active(&handle->io_watcher, UV__POLLIN))
      uv__handle_stop(handle);
  }

  handle->flags &= ~UV_UDP_PROCESSING;
}
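
/* QUEUE_DATA() above is libuv's container_of: it recovers the uv_udp_send_t
 * from the embedded queue node. A sketch of its definition (the real macro
 * lives in queue.h and relies on offsetof from <stddef.h>):
 */
#define QUEUE_DATA(ptr, type, field)                                          \
  ((type*) ((char*) (ptr) - offsetof(type, field)))
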
int uv_tcp_write(uv_loop_t* loop,
                 uv_write_t* req,
                 uv_tcp_t* handle,
                 const uv_buf_t bufs[],
                 unsigned int nbufs,
                 uv_write_cb cb) {
  int result;
  DWORD bytes;

  uv_req_init(loop, (uv_req_t*) req);
  req->type = UV_WRITE;
  req->handle = (uv_stream_t*) handle;
  req->cb = cb;

  /* Prepare the overlapped structure. */
  memset(&(req->overlapped), 0, sizeof(req->overlapped));
  if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
    req->event_handle = CreateEvent(NULL, 0, 0, NULL);
    if (!req->event_handle) {
      uv_fatal_error(GetLastError(), "CreateEvent");
    }
    /* Setting the low bit of the event handle keeps the completion port
     * from also receiving a notification for this operation. Use a
     * pointer-sized cast; a DWORD cast would truncate the handle on
     * 64-bit builds.
     */
    req->overlapped.hEvent = (HANDLE) ((ULONG_PTR) req->event_handle | 1);
    req->wait_handle = INVALID_HANDLE_VALUE;
  }

  result = WSASend(handle->socket,
                   (WSABUF*) bufs,
                   nbufs,
                   &bytes,
                   0,
                   &req->overlapped,
                   NULL);

  if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
    /* Request completed immediately. */
    req->queued_bytes = 0;
    handle->reqs_pending++;
    handle->write_reqs_pending++;
    REGISTER_HANDLE_REQ(loop, handle, req);
    uv_insert_pending_req(loop, (uv_req_t*) req);
  } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
    /* Request queued by the kernel. */
    req->queued_bytes = uv__count_bufs(bufs, nbufs);
    handle->reqs_pending++;
    handle->write_reqs_pending++;
    REGISTER_HANDLE_REQ(loop, handle, req);
    handle->write_queue_size += req->queued_bytes;
    if (handle->flags & UV_HANDLE_EMULATE_IOCP &&
        !pRegisterWaitForSingleObject(&req->wait_handle,
                                      req->event_handle,
                                      post_write_completion,
                                      (void*) req,
                                      INFINITE,
                                      WT_EXECUTEINWAITTHREAD |
                                          0x00000008 /* WT_EXECUTEONLYONCE */)) {
      SET_REQ_ERROR(req, GetLastError());
      uv_insert_pending_req(loop, (uv_req_t*) req);
    }
  } else {
    /* Send failed due to an error. */
    return WSAGetLastError();
  }

  return 0;
}
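
/* A sketch of the wait callback registered above: when the emulated-IOCP
 * event fires, it forwards the completion to the loop's real completion
 * port so the request is processed on the loop thread. Based on libuv's
 * win/tcp.c; details may vary between versions.
 */
static void CALLBACK post_write_completion(void* context, BOOLEAN timed_out) {
  uv_write_t* req = (uv_write_t*) context;
  uv_tcp_t* handle = (uv_tcp_t*) req->handle;

  assert(!timed_out);

  if (!PostQueuedCompletionStatus(handle->loop->iocp,
                                  req->overlapped.InternalHigh,
                                  0,
                                  &req->overlapped))
    uv_fatal_error(GetLastError(), "PostQueuedCompletionStatus");
}
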
int uv_write2(uv_write_t* req,
              uv_stream_t* stream,
              const uv_buf_t bufs[],
              unsigned int nbufs,
              uv_stream_t* send_handle,
              uv_write_cb cb) {
  int empty_queue;

  assert(nbufs > 0);
  assert((stream->type == UV_TCP ||
          stream->type == UV_NAMED_PIPE ||
          stream->type == UV_TTY) &&
         "uv_write (unix) does not yet support other types of streams");

  if (uv__stream_fd(stream) < 0)
    return -EBADF;

  if (send_handle) {
    if (stream->type != UV_NAMED_PIPE || !((uv_pipe_t*) stream)->ipc)
      return -EINVAL;

    /* XXX We abuse uv_write2() to send over UDP handles to child processes.
     * Don't call uv__stream_fd() on those handles, it's a macro that on OS X
     * evaluates to a function that operates on a uv_stream_t with a couple of
     * OS X specific fields. On other Unices it does (handle)->io_watcher.fd,
     * which works but only by accident.
     */
    if (uv__handle_fd((uv_handle_t*) send_handle) < 0)
      return -EBADF;
  }

  /* It's legal for write_queue_size > 0 even when the write_queue is empty;
   * it means there are error-state requests in the write_completed_queue that
   * will touch up write_queue_size later, see also uv__write_req_finish().
   * We could check that write_queue is empty instead but that implies making
   * a write() syscall when we know that the handle is in error mode.
   */
  empty_queue = (stream->write_queue_size == 0);

  /* Initialize the req. */
  uv__req_init(stream->loop, req, UV_WRITE);
  req->cb = cb;
  req->handle = stream;
  req->error = 0;
  req->send_handle = send_handle;
  QUEUE_INIT(&req->queue);

  req->bufs = req->bufsml;
  if (nbufs > ARRAY_SIZE(req->bufsml))
    req->bufs = (uv_buf_t*) malloc(nbufs * sizeof(bufs[0]));

  if (req->bufs == NULL) {
    /* Balance the uv__req_init() above; otherwise the loop keeps counting
     * this failed request as active.
     */
    uv__req_unregister(stream->loop, req);
    return -ENOMEM;
  }

  memcpy(req->bufs, bufs, nbufs * sizeof(bufs[0]));
  req->nbufs = nbufs;
  req->write_index = 0;
  stream->write_queue_size += uv__count_bufs(bufs, nbufs);

  /* Append the request to write_queue. */
  QUEUE_INSERT_TAIL(&stream->write_queue, &req->queue);

  /* If the queue was empty when this function began, we should attempt to
   * do the write immediately. Otherwise start the write_watcher and wait
   * for the fd to become writable.
   */
  if (stream->connect_req) {
    /* Still connecting, do nothing. */
  } else if (empty_queue) {
    uv__write(stream);
  } else {
    /* Blocking streams should never have anything in the queue.
     * If this assert fires then somehow the blocking stream isn't being
     * sufficiently flushed in uv__write.
     */
    assert(!(stream->flags & UV_STREAM_BLOCKING));
    uv__io_start(stream->loop, &stream->io_watcher, UV__POLLOUT);
  }

  return 0;
}
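
/* uv_write() itself is just uv_write2() without a handle to send; this is
 * how libuv defines the wrapper, shown here for completeness.
 */
int uv_write(uv_write_t* req,
             uv_stream_t* handle,
             const uv_buf_t bufs[],
             unsigned int nbufs,
             uv_write_cb cb) {
  return uv_write2(req, handle, bufs, nbufs, NULL, cb);
}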