/*
 * Flush as much of `stream`'s pending write queue as the fd will accept.
 *
 * Syscalls interrupted by signals (EINTR) are retried. On a hard error the
 * head request is finished with a negated errno in req->error. On
 * EAGAIN/EWOULDBLOCK a blocking stream retries immediately (goto start);
 * a non-blocking stream arms the POLLOUT watcher and returns so the loop
 * calls us again when the fd becomes writable.
 */
static void uv__write(uv_stream_t* stream) {
  struct iovec* iov;
  QUEUE* q;
  uv_write_t* req;
  int iovmax;
  int iovcnt;
  ssize_t n;

start:
  assert(uv__stream_fd(stream) >= 0);

  if (QUEUE_EMPTY(&stream->write_queue))
    return;

  /* Always work on the request at the head of the queue. */
  q = QUEUE_HEAD(&stream->write_queue);
  req = QUEUE_DATA(q, uv_write_t, queue);
  assert(req->handle == stream);

  /*
   * Cast to iovec. We had to have our own uv_buf_t instead of iovec
   * because Windows's WSABUF is not an iovec.
   */
  assert(sizeof(uv_buf_t) == sizeof(struct iovec));
  iov = (struct iovec*) &(req->bufs[req->write_index]);
  iovcnt = req->nbufs - req->write_index;

  iovmax = uv__getiovmax();

  /* Limit iov count to avoid EINVALs from writev() */
  if (iovcnt > iovmax)
    iovcnt = iovmax;

  /*
   * Now do the actual writev. Note that we've been updating the pointers
   * inside the iov each time we write. So there is no need to offset it.
   */
  do {
    if (iovcnt == 1) {
      n = tuvp_write(uv__stream_fd(stream), iov[0].iov_base, iov[0].iov_len);
    } else {
      n = tuvp_writev(uv__stream_fd(stream), iov, iovcnt);
    }
  } while (n == -1 && errno == EINTR);

  if (n < 0) {
    if (errno != EAGAIN && errno != EWOULDBLOCK) {
      /* Error: record negated errno on the request and stop polling for
       * writability; if nobody is reading either, stop the handle. */
      req->error = -errno;
      uv__write_req_finish(req);
      uv__io_stop(stream->loop, &stream->io_watcher, UV__POLLOUT);
      if (!uv__io_active(&stream->io_watcher, UV__POLLIN))
        uv__handle_stop(stream);
      return;
    } else if (stream->flags & UV_STREAM_BLOCKING) {
      /* If this is a blocking stream, try again. */
      goto start;
    }
  } else {
    /* Successful write: walk the request's buffers, consuming `n` bytes. */
    while (n >= 0) {
      uv_buf_t* buf = &(req->bufs[req->write_index]);
      size_t len = buf->len;

      assert(req->write_index < req->nbufs);

      if ((size_t)n < len) {
        /* Partial write of this buffer: advance its base/len in place so
         * the next attempt resumes exactly where this one stopped. */
        buf->base += n;
        buf->len -= n;
        stream->write_queue_size -= n;
        n = 0;

        /* There is more to write. */
        if (stream->flags & UV_STREAM_BLOCKING) {
          /*
           * If we're blocking then we should not be enabling the write
           * watcher - instead we need to try again.
           */
          goto start;
        } else {
          /* Break loop and ensure the watcher is pending. */
          break;
        }
      } else {
        /* Finished writing the buf at index req->write_index. */
        req->write_index++;

        assert((size_t)n >= len);
        n -= len;

        assert(stream->write_queue_size >= len);
        stream->write_queue_size -= len;

        if (req->write_index == req->nbufs) {
          /* Then we're done! */
          assert(n == 0);
          uv__write_req_finish(req);
          /* TODO: start trying to write the next request. */
          return;
        }
      }
    }
  }

  /* Either we've counted n down to zero or we've got EAGAIN. */
  assert(n == 0 || n == -1);

  /* Only non-blocking streams should use the write_watcher. */
  assert(!(stream->flags & UV_STREAM_BLOCKING));

  /* We're not done. */
  uv__io_start(stream->loop, &stream->io_watcher, UV__POLLOUT);
}
/*
 * Flush `stream`'s pending write queue.
 *
 * NOTE(review): an older comment here claimed this returns NULL on success
 * and the failing request on error; the function now returns void and
 * records failures in req->error instead.
 *
 * Syscalls interrupted by signals (EINTR) are retried. If the head request
 * carries a handle to send (IPC), the descriptor is attached to the payload
 * via an SCM_RIGHTS control message. On EAGAIN/EWOULDBLOCK a blocking
 * stream retries immediately; a non-blocking stream arms the POLLOUT
 * watcher and returns.
 */
static void uv__write(uv_stream_t* stream) {
  uv_write_t* req;
  struct iovec* iov;
  int iovcnt;
  ssize_t n;

  if (stream->flags & UV_CLOSING) {
    /* Handle was closed this tick. We've received a stale
     * 'is writable' callback from the event loop, ignore. */
    return;
  }

start:
  assert(uv__stream_fd(stream) >= 0);

  /* Get the request at the head of the queue. */
  req = uv_write_queue_head(stream);
  if (!req) {
    assert(stream->write_queue_size == 0);
    return;
  }

  assert(req->handle == stream);

  /*
   * Cast to iovec. We had to have our own uv_buf_t instead of iovec
   * because Windows's WSABUF is not an iovec.
   */
  assert(sizeof(uv_buf_t) == sizeof(struct iovec));
  iov = (struct iovec*) &(req->bufs[req->write_index]);
  iovcnt = req->bufcnt - req->write_index;

  /*
   * Now do the actual writev. Note that we've been updating the pointers
   * inside the iov each time we write. So there is no need to offset it.
   */
  if (req->send_handle) {
    /* IPC: pass the handle's descriptor along with the data in an
     * SCM_RIGHTS ancillary message. */
    struct msghdr msg;
    char scratch[64];
    struct cmsghdr *cmsg;
    int fd_to_send = req->send_handle->io_watcher.fd;

    assert(fd_to_send >= 0);

    msg.msg_name = NULL;
    msg.msg_namelen = 0;
    msg.msg_iov = iov;
    msg.msg_iovlen = iovcnt;
    msg.msg_flags = 0;

    msg.msg_control = (void*) scratch;
    msg.msg_controllen = CMSG_LEN(sizeof(fd_to_send));

    cmsg = CMSG_FIRSTHDR(&msg);
    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type = SCM_RIGHTS;
    cmsg->cmsg_len = msg.msg_controllen;

    /* silence aliasing warning */
    {
      void* pv = CMSG_DATA(cmsg);
      int* pi = pv;
      *pi = fd_to_send;
    }

    do {
      n = sendmsg(uv__stream_fd(stream), &msg, 0);
    } while (n == -1 && errno == EINTR);
  } else {
    do {
      if (iovcnt == 1) {
        n = write(uv__stream_fd(stream), iov[0].iov_base, iov[0].iov_len);
      } else {
        n = writev(uv__stream_fd(stream), iov, iovcnt);
      }
    } while (n == -1 && errno == EINTR);
  }

  if (n < 0) {
    if (errno != EAGAIN && errno != EWOULDBLOCK) {
      /* Error: record errno, drop the request's bytes from the queue-size
       * accounting, and finish the request. */
      req->error = errno;
      stream->write_queue_size -= uv__write_req_size(req);
      uv__write_req_finish(req);
      return;
    } else if (stream->flags & UV_STREAM_BLOCKING) {
      /* If this is a blocking stream, try again. */
      goto start;
    }
  } else {
    /* Successful write: walk the request's buffers, consuming `n` bytes. */
    while (n >= 0) {
      uv_buf_t* buf = &(req->bufs[req->write_index]);
      size_t len = buf->len;

      assert(req->write_index < req->bufcnt);

      if ((size_t)n < len) {
        /* Partial write of this buffer: advance its base/len in place so
         * the next attempt resumes exactly where this one stopped. */
        buf->base += n;
        buf->len -= n;
        stream->write_queue_size -= n;
        n = 0;

        /* There is more to write. */
        if (stream->flags & UV_STREAM_BLOCKING) {
          /*
           * If we're blocking then we should not be enabling the write
           * watcher - instead we need to try again.
           */
          goto start;
        } else {
          /* Break loop and ensure the watcher is pending. */
          break;
        }
      } else {
        /* Finished writing the buf at index req->write_index. */
        req->write_index++;

        assert((size_t)n >= len);
        n -= len;

        assert(stream->write_queue_size >= len);
        stream->write_queue_size -= len;

        if (req->write_index == req->bufcnt) {
          /* Then we're done! */
          assert(n == 0);
          uv__write_req_finish(req);
          /* TODO: start trying to write the next request. */
          return;
        }
      }
    }
  }

  /* Either we've counted n down to zero or we've got EAGAIN. */
  assert(n == 0 || n == -1);

  /* Only non-blocking streams should use the write_watcher. */
  assert(!(stream->flags & UV_STREAM_BLOCKING));

  /* We're not done. */
  uv__io_start(stream->loop, &stream->io_watcher, UV__POLLOUT);
}
/*
 * Flush `stream`'s pending write queue.
 *
 * Returns void; a failing request has its errno recorded in req->error
 * before being finished (an older comment describing a returned request
 * pointer was stale). Syscalls interrupted by signals (EINTR) are retried.
 * If the head request carries a handle to send (IPC), the descriptor is
 * attached via an SCM_RIGHTS control message. On EAGAIN/EWOULDBLOCK the
 * write watcher is (re)armed so we run again when the fd is writable.
 */
static void uv__write(uv_stream_t* stream) {
  uv_write_t* req;
  struct iovec* iov;
  int iovcnt;
  ssize_t n;

  assert(stream->fd >= 0);

  /* TODO: should probably while(1) here until EAGAIN */

  /* Get the request at the head of the queue. */
  req = uv_write_queue_head(stream);
  if (!req) {
    assert(stream->write_queue_size == 0);
    return;
  }

  assert(req->handle == stream);

  /* Cast to iovec. We had to have our own uv_buf_t instead of iovec
   * because Windows's WSABUF is not an iovec.
   */
  assert(sizeof(uv_buf_t) == sizeof(struct iovec));
  iov = (struct iovec*) &(req->bufs[req->write_index]);
  iovcnt = req->bufcnt - req->write_index;

  /* Now do the actual writev. Note that we've been updating the pointers
   * inside the iov each time we write. So there is no need to offset it.
   */
  if (req->send_handle) {
    /* IPC: pass the handle's descriptor along with the data in an
     * SCM_RIGHTS ancillary message. */
    struct msghdr msg;
    char scratch[64];
    struct cmsghdr *cmsg;
    int fd_to_send = req->send_handle->fd;

    assert(fd_to_send >= 0);

    msg.msg_name = NULL;
    msg.msg_namelen = 0;
    msg.msg_iov = iov;
    msg.msg_iovlen = iovcnt;
    msg.msg_flags = 0;

    msg.msg_control = (void*) scratch;
    msg.msg_controllen = CMSG_LEN(sizeof(fd_to_send));

    cmsg = CMSG_FIRSTHDR(&msg);
    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type = SCM_RIGHTS;
    cmsg->cmsg_len = msg.msg_controllen;

    /* Store the fd through a void* intermediate rather than
     * "*(int*) CMSG_DATA(cmsg)" to silence strict-aliasing warnings,
     * matching the other uv__write variant in this file. */
    {
      void* pv = CMSG_DATA(cmsg);
      int* pi = pv;
      *pi = fd_to_send;
    }

    do {
      n = sendmsg(stream->fd, &msg, 0);
    } while (n == -1 && errno == EINTR);
  } else {
    do {
      if (iovcnt == 1) {
        n = write(stream->fd, iov[0].iov_base, iov[0].iov_len);
      } else {
        n = writev(stream->fd, iov, iovcnt);
      }
    } while (n == -1 && errno == EINTR);
  }

  if (n < 0) {
    /* POSIX permits EWOULDBLOCK to be a distinct value from EAGAIN;
     * treat both as "try again later" instead of a hard error. */
    if (errno != EAGAIN && errno != EWOULDBLOCK) {
      /* Error: record errno, drop the request's bytes from the queue-size
       * accounting, and finish the request. */
      req->error = errno;
      stream->write_queue_size -= uv__write_req_size(req);
      uv__write_req_finish(req);
      return;
    }
  } else {
    /* Successful write */

    /* Update the counters: walk the request's buffers, consuming `n`. */
    while (n >= 0) {
      uv_buf_t* buf = &(req->bufs[req->write_index]);
      size_t len = buf->len;

      assert(req->write_index < req->bufcnt);

      if ((size_t)n < len) {
        /* Partial write of this buffer: advance its base/len in place so
         * the next attempt resumes exactly where this one stopped. */
        buf->base += n;
        buf->len -= n;
        stream->write_queue_size -= n;
        n = 0;

        /* There is more to write. Break and ensure the watcher is pending. */
        break;
      } else {
        /* Finished writing the buf at index req->write_index. */
        req->write_index++;

        assert((size_t)n >= len);
        n -= len;

        assert(stream->write_queue_size >= len);
        stream->write_queue_size -= len;

        if (req->write_index == req->bufcnt) {
          /* Then we're done! */
          assert(n == 0);
          uv__write_req_finish(req);
          /* TODO: start trying to write the next request. */
          return;
        }
      }
    }
  }

  /* Either we've counted n down to zero or we've got EAGAIN. */
  assert(n == 0 || n == -1);

  /* We're not done. */
  ev_io_start(stream->loop->ev, &stream->write_watcher);
}