/* Run the completion callback of every write request that has finished,
 * then signal drain if the pending-write queue is empty as well.
 * Callbacks observe the per-request error: the loop's sys error is set
 * from req->error and the callback status is -1 on error, 0 on success.
 */
static void uv__write_callbacks(uv_stream_t* stream) {
  for (;;) {
    if (ngx_queue_empty(&stream->write_completed_queue))
      break;

    /* Detach the oldest completed request from the queue. */
    ngx_queue_t* entry = ngx_queue_head(&stream->write_completed_queue);
    uv_write_t* finished = ngx_queue_data(entry, uv_write_t, queue);
    ngx_queue_remove(entry);
    uv__req_unregister(stream->loop, finished);

    /* NOTE: the request's data has already been freed by the writer;
     * only the user callback remains to be run. */
    if (finished->cb != NULL) {
      uv__set_sys_error(stream->loop, finished->error);
      finished->cb(finished, finished->error ? -1 : 0);
    }
  }

  assert(ngx_queue_empty(&stream->write_completed_queue));

  /* No completed requests left and nothing queued to write: drained. */
  if (uv_write_queue_head(stream) == NULL) {
    uv__drain(stream);
  }
}
/* Invoke the user callback for every write request that uv__write has
 * finished and parked on write_completed_queue, then signal drain if the
 * pending-write queue is empty too.
 *
 * Fix: the local `callbacks_made` counter was incremented but never read
 * (dead code, warns under -Wunused-but-set-variable); it has been removed.
 */
static void uv__write_callbacks(uv_tcp_t* tcp) {
  ngx_queue_t* q;
  uv_write_t* req;

  while (!ngx_queue_empty(&tcp->write_completed_queue)) {
    /* Pop a req off write_completed_queue. */
    q = ngx_queue_head(&tcp->write_completed_queue);
    assert(q);
    req = ngx_queue_data(q, struct uv_write_s, queue);
    ngx_queue_remove(q);

    /* NOTE: call callback AFTER freeing the request data. */
    if (req->cb) {
      req->cb(req, 0);
    }
  }

  assert(ngx_queue_empty(&tcp->write_completed_queue));

  /* Write queue drained. */
  if (!uv_write_queue_head(tcp)) {
    uv__drain(tcp);
  }
}
/* Invoke the user callback for every completed write request, reporting
 * the per-request error (loop error set via uv__set_artificial_error,
 * callback status -1 on error, 0 on success), then signal drain if the
 * pending-write queue is empty too.
 *
 * Fix: the local `callbacks_made` counter was incremented but never read
 * (dead code, warns under -Wunused-but-set-variable); it has been removed.
 */
static void uv__write_callbacks(uv_stream_t* stream) {
  ngx_queue_t* q;
  uv_write_t* req;

  while (!ngx_queue_empty(&stream->write_completed_queue)) {
    /* Pop a req off write_completed_queue. */
    q = ngx_queue_head(&stream->write_completed_queue);
    assert(q);
    req = ngx_queue_data(q, struct uv_write_s, queue);
    ngx_queue_remove(q);

    /* NOTE: call callback AFTER freeing the request data. */
    if (req->cb) {
      uv__set_artificial_error(stream->loop, req->error);
      req->cb(req, req->error ? -1 : 0);
    }
  }

  assert(ngx_queue_empty(&stream->write_completed_queue));

  /* Write queue drained. */
  if (!uv_write_queue_head(stream)) {
    uv__drain(stream);
  }
}
/* I/O watcher callback for a stream: dispatches poll `events` to the read
 * and write paths. Note the strict ordering — read callbacks may close the
 * stream, so the fd is re-checked before each subsequent step.
 * `loop` is unused here; the stream is recovered from the embedded watcher.
 */
static void uv__stream_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  uv_stream_t* stream;

  /* Recover the owning stream from its embedded io_watcher member. */
  stream = container_of(w, uv_stream_t, io_watcher);

  assert(stream->type == UV_TCP ||
         stream->type == UV_NAMED_PIPE ||
         stream->type == UV_TTY);
  assert(!(stream->flags & UV_CLOSING));

  /* A pending connect takes priority over everything else; the same
   * watcher fires when the connection result is ready. */
  if (stream->connect_req) {
    uv__stream_connect(stream);
    return;
  }

  assert(uv__stream_fd(stream) >= 0);

  /* Ignore POLLHUP here. Even if it's set, there may still be data to read. */
  if (events & (UV__POLLIN | UV__POLLERR | UV__POLLHUP))
    uv__read(stream);

  if (uv__stream_fd(stream) == -1)
    return;  /* read_cb closed stream. */

  /* Short-circuit iff POLLHUP is set, the user is still interested in read
   * events and uv__read() reported a partial read but not EOF. If the EOF
   * flag is set, uv__read() called read_cb with err=UV_EOF and we don't
   * have to do anything. If the partial read flag is not set, we can't
   * report the EOF yet because there is still data to read.
   */
  if ((events & UV__POLLHUP) &&
      (stream->flags & UV_STREAM_READING) &&
      (stream->flags & UV_STREAM_READ_PARTIAL) &&
      !(stream->flags & UV_STREAM_READ_EOF)) {
    /* Deliver EOF with an empty buffer. */
    uv_buf_t buf = { NULL, 0 };
    uv__stream_eof(stream, &buf);
  }

  if (uv__stream_fd(stream) == -1)
    return;  /* read_cb closed stream. */

  /* Writable (or error/hangup, which writes will surface): flush queued
   * writes, run their completion callbacks, then drain if nothing is left. */
  if (events & (UV__POLLOUT | UV__POLLERR | UV__POLLHUP)) {
    uv__write(stream);
    uv__write_callbacks(stream);

    /* Write queue drained. */
    if (QUEUE_EMPTY(&stream->write_queue))
      uv__drain(stream);
  }
}
/* Attempt to flush the request at the head of the handle's write queue
 * with a single writev(2). On a short write the buffer pointers/lengths
 * are advanced in place, so the next call continues where this one left
 * off. When the request finishes, its buffer array is freed and its
 * callback runs; when the whole queue empties, uv__drain() is called.
 * On a non-EAGAIN error the handle is closed and the callback gets -1.
 *
 * Fixes: the unused `uv_err_t err` binding is dropped (the uv_err_new()
 * call is kept — it records the error; presumably on the handle/loop, see
 * its definition), and the ssize_t/size_t comparisons are made explicit
 * with (size_t) casts (safe: n > 0 inside the loop) to avoid
 * -Wsign-compare warnings. All other logic is unchanged.
 */
void uv__write(uv_handle_t* handle) {
  assert(handle->fd >= 0);

  /* TODO: should probably while(1) here until EAGAIN */

  /* Get the request at the head of the queue. */
  uv_req_t* req = uv_write_queue_head(handle);
  if (!req) {
    /* Nothing queued: report drain and stop. */
    assert(handle->write_queue_size == 0);
    uv__drain(handle);
    return;
  }

  assert(req->handle == handle);

  /* Cast to iovec. We had to have our own uv_buf_t instead of iovec
   * because Windows's WSABUF is not an iovec.
   */
  assert(sizeof(uv_buf_t) == sizeof(struct iovec));
  struct iovec* iov = (struct iovec*) &(req->bufs[req->write_index]);
  int iovcnt = req->bufcnt - req->write_index;

  /* Now do the actual writev. Note that we've been updating the pointers
   * inside the iov each time we write. So there is no need to offset it.
   */
  ssize_t n = writev(handle->fd, iov, iovcnt);

  uv_write_cb cb = req->cb;

  if (n < 0) {
    if (errno != EAGAIN) {
      /* Record the error. The binding of the returned uv_err_t was unused
       * and has been removed; the call is kept for its side effect. */
      uv_err_new(handle, errno);

      /* XXX How do we handle the error? Need test coverage here. */
      uv_close(handle);

      if (cb) {
        cb(req, -1);
      }
      return;
    }
    /* EAGAIN: fall through and keep the watcher armed. */
  } else {
    /* Successful write: consume n bytes from the request's buffers. */
    while (n > 0) {
      uv_buf_t* buf = &(req->bufs[req->write_index]);
      size_t len = buf->len;

      assert(req->write_index < req->bufcnt);

      if ((size_t) n < len) {
        /* Partial write of this buffer: advance it in place. */
        buf->base += n;
        buf->len -= n;
        handle->write_queue_size -= n;
        n = 0;

        /* There is more to write. Break and ensure the watcher is pending. */
        break;
      } else {
        /* Finished writing the buf at index req->write_index. */
        req->write_index++;

        assert((size_t) n >= len);
        n -= len;

        assert(handle->write_queue_size >= len);
        handle->write_queue_size -= len;

        if (req->write_index == req->bufcnt) {
          /* Then we're done! */
          assert(n == 0);

          /* Pop the req off handle->write_queue. */
          ngx_queue_remove(&req->queue);
          free(req->bufs); /* FIXME: we should not be allocing for each read */
          req->bufs = NULL;

          /* NOTE: call callback AFTER freeing the request data. */
          if (cb) {
            cb(req, 0);
          }

          if (!ngx_queue_empty(&handle->write_queue)) {
            assert(handle->write_queue_size > 0);
          } else {
            /* Write queue drained. */
            uv__drain(handle);
          }

          return;
        }
      }
    }
  }

  /* Either we've counted n down to zero or we've got EAGAIN. */
  assert(n == 0 || n == -1);

  /* We're not done yet: keep the write watcher active so we get called
   * again when the fd becomes writable. */
  assert(ev_is_active(&handle->write_watcher));
  ev_io_start(EV_DEFAULT_ &handle->write_watcher);
}