void uv__stream_destroy(uv_stream_t* stream) {
  assert(!uv__io_active(&stream->io_watcher, UV__POLLIN | UV__POLLOUT));
  assert(stream->flags & UV_CLOSED);

  if (stream->connect_req) {
    uv__req_unregister(stream->loop, stream->connect_req);
    stream->connect_req->cb(stream->connect_req, -ECANCELED);
    stream->connect_req = NULL;
  }

  uv__stream_flush_write_queue(stream, -ECANCELED);
  uv__write_callbacks(stream);

  if (stream->shutdown_req) {
    /* The ECANCELED error code is a lie, the shutdown(2) syscall is a
     * fait accompli at this point. Maybe we should revisit this in v0.11.
     * A possible reason for leaving it unchanged is that it informs the
     * callee that the handle has been destroyed.
     */
    uv__req_unregister(stream->loop, stream->shutdown_req);
    stream->shutdown_req->cb(stream->shutdown_req, -ECANCELED);
    stream->shutdown_req = NULL;
  }

  assert(stream->write_queue_size == 0);
}
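/*
 * Hedged usage sketch, not libuv source: it illustrates the user-visible
 * contract of uv__stream_destroy() above through the public libuv 1.x API.
 * A write still queued when the stream is closed completes with
 * UV_ECANCELED; the oversized buffer and unread pipe below make it likely
 * the write cannot be flushed synchronously, but the callback also handles
 * the status 0 case. The pipe file descriptors come from an ordinary
 * pipe(2) call; everything here is illustrative, not part of the library.
 */
#include <stdio.h>
#include <unistd.h>
#include <uv.h>

static char big[1 << 20];  /* larger than a typical pipe buffer */

static void on_write(uv_write_t* req, int status) {
  (void) req;
  if (status == UV_ECANCELED)
    printf("queued write cancelled by uv_close()\n");
  else
    printf("write completed with status %d\n", status);
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_pipe_t out;
  uv_write_t req;
  uv_buf_t buf = uv_buf_init(big, sizeof(big));
  int fds[2];

  if (pipe(fds) != 0)
    return 1;

  uv_pipe_init(loop, &out, 0);
  uv_pipe_open(&out, fds[1]);             /* wrap the pipe's write end */
  uv_write(&req, (uv_stream_t*) &out, &buf, 1, on_write);
  uv_close((uv_handle_t*) &out, NULL);    /* close with data still queued */

  uv_run(loop, UV_RUN_DEFAULT);
  close(fds[0]);
  return 0;
}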
void uv__stream_destroy(uv_stream_t* stream) {
  uv_write_t* req;
  ngx_queue_t* q;

  assert(!uv__io_active(&stream->io_watcher, UV__POLLIN | UV__POLLOUT));
  assert(stream->flags & UV_CLOSED);

  if (stream->connect_req) {
    uv__req_unregister(stream->loop, stream->connect_req);
    uv__set_artificial_error(stream->loop, UV_ECANCELED);
    stream->connect_req->cb(stream->connect_req, -1);
    stream->connect_req = NULL;
  }

  while (!ngx_queue_empty(&stream->write_queue)) {
    q = ngx_queue_head(&stream->write_queue);
    ngx_queue_remove(q);

    req = ngx_queue_data(q, uv_write_t, queue);
    uv__req_unregister(stream->loop, req);

    if (req->bufs != req->bufsml)
      free(req->bufs);

    if (req->cb) {
      uv__set_artificial_error(req->handle->loop, UV_ECANCELED);
      req->cb(req, -1);
    }
  }

  while (!ngx_queue_empty(&stream->write_completed_queue)) {
    q = ngx_queue_head(&stream->write_completed_queue);
    ngx_queue_remove(q);

    req = ngx_queue_data(q, uv_write_t, queue);
    uv__req_unregister(stream->loop, req);

    if (req->cb) {
      uv__set_sys_error(stream->loop, req->error);
      req->cb(req, req->error ? -1 : 0);
    }
  }

  if (stream->shutdown_req) {
    uv__req_unregister(stream->loop, stream->shutdown_req);
    uv__set_artificial_error(stream->loop, UV_ECANCELED);
    stream->shutdown_req->cb(stream->shutdown_req, -1);
    stream->shutdown_req = NULL;
  }
}
static void uv__drain(uv_stream_t* stream) {
  uv_shutdown_t* req;

  assert(!uv_write_queue_head(stream));
  assert(stream->write_queue_size == 0);

  uv__io_stop(stream->loop, &stream->io_watcher, UV__POLLOUT);

  /* Shutdown? */
  if ((stream->flags & UV_STREAM_SHUTTING) &&
      !(stream->flags & UV_CLOSING) &&
      !(stream->flags & UV_STREAM_SHUT)) {
    assert(stream->shutdown_req);

    req = stream->shutdown_req;
    stream->shutdown_req = NULL;
    uv__req_unregister(stream->loop, req);

    if (shutdown(uv__stream_fd(stream), SHUT_WR)) {
      /* Error. Report it. User should call uv_close(). */
      uv__set_sys_error(stream->loop, errno);
      if (req->cb) {
        req->cb(req, -1);
      }
    } else {
      uv__set_sys_error(stream->loop, 0);
      ((uv_handle_t*) stream)->flags |= UV_STREAM_SHUT;
      if (req->cb) {
        req->cb(req, 0);
      }
    }
  }
}
/*
 * We get called here directly after a call to connect(2).
 * In order to determine whether we've errored out or succeeded, we must
 * call getsockopt().
 */
static void uv__stream_connect(uv_stream_t* stream) {
  int error;
  uv_connect_t* req = stream->connect_req;
  socklen_t errorsize = sizeof(int);

  assert(stream->type == UV_TCP || stream->type == UV_NAMED_PIPE);
  assert(req);

  if (stream->delayed_error) {
    /* To smooth over the differences between unixes, errors that were
     * reported synchronously on the first connect can be delayed until
     * the next tick--which is now.
     */
    error = stream->delayed_error;
    stream->delayed_error = 0;
  } else {
    /* Normal situation: we need to get the socket error from the kernel. */
    assert(stream->fd >= 0);
    getsockopt(stream->fd, SOL_SOCKET, SO_ERROR, &error, &errorsize);
  }

  if (error == EINPROGRESS)
    return;

  stream->connect_req = NULL;
  uv__req_unregister(stream->loop, req);

  if (req->cb) {
    uv__set_sys_error(stream->loop, error);
    req->cb(req, error ? -1 : 0);
  }
}
static void uv__write_callbacks(uv_stream_t* stream) {
  uv_write_t* req;
  ngx_queue_t* q;

  while (!ngx_queue_empty(&stream->write_completed_queue)) {
    /* Pop a req off write_completed_queue. */
    q = ngx_queue_head(&stream->write_completed_queue);
    req = ngx_queue_data(q, uv_write_t, queue);
    ngx_queue_remove(q);
    uv__req_unregister(stream->loop, req);

    /* NOTE: call callback AFTER freeing the request data. */
    if (req->cb) {
      uv__set_sys_error(stream->loop, req->error);
      req->cb(req, req->error ? -1 : 0);
    }
  }

  assert(ngx_queue_empty(&stream->write_completed_queue));

  /* Write queue drained. */
  if (!uv_write_queue_head(stream)) {
    uv__drain(stream);
  }
}
static void uv__getaddrinfo_done(struct uv__work* w, int status) {
  uv_getaddrinfo_t* req;

  req = container_of(w, uv_getaddrinfo_t, work_req);
  uv__req_unregister(req->loop, req);

  /* See initialization in uv_getaddrinfo(). */
  if (req->hints)
    uv__free(req->hints);
  else if (req->service)
    uv__free(req->service);
  else if (req->hostname)
    uv__free(req->hostname);
  else
    assert(0);

  req->hints = NULL;
  req->service = NULL;
  req->hostname = NULL;

  if (status == -ECANCELED) {
    assert(req->retcode == 0);
    req->retcode = UV_EAI_CANCELED;
  }

  if (req->cb)
    req->cb(req, req->retcode, req->addrinfo);
}
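/*
 * Hedged usage sketch, not libuv source: a minimal uv_getaddrinfo() call
 * whose completion path is the uv__getaddrinfo_done() function above.
 * A request cancelled with uv_cancel() surfaces in the callback as
 * UV_EAI_CANCELED, matching the translation done there. The hostname
 * "localhost" and the IPv4-only hints are illustrative choices; assumes
 * libuv 1.x and a POSIX system with <netdb.h>.
 */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netdb.h>
#include <uv.h>

static void on_resolved(uv_getaddrinfo_t* req, int status, struct addrinfo* res) {
  char addr[64];

  (void) req;
  if (status < 0) {  /* UV_EAI_CANCELED here if the request was cancelled */
    fprintf(stderr, "getaddrinfo: %s\n", uv_strerror(status));
    return;
  }
  uv_ip4_name((const struct sockaddr_in*) res->ai_addr, addr, sizeof(addr));
  printf("localhost -> %s\n", addr);
  uv_freeaddrinfo(res);  /* the result list must be freed by the caller */
}

int main(void) {
  uv_getaddrinfo_t req;
  struct addrinfo hints;

  memset(&hints, 0, sizeof(hints));
  hints.ai_family = AF_INET;
  hints.ai_socktype = SOCK_STREAM;

  uv_getaddrinfo(uv_default_loop(), &req, on_resolved, "localhost", NULL, &hints);
  return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
}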
static void uv__udp_run_completed(uv_udp_t* handle) {
  uv_udp_send_t* req;
  QUEUE* q;

  while (!QUEUE_EMPTY(&handle->write_completed_queue)) {
    q = QUEUE_HEAD(&handle->write_completed_queue);
    QUEUE_REMOVE(q);

    req = QUEUE_DATA(q, uv_udp_send_t, queue);
    uv__req_unregister(handle->loop, req);

    if (req->bufs != req->bufsml)
      free(req->bufs);
    req->bufs = NULL;

    if (req->send_cb == NULL)
      continue;

    /* req->status >= 0 == bytes written
     * req->status <  0 == errno
     */
    if (req->status >= 0)
      req->send_cb(req, 0);
    else
      req->send_cb(req, req->status);
  }
}
int uv_fs_write(uv_loop_t* loop,
                uv_fs_t* req,
                uv_file file,
                const uv_buf_t bufs[],
                unsigned int nbufs,
                int64_t off,
                uv_fs_cb cb) {
  INIT(WRITE);

  if (bufs == NULL || nbufs == 0)
    return -EINVAL;

  req->file = file;

  req->nbufs = nbufs;
  req->bufs = req->bufsml;
  if (nbufs > ARRAY_SIZE(req->bufsml))
    req->bufs = uv__malloc(nbufs * sizeof(*bufs));

  if (req->bufs == NULL) {
    if (cb != NULL)
      uv__req_unregister(loop, req);
    return -ENOMEM;
  }

  memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));

  req->off = off;
  POST;
}
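/*
 * Hedged usage sketch, not libuv source: an asynchronous uv_fs_write()
 * against STDOUT (file descriptor 1), the request setup for which is the
 * function above. The callback reads req->result (bytes written, or a
 * negative error code) and calls uv_fs_req_cleanup() to release any memory
 * libuv allocated for the request. Assumes libuv 1.x; the message text and
 * the use of fd 1 are illustrative.
 */
#include <stdio.h>
#include <uv.h>

static void on_fs_write(uv_fs_t* req) {
  if (req->result < 0)
    fprintf(stderr, "write error: %s\n", uv_strerror((int) req->result));
  uv_fs_req_cleanup(req);  /* always clean up the request when done */
}

int main(void) {
  static char text[] = "hello from uv_fs_write\n";
  uv_fs_t req;
  uv_buf_t buf = uv_buf_init(text, sizeof(text) - 1);

  /* An offset of -1 means "write at the current file position". */
  uv_fs_write(uv_default_loop(), &req, 1, &buf, 1, -1, on_fs_write);
  return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
}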
void uv__udp_finish_close(uv_udp_t* handle) {
  uv_udp_send_t* req;
  QUEUE* q;

  assert(!uv__io_active(&handle->io_watcher, UV__POLLIN | UV__POLLOUT));
  assert(handle->io_watcher.fd == -1);

  uv__udp_run_completed(handle);

  while (!QUEUE_EMPTY(&handle->write_queue)) {
    q = QUEUE_HEAD(&handle->write_queue);
    QUEUE_REMOVE(q);

    req = QUEUE_DATA(q, uv_udp_send_t, queue);
    uv__req_unregister(handle->loop, req);

    if (req->bufs != req->bufsml)
      free(req->bufs);
    req->bufs = NULL;

    if (req->send_cb != NULL)
      req->send_cb(req, -ECANCELED);
  }

  /* Now tear down the handle. */
  handle->recv_cb = NULL;
  handle->alloc_cb = NULL;
  /* but _do not_ touch close_cb */
}
void uv__udp_finish_close(uv_udp_t* handle) {
  uv_udp_send_t* req;
  ngx_queue_t* q;

  assert(!uv__io_active(&handle->write_watcher));
  assert(!uv__io_active(&handle->read_watcher));
  assert(handle->fd == -1);

  uv__udp_run_completed(handle);

  while (!ngx_queue_empty(&handle->write_queue)) {
    q = ngx_queue_head(&handle->write_queue);
    ngx_queue_remove(q);

    req = ngx_queue_data(q, uv_udp_send_t, queue);
    uv__req_unregister(handle->loop, req);

    if (req->send_cb) {
      /* FIXME proper error code like UV_EABORTED */
      uv__set_artificial_error(handle->loop, UV_EINTR);
      req->send_cb(req, -1);
    }
  }

  /* Now tear down the handle. */
  handle->flags = 0;
  handle->recv_cb = NULL;
  handle->alloc_cb = NULL;
  /* but _do not_ touch close_cb */
}
static void uv__udp_run_completed(uv_udp_t* handle) {
  uv_udp_send_t* req;
  ngx_queue_t* q;

  while (!ngx_queue_empty(&handle->write_completed_queue)) {
    q = ngx_queue_head(&handle->write_completed_queue);
    ngx_queue_remove(q);

    req = ngx_queue_data(q, uv_udp_send_t, queue);
    uv__req_unregister(handle->loop, req);

    if (req->bufs != req->bufsml)
      free(req->bufs);
    req->bufs = NULL;

    if (req->send_cb == NULL)
      continue;

    /* req->status >= 0 == bytes written
     * req->status <  0 == errno
     */
    if (req->status >= 0) {
      req->send_cb(req, 0);
    } else {
      uv__set_sys_error(handle->loop, -req->status);
      req->send_cb(req, -1);
    }
  }
}
void uv__udp_finish_close(uv_udp_t* handle) {
  uv_udp_send_t* req;
  ngx_queue_t* q;

  assert(!uv__io_active(&handle->io_watcher, UV__POLLIN | UV__POLLOUT));
  assert(handle->io_watcher.fd == -1);

  uv__udp_run_completed(handle);

  while (!ngx_queue_empty(&handle->write_queue)) {
    q = ngx_queue_head(&handle->write_queue);
    ngx_queue_remove(q);

    req = ngx_queue_data(q, uv_udp_send_t, queue);
    uv__req_unregister(handle->loop, req);

    if (req->bufs != req->bufsml)
      free(req->bufs);
    req->bufs = NULL;

    if (req->send_cb) {
      uv__set_artificial_error(handle->loop, UV_ECANCELED);
      req->send_cb(req, -1);
    }
  }

  /* Now tear down the handle. */
  handle->flags = 0;
  handle->recv_cb = NULL;
  handle->alloc_cb = NULL;
  /* but _do not_ touch close_cb */
}
static void uv__drain(uv_stream_t* stream) {
  uv_shutdown_t* req;
  int err;

  assert(QUEUE_EMPTY(&stream->write_queue));
  uv__io_stop(stream->loop, &stream->io_watcher, UV__POLLOUT);

  /* Shutdown? */
  if ((stream->flags & UV_STREAM_SHUTTING) &&
      !(stream->flags & UV_CLOSING) &&
      !(stream->flags & UV_STREAM_SHUT)) {
    assert(stream->shutdown_req);

    req = stream->shutdown_req;
    stream->shutdown_req = NULL;
    stream->flags &= ~UV_STREAM_SHUTTING;
    uv__req_unregister(stream->loop, req);

    err = 0;
    if (tuvp_shutdown(uv__stream_fd(stream), SHUT_WR))
      err = -errno;

    if (err == 0)
      stream->flags |= UV_STREAM_SHUT;

    if (req->cb != NULL)
      req->cb(req, err);
  }
}
static void uv__write_callbacks(uv_stream_t* stream) {
  uv_write_t* req;
  QUEUE* q;

  while (!QUEUE_EMPTY(&stream->write_completed_queue)) {
    /* Pop a req off write_completed_queue. */
    q = QUEUE_HEAD(&stream->write_completed_queue);
    req = QUEUE_DATA(q, uv_write_t, queue);
    QUEUE_REMOVE(q);
    uv__req_unregister(stream->loop, req);

    if (req->bufs != NULL) {
      stream->write_queue_size -= uv__write_req_size(req);
      if (req->bufs != req->bufsml)
        free(req->bufs);
      req->bufs = NULL;
    }

    /* NOTE: call callback AFTER freeing the request data. */
    if (req->cb)
      req->cb(req, req->error);
  }

  assert(QUEUE_EMPTY(&stream->write_completed_queue));
}
static void uv__getaddrinfo_done(struct uv__work* w, int status) {
  uv_getaddrinfo_t* req = container_of(w, uv_getaddrinfo_t, work_req);
  struct addrinfo* res = req->res;
#if defined(__sun)
  size_t hostlen;

  if (req->hostname)
    hostlen = strlen(req->hostname);
  else
    hostlen = 0;
#endif

  req->res = NULL;

  uv__req_unregister(req->loop, req);

  /* see initialization in uv_getaddrinfo() */
  if (req->hints) {
    JX_FREE(getaddr, req->hints);
  } else if (req->service) {
    JX_FREE(getaddr, req->service);
  } else if (req->hostname) {
    JX_FREE(getaddr, req->hostname);
  } else
    assert(0);

  req->hints = NULL;
  req->service = NULL;
  req->hostname = NULL;

  if (req->retcode < 0) {
    /* EAI_SYSTEM error */
    uv__set_sys_error(req->loop, -req->retcode);
  } else if (req->retcode == 0) {
    /* OK */
#if defined(EAI_NODATA) /* FreeBSD deprecated EAI_NODATA */
  } else if (req->retcode == EAI_NONAME || req->retcode == EAI_NODATA) {
#else
  } else if (req->retcode == EAI_NONAME) {
#endif
    uv__set_sys_error(req->loop, ENOENT); /* FIXME compatibility hack */
#if defined(__sun)
  } else if (req->retcode == EAI_MEMORY && hostlen >= MAXHOSTNAMELEN) {
    uv__set_sys_error(req->loop, ENOENT);
#endif
  } else {
    req->loop->last_err.code = UV_EADDRINFO;
    req->loop->last_err.sys_errno_ = req->retcode;
  }

  if (status == -UV_ECANCELED) {
    assert(req->retcode == 0);
    req->retcode = UV_ECANCELED;
    uv__set_artificial_error(req->loop, UV_ECANCELED);
  }

  req->cb(req, req->retcode, res);
}
static void uv__queue_done(struct uv__work* w, int err) {
  uv_work_t* req;

  req = container_of(w, uv_work_t, work_req);
  uv__req_unregister(req->loop, req);

  if (req->after_work_cb == NULL)
    return;

  req->after_work_cb(req, err);
}
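/*
 * Hedged usage sketch, not libuv source: queues a thread-pool work request
 * and immediately tries to cancel it, which is the path that makes the
 * uv__queue_done() variants here deliver a cancellation status to the
 * after-work callback. uv_cancel() only succeeds while the item is still
 * waiting in the pool; if the work has already started it returns UV_EBUSY
 * and the callback later sees status 0. Assumes libuv 1.x and <uv.h>.
 */
#include <stdio.h>
#include <uv.h>

static void work_cb(uv_work_t* req) {
  (void) req;  /* runs on a thread-pool thread */
}

static void after_work_cb(uv_work_t* req, int status) {
  (void) req;
  if (status == UV_ECANCELED)
    printf("work item was cancelled before it ran\n");
  else
    printf("work item completed, status %d\n", status);
}

int main(void) {
  uv_work_t req;

  uv_queue_work(uv_default_loop(), &req, work_cb, after_work_cb);
  if (uv_cancel((uv_req_t*) &req) == 0)
    printf("cancellation accepted\n");

  return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
}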
static void uv__queue_done(struct uv__work* w, int err, const char* buf, size_t buf_len) {
  uv_work_t* req;

  req = container_of(w, uv_work_t, work_req);
  uv__req_unregister(req->loop, req);

  if (req->after_work_cb == NULL)
    return;

  req->after_work_cb(req, err, buf, buf_len);
}
static void uv__getaddrinfo_done(struct uv__work* w, int status) {
  uv_getaddrinfo_t* req = container_of(w, uv_getaddrinfo_t, work_req);
  struct addrinfo* res = req->res;
#if __sun
  size_t hostlen;

  if (req->hostname)
    hostlen = strlen(req->hostname);
  else
    hostlen = 0;
#endif

  req->res = NULL;

  uv__req_unregister(req->loop, req);

  /* see initialization in uv_getaddrinfo() */
  if (req->hints)
    free(req->hints);
  else if (req->service)
    free(req->service);
  else if (req->hostname)
    free(req->hostname);
  else
    assert(0);

  req->hints = NULL;
  req->service = NULL;
  req->hostname = NULL;

  if (status != 0)
    return;

  if (req->retcode == 0) {
    /* OK */
#if EAI_NODATA /* FreeBSD deprecated EAI_NODATA */
  } else if (req->retcode == EAI_NONAME || req->retcode == EAI_NODATA) {
#else
  } else if (req->retcode == EAI_NONAME) {
#endif
    uv__set_sys_error(req->loop, ENOENT); /* FIXME compatibility hack */
#if __sun
  } else if (req->retcode == EAI_MEMORY && hostlen >= MAXHOSTNAMELEN) {
    uv__set_sys_error(req->loop, ENOENT);
#endif
  } else {
    req->loop->last_err.code = UV_EADDRINFO;
    req->loop->last_err.sys_errno_ = req->retcode;
  }

  req->cb(req, req->retcode, res);
}
static void uv__fs_done(struct uv__work* w, int status) {
  uv_fs_t* req;

  req = container_of(w, uv_fs_t, work_req);
  uv__req_unregister(req->loop, req);

  if (status == -ECANCELED) {
    assert(req->result == 0);
    req->result = -ECANCELED;
  }

  req->cb(req);
}
static void uv__queue_done(struct uv__work* w, int status) {
  uv_work_t* req;

  req = container_of(w, uv_work_t, work_req);
  uv__req_unregister(req->loop, req);

  if (req->after_work_cb == NULL)
    return;

  if (status == -UV_ECANCELED)
    uv__set_artificial_error(req->loop, UV_ECANCELED);

  req->after_work_cb(req, status ? -1 : 0);
}
int uv_fs_mkdtemp(uv_loop_t* loop, uv_fs_t* req, const char* tpl, uv_fs_cb cb) {
  INIT(MKDTEMP);

  req->path = uv__strdup(tpl);
  if (req->path == NULL) {
    if (cb != NULL)
      uv__req_unregister(loop, req);
    return -ENOMEM;
  }

  POST;
}
static void uv__fs_done(struct uv__work* w) {
  uv_fs_t* req;

  req = container_of(w, uv_fs_t, work_req);
  uv__req_unregister(req->loop, req);

  if (req->errorno != 0) {
    req->errorno = uv_translate_sys_error(req->errorno);
    uv__set_artificial_error(req->loop, req->errorno);
  }

  if (req->cb != NULL)
    req->cb(req);
}
static void uv__queue_done(struct uv__work* w, int err) {
  uv_work_t* req;

  req = container_of(w, uv_work_t, work_req);

  if (QUEUE_EMPTY(&(req->loop)->active_reqs) != 0) {
    printf("-- %s --\n", w->name);
  }
  free(w->name);

  uv__req_unregister(req->loop, req);

  if (req->after_work_cb == NULL)
    return;

  req->after_work_cb(req, err);
}
/*
 * Called from uv_run when complete.
 */
void uv_process_getnameinfo_req(uv_loop_t* loop, uv_getnameinfo_t* req) {
  char* host;
  char* service;

  if (req->retcode == 0) {
    host = req->host;
    service = req->service;
  } else {
    host = NULL;
    service = NULL;
  }

  uv__req_unregister(loop, req);
  req->getnameinfo_cb(req, req->retcode, host, service);
}
/*
 * We get called here directly after a call to connect(2).
 * In order to determine whether we've errored out or succeeded, we must
 * call getsockopt().
 */
static void uv__stream_connect(uv_stream_t* stream) {
  int error;
  uv_connect_t* req = stream->connect_req;
  socklen_t errorsize = sizeof(int);

  assert(stream->type == UV_TCP || stream->type == UV_NAMED_PIPE);
  assert(req);

  if (stream->delayed_error) {
    /* To smooth over the differences between unixes, errors that were
     * reported synchronously on the first connect can be delayed until
     * the next tick--which is now.
     */
    error = stream->delayed_error;
    stream->delayed_error = 0;
  } else {
    /* Normal situation: we need to get the socket error from the kernel. */
    assert(uv__stream_fd(stream) >= 0);
    tuvp_getsockopt(uv__stream_fd(stream),
                    SOL_SOCKET,
                    SO_ERROR,
                    &error,
                    &errorsize);
    error = -error;
  }

  if (error == -EINPROGRESS)
    return;

  stream->connect_req = NULL;
  uv__req_unregister(stream->loop, req);

  if (error < 0 || QUEUE_EMPTY(&stream->write_queue)) {
    uv__io_stop(stream->loop, &stream->io_watcher, UV__POLLOUT);
  }

  if (req->cb)
    req->cb(req, error);

  if (uv__stream_fd(stream) == -1)
    return;

  if (error < 0) {
    uv__stream_flush_write_queue(stream, -ECANCELED);
    uv__write_callbacks(stream);
  }
}
int uv_try_write(uv_stream_t* stream, const uv_buf_t bufs[], unsigned int nbufs) {
  int r;
  int has_pollout;
  size_t written;
  size_t req_size;
  uv_write_t req;

  /* Connecting or already writing some data */
  if (stream->connect_req != NULL || stream->write_queue_size != 0)
    return -EAGAIN;

  has_pollout = uv__io_active(&stream->io_watcher, UV__POLLOUT);

  r = uv_write(&req, stream, bufs, nbufs, uv_try_write_cb);
  if (r != 0)
    return r;

  /* Remove the bytes that were not written from the write queue size */
  written = uv__count_bufs(bufs, nbufs);
  if (req.bufs != NULL)
    req_size = uv__write_req_size(&req);
  else
    req_size = 0;
  written -= req_size;
  stream->write_queue_size -= req_size;

  /* Unqueue the request, regardless of whether it completed immediately */
  QUEUE_REMOVE(&req.queue);
  uv__req_unregister(stream->loop, &req);
  if (req.bufs != req.bufsml)
    free(req.bufs);
  req.bufs = NULL;

  /* Do not poll for writable if we weren't polling before this call */
  if (!has_pollout) {
    uv__io_stop(stream->loop, &stream->io_watcher, UV__POLLOUT);
  }

  if (written == 0)
    return -EAGAIN;
  else
    return written;
}
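/*
 * Hedged usage sketch, not libuv source and not a full program: the calling
 * pattern uv_try_write() above is designed for. A short count or UV_EAGAIN
 * means the caller should queue the remainder with uv_write(). The helper
 * name send_or_queue() and the empty on_write() callback are hypothetical
 * placeholders a surrounding program would supply; assumes libuv 1.x.
 */
#include <stddef.h>
#include <uv.h>

static void on_write(uv_write_t* req, int status) {
  (void) req;
  (void) status;  /* handle completion of the queued remainder */
}

int send_or_queue(uv_stream_t* stream, uv_write_t* req, char* data, size_t len) {
  uv_buf_t buf = uv_buf_init(data, (unsigned int) len);
  int n = uv_try_write(stream, &buf, 1);

  if (n < 0 && n != UV_EAGAIN)
    return n;                        /* hard error, report to the caller */
  if (n == UV_EAGAIN)
    n = 0;                           /* nothing written, queue everything */
  if ((size_t) n == len)
    return 0;                        /* fully written, no request needed */

  /* Queue whatever uv_try_write() could not flush synchronously. */
  buf = uv_buf_init(data + n, (unsigned int) (len - n));
  return uv_write(req, stream, &buf, 1, on_write);
}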
static void uv__udp_run_completed(uv_udp_t* handle) {
  uv_udp_send_t* req;
  QUEUE* q;

  assert(!(handle->flags & UV_UDP_PROCESSING));
  handle->flags |= UV_UDP_PROCESSING;

  while (!QUEUE_EMPTY(&handle->write_completed_queue)) {
    q = QUEUE_HEAD(&handle->write_completed_queue);
    QUEUE_REMOVE(q);

    req = QUEUE_DATA(q, uv_udp_send_t, queue);
    uv__req_unregister(handle->loop, req);

    handle->send_queue_size -= uv__count_bufs(req->bufs, req->nbufs);
    handle->send_queue_count--;

    if (req->bufs != req->bufsml)
      uv__free(req->bufs);
    req->bufs = NULL;

    if (req->send_cb == NULL)
      continue;

    /* req->status >= 0 == bytes written
     * req->status <  0 == errno
     */
    if (req->status >= 0)
      req->send_cb(req, 0);
    else
      req->send_cb(req, req->status);
  }

  if (QUEUE_EMPTY(&handle->write_queue)) {
    /* Pending queue and completion queue empty, stop watcher. */
    uv__io_stop(handle->loop, &handle->io_watcher, UV__POLLOUT);
    if (!uv__io_active(&handle->io_watcher, UV__POLLIN))
      uv__handle_stop(handle);
  }

  handle->flags &= ~UV_UDP_PROCESSING;
}
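/*
 * Hedged usage sketch, not libuv source: a single uv_udp_send() whose
 * completion is eventually delivered by uv__udp_run_completed() above.
 * The send callback receives 0 on success, a negative error on failure,
 * and UV_ECANCELED if the handle is closed while the datagram is still
 * queued. The destination 127.0.0.1:9999 is a placeholder; assumes
 * libuv 1.x and a POSIX system.
 */
#include <stdio.h>
#include <netinet/in.h>
#include <uv.h>

static void on_send(uv_udp_send_t* req, int status) {
  (void) req;
  if (status < 0)
    fprintf(stderr, "send failed: %s\n", uv_strerror(status));
}

int main(void) {
  static char payload[] = "ping";
  uv_udp_t handle;
  uv_udp_send_t req;
  struct sockaddr_in dest;
  uv_buf_t buf = uv_buf_init(payload, sizeof(payload) - 1);

  uv_udp_init(uv_default_loop(), &handle);
  uv_ip4_addr("127.0.0.1", 9999, &dest);  /* placeholder address and port */
  uv_udp_send(&req, &handle, &buf, 1, (const struct sockaddr*) &dest, on_send);

  return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
}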
/*
 * Called from uv_run when complete.
 */
static void uv__getnameinfo_done(struct uv__work* w, int status) {
  uv_getnameinfo_t* req;
  char* host;
  char* service;

  req = container_of(w, uv_getnameinfo_t, work_req);
  uv__req_unregister(req->loop, req);
  host = service = NULL;

  if (status == UV_ECANCELED) {
    assert(req->retcode == 0);
    req->retcode = UV_EAI_CANCELED;
  } else if (req->retcode == 0) {
    host = req->host;
    service = req->service;
  }

  if (req->getnameinfo_cb)
    req->getnameinfo_cb(req, req->retcode, host, service);
}
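/*
 * Hedged usage sketch, not libuv source: a reverse lookup through
 * uv_getnameinfo(), the request type completed by uv__getnameinfo_done()
 * above. A cancelled request surfaces as UV_EAI_CANCELED with NULL host
 * and service, per the translation in that function. The loopback address
 * and port 80 are placeholders; assumes libuv 1.x.
 */
#include <stdio.h>
#include <netinet/in.h>
#include <uv.h>

static void on_nameinfo(uv_getnameinfo_t* req,
                        int status,
                        const char* hostname,
                        const char* service) {
  (void) req;
  if (status < 0) {
    fprintf(stderr, "getnameinfo: %s\n", uv_strerror(status));
    return;
  }
  printf("host=%s service=%s\n", hostname, service);
}

int main(void) {
  uv_getnameinfo_t req;
  struct sockaddr_in addr;

  uv_ip4_addr("127.0.0.1", 80, &addr);
  uv_getnameinfo(uv_default_loop(), &req, on_nameinfo,
                 (const struct sockaddr*) &addr, 0);
  return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
}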
static void uv__fs_done(struct uv__work* w, int status) {
  uv_fs_t* req;

  req = container_of(w, uv_fs_t, work_req);
  uv__req_unregister(req->loop, req);

  if (req->errorno != 0) {
    req->errorno = uv_translate_sys_error(req->errorno);
    uv__set_artificial_error(req->loop, req->errorno);
  }

  if (status == -UV_ECANCELED) {
    assert(req->errorno == 0);
    req->errorno = UV_ECANCELED;
    uv__set_artificial_error(req->loop, UV_ECANCELED);
  }

  if (req->cb != NULL)
    req->cb(req);
}
static int uv_getaddrinfo_done(eio_req* req_) {
  uv_getaddrinfo_t* req = req_->data;
  struct addrinfo* res = req->res;
#if __sun
  size_t hostlen;

  if (req->hostname)
    hostlen = strlen(req->hostname);
  else
    hostlen = 0;
#endif

  req->res = NULL;

  uv__req_unregister(req->loop, req);

  free(req->hints);
  free(req->service);
  free(req->hostname);

  if (req->retcode == 0) {
    /* OK */
#if EAI_NODATA /* FreeBSD deprecated EAI_NODATA */
  } else if (req->retcode == EAI_NONAME || req->retcode == EAI_NODATA) {
#else
  } else if (req->retcode == EAI_NONAME) {
#endif
    uv__set_sys_error(req->loop, ENOENT); /* FIXME compatibility hack */
#if __sun
  } else if (req->retcode == EAI_MEMORY && hostlen >= MAXHOSTNAMELEN) {
    uv__set_sys_error(req->loop, ENOENT);
#endif
  } else {
    req->loop->last_err.code = UV_EADDRINFO;
    req->loop->last_err.sys_errno_ = req->retcode;
  }

  req->cb(req, req->retcode, res);

  return 0;
}