/* Shut down the write side of a duplex stream (TCP or named pipe).
 * Returns 0 on success or -ENOTCONN when the stream is not writable
 * or is already shut / shutting down / closing. */
int uv_shutdown(uv_shutdown_t* req, uv_stream_t* stream, uv_shutdown_cb cb) {
  assert((stream->type == UV_TCP || stream->type == UV_NAMED_PIPE) &&
         "uv_shutdown (unix) only supports uv_handle_t right now");

  /* Refuse when the stream cannot (or should no longer) be written. */
  if (!(stream->flags & UV_STREAM_WRITABLE) ||
      stream->flags & UV_STREAM_SHUT ||
      stream->flags & UV_STREAM_SHUTTING ||
      stream->flags & UV_CLOSED ||
      stream->flags & UV_CLOSING) {
    TDLOG("uv_shutdown: ENOTCONN");
    return -ENOTCONN;
  }

  assert(uv__stream_fd(stream) >= 0);

  /* Initialize request */
  uv__req_init(stream->loop, req, UV_SHUTDOWN);
  req->handle = stream;
  req->cb = cb;
  stream->shutdown_req = req;
  stream->flags |= UV_STREAM_SHUTTING;

  /* The actual shutdown(2) is performed by the stream's io watcher once
   * the pending write queue has drained. */
  uv__io_start(stream->loop, &stream->io_watcher, UV__POLLOUT);

  return 0;
}
/* Shut down the write side of a stream (TCP or named pipe).
 * Returns 0 on success, or -1 with the loop error set to EINVAL when the
 * stream is not writable or is already shut / closed / closing. */
int uv_shutdown(uv_shutdown_t* req, uv_stream_t* stream, uv_shutdown_cb cb) {
  assert((stream->type == UV_TCP || stream->type == UV_NAMED_PIPE) &&
         "uv_shutdown (unix) only supports uv_handle_t right now");
  assert(stream->fd >= 0);

  /* Reject streams that cannot (or should no longer) be written. */
  if (!(stream->flags & UV_WRITABLE) ||
      stream->flags & UV_SHUT ||
      stream->flags & UV_CLOSED ||
      stream->flags & UV_CLOSING) {
    uv__set_sys_error(stream->loop, EINVAL);
    return -1;
  }

  /* Initialize request */
  uv__req_init(stream->loop, (uv_req_t*)req);
  req->handle = stream;
  req->cb = cb;

  stream->shutdown_req = req;
  req->type = UV_SHUTDOWN;

  ((uv_handle_t*)stream)->flags |= UV_SHUTTING;

  /* The shutdown itself happens from the write watcher once pending
   * writes have drained. */
  ev_io_start(stream->loop->ev, &stream->write_watcher);

  return 0;
}
/* Queue a UDP send request: copy the destination address and the buffer
 * descriptors into the request, append it to the handle's write queue and
 * arm the write watcher.  Returns 0 on success, -1 on error (deferred-bind
 * failure or ENOMEM, with the loop error set for the latter).
 *
 * Fix: assert that addrlen fits the request's fixed-size address storage
 * before memcpy'ing into it (an oversized sockaddr would overflow
 * req->addr), and assert the bufcnt > 0 precondition — both checks are
 * present in the sibling uv__send implementations. */
static int uv__udp_send(uv_udp_send_t* req,
                        uv_udp_t* handle,
                        uv_buf_t bufs[],
                        int bufcnt,
                        struct sockaddr* addr,
                        socklen_t addrlen,
                        uv_udp_send_cb send_cb) {
  assert(bufcnt > 0);

  if (uv__udp_maybe_deferred_bind(handle, addr->sa_family))
    return -1;

  uv__req_init(handle->loop, (uv_req_t*)req);

  /* Guard the fixed-size address storage before copying into it. */
  assert(addrlen <= sizeof(req->addr));
  memcpy(&req->addr, addr, addrlen);
  req->addrlen = addrlen;
  req->send_cb = send_cb;
  req->handle = handle;
  req->bufcnt = bufcnt;
  req->type = UV_UDP_SEND;

  /* Small sends use the inline array; larger ones go to the heap. */
  if (bufcnt <= UV_REQ_BUFSML_SIZE) {
    req->bufs = req->bufsml;
  } else if ((req->bufs = malloc(bufcnt * sizeof(bufs[0]))) == NULL) {
    uv__set_sys_error(handle->loop, ENOMEM);
    return -1;
  }

  memcpy(req->bufs, bufs, bufcnt * sizeof(bufs[0]));
  ngx_queue_insert_tail(&handle->write_queue, &req->queue);
  uv__udp_start_write_watcher(handle);

  return 0;
}
/* Queue a UDP send request: copy the destination address and buffer
 * descriptors into the request, append it to the handle's write queue and
 * arm the io watcher for writability.  Returns 0 on success, -1 on error
 * (deferred-bind failure, or ENOMEM with the loop error set). */
static int uv__send(uv_udp_send_t* req,
                    uv_udp_t* handle,
                    uv_buf_t bufs[],
                    int bufcnt,
                    struct sockaddr* addr,
                    socklen_t addrlen,
                    uv_udp_send_cb send_cb) {
  assert(bufcnt > 0);

  if (uv__udp_maybe_deferred_bind(handle, addr->sa_family))
    return -1;

  uv__req_init(handle->loop, req, UV_UDP_SEND);
  /* The request has fixed-size address storage; the caller must not pass
   * a sockaddr larger than that. */
  assert(addrlen <= sizeof(req->addr));
  memcpy(&req->addr, addr, addrlen);
  req->send_cb = send_cb;
  req->handle = handle;
  req->bufcnt = bufcnt;

  /* Small sends use the inline array; larger ones go to the heap. */
  if (bufcnt <= (int) ARRAY_SIZE(req->bufsml)) {
    req->bufs = req->bufsml;
  } else if ((req->bufs = malloc(bufcnt * sizeof(bufs[0]))) == NULL) {
    uv__set_sys_error(handle->loop, ENOMEM);
    return -1;
  }

  memcpy(req->bufs, bufs, bufcnt * sizeof(bufs[0]));
  ngx_queue_insert_tail(&handle->write_queue, &req->queue);
  uv__io_start(handle->loop, &handle->io_watcher, UV__POLLOUT);
  uv__handle_start(handle);

  return 0;
}
/* Resolve a hostname and/or service.  With a callback the lookup runs on
 * the thread pool and 0 is returned; without one the lookup runs inline
 * and its result code is returned directly.  Returns -EINVAL on bad
 * arguments and -ENOMEM when the scratch buffer cannot be allocated. */
int uv_getaddrinfo(uv_loop_t* loop,
                   uv_getaddrinfo_t* req,
                   uv_getaddrinfo_cb cb,
                   const char* hostname,
                   const char* service,
                   const struct addrinfo* hints) {
  size_t hostname_len;
  size_t service_len;
  size_t hints_len;
  size_t len;
  char* buf;

  if (req == NULL || (hostname == NULL && service == NULL))
    return -EINVAL;

  /* String lengths include their NUL terminators; one allocation holds
   * the hints struct plus both strings. */
  hostname_len = hostname ? strlen(hostname) + 1 : 0;
  service_len = service ? strlen(service) + 1 : 0;
  hints_len = hints ? sizeof(*hints) : 0;
  buf = uv__malloc(hostname_len + service_len + hints_len);

  if (buf == NULL)
    return -ENOMEM;

  uv__req_init(loop, req, UV_GETADDRINFO);
  req->loop = loop;
  req->cb = cb;
  req->addrinfo = NULL;
  req->hints = NULL;
  req->service = NULL;
  req->hostname = NULL;
  req->retcode = 0;

  /* order matters, see uv_getaddrinfo_done() */
  len = 0;

  if (hints) {
    req->hints = memcpy(buf + len, hints, sizeof(*hints));
    len += sizeof(*hints);
  }

  if (service) {
    req->service = memcpy(buf + len, service, service_len);
    len += service_len;
  }

  if (hostname)
    req->hostname = memcpy(buf + len, hostname, hostname_len);

  if (cb) {
    /* Asynchronous path: hand the lookup to the thread pool. */
    uv__work_submit(loop,
                    &req->work_req,
                    uv__getaddrinfo_work,
                    uv__getaddrinfo_done);
    return 0;
  } else {
    /* Synchronous path: run work and completion inline. */
    uv__getaddrinfo_work(&req->work_req);
    uv__getaddrinfo_done(&req->work_req, 0);
    return req->retcode;
  }
}
/* Connect a pipe handle to the UNIX domain socket at `name`.  Never fails
 * synchronously: the error (if any) is stored in handle->delayed_error and
 * reported through the connect callback on the next loop iteration,
 * mimicking the Windows pipe implementation. */
void uv_pipe_connect(uv_connect_t* req,
                     uv_pipe_t* handle,
                     const char* name,
                     uv_connect_cb cb) {
  struct sockaddr_un saddr;
  int saved_errno;
  int new_sock;
  int err;
  int r;

  saved_errno = errno;
  new_sock = (handle->fd == -1);
  err = -1;

  /* Lazily create the socket on first use. */
  if (new_sock)
    if ((handle->fd = uv__socket(AF_UNIX, SOCK_STREAM, 0)) == -1)
      goto out;

  memset(&saddr, 0, sizeof saddr);
  /* NOTE(review): names longer than sun_path are silently truncated here;
   * later revisions reject them with ENAMETOOLONG — confirm intent. */
  uv_strlcpy(saddr.sun_path, name, sizeof(saddr.sun_path));
  saddr.sun_family = AF_UNIX;

  /* We don't check for EINPROGRESS. Think about it: the socket
   * is either there or not. */
  do {
    r = connect(handle->fd, (struct sockaddr*)&saddr, sizeof saddr);
  } while (r == -1 && errno == EINTR);

  if (r == -1)
    goto out;

  if (new_sock)
    if (uv__stream_open((uv_stream_t*)handle,
                        handle->fd,
                        UV_STREAM_READABLE | UV_STREAM_WRITABLE))
      goto out;

  uv__io_start(handle->loop, &handle->read_watcher);
  uv__io_start(handle->loop, &handle->write_watcher);
  err = 0;

out:
  handle->delayed_error = err ? errno : 0; /* Passed to callback. */
  handle->connect_req = req;

  uv__req_init(handle->loop, req, UV_CONNECT);
  req->handle = (uv_stream_t*)handle;
  req->cb = cb;
  ngx_queue_init(&req->queue);

  /* Run callback on next tick. */
  uv__io_feed(handle->loop, &handle->write_watcher, UV__IO_WRITE);

  /* Mimic the Windows pipe implementation, always
   * return 0 and let the callback handle errors. */
  errno = saved_errno;
}
/* Shut down the write side of a TCP stream.  Returns 0 on success or -1
 * when the handle is already shut / closed / closing.
 *
 * Fix: the state check now runs BEFORE uv__req_init.  The original
 * initialized (registered) the request first and then returned -1, which
 * left a registered request behind on the rejection path; the sibling
 * uv_shutdown implementations all validate first. */
int uv_shutdown(uv_shutdown_t* req, uv_stream_t* handle, uv_shutdown_cb cb) {
  uv_tcp_t* tcp = (uv_tcp_t*)handle;

  assert(handle->type == UV_TCP &&
         "uv_shutdown (unix) only supports uv_tcp_t right now");
  assert(tcp->fd >= 0);

  /* Reject before touching the request so the error path has no side
   * effects. */
  if (uv_flag_is_set((uv_handle_t*)tcp, UV_SHUT) ||
      uv_flag_is_set((uv_handle_t*)tcp, UV_CLOSED) ||
      uv_flag_is_set((uv_handle_t*)tcp, UV_CLOSING)) {
    return -1;
  }

  /* Initialize request */
  uv__req_init((uv_req_t*)req);
  req->handle = handle;
  req->cb = cb;
  req->type = UV_SHUTDOWN;

  tcp->shutdown_req = req;
  uv_flag_set((uv_handle_t*)tcp, UV_SHUTTING);

  /* The shutdown happens from the write watcher once writes drain. */
  ev_io_start(EV_DEFAULT_UC_ &tcp->write_watcher);

  return 0;
}
/* Connect a pipe handle to the UNIX domain socket at `name`.  Never fails
 * synchronously: errors land in handle->delayed_error and are delivered
 * through the connect callback, mimicking the Windows implementation. */
void uv_pipe_connect(uv_connect_t* req,
                     uv_pipe_t* handle,
                     const char* name,
                     uv_connect_cb cb) {
  struct sockaddr_un saddr;
  int saved_errno;
  int new_sock;
  int err;
  int r;

  saved_errno = errno;
  new_sock = (uv__stream_fd(handle) == -1);
  err = -1;

  /* Lazily create the socket on first use. */
  if (new_sock)
    if ((handle->io_watcher.fd = uv__socket(AF_UNIX, SOCK_STREAM, 0)) == -1)
      goto out;

  memset(&saddr, 0, sizeof saddr);
  /* NOTE(review): names longer than sun_path are silently truncated here;
   * later revisions reject them with ENAMETOOLONG — confirm intent. */
  uv_strlcpy(saddr.sun_path, name, sizeof(saddr.sun_path));
  saddr.sun_family = AF_UNIX;

  do {
    r = connect(uv__stream_fd(handle),
                (struct sockaddr*)&saddr,
                sizeof saddr);
  } while (r == -1 && errno == EINTR);

  /* EINPROGRESS is not an error: completion is signaled via POLLOUT. */
  if (r == -1)
    if (errno != EINPROGRESS)
      goto out;

  if (new_sock)
    if (uv__stream_open((uv_stream_t*)handle,
                        uv__stream_fd(handle),
                        UV_STREAM_READABLE | UV_STREAM_WRITABLE))
      goto out;

  uv__io_start(handle->loop, &handle->io_watcher, UV__POLLIN | UV__POLLOUT);
  err = 0;

out:
  handle->delayed_error = err ? errno : 0; /* Passed to callback. */
  handle->connect_req = req;

  uv__req_init(handle->loop, req, UV_CONNECT);
  req->handle = (uv_stream_t*)handle;
  req->cb = cb;
  QUEUE_INIT(&req->queue);

  /* Force callback to run on next tick in case of error. */
  if (err != 0)
    uv__io_feed(handle->loop, &handle->io_watcher);

  /* Mimic the Windows pipe implementation, always
   * return 0 and let the callback handle errors. */
  errno = saved_errno;
}
/* Start an asynchronous TCP connect.  Returns 0 on success or a negative
 * errno-style value.  ECONNREFUSED is deferred to the next loop tick so
 * all platforms report it the same way. */
int uv__tcp_connect(uv_connect_t* req,
                    uv_tcp_t* handle,
                    const struct sockaddr* addr,
                    unsigned int addrlen,
                    uv_connect_cb cb) {
  int err;
  int r;

  assert(handle->type == UV_TCP);

  if (handle->connect_req != NULL)
    return -EALREADY;  /* FIXME(bnoordhuis) -EINVAL or maybe -EBUSY. */

  /* Lazily create a socket matching the destination address family. */
  err = maybe_new_socket(handle,
                         addr->sa_family,
                         UV_STREAM_READABLE | UV_STREAM_WRITABLE);
  if (err)
    return err;

  handle->delayed_error = 0;

  do {
    errno = 0;
    r = connect(uv__stream_fd(handle), addr, addrlen);
  } while (r == -1 && errno == EINTR);

  /* We not only check the return value, but also check the errno != 0.
   * Because in rare cases connect() will return -1 but the errno
   * is 0 (for example, on Android 4.3, OnePlus phone A0001_12_150227)
   * and actually the tcp three-way handshake is completed.
   */
  if (r == -1 && errno != 0) {
    if (errno == EINPROGRESS)
      ; /* not an error */
    else if (errno == ECONNREFUSED)
      /* If we get a ECONNREFUSED wait until the next tick to report the
       * error. Solaris wants to report immediately--other unixes want to
       * wait. */
      handle->delayed_error = -errno;
    else
      return -errno;
  }

  uv__req_init(handle->loop, req, UV_CONNECT);
  req->cb = cb;
  req->handle = (uv_stream_t*) handle;
  QUEUE_INIT(&req->queue);
  handle->connect_req = req;

  uv__io_start(handle->loop, &handle->io_watcher, POLLOUT);

  /* Deliver any deferred error through the io watcher on the next tick. */
  if (handle->delayed_error)
    uv__io_feed(handle->loop, &handle->io_watcher);

  return 0;
}
/* Submit a buffer-processing job to the loop's thread pool; the
 * after-work callback fires on the loop thread when the pooled work
 * completes.  Always returns 0. */
int uv_queue_work(uv_loop_t* loop, uv_work_t* req, const char *buf, size_t buf_len, uv_after_work_cb after_work_cb) {
  uv__req_init(loop, req, UV_WORK);

  /* Record the owning loop and completion callback on the request. */
  req->after_work_cb = after_work_cb;
  req->loop = loop;

  /* Hand the job off; uv__queue_done runs back on the loop thread. */
  uv__work_submit(loop, &req->work_req, buf, buf_len, uv__queue_done);
  return 0;
}
/* Submit work_cb to the loop's thread pool; after_work_cb then runs on
 * the loop thread.  Returns 0, or the EINVAL artificial error when no
 * work callback was supplied. */
int uv_queue_work(uv_loop_t* loop, uv_work_t* req, uv_work_cb work_cb, uv_after_work_cb after_work_cb) {
  /* A work request without a work callback is meaningless. */
  if (work_cb == NULL)
    return uv__set_artificial_error(loop, UV_EINVAL);

  uv__req_init(loop, req, UV_WORK);

  /* Stash the callbacks and owning loop on the request. */
  req->after_work_cb = after_work_cb;
  req->work_cb = work_cb;
  req->loop = loop;

  /* uv__queue_work runs on the pool; uv__queue_done back on the loop. */
  uv__work_submit(loop, &req->work_req, uv__queue_work, uv__queue_done);
  return 0;
}
/* Start an asynchronous TCP connect.  Returns 0 on success or a negative
 * errno-style value.  ECONNREFUSED is deferred to the next loop tick so
 * all platforms report it the same way. */
static int uv__connect(uv_connect_t* req,
                       uv_tcp_t* handle,
                       struct sockaddr* addr,
                       socklen_t addrlen,
                       uv_connect_cb cb) {
  int err;
  int r;

  assert(handle->type == UV_TCP);

  if (handle->connect_req != NULL)
    return -EALREADY;  /* FIXME(bnoordhuis) -EINVAL or maybe -EBUSY. */

  /* Lazily create a socket matching the destination address family. */
  err = maybe_new_socket(handle,
                         addr->sa_family,
                         UV_STREAM_READABLE|UV_STREAM_WRITABLE);
  if (err)
    return err;

  handle->delayed_error = 0;

  do
    r = connect(uv__stream_fd(handle), addr, addrlen);
  while (r == -1 && errno == EINTR);

  if (r == -1) {
    if (errno == EINPROGRESS)
      ; /* not an error */
    else if (errno == ECONNREFUSED)
      /* If we get a ECONNREFUSED wait until the next tick to report the
       * error. Solaris wants to report immediately--other unixes want to
       * wait. */
      handle->delayed_error = -errno;
    else
      return -errno;
  }

  uv__req_init(handle->loop, req, UV_CONNECT);
  req->cb = cb;
  req->handle = (uv_stream_t*) handle;
  QUEUE_INIT(&req->queue);
  handle->connect_req = req;

  uv__io_start(handle->loop, &handle->io_watcher, UV__POLLOUT);

  /* Deliver any deferred error through the io watcher on the next tick. */
  if (handle->delayed_error)
    uv__io_feed(handle->loop, &handle->io_watcher);

  return 0;
}
/* Queue a UDP send request and either flush it immediately (queue was
 * empty and the handle is not mid-dispatch) or arm the io watcher.
 * Returns 0 on success or a negative errno-style value.
 *
 * Fix: on uv__malloc failure the request was left registered (uv__req_init
 * had already run), corrupting the loop's active request accounting; now
 * it is unregistered before returning -ENOMEM. */
int uv__udp_send(uv_udp_send_t* req,
                 uv_udp_t* handle,
                 const uv_buf_t bufs[],
                 unsigned int nbufs,
                 const struct sockaddr* addr,
                 unsigned int addrlen,
                 uv_udp_send_cb send_cb) {
  int err;
  int empty_queue;

  assert(nbufs > 0);

  err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0);
  if (err)
    return err;

  /* It's legal for send_queue_count > 0 even when the write_queue is empty;
   * it means there are error-state requests in the write_completed_queue that
   * will touch up send_queue_size/count later. */
  empty_queue = (handle->send_queue_count == 0);

  uv__req_init(handle->loop, req, UV_UDP_SEND);
  /* The request has fixed-size address storage. */
  assert(addrlen <= sizeof(req->addr));
  memcpy(&req->addr, addr, addrlen);
  req->send_cb = send_cb;
  req->handle = handle;
  req->nbufs = nbufs;

  /* Small sends use the inline array; larger ones go to the heap. */
  req->bufs = req->bufsml;
  if (nbufs > ARRAY_SIZE(req->bufsml))
    req->bufs = uv__malloc(nbufs * sizeof(bufs[0]));

  if (req->bufs == NULL) {
    /* Undo the registration performed by uv__req_init above. */
    uv__req_unregister(handle->loop, req);
    return -ENOMEM;
  }

  memcpy(req->bufs, bufs, nbufs * sizeof(bufs[0]));
  handle->send_queue_size += uv__count_bufs(req->bufs, req->nbufs);
  handle->send_queue_count++;
  QUEUE_INSERT_TAIL(&handle->write_queue, &req->queue);
  uv__handle_start(handle);

  if (empty_queue && !(handle->flags & UV_UDP_PROCESSING)) {
    /* Nothing queued: try to send right away. */
    uv__udp_sendmsg(handle);
  } else {
    /* Wait for the fd to become writable. */
    uv__io_start(handle->loop, &handle->io_watcher, UV__POLLOUT);
  }

  return 0;
}
/* Start an asynchronous TCP connect.  Returns 0 on success or -1 with the
 * loop error set.  ECONNREFUSED is deferred to the next loop tick so all
 * platforms report it the same way. */
static int uv__connect(uv_connect_t* req,
                       uv_tcp_t* handle,
                       struct sockaddr* addr,
                       socklen_t addrlen,
                       uv_connect_cb cb) {
  int r;

  assert(handle->type == UV_TCP);

  if (handle->connect_req)
    return uv__set_sys_error(handle->loop, EALREADY);

  /* Lazily create a socket matching the destination address family. */
  if (maybe_new_socket(handle,
                       addr->sa_family,
                       UV_STREAM_READABLE|UV_STREAM_WRITABLE)) {
    return -1;
  }

  handle->delayed_error = 0;

  do
    r = connect(handle->fd, addr, addrlen);
  while (r == -1 && errno == EINTR);

  if (r == -1) {
    if (errno == EINPROGRESS)
      ; /* not an error */
    else if (errno == ECONNREFUSED)
      /* If we get a ECONNREFUSED wait until the next tick to report the
       * error. Solaris wants to report immediately--other unixes want to
       * wait. */
      handle->delayed_error = errno;
    else
      return uv__set_sys_error(handle->loop, errno);
  }

  uv__req_init(handle->loop, req, UV_CONNECT);
  req->cb = cb;
  req->handle = (uv_stream_t*) handle;
  ngx_queue_init(&req->queue);
  handle->connect_req = req;

  uv__io_start(handle->loop, &handle->write_watcher);

  /* Deliver any deferred error through the write watcher next tick. */
  if (handle->delayed_error)
    uv__io_feed(handle->loop, &handle->write_watcher, UV__IO_WRITE);

  return 0;
}
/* stub implementation of uv_getaddrinfo */
/* Resolve hostname/service on the eio thread pool; the result is
 * delivered through uv_getaddrinfo_done.  Returns 0 on submission or -1
 * with the EINVAL artificial error on bad arguments. */
int uv_getaddrinfo(uv_loop_t* loop,
                   uv_getaddrinfo_t* handle,
                   uv_getaddrinfo_cb cb,
                   const char* hostname,
                   const char* service,
                   const struct addrinfo* hints) {
  eio_req* req;
  uv_eio_init(loop);

  if (handle == NULL || cb == NULL ||
      (hostname == NULL && service == NULL)) {
    uv__set_artificial_error(loop, UV_EINVAL);
    return -1;
  }

  uv__req_init(loop, (uv_req_t*)handle);
  handle->type = UV_GETADDRINFO;
  handle->loop = loop;
  handle->cb = cb;

  /* TODO don't alloc so much. */
  if (hints) {
    /* NOTE(review): malloc result is used unchecked — a failed
     * allocation crashes in memcpy below; see the TODOs. */
    handle->hints = malloc(sizeof(struct addrinfo));
    memcpy(handle->hints, hints, sizeof(struct addrinfo));
  }
  else {
    handle->hints = NULL;
  }

  /* TODO security! check lengths, check return values. */
  handle->hostname = hostname ? strdup(hostname) : NULL;
  handle->service = service ? strdup(service) : NULL;
  handle->res = NULL;
  handle->retcode = 0;

  /* TODO check handle->hostname == NULL */
  /* TODO check handle->service == NULL */
  uv_ref(loop);

  req = eio_custom(getaddrinfo_thread_proc, EIO_PRI_DEFAULT,
      uv_getaddrinfo_done, handle, &loop->uv_eio_channel);
  assert(req);
  assert(req->data == handle);

  return 0;
}
/* Common initializer for filesystem requests: ensures the thread pool is
 * up, registers the request and resets every result field.  The path, if
 * given, is duplicated and owned by the request. */
static void uv_fs_req_init(uv_loop_t* loop, uv_fs_t* req, uv_fs_type fs_type,
    const char* path, uv_fs_cb cb) {
  /* Make sure the thread pool is initialized. */
  uv_eio_init(loop);

  uv__req_init((uv_req_t*) req);

  /* Identity of the request. */
  req->type = UV_FS;
  req->fs_type = fs_type;
  req->loop = loop;
  req->cb = cb;

  /* Clear all result/state fields. */
  req->result = 0;
  req->errorno = 0;
  req->ptr = NULL;
  req->eio = NULL;

  /* Own a private copy of the path (freed with the request). */
  req->path = path ? strdup(path) : NULL;
}
/* Submit named work to the thread pool; after_work_cb runs on the loop
 * thread when it completes.  Returns 0 or UV_EINVAL when work_cb is NULL.
 *
 * Fix: guard strdup against a NULL name — strdup(NULL) is undefined
 * behavior.  The name is only a diagnostic label, so a missing (or, on
 * allocation failure, dropped) name is tolerated rather than failing the
 * whole request. */
int uv_queue_work(uv_loop_t* loop,
                  uv_work_t* req,
                  char *name,
                  uv_work_cb work_cb,
                  uv_after_work_cb after_work_cb) {
  if (work_cb == NULL)
    return UV_EINVAL;

  uv__req_init(loop, req, UV_WORK);
  req->loop = loop;
  req->work_cb = work_cb;
  req->after_work_cb = after_work_cb;
  /* Best-effort label: NULL when no name was given (or strdup failed). */
  req->work_req.name = name ? strdup(name) : NULL;
  uv__work_submit(loop, &req->work_req, uv__queue_work, uv__queue_done);
  return 0;
}
/* Submit work_cb to the eio thread pool; uv__after_work then delivers
 * after_work_cb on the loop thread.  Returns 0 on success, -1 with an
 * ENOMEM loop error when eio cannot queue the job. */
int uv_queue_work(uv_loop_t* loop, uv_work_t* req, uv_work_cb work_cb,
    uv_after_work_cb after_work_cb) {
  /* uv__req_init wipes the request, so preserve the user's data field. */
  void* user_data = req->data;

  uv_eio_init(loop);

  uv__req_init((uv_req_t*) req);
  uv_ref(loop);

  req->loop = loop;
  req->data = user_data;
  req->after_work_cb = after_work_cb;
  req->work_cb = work_cb;

  /* Queue on the pool; uv__work runs there, uv__after_work on the loop. */
  req->eio = eio_custom(uv__work, EIO_PRI_DEFAULT, uv__after_work, req);

  if (req->eio == NULL) {
    uv_err_new(loop, ENOMEM);
    return -1;
  }

  return 0;
}
/* Queue a UDP send request: copy the destination address and buffer
 * descriptors into the request, append it to the write queue and arm the
 * io watcher.  Returns 0 or a negative errno-style value.
 *
 * Fix: on malloc failure the request was left registered (uv__req_init
 * had already run), corrupting the loop's active request accounting; it
 * is now unregistered before returning -ENOMEM. */
static int uv__send(uv_udp_send_t* req,
                    uv_udp_t* handle,
                    uv_buf_t bufs[],
                    int bufcnt,
                    struct sockaddr* addr,
                    socklen_t addrlen,
                    uv_udp_send_cb send_cb) {
  int err;

  assert(bufcnt > 0);

  err = uv__udp_maybe_deferred_bind(handle, addr->sa_family);
  if (err)
    return err;

  uv__req_init(handle->loop, req, UV_UDP_SEND);
  /* The request has fixed-size address storage. */
  assert(addrlen <= sizeof(req->addr));
  memcpy(&req->addr, addr, addrlen);
  req->send_cb = send_cb;
  req->handle = handle;
  req->bufcnt = bufcnt;

  /* Small sends use the inline array; larger ones go to the heap. */
  if (bufcnt <= (int) ARRAY_SIZE(req->bufsml))
    req->bufs = req->bufsml;
  else
    req->bufs = malloc(bufcnt * sizeof(*bufs));

  if (req->bufs == NULL) {
    /* Undo the registration performed by uv__req_init above. */
    uv__req_unregister(handle->loop, req);
    return -ENOMEM;
  }

  memcpy(req->bufs, bufs, bufcnt * sizeof(bufs[0]));
  QUEUE_INSERT_TAIL(&handle->write_queue, &req->queue);
  uv__io_start(handle->loop, &handle->io_watcher, UV__POLLOUT);
  uv__handle_start(handle);

  return 0;
}
/* Shut down the write side of a stream (TCP or named pipe).  Returns 0
 * on success, or -1 with the UV_ENOTCONN artificial error when the
 * stream is not writable or is already shut / closed / closing. */
int uv_shutdown(uv_shutdown_t* req, uv_stream_t* stream, uv_shutdown_cb cb) {
  assert((stream->type == UV_TCP || stream->type == UV_NAMED_PIPE) &&
         "uv_shutdown (unix) only supports uv_handle_t right now");
  assert(stream->fd >= 0);

  /* Refuse when the stream cannot (or should no longer) be written. */
  if (!(stream->flags & UV_STREAM_WRITABLE) ||
      stream->flags & UV_STREAM_SHUT ||
      stream->flags & UV_CLOSED ||
      stream->flags & UV_CLOSING) {
    uv__set_artificial_error(stream->loop, UV_ENOTCONN);
    return -1;
  }

  /* Initialize request */
  uv__req_init(stream->loop, req, UV_SHUTDOWN);
  req->handle = stream;
  req->cb = cb;

  stream->shutdown_req = req;
  stream->flags |= UV_STREAM_SHUTTING;

  /* shutdown(2) runs from the write watcher once writes drain. */
  uv__io_start(stream->loop, &stream->write_watcher);

  return 0;
}
/* Connect a pipe handle to the UNIX domain socket at `name`.  Never fails
 * synchronously: the error (if any) is stored in handle->delayed_error
 * and delivered through the connect callback on the next loop tick,
 * mimicking the Windows pipe implementation. */
void uv_pipe_connect(uv_connect_t* req,
                    uv_pipe_t* handle,
                    const char* name,
                    uv_connect_cb cb) {
  struct sockaddr_un saddr;
  int new_sock;
  int err;
  int r;
  size_t name_len;

  name_len = strlen(name);

  /* Reject names that cannot fit in sun_path (with its terminator). */
  if (name_len > sizeof(saddr.sun_path) - 1) {
    err = -ENAMETOOLONG;
    goto out;
  }

  /* Lazily create the socket on first use. */
  new_sock = (uv__stream_fd(handle) == -1);

  if (new_sock) {
    err = uv__socket(AF_UNIX, SOCK_STREAM, 0);
    if (err < 0)
      goto out;
    handle->io_watcher.fd = err;
  }

  memset(&saddr, 0, sizeof saddr);
  memcpy(saddr.sun_path, name, name_len);
  saddr.sun_family = AF_UNIX;

  do {
    r = connect(uv__stream_fd(handle),
                (struct sockaddr*)&saddr, sizeof saddr);
  }
  while (r == -1 && errno == EINTR);

  /* EINPROGRESS is not an error: completion is signaled via POLLOUT. */
  if (r == -1 && errno != EINPROGRESS) {
    err = -errno;
#if defined(__CYGWIN__) || defined(__MSYS__)
    /* EBADF is supposed to mean that the socket fd is bad, but
       Cygwin reports EBADF instead of ENOTSOCK when the file is
       not a socket.  We do not expect to see a bad fd here
       (e.g. due to new_sock), so translate the error.  */
    if (err == -EBADF)
      err = -ENOTSOCK;
#endif
    goto out;
  }

  err = 0;
  if (new_sock) {
    err = uv__stream_open((uv_stream_t*)handle,
                          uv__stream_fd(handle),
                          UV_STREAM_READABLE | UV_STREAM_WRITABLE);
  }

  if (err == 0)
    uv__io_start(handle->loop, &handle->io_watcher, POLLIN | POLLOUT);

out:
  handle->delayed_error = err;  /* Passed to callback. */
  handle->connect_req = req;

  uv__req_init(handle->loop, req, UV_CONNECT);
  req->handle = (uv_stream_t*)handle;
  req->cb = cb;
  QUEUE_INIT(&req->queue);

  /* Force callback to run on next tick in case of error. */
  if (err)
    uv__io_feed(handle->loop, &handle->io_watcher);
}
/* Write bufs to the stream, optionally passing send_handle over an
 * IPC-enabled pipe.  Buffer memory must remain valid until cb fires; the
 * uv_buf_t array itself is copied.  Returns 0, or -1 with the loop error
 * set (EBADF, EOPNOTSUPP, ENOMEM).
 *
 * Fix: the malloc result was used by memcpy without a NULL check — an
 * allocation failure crashed instead of reporting ENOMEM. */
int uv_write2(uv_write_t* req,
              uv_stream_t* stream,
              uv_buf_t bufs[],
              int bufcnt,
              uv_stream_t* send_handle,
              uv_write_cb cb) {
  int empty_queue;

  assert(bufcnt > 0);
  assert((stream->type == UV_TCP ||
          stream->type == UV_NAMED_PIPE ||
          stream->type == UV_TTY) &&
         "uv_write (unix) does not yet support other types of streams");

  if (uv__stream_fd(stream) < 0) {
    uv__set_sys_error(stream->loop, EBADF);
    return -1;
  }

  if (send_handle) {
    /* Handle passing only works over IPC-enabled pipes. */
    if (stream->type != UV_NAMED_PIPE || !((uv_pipe_t*)stream)->ipc) {
      uv__set_sys_error(stream->loop, EOPNOTSUPP);
      return -1;
    }
  }

  empty_queue = (stream->write_queue_size == 0);

  /* Initialize the req */
  uv__req_init(stream->loop, req, UV_WRITE);
  req->cb = cb;
  req->handle = stream;
  req->error = 0;
  req->send_handle = send_handle;
  ngx_queue_init(&req->queue);

  /* Small writes use the inline array; larger ones go to the heap. */
  if (bufcnt <= (int) ARRAY_SIZE(req->bufsml))
    req->bufs = req->bufsml;
  else
    req->bufs = malloc(sizeof(uv_buf_t) * bufcnt);

  /* Fail cleanly instead of memcpy'ing through a NULL pointer. */
  if (req->bufs == NULL) {
    uv__set_sys_error(stream->loop, ENOMEM);
    return -1;
  }

  memcpy(req->bufs, bufs, bufcnt * sizeof(uv_buf_t));
  req->bufcnt = bufcnt;
  req->write_index = 0;
  stream->write_queue_size += uv__buf_count(bufs, bufcnt);

  /* Append the request to write_queue. */
  ngx_queue_insert_tail(&stream->write_queue, &req->queue);

  /* If the queue was empty when this function began, we should attempt to
   * do the write immediately. Otherwise start the write_watcher and wait
   * for the fd to become writable. */
  if (stream->connect_req) {
    /* Still connecting, do nothing. */
  }
  else if (empty_queue) {
    uv__write(stream);
  }
  else {
    /*
     * blocking streams should never have anything in the queue.
     * if this assert fires then somehow the blocking stream isn't being
     * sufficiently flushed in uv__write.
     */
    assert(!(stream->flags & UV_STREAM_BLOCKING));
    uv__io_start(stream->loop, &stream->io_watcher, UV__POLLOUT);
  }

  return 0;
}
/* Connect a pipe handle to the UNIX domain socket at `name`.  Never fails
 * synchronously: the status is stored in handle->delayed_error and the
 * connect callback is invoked on the next tick via uv__make_pending,
 * mimicking the Windows pipe implementation. */
void uv_pipe_connect(uv_connect_t* req,
                     uv_pipe_t* handle,
                     const char* name,
                     uv_connect_cb cb) {
  struct sockaddr_un saddr;
  int saved_errno;
  int sockfd;
  int status;
  int r;

  saved_errno = errno;
  sockfd = -1;
  status = -1;

  /* Always creates a fresh socket for the connection attempt. */
  if ((sockfd = uv__socket(AF_UNIX, SOCK_STREAM, 0)) == -1) {
    uv__set_sys_error(handle->loop, errno);
    goto out;
  }

  memset(&saddr, 0, sizeof saddr);
  /* NOTE(review): names longer than sun_path are silently truncated here;
   * later revisions reject them with ENAMETOOLONG — confirm intent. */
  uv_strlcpy(saddr.sun_path, name, sizeof(saddr.sun_path));
  saddr.sun_family = AF_UNIX;

  /* We don't check for EINPROGRESS. Think about it: the socket
   * is either there or not. */
  do {
    r = connect(sockfd, (struct sockaddr*)&saddr, sizeof saddr);
  } while (r == -1 && errno == EINTR);

  if (r == -1) {
    status = errno;
    close(sockfd);
    goto out;
  }

  uv__stream_open((uv_stream_t*)handle,
                  sockfd,
                  UV_STREAM_READABLE | UV_STREAM_WRITABLE);

  ev_io_start(handle->loop->ev, &handle->read_watcher);
  ev_io_start(handle->loop->ev, &handle->write_watcher);

  status = 0;

out:
  handle->delayed_error = status; /* Passed to callback. */
  handle->connect_req = req;

  uv__req_init(handle->loop, req, UV_CONNECT);
  req->handle = (uv_stream_t*)handle;
  req->cb = cb;
  ngx_queue_init(&req->queue);

  /* Run callback on next tick. */
  uv__make_pending(handle);

  /* Mimic the Windows pipe implementation, always
   * return 0 and let the callback handle errors. */
  errno = saved_errno;
}
/* Write bufs to the stream, optionally passing send_handle over an
 * IPC-enabled pipe.  Buffer memory must remain valid until cb fires; the
 * uv_buf_t array itself is copied into the request.  Returns 0 or a
 * negative errno-style value (-EBADF, -EINVAL, -ENOMEM). */
int uv_write2(uv_write_t* req,
              uv_stream_t* stream,
              const uv_buf_t bufs[],
              unsigned int nbufs,
              uv_stream_t* send_handle,
              uv_write_cb cb) {
  int empty_queue;

  assert(nbufs > 0);
  assert((stream->type == UV_TCP ||
          stream->type == UV_NAMED_PIPE ||
          stream->type == UV_TTY) &&
         "uv_write (unix) does not yet support other types of streams");

  if (uv__stream_fd(stream) < 0)
    return -EBADF;

  if (send_handle) {
    /* Handle passing only works over IPC-enabled pipes. */
    if (stream->type != UV_NAMED_PIPE || !((uv_pipe_t*)stream)->ipc)
      return -EINVAL;

    /* XXX We abuse uv_write2() to send over UDP handles to child processes.
     * Don't call uv__stream_fd() on those handles, it's a macro that on OS X
     * evaluates to a function that operates on a uv_stream_t with a couple of
     * OS X specific fields. On other Unices it does (handle)->io_watcher.fd,
     * which works but only by accident.
     */
    if (uv__handle_fd((uv_handle_t*) send_handle) < 0)
      return -EBADF;
  }

  /* It's legal for write_queue_size > 0 even when the write_queue is empty;
   * it means there are error-state requests in the write_completed_queue that
   * will touch up write_queue_size later, see also uv__write_req_finish().
   * We could check that write_queue is empty instead but that implies making
   * a write() syscall when we know that the handle is in error mode.
   */
  empty_queue = (stream->write_queue_size == 0);

  /* Initialize the req */
  uv__req_init(stream->loop, req, UV_WRITE);
  req->cb = cb;
  req->handle = stream;
  req->error = 0;
  req->send_handle = send_handle;
  QUEUE_INIT(&req->queue);

  /* Small writes use the inline array; larger ones go to the heap. */
  req->bufs = req->bufsml;
  if (nbufs > ARRAY_SIZE(req->bufsml))
    req->bufs = (uv_buf_t*)malloc(nbufs * sizeof(bufs[0]));

  if (req->bufs == NULL)
    return -ENOMEM;

  memcpy(req->bufs, bufs, nbufs * sizeof(bufs[0]));
  req->nbufs = nbufs;
  req->write_index = 0;
  stream->write_queue_size += uv__count_bufs(bufs, nbufs);

  /* Append the request to write_queue. */
  QUEUE_INSERT_TAIL(&stream->write_queue, &req->queue);

  /* If the queue was empty when this function began, we should attempt to
   * do the write immediately. Otherwise start the write_watcher and wait
   * for the fd to become writable.
   */
  if (stream->connect_req) {
    /* Still connecting, do nothing. */
  }
  else if (empty_queue) {
    uv__write(stream);
  }
  else {
    /*
     * blocking streams should never have anything in the queue.
     * if this assert fires then somehow the blocking stream isn't being
     * sufficiently flushed in uv__write.
     */
    assert(!(stream->flags & UV_STREAM_BLOCKING));
    uv__io_start(stream->loop, &stream->io_watcher, UV__POLLOUT);
  }

  return 0;
}
int uv_write2(uv_write_t* req, uv_stream_t* stream, uv_buf_t bufs[], int bufcnt, uv_stream_t* send_handle, uv_write_cb cb) { int empty_queue; assert((stream->type == UV_TCP || stream->type == UV_NAMED_PIPE || stream->type == UV_TTY) && "uv_write (unix) does not yet support other types of streams"); if (stream->fd < 0) { uv__set_sys_error(stream->loop, EBADF); return -1; } if (send_handle) { if (stream->type != UV_NAMED_PIPE || !((uv_pipe_t*)stream)->ipc) { uv__set_sys_error(stream->loop, EOPNOTSUPP); return -1; } } empty_queue = (stream->write_queue_size == 0); /* Initialize the req */ uv__req_init(stream->loop, (uv_req_t*)req); req->cb = cb; req->handle = stream; req->error = 0; req->send_handle = send_handle; req->type = UV_WRITE; ngx_queue_init(&req->queue); if (bufcnt <= UV_REQ_BUFSML_SIZE) { req->bufs = req->bufsml; } else { req->bufs = malloc(sizeof(uv_buf_t) * bufcnt); } memcpy(req->bufs, bufs, bufcnt * sizeof(uv_buf_t)); req->bufcnt = bufcnt; /* * fprintf(stderr, "cnt: %d bufs: %p bufsml: %p\n", bufcnt, req->bufs, req->bufsml); */ req->write_index = 0; stream->write_queue_size += uv__buf_count(bufs, bufcnt); /* Append the request to write_queue. */ ngx_queue_insert_tail(&stream->write_queue, &req->queue); assert(!ngx_queue_empty(&stream->write_queue)); assert(stream->write_watcher.cb == uv__stream_io); assert(stream->write_watcher.data == stream); assert(stream->write_watcher.fd == stream->fd); /* If the queue was empty when this function began, we should attempt to * do the write immediately. Otherwise start the write_watcher and wait * for the fd to become writable. */ if (empty_queue) { uv__write(stream); } else { /* * blocking streams should never have anything in the queue. * if this assert fires then somehow the blocking stream isn't being * sufficently flushed in uv__write. */ assert(!stream->blocking); ev_io_start(stream->loop->ev, &stream->write_watcher); } return 0; }
/* Start an asynchronous connect on a TCP stream.  Returns 0 on success,
 * -1 with the loop error set, or -2 when opening the fresh socket fails.
 * ECONNREFUSED is deferred to the next loop tick.
 *
 * Fix: the EALREADY / ENOTSOCK validation now runs BEFORE the socket is
 * created and the request registered.  The original registered the
 * request (and could create a socket) and then returned -1, leaving a
 * registered request — and possibly a stray fd — behind. */
int uv__connect(uv_connect_t* req, uv_stream_t* stream, struct sockaddr* addr,
    socklen_t addrlen, uv_connect_cb cb) {
  int sockfd;
  int r;

  /* Validate up front so a rejected call has no side effects. */
  if (stream->type != UV_TCP) {
    uv__set_sys_error(stream->loop, ENOTSOCK);
    return -1;
  }

  if (stream->connect_req) {
    uv__set_sys_error(stream->loop, EALREADY);
    return -1;
  }

  /* Lazily create the socket on first use. */
  if (stream->fd <= 0) {
    if ((sockfd = uv__socket(addr->sa_family, SOCK_STREAM, 0)) == -1) {
      uv__set_sys_error(stream->loop, errno);
      return -1;
    }

    if (uv__stream_open(stream, sockfd, UV_READABLE | UV_WRITABLE)) {
      uv__close(sockfd);
      return -2;
    }
  }

  uv__req_init(stream->loop, (uv_req_t*)req);
  req->cb = cb;
  req->handle = stream;
  req->type = UV_CONNECT;
  ngx_queue_init(&req->queue);

  stream->connect_req = req;

  do {
    r = connect(stream->fd, addr, addrlen);
  }
  while (r == -1 && errno == EINTR);

  stream->delayed_error = 0;

  if (r != 0 && errno != EINPROGRESS) {
    switch (errno) {
      /* If we get a ECONNREFUSED wait until the next tick to report the
       * error. Solaris wants to report immediately--other unixes want to
       * wait. */
      case ECONNREFUSED:
        stream->delayed_error = errno;
        break;

      default:
        uv__set_sys_error(stream->loop, errno);
        return -1;
    }
  }

  assert(stream->write_watcher.data == stream);
  ev_io_start(stream->loop->ev, &stream->write_watcher);

  /* Deliver any deferred error through the write watcher next tick. */
  if (stream->delayed_error) {
    ev_feed_event(stream->loop->ev, &stream->write_watcher, EV_WRITE);
  }

  return 0;
}
/* Resolve a hostname and/or service, converting the hostname to ASCII
 * via IDNA first (except on z/OS).  With a callback the lookup runs on
 * the thread pool (slow-I/O class) and 0 is returned; without one it
 * runs inline and the result code is returned.  Returns UV_EINVAL on bad
 * arguments, an IDNA error, or UV_ENOMEM on allocation failure. */
int uv_getaddrinfo(uv_loop_t* loop,
                   uv_getaddrinfo_t* req,
                   uv_getaddrinfo_cb cb,
                   const char* hostname,
                   const char* service,
                   const struct addrinfo* hints) {
  char hostname_ascii[256];
  size_t hostname_len;
  size_t service_len;
  size_t hints_len;
  size_t len;
  char* buf;
  long rc;

  if (req == NULL || (hostname == NULL && service == NULL))
    return UV_EINVAL;

  /* FIXME(bnoordhuis) IDNA does not seem to work z/OS,
   * probably because it uses EBCDIC rather than ASCII.
   */
#ifdef __MVS__
  (void) &hostname_ascii;
#else
  if (hostname != NULL) {
    rc = uv__idna_toascii(hostname,
                          hostname + strlen(hostname),
                          hostname_ascii,
                          hostname_ascii + sizeof(hostname_ascii));
    if (rc < 0)
      return rc;
    hostname = hostname_ascii;
  }
#endif

  /* String lengths include their NUL terminators; one allocation holds
   * the hints struct plus both strings. */
  hostname_len = hostname ? strlen(hostname) + 1 : 0;
  service_len = service ? strlen(service) + 1 : 0;
  hints_len = hints ? sizeof(*hints) : 0;
  buf = uv__malloc(hostname_len + service_len + hints_len);

  if (buf == NULL)
    return UV_ENOMEM;

  uv__req_init(loop, req, UV_GETADDRINFO);
  req->loop = loop;
  req->cb = cb;
  req->addrinfo = NULL;
  req->hints = NULL;
  req->service = NULL;
  req->hostname = NULL;
  req->retcode = 0;

  /* order matters, see uv_getaddrinfo_done() */
  len = 0;

  if (hints) {
    req->hints = memcpy(buf + len, hints, sizeof(*hints));
    len += sizeof(*hints);
  }

  if (service) {
    req->service = memcpy(buf + len, service, service_len);
    len += service_len;
  }

  if (hostname)
    req->hostname = memcpy(buf + len, hostname, hostname_len);

  if (cb) {
    /* Asynchronous path: queue on the pool's slow-I/O lane. */
    uv__work_submit(loop,
                    &req->work_req,
                    UV__WORK_SLOW_IO,
                    uv__getaddrinfo_work,
                    uv__getaddrinfo_done);
    return 0;
  } else {
    /* Synchronous path: run work and completion inline. */
    uv__getaddrinfo_work(&req->work_req);
    uv__getaddrinfo_done(&req->work_req, 0);
    return req->retcode;
  }
}
/* Resolve a hostname and/or service on the eio thread pool; the result
 * is delivered through uv_getaddrinfo_done.  Returns 0 on submission, or
 * the UV_EINVAL / UV_ENOMEM artificial error on failure. */
int uv_getaddrinfo(uv_loop_t* loop,
                   uv_getaddrinfo_t* req,
                   uv_getaddrinfo_cb cb,
                   const char* hostname,
                   const char* service,
                   const struct addrinfo* hints) {
  size_t hostname_len;
  size_t service_len;
  size_t hints_len;
  eio_req* req_;
  size_t len;
  char* buf;

  if (req == NULL || cb == NULL || (hostname == NULL && service == NULL))
    return uv__set_artificial_error(loop, UV_EINVAL);

  uv_eio_init(loop);

  /* String lengths include their NUL terminators; one allocation holds
   * the hints struct plus both strings. */
  hostname_len = hostname ? strlen(hostname) + 1 : 0;
  service_len = service ? strlen(service) + 1 : 0;
  hints_len = hints ? sizeof(*hints) : 0;
  buf = malloc(hostname_len + service_len + hints_len);

  if (buf == NULL)
    return uv__set_artificial_error(loop, UV_ENOMEM);

  uv__req_init(loop, req, UV_GETADDRINFO);
  req->loop = loop;
  req->cb = cb;
  req->res = NULL;
  req->hints = NULL;
  req->service = NULL;
  req->hostname = NULL;
  req->retcode = 0;

  /* order matters, see uv_getaddrinfo_done() */
  len = 0;

  if (hints) {
    req->hints = memcpy(buf + len, hints, sizeof(*hints));
    len += sizeof(*hints);
  }

  if (service) {
    req->service = memcpy(buf + len, service, service_len);
    len += service_len;
  }

  if (hostname) {
    req->hostname = memcpy(buf + len, hostname, hostname_len);
    len += hostname_len;
  }

  /* Queue the lookup on the eio pool. */
  req_ = eio_custom(getaddrinfo_thread_proc,
                    EIO_PRI_DEFAULT,
                    uv_getaddrinfo_done,
                    req,
                    &loop->uv_eio_channel);
  if (req_)
    return 0;

  /* Submission failed: release the scratch buffer before reporting. */
  free(buf);
  return uv__set_artificial_error(loop, UV_ENOMEM);
}
/* Start an asynchronous connect on a TCP handle.  Returns 0 on success,
 * -1 with a handle error set, or -2 when opening the fresh socket fails.
 * ECONNREFUSED is deferred to the next loop tick.
 *
 * Fixes: (1) connect(2) is now retried on EINTR — a signal could
 * previously make the call fail spuriously (all sibling versions use the
 * retry loop); (2) the EALREADY / ENOTSOCK validation runs BEFORE socket
 * creation and request registration, so a rejected call has no side
 * effects. */
static int uv__connect(uv_connect_t* req, uv_tcp_t* tcp, struct sockaddr* addr,
    socklen_t addrlen, uv_connect_cb cb) {
  int r;

  /* Validate up front. */
  if (tcp->type != UV_TCP) {
    uv_err_new((uv_handle_t*)tcp, ENOTSOCK);
    return -1;
  }

  if (tcp->connect_req) {
    uv_err_new((uv_handle_t*)tcp, EALREADY);
    return -1;
  }

  /* Lazily create the socket on first use. */
  if (tcp->fd <= 0) {
    int fd = socket(addr->sa_family, SOCK_STREAM, 0);

    if (fd < 0) {
      uv_err_new((uv_handle_t*)tcp, errno);
      return -1;
    }

    if (uv_tcp_open(tcp, fd)) {
      close(fd);
      return -2;
    }
  }

  uv__req_init((uv_req_t*)req);
  req->cb = cb;
  req->handle = (uv_stream_t*)tcp;
  req->type = UV_CONNECT;
  ngx_queue_init(&req->queue);

  tcp->connect_req = req;

  /* Retry when a signal interrupts connect(2). */
  do {
    r = connect(tcp->fd, addr, addrlen);
  } while (r == -1 && errno == EINTR);

  tcp->delayed_error = 0;

  if (r != 0 && errno != EINPROGRESS) {
    switch (errno) {
      /* If we get a ECONNREFUSED wait until the next tick to report the
       * error. Solaris wants to report immediately--other unixes want to
       * wait. */
      case ECONNREFUSED:
        tcp->delayed_error = errno;
        break;

      default:
        uv_err_new((uv_handle_t*)tcp, errno);
        return -1;
    }
  }

  assert(tcp->write_watcher.data == tcp);
  ev_io_start(EV_DEFAULT_ &tcp->write_watcher);

  /* Deliver any deferred error through the write watcher next tick. */
  if (tcp->delayed_error) {
    ev_feed_event(EV_DEFAULT_ &tcp->write_watcher, EV_WRITE);
  }

  return 0;
}
/* The buffers to be written must remain valid until the callback is called.
 * This is not required for the uv_buf_t array.
 *
 * Fixes: (1) the inline-array capacity test was `<` instead of `<=`,
 * needlessly heap-allocating when bufcnt equals UV_REQ_BUFSML_SIZE —
 * sibling versions use `<=`; (2) the malloc result was used by memcpy
 * without a NULL check, crashing on allocation failure; (3) a duplicated
 * ngx_queue_init(&req->queue) was removed. */
int uv_write(uv_write_t* req, uv_stream_t* handle, uv_buf_t bufs[], int bufcnt,
    uv_write_cb cb) {
  int empty_queue;
  uv_tcp_t* tcp = (uv_tcp_t*)handle;

  /* Initialize the req */
  uv__req_init((uv_req_t*) req);
  req->cb = cb;
  req->handle = handle;
  ngx_queue_init(&req->queue);

  assert(handle->type == UV_TCP &&
      "uv_write (unix) does not yet support other types of streams");

  empty_queue = (tcp->write_queue_size == 0);
  assert(tcp->fd >= 0);

  req->type = UV_WRITE;

  /* Small writes use the inline array; larger ones go to the heap. */
  if (bufcnt <= UV_REQ_BUFSML_SIZE) {
    req->bufs = req->bufsml;
  }
  else {
    req->bufs = malloc(sizeof(uv_buf_t) * bufcnt);
    if (req->bufs == NULL) {
      /* Don't memcpy through a NULL pointer on allocation failure. */
      uv_err_new((uv_handle_t*)tcp, ENOMEM);
      return -1;
    }
  }

  memcpy(req->bufs, bufs, bufcnt * sizeof(uv_buf_t));
  req->bufcnt = bufcnt;
  req->write_index = 0;
  tcp->write_queue_size += uv__buf_count(bufs, bufcnt);

  /* Append the request to write_queue. */
  ngx_queue_insert_tail(&tcp->write_queue, &req->queue);

  assert(!ngx_queue_empty(&tcp->write_queue));
  assert(tcp->write_watcher.cb == uv__tcp_io);
  assert(tcp->write_watcher.data == tcp);
  assert(tcp->write_watcher.fd == tcp->fd);

  /* If the queue was empty when this function began, we should attempt to
   * do the write immediately. Otherwise start the write_watcher and wait
   * for the fd to become writable.
   */
  if (empty_queue) {
    if (uv__write(tcp)) {
      /* Error. uv_last_error has been set. */
      return -1;
    }
  }

  /* If the queue is now empty - we've flushed the request already. That
   * means we need to make the callback. The callback can only be done on a
   * fresh stack so we feed the event loop in order to service it.
   */
  if (ngx_queue_empty(&tcp->write_queue)) {
    ev_feed_event(EV_DEFAULT_ &tcp->write_watcher, EV_WRITE);
  } else {
    /* Otherwise there is data to write - so we should wait for the file
     * descriptor to become writable.
     */
    ev_io_start(EV_DEFAULT_ &tcp->write_watcher);
  }

  return 0;
}