/*
 * Begin delivering read events on a connected pipe handle.
 *
 * Fails with UV_EINVAL when the handle is not a connection, UV_EALREADY
 * when reading has already been started, and UV_EOF once end-of-stream
 * has been observed.  On success the callbacks are stored and a read
 * request is queued unless one is still outstanding.
 */
static int uv_pipe_read_start_impl(uv_pipe_t* handle, uv_alloc_cb alloc_cb,
    uv_read_cb read_cb, uv_read2_cb read2_cb) {
  uv_loop_t* loop = handle->loop;

  if (!(handle->flags & UV_HANDLE_CONNECTION)) {
    uv__set_artificial_error(loop, UV_EINVAL);
    return -1;
  }

  if (handle->flags & UV_HANDLE_READING) {
    uv__set_artificial_error(loop, UV_EALREADY);
    return -1;
  }

  if (handle->flags & UV_HANDLE_EOF) {
    uv__set_artificial_error(loop, UV_EOF);
    return -1;
  }

  handle->flags |= UV_HANDLE_READING;
  handle->alloc_cb = alloc_cb;
  handle->read_cb = read_cb;
  handle->read2_cb = read2_cb;

  /* Reading may have been stopped and restarted; only queue a fresh read
   * request when no previous one is still pending. */
  if (!(handle->flags & UV_HANDLE_READ_PENDING))
    uv_pipe_queue_read(loop, handle);

  return 0;
}
/*
 * Shut down the write side of a stream.
 *
 * Returns -1 with UV_EPIPE when the stream is no longer writable (e.g. a
 * shutdown is already in flight, since this function clears the writable
 * flag).  On success the shutdown request is registered and the endgame
 * machinery is asked to complete it once pending writes drain.
 *
 * Fix: the original performed the identical UV_HANDLE_WRITABLE test twice
 * in a row; the duplicate check was dead code and has been removed.
 */
int uv_shutdown(uv_shutdown_t* req, uv_stream_t* handle, uv_shutdown_cb cb) {
  uv_loop_t* loop = handle->loop;

  if (!(handle->flags & UV_HANDLE_WRITABLE)) {
    uv__set_artificial_error(loop, UV_EPIPE);
    return -1;
  }

  uv_req_init(loop, (uv_req_t*) req);
  req->type = UV_SHUTDOWN;
  req->handle = handle;
  req->cb = cb;

  /* Clearing the writable bit makes any subsequent uv_shutdown() (or write)
   * fail with EPIPE. */
  handle->flags &= ~UV_HANDLE_WRITABLE;
  handle->shutdown_req = req;
  handle->reqs_pending++;
  REGISTER_HANDLE_REQ(loop, handle, req);

  uv_want_endgame(loop, (uv_handle_t*)handle);

  return 0;
}
/*
 * Tear down a closed stream: fail every write that never reached the wire
 * and flush completion callbacks for writes that already finished.
 * Must only be called after the handle has been closed.
 */
void uv__stream_destroy(uv_stream_t* stream) {
  uv_write_t* req;
  ngx_queue_t* q;

  assert(stream->flags & UV_CLOSED);

  /* Abort writes that were still queued; their callbacks see -1 with an
   * artificial UV_EINTR error. */
  while (!ngx_queue_empty(&stream->write_queue)) {
    q = ngx_queue_head(&stream->write_queue);
    ngx_queue_remove(q);

    req = ngx_queue_data(q, uv_write_t, queue);
    if (req->bufs != req->bufsml)
      free(req->bufs);

    if (req->cb) {
      uv__set_artificial_error(req->handle->loop, UV_EINTR);
      req->cb(req, -1);
    }
  }

  /* Deliver callbacks for writes that already completed.  Fix: req->error
   * holds a system errno value, so it must be reported through
   * uv__set_sys_error (as the newer uv__stream_destroy does), not treated
   * as an artificial uv error code. */
  while (!ngx_queue_empty(&stream->write_completed_queue)) {
    q = ngx_queue_head(&stream->write_completed_queue);
    ngx_queue_remove(q);

    req = ngx_queue_data(q, uv_write_t, queue);
    if (req->cb) {
      uv__set_sys_error(stream->loop, req->error);
      req->cb(req, req->error ? -1 : 0);
    }
  }
}
/* Starts listening for connections for the given pipe.
 *
 * The handle must have been bound (uv_pipe_bind) and must be a pipe
 * server; listening or reading handles are rejected with UV_EALREADY.
 *
 * Fix: the original declared a local `int errno;` - an identifier that
 * collides with the standard <errno.h> macro (undefined behavior / compile
 * error on most platforms) and was never used.  It has been removed.
 */
int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb) {
  uv_loop_t* loop = handle->loop;
  int i;

  if (!(handle->flags & UV_HANDLE_BOUND)) {
    uv__set_artificial_error(loop, UV_EINVAL);
    return -1;
  }

  if (handle->flags & UV_HANDLE_LISTENING ||
      handle->flags & UV_HANDLE_READING) {
    uv__set_artificial_error(loop, UV_EALREADY);
    return -1;
  }

  if (!(handle->flags & UV_HANDLE_PIPESERVER)) {
    uv__set_artificial_error(loop, UV_ENOTSUP);
    return -1;
  }

  handle->flags |= UV_HANDLE_LISTENING;
  handle->connection_cb = cb;

  /* First pipe handle should have already been created in uv_pipe_bind */
  assert(handle->accept_reqs[0].pipeHandle != INVALID_HANDLE_VALUE);

  /* NOTE(review): `backlog` is unused here; the accept queue depth is
   * bounded by handle->pending_instances instead. */
  for (i = 0; i < handle->pending_instances; i++) {
    uv_pipe_queue_accept(loop, handle, &handle->accept_reqs[i], i == 0);
  }

  return 0;
}
/*
 * Send a "signal" to a child process handle.
 *
 * signum != 0 terminates the process (Windows has no signals; the process
 * is killed with exit code 1 and signum is recorded as the exit signal).
 * signum == 0 is the POSIX-style liveness probe: succeeds only while the
 * process is still running.
 *
 * Fix: removed the trailing assert(0) - both branches of the if/else
 * return, so it was unreachable dead code.
 */
int uv_process_kill(uv_process_t* process, int signum) {
  DWORD status;

  if (process->process_handle == INVALID_HANDLE_VALUE) {
    uv__set_artificial_error(process->loop, UV_EINVAL);
    return -1;
  }

  if (signum) {
    /* Kill the process. On Windows, killed processes normally return 1. */
    if (TerminateProcess(process->process_handle, 1)) {
      process->exit_signal = signum;
      return 0;
    } else {
      uv__set_sys_error(process->loop, GetLastError());
      return -1;
    }
  } else {
    /* Health check: is the process still alive? */
    if (GetExitCodeProcess(process->process_handle, &status) &&
        status == STILL_ACTIVE) {
      return 0;
    } else {
      uv__set_artificial_error(process->loop, UV_EINVAL);
      return -1;
    }
  }
}
/*
 * Enable/disable simultaneous (multiple pending) accepts on a TCP server
 * handle.  Only valid on non-connection handles.  The only supported
 * transition is many -> single pending accept; enabling simultaneous
 * accepts after they were disabled reports UV_ENOTSUP.
 */
int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable) {
  /* Connection handles never accept; reject them outright. */
  if (handle->flags & UV_HANDLE_CONNECTION) {
    uv__set_artificial_error(handle->loop, UV_EINVAL);
    return -1;
  }
  /* Check if we're already in the desired mode. */
  if ((enable && !(handle->flags & UV_HANDLE_TCP_SINGLE_ACCEPT)) ||
      (!enable && handle->flags & UV_HANDLE_TCP_SINGLE_ACCEPT)) {
    return 0;
  }
  /* Don't allow switching from single pending accept to many. */
  if (enable) {
    uv__set_artificial_error(handle->loop, UV_ENOTSUP);
    return -1;
  }
  /* Check if we're in a middle of changing the number of pending accepts. */
  if (handle->flags & UV_HANDLE_TCP_ACCEPT_STATE_CHANGING) {
    return 0;
  }
  handle->flags |= UV_HANDLE_TCP_SINGLE_ACCEPT;
  /* Flip the changing flag if we have already queued multiple accepts. */
  if (handle->flags & UV_HANDLE_LISTENING) {
    handle->flags |= UV_HANDLE_TCP_ACCEPT_STATE_CHANGING;
  }
  return 0;
}
/*
 * Arm the receive path of a UDP handle.
 *
 * Both callbacks are mandatory (UV_EINVAL otherwise); starting twice
 * reports UV_EALREADY.  Performs any deferred IPv4 bind before the read
 * watcher is started.
 */
int uv_udp_recv_start(uv_udp_t* handle,
                      uv_alloc_cb alloc_cb,
                      uv_udp_recv_cb recv_cb) {
  if (NULL == alloc_cb || NULL == recv_cb) {
    uv__set_artificial_error(handle->loop, UV_EINVAL);
    return -1;
  }

  if (uv__io_active(&handle->read_watcher)) {
    uv__set_artificial_error(handle->loop, UV_EALREADY);
    return -1;
  }

  if (uv__udp_maybe_deferred_bind(handle, AF_INET))
    return -1;

  handle->recv_cb = recv_cb;
  handle->alloc_cb = alloc_cb;

  uv__udp_start_watcher(handle,
                        &handle->read_watcher,
                        uv__udp_recvmsg,
                        UV__IO_READ);

  return 0;
}
/*
 * Asynchronous getaddrinfo().  Validates arguments, packs copies of
 * `hints`, `service` and `hostname` into a single heap block, and submits
 * the lookup to the thread pool.  At least one of hostname/service must be
 * non-NULL.  Returns 0 on success, or -1 with the loop error set.
 */
int uv_getaddrinfo(uv_loop_t* loop, uv_getaddrinfo_t* req,
    uv_getaddrinfo_cb cb, const char* hostname, const char* service,
    const struct addrinfo* hints) {
  size_t hostname_len;
  size_t service_len;
  size_t hints_len;
  size_t len;
  char* buf;

  if (req == NULL || cb == NULL || (hostname == NULL && service == NULL))
    return uv__set_artificial_error(loop, UV_EINVAL);

  /* Lengths include the NUL terminator so the copies below stay C strings. */
  hostname_len = hostname ? strlen(hostname) + 1 : 0;
  service_len = service ? strlen(service) + 1 : 0;
  hints_len = hints ? sizeof(*hints) : 0;
  /* One allocation holds all three copies; only the first non-NULL field
   * is freed later, which frees the whole block. */
  buf = malloc(hostname_len + service_len + hints_len);

  if (buf == NULL)
    return uv__set_artificial_error(loop, UV_ENOMEM);

  uv__req_init(loop, req, UV_GETADDRINFO);
  req->loop = loop;
  req->cb = cb;
  req->res = NULL;
  req->hints = NULL;
  req->service = NULL;
  req->hostname = NULL;
  req->retcode = 0;

  /* order matters, see uv_getaddrinfo_done() */
  /* hints (if any) must sit at the start of `buf` because the done
   * callback frees hints -> service -> hostname, whichever comes first. */
  len = 0;

  if (hints) {
    req->hints = memcpy(buf + len, hints, sizeof(*hints));
    len += sizeof(*hints);
  }

  if (service) {
    req->service = memcpy(buf + len, service, service_len);
    len += service_len;
  }

  if (hostname) {
    req->hostname = memcpy(buf + len, hostname, hostname_len);
    len += hostname_len;
  }

  uv__work_submit(loop, &req->work_req, uv__getaddrinfo_work,
      uv__getaddrinfo_done);

  return 0;
}
/*
 * Final teardown of a closed stream: cancel the pending connect request,
 * fail all queued writes with UV_ECANCELED, flush completed-write
 * callbacks with their recorded errno, and cancel any pending shutdown.
 * Must be called only after the handle is closed and its watcher stopped.
 */
void uv__stream_destroy(uv_stream_t* stream) {
  uv_write_t* req;
  ngx_queue_t* q;

  assert(!uv__io_active(&stream->io_watcher, UV__POLLIN | UV__POLLOUT));
  assert(stream->flags & UV_CLOSED);

  /* A connect that never finished is reported as canceled. */
  if (stream->connect_req) {
    uv__req_unregister(stream->loop, stream->connect_req);
    uv__set_artificial_error(stream->loop, UV_ECANCELED);
    stream->connect_req->cb(stream->connect_req, -1);
    stream->connect_req = NULL;
  }

  /* Writes still waiting in the queue never ran: free their buffer copies
   * and fail them with UV_ECANCELED. */
  while (!ngx_queue_empty(&stream->write_queue)) {
    q = ngx_queue_head(&stream->write_queue);
    ngx_queue_remove(q);

    req = ngx_queue_data(q, uv_write_t, queue);
    uv__req_unregister(stream->loop, req);

    if (req->bufs != req->bufsml)
      free(req->bufs);

    if (req->cb) {
      uv__set_artificial_error(req->handle->loop, UV_ECANCELED);
      req->cb(req, -1);
    }
  }

  /* Writes that already completed report their stored system errno. */
  while (!ngx_queue_empty(&stream->write_completed_queue)) {
    q = ngx_queue_head(&stream->write_completed_queue);
    ngx_queue_remove(q);

    req = ngx_queue_data(q, uv_write_t, queue);
    uv__req_unregister(stream->loop, req);

    if (req->cb) {
      uv__set_sys_error(stream->loop, req->error);
      req->cb(req, req->error ? -1 : 0);
    }
  }

  /* A shutdown that never completed is reported as canceled. */
  if (stream->shutdown_req) {
    uv__req_unregister(stream->loop, stream->shutdown_req);
    uv__set_artificial_error(stream->loop, UV_ECANCELED);
    stream->shutdown_req->cb(stream->shutdown_req, -1);
    stream->shutdown_req = NULL;
  }
}
/*
 * Join or leave an IPv4 multicast group (Windows).  Binds the socket to
 * INADDR_ANY first when it is still unbound.  IPv6 sockets are not
 * supported (UV_ENOSYS).
 *
 * Fix: an unknown `membership` value now reports UV_EINVAL (invalid
 * argument) instead of UV_EFAULT, matching the unix implementation of this
 * function and every other argument validator in this file.
 */
int uv_udp_set_membership(uv_udp_t* handle, const char* multicast_addr,
    const char* interface_addr, uv_membership membership) {
  int optname;
  struct ip_mreq mreq;

  /* If the socket is unbound, bind to inaddr_any. */
  if (!(handle->flags & UV_HANDLE_BOUND) &&
      uv_udp_bind(handle, uv_addr_ip4_any_, 0) < 0) {
    return -1;
  }

  if (handle->flags & UV_HANDLE_IPV6) {
    uv__set_artificial_error(handle->loop, UV_ENOSYS);
    return -1;
  }

  memset(&mreq, 0, sizeof mreq);

  if (interface_addr) {
    mreq.imr_interface.s_addr = inet_addr(interface_addr);
  } else {
    mreq.imr_interface.s_addr = htonl(INADDR_ANY);
  }

  /* NOTE(review): inet_addr() returns INADDR_NONE on a malformed address
   * string and that result is passed to setsockopt unchecked - confirm
   * callers validate multicast_addr. */
  mreq.imr_multiaddr.s_addr = inet_addr(multicast_addr);

  switch (membership) {
    case UV_JOIN_GROUP:
      optname = IP_ADD_MEMBERSHIP;
      break;
    case UV_LEAVE_GROUP:
      optname = IP_DROP_MEMBERSHIP;
      break;
    default:
      uv__set_artificial_error(handle->loop, UV_EINVAL);
      return -1;
  }

  if (setsockopt(handle->socket, IPPROTO_IP, optname, (char*) &mreq,
      sizeof mreq) == SOCKET_ERROR) {
    uv__set_sys_error(handle->loop, WSAGetLastError());
    return -1;
  }

  return 0;
}
/*
 * Adopt a file descriptor exported by another loop/process into `stream`.
 * Only UV_TCP stream info is accepted, and the stream must not already
 * own a descriptor.
 */
int uv_import(uv_stream_t* stream, uv_stream_info_t* info) {
  uv_loop_t* loop = stream->loop;

  if (info->type != UV_TCP) {
    uv__set_artificial_error(loop, UV_EINVAL);
    return -1;
  }

  if (stream->fd != -1) {
    uv__set_artificial_error(loop, UV_EALREADY);
    return -1;
  }

  stream->fd = info->fd;
  return 0;
}
/*
 * Start accepting connections on a bound pipe (unix).  Returns listen()'s
 * status; the caller-visible errno is preserved across the call.  On
 * success the accept watcher is installed and started.
 */
int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb) {
  int preserved_errno = errno;
  int rc = -1;

  if (handle->fd == -1) {
    uv__set_artificial_error(handle->loop, UV_EINVAL);
  } else {
    assert(handle->fd >= 0);
    rc = listen(handle->fd, backlog);
    if (rc == -1) {
      uv__set_sys_error(handle->loop, errno);
    } else {
      handle->connection_cb = cb;
      uv__io_init(&handle->read_watcher, uv__pipe_accept, handle->fd,
                  UV__IO_READ);
      uv__io_start(handle->loop, &handle->read_watcher);
    }
  }

  errno = preserved_errno;
  return rc;
}
/*
 * Duplicate `handle`'s socket for use by process `pid`, filling in
 * `protocol_info` for WSASocket() in the target process.  Non-listening
 * bound sockets are put into the listening state first (see comment
 * below).  Returns 0 on success, -1 with the loop error set on failure.
 */
int uv_tcp_duplicate_socket(uv_tcp_t* handle, int pid,
    LPWSAPROTOCOL_INFOW protocol_info) {
  assert(!(handle->flags & UV_HANDLE_CONNECTION));

  /*
   * We're about to share the socket with another process. Because
   * this is a listening socket, we assume that the other process will
   * be accepting connections on it. So, before sharing the socket
   * with another process, we call listen here in the parent process.
   * This needs to be modified if the socket is shared with
   * another process for anything other than accepting connections.
   */
  if (!(handle->flags & UV_HANDLE_LISTENING)) {
    /* An unbound socket cannot be put into the listening state. */
    if (!(handle->flags & UV_HANDLE_BOUND)) {
      uv__set_artificial_error(handle->loop, UV_EINVAL);
      return -1;
    }
    if (listen(handle->socket, SOMAXCONN) == SOCKET_ERROR) {
      uv__set_sys_error(handle->loop, WSAGetLastError());
      return -1;
    }
    /* Remember that we started listening implicitly on behalf of the
     * process we are sharing with. */
    handle->flags |= UV_HANDLE_SHARED_TCP_SERVER;
  }

  if (WSADuplicateSocketW(handle->socket, pid, protocol_info)) {
    uv__set_sys_error(handle->loop, WSAGetLastError());
    return -1;
  }

  return 0;
}
/*
 * Complete the close of a UDP handle: flush completed sends, cancel every
 * send still queued (callbacks see -1 / UV_ECANCELED), and reset the
 * handle state.  The close callback itself is invoked elsewhere.
 */
void uv__udp_finish_close(uv_udp_t* handle) {
  uv_udp_send_t* req;
  ngx_queue_t* q;

  /* The watcher must already be stopped and the fd detached. */
  assert(!uv__io_active(&handle->io_watcher, UV__POLLIN | UV__POLLOUT));
  assert(handle->io_watcher.fd == -1);

  uv__udp_run_completed(handle);

  while (!ngx_queue_empty(&handle->write_queue)) {
    q = ngx_queue_head(&handle->write_queue);
    ngx_queue_remove(q);

    req = ngx_queue_data(q, uv_udp_send_t, queue);
    uv__req_unregister(handle->loop, req);

    /* Free the heap copy of the buffer list, if one was made. */
    if (req->bufs != req->bufsml)
      free(req->bufs);
    req->bufs = NULL;

    if (req->send_cb) {
      uv__set_artificial_error(handle->loop, UV_ECANCELED);
      req->send_cb(req, -1);
    }
  }

  /* Now tear down the handle. */
  handle->flags = 0;
  handle->recv_cb = NULL;
  handle->alloc_cb = NULL;
  /* but _do not_ touch close_cb */
}
/*
 * Join or leave an IPv4 multicast group (unix).  `interface_addr` selects
 * the local interface (INADDR_ANY when NULL).  Returns 0 on success, -1
 * with the loop error set otherwise.
 */
int uv_udp_set_membership(uv_udp_t* handle, const char* multicast_addr,
    const char* interface_addr, uv_membership membership) {
  struct ip_mreq mreq;
  int optname;

  memset(&mreq, 0, sizeof mreq);

  mreq.imr_interface.s_addr =
      interface_addr ? inet_addr(interface_addr) : htonl(INADDR_ANY);
  mreq.imr_multiaddr.s_addr = inet_addr(multicast_addr);

  if (membership == UV_JOIN_GROUP)
    optname = IP_ADD_MEMBERSHIP;
  else if (membership == UV_LEAVE_GROUP)
    optname = IP_DROP_MEMBERSHIP;
  else
    return uv__set_artificial_error(handle->loop, UV_EINVAL);

  if (setsockopt(handle->io_watcher.fd, IPPROTO_IP, optname, (void*) &mreq,
      sizeof mreq) == -1) {
    uv__set_sys_error(handle->loop, errno);
    return -1;
  }

  return 0;
}
/*
 * Complete the close of a UDP handle (libev-based variant): flush
 * completed sends, fail every send still queued, and reset handle state.
 * The close callback itself is invoked elsewhere.
 */
void uv__udp_finish_close(uv_udp_t* handle) {
  uv_udp_send_t* req;
  ngx_queue_t* q;

  /* Both watchers must already be stopped and the fd detached. */
  assert(!ev_is_active(&handle->write_watcher));
  assert(!ev_is_active(&handle->read_watcher));
  assert(handle->fd == -1);

  uv__udp_run_completed(handle);

  /* Sends that never hit the wire are failed with -1. */
  while (!ngx_queue_empty(&handle->write_queue)) {
    q = ngx_queue_head(&handle->write_queue);
    ngx_queue_remove(q);

    req = ngx_queue_data(q, uv_udp_send_t, queue);
    if (req->send_cb) {
      /* FIXME proper error code like UV_EABORTED */
      uv__set_artificial_error(handle->loop, UV_EINTR);
      req->send_cb(req, -1);
    }
  }

  /* Now tear down the handle. */
  handle->flags = 0;
  handle->recv_cb = NULL;
  handle->alloc_cb = NULL;
  /* but _do not_ touch close_cb */
}
/*
 * Start stat-polling `path` every `interval` milliseconds (minimum 1ms).
 * Starting an already-active poller is a no-op.  Returns 0 on success or
 * -1 with UV_ENOMEM when the request block cannot be allocated.
 */
int uv_fs_poll_start(uv_fs_poll_t* handle, uv_fs_poll_cb cb,
    const char* path, unsigned int interval) {
  uv_fs_t* stat_req;
  size_t size;

  if (uv__is_active(handle))
    return 0;

  /* The fs request and a copy of the path share one allocation; the path
   * bytes live directly after the request struct. */
  size = strlen(path) + 1;
  stat_req = malloc(sizeof(*stat_req) + size);
  if (stat_req == NULL)
    return uv__set_artificial_error(handle->loop, UV_ENOMEM);

  stat_req->data = handle;
  handle->fs_req = stat_req;
  handle->path = memcpy(stat_req + 1, path, size);
  handle->poll_cb = cb;
  handle->interval = interval ? interval : 1;
  handle->start_time = uv_now(handle->loop);
  handle->busy_polling = 0;
  memset(&handle->statbuf, 0, sizeof(handle->statbuf));

  if (uv_fs_stat(handle->loop, handle->fs_req, handle->path, poll_cb))
    abort();

  uv__handle_start(handle);

  return 0;
}
/* TODO: share this with windows? */
/*
 * Initialize a c-ares channel bound to this loop.  Installs the loop's
 * socket-state callback, saves the channel on success, and prepares the
 * (initially stopped) ares timeout timer.  Returns the ares status code,
 * or -1 with UV_EALREADY when the loop already has a channel.
 */
int uv_ares_init_options(uv_loop_t* loop, ares_channel *channelptr,
    struct ares_options *options, int optmask) {
  int rc;

  /* only allow single init at a time */
  if (loop->channel != NULL) {
    uv__set_artificial_error(loop, UV_EALREADY);
    return -1;
  }

  /* set our callback as an option */
  options->sock_state_cb = uv__ares_sockstate_cb;
  options->sock_state_cb_data = loop;
  optmask |= ARES_OPT_SOCK_STATE_CB;

  /* We do the call to ares_init_option for caller. */
  rc = ares_init_options(channelptr, options, optmask);

  /* if success, save channel */
  if (rc == ARES_SUCCESS) {
    loop->channel = *channelptr;
  }

  /*
   * Initialize the timeout timer. The timer won't be started until the
   * first socket is opened.
   */
  /* NOTE(review): the timer is initialized and the loop unref'd even when
   * ares_init_options() failed - confirm this is intentional before
   * restructuring. */
  uv_timer_init(loop, &loop->timer);
  uv_unref(loop);
  loop->timer.data = loop;

  return rc;
}
/*
 * Run the completion callback for every write request on the
 * write_completed_queue, then drain the stream once the write queue is
 * empty.
 *
 * Fixes: req->error stores a system errno (see uv__stream_destroy's
 * completed-queue handling), so it is reported via uv__set_sys_error
 * instead of uv__set_artificial_error; the unused `callbacks_made`
 * counter and a stale comment (nothing is freed here) were removed.
 */
static void uv__write_callbacks(uv_stream_t* stream) {
  ngx_queue_t* q;
  uv_write_t* req;

  while (!ngx_queue_empty(&stream->write_completed_queue)) {
    /* Pop a req off write_completed_queue. */
    q = ngx_queue_head(&stream->write_completed_queue);
    assert(q);
    req = ngx_queue_data(q, struct uv_write_s, queue);
    ngx_queue_remove(q);

    if (req->cb) {
      /* req->error is a system errno value; 0 means success. */
      uv__set_sys_error(stream->loop, req->error);
      req->cb(req, req->error ? -1 : 0);
    }
  }

  assert(ngx_queue_empty(&stream->write_completed_queue));

  /* Write queue drained. */
  if (!uv_write_queue_head(stream)) {
    uv__drain(stream);
  }
}
/*
 * Multicast membership is not implemented on this platform yet; always
 * fails with UV_ENOSYS.
 */
int uv_udp_set_membership(uv_udp_t* handle, const char* multicast_addr,
    const char* interface_addr, uv_membership membership) {
  (void) multicast_addr;
  (void) interface_addr;
  (void) membership;

  uv__set_artificial_error(handle->loop, UV_ENOSYS);
  return -1;
}
/*
 * Start accepting connections on a bound pipe (unix, uv__stream_fd
 * variant).  Returns listen()'s status; the caller-visible errno is
 * preserved across the call.  On success the accept callback is installed
 * on the io watcher and polling for readability begins.
 */
int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb) {
  int stashed_errno = errno;
  int rc = -1;

  if (uv__stream_fd(handle) == -1) {
    uv__set_artificial_error(handle->loop, UV_EINVAL);
  } else {
    assert(uv__stream_fd(handle) >= 0);
    rc = listen(uv__stream_fd(handle), backlog);
    if (rc == -1) {
      uv__set_sys_error(handle->loop, errno);
    } else {
      handle->connection_cb = cb;
      handle->io_watcher.cb = uv__pipe_accept;
      uv__io_start(handle->loop, &handle->io_watcher, UV__POLLIN);
    }
  }

  errno = stashed_errno;
  return rc;
}
/*
 * Start stat-polling `path` every `interval` milliseconds (minimum 1ms)
 * using a per-start poll context.  Starting an already-active poller is a
 * no-op.  Returns 0 on success or -1 with UV_ENOMEM.
 */
int uv_fs_poll_start(uv_fs_poll_t* handle, uv_fs_poll_cb cb,
    const char* path, unsigned int interval) {
  struct poll_ctx* ctx;
  uv_loop_t* loop;
  size_t len;

  if (uv__is_active(handle))
    return 0;

  loop = handle->loop;
  len = strlen(path);
  /* The context's trailing path buffer provides 1 byte; allocating
   * sizeof(*ctx) + len leaves room for len + 1 bytes including the NUL
   * copied below.  calloc zero-fills the rest of the context. */
  ctx = calloc(1, sizeof(*ctx) + len);

  if (ctx == NULL)
    return uv__set_artificial_error(loop, UV_ENOMEM);

  ctx->loop = loop;
  ctx->poll_cb = cb;
  ctx->interval = interval ? interval : 1;
  ctx->start_time = uv_now(loop);
  ctx->parent_handle = handle;
  memcpy(ctx->path, path, len + 1);

  if (uv_timer_init(loop, &ctx->timer_handle))
    abort();

  /* The retry timer is bookkeeping only; keep it off the active refcount. */
  ctx->timer_handle.flags |= UV__HANDLE_INTERNAL;
  uv__handle_unref(&ctx->timer_handle);

  if (uv_fs_stat(loop, &ctx->fs_req, ctx->path, poll_cb))
    abort();

  handle->poll_ctx = ctx;
  uv__handle_start(handle);

  return 0;
}
/*
 * Bind a TCP handle to an IPv6 address.  Validates the handle type and
 * address family before delegating to the platform implementation.
 *
 * Fix: invalid arguments now report UV_EINVAL, consistent with
 * uv_tcp_connect6 and the other argument validators in this file
 * (UV_EFAULT is meant for bad pointers, not bad values).
 */
int uv_tcp_bind6(uv_tcp_t* handle, struct sockaddr_in6 addr) {
  if (handle->type != UV_TCP || addr.sin6_family != AF_INET6) {
    uv__set_artificial_error(handle->loop, UV_EINVAL);
    return -1;
  }

  return uv__tcp_bind6(handle, addr);
}
/*
 * Thread-pool completion handler for uv_getaddrinfo: frees the packed
 * argument buffer, translates the getaddrinfo return code into the loop's
 * error state, and invokes the user callback with the (possibly NULL)
 * addrinfo result.
 */
static void uv__getaddrinfo_done(struct uv__work* w, int status) {
  uv_getaddrinfo_t* req = container_of(w, uv_getaddrinfo_t, work_req);
  struct addrinfo* res = req->res;
#if defined(__sun)
  size_t hostlen;

  if (req->hostname)
    hostlen = strlen(req->hostname);
  else
    hostlen = 0;
#endif

  req->res = NULL;

  uv__req_unregister(req->loop, req);

  /* see initialization in uv_getaddrinfo() */
  /* hints/service/hostname share one allocation; freeing the first
   * non-NULL field releases the whole block. */
  if (req->hints) {
    JX_FREE(getaddr, req->hints);
  } else if (req->service) {
    JX_FREE(getaddr, req->service);
  } else if (req->hostname) {
    JX_FREE(getaddr, req->hostname);
  } else
    assert(0);

  req->hints = NULL;
  req->service = NULL;
  req->hostname = NULL;

  if (req->retcode < 0) {
    /* EAI_SYSTEM error */
    uv__set_sys_error(req->loop, -req->retcode);
  } else if (req->retcode == 0) {
    /* OK */
#if defined(EAI_NODATA) /* FreeBSD deprecated EAI_NODATA */
  } else if (req->retcode == EAI_NONAME || req->retcode == EAI_NODATA) {
#else
  } else if (req->retcode == EAI_NONAME) {
#endif
    uv__set_sys_error(req->loop, ENOENT); /* FIXME compatibility hack */
#if defined(__sun)
  } else if (req->retcode == EAI_MEMORY && hostlen >= MAXHOSTNAMELEN) {
    /* Solaris reports EAI_MEMORY for over-long hostnames; map to ENOENT
     * like the not-found case above. */
    uv__set_sys_error(req->loop, ENOENT);
#endif
  } else {
    /* Any other getaddrinfo failure is surfaced as UV_EADDRINFO with the
     * raw retcode preserved for uv_strerror. */
    req->loop->last_err.code = UV_EADDRINFO;
    req->loop->last_err.sys_errno_ = req->retcode;
  }

  /* A canceled request must not carry a lookup result code. */
  if (status == -UV_ECANCELED) {
    assert(req->retcode == 0);
    req->retcode = UV_ECANCELED;
    uv__set_artificial_error(req->loop, UV_ECANCELED);
  }

  req->cb(req, req->retcode, res);
}
/*
 * Switch a readable TTY between raw and line-buffered ("cooked") console
 * mode.  If a read is in progress it is stopped before the mode change
 * and restarted with the same callbacks afterwards.  Returns 0 on
 * success, -1 with the loop error set on failure.
 */
int uv_tty_set_mode(uv_tty_t* tty, int mode) {
  DWORD flags = 0;
  unsigned char was_reading;
  uv_alloc_cb alloc_cb;
  uv_read_cb read_cb;

  if (!(tty->flags & UV_HANDLE_TTY_READABLE)) {
    uv__set_artificial_error(tty->loop, UV_EINVAL);
    return -1;
  }

  /* Already in the requested mode: nothing to do. */
  if (!!mode == !!(tty->flags & UV_HANDLE_TTY_RAW)) {
    return 0;
  }

  /* Preserve quick-edit if the console originally had it enabled. */
  if (tty->original_console_mode & ENABLE_QUICK_EDIT_MODE) {
    flags = ENABLE_QUICK_EDIT_MODE | ENABLE_EXTENDED_FLAGS;
  }

  if (mode) {
    /* Raw input */
    flags |= ENABLE_WINDOW_INPUT;
  } else {
    /* Line-buffered mode. */
    flags |= ENABLE_ECHO_INPUT | ENABLE_INSERT_MODE | ENABLE_LINE_INPUT |
        ENABLE_EXTENDED_FLAGS | ENABLE_PROCESSED_INPUT;
  }

  if (!SetConsoleMode(tty->handle, flags)) {
    uv__set_sys_error(tty->loop, GetLastError());
    return -1;
  }

  /* If currently reading, stop, and restart reading. */
  if (tty->flags & UV_HANDLE_READING) {
    was_reading = 1;
    /* Stash the callbacks so the read can be resumed below. */
    alloc_cb = tty->alloc_cb;
    read_cb = tty->read_cb;

    if (was_reading && uv_tty_read_stop(tty) != 0) {
      return -1;
    }
  } else {
    was_reading = 0;
  }

  /* Update flag. */
  tty->flags &= ~UV_HANDLE_TTY_RAW;
  tty->flags |= mode ? UV_HANDLE_TTY_RAW : 0;

  /* If we just stopped reading, restart. */
  if (was_reading && uv_tty_read_start(tty, alloc_cb, read_cb) != 0) {
    return -1;
  }

  return 0;
}
/*
 * Write to a pipe while also transferring `send_handle` to the peer.
 * Only valid on IPC pipes; non-IPC handles fail with UV_EINVAL.
 */
int uv_pipe_write2(uv_loop_t* loop, uv_write_t* req, uv_pipe_t* handle,
    uv_buf_t bufs[], int bufcnt, uv_stream_t* send_handle, uv_write_cb cb) {
  if (handle->ipc)
    return uv_pipe_write_impl(loop, req, handle, bufs, bufcnt, send_handle,
                              cb);

  uv__set_artificial_error(loop, UV_EINVAL);
  return -1;
}
/*
 * Bind a UDP handle to an IPv6 address.  Validates the handle type and
 * address family before delegating to the platform implementation.
 *
 * Fix: invalid arguments now report UV_EINVAL, consistent with
 * uv_tcp_connect6 and the other argument validators in this file
 * (UV_EFAULT is meant for bad pointers, not bad values).
 */
int uv_udp_bind6(uv_udp_t* handle, struct sockaddr_in6 addr,
    unsigned int flags) {
  if (handle->type != UV_UDP || addr.sin6_family != AF_INET6) {
    uv__set_artificial_error(handle->loop, UV_EINVAL);
    return -1;
  }

  return uv__udp_bind6(handle, addr, flags);
}
/*
 * Restart an active timer using its repeat interval.  Fails with
 * UV_EINVAL when the timer is not active.
 */
int uv_timer_again(uv_timer_t* timer) {
  if (!uv__is_active(timer)) {
    uv__set_artificial_error(timer->loop, UV_EINVAL);
    return -1;
  }

  /* NOTE(review): this asserts the timer is repeating rather than handling
   * the non-repeating case - confirm callers never invoke uv_timer_again
   * on a one-shot timer. */
  assert(uv__timer_repeating(timer));
  /* ev_timer_again re-arms the libev watcher from its `repeat` field. */
  ev_timer_again(timer->loop->ev, &timer->timer_watcher);
  return 0;
}
/*
 * Bind a UDP handle's socket (creating one if needed) to `addr`.
 * UV_UDP_IPV6ONLY is only accepted for AF_INET6; for dual-stack IPv6 the
 * IPV6_V6ONLY option is turned off (Windows defaults it on).
 *
 * Fixes: the IPV6_V6ONLY setsockopt used the local `sock`, which is
 * uninitialized whenever the handle already owned a socket - it now uses
 * handle->socket; the bind error path called WSAGetLastError() twice,
 * discarding the saved value - it now reports the saved code.
 */
static int uv__bind(uv_udp_t* handle, int domain, struct sockaddr* addr,
    int addrsize, unsigned int flags) {
  DWORD err;
  int r;
  SOCKET sock;

  if ((flags & UV_UDP_IPV6ONLY) && domain != AF_INET6) {
    /* UV_UDP_IPV6ONLY is supported only for IPV6 sockets */
    uv__set_artificial_error(handle->loop, UV_EINVAL);
    return -1;
  }

  if (handle->socket == INVALID_SOCKET) {
    sock = socket(domain, SOCK_DGRAM, 0);
    if (sock == INVALID_SOCKET) {
      uv__set_sys_error(handle->loop, WSAGetLastError());
      return -1;
    }

    if (uv_udp_set_socket(handle->loop, handle, sock) == -1) {
      closesocket(sock);
      return -1;
    }
  }

  if (domain == AF_INET6 && !(flags & UV_UDP_IPV6ONLY)) {
    DWORD off = 0;
    /* On windows IPV6ONLY is on by default. */
    /* If the user doesn't specify it libuv turns it off. */
    /* TODO: how to handle errors? This may fail if there is no ipv4 stack */
    /* available, or when run on XP/2003 which have no support for dualstack */
    /* sockets. For now we're silently ignoring the error. */
    setsockopt(handle->socket,
               IPPROTO_IPV6,
               IPV6_V6ONLY,
               (const char*) &off,
               sizeof off);
  }

  r = bind(handle->socket, addr, addrsize);

  if (r == SOCKET_ERROR) {
    err = WSAGetLastError();
    uv__set_sys_error(handle->loop, err);
    return -1;
  }

  handle->flags |= UV_HANDLE_BOUND;

  return 0;
}
/*
 * Connect a TCP handle to an IPv6 peer.  Validates the handle type and
 * address family, then delegates to the platform implementation.
 */
int uv_tcp_connect6(uv_connect_t* req, uv_tcp_t* handle,
    struct sockaddr_in6 address, uv_connect_cb cb) {
  int args_ok = handle->type == UV_TCP && address.sin6_family == AF_INET6;

  if (!args_ok) {
    uv__set_artificial_error(handle->loop, UV_EINVAL);
    return -1;
  }

  return uv__tcp_connect6(req, handle, address, cb);
}