/* Imports a TCP socket duplicated in another process.
 * socket_protocol_info is the blob produced by WSADuplicateSocketW() on the
 * sending side; tcp_connection is non-zero when the socket is an established
 * connection rather than a bound/listening socket.
 * Returns 0 on success, -1 with the loop error set on failure. */
int uv_tcp_import(uv_tcp_t* tcp, WSAPROTOCOL_INFOW* socket_protocol_info,
    int tcp_connection) {
  /* NOTE(review): AF_INET is passed unconditionally; presumably the provider
   * info selects the actual address family (IPv6 is detected below from
   * iAddressFamily) — confirm against WSASocketW semantics. */
  SOCKET socket = WSASocketW(AF_INET,
                             SOCK_STREAM,
                             IPPROTO_IP,
                             socket_protocol_info,
                             0,
                             WSA_FLAG_OVERLAPPED);

  if (socket == INVALID_SOCKET) {
    uv__set_sys_error(tcp->loop, WSAGetLastError());
    return -1;
  }

  /* Don't let child processes inherit the imported socket. */
  if (!SetHandleInformation((HANDLE) socket, HANDLE_FLAG_INHERIT, 0)) {
    uv__set_sys_error(tcp->loop, GetLastError());
    closesocket(socket);
    return -1;
  }

  /* Hand the socket to the handle (final argument flags it as imported).
   * On failure the socket is ours to close. */
  if (uv_tcp_set_socket(tcp->loop, tcp, socket, 1) != 0) {
    closesocket(socket);
    return -1;
  }

  if (tcp_connection) {
    uv_connection_init((uv_stream_t*)tcp);
  }

  tcp->flags |= UV_HANDLE_BOUND;
  tcp->flags |= UV_HANDLE_SHARED_TCP_SOCKET;

  /* The duplicated protocol info records the real address family. */
  if (socket_protocol_info->iAddressFamily == AF_INET6) {
    tcp->flags |= UV_HANDLE_IPV6;
  }

  tcp->loop->active_tcp_streams++;
  return 0;
}
/* Wraps an existing OS socket in a uv_tcp_t handle. */
int uv_tcp_open(uv_tcp_t* handle, uv_os_sock_t sock) {
  BOOL made_non_inheritable;

  /* Strip the inherit flag so child processes don't receive the socket. */
  made_non_inheritable =
      SetHandleInformation((HANDLE) sock, HANDLE_FLAG_INHERIT, 0);
  if (!made_non_inheritable) {
    uv__set_sys_error(handle->loop, GetLastError());
    return -1;
  }

  return uv_tcp_set_socket(handle->loop, handle, sock, 0) == -1 ? -1 : 0;
}
/* Re-registers the file with the event port after an event fired
 * (event-port associations are one-shot). */
static void uv__fs_event_rearm(uv_fs_event_t *handle) {
  int events;

  if (handle->fd == -1)
    return;

  events = FILE_ATTRIB | FILE_MODIFIED;
  if (port_associate(handle->loop->fs_fd,
                     PORT_SOURCE_FILE,
                     (uintptr_t) &handle->fo,
                     events,
                     handle) == -1)
    uv__set_sys_error(handle->loop, errno);

  handle->fd = PORT_LOADED;
}
/* Binds the handle to the given address, creating the socket on demand.
 * WSAEADDRINUSE is not reported immediately: it is stashed in bind_error
 * and surfaced later by connect()/listen(), mirroring BSD semantics. */
static int uv__bind(uv_tcp_t* handle,
                    int domain,
                    struct sockaddr* addr,
                    int addrsize) {
  DWORD err;
  int r;
  SOCKET sock;

  if (handle->socket == INVALID_SOCKET) {
    /* Lazily create the socket the first time we need one. */
    sock = socket(domain, SOCK_STREAM, 0);
    if (sock == INVALID_SOCKET) {
      uv__set_sys_error(handle->loop, WSAGetLastError());
      return -1;
    }

    if (uv_tcp_set_socket(handle->loop, handle, sock, 0) == -1) {
      closesocket(sock);
      return -1;
    }
  }

  r = bind(handle->socket, addr, addrsize);

  if (r == SOCKET_ERROR) {
    err = WSAGetLastError();
    if (err == WSAEADDRINUSE) {
      /* Some errors are not to be reported until connect() or listen() */
      handle->bind_error = err;
      handle->flags |= UV_HANDLE_BIND_ERROR;
    } else {
      uv__set_sys_error(handle->loop, err);
      return -1;
    }
  }

  /* Marked bound even when the WSAEADDRINUSE error was deferred above. */
  handle->flags |= UV_HANDLE_BOUND;

  return 0;
}
/* Queries the kernel for the terminal's current column/row count. */
int uv_tty_get_winsize(uv_tty_t* tty, int* width, int* height) {
  struct winsize dims;
  int rc;

  rc = ioctl(uv__stream_fd(tty), TIOCGWINSZ, &dims);
  if (rc < 0) {
    uv__set_sys_error(tty->loop, errno);
    return -1;
  }

  *width = dims.ws_col;
  *height = dims.ws_row;
  return 0;
}
/* Binds the handle to an IPv6 address; fails with WSAEAFNOSUPPORT when the
 * system has no usable IPv6 stack. */
int uv__tcp_bind6(uv_tcp_t* handle, struct sockaddr_in6 addr) {
  if (!uv_allow_ipv6) {
    uv__set_sys_error(handle->loop, WSAEAFNOSUPPORT);
    return -1;
  }

  handle->flags |= UV_HANDLE_IPV6;
  return uv__bind(handle,
                  AF_INET6,
                  (struct sockaddr*) &addr,
                  sizeof(struct sockaddr_in6));
}
/* Starts reading on a connected TCP handle.
 * Returns 0 on success; -1 with the loop error set when the handle is not
 * a connection (WSAEINVAL), is already reading (WSAEALREADY), or has
 * already reached EOF (WSAESHUTDOWN). */
int uv_tcp_read_start(uv_tcp_t* handle, uv_alloc_cb alloc_cb,
    uv_read_cb read_cb) {
  uv_loop_t* loop = handle->loop;

  if (!(handle->flags & UV_HANDLE_CONNECTION)) {
    uv__set_sys_error(loop, WSAEINVAL);
    return -1;
  }

  if (handle->flags & UV_HANDLE_READING) {
    uv__set_sys_error(loop, WSAEALREADY);
    return -1;
  }

  if (handle->flags & UV_HANDLE_EOF) {
    uv__set_sys_error(loop, WSAESHUTDOWN);
    return -1;
  }

  handle->flags |= UV_HANDLE_READING;
  handle->read_cb = read_cb;
  handle->alloc_cb = alloc_cb;
  INCREASE_ACTIVE_COUNT(loop, handle);

  /* If reading was stopped and then started again, there could still be a
   * read request pending; only queue a new one if there isn't. */
  if (!(handle->flags & UV_HANDLE_READ_PENDING)) {
    /* In IOCP-emulation mode each request needs an event object; create it
     * lazily. Failure to create it is treated as fatal. */
    if (handle->flags & UV_HANDLE_EMULATE_IOCP &&
        !handle->read_req.event_handle) {
      handle->read_req.event_handle = CreateEvent(NULL, 0, 0, NULL);
      if (!handle->read_req.event_handle) {
        uv_fatal_error(GetLastError(), "CreateEvent");
      }
    }
    uv_tcp_queue_read(loop, handle);
  }

  return 0;
}
/* Puts the handle into listening mode (libev backend). Creates the socket
 * lazily if none exists yet, calls listen(), then arms the read watcher
 * that accepts incoming connections via uv__server_io. */
int uv_tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb) {
  int r;

  /* Surface an error that bind() deferred (e.g. EADDRINUSE). */
  if (tcp->delayed_error) {
    uv__set_sys_error(tcp->loop, tcp->delayed_error);
    return -1;
  }

  if (tcp->fd < 0) {
    /* No socket yet; create an IPv4 stream socket on demand. */
    if ((tcp->fd = uv__socket(AF_INET, SOCK_STREAM, 0)) == -1) {
      uv__set_sys_error(tcp->loop, errno);
      return -1;
    }

    if (uv__stream_open((uv_stream_t*)tcp, tcp->fd, UV_READABLE)) {
      uv__close(tcp->fd);
      tcp->fd = -1;
      return -1;
    }
  }

  assert(tcp->fd >= 0);

  r = listen(tcp->fd, backlog);
  if (r < 0) {
    uv__set_sys_error(tcp->loop, errno);
    return -1;
  }

  tcp->connection_cb = cb;

  /* Start listening for connections. */
  ev_io_set(&tcp->read_watcher, tcp->fd, EV_READ);
  ev_set_cb(&tcp->read_watcher, uv__server_io);
  ev_io_start(tcp->loop->ev, &tcp->read_watcher);

  return 0;
}
/* Cancels outstanding AFD poll requests on the handle's socket by issuing
 * an exclusive poll with an empty effective event mask; the kernel then
 * completes any prior (non-exclusive) poll for the same socket.
 * Returns 0 on success, -1 with the loop error set on failure. */
static int uv__fast_poll_cancel_poll_reqs(uv_loop_t* loop, uv_poll_t* handle) {
  AFD_POLL_INFO afd_poll_info;
  DWORD result;
  HANDLE event;
  OVERLAPPED overlapped;

  event = CreateEvent(NULL, TRUE, FALSE, NULL);
  if (event == NULL) {
    uv__set_sys_error(loop, GetLastError());
    return -1;
  }

  afd_poll_info.Exclusive = TRUE;
  afd_poll_info.NumberOfHandles = 1;
  afd_poll_info.Timeout.QuadPart = INT64_MAX;
  afd_poll_info.Handles[0].Handle = (HANDLE) handle->socket;
  afd_poll_info.Handles[0].Status = 0;
  afd_poll_info.Handles[0].Events = AFD_POLL_ALL;

  memset(&overlapped, 0, sizeof overlapped);
  /* BUG FIX: tag the event handle by setting its low-order bit (was `& 1`,
   * which collapsed the handle to 0 or 1). A low-bit-tagged hEvent tells
   * the kernel not to queue a completion packet to the I/O completion port
   * for this operation; we wait on / discard the event ourselves. */
  overlapped.hEvent = (HANDLE) ((uintptr_t) event | 1);

  result = uv_msafd_poll(handle->socket, &afd_poll_info, &overlapped);
  if (result == SOCKET_ERROR) {
    /* Save the error code immediately; later calls may clobber it. */
    DWORD error = WSAGetLastError();
    if (error != WSA_IO_PENDING) {
      uv__set_sys_error(loop, error);
      CloseHandle(event);
      return -1;
    }
  }

  CloseHandle(event);
  return 0;
}
/* Puts the handle into listening mode (uv__io backend).
 * Single-accept mode (on by default, toggled via the UV_TCP_SINGLE_ACCEPT
 * environment variable) accepts one connection per loop iteration; the
 * idle handle allocated here is used to yield between accepts. */
int uv_tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb) {
  static int single_accept = -1;

  /* Surface an error that bind() deferred (e.g. EADDRINUSE). */
  if (tcp->delayed_error)
    return uv__set_sys_error(tcp->loop, tcp->delayed_error);

  /* Read the environment override once per process. */
  if (single_accept == -1) {
    const char* val = getenv("UV_TCP_SINGLE_ACCEPT");
    single_accept = (val == NULL) || (atoi(val) != 0); /* on by default */
  }

  if (!single_accept)
    goto no_single_accept;

  tcp->idle_handle = malloc(sizeof(*tcp->idle_handle));
  if (tcp->idle_handle == NULL)
    return uv__set_sys_error(tcp->loop, ENOMEM);

  /* Idle-handle init failure indicates a broken loop; abort loudly. */
  if (uv_idle_init(tcp->loop, tcp->idle_handle))
    abort();

  tcp->flags |= UV_TCP_SINGLE_ACCEPT;

no_single_accept:
  if (maybe_new_socket(tcp, AF_INET, UV_STREAM_READABLE))
    return -1;

  if (listen(tcp->fd, backlog))
    return uv__set_sys_error(tcp->loop, errno);

  tcp->connection_cb = cb;

  /* Start listening for connections. */
  uv__io_set(&tcp->read_watcher, uv__server_io, tcp->fd, UV__IO_READ);
  uv__io_start(tcp->loop, &tcp->read_watcher);

  return 0;
}
/* Switches the terminal between raw mode (mode != 0) and the original
 * cooked mode (mode == 0). The first switch to raw mode saves the termios
 * state both on the handle and in the process-wide orig_termios so
 * uv_tty_reset_mode() can restore it.
 * NOTE(review): a no-op call (already in the requested mode) falls through
 * to the `fatal` label and returns -1 with a stale errno — confirm this is
 * intentional before relying on the return value. */
int uv_tty_set_mode(uv_tty_t* tty, int mode) {
  struct termios raw;
  int fd;

  fd = uv__stream_fd(tty);

  if (mode && tty->mode == 0) {
    /* on */
    if (tcgetattr(fd, &tty->orig_termios)) {
      goto fatal;
    }

    /* This is used for uv_tty_reset_mode() */
    if (orig_termios_fd == -1) {
      orig_termios = tty->orig_termios;
      orig_termios_fd = fd;
    }

    /* Classic raw-mode setup: no signals, no echo, no canonical input,
     * byte-at-a-time reads (VMIN=1, VTIME=0). */
    raw = tty->orig_termios;
    raw.c_iflag &= ~(BRKINT | ICRNL | INPCK | ISTRIP | IXON);
    raw.c_oflag |= (ONLCR);
    raw.c_cflag |= (CS8);
    raw.c_lflag &= ~(ECHO | ICANON | IEXTEN | ISIG);
    raw.c_cc[VMIN] = 1;
    raw.c_cc[VTIME] = 0;

    /* Put terminal in raw mode after draining */
    if (tcsetattr(fd, TCSADRAIN, &raw)) {
      goto fatal;
    }

    tty->mode = 1;
    return 0;
  } else if (mode == 0 && tty->mode) {
    /* off */
    /* Put terminal in original mode after flushing */
    if (tcsetattr(fd, TCSAFLUSH, &tty->orig_termios)) {
      goto fatal;
    }

    tty->mode = 0;
    return 0;
  }

fatal:
  uv__set_sys_error(tty->loop, errno);
  return -1;
}
/* Tears down a fully-closed stream: cancels the pending connect request,
 * fails every still-queued write with UV_ECANCELED, delivers callbacks for
 * completed writes with their recorded error, and cancels the pending
 * shutdown request. Must only run after the stream is closed and its I/O
 * watcher is inactive (asserted below). */
void uv__stream_destroy(uv_stream_t* stream) {
  uv_write_t* req;
  ngx_queue_t* q;

  assert(!uv__io_active(&stream->io_watcher, UV__POLLIN | UV__POLLOUT));
  assert(stream->flags & UV_CLOSED);

  if (stream->connect_req) {
    uv__req_unregister(stream->loop, stream->connect_req);
    uv__set_artificial_error(stream->loop, UV_ECANCELED);
    stream->connect_req->cb(stream->connect_req, -1);
    stream->connect_req = NULL;
  }

  /* Writes that never made it onto the wire: cancel each one. */
  while (!ngx_queue_empty(&stream->write_queue)) {
    q = ngx_queue_head(&stream->write_queue);
    ngx_queue_remove(q);

    req = ngx_queue_data(q, uv_write_t, queue);
    uv__req_unregister(stream->loop, req);

    /* bufs points at heap storage unless it aliases the small inline
     * array bufsml. */
    if (req->bufs != req->bufsml)
      free(req->bufs);

    if (req->cb) {
      uv__set_artificial_error(req->handle->loop, UV_ECANCELED);
      req->cb(req, -1);
    }
  }

  /* Writes that did complete but whose callbacks haven't run yet: deliver
   * them now with the per-request error recorded at completion time. */
  while (!ngx_queue_empty(&stream->write_completed_queue)) {
    q = ngx_queue_head(&stream->write_completed_queue);
    ngx_queue_remove(q);

    req = ngx_queue_data(q, uv_write_t, queue);
    uv__req_unregister(stream->loop, req);

    if (req->cb) {
      uv__set_sys_error(stream->loop, req->error);
      req->cb(req, req->error ? -1 : 0);
    }
  }

  if (stream->shutdown_req) {
    uv__req_unregister(stream->loop, stream->shutdown_req);
    uv__set_artificial_error(stream->loop, UV_ECANCELED);
    stream->shutdown_req->cb(stream->shutdown_req, -1);
    stream->shutdown_req = NULL;
  }
}
/* Schedules work_cb on the Windows thread pool; after_work_cb runs on the
 * loop thread when the work completes. */
int uv_queue_work(uv_loop_t* loop, uv_work_t* req, uv_work_cb work_cb,
    uv_after_work_cb after_work_cb) {
  BOOL queued;

  if (work_cb == NULL)
    return uv__set_artificial_error(loop, UV_EINVAL);

  uv_work_req_init(loop, req, work_cb, after_work_cb);

  queued = QueueUserWorkItem(&uv_work_thread_proc, req,
                             WT_EXECUTELONGFUNCTION);
  if (!queued) {
    uv__set_sys_error(loop, GetLastError());
    return -1;
  }

  uv__req_register(loop, req);
  return 0;
}
/* Retrieves the address of the peer this handle is connected to. */
int uv_tcp_getpeername(uv_tcp_t* handle, struct sockaddr* name,
    int* namelen) {
  uv_loop_t* loop = handle->loop;

  /* An unbound handle has no peer to report. */
  if (!(handle->flags & UV_HANDLE_BOUND)) {
    uv__set_sys_error(loop, WSAEINVAL);
    return -1;
  }

  /* Surface a bind error that was deferred at bind() time. */
  if (handle->flags & UV_HANDLE_BIND_ERROR) {
    uv__set_sys_error(loop, handle->bind_error);
    return -1;
  }

  if (getpeername(handle->socket, name, namelen) != 0) {
    uv__set_sys_error(loop, WSAGetLastError());
    return -1;
  }

  return 0;
}
/* Enables or disables TCP keep-alive on the handle's socket; `delay`
 * (seconds) sets the idle time before probes start on platforms that
 * expose a knob for it. Returns 0 on success, -1 with the loop error set
 * on setsockopt failure. */
int uv__tcp_keepalive(uv_tcp_t* handle, int enable, unsigned int delay) {
  if (setsockopt(handle->fd, SOL_SOCKET, SO_KEEPALIVE, &enable,
      sizeof enable) == -1) {
    uv__set_sys_error(handle->loop, errno);
    return -1;
  }

#ifdef TCP_KEEPIDLE
  /* Linux-style knob: idle seconds before the first keep-alive probe. */
  if (enable && setsockopt(handle->fd, IPPROTO_TCP, TCP_KEEPIDLE, &delay,
      sizeof delay) == -1) {
    uv__set_sys_error(handle->loop, errno);
    return -1;
  }
#endif

  /* Solaris/SmartOS, if you don't support keep-alive,
   * then don't advertise it in your system headers...
   * (TCP_KEEPALIVE is defined there but setting it fails.) */
#if defined(TCP_KEEPALIVE) && !defined(__sun)
  if (enable && setsockopt(handle->fd, IPPROTO_TCP, TCP_KEEPALIVE, &delay,
      sizeof delay) == -1) {
    uv__set_sys_error(handle->loop, errno);
    return -1;
  }
#endif

  return 0;
}
/* Joins or leaves an IPv4 multicast group. Binds the socket to 0.0.0.0
 * first if it is still unbound. IPv6 handles are rejected with UV_ENOSYS;
 * an unknown membership value yields UV_EFAULT. */
int uv_udp_set_membership(uv_udp_t* handle, const char* multicast_addr,
    const char* interface_addr, uv_membership membership) {
  int optname;
  struct ip_mreq mreq;

  /* If the socket is unbound, bind to inaddr_any. */
  if (!(handle->flags & UV_HANDLE_BOUND) &&
      uv_udp_bind(handle, uv_addr_ip4_any_, 0) < 0) {
    return -1;
  }

  if (handle->flags & UV_HANDLE_IPV6) {
    uv__set_artificial_error(handle->loop, UV_ENOSYS);
    return -1;
  }

  memset(&mreq, 0, sizeof mreq);

  /* Choose the local interface; default to INADDR_ANY when unspecified. */
  if (interface_addr) {
    mreq.imr_interface.s_addr = inet_addr(interface_addr);
  } else {
    mreq.imr_interface.s_addr = htonl(INADDR_ANY);
  }

  mreq.imr_multiaddr.s_addr = inet_addr(multicast_addr);

  switch (membership) {
    case UV_JOIN_GROUP:
      optname = IP_ADD_MEMBERSHIP;
      break;
    case UV_LEAVE_GROUP:
      optname = IP_DROP_MEMBERSHIP;
      break;
    default:
      uv__set_artificial_error(handle->loop, UV_EFAULT);
      return -1;
  }

  if (setsockopt(handle->socket, IPPROTO_IP, optname, (char*) &mreq,
      sizeof mreq) == SOCKET_ERROR) {
    uv__set_sys_error(handle->loop, WSAGetLastError());
    return -1;
  }

  return 0;
}
/* Completion handler for a UDP send request: invokes the user callback
 * with 0 on success or -1 (loop error set) on failure. */
void uv_process_udp_send_req(uv_loop_t* loop, uv_udp_t* handle,
    uv_udp_send_t* req) {
  assert(handle->type == UV_UDP);

  if (req->cb != NULL) {
    if (!REQ_SUCCESS(req)) {
      uv__set_sys_error(loop, GET_REQ_SOCK_ERROR(req));
      req->cb(req, -1);
    } else {
      req->cb(req, 0);
    }
  }

  DECREASE_PENDING_REQ_COUNT(handle);
}
/* eio completion callback for uv_getaddrinfo(): maps the getaddrinfo
 * return code onto a libuv error, frees the request's scratch copies of
 * hints/service/hostname, and invokes the user callback. */
static int uv_getaddrinfo_done(eio_req* req) {
  uv_getaddrinfo_t* handle = req->data;
  struct addrinfo *res = handle->res;
#if __sun
  /* Measured before handle->hostname is freed below. */
  size_t hostlen = strlen(handle->hostname);
#endif

  handle->res = NULL;

  uv_unref(handle->loop);

  free(handle->hints);
  free(handle->service);
  free(handle->hostname);

  if (handle->retcode == 0) {
    /* OK */
#if EAI_NODATA /* FreeBSD deprecated EAI_NODATA */
  } else if (handle->retcode == EAI_NONAME || handle->retcode == EAI_NODATA) {
#else
  } else if (handle->retcode == EAI_NONAME) {
#endif
    uv__set_sys_error(handle->loop, ENOENT); /* FIXME compatibility hack */
#if __sun
  } else if (handle->retcode == EAI_MEMORY && hostlen >= MAXHOSTNAMELEN) {
    /* Solaris reports over-long host names as EAI_MEMORY; treat it like a
     * name-lookup failure for consistency with the branch above. */
    uv__set_sys_error(handle->loop, ENOENT);
#endif
  } else {
    /* Preserve the raw getaddrinfo code under the UV_EADDRINFO umbrella. */
    handle->loop->last_err.code = UV_EADDRINFO;
    handle->loop->last_err.sys_errno_ = handle->retcode;
  }

  handle->cb(handle, handle->retcode, res);

  return 0;
}
static void uv_pipe_read_error(uv_loop_t* loop, uv_pipe_t* handle, int error, uv_buf_t buf) { /* If there is an eof timer running, we don't need it any more, */ /* so discard it. */ eof_timer_destroy(handle); uv_read_stop((uv_stream_t*) handle); uv__set_sys_error(loop, error); if (handle->read2_cb) { handle->read2_cb(handle, -1, buf, UV_UNKNOWN_HANDLE); } else { handle->read_cb((uv_stream_t*)handle, -1, buf); } }
/* libev read callback for listening stream sockets: accepts connections
 * until EAGAIN and hands each one to connection_cb via accepted_fd. */
void uv__server_io(EV_P_ ev_io* watcher, int revents) {
  int fd;
  struct sockaddr_storage addr;
  uv_stream_t* stream = watcher->data;

  assert(watcher == &stream->read_watcher ||
         watcher == &stream->write_watcher);
  assert(revents == EV_READ);
  assert(!(stream->flags & UV_CLOSING));

  /* A previously accepted fd is still waiting for uv_accept(); stop
   * watching until the user collects it. */
  if (stream->accepted_fd >= 0) {
    ev_io_stop(EV_A, &stream->read_watcher);
    return;
  }

  /* connection_cb can close the server socket while we're
   * in the loop so check it on each iteration.
   */
  while (stream->fd != -1) {
    assert(stream->accepted_fd < 0);
    fd = uv__accept(stream->fd, (struct sockaddr*)&addr, sizeof addr);

    if (fd < 0) {
      if (errno == EAGAIN || errno == EWOULDBLOCK) {
        /* No problem. */
        return;
      } else if (errno == EMFILE) {
        /* TODO special trick. unlock reserved socket, accept, close. */
        return;
      } else if (errno == ECONNABORTED) {
        /* ignore */
        continue;
      } else {
        uv__set_sys_error(stream->loop, errno);
        stream->connection_cb((uv_stream_t*)stream, -1);
      }
    } else {
      stream->accepted_fd = fd;
      stream->connection_cb((uv_stream_t*)stream, 0);
      if (stream->accepted_fd >= 0) {
        /* The user hasn't yet called uv_accept(); stop reading until the
         * accepted fd is collected. */
        ev_io_stop(stream->loop->ev, &stream->read_watcher);
        return;
      }
    }
  }
}
/* Completion handler for a pipe connect request: on success the handle is
 * initialized as a connection before the callback fires. */
void uv_process_pipe_connect_req(uv_loop_t* loop, uv_pipe_t* handle,
    uv_connect_t* req) {
  uv_connect_cb connect_cb;

  assert(handle->type == UV_NAMED_PIPE);

  connect_cb = (uv_connect_cb) req->cb;
  if (connect_cb != NULL) {
    if (REQ_SUCCESS(req)) {
      uv_pipe_connection_init(handle);
      connect_cb(req, 0);
    } else {
      uv__set_sys_error(loop, GET_REQ_ERROR(req));
      connect_cb(req, -1);
    }
  }

  DECREASE_PENDING_REQ_COUNT(handle);
}
/* Reports the size of the virtual terminal window (Windows console). */
int uv_tty_get_winsize(uv_tty_t* tty, int* width, int* height) {
  CONSOLE_SCREEN_BUFFER_INFO info;
  BOOL ok;

  ok = GetConsoleScreenBufferInfo(tty->handle, &info);
  if (!ok) {
    uv__set_sys_error(tty->loop, GetLastError());
    return -1;
  }

  /* Refresh the cached virtual window under the output lock, then read
   * the shared dimensions. */
  EnterCriticalSection(&uv_tty_output_lock);
  uv_tty_update_virtual_window(&info);
  LeaveCriticalSection(&uv_tty_output_lock);

  *width = uv_tty_virtual_width;
  *height = uv_tty_virtual_height;

  return 0;
}
/* Runs deferred teardown work for a TCP handle: first completes a pending
 * shutdown once all writes have drained, then finishes closing once every
 * outstanding request is gone. */
void uv_tcp_endgame(uv_loop_t* loop, uv_tcp_t* handle) {
  int status;
  int sys_error;

  /* Phase 1: graceful shutdown — connected, shutdown requested, not yet
   * shut, and no writes still in flight. */
  if (handle->flags & UV_HANDLE_CONNECTION &&
      handle->flags & UV_HANDLE_SHUTTING &&
      !(handle->flags & UV_HANDLE_SHUT) &&
      handle->write_reqs_pending == 0) {

    if (shutdown(handle->socket, SD_SEND) != SOCKET_ERROR) {
      status = 0;
      handle->flags |= UV_HANDLE_SHUT;
    } else {
      status = -1;
      /* sys_error is only read below when status == -1. */
      sys_error = WSAGetLastError();
    }

    if (handle->shutdown_req->cb) {
      if (status == -1) {
        uv__set_sys_error(loop, sys_error);
      }
      handle->shutdown_req->cb(handle->shutdown_req, status);
    }

    DECREASE_PENDING_REQ_COUNT(handle);
    return;
  }

  /* Phase 2: final close — fires once per handle (asserted). */
  if (handle->flags & UV_HANDLE_CLOSING &&
      handle->reqs_pending == 0) {
    assert(!(handle->flags & UV_HANDLE_CLOSED));
    handle->flags |= UV_HANDLE_CLOSED;

    /* Listening handles own an array of accept requests; release it. */
    if (!(handle->flags & UV_HANDLE_CONNECTION) && handle->accept_reqs) {
      free(handle->accept_reqs);
      handle->accept_reqs = NULL;
    }

    if (handle->close_cb) {
      handle->close_cb((uv_handle_t*)handle);
    }

    active_tcp_streams--;

    uv_unref(loop);
  }
}
/* Imports a TCP socket duplicated in another process (legacy two-argument
 * variant): materializes the socket from the WSADuplicateSocketW blob and
 * attaches it to the handle. */
int uv_tcp_import(uv_tcp_t* tcp, WSAPROTOCOL_INFOW* socket_protocol_info) {
  SOCKET imported;
  int rc;

  imported = WSASocketW(AF_INET,
                        SOCK_STREAM,
                        IPPROTO_IP,
                        socket_protocol_info,
                        0,
                        WSA_FLAG_OVERLAPPED);
  if (imported == INVALID_SOCKET) {
    uv__set_sys_error(tcp->loop, WSAGetLastError());
    return -1;
  }

  tcp->flags |= UV_HANDLE_BOUND;
  rc = uv_tcp_set_socket(tcp->loop, tcp, imported, 1);
  return rc;
}
/* Creates a file watcher for `filename`. The file is opened synchronously;
 * on OS X, directories are handed to the FSEvents backend while plain
 * files fall back to the kqueue-based uv__fs_event_start path. */
int uv_fs_event_init(uv_loop_t* loop,
                     uv_fs_event_t* handle,
                     const char* filename,
                     uv_fs_event_cb cb,
                     int flags) {
  int fd;
#if defined(__APPLE__)
  struct stat statbuf;
#endif /* defined(__APPLE__) */

  /* We don't support any flags yet. */
  assert(!flags);

  /* TODO open asynchronously - but how do we report back errors? */
  if ((fd = open(filename, O_RDONLY)) == -1) {
    uv__set_sys_error(loop, errno);
    return -1;
  }

  uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
  uv__handle_start(handle); /* FIXME shouldn't start automatically */
  handle->filename = strdup(filename);
  handle->fflags = 0;
  handle->cb = cb;
  handle->fd = fd;

#if defined(__APPLE__)
  /* Nullify field to perform checks later */
  handle->cf_eventstream = NULL;

  if (fstat(fd, &statbuf))
    goto fallback;
  /* FSEvents works only with directories */
  if (!(statbuf.st_mode & S_IFDIR))
    goto fallback;

  return uv__fsevents_init(handle);

fallback:
#endif /* defined(__APPLE__) */

  uv__fs_event_start(handle);

  return 0;
}
/* IOCP completion handler for a queued AcceptEx request. On success the
 * request is pushed onto pending_accepts for uv_accept() to pick up; on
 * failure the accepted socket is discarded and the accept re-queued. */
void uv_process_tcp_accept_req(uv_loop_t* loop, uv_tcp_t* handle,
    uv_req_t* raw_req) {
  uv_tcp_accept_t* req = (uv_tcp_accept_t*) raw_req;

  assert(handle->type == UV_TCP);

  /* If handle->accepted_socket is not a valid socket, then
   * uv_queue_accept must have failed. This is a serious error. We stop
   * accepting connections and report this error to the connection
   * callback. */
  if (req->accept_socket == INVALID_SOCKET) {
    if (handle->flags & UV_HANDLE_LISTENING) {
      handle->flags &= ~UV_HANDLE_LISTENING;
      DECREASE_ACTIVE_COUNT(loop, handle);
      if (handle->connection_cb) {
        uv__set_sys_error(loop, GET_REQ_SOCK_ERROR(req));
        handle->connection_cb((uv_stream_t*)handle, -1);
      }
    }
  } else if (REQ_SUCCESS(req) &&
      setsockopt(req->accept_socket,
                 SOL_SOCKET,
                 SO_UPDATE_ACCEPT_CONTEXT,
                 (char*)&handle->socket,
                 sizeof(handle->socket)) == 0) {
    req->next_pending = handle->pending_accepts;
    handle->pending_accepts = req;

    /* Accept and SO_UPDATE_ACCEPT_CONTEXT were successful. */
    if (handle->connection_cb) {
      handle->connection_cb((uv_stream_t*)handle, 0);
    }
  } else {
    /* Error related to accepted socket is ignored because the server
     * socket may still be healthy. If the server socket is broken,
     * uv_queue_accept will detect it. */
    closesocket(req->accept_socket);
    req->accept_socket = INVALID_SOCKET;
    if (handle->flags & UV_HANDLE_LISTENING) {
      uv_tcp_queue_accept(handle, req);
    }
  }

  DECREASE_PENDING_REQ_COUNT(handle);
}
/* Writes buffers to a stream, optionally sending a handle alongside.
 * Only named pipes support handle passing on this platform. */
int uv_write2(uv_write_t* req, uv_stream_t* handle, uv_buf_t bufs[],
    int bufcnt, uv_stream_t* send_handle, uv_write_cb cb) {
  uv_loop_t* loop = handle->loop;

  if (!(handle->flags & UV_HANDLE_WRITABLE)) {
    uv__set_artificial_error(loop, UV_EPIPE);
    return -1;
  }

  if (handle->type != UV_NAMED_PIPE) {
    assert(0);
    uv__set_sys_error(loop, WSAEINVAL);
    return -1;
  }

  return uv_pipe_write2(loop,
                        req,
                        (uv_pipe_t*) handle,
                        bufs,
                        bufcnt,
                        send_handle,
                        cb);
}
/* Ensures the handle has an open stream socket, creating one of the given
 * domain when necessary. */
static int maybe_new_socket(uv_tcp_t* handle, int domain, int flags) {
  int fd;

  if (uv__stream_fd(handle) != -1)
    return 0; /* Socket already exists. */

  fd = uv__socket(domain, SOCK_STREAM, 0);
  if (fd == -1)
    return uv__set_sys_error(handle->loop, errno);

  if (uv__stream_open((uv_stream_t*) handle, fd, flags)) {
    close(fd);
    return -1;
  }

  return 0;
}
/* Lazily creates the loop's shared inotify fd and starts its watcher. */
static int init_inotify(uv_loop_t* loop) {
  int fd;

  if (loop->inotify_fd != -1)
    return 0; /* Already initialized. */

  fd = new_inotify_fd();
  if (fd == -1) {
    /* loop->inotify_fd remains -1 so a later call can retry. */
    uv__set_sys_error(loop, errno);
    return -1;
  }
  loop->inotify_fd = fd;

  ev_io_init(&loop->inotify_read_watcher, uv__inotify_read,
             loop->inotify_fd, EV_READ);
  ev_io_start(loop->ev, &loop->inotify_read_watcher);

  return 0;
}
/* uv__io read callback for listening stream sockets: accepts connections
 * until EAGAIN and hands each one to connection_cb via accepted_fd. */
void uv__server_io(uv_loop_t* loop, uv__io_t* w, int events) {
  int fd;
  uv_stream_t* stream = container_of(w, uv_stream_t, read_watcher);

  assert(events == UV__IO_READ);
  assert(!(stream->flags & UV_CLOSING));

  /* A previously accepted fd is still waiting for uv_accept(); stop
   * watching until the user collects it. */
  if (stream->accepted_fd >= 0) {
    uv__io_stop(loop, &stream->read_watcher);
    return;
  }

  /* connection_cb can close the server socket while we're
   * in the loop so check it on each iteration.
   */
  while (stream->fd != -1) {
    assert(stream->accepted_fd < 0);
    fd = uv__accept(stream->fd);

    if (fd < 0) {
      if (errno == EAGAIN || errno == EWOULDBLOCK) {
        /* No problem. */
        return;
      } else if (errno == EMFILE) {
        /* TODO special trick. unlock reserved socket, accept, close. */
        return;
      } else if (errno == ECONNABORTED) {
        /* ignore */
        continue;
      } else {
        uv__set_sys_error(stream->loop, errno);
        stream->connection_cb((uv_stream_t*)stream, -1);
      }
    } else {
      stream->accepted_fd = fd;
      stream->connection_cb((uv_stream_t*)stream, 0);
      if (stream->accepted_fd >= 0) {
        /* The user hasn't yet called uv_accept(); stop reading until the
         * accepted fd is collected. */
        uv__io_stop(stream->loop, &stream->read_watcher);
        return;
      }
    }
  }
}