/* TODO merge with uv__server_io()? */
/* libev read callback for a listening named pipe: accept one connection
 * and hand it to the user via connection_cb.  Preserves errno across the
 * callback invocation. */
void uv__pipe_accept(EV_P_ ev_io* watcher, int revents) {
  struct sockaddr_un sun;
  uv_pipe_t* pipe;
  int saved_errno;
  int sockfd;

  saved_errno = errno;
  pipe = watcher->data;

  assert(pipe->type == UV_NAMED_PIPE);
  assert(pipe->pipe_fname != NULL);

  sockfd = uv__accept(pipe->fd, (struct sockaddr *)&sun, sizeof sun);
  if (sockfd == -1) {
    /* BUGFIX: EAGAIN/EWOULDBLOCK is a benign spurious wakeup on a
     * nonblocking listen fd, not a program error.  The previous code
     * asserted (aborting the whole process) when it happened.  Ignore
     * it and report only real errors. */
    if (errno != EAGAIN && errno != EWOULDBLOCK) {
      uv_err_new(pipe->loop, errno);
    }
  } else {
    pipe->accepted_fd = sockfd;
    pipe->connection_cb((uv_stream_t*)pipe, 0);
    if (pipe->accepted_fd == sockfd) {
      /* The user hasn't called uv_accept() yet; stop watching until
       * the pending fd is taken. */
      ev_io_stop(pipe->loop->ev, &pipe->read_watcher);
    }
  }

  errno = saved_errno;
}
/* TODO merge with uv__server_io()? */
/* I/O watcher callback for a listening named pipe: accept a pending
 * connection and deliver it through connection_cb.  errno is preserved
 * across the callback. */
static void uv__pipe_accept(uv_loop_t* loop, uv__io_t* w, int events) {
  uv_pipe_t* handle;
  int preserved_errno;
  int fd;

  preserved_errno = errno;
  handle = container_of(w, uv_pipe_t, read_watcher);
  assert(handle->type == UV_NAMED_PIPE);

  fd = uv__accept(handle->fd);
  if (fd >= 0) {
    handle->accepted_fd = fd;
    handle->connection_cb((uv_stream_t*)handle, 0);
    if (handle->accepted_fd == fd) {
      /* uv_accept() has not been called yet; stop polling until it is. */
      uv__io_stop(handle->loop, &handle->read_watcher);
    }
  } else if (errno != EAGAIN && errno != EWOULDBLOCK) {
    /* Real failure (EAGAIN/EWOULDBLOCK is just a spurious wakeup). */
    uv__set_sys_error(handle->loop, errno);
    handle->connection_cb((uv_stream_t*)handle, -1);
  }

  errno = preserved_errno;
}
/* Implements a best effort approach to mitigating accept() EMFILE errors.
 * We have a spare file descriptor stashed away that we close to get below
 * the EMFILE limit. Next, we accept all pending connections and close them
 * immediately to signal the clients that we're overloaded - and we are, but
 * we still keep on trucking.
 *
 * There is one caveat: it's not reliable in a multi-threaded environment.
 * The file descriptor limit is per process. Our party trick fails if another
 * thread opens a file or creates a socket in the time window between us
 * calling close() and accept().
 */
static int uv__emfile_trick(uv_loop_t* loop, int accept_fd) {
  int peer_fd;
  int result;

  /* No spare descriptor stashed - nothing we can do. */
  if (loop->emfile_fd == -1)
    return -1;

  /* Free one descriptor so the accept() calls below can succeed. */
  close(loop->emfile_fd);

  for (;;) {
    peer_fd = uv__accept(accept_fd);

    if (peer_fd != -1) {
      /* Drop the connection immediately: we're over the fd limit. */
      close(peer_fd);
      continue;
    }

    if (errno == EINTR)
      continue;

    /* Drained the queue (success) or hit a genuine error. */
    result = (errno == EAGAIN || errno == EWOULDBLOCK) ? 0 : -1;

    /* Restock the spare descriptor before returning. */
    loop->emfile_fd = uv__open_cloexec("/", O_RDONLY);
    return result;
  }
}
/* TODO merge with uv__server_io()? */
/* libev read callback for a listening named pipe: accept a pending
 * connection and hand it to connection_cb, preserving errno throughout. */
void uv__pipe_accept(EV_P_ ev_io* watcher, int revents) {
  struct sockaddr_un saddr;
  uv_pipe_t* handle;
  int stashed_errno;
  int fd;

  stashed_errno = errno;
  handle = watcher->data;
  assert(handle->type == UV_NAMED_PIPE);

  fd = uv__accept(handle->fd, (struct sockaddr *)&saddr, sizeof saddr);
  if (fd >= 0) {
    handle->accepted_fd = fd;
    handle->connection_cb((uv_stream_t*)handle, 0);
    if (handle->accepted_fd == fd) {
      /* uv_accept() has not been called yet; stop watching for reads
       * until the pending fd is taken. */
      ev_io_stop(handle->loop->ev, &handle->read_watcher);
    }
  } else if (errno != EAGAIN && errno != EWOULDBLOCK) {
    /* Real failure (EAGAIN/EWOULDBLOCK is just a spurious wakeup). */
    uv__set_sys_error(handle->loop, errno);
    handle->connection_cb((uv_stream_t*)handle, -1);
  }

  errno = stashed_errno;
}
/* libev read callback for a listening stream: accept incoming connections
 * in a loop until accept() would block, the server is closed, or the user
 * leaves an accepted fd pending. */
void uv__server_io(EV_P_ ev_io* watcher, int revents) {
  int fd;
  struct sockaddr_storage addr;
  uv_stream_t* stream = watcher->data;

  assert(watcher == &stream->read_watcher ||
         watcher == &stream->write_watcher);
  assert(revents == EV_READ);
  assert(!(stream->flags & UV_CLOSING));

  if (stream->accepted_fd >= 0) {
    /* A previously accepted fd is still pending; stop polling until the
     * user picks it up with uv_accept(). */
    ev_io_stop(EV_A, &stream->read_watcher);
    return;
  }

  /* connection_cb can close the server socket while we're
   * in the loop so check it on each iteration.
   */
  while (stream->fd != -1) {
    assert(stream->accepted_fd < 0);
    fd = uv__accept(stream->fd, (struct sockaddr*)&addr, sizeof addr);
    if (fd < 0) {
      if (errno == EAGAIN || errno == EWOULDBLOCK) {
        /* No problem - the backlog is drained. */
        return;
      } else if (errno == EMFILE) {
        /* TODO special trick. unlock reserved socket, accept, close. */
        return;
      } else if (errno == ECONNABORTED) {
        /* ignore - peer aborted before we could accept */
        continue;
      } else {
        /* Genuine error: report it and keep trying on the next pass. */
        uv__set_sys_error(stream->loop, errno);
        stream->connection_cb((uv_stream_t*)stream, -1);
      }
    } else {
      stream->accepted_fd = fd;
      stream->connection_cb((uv_stream_t*)stream, 0);
      if (stream->accepted_fd >= 0) {
        /* The user hasn't called uv_accept() yet; stop watching until
         * the pending fd is taken. */
        ev_io_stop(stream->loop->ev, &stream->read_watcher);
        return;
      }
    }
  }
}
/* I/O watcher callback for a listening stream.  In this variant
 * uv__accept() reports failure as a negative errno return value rather
 * than via errno itself (see the -EAGAIN / -ECONNABORTED checks below). */
void uv__server_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  uv_stream_t* stream;
  int err;

  stream = container_of(w, uv_stream_t, io_watcher);
  assert(events == UV__POLLIN);
  assert(stream->accepted_fd == -1);
  assert(!(stream->flags & UV_CLOSING));

  /* Re-arm the watcher; it is stopped below whenever an accepted fd is
   * left pending. */
  uv__io_start(stream->loop, &stream->io_watcher, UV__POLLIN);

  /* connection_cb can close the server socket while we're
   * in the loop so check it on each iteration.
   */
  while (uv__stream_fd(stream) != -1) {
    assert(stream->accepted_fd == -1);
    /* NOTE(review): set_errno looks like a platform shim that clears the
     * error state before the accept call - confirm its definition. */
    set_errno(0);
    err = uv__accept(uv__stream_fd(stream));
    if (err < 0) {
      if (err == -EAGAIN || err == -EWOULDBLOCK)
        return; /* Not an error. */
      if (err == -ECONNABORTED)
        continue; /* Ignore. Nothing we can do about that. */
      /* Genuine failure: surface the negative errno to the user. */
      stream->connection_cb(stream, err);
      continue;
    }
    stream->accepted_fd = err;
    stream->connection_cb(stream, 0);
    if (stream->accepted_fd != -1) {
      /* The user hasn't called uv_accept() yet; stop polling until the
       * pending fd is taken. */
      uv__io_stop(loop, &stream->io_watcher, UV__POLLIN);
      return;
    }
    /* done accept for mbed - only one connection is accepted per wakeup
     * here; presumably an intentional port-specific limit - TODO confirm */
    break;
  }
}
/* I/O watcher callback for a listening stream: accept connections until
 * the backlog is drained, the server closes, or the user leaves an
 * accepted fd pending. */
void uv__server_io(uv_loop_t* loop, uv__io_t* w, int events) {
  uv_stream_t* stream = container_of(w, uv_stream_t, read_watcher);
  int peer_fd;

  assert(events == UV__IO_READ);
  assert(!(stream->flags & UV_CLOSING));

  if (stream->accepted_fd >= 0) {
    /* A pending fd is still waiting for uv_accept(); stop polling. */
    uv__io_stop(loop, &stream->read_watcher);
    return;
  }

  /* connection_cb may close the server socket from inside the loop,
   * so re-check the fd on every iteration. */
  while (stream->fd != -1) {
    assert(stream->accepted_fd < 0);

    peer_fd = uv__accept(stream->fd);
    if (peer_fd >= 0) {
      stream->accepted_fd = peer_fd;
      stream->connection_cb((uv_stream_t*)stream, 0);
      if (stream->accepted_fd >= 0) {
        /* uv_accept() has not been called yet; stop watching. */
        uv__io_stop(stream->loop, &stream->read_watcher);
        return;
      }
      continue;
    }

    if (errno == EAGAIN || errno == EWOULDBLOCK)
      return; /* Backlog drained - nothing left to accept. */

    if (errno == EMFILE)
      return; /* TODO special trick. unlock reserved socket, accept, close. */

    if (errno == ECONNABORTED)
      continue; /* Peer aborted before we could accept; skip it. */

    /* Genuine error: report it and keep trying on the next pass. */
    uv__set_sys_error(stream->loop, errno);
    stream->connection_cb((uv_stream_t*)stream, -1);
  }
}
/* I/O watcher callback for a listening stream.  Handles EMFILE/ENFILE
 * with uv__emfile_trick() (opt-out via the UV_ACCEPT_EMFILE_TRICK
 * environment variable) and optionally yields between accepts for fairer
 * load balancing across processes (UV_TCP_SINGLE_ACCEPT). */
void uv__server_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  /* Cached result of the env-var lookup: -1 = not checked yet.
   * NOTE(review): not thread-safe if multiple loops run in threads. */
  static int use_emfile_trick = -1;
  uv_stream_t* stream;
  int fd;
  int r;

  stream = container_of(w, uv_stream_t, io_watcher);
  assert(events == UV__POLLIN);
  assert(stream->accepted_fd == -1);
  assert(!(stream->flags & UV_CLOSING));

  if (stream->accepted_fd == -1)
    uv__io_start(stream->loop, &stream->io_watcher, UV__POLLIN);

  /* connection_cb can close the server socket while we're
   * in the loop so check it on each iteration.
   */
  while (uv__stream_fd(stream) != -1) {
    assert(stream->accepted_fd == -1);
    fd = uv__accept(uv__stream_fd(stream));

    if (fd == -1) {
      switch (errno) {
#if EWOULDBLOCK != EAGAIN
      case EWOULDBLOCK:
#endif
      case EAGAIN:
        return; /* Not an error. */

      case ECONNABORTED:
        continue; /* Ignore. */

      case EMFILE:
      case ENFILE:
        /* Out of file descriptors: lazily read the opt-out env var,
         * then try the spare-fd trick to drain (and reject) the
         * backlog. */
        if (use_emfile_trick == -1) {
          const char* val = getenv("UV_ACCEPT_EMFILE_TRICK");
          use_emfile_trick = (val == NULL || atoi(val) != 0);
        }

        if (use_emfile_trick) {
          SAVE_ERRNO(r = uv__emfile_trick(loop, uv__stream_fd(stream)));
          if (r == 0)
            continue;
        }

        /* Fall through. */

      default:
        uv__set_sys_error(loop, errno);
        stream->connection_cb(stream, -1);
        continue;
      }
    }

    stream->accepted_fd = fd;
    stream->connection_cb(stream, 0);

    if (stream->accepted_fd != -1) {
      /* The user hasn't called uv_accept() yet; stop polling until the
       * pending fd is taken. */
      uv__io_stop(loop, &stream->io_watcher, UV__POLLIN);
      return;
    }

    if (stream->type == UV_TCP && (stream->flags & UV_TCP_SINGLE_ACCEPT)) {
      /* Give other processes a chance to accept connections. */
      struct timespec timeout = { 0, 1 };
      nanosleep(&timeout, NULL);
    }
  }
}
/* I/O watcher callback for a listening stream.  Accepts connections in a
 * loop; when the UV_TCP_SINGLE_ACCEPT flag is set, defers further
 * accepts to an idle watcher for fair load balancing across processes. */
void uv__server_io(uv_loop_t* loop, uv__io_t* w, int events) {
  int fd;
  uv_stream_t* stream = container_of(w, uv_stream_t, read_watcher);

  assert(events == UV__IO_READ);
  assert(!(stream->flags & UV_CLOSING));

  if (stream->accepted_fd >= 0) {
    /* A pending fd is still waiting for uv_accept(); stop polling. */
    uv__io_stop(loop, &stream->read_watcher);
    return;
  }

  /* connection_cb can close the server socket while we're
   * in the loop so check it on each iteration.
   */
  while (stream->fd != -1) {
    assert(stream->accepted_fd < 0);
    fd = uv__accept(stream->fd);
    if (fd < 0) {
      if (errno == EAGAIN || errno == EWOULDBLOCK) {
        /* No problem. */
        return;
      } else if (errno == EMFILE) {
        /* TODO special trick. unlock reserved socket, accept, close. */
        return;
      } else if (errno == ECONNABORTED) {
        /* ignore */
        continue;
      } else {
        uv__set_sys_error(stream->loop, errno);
        stream->connection_cb((uv_stream_t*)stream, -1);
      }
    } else {
      stream->accepted_fd = fd;
      stream->connection_cb(stream, 0);
      /* BUGFIX: UV_TCP_SINGLE_ACCEPT is one bit in the flags bitmask.
       * The original compared with ==, which silently fails whenever any
       * other flag bit is set (cf. the & test used by the other
       * uv__server_io variant in this file).  Test the bit with &. */
      if (stream->accepted_fd != -1 ||
          (stream->type == UV_TCP &&
           (stream->flags & UV_TCP_SINGLE_ACCEPT))) {
        /* The user hasn't called uv_accept() yet. */
        uv__io_stop(stream->loop, &stream->read_watcher);
        break;
      }
    }
  }

  if (stream->fd != -1 &&
      stream->accepted_fd == -1 &&
      (stream->type == UV_TCP &&
       (stream->flags & UV_TCP_SINGLE_ACCEPT))) {
    /* Defer the next accept() syscall to the next event loop tick.
     * This lets us guarantee fair load balancing in multi-process setups.
     * The problem is as follows:
     *
     * 1. Multiple processes listen on the same socket.
     * 2. The OS scheduler commonly gives preference to one process to
     *    avoid task switches.
     * 3. That process therefore accepts most of the new connections,
     *    leading to a (sometimes very) unevenly distributed load.
     *
     * Here is how we mitigate this issue:
     *
     * 1. Accept a connection.
     * 2. Start an idle watcher.
     * 3. Don't accept new connections until the idle callback fires.
     *
     * This works because the callback only fires when there have been
     * no recent events, i.e. none of the watched file descriptors have
     * recently been readable or writable.
     */
    uv_tcp_t* tcp = (uv_tcp_t*) stream;
    uv_idle_start(tcp->idle_handle, uv__next_accept);
  }
}