/*
 * Accept a single pending client connection on the listening socket.
 *
 * Transient accept() failures (EINTR) are retried; EAGAIN/EWOULDBLOCK/
 * ECONNABORTED and descriptor exhaustion (EMFILE/ENFILE) are logged and
 * reported as failure. A successfully accepted descriptor is closed and
 * counted as rejected when the configured client connection limit has
 * already been reached.
 *
 * Returns the accepted descriptor on success, -1 on any failure.
 */
int vr_listen_accept(vr_listen *vlisten)
{
    int fd;

    ASSERT(vlisten->sd > 0);

    log_debug(LOG_DEBUG,"client_accept");

    /* Keep calling accept() until we either obtain a descriptor or hit
     * an error that is not a transient interruption. */
    do {
        fd = accept(vlisten->sd, NULL, NULL);
        if (fd >= 0) {
            break;
        }

        if (errno == EINTR) {
            /* interrupted by a signal - retry immediately */
            log_debug(LOG_VERB, "accept on p %d not ready - eintr",
                      vlisten->sd);
            continue;
        }

        if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ECONNABORTED) {
            /* nothing left to accept right now */
            log_debug(LOG_VERB, "accept on p %d not ready - eagain",
                      vlisten->sd);
            return -1;
        }

        if (errno == EMFILE || errno == ENFILE) {
            /* process- or system-wide descriptor table is full */
            log_debug(LOG_CRIT, "accept on p %d "
                      "used connections %"PRIu32" max client connections %u "
                      "curr client connections %"PRIu32" failed: %s",
                      vlisten->sd, conn_ncurr_conn(), server.maxclients,
                      conn_ncurr_cconn(), strerror(errno));
            return -1;
        }

        log_error("accept on p %d failed: %s", vlisten->sd, strerror(errno));
        return -1;
    } while (1);

    /* Enforce the configured limit on concurrent client connections:
     * reject (close + count) the new descriptor when at capacity. */
    if (conn_ncurr_cconn() >= server.maxclients) {
        log_debug(LOG_CRIT, "client connections %"PRIu32" exceed limit %"PRIu32,
                  conn_ncurr_cconn(), server.maxclients);
        if (close(fd) < 0) {
            log_error("close c %d failed, ignored: %s", fd, strerror(errno));
        }
        update_stats_add(master.vel.stats, rejected_conn, 1);
        return -1;
    }

    return fd;
}
/*
 * After a client connection is accepted a new fd is produced; create a
 * new conn for that fd so the client's data can be read from it.
 *
 * Returns NC_OK when a connection was accepted and registered (or when
 * accept was simply not ready / hit a tolerated fd-exhaustion state),
 * NC_ENOMEM when no conn object could be allocated, and NC_ERROR on a
 * hard accept failure.
 */
static rstatus_t
proxy_accept(struct context *ctx, struct conn *p) /* p is the proxy conn, i.e. the conn used to listen for client connections */
{
    rstatus_t status;
    struct conn *c;
    int sd;
    struct server_pool *pool = p->owner;

    ASSERT(p->proxy && !p->client);
    ASSERT(p->sd > 0);
    ASSERT(p->recv_active && p->recv_ready);

    /* Retry accept() until we obtain a descriptor or hit a non-transient
     * error. */
    for (;;) {
        sd = accept(p->sd, NULL, NULL); /* take one new client connection; yields a new fd */
        if (sd < 0) {
            if (errno == EINTR) {
                /* interrupted by a signal - retry */
                log_debug(LOG_VERB, "accept on p %d not ready - eintr",
                          p->sd);
                continue;
            }

            if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ECONNABORTED) {
                /* no more pending connections; wait for the next event */
                log_debug(LOG_VERB, "accept on p %d not ready - eagain",
                          p->sd);
                p->recv_ready = 0;
                return NC_OK;
            }

            /*
             * Workaround of https://github.com/twitter/twemproxy/issues/97
             *
             * We should never reach here because the check for conn_ncurr_cconn()
             * against ctx->max_ncconn should catch this earlier in the cycle.
             * If we reach here ignore EMFILE/ENFILE, return NC_OK will enable
             * the server continue to run instead of close the server socket
             *
             * The right solution however, is on EMFILE/ENFILE to mask out IN
             * event on the proxy and mask it back in when some existing
             * connections gets closed
             */
            if (errno == EMFILE || errno == ENFILE) {
                log_debug(LOG_CRIT, "accept on p %d with max fds %"PRIu32" "
                          "used connections %"PRIu32" max client connections %"PRIu32" "
                          "curr client connections %"PRIu32" failed: %s",
                          p->sd, ctx->max_nfd, conn_ncurr_conn(),
                          ctx->max_ncconn, conn_ncurr_cconn(),
                          strerror(errno));
                p->recv_ready = 0;
                return NC_OK;
            }

            log_error("accept on p %d failed: %s", p->sd, strerror(errno));
            return NC_ERROR;
        }

        break;
    }

    /* Enforce the configured limit on concurrent client connections;
     * the fresh descriptor is closed and the accept is reported OK so
     * the listener keeps running. */
    if (conn_ncurr_cconn() >= ctx->max_ncconn) {
        log_debug(LOG_CRIT, "client connections %"PRIu32" exceed limit %"PRIu32,
                  conn_ncurr_cconn(), ctx->max_ncconn);
        status = close(sd);
        if (status < 0) {
            log_error("close c %d failed, ignored: %s", sd, strerror(errno));
        }
        return NC_OK;
    }

    /* Allocate a client conn object for the accepted descriptor. */
    c = conn_get(p->owner, true, p->redis);
    if (c == NULL) {
        log_error("get conn for c %d from p %d failed: %s", sd, p->sd,
                  strerror(errno));
        status = close(sd);
        if (status < 0) {
            log_error("close c %d failed, ignored: %s", sd, strerror(errno));
        }
        return NC_ENOMEM;
    }
    c->sd = sd;

    stats_pool_incr(ctx, c->owner, client_connections);

    /* Client sockets must be non-blocking for the event loop;
     * failure here is fatal for this connection. */
    status = nc_set_nonblocking(c->sd);
    if (status < 0) {
        log_error("set nonblock on c %d from p %d failed: %s", c->sd, p->sd,
                  strerror(errno));
        c->close(ctx, c);
        return status;
    }

    /* Keepalive is best-effort: failure is logged and ignored. */
    if (pool->tcpkeepalive) {
        status = nc_set_tcpkeepalive(c->sd);
        if (status < 0) {
            log_warn("set tcpkeepalive on c %d from p %d failed, ignored: %s",
                     c->sd, p->sd, strerror(errno));
        }
    }

    /* TCP_NODELAY only applies to TCP sockets, not unix domain sockets;
     * best-effort as well. */
    if (p->family == AF_INET || p->family == AF_INET6) {
        status = nc_set_tcpnodelay(c->sd);
        if (status < 0) {
            log_warn("set tcpnodelay on c %d from p %d failed, ignored: %s",
                     c->sd, p->sd, strerror(errno));
        }
    }

    /* Register the new conn with the event loop; fatal on failure. */
    status = event_add_conn(ctx->evb, c);
    if (status < 0) {
        log_error("event add conn from p %d failed: %s", p->sd,
                  strerror(errno));
        c->close(ctx, c);
        return status;
    }

    log_debug(LOG_INFO, "accepted c %d on p %d from '%s'", c->sd, p->sd,
              nc_unresolve_peer_desc(c->sd));

    return NC_OK;
}
/*
 * Accept one pending client connection on the proxy listening socket,
 * record its socket address, and register a client conn for it with the
 * event loop.
 *
 * Returns NC_OK on success (and on tolerated transient conditions),
 * NC_ENOMEM when no conn object is available, and NC_ERROR on a hard
 * accept failure.
 */
static rstatus_t
proxy_accept(struct context *ctx, struct conn *p)
{
    rstatus_t status;
    struct conn *c;
    int sd;
    struct sockaddr_storage addr;
    socklen_t addr_len;

    ASSERT(p->proxy && !p->client);
    ASSERT(p->sd > 0);
    ASSERT(p->recv_active && p->recv_ready);

    /* Retry accept() until we obtain a usable descriptor or hit a
     * non-transient error. */
    for (;;) {
        sd = accept(p->sd, NULL, NULL);
        if (sd < 0) {
            if (errno == EINTR) {
                /* interrupted by a signal - retry */
                log_debug(LOG_VERB, "accept on p %d not ready - eintr",
                          p->sd);
                continue;
            }

            if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ECONNABORTED) {
                /* no more pending connections; wait for the next event */
                log_debug(LOG_VERB, "accept on p %d not ready - eagain",
                          p->sd);
                p->recv_ready = 0;
                return NC_OK;
            }

            /*
             * FIXME: On EMFILE or ENFILE mask out IN event on the proxy; mask
             * it back in when some existing connection gets closed
             */

            /*
             * Workaround of https://github.com/twitter/twemproxy/issues/97
             * Just ignore EMFILE/ENFILE, return NC_OK will enable the server
             * continue to run instead of close the server socket
             */
            if (errno == EMFILE || errno == ENFILE) {
                log_crit("accept on p %d failed: %s", p->sd,
                         strerror(errno));
                p->recv_ready = 0;

                /* NOTE(review): %d is used for these counters and limits;
                 * confirm the fields/return types are plain ints rather than
                 * unsigned or fixed-width types. */
                log_crit("connections status: rlimit nofile %d, "
                         "used connections: %d, max client connections %d, "
                         "curr client connections %d", ctx->rlimit_nofile,
                         conn_ncurr(), ctx->max_ncconn, conn_ncurr_cconn());

                /* Since we maintain a safe max_ncconn and check
                 * it after every accept, we should not reach here.
                 * So we will panic after this log */
                log_panic("HIT MAX OPEN FILES, IT SHOULD NOT HAPPEN. ABORT.");
                return NC_OK; /* unreachable if log_panic aborts */
            }

            log_error("accept on p %d failed: %s", p->sd, strerror(errno));
            return NC_ERROR;
        }

        /* Capture the accepted socket's address before building the conn.
         * NOTE(review): getsockname() returns the LOCAL address of sd; if
         * the peer (client) address was intended here, this should
         * presumably be getpeername() — confirm against users of c->addr. */
        addr_len = sizeof(addr);
        if (getsockname(sd, (struct sockaddr *)&addr, &addr_len)) {
            /* could not resolve the address; drop this connection and
             * try to accept the next one */
            log_error("getsockname on p %d failed: %s", p->sd, strerror(errno));
            close(sd);
            continue;
        }

        break;
    }

    /* Enforce the configured client connection limit; count the
     * rejection in pool stats and keep the listener running. */
    if (conn_ncurr_cconn() >= ctx->max_ncconn) {
        stats_pool_incr(ctx, p->owner, rejected_connections);
        log_crit("client connections %d exceed limit %d",
                 conn_ncurr_cconn(), ctx->max_ncconn);
        status = close(sd);
        if (status < 0) {
            log_error("close c %d failed, ignored: %s", sd, strerror(errno));
        }
        return NC_OK;
    }

    /* Allocate a client conn object for the accepted descriptor. */
    c = conn_get(p->owner, true, p->redis);
    if (c == NULL) {
        log_error("get conn for c %d from p %d failed: %s", sd, p->sd,
                  strerror(errno));
        status = close(sd);
        if (status < 0) {
            log_error("close c %d failed, ignored: %s", sd, strerror(errno));
        }
        return NC_ENOMEM;
    }
    c->sd = sd;
    /* Stash the address obtained above on the conn; c->addr points into
     * the conn's own c->ss storage so it remains valid for the conn's
     * lifetime. */
    c->family = addr.ss_family;
    c->addrlen = addr_len;
    c->ss = addr;
    c->addr = (struct sockaddr *)&c->ss;

    stats_pool_incr(ctx, c->owner, client_connections);

    /* Client sockets must be non-blocking for the event loop;
     * failure here is fatal for this connection. */
    status = nc_set_nonblocking(c->sd);
    if (status < 0) {
        log_error("set nonblock on c %d from p %d failed: %s", c->sd, p->sd,
                  strerror(errno));
        c->close(ctx, c);
        return status;
    }

    /* TCP_NODELAY only applies to TCP sockets, not unix domain sockets;
     * failure is logged and ignored. */
    if (p->family == AF_INET || p->family == AF_INET6) {
        status = nc_set_tcpnodelay(c->sd);
        if (status < 0) {
            log_warn("set tcpnodelay on c %d from p %d failed, ignored: %s",
                     c->sd, p->sd, strerror(errno));
        }
    }

    /* Register the new conn with the event loop; fatal on failure. */
    status = event_add_conn(ctx->evb, c);
    if (status < 0) {
        log_error("event add conn from p %d failed: %s", p->sd,
                  strerror(errno));
        c->close(ctx, c);
        return status;
    }

    log_notice("accepted c %d on p %d from '%s'", c->sd, p->sd,
               nc_unresolve_peer_desc(c->sd));

    return NC_OK;
}