/*
 * Accept one client connection on the listening socket.
 *
 * Retries accept() on EINTR. Returns the accepted fd on success, or -1 when:
 *   - the listener is not ready (EAGAIN/EWOULDBLOCK/ECONNABORTED),
 *   - the process is out of file descriptors (EMFILE/ENFILE),
 *   - accept() fails for any other reason, or
 *   - the configured client-connection limit is already reached (the
 *     freshly accepted fd is closed and the rejection counted in stats).
 */
int
vr_listen_accept(vr_listen *vlisten)
{
    rstatus_t status;
    int sd;

    ASSERT(vlisten->sd > 0);

    log_debug(LOG_DEBUG,"client_accept");
    for (;;) {
        sd = accept(vlisten->sd, NULL, NULL);
        if (sd < 0) {
            if (errno == EINTR) {
                log_debug(LOG_VERB, "accept on p %d not ready - eintr",
                          vlisten->sd);
                continue;
            }

            if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ECONNABORTED) {
                log_debug(LOG_VERB, "accept on p %d not ready - eagain",
                          vlisten->sd);
                return -1;
            }

            if (errno == EMFILE || errno == ENFILE) {
                /* Fix: print server.maxclients with PRIu32, matching the
                 * limit-check log below (it was inconsistently %u here;
                 * a mismatched printf specifier is undefined behavior). */
                log_debug(LOG_CRIT, "accept on p %d "
                          "used connections %"PRIu32" max client connections %"PRIu32" "
                          "curr client connections %"PRIu32" failed: %s",
                          vlisten->sd, conn_ncurr_conn(),
                          server.maxclients, conn_ncurr_cconn(),
                          strerror(errno));
                return -1;
            }

            log_error("accept on p %d failed: %s", vlisten->sd,
                      strerror(errno));
            return -1;
        }

        break;
    }

    /* Enforce the client-connection limit: close the just-accepted fd
     * and account the rejection before reporting failure to the caller. */
    if (conn_ncurr_cconn() >= server.maxclients) {
        log_debug(LOG_CRIT, "client connections %"PRIu32" exceed limit %"PRIu32,
                  conn_ncurr_cconn(), server.maxclients);
        status = close(sd);
        if (status < 0) {
            log_error("close c %d failed, ignored: %s", sd, strerror(errno));
        }
        update_stats_add(master.vel.stats, rejected_conn, 1);
        return -1;
    }

    return sd;
}
static rstatus_t stats_add_header(struct stats *st) { rstatus_t status; struct stats_buffer *buf; int64_t cur_ts, uptime; buf = &st->buf; buf->data[0] = '{'; buf->len = 1; cur_ts = (int64_t)time(NULL); uptime = cur_ts - st->start_ts; status = stats_add_string(st, &st->service_str, &st->service); if (status != NC_OK) { return status; } status = stats_add_string(st, &st->source_str, &st->source); if (status != NC_OK) { return status; } status = stats_add_string(st, &st->version_str, &st->version); if (status != NC_OK) { return status; } status = stats_add_num(st, &st->uptime_str, uptime); if (status != NC_OK) { return status; } status = stats_add_num(st, &st->timestamp_str, cur_ts); if (status != NC_OK) { return status; } status = stats_add_num(st, &st->ntotal_conn_str, conn_ntotal_conn()); if (status != NC_OK) { return status; } status = stats_add_num(st, &st->ncurr_conn_str, conn_ncurr_conn()); if (status != NC_OK) { return status; } return NC_OK; }
/*
 * After a client connection arrives, accept() yields a new fd; a new conn
 * is created for that fd so its data can be read.
 *
 * Accepts one client on proxy conn p, wraps the new fd in a client conn,
 * configures the socket (nonblocking; optional TCP keepalive; TCP_NODELAY
 * for inet sockets) and registers it with the event loop.
 *
 * Returns NC_OK when a client was accepted or the condition is recoverable
 * (would-block, fd exhaustion, client-connection limit reached); NC_ERROR
 * on an unexpected accept() failure; NC_ENOMEM when conn_get() fails.
 */
static rstatus_t
proxy_accept(struct context *ctx, struct conn *p) /* p is the proxy conn, i.e. the conn used to listen for clients */
{
    rstatus_t status;
    struct conn *c;
    int sd;
    struct server_pool *pool = p->owner;

    ASSERT(p->proxy && !p->client);
    ASSERT(p->sd > 0);
    ASSERT(p->recv_active && p->recv_ready);

    for (;;) {
        sd = accept(p->sd, NULL, NULL); /* got a new client connection; a new fd is produced */
        if (sd < 0) {
            if (errno == EINTR) {
                log_debug(LOG_VERB, "accept on p %d not ready - eintr",
                          p->sd);
                continue;
            }

            if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ECONNABORTED) {
                log_debug(LOG_VERB, "accept on p %d not ready - eagain",
                          p->sd);
                /* drop recv_ready so the event loop stops retrying until
                 * the next readiness notification */
                p->recv_ready = 0;
                return NC_OK;
            }

            /*
             * Workaround of https://github.com/twitter/twemproxy/issues/97
             *
             * We should never reach here because the check for conn_ncurr_cconn()
             * against ctx->max_ncconn should catch this earlier in the cycle.
             * If we reach here ignore EMFILE/ENFILE, return NC_OK will enable
             * the server continue to run instead of close the server socket
             *
             * The right solution however, is on EMFILE/ENFILE to mask out IN
             * event on the proxy and mask it back in when some existing
             * connections gets closed
             */
            if (errno == EMFILE || errno == ENFILE) {
                log_debug(LOG_CRIT, "accept on p %d with max fds %"PRIu32" "
                          "used connections %"PRIu32" max client connections %"PRIu32" "
                          "curr client connections %"PRIu32" failed: %s",
                          p->sd, ctx->max_nfd, conn_ncurr_conn(),
                          ctx->max_ncconn, conn_ncurr_cconn(),
                          strerror(errno));
                p->recv_ready = 0;
                return NC_OK;
            }

            log_error("accept on p %d failed: %s", p->sd, strerror(errno));
            return NC_ERROR;
        }

        break;
    }

    /* enforce the client-connection limit: close the accepted fd and
     * keep the server running (not a fatal error) */
    if (conn_ncurr_cconn() >= ctx->max_ncconn) {
        log_debug(LOG_CRIT, "client connections %"PRIu32" exceed limit %"PRIu32,
                  conn_ncurr_cconn(), ctx->max_ncconn);
        status = close(sd);
        if (status < 0) {
            log_error("close c %d failed, ignored: %s", sd, strerror(errno));
        }
        return NC_OK;
    }

    /* wrap the fd in a client conn object */
    c = conn_get(p->owner, true, p->redis);
    if (c == NULL) {
        log_error("get conn for c %d from p %d failed: %s", sd, p->sd,
                  strerror(errno));
        status = close(sd);
        if (status < 0) {
            log_error("close c %d failed, ignored: %s", sd, strerror(errno));
        }
        return NC_ENOMEM;
    }
    c->sd = sd;

    stats_pool_incr(ctx, c->owner, client_connections);

    /* nonblocking is mandatory for the event loop; failure closes the conn */
    status = nc_set_nonblocking(c->sd);
    if (status < 0) {
        log_error("set nonblock on c %d from p %d failed: %s", c->sd, p->sd,
                  strerror(errno));
        c->close(ctx, c);
        return status;
    }

    /* keepalive and nodelay are best-effort: failures are logged and ignored */
    if (pool->tcpkeepalive) {
        status = nc_set_tcpkeepalive(c->sd);
        if (status < 0) {
            log_warn("set tcpkeepalive on c %d from p %d failed, ignored: %s",
                     c->sd, p->sd, strerror(errno));
        }
    }

    if (p->family == AF_INET || p->family == AF_INET6) {
        status = nc_set_tcpnodelay(c->sd);
        if (status < 0) {
            log_warn("set tcpnodelay on c %d from p %d failed, ignored: %s",
                     c->sd, p->sd, strerror(errno));
        }
    }

    /* register with the event loop; failure closes the conn */
    status = event_add_conn(ctx->evb, c);
    if (status < 0) {
        log_error("event add conn from p %d failed: %s", p->sd,
                  strerror(errno));
        c->close(ctx, c);
        return status;
    }

    log_debug(LOG_INFO, "accepted c %d on p %d from '%s'", c->sd, p->sd,
              nc_unresolve_peer_desc(c->sd));

    return NC_OK;
}