static void
core_close(struct context *ctx, struct conn *conn)
{
    rstatus_t status;
    char type, *addrstr;

    ASSERT(conn->sd > 0);

    if (conn->client) {
        type = 'c';
        addrstr = nc_unresolve_peer_desc(conn->sd);
    } else {
        type = conn->proxy ? 'p' : 's';
        addrstr = nc_unresolve_addr(conn->addr, conn->addrlen);
    }

    log_debug(LOG_NOTICE, "close %c %d '%s' on event %04"PRIX32" eof %d done "
              "%d rb %zu sb %zu%c %s", type, conn->sd, addrstr, conn->events,
              conn->eof, conn->done, conn->recv_bytes, conn->send_bytes,
              conn->err ? ':' : ' ', conn->err ? strerror(conn->err) : "");

    status = event_del_conn(ctx->ep, conn);
    if (status < 0) {
        log_warn("event del conn e %d %c %d failed, ignored: %s", ctx->ep,
                 type, conn->sd, strerror(errno));
    }

    conn->close(ctx, conn);
}
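/*
 * For context, a minimal sketch of the typical caller of core_close() on the
 * error path, modeled on twemproxy's core_error(): fetch the pending socket
 * error via getsockopt(SO_ERROR) (nc_get_soerror() updates errno), stash it
 * on the conn so core_close() can log it, then tear the connection down.
 * This is illustrative, not necessarily the exact source in this tree.
 */
static void
core_error_sketch(struct context *ctx, struct conn *conn)
{
    rstatus_t status;

    status = nc_get_soerror(conn->sd);  /* getsockopt(SO_ERROR); sets errno */
    if (status < 0) {
        log_warn("get soerr on %d failed, ignored: %s", conn->sd,
                 strerror(errno));
    }
    conn->err = errno;                  /* core_close() will strerror() this */

    core_close(ctx, conn);
}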
static void
req_log(struct msg *req)
{
    struct msg *rsp;           /* peer message (response) */
    int64_t req_time;          /* time cost for this request */
    char *peer_str;            /* peer client ip:port */
    uint32_t req_len, rsp_len; /* request and response length */
    struct string *req_type;   /* request type string */

    if (log_loggable(LOG_NOTICE) == 0) {
        return;
    }

    /* a fragment? */
    if (req->frag_id != 0 && req->frag_owner != req) {
        return;
    }

    /* conn close normally? */
    if (req->mlen == 0) {
        return;
    }

    req_time = nc_usec_now() - req->start_ts;

    rsp = req->peer;
    req_len = req->mlen;
    rsp_len = (rsp != NULL) ? rsp->mlen : 0;

    if (req->key_end) {
        req->key_end[0] = '\0';
    }

    /*
     * FIXME: add backend addr here
     * Maybe we can store addrstr just like server_pool in conn struct
     * when connections are resolved
     */
    peer_str = nc_unresolve_peer_desc(req->owner->sd);

    req_type = msg_type_string(req->type);

    log_debug(LOG_NOTICE, "req %"PRIu64" done on c %d req_time %"PRIi64".%03"PRIi64
              " msec type %.*s narg %"PRIu32" req_len %"PRIu32" rsp_len %"PRIu32
              " key0 '%s' peer '%s' done %d error %d",
              req->id, req->owner->sd, req_time / 1000, req_time % 1000,
              req_type->len, req_type->data, req->narg, req_len, rsp_len,
              req->key_start, peer_str, req->done, req->error);
}
static void
req_log(struct msg *req)
{
    struct msg *rsp;           /* peer message (response) */
    int64_t req_time;          /* time cost for this request */
    char *peer_str;            /* peer client ip:port */
    uint32_t req_len, rsp_len; /* request and response length */
    struct string *req_type;   /* request type string */
    struct keypos *kpos;

    if (log_loggable(LOG_NOTICE) == 0) {
        return;
    }

    /* a fake request? */
    if (req->owner == NULL) {
        return;
    }

    /* a fragment? */
    if (req->frag_id != 0 && req->frag_owner != req) {
        return;
    }

    /* conn close normally? */
    if (req->mlen == 0) {
        return;
    }

    /*
     * There is a race scenario where a request comes in while the log level
     * is below LOG_NOTICE, and the level is then raised to LOG_NOTICE (via
     * SIGTTIN or SIGTTOU) before the response arrives; in that case
     * req_log() would see msg->start_ts unset.
     */
    if (req->start_ts == 0) {
        return;
    }

    req_time = nc_usec_now() - req->start_ts;

    rsp = req->peer;
    req_len = req->mlen;
    rsp_len = (rsp != NULL) ? rsp->mlen : 0;

    if (array_n(req->keys) < 1) {
        return;
    }

    kpos = array_get(req->keys, 0);
    if (kpos->end != NULL) {
        *(kpos->end) = '\0';
    }

    /*
     * FIXME: add backend addr here
     * Maybe we can store addrstr just like server_pool in conn struct
     * when connections are resolved
     */
    peer_str = nc_unresolve_peer_desc(req->owner->sd);

    req_type = msg_type_string(req->type);

    log_debug(LOG_NOTICE, "req %"PRIu64" done on c %d req_time %"PRIi64".%03"PRIi64
              " msec type %.*s narg %"PRIu32" req_len %"PRIu32" rsp_len %"PRIu32
              " key0 '%s' peer '%s' done %d error %d",
              req->id, req->owner->sd, req_time / 1000, req_time % 1000,
              req_type->len, req_type->data, req->narg, req_len, rsp_len,
              kpos->start, peer_str, req->done, req->error);
}
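/*
 * Note that req_log() above NUL-terminates key0 in place, mutating the
 * request buffer. A non-destructive alternative (a sketch, assuming
 * twemproxy's struct keypos { uint8_t *start, *end; } and the nc_array
 * API) is to bound the key with a precision specifier instead; this also
 * generalizes to logging every key of a multi-key request:
 */
static void
req_log_keys_sketch(struct msg *req)
{
    uint32_t i, nkeys = array_n(req->keys);

    for (i = 0; i < nkeys; i++) {
        struct keypos *kp = array_get(req->keys, i);

        /* %.*s prints [start, end) without writing a '\0' into the buffer */
        log_debug(LOG_NOTICE, "key%"PRIu32" '%.*s'", i,
                  (int)(kp->end - kp->start), kp->start);
    }
}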
static rstatus_t
proxy_accept(struct context *ctx, struct conn *p)
{
    rstatus_t status;
    struct conn *c;
    int sd;

    ASSERT(p->proxy && !p->client);
    ASSERT(p->sd > 0);
    ASSERT(p->recv_active && p->recv_ready);

    for (;;) {
        sd = accept(p->sd, NULL, NULL);
        if (sd < 0) {
            if (errno == EINTR) {
                log_debug(LOG_VERB, "accept on p %d not ready - eintr", p->sd);
                continue;
            }

            if (errno == EAGAIN || errno == EWOULDBLOCK) {
                log_debug(LOG_VERB, "accept on p %d not ready - eagain", p->sd);
                p->recv_ready = 0;
                return NC_OK;
            }

            /*
             * FIXME: On EMFILE or ENFILE mask out IN event on the proxy; mask
             * it back in when some existing connection gets closed
             */

            log_error("accept on p %d failed: %s", p->sd, strerror(errno));
            return NC_ERROR;
        }

        break;
    }

    c = conn_get(p->owner, true, p->redis);
    if (c == NULL) {
        log_error("get conn for c %d from p %d failed: %s", sd, p->sd,
                  strerror(errno));
        status = close(sd);
        if (status < 0) {
            log_error("close c %d failed, ignored: %s", sd, strerror(errno));
        }
        return NC_ENOMEM;
    }
    c->sd = sd;

    stats_pool_incr(ctx, c->owner, client_connections);

    status = nc_set_nonblocking(c->sd);
    if (status < 0) {
        log_error("set nonblock on c %d from p %d failed: %s", c->sd, p->sd,
                  strerror(errno));
        c->close(ctx, c);
        return status;
    }

    if (p->family == AF_INET || p->family == AF_INET6) {
        status = nc_set_tcpnodelay(c->sd);
        if (status < 0) {
            log_warn("set tcpnodelay on c %d from p %d failed, ignored: %s",
                     c->sd, p->sd, strerror(errno));
        }
    }

    status = event_add_conn(ctx->center->ep, c);
    if (status < 0) {
        log_error("event add conn of c %d from p %d failed: %s", c->sd, p->sd,
                  strerror(errno));
        c->close(ctx, c);
        return status;
    }

    log_debug(LOG_NOTICE, "accepted c %d on p %d from '%s'", c->sd, p->sd,
              nc_unresolve_peer_desc(c->sd));

    return NC_OK;
}
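/*
 * All of the accept paths here rely on the sockets being non-blocking:
 * accept() must return EAGAIN instead of stalling the event loop, and the
 * accepted fd is made non-blocking before it is registered for events. For
 * reference, a sketch of what nc_set_nonblocking() is expected to do
 * (twemproxy implements it with fcntl() in nc_util.c):
 */
static int
set_nonblocking_sketch(int sd)
{
    int flags;

    flags = fcntl(sd, F_GETFL, 0);      /* read the current descriptor flags */
    if (flags < 0) {
        return flags;
    }

    return fcntl(sd, F_SETFL, flags | O_NONBLOCK);
}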
/*
 * After accepting a client connection, a new fd is returned; create a new
 * conn for that fd to read the client's data.
 */
static rstatus_t
proxy_accept(struct context *ctx, struct conn *p) /* p is the proxy conn, i.e. the conn listening for clients */
{
    rstatus_t status;
    struct conn *c;
    int sd;
    struct server_pool *pool = p->owner;

    ASSERT(p->proxy && !p->client);
    ASSERT(p->sd > 0);
    ASSERT(p->recv_active && p->recv_ready);

    for (;;) {
        sd = accept(p->sd, NULL, NULL); /* accept a new client connection, producing a new fd */
        if (sd < 0) {
            if (errno == EINTR) {
                log_debug(LOG_VERB, "accept on p %d not ready - eintr", p->sd);
                continue;
            }

            if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ECONNABORTED) {
                log_debug(LOG_VERB, "accept on p %d not ready - eagain", p->sd);
                p->recv_ready = 0;
                return NC_OK;
            }

            /*
             * Workaround of https://github.com/twitter/twemproxy/issues/97
             *
             * We should never reach here because the check of conn_ncurr_cconn()
             * against ctx->max_ncconn should catch this earlier in the cycle.
             * If we do reach here, ignore EMFILE/ENFILE; returning NC_OK lets
             * the server continue to run instead of closing the server socket.
             *
             * The right solution, however, is on EMFILE/ENFILE to mask out the
             * IN event on the proxy and mask it back in when some existing
             * connection gets closed.
             */
            if (errno == EMFILE || errno == ENFILE) {
                log_debug(LOG_CRIT, "accept on p %d with max fds %"PRIu32" "
                          "used connections %"PRIu32" max client connections %"PRIu32" "
                          "curr client connections %"PRIu32" failed: %s",
                          p->sd, ctx->max_nfd, conn_ncurr_conn(),
                          ctx->max_ncconn, conn_ncurr_cconn(), strerror(errno));

                p->recv_ready = 0;

                return NC_OK;
            }

            log_error("accept on p %d failed: %s", p->sd, strerror(errno));
            return NC_ERROR;
        }

        break;
    }

    if (conn_ncurr_cconn() >= ctx->max_ncconn) {
        log_debug(LOG_CRIT, "client connections %"PRIu32" exceed limit %"PRIu32,
                  conn_ncurr_cconn(), ctx->max_ncconn);
        status = close(sd);
        if (status < 0) {
            log_error("close c %d failed, ignored: %s", sd, strerror(errno));
        }
        return NC_OK;
    }

    c = conn_get(p->owner, true, p->redis);
    if (c == NULL) {
        log_error("get conn for c %d from p %d failed: %s", sd, p->sd,
                  strerror(errno));
        status = close(sd);
        if (status < 0) {
            log_error("close c %d failed, ignored: %s", sd, strerror(errno));
        }
        return NC_ENOMEM;
    }
    c->sd = sd;

    stats_pool_incr(ctx, c->owner, client_connections);

    status = nc_set_nonblocking(c->sd);
    if (status < 0) {
        log_error("set nonblock on c %d from p %d failed: %s", c->sd, p->sd,
                  strerror(errno));
        c->close(ctx, c);
        return status;
    }

    if (pool->tcpkeepalive) {
        status = nc_set_tcpkeepalive(c->sd);
        if (status < 0) {
            log_warn("set tcpkeepalive on c %d from p %d failed, ignored: %s",
                     c->sd, p->sd, strerror(errno));
        }
    }

    if (p->family == AF_INET || p->family == AF_INET6) {
        status = nc_set_tcpnodelay(c->sd);
        if (status < 0) {
            log_warn("set tcpnodelay on c %d from p %d failed, ignored: %s",
                     c->sd, p->sd, strerror(errno));
        }
    }

    status = event_add_conn(ctx->evb, c);
    if (status < 0) {
        log_error("event add conn from p %d failed: %s", p->sd,
                  strerror(errno));
        c->close(ctx, c);
        return status;
    }

    log_debug(LOG_INFO, "accepted c %d on p %d from '%s'", c->sd, p->sd,
              nc_unresolve_peer_desc(c->sd));

    return NC_OK;
}
static rstatus_t
proxy_accept(struct context *ctx, struct conn *p)
{
    rstatus_t status;
    struct conn *c;
    int sd;
    struct sockaddr_storage addr;
    socklen_t addr_len;

    ASSERT(p->proxy && !p->client);
    ASSERT(p->sd > 0);
    ASSERT(p->recv_active && p->recv_ready);

    for (;;) {
        sd = accept(p->sd, NULL, NULL);
        if (sd < 0) {
            if (errno == EINTR) {
                log_debug(LOG_VERB, "accept on p %d not ready - eintr", p->sd);
                continue;
            }

            if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ECONNABORTED) {
                log_debug(LOG_VERB, "accept on p %d not ready - eagain", p->sd);
                p->recv_ready = 0;
                return NC_OK;
            }

            /*
             * FIXME: On EMFILE or ENFILE mask out IN event on the proxy; mask
             * it back in when some existing connection gets closed
             */

            /*
             * Workaround of https://github.com/twitter/twemproxy/issues/97
             * Just ignore EMFILE/ENFILE; returning NC_OK lets the server
             * continue to run instead of closing the server socket
             */
            if (errno == EMFILE || errno == ENFILE) {
                log_crit("accept on p %d failed: %s", p->sd, strerror(errno));

                p->recv_ready = 0;

                log_crit("connections status: rlimit nofile %d, "
                         "used connections: %d, max client connections %d, "
                         "curr client connections %d", ctx->rlimit_nofile,
                         conn_ncurr(), ctx->max_ncconn, conn_ncurr_cconn());

                /*
                 * Since we maintain a safe max_ncconn and check it after
                 * every accept, we should not reach here; so we panic
                 * after this log.
                 */
                log_panic("HIT MAX OPEN FILES, IT SHOULD NOT HAPPEN. ABORT.");

                return NC_OK;
            }

            log_error("accept on p %d failed: %s", p->sd, strerror(errno));
            return NC_ERROR;
        }

        addr_len = sizeof(addr);
        if (getsockname(sd, (struct sockaddr *)&addr, &addr_len)) {
            log_error("getsockname on p %d failed: %s", p->sd, strerror(errno));
            close(sd);
            continue;
        }

        break;
    }

    if (conn_ncurr_cconn() >= ctx->max_ncconn) {
        stats_pool_incr(ctx, p->owner, rejected_connections);

        log_crit("client connections %d exceed limit %d",
                 conn_ncurr_cconn(), ctx->max_ncconn);

        status = close(sd);
        if (status < 0) {
            log_error("close c %d failed, ignored: %s", sd, strerror(errno));
        }
        return NC_OK;
    }

    c = conn_get(p->owner, true, p->redis);
    if (c == NULL) {
        log_error("get conn for c %d from p %d failed: %s", sd, p->sd,
                  strerror(errno));
        status = close(sd);
        if (status < 0) {
            log_error("close c %d failed, ignored: %s", sd, strerror(errno));
        }
        return NC_ENOMEM;
    }
    c->sd = sd;
    c->family = addr.ss_family;
    c->addrlen = addr_len;
    c->ss = addr;
    c->addr = (struct sockaddr *)&c->ss;

    stats_pool_incr(ctx, c->owner, client_connections);

    status = nc_set_nonblocking(c->sd);
    if (status < 0) {
        log_error("set nonblock on c %d from p %d failed: %s", c->sd, p->sd,
                  strerror(errno));
        c->close(ctx, c);
        return status;
    }

    if (p->family == AF_INET || p->family == AF_INET6) {
        status = nc_set_tcpnodelay(c->sd);
        if (status < 0) {
            log_warn("set tcpnodelay on c %d from p %d failed, ignored: %s",
                     c->sd, p->sd, strerror(errno));
        }
    }

    status = event_add_conn(ctx->evb, c);
    if (status < 0) {
        log_error("event add conn from p %d failed: %s", p->sd,
                  strerror(errno));
        c->close(ctx, c);
        return status;
    }

    log_notice("accepted c %d on p %d from '%s'", c->sd, p->sd,
               nc_unresolve_peer_desc(c->sd));

    return NC_OK;
}
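/*
 * The FIXME above (mask the IN event out on EMFILE/ENFILE and back in on
 * close) is one fix; another well-known workaround, not taken from this
 * tree, is to hold a spare fd so that when the table fills up the process
 * can still accept and immediately close the pending connection instead of
 * leaving the listener permanently readable. A hedged sketch; spare_fd and
 * its management are hypothetical:
 */
static int spare_fd = -1;  /* at startup: spare_fd = open("/dev/null", O_RDONLY); */

static void
shed_pending_connection(int listen_sd)
{
    int sd;

    close(spare_fd);                    /* free one slot in the fd table */

    sd = accept(listen_sd, NULL, NULL); /* now the accept can succeed */
    if (sd >= 0) {
        close(sd);                      /* drop the client with a clean FIN */
    }

    spare_fd = open("/dev/null", O_RDONLY); /* re-arm the spare slot */
}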