/*
 * Enqueue an outbound request onto the server connection's in-queue
 * (conn->imsg_q); a later write event drains this queue toward the
 * backend server.
 *
 * ctx  - proxy context used for stats accounting
 * conn - destination server connection (must not be a client or proxy conn)
 * msg  - the request to enqueue
 */
void req_server_enqueue_imsgq(struct context *ctx, struct conn *conn, struct msg *msg) {
    ASSERT(msg->request);
    /* only server connections carry an imsg_q */
    ASSERT(!conn->client && !conn->proxy);

    /*
     * timeout clock starts ticking the instant the message is enqueued into
     * the server in_q; the clock continues to tick until it either expires
     * or the message is dequeued from the server out_q
     *
     * noreply requests are free from timeouts because the client is not
     * interested in the response anyway!
     */
    if (!msg->noreply) {
        msg_tmo_insert(msg, conn);
    }

    TAILQ_INSERT_TAIL(&conn->imsg_q, msg, s_tqe);

    /* account for both the request count and its byte size in the in-queue */
    stats_server_incr(ctx, conn->owner, in_queue);
    stats_server_incr_by(ctx, conn->owner, in_queue_bytes, msg->mlen);
}
//接收的客户端msg信息通过req_server_enqueue_imsgq添加到该队列 在req_send_next中发往后端真实服务器 //msg数据添加到conn->imsg_q队列入队,在req_send_next中发往后端真实服务器 void //req_server_enqueue_imsgq添加到队列尾部,req_server_enqueue_imsgq_head添加到队列头部 req_server_enqueue_imsgq(struct context *ctx, struct conn *conn, struct msg *msg) {//req_forward中执行 req_forward->req_server_enqueue_imsgq ASSERT(msg->request); ASSERT(!conn->client && !conn->proxy); /* * timeout clock starts ticking the instant the message is enqueued into * the server in_q; the clock continues to tick until it either expires * or the message is dequeued from the server out_q * * noreply request are free from timeouts because client is not intrested * in the response anyway! */ if (!msg->noreply) { //该msg需要得到应答,添加到红黑树超时定时器 //该定时器在core_timeout中删除 msg_tmo_insert(msg, conn); //添加到tmo_rbt 该msg需要发往后端真实服务器,并且需要等待对方应答 } TAILQ_INSERT_TAIL(&conn->imsg_q, msg, s_tqe);//在core_core中的写事件把imsg_q中的msg发送出去 stats_server_incr(ctx, conn->owner, in_queue); stats_server_incr_by(ctx, conn->owner, in_queue_bytes, msg->mlen); }
static void rsp_forward_stats(struct context *ctx, struct msg *msg, struct conn *s_conn, struct conn *c_conn) { struct msg *pmsg; struct server *server; ASSERT(!s_conn->client && !s_conn->proxy); ASSERT(c_conn->client && !c_conn->proxy); ASSERT(!msg->request && msg->peer != NULL); server = s_conn->owner; pmsg = msg->peer; stats_server_incr(ctx, server, responses); stats_server_incr_by(ctx, server, response_bytes, msg->mlen); switch (msg->type) { case MSG_RSP_NUM: stats_server_incr(ctx, server, num); break; case MSG_RSP_STORED: stats_server_incr(ctx, server, stored); break; case MSG_RSP_NOT_STORED: stats_server_incr(ctx, server, not_stored); break; case MSG_RSP_EXISTS: stats_server_incr(ctx, server, exists); break; case MSG_RSP_NOT_FOUND: stats_server_incr(ctx, server, not_found); break; case MSG_RSP_END: stats_server_incr(ctx, server, end); break; case MSG_RSP_VALUE: stats_server_incr(ctx, server, value); break; case MSG_RSP_DELETED: stats_server_incr(ctx, server, deleted); break; case MSG_RSP_ERROR: log_debug(LOG_INFO, "rsp error type %d from s %d for req %"PRIu64" " "type %d from c %d", msg->type, s_conn->sd, pmsg->id, pmsg->type, c_conn->sd); stats_server_incr(ctx, server, error); break; case MSG_RSP_CLIENT_ERROR: log_debug(LOG_INFO, "rsp error type %d from s %d for req %"PRIu64" " "type %d from c %d", msg->type, s_conn->sd, pmsg->id, pmsg->type, c_conn->sd); stats_server_incr(ctx, server, client_error); break; case MSG_RSP_SERVER_ERROR: log_debug(LOG_INFO, "rsp error type %d from s %d for req %"PRIu64" " "type %d from c %d", msg->type, s_conn->sd, pmsg->id, pmsg->type, c_conn->sd); stats_server_incr(ctx, server, server_error); break; default: NOT_REACHED(); } }
void server_close(struct context *ctx, struct conn *conn) { rstatus_t status; struct msg *msg, *nmsg; /* current and next message */ ASSERT(conn->type == CONN_SERVER); server_close_stats(ctx, conn->owner, conn->err, conn->eof, conn->connected); if (conn->sd < 0) { server_failure(ctx, conn->owner); conn_unref(conn); conn_put(conn); return; } for (msg = TAILQ_FIRST(&conn->omsg_q); msg != NULL; msg = nmsg) { nmsg = TAILQ_NEXT(msg, s_tqe); /* dequeue the message (request) from server outq */ conn_dequeue_outq(ctx, conn, msg); server_ack_err(ctx, conn, msg); } ASSERT(TAILQ_EMPTY(&conn->omsg_q)); for (msg = TAILQ_FIRST(&conn->imsg_q); msg != NULL; msg = nmsg) { nmsg = TAILQ_NEXT(msg, s_tqe); /* dequeue the message (request) from server inq */ conn_dequeue_inq(ctx, conn, msg); // We should also remove the msg from the timeout rbtree. msg_tmo_delete(msg); server_ack_err(ctx, conn, msg); stats_server_incr(ctx, conn->owner, server_dropped_requests); } ASSERT(TAILQ_EMPTY(&conn->imsg_q)); msg = conn->rmsg; if (msg != NULL) { conn->rmsg = NULL; ASSERT(!msg->request); ASSERT(msg->peer == NULL); rsp_put(msg); log_debug(LOG_INFO, "close s %d discarding rsp %"PRIu64" len %"PRIu32" " "in error", conn->sd, msg->id, msg->mlen); } ASSERT(conn->smsg == NULL); server_failure(ctx, conn->owner); conn_unref(conn); status = close(conn->sd); if (status < 0) { log_error("close s %d failed, ignored: %s", conn->sd, strerror(errno)); } conn->sd = -1; conn_put(conn); }
/*
 * Bump a per-command-family stat counter for an inbound redis request.
 *
 * NOTE(review): the stats_server_incr calls here pass two arguments while
 * other call sites in this file pass three (ctx, server, field) —
 * presumably a different macro variant is in scope; confirm.
 */
static void
req_redis_stats(struct context *ctx, struct msg *msg)
{
    switch (msg->type) {
    case MSG_REQ_REDIS_GET:
        stats_server_incr(ctx, redis_req_get);
        break;
    case MSG_REQ_REDIS_SET:
        stats_server_incr(ctx, redis_req_set);
        break;
    case MSG_REQ_REDIS_DEL:
        stats_server_incr(ctx, redis_req_del);
        break;
    case MSG_REQ_REDIS_INCR:
    case MSG_REQ_REDIS_DECR:
        stats_server_incr(ctx, redis_req_incr_decr);
        break;
    case MSG_REQ_REDIS_KEYS:
        stats_server_incr(ctx, redis_req_keys);
        break;
    case MSG_REQ_REDIS_MGET:
        stats_server_incr(ctx, redis_req_mget);
        break;
    case MSG_REQ_REDIS_SCAN:
        stats_server_incr(ctx, redis_req_scan);
        break;
    case MSG_REQ_REDIS_SORT:
        stats_server_incr(ctx, redis_req_sort);
        break;
    case MSG_REQ_REDIS_PING:
        stats_server_incr(ctx, redis_req_ping);
        break;
    case MSG_REQ_REDIS_LREM:
        /* NOTE(review): "redis_req_lreqm" looks like a typo for
         * "redis_req_lrem" — confirm against the stats field declaration. */
        stats_server_incr(ctx, redis_req_lreqm);
        /* do not break as this is a list operation as the following.
         * We count twice the LREM because it is an intensive operation.
         */
    case MSG_REQ_REDIS_LRANGE:
    case MSG_REQ_REDIS_LSET:
    case MSG_REQ_REDIS_LTRIM:
    case MSG_REQ_REDIS_LINDEX:
    case MSG_REQ_REDIS_LPUSHX:
        stats_server_incr(ctx, redis_req_lists);
        break;
    case MSG_REQ_REDIS_SUNION:
        stats_server_incr(ctx, redis_req_sunion);
        /* do not break as this is a set operation as the following.
         * We count twice the SUNION because it is an intensive operation.
         */
    /* NOTE(review): SETBIT/SETEX/SETRANGE are string commands, not set
     * commands, yet they fall in this group and increment redis_req_set —
     * the same counter used for plain SET above. Confirm this grouping is
     * intentional. */
    case MSG_REQ_REDIS_SETBIT:
    case MSG_REQ_REDIS_SETEX:
    case MSG_REQ_REDIS_SETRANGE:
    case MSG_REQ_REDIS_SADD:
    case MSG_REQ_REDIS_SDIFF:
    case MSG_REQ_REDIS_SDIFFSTORE:
    case MSG_REQ_REDIS_SINTER:
    case MSG_REQ_REDIS_SINTERSTORE:
    case MSG_REQ_REDIS_SREM:
    case MSG_REQ_REDIS_SUNIONSTORE:
    case MSG_REQ_REDIS_SSCAN:
        stats_server_incr(ctx, redis_req_set);
        break;
    /* sorted-set family */
    case MSG_REQ_REDIS_ZADD:
    case MSG_REQ_REDIS_ZINTERSTORE:
    case MSG_REQ_REDIS_ZRANGE:
    case MSG_REQ_REDIS_ZRANGEBYSCORE:
    case MSG_REQ_REDIS_ZREM:
    case MSG_REQ_REDIS_ZREVRANGE:
    case MSG_REQ_REDIS_ZREVRANGEBYSCORE:
    case MSG_REQ_REDIS_ZUNIONSTORE:
    case MSG_REQ_REDIS_ZSCAN:
    case MSG_REQ_REDIS_ZCOUNT:
    case MSG_REQ_REDIS_ZINCRBY:
    case MSG_REQ_REDIS_ZREMRANGEBYRANK:
    case MSG_REQ_REDIS_ZREMRANGEBYSCORE:
        stats_server_incr(ctx, redis_req_sortedsets);
        break;
    /* hash family */
    case MSG_REQ_REDIS_HINCRBY:
    case MSG_REQ_REDIS_HINCRBYFLOAT:
    case MSG_REQ_REDIS_HSET:
    case MSG_REQ_REDIS_HSETNX:
        stats_server_incr(ctx, redis_req_hashes);
        break;
    default:
        /* any command without a dedicated counter */
        stats_server_incr(ctx, redis_req_other);
        break;
    }
}