//通过ngx_http_top_request_body_filter调用 ngx_int_t ngx_http_request_body_save_filter(ngx_http_request_t *r, ngx_chain_t *in) { #if (NGX_DEBUG) ngx_chain_t *cl; #endif ngx_http_request_body_t *rb; rb = r->request_body; #if (NGX_DEBUG) for (cl = rb->bufs; cl; cl = cl->next) { ngx_log_debug7(NGX_LOG_DEBUG_EVENT, r->connection->log, 0, "http body old buf t:%d f:%d %p, pos %p, size: %z " "file: %O, size: %O", cl->buf->temporary, cl->buf->in_file, cl->buf->start, cl->buf->pos, cl->buf->last - cl->buf->pos, cl->buf->file_pos, cl->buf->file_last - cl->buf->file_pos); } for (cl = in; cl; cl = cl->next) { ngx_log_debug7(NGX_LOG_DEBUG_EVENT, r->connection->log, 0, "http body new buf t:%d f:%d %p, pos %p, size: %z " "file: %O, size: %O", cl->buf->temporary, cl->buf->in_file, cl->buf->start, cl->buf->pos, cl->buf->last - cl->buf->pos, cl->buf->file_pos, cl->buf->file_last - cl->buf->file_pos); } #endif /* TODO: coalesce neighbouring buffers */ if (ngx_chain_add_copy(r->pool, &rb->bufs, in) != NGX_OK) { return NGX_HTTP_INTERNAL_SERVER_ERROR; } //当一个rb->buf填满后就会通过ngx_http_write_request_body把bufs链表中的所有ngx_chain_t->ngx_buf_t中指向的数据 //写入到临时文件,并把ngx_buf_t结构加入poll->chain,通过poll统一释放他们 if (rb->rest > 0 && rb->buf && rb->buf->last == rb->buf->end && !r->request_body_no_buffering) { //需要缓存数据,并且rb->buf数据已经解析完毕,并且buf已经满了,但是包体还没有读完,那么就可以把buf中的数据写入临时文件, //这样改buf指向的内存空间在该函数退出后可以继续用来读取数据 if (ngx_http_write_request_body(r) != NGX_OK) { return NGX_HTTP_INTERNAL_SERVER_ERROR; } } return NGX_OK; }
/*
 * Body-save filter: accumulates incoming request-body buffers on
 * rb->bufs and flushes them to the temporary file once the read buffer
 * is exhausted while more body data is still pending.
 */
ngx_int_t
ngx_http_request_body_save_filter(ngx_http_request_t *r, ngx_chain_t *in)
{
#if (NGX_DEBUG)
    ngx_chain_t              *ln;
#endif
    ngx_http_request_body_t  *rb;

    rb = r->request_body;

#if (NGX_DEBUG)

    for (ln = rb->bufs; ln != NULL; ln = ln->next) {
        ngx_log_debug7(NGX_LOG_DEBUG_EVENT, r->connection->log, 0,
                       "http body old buf t:%d f:%d %p, pos %p, size: %z "
                       "file: %O, size: %O",
                       ln->buf->temporary, ln->buf->in_file,
                       ln->buf->start, ln->buf->pos,
                       ln->buf->last - ln->buf->pos,
                       ln->buf->file_pos,
                       ln->buf->file_last - ln->buf->file_pos);
    }

    for (ln = in; ln != NULL; ln = ln->next) {
        ngx_log_debug7(NGX_LOG_DEBUG_EVENT, r->connection->log, 0,
                       "http body new buf t:%d f:%d %p, pos %p, size: %z "
                       "file: %O, size: %O",
                       ln->buf->temporary, ln->buf->in_file,
                       ln->buf->start, ln->buf->pos,
                       ln->buf->last - ln->buf->pos,
                       ln->buf->file_pos,
                       ln->buf->file_last - ln->buf->file_pos);
    }

#endif

    /* TODO: coalesce neighbouring buffers */

    if (ngx_chain_add_copy(r->pool, &rb->bufs, in) != NGX_OK) {
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }

    /*
     * Spill the accumulated chain to the temporary file when more body
     * bytes are expected, buffering is enabled and the read buffer has
     * been filled to its end.
     */
    if (rb->rest > 0
        && rb->buf != NULL
        && rb->buf->last == rb->buf->end
        && !r->request_body_no_buffering)
    {
        if (ngx_http_write_request_body(r) != NGX_OK) {
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }
    }

    return NGX_OK;
}
/* a rough check of server ssl_hello responses */
static ngx_int_t
ngx_tcp_check_ssl_hello_parse(ngx_tcp_check_peer_conf_t *peer_conf)
{
    size_t               len;
    server_ssl_hello_t  *hello;
    ngx_tcp_check_ctx   *ctx;

    ctx = peer_conf->check_data;

    /* wait until at least one complete hello header has arrived */
    len = ctx->recv.last - ctx->recv.pos;
    if (len < sizeof(server_ssl_hello_t)) {
        return NGX_AGAIN;
    }

    hello = (server_ssl_hello_t *) ctx->recv.pos;

    ngx_log_debug7(NGX_LOG_DEBUG_TCP, ngx_cycle->log, 0,
                   "tcp check ssl_parse, type: %d, version: %d.%d, "
                   "length: %d, handshanke_type: %d, hello_version: %d.%d",
                   hello->msg_type, hello->version.major,
                   hello->version.minor, ntohs(hello->length),
                   hello->handshake_type, hello->hello_version.major,
                   hello->hello_version.minor);

    /* anything but a ServerHello handshake record counts as a failure */
    if (hello->msg_type != HANDSHAKE
        || hello->handshake_type != SERVER_HELLO)
    {
        return NGX_ERROR;
    }

    return NGX_OK;
}
// 将request body 的数据打印出来 static size_t ngx_http_dump_request_body(ngx_http_request_t* r, void** post_content_ptr) { ngx_http_request_body_t* rb; u_char* post_content = NULL; rb = r->request_body; ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, "ngx_http_dump_request_body entried - [rainx]"); if (!rb) { ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, "request body is empty - [rainx]"); return 0; } // check a bit ngx_log_debug7(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, "\nParameters dump is : [ rb->temp_file = %p ]\n" " [ rb->bufs = %p ]\n" " [ rb->bufs->buf = %p ]\n" " [ rb->buf = %p ]\n" " [ rb->rest = %d ]\n" " [ rb->to_write = %d ]\n" " [ buf size = %d ]\n", " [ buf pos = %d ]\n", " [ buf last = %d ]\n", rb->temp_file, rb->bufs, rb->bufs->buf, rb->buf, rb->rest, rb->to_write, rb->buf->end - rb->buf->start ); ngx_log_debug2(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, "\n [ buf pos = %p ]\n" " [ buf last = %p ]\n", rb->buf->pos, rb->buf->last ); post_content = ngx_palloc(r->pool, rb->buf->last - rb->buf->pos + 1); ngx_cpystrn(post_content, rb->buf->pos, rb->buf->last - rb->buf->pos + 1); // ngx_cpystrn()长度参数应为实际数据长度+1,是其for循环编写有误所致…… ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, "\n [ buf pos data = %s ]\n" " [ data length = %d ]\n", post_content, rb->buf->last - rb->buf->pos ); *post_content_ptr = post_content; return rb->buf->last - rb->buf->pos; }
/*
 * FreeTDS message callback: routes server informational messages for a
 * DBPROCESS into the owning connection's log (falling back to the cycle
 * log) at debug level.  Always returns 0, as FreeTDS requires.
 */
static int
ngx_dbd_freetds_msg_handler(DBPROCESS *db, DBINT msgno, int msgstate,
    int severity, char *msgtext, char *srvname, char *procname,
    DBUSMALLINT line)
{
    ngx_dbd_t  *dbd;
    ngx_log_t  *log;

    /* prefer the connection's own log when the DBPROCESS carries one */
    dbd = dbgetuserdata(db);
    log = (dbd != NULL) ? dbd->log : ngx_cycle->log;

    /* normalize optional strings so the formatter never sees NULL */
    msgtext = (msgtext != NULL) ? msgtext : "";
    srvname = (srvname != NULL) ? srvname : "";
    procname = (procname != NULL) ? procname : "";

    /* TODO */

    ngx_log_debug7(NGX_LOG_DEBUG_CORE, log, 0,
                   "msgno: %d, msgstate: %d, severity: %d, "
                   "msgtext: %s, srvname: %s, procname: %s, line: %d",
                   msgno, msgstate, severity, msgtext, srvname, procname,
                   line);

    return 0;
}
/*
 * Decodes a chunked-encoded request body: parses chunk framing out of
 * the raw buffers in "in", collects the payload bytes into a new chain
 * "out" and passes it down the request-body filter chain.
 *
 * r->headers_in.content_length_n accumulates the decoded body length;
 * rb->rest holds the number of bytes wanted on the next call
 * (-1 means "not initialized yet", 0 means "body complete").
 */
static ngx_int_t
ngx_http_request_body_chunked_filter(ngx_http_request_t *r, ngx_chain_t *in)
{
    size_t                     size;
    ngx_int_t                  rc;
    ngx_buf_t                 *b;
    ngx_chain_t               *cl, *out, *tl, **ll;
    ngx_http_request_body_t   *rb;
    ngx_http_core_loc_conf_t  *clcf;

    rb = r->request_body;

    if (rb->rest == -1) {

        /* first invocation: allocate and reset the chunked parser state */

        ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                       "http request body chunked filter");

        rb->chunked = ngx_pcalloc(r->pool, sizeof(ngx_http_chunked_t));
        if (rb->chunked == NULL) {
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }

        r->headers_in.content_length_n = 0;

        /* minimum bytes needed to make progress on "0" CRLF CRLF */
        rb->rest = 3;
    }

    out = NULL;
    ll = &out;

    for (cl = in; cl; cl = cl->next) {

        /* parse as many chunks as this buffer contains */
        for ( ;; ) {

            ngx_log_debug7(NGX_LOG_DEBUG_EVENT, r->connection->log, 0,
                           "http body chunked buf "
                           "t:%d f:%d %p, pos %p, size: %z file: %O, size: %O",
                           cl->buf->temporary, cl->buf->in_file,
                           cl->buf->start, cl->buf->pos,
                           cl->buf->last - cl->buf->pos,
                           cl->buf->file_pos,
                           cl->buf->file_last - cl->buf->file_pos);

            rc = ngx_http_parse_chunked(r, cl->buf, rb->chunked);

            if (rc == NGX_OK) {

                /* a chunk has been parsed successfully */

                clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module);

                /* enforce client_max_body_size on the decoded length */
                if (clcf->client_max_body_size
                    && clcf->client_max_body_size
                       - r->headers_in.content_length_n < rb->chunked->size)
                {
                    ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
                                  "client intended to send too large chunked "
                                  "body: %O+%O bytes",
                                  r->headers_in.content_length_n,
                                  rb->chunked->size);

                    r->lingering_close = 1;

                    return NGX_HTTP_REQUEST_ENTITY_TOO_LARGE;
                }

                tl = ngx_chain_get_free_buf(r->pool, &rb->free);
                if (tl == NULL) {
                    return NGX_HTTP_INTERNAL_SERVER_ERROR;
                }

                b = tl->buf;

                ngx_memzero(b, sizeof(ngx_buf_t));

                /* the output buffer aliases the input buffer's memory */
                b->temporary = 1;
                b->tag = (ngx_buf_tag_t) &ngx_http_read_client_request_body;
                b->start = cl->buf->pos;
                b->pos = cl->buf->pos;
                b->last = cl->buf->last;
                b->end = cl->buf->end;
                b->flush = r->request_body_no_buffering;

                *ll = tl;
                ll = &tl->next;

                /* consume at most the current chunk's remaining payload */
                size = cl->buf->last - cl->buf->pos;

                if ((off_t) size > rb->chunked->size) {
                    cl->buf->pos += (size_t) rb->chunked->size;
                    r->headers_in.content_length_n += rb->chunked->size;
                    rb->chunked->size = 0;

                } else {
                    rb->chunked->size -= size;
                    r->headers_in.content_length_n += size;
                    cl->buf->pos = cl->buf->last;
                }

                /* clip the output buffer to the bytes actually consumed */
                b->last = cl->buf->pos;

                continue;
            }

            if (rc == NGX_DONE) {

                /* a whole response has been parsed successfully */

                rb->rest = 0;

                /* append a special buffer carrying only the last_buf flag */
                tl = ngx_chain_get_free_buf(r->pool, &rb->free);
                if (tl == NULL) {
                    return NGX_HTTP_INTERNAL_SERVER_ERROR;
                }

                b = tl->buf;

                ngx_memzero(b, sizeof(ngx_buf_t));

                b->last_buf = 1;

                *ll = tl;
                ll = &tl->next;

                break;
            }

            if (rc == NGX_AGAIN) {

                /* set rb->rest, amount of data we want to see next time */

                rb->rest = rb->chunked->length;

                break;
            }

            /* invalid */

            ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
                          "client sent invalid chunked body");

            return NGX_HTTP_BAD_REQUEST;
        }
    }

    /* hand the decoded chain to the next request-body filter */
    rc = ngx_http_top_request_body_filter(r, out);

    /* move consumed buffers to rb->free, keep in-flight ones on rb->busy */
    ngx_chain_update_chains(r->pool, &rb->free, &rb->busy, &out,
                            (ngx_buf_tag_t) &ngx_http_read_client_request_body);

    return rc;
}
/*
 * Final output filter: appends "in" to the pending chain r->out,
 * decides whether to send now (postpone_output, limit_rate, delayed
 * write event) and pushes data to the client via c->send_chain().
 *
 * Returns NGX_OK when everything queued was sent, NGX_AGAIN when data
 * remains buffered in r->out, NGX_ERROR on connection failure.
 */
ngx_int_t
ngx_http_write_filter(ngx_http_request_t *r, ngx_chain_t *in)
{
    off_t                      size, sent, nsent, limit;
    ngx_uint_t                 last, flush, sync;
    ngx_msec_t                 delay;
    ngx_chain_t               *cl, *ln, **ll, *chain;
    ngx_connection_t          *c;
    ngx_http_core_loc_conf_t  *clcf;

    c = r->connection;

    /* a broken connection cannot accept further output */
    if (c->error) {
        return NGX_ERROR;
    }

    size = 0;
    flush = 0;
    sync = 0;
    last = 0;
    ll = &r->out;

    /* find the size, the flush point and the last link of the saved chain */

    for (cl = r->out; cl; cl = cl->next) {
        ll = &cl->next;

        ngx_log_debug7(NGX_LOG_DEBUG_EVENT, c->log, 0,
                       "write old buf t:%d f:%d %p, pos %p, size: %z "
                       "file: %O, size: %O",
                       cl->buf->temporary, cl->buf->in_file,
                       cl->buf->start, cl->buf->pos,
                       cl->buf->last - cl->buf->pos,
                       cl->buf->file_pos,
                       cl->buf->file_last - cl->buf->file_pos);

#if 1
        /* a zero-size non-special buffer indicates an upstream filter bug */
        if (ngx_buf_size(cl->buf) == 0 && !ngx_buf_special(cl->buf)) {
            ngx_log_error(NGX_LOG_ALERT, c->log, 0,
                          "zero size buf in writer "
                          "t:%d r:%d f:%d %p %p-%p %p %O-%O",
                          cl->buf->temporary,
                          cl->buf->recycled,
                          cl->buf->in_file,
                          cl->buf->start,
                          cl->buf->pos,
                          cl->buf->last,
                          cl->buf->file,
                          cl->buf->file_pos,
                          cl->buf->file_last);

            ngx_debug_point();
            return NGX_ERROR;
        }
#endif

        size += ngx_buf_size(cl->buf);

        if (cl->buf->flush || cl->buf->recycled) {
            flush = 1;
        }

        if (cl->buf->sync) {
            sync = 1;
        }

        if (cl->buf->last_buf) {
            last = 1;
        }
    }

    /* add the new chain to the existent one */

    for (ln = in; ln; ln = ln->next) {
        cl = ngx_alloc_chain_link(r->pool);
        if (cl == NULL) {
            return NGX_ERROR;
        }

        /* share the buffer, allocate only a fresh chain link */
        cl->buf = ln->buf;
        *ll = cl;
        ll = &cl->next;

        ngx_log_debug7(NGX_LOG_DEBUG_EVENT, c->log, 0,
                       "write new buf t:%d f:%d %p, pos %p, size: %z "
                       "file: %O, size: %O",
                       cl->buf->temporary, cl->buf->in_file,
                       cl->buf->start, cl->buf->pos,
                       cl->buf->last - cl->buf->pos,
                       cl->buf->file_pos,
                       cl->buf->file_last - cl->buf->file_pos);

#if 1
        if (ngx_buf_size(cl->buf) == 0 && !ngx_buf_special(cl->buf)) {
            ngx_log_error(NGX_LOG_ALERT, c->log, 0,
                          "zero size buf in writer "
                          "t:%d r:%d f:%d %p %p-%p %p %O-%O",
                          cl->buf->temporary,
                          cl->buf->recycled,
                          cl->buf->in_file,
                          cl->buf->start,
                          cl->buf->pos,
                          cl->buf->last,
                          cl->buf->file,
                          cl->buf->file_pos,
                          cl->buf->file_last);

            ngx_debug_point();
            return NGX_ERROR;
        }
#endif

        size += ngx_buf_size(cl->buf);

        if (cl->buf->flush || cl->buf->recycled) {
            flush = 1;
        }

        if (cl->buf->sync) {
            sync = 1;
        }

        if (cl->buf->last_buf) {
            last = 1;
        }
    }

    /* terminate the merged chain */
    *ll = NULL;

    ngx_log_debug3(NGX_LOG_DEBUG_HTTP, c->log, 0,
                   "http write filter: l:%d f:%d s:%O", last, flush, size);

    clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module);

    /*
     * avoid the output if there are no last buf, no flush point,
     * there are the incoming bufs and the size of all bufs
     * is smaller than "postpone_output" directive
     */

    if (!last && !flush && in && size < (off_t) clcf->postpone_output) {
        return NGX_OK;
    }

    /* a delayed write event means rate limiting postponed the send */
    if (c->write->delayed) {
        c->buffered |= NGX_HTTP_WRITE_BUFFERED;
        return NGX_AGAIN;
    }

    if (size == 0
        && !(c->buffered & NGX_LOWLEVEL_BUFFERED)
        && !(last && c->need_last_buf))
    {
        /* nothing to transmit: release chain links for special buffers */
        if (last || flush || sync) {
            for (cl = r->out; cl; /* void */) {
                ln = cl;
                cl = cl->next;
                ngx_free_chain(r->pool, ln);
            }

            r->out = NULL;
            c->buffered &= ~NGX_HTTP_WRITE_BUFFERED;

            return NGX_OK;
        }

        ngx_log_error(NGX_LOG_ALERT, c->log, 0,
                      "the http output chain is empty");

        ngx_debug_point();

        return NGX_ERROR;
    }

    if (r->limit_rate) {
        if (r->limit_rate_after == 0) {
            r->limit_rate_after = clcf->limit_rate_after;
        }

        /* bytes still allowed under the configured rate */
        limit = (off_t) r->limit_rate * (ngx_time() - r->start_sec + 1)
                - (c->sent - r->limit_rate_after);

        if (limit <= 0) {
            /* over budget: schedule a timer and come back later */
            c->write->delayed = 1;
            delay = (ngx_msec_t) (- limit * 1000 / r->limit_rate + 1);
            ngx_add_timer(c->write, delay);

            c->buffered |= NGX_HTTP_WRITE_BUFFERED;

            return NGX_AGAIN;
        }

        if (clcf->sendfile_max_chunk
            && (off_t) clcf->sendfile_max_chunk < limit)
        {
            limit = clcf->sendfile_max_chunk;
        }

    } else {
        limit = clcf->sendfile_max_chunk;
    }

    sent = c->sent;

    ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0,
                   "http write filter limit %O", limit);

    chain = c->send_chain(c, r->out, limit);   /* ngx_linux_sendfile_chain */

    ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0,
                   "http write filter %p", chain);

    if (chain == NGX_CHAIN_ERROR) {
        c->error = 1;
        return NGX_ERROR;
    }

    if (r->limit_rate) {
        nsent = c->sent;

        /* measure only the bytes sent past the limit_rate_after mark */
        if (r->limit_rate_after) {
            sent -= r->limit_rate_after;
            if (sent < 0) {
                sent = 0;
            }

            nsent -= r->limit_rate_after;
            if (nsent < 0) {
                nsent = 0;
            }
        }

        delay = (ngx_msec_t) ((nsent - sent) * 1000 / r->limit_rate);

        if (delay > 0) {
            limit = 0;
            c->write->delayed = 1;
            ngx_add_timer(c->write, delay);
        }
    }

    /* near-full chunk sent: yield briefly so other requests can run */
    if (limit
        && c->write->ready
        && c->sent - sent >= limit - (off_t) (2 * ngx_pagesize))
    {
        c->write->delayed = 1;
        ngx_add_timer(c->write, 1);
    }

    /* recycle the chain links whose buffers were fully sent */
    for (cl = r->out; cl && cl != chain; /* void */) {
        ln = cl;
        cl = cl->next;
        ngx_free_chain(r->pool, ln);
    }

    r->out = chain;

    if (chain) {
        /* data remains: mark the connection as write-buffered */
        c->buffered |= NGX_HTTP_WRITE_BUFFERED;
        return NGX_AGAIN;
    }

    c->buffered &= ~NGX_HTTP_WRITE_BUFFERED;

    if ((c->buffered & NGX_LOWLEVEL_BUFFERED) && r->postponed == NULL) {
        return NGX_AGAIN;
    }

    return NGX_OK;
}
// Actually sends data to the client by calling c->send_chain.
// Also driven by the write event that ngx_http_set_write_handler
// registers with epoll.
// If the data cannot all be sent, the remainder is kept in r->out and
// NGX_AGAIN is returned; another write-ready event is needed to resume.
// When neither last nor flush is set and the data is small
// (default 1460 bytes), no real write call is issued this time --
// fewer syscalls, better performance.
// Rate limiting is also handled in this function.
ngx_int_t
ngx_http_write_filter(ngx_http_request_t *r, ngx_chain_t *in)
{
    off_t                      size, sent, nsent, limit;
    ngx_uint_t                 last, flush, sync;
    ngx_msec_t                 delay;
    ngx_chain_t               *cl, *ln, **ll, *chain;
    ngx_connection_t          *c;
    ngx_http_core_loc_conf_t  *clcf;

    // get the connection object
    c = r->connection;

    // if the connection is in error state no data can be sent
    if (c->error) {
        return NGX_ERROR;
    }

    // total length of the data
    size = 0;

    // flush flag
    flush = 0;

    // sync flag
    sync = 0;

    // whether this is the final piece, i.e. all data produced
    last = 0;

    // the chain of pending data stored in the request,
    // possibly left over from an earlier NGX_AGAIN
    ll = &r->out;

    /* find the size, the flush point and the last link of the saved chain */

    // first walk the request's pending chain and measure it
    for (cl = r->out; cl; cl = cl->next) {
        ll = &cl->next;

        ngx_log_debug7(NGX_LOG_DEBUG_EVENT, c->log, 0,
                       "write old buf t:%d f:%d %p, pos %p, size: %z "
                       "file: %O, size: %O",
                       cl->buf->temporary, cl->buf->in_file,
                       cl->buf->start, cl->buf->pos,
                       cl->buf->last - cl->buf->pos,
                       cl->buf->file_pos,
                       cl->buf->file_last - cl->buf->file_pos);

#if 1
        // a zero-length buffer that is not a flush/sync/etc. control
        // buffer is an error
        if (ngx_buf_size(cl->buf) == 0 && !ngx_buf_special(cl->buf)) {
            ngx_log_error(NGX_LOG_ALERT, c->log, 0,
                          "zero size buf in writer "
                          "t:%d r:%d f:%d %p %p-%p %p %O-%O",
                          cl->buf->temporary,
                          cl->buf->recycled,
                          cl->buf->in_file,
                          cl->buf->start,
                          cl->buf->pos,
                          cl->buf->last,
                          cl->buf->file,
                          cl->buf->file_pos,
                          cl->buf->file_last);

            ngx_debug_point();
            return NGX_ERROR;
        }
#endif

        // accumulate the data length
        size += ngx_buf_size(cl->buf);

        // flush flag
        if (cl->buf->flush || cl->buf->recycled) {
            flush = 1;
        }

        // sync flag
        if (cl->buf->sync) {
            sync = 1;
        }

        // end-of-output flag
        if (cl->buf->last_buf) {
            last = 1;
        }
    }

    /* add the new chain to the existent one */

    // walk the new chain, measure it,
    // and append the "in" list to the tail of r->out
    for (ln = in; ln; ln = ln->next) {

        // allocate a fresh chain link
        cl = ngx_alloc_chain_link(r->pool);
        if (cl == NULL) {
            return NGX_ERROR;
        }

        // copy the buffer pointer, then hook the link onto out
        cl->buf = ln->buf;
        *ll = cl;
        ll = &cl->next;

        ngx_log_debug7(NGX_LOG_DEBUG_EVENT, c->log, 0,
                       "write new buf t:%d f:%d %p, pos %p, size: %z "
                       "file: %O, size: %O",
                       cl->buf->temporary, cl->buf->in_file,
                       cl->buf->start, cl->buf->pos,
                       cl->buf->last - cl->buf->pos,
                       cl->buf->file_pos,
                       cl->buf->file_last - cl->buf->file_pos);

#if 1
        // a zero-length buffer that is not a flush/sync/etc. control
        // buffer is an error
        if (ngx_buf_size(cl->buf) == 0 && !ngx_buf_special(cl->buf)) {
            ngx_log_error(NGX_LOG_ALERT, c->log, 0,
                          "zero size buf in writer "
                          "t:%d r:%d f:%d %p %p-%p %p %O-%O",
                          cl->buf->temporary,
                          cl->buf->recycled,
                          cl->buf->in_file,
                          cl->buf->start,
                          cl->buf->pos,
                          cl->buf->last,
                          cl->buf->file,
                          cl->buf->file_pos,
                          cl->buf->file_last);

            ngx_debug_point();
            return NGX_ERROR;
        }
#endif

        // accumulate the data length
        size += ngx_buf_size(cl->buf);

        // flush flag
        if (cl->buf->flush || cl->buf->recycled) {
            flush = 1;
        }

        // sync flag
        if (cl->buf->sync) {
            sync = 1;
        }

        // end-of-output flag
        if (cl->buf->last_buf) {
            last = 1;
        }
    }

    // the last node of the list must be a NULL pointer
    *ll = NULL;

    ngx_log_debug3(NGX_LOG_DEBUG_HTTP, c->log, 0,
                   "http write filter: l:%ui f:%ui s:%O", last, flush, size);

    clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module);

    /*
     * avoid the output if there are no last buf, no flush point,
     * there are the incoming bufs and the size of all bufs
     * is smaller than "postpone_output" directive
     */

    // not last, not flush, and the amount is small (default 1460):
    // skip the real write this time to save syscalls
    if (!last && !flush && in && size < (off_t) clcf->postpone_output) {
        return NGX_OK;
    }

    // flush: the user asked to send immediately
    // last: all data has been collected, no more will come
    // size > postpone_output: enough data accumulated, should send
    // delayed means rate limiting is active, so do not send yet
    if (c->write->delayed) {
        // mark the connection as having buffered data pending
        c->buffered |= NGX_HTTP_WRITE_BUFFERED;
        return NGX_AGAIN;
    }

    // data length is zero
    if (size == 0
        && !(c->buffered & NGX_LOWLEVEL_BUFFERED)
        && !(last && c->need_last_buf))
    {
        // release the nodes held in r->out
        if (last || flush || sync) {
            for (cl = r->out; cl; /* void */) {
                ln = cl;
                cl = cl->next;
                ngx_free_chain(r->pool, ln);
            }

            r->out = NULL;
            c->buffered &= ~NGX_HTTP_WRITE_BUFFERED;

            return NGX_OK;
        }

        ngx_log_error(NGX_LOG_ALERT, c->log, 0,
                      "the http output chain is empty");

        ngx_debug_point();

        return NGX_ERROR;
    }

    // rate-limiting handling
    if (r->limit_rate) {
        if (r->limit_rate_after == 0) {
            r->limit_rate_after = clcf->limit_rate_after;
        }

        limit = (off_t) r->limit_rate * (ngx_time() - r->start_sec + 1)
                - (c->sent - r->limit_rate_after);

        if (limit <= 0) {
            c->write->delayed = 1;
            delay = (ngx_msec_t) (- limit * 1000 / r->limit_rate + 1);
            ngx_add_timer(c->write, delay);

            c->buffered |= NGX_HTTP_WRITE_BUFFERED;

            return NGX_AGAIN;
        }

        if (clcf->sendfile_max_chunk
            && (off_t) clcf->sendfile_max_chunk < limit)
        {
            limit = clcf->sendfile_max_chunk;
        }

    } else {
        // no rate limit: use the configured parameter; the default is 0,
        // i.e. unlimited -- send as much as possible
        limit = clcf->sendfile_max_chunk;
    }

    // bytes already sent on this connection
    sent = c->sent;

    ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0,
                   "http write filter limit %O", limit);

    // call send_chain to transmit the out list
    // (in practice this is ngx_writev_chain)
    //
    // sends up to limit bytes;
    // if the event is not ready (not writable yet) it returns at once;
    // buffers must reside in memory, otherwise it is an error;
    // returns the chain pointer past the consumed buffers.
    // The call ends on error, on EAGAIN, or when everything was sent;
    // the return value is the last chain node reached.
    chain = c->send_chain(c, r->out, limit);

    ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0,
                   "http write filter %p", chain);

    if (chain == NGX_CHAIN_ERROR) {
        c->error = 1;
        return NGX_ERROR;
    }

    // rate-limiting handling
    if (r->limit_rate) {
        nsent = c->sent;

        if (r->limit_rate_after) {
            sent -= r->limit_rate_after;
            if (sent < 0) {
                sent = 0;
            }

            nsent -= r->limit_rate_after;
            if (nsent < 0) {
                nsent = 0;
            }
        }

        delay = (ngx_msec_t) ((nsent - sent) * 1000 / r->limit_rate);

        if (delay > 0) {
            limit = 0;
            c->write->delayed = 1;
            ngx_add_timer(c->write, delay);
        }
    }

    // rate-limiting handling
    if (limit
        && c->write->ready
        && c->sent - sent >= limit - (off_t) (2 * ngx_pagesize))
    {
        c->write->delayed = 1;
        ngx_add_timer(c->write, 1);
    }

    // recycle the already-sent nodes of the out list for later reuse
    for (cl = r->out; cl && cl != chain; /* void */) {
        ln = cl;
        cl = cl->next;
        ngx_free_chain(r->pool, ln);
    }

    // out now points at the new position
    r->out = chain;

    // a non-NULL pointer means some data is still unsent
    if (chain) {
        // mark the connection as having buffered data pending
        c->buffered |= NGX_HTTP_WRITE_BUFFERED;
        return NGX_AGAIN;
    }

    // everything was sent; clear the buffered flag
    c->buffered &= ~NGX_HTTP_WRITE_BUFFERED;

    // NGX_LOWLEVEL_BUFFERED does not appear to be used by nginx yet
    if ((c->buffered & NGX_LOWLEVEL_BUFFERED) && r->postponed == NULL) {
        return NGX_AGAIN;
    }

    return NGX_OK;
}
/*
 * Worker-process init hook: creates the libcouchbase IO plugin bound to
 * nginx's event loop, then materializes one libcouchbase instance per
 * configured upstream connection and registers its callbacks.
 *
 * Returns NGX_OK on success, NGX_ERROR on any allocation or
 * libcouchbase failure (the worker will not start in that case).
 */
static ngx_int_t
ngx_lcb_init_process(ngx_cycle_t *cycle)
{
    ngx_lcb_main_conf_t *cmcf;
    struct lcb_create_io_ops_st options;
    lcb_error_t err;
    ngx_int_t rc;
    ngx_uint_t i;
    ngx_lcb_connection_t *conn;
    ngx_lcb_loc_conf_t **ccfp;

    /* initialize libcouchbase IO plugin */
    memset(&options, 0, sizeof(options));
    options.version = 2;
    options.v.v2.create = ngx_lcb_create_io_opts;
    options.v.v2.cookie = &lcb_cookie;
    err = lcb_create_io_ops(&lcb_cookie.io, &options);
    if (err != LCB_SUCCESS) {
        ngx_log_error(NGX_LOG_ALERT, cycle->log, 0,
                      "couchbase: failed to create IO object for libcouchbase: 0x%02xd \"%s\"",
                      err, lcb_strerror(NULL, err));
        return NGX_ERROR;
    }

    /* the cookie carries the worker's log/pool into plugin callbacks */
    lcb_cookie.log = cycle->log;
    lcb_cookie.pool = cycle->pool;

    /* materialize upstream connections */
    rc = ngx_array_init(&lcb_connections, cycle->pool, 4,
                        sizeof(ngx_lcb_connection_t));
    if (rc != NGX_OK) {
        return NGX_ERROR;
    }
    cmcf = ngx_http_cycle_get_module_main_conf(cycle, ngx_http_couchbase_module);
    ccfp = cmcf->connection_confs.elts;
    for (i = 0; i < cmcf->connection_confs.nelts; i++) {
        /* copy the per-location options so io can be set per instance */
        struct lcb_create_st opts = ccfp[i]->options;

        conn = ngx_array_push(&lcb_connections);
        if (conn == NULL) {
            return NGX_ERROR;
        }
        conn->log = cycle->log;
        conn->name = ccfp[i]->name;

        /* backlog holds requests queued until the bucket is connected */
        rc = ngx_array_init(&conn->backlog, cycle->pool, 4,
                            sizeof(ngx_http_request_t *));
        if (rc != NGX_OK) {
            return NGX_ERROR;
        }
        opts.v.v0.io = lcb_cookie.io;
        err = lcb_create(&conn->lcb, &opts);
        if (err != LCB_SUCCESS) {
            ngx_log_error(NGX_LOG_EMERG, cycle->log, 0,
                          "couchbase: failed to create libcouchbase instance: 0x%02xd \"%s\"",
                          err, lcb_strerror(NULL, err));
            return NGX_ERROR;
        }

        /* wire up operation callbacks before any request uses conn->lcb */
        (void)lcb_set_error_callback(conn->lcb, ngx_lcb_error_callback);
        (void)lcb_set_timeout(conn->lcb, ccfp[i]->connect_timeout * 1000); /* in usec */
        (void)lcb_set_get_callback(conn->lcb, ngx_lcb_get_callback);
        (void)lcb_set_store_callback(conn->lcb, ngx_lcb_store_callback);
        (void)lcb_set_remove_callback(conn->lcb, ngx_lcb_remove_callback);
        (void)lcb_set_configuration_callback(conn->lcb, ngx_lcb_configuration_callback);
        (void)lcb_set_cookie(conn->lcb, conn);

        /*
         * NOTE(review): the user fallback is masked ("******") while the
         * password itself is logged in clear text when set -- the two
         * placeholders look swapped; confirm and mask the password.
         */
        ngx_log_debug7(NGX_LOG_DEBUG_HTTP, cycle->log, 0,
                       "couchbase(%p): configured connection \"%V\": connect_timeout:%Mms "
                       "address:%s bucket:%s user:%s password:%s",
                       conn->lcb, &conn->name, ccfp[i]->connect_timeout,
                       opts.v.v0.host ? opts.v.v0.host : "(null)",
                       opts.v.v0.bucket ? opts.v.v0.bucket : "(null)",
                       opts.v.v0.user ? opts.v.v0.user : "******",
                       opts.v.v0.passwd ? opts.v.v0.passwd : "(null)");
    }
    return NGX_OK;
}
/*
 * Body-save filter variant that additionally runs every incoming buffer
 * through the ngx_http_top_input_body_filter chain after saving it.
 *
 * Return-code mapping for the input filter:
 *   NGX_OK     - continue with the next buffer;
 *   NGX_AGAIN  - passed through unchanged (caller retries later);
 *   NGX_HTTP_* - special response codes pass through;
 *   any other non-OK code (including 1xx/2xx-range values, which would
 *   confuse callers) is converted to 500.
 */
static ngx_int_t
ngx_http_request_body_save_filter(ngx_http_request_t *r, ngx_chain_t *in)
{
    ngx_int_t                  rc;
    ngx_chain_t               *cl;
    ngx_http_request_body_t   *rb;

    rb = r->request_body;

#if (NGX_DEBUG)

    /* dump the buffers already accumulated in rb->bufs */
    for (cl = rb->bufs; cl; cl = cl->next) {
        ngx_log_debug7(NGX_LOG_DEBUG_EVENT, r->connection->log, 0,
                       "http body old buf t:%d f:%d %p, pos %p, size: %z "
                       "file: %O, size: %z",
                       cl->buf->temporary, cl->buf->in_file,
                       cl->buf->start, cl->buf->pos,
                       cl->buf->last - cl->buf->pos,
                       cl->buf->file_pos,
                       cl->buf->file_last - cl->buf->file_pos);
    }

    /* dump the buffers delivered in this pass */
    for (cl = in; cl; cl = cl->next) {
        ngx_log_debug7(NGX_LOG_DEBUG_EVENT, r->connection->log, 0,
                       "http body new buf t:%d f:%d %p, pos %p, size: %z "
                       "file: %O, size: %z",
                       cl->buf->temporary, cl->buf->in_file,
                       cl->buf->start, cl->buf->pos,
                       cl->buf->last - cl->buf->pos,
                       cl->buf->file_pos,
                       cl->buf->file_last - cl->buf->file_pos);
    }

#endif

    /* TODO: coalesce neighbouring buffers */

    /* link the new buffers onto the saved body chain */
    if (ngx_chain_add_copy(r->pool, &rb->bufs, in) != NGX_OK) {
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }

    /* feed each new buffer through the input-body filter chain */
    for (cl = in; cl; cl = cl->next) {
        rc = ngx_http_top_input_body_filter(r, cl->buf);
        if (rc != NGX_OK) {
            if (rc > NGX_OK && rc < NGX_HTTP_SPECIAL_RESPONSE) {
                ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
                              "input filter: return code 1xx or 2xx "
                              "will cause trouble and is converted to 500");
            }

            /**
             * NGX_OK: success and continue;
             * NGX_ERROR: failed and exit;
             * NGX_AGAIN: not ready and retry later.
             */

            /* rc now is NGX_ERROR, NGX_AGAIN, or an HTTP status code */
            if (rc < NGX_HTTP_SPECIAL_RESPONSE && rc != NGX_AGAIN) {
                rc = NGX_HTTP_INTERNAL_SERVER_ERROR;
            }

            return rc;
        }
    }

    return NGX_OK;
}
/*
 * Read handler for the preconnect connection to the CloudRouter Agent.
 *
 * Receives the Agent's answer, which lists the backends for a route:
 *   IP1\nPORT1\n
 *   (optional) IPz\nPORTz\n
 *   --
 * Once the terminating "--" is seen, the remotes are parsed and cached
 * in the shared-memory rbtree under the shpool mutex, and the first
 * remote is installed as the upstream host/port for the request.
 */
static void
ngx_http_cloudrouter_peer_preconnect_read(ngx_event_t *rev)
{
    ngx_connection_t *c;
    ngx_http_cloudrouter_peer_preconnect_data_t *pcd;
    ngx_http_upstream_t *u;
    ngx_http_request_t *r;
    int i;
    hs_route_t *route;

    c = rev->data;
    pcd = c->data;
    r = pcd->r;
    u = pcd->u;
    route = &pcd->route;

    ngx_log_debug(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, "preconnect: read");

    /* the preconnect exchange has already finished */
    if (pcd->done>0) {
        return;
    }

    /* the request is gone or already completed: drop the connection */
    if (r->main==NULL||r->request_complete||r->pool==NULL||r!=r->main) {
        ngx_close_connection(c);
        c->destroyed = 1;
        return;
    }

    if (rev->timedout) {
        ngx_log_error(NGX_LOG_ERR, rev->log, NGX_ETIMEDOUT,
                      "cloudrouter preconnect server timed out");
        return ngx_http_cloudrouter_peer_preconnect_close(c, pcd, NGX_ERROR);
    }

    /* lazily allocate the 1000-byte receive buffer on first read */
    if (pcd->buf==NULL) {
        int size = sizeof(char)*1000;
        ngx_log_debug(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, "creating buf");
        /*
         * NOTE(review): bufend/bufpos are derived before the NULL check
         * below; if ngx_pcalloc() fails the arithmetic is done on a
         * NULL pointer -- the check should come first.
         */
        pcd->buf = ngx_pcalloc(r->pool, size);
        pcd->bufend = pcd->buf+size;
        pcd->bufpos = pcd->buf;
        if (pcd->buf==NULL) {
            ngx_log_error(NGX_LOG_ERR, rev->log, NGX_ENOMEM,
                          "preconnect: read: could not allocate buf");
            return ngx_http_cloudrouter_peer_preconnect_close(c, pcd, NGX_ERROR);
        }
    }

    /*
     * Protocol format:
     * IP1\nPORT1\n
     * (optional) IPz\nPORTz\n
     * --
     */
    int bufsize = pcd->bufend - pcd->bufpos;
    ngx_int_t received;
    if (bufsize > 0) {
        received = ngx_recv(c, pcd->bufpos, bufsize);
    } else {
        /* buffer full: proceed with what has been accumulated */
        received = 0;
    }
    if (received==NGX_AGAIN) {
        return;
    } else if (received>=0) {
        pcd->bufpos += received;

        /*
         * Count '-' end-marker characters over the whole buffer.
         * NOTE(review): the count restarts from the buffer head on every
         * read but end_marker_count is never reset, so markers received
         * in earlier passes are counted again -- verify intended.
         */
        for (i=0;i<(pcd->bufpos-pcd->buf);i++) {
            if (*(pcd->buf + i )=='-') {
                pcd->end_marker_count++;
            }
        }

        if (pcd->end_marker_count>=2) {
            ngx_log_debug0(NGX_LOG_DEBUG_HTTP, rev->log, 0,
                           "CloudRouter preconnect: message complete");

            ngx_http_cloudrouter_peer_t *peer =
                (ngx_http_cloudrouter_peer_t *)u->peer.data;
            unsigned char* next = pcd->buf;
            int new_node = 0;

            /* all rbtree/node manipulation happens under the shpool lock */
            ngx_shmtx_lock(&ngx_http_cloudrouter_shpool->mutex);

            ngx_http_cloudrouter_node_t *e =
                ngx_http_cloudrouter_get_locked(route);

            if (e == NULL) { /* likely() */
                /*
                 * NOTE(review): ngx_slab_alloc_locked() result is used
                 * without a NULL check -- confirm allocation failures
                 * cannot occur here or add handling.
                 */
                e = ngx_slab_alloc_locked(ngx_http_cloudrouter_shpool, sizeof *e);
                new_node = 1;
                e->node.key = ngx_http_cloudrouter_hash_route(route);
                (void)ngx_copy(e->di_name, route->di_name, sizeof(route->di_name));
                e->di_nlen = route->di_nlen;
            } else {
                ngx_log_debug0(NGX_LOG_DEBUG_HTTP, rev->log, 0,
                               "CloudRouter preconnect: reusing existing node");
            }

            e->timestamp = time(NULL);

            /* drop stale remotes before re-filling from this message */
            ngx_http_cloudrouter_clear_remotes_locked(e, rev->log);

            /* parse "IP\nPORT\n" pairs until the buffer is exhausted */
            while (next < pcd->bufpos) {
                ngx_log_debug0(NGX_LOG_DEBUG_HTTP, rev->log, 0,
                               "CloudRouter preconnect: parsing message");
                unsigned char *ip = NULL;
                unsigned char *port = NULL;
                ip = next;

                /* advance past two '\n'-terminated fields: ip, then port */
                while (++next < pcd->bufpos) {
                    if (*(next-1) == '\n') {
                        if (ip && port) break;
                        port = next;
                    }
                }

                if (ip && port) {
                    ngx_http_cloudrouter_remote_t *remote =
                        ngx_http_cloudrouter_add_remote_locked(e);
                    int iplen = port-ip-1;
                    iplen = iplen > 16 ? 16 : iplen;
                    remote->inet_addr = ngx_inet_addr(ip, iplen);
                    if (remote->inet_addr == INADDR_NONE) {
                        ngx_log_error(NGX_LOG_ERR, rev->log, NGX_EINVAL,
                                      "CloudRouter preconnect: IP address from Agent invalid for route %s",
                                      e->di_name);
                        goto failure;
                    }
                    int portlen = next-port-1;
                    remote->port_n = htons(ngx_atoi(port,portlen));
                    ngx_log_debug7(NGX_LOG_DEBUG_HTTP, rev->log, 0,
                                   "CloudRouter preconnect: cached values SET: e=%p rem=%p ts=%d %s[%d] %uxD:%d",
                                   e, remote, e->timestamp, e->di_name,
                                   e->di_nlen, remote->inet_addr,
                                   remote->port_n);
                }
            }

            /* a message without any remotes is useless: treat as failure */
            if (!e->remote) {
                ngx_log_debug(NGX_LOG_DEBUG_HTTP, rev->log, 0,
                              "CloudRouter preconnect: Agent sent no remotes");
                goto failure;
            }

            ngx_http_cloudrouter_set_hostandport(r, peer, e);

            /* publish a freshly created node into the shared rbtree */
            if (new_node) {
                ngx_rbtree_insert(ngx_http_cloudrouter_rbtree, &e->node);
            }

            ngx_shmtx_unlock(&ngx_http_cloudrouter_shpool->mutex);
            return ngx_http_cloudrouter_peer_preconnect_close(c, pcd, NGX_OK);

        failure:
            /* a reused node is still linked in the tree: unlink it first */
            if (!new_node) {
                ngx_log_debug0(NGX_LOG_DEBUG_HTTP, pcd->r->connection->log, 0,
                               "peer_preconnect_read: calling rbtree_delete");
                ngx_rbtree_delete(ngx_http_cloudrouter_rbtree, &e->node);
            }
            ngx_log_debug0(NGX_LOG_DEBUG_HTTP, pcd->r->connection->log, 0,
                           "peer_preconnect_read: calling free_node_locked");
            ngx_http_cloudrouter_free_node_locked(e, rev->log);
            ngx_log_debug0(NGX_LOG_DEBUG_HTTP, pcd->r->connection->log, 0,
                           "peer_preconnect_read: calling shmtx_unlock");
            ngx_shmtx_unlock(&ngx_http_cloudrouter_shpool->mutex);
            ngx_log_debug0(NGX_LOG_DEBUG_HTTP, pcd->r->connection->log, 0,
                           "peer_preconnect_read: calling peer_preconnect_close");
            return ngx_http_cloudrouter_peer_preconnect_close(c, pcd, NGX_ERROR);
        }
        /* message not complete yet: wait for more data */
        return;
    }
    /* unknown error condition from ngx_recv */
    return;
}
/*
 * Establishes a ZeroMQ "connection" for an upstream peer: creates a zmq
 * socket, wraps its notification fd in an ngx_connection_t, registers
 * read events and finally binds or connects the endpoint.
 *
 * On success pc->connection points at the shadow connection embedded in
 * zc and NGX_OK is returned; on failure all acquired resources are
 * released via the goto-cleanup chain and NGX_ERROR is returned.
 */
ngx_int_t
ngx_zeromq_connect(ngx_peer_connection_t *pc)
{
    ngx_zeromq_connection_t  *zc = pc->data;
    ngx_zeromq_endpoint_t    *zep;
    ngx_connection_t         *c;
    ngx_event_t              *rev, *wev;
    void                     *zmq;
    int                       fd, zero;
    size_t                    fdsize;
    ngx_uint_t                i;

    zep = zc->endpoint;

    zmq = zmq_socket(ngx_zeromq_ctx, zep->type->value);
    if (zmq == NULL) {
        ngx_log_error(NGX_LOG_ALERT, pc->log, 0,
                      "zmq_socket(%V) failed (%d: %s)",
                      &zep->type->name, ngx_errno, zmq_strerror(ngx_errno));
        return NGX_ERROR;
    }

    /* the ZMQ_FD descriptor is what nginx's event loop will watch */
    fdsize = sizeof(int);

    if (zmq_getsockopt(zmq, ZMQ_FD, &fd, &fdsize) == -1) {
        ngx_zeromq_log_error(pc->log, "zmq_getsockopt(ZMQ_FD)");
        goto failed_zmq;
    }

    /* don't linger on close: discard unsent messages immediately */
    zero = 0;

    if (zmq_setsockopt(zmq, ZMQ_LINGER, &zero, sizeof(int)) == -1) {
        ngx_zeromq_log_error(pc->log, "zmq_setsockopt(ZMQ_LINGER)");
        goto failed_zmq;
    }

    c = ngx_get_connection(fd, pc->log);
    if (c == NULL) {
        goto failed_zmq;
    }

    c->number = ngx_atomic_fetch_add(ngx_connection_counter, 1);

    /* install ZeroMQ-aware I/O handlers on the connection */
    c->recv = ngx_zeromq_recv;
    c->send = NULL;
    c->recv_chain = ngx_zeromq_recv_chain;
    c->send_chain = ngx_zeromq_send_chain;

    /* This won't fly with ZeroMQ */
    c->sendfile = 0;
    c->tcp_nopush = NGX_TCP_NOPUSH_DISABLED;
    c->tcp_nodelay = NGX_TCP_NODELAY_DISABLED;

    c->log_error = pc->log_error;

    rev = c->read;
    wev = c->write;

    rev->data = zc;
    wev->data = zc;

    rev->handler = ngx_zeromq_event_handler;
    wev->handler = ngx_zeromq_event_handler;

    rev->log = pc->log;
    wev->log = pc->log;

    /* expose a shadow copy of the connection through zc */
    pc->connection = &zc->connection;

    zc->connection_ptr = c;
    memcpy(&zc->connection, c, sizeof(ngx_connection_t));

    zc->socket = zmq;

    if (zep->type->can_send) {
        zc->send = zc;
    }

    if (zep->type->can_recv) {
        zc->recv = zc;
    }

    if (pc->local) {
        ngx_log_error(NGX_LOG_WARN, pc->log, 0,
                      "zmq_connect: binding to local address is not supported");
    }

    if (zep->bind) {

        if (zep->rand) {
            /* retry with freshly randomized ports until bind succeeds */
            for (i = 0; ; i++) {
                ngx_zeromq_randomized_endpoint_regen(&zep->addr);

                if (zmq_bind(zmq, (const char *) zep->addr.data) == -1) {

                    if (ngx_errno == NGX_EADDRINUSE && i < 65535) {
                        continue;
                    }

                    ngx_zeromq_log_error(pc->log, "zmq_bind()");
                    goto failed;
                }

                break;
            }

        } else {
            if (zmq_bind(zmq, (const char *) zep->addr.data) == -1) {
                ngx_zeromq_log_error(pc->log, "zmq_bind()");
                goto failed;
            }
        }

    } else {
        if (zmq_connect(zmq, (const char *) zep->addr.data) == -1) {
            ngx_zeromq_log_error(pc->log, "zmq_connect()");
            goto failed;
        }
    }

    ngx_log_debug7(NGX_LOG_DEBUG_EVENT, pc->log, 0,
                   "zmq_connect: %s to %V (%V), fd:%d #%d zc:%p zmq:%p",
                   zep->bind ? "bound" : "lazily connected",
                   &zep->addr, &zep->type->name, fd, c->number, zc, zmq);

    /* register the notification fd with the active event mechanism */
    if (ngx_add_conn) {
        /* rtsig */
        if (ngx_add_conn(c) == NGX_ERROR) {
            goto failed;
        }

    } else {
        if (ngx_event_flags & NGX_USE_CLEAR_EVENT) {
            /* kqueue, epoll */
            if (ngx_add_event(rev, NGX_READ_EVENT, NGX_CLEAR_EVENT) != NGX_OK) {
                goto failed;
            }

        } else {
            /* select, poll, /dev/poll */
            if (ngx_add_event(rev, NGX_READ_EVENT, NGX_LEVEL_EVENT) != NGX_OK) {
                goto failed;
            }
        }
    }

    /*
     * ZeroMQ assumes that new socket is read-ready (but it really isn't)
     * and it won't notify us about any new events if we don't fail to read
     * from it first. Sigh.
     */

    rev->ready = 1;
    wev->ready = zep->type->can_send;

    return NGX_OK;

failed:

    /* release the nginx connection wrapper, then the zmq socket */
    ngx_free_connection(c);
    c->fd = (ngx_socket_t) -1;

    pc->connection = NULL;
    zc->socket = NULL;

failed_zmq:

    if (zmq_close(zmq) == -1) {
        ngx_zeromq_log_error(pc->log, "zmq_close()");
    }

    return NGX_ERROR;
}
/*
 * Last body filter in the output chain: sends the buffers in "in" (plus any
 * previously unsent data saved in r->out) to the client connection.
 *
 * r  - the request being served
 * in - chain of buffers produced by the upstream filters for this call
 *
 * Returns NGX_OK when all queued data was sent, NGX_AGAIN when data remains
 * buffered in r->out (or output is rate-delayed), NGX_ERROR on failure.
 */
ngx_int_t
ngx_http_write_filter(ngx_http_request_t *r, ngx_chain_t *in)
{
    off_t                      size, sent, nsent, limit;
    ngx_uint_t                 last, flush;
    ngx_msec_t                 delay;
    ngx_chain_t               *cl, *ln, **ll, *chain;
    ngx_connection_t          *c;
    ngx_http_core_loc_conf_t  *clcf;

    /* the connection this request belongs to */
    c = r->connection;

    /* a previous send already failed on this connection: give up at once */
    if (c->error) {
        return NGX_ERROR;
    }

    size = 0;
    flush = 0;
    last = 0;
    ll = &r->out;

    /* find the size, the flush point and the last link of the saved chain */

    /*
     * r->out holds whatever could not be sent on earlier calls; walk it to
     * accumulate the pending size and detect flush/last markers, keeping ll
     * pointed at the tail link so the new chain can be appended below.
     */
    for (cl = r->out; cl; cl = cl->next) {
        ll = &cl->next;

        ngx_log_debug7(NGX_LOG_DEBUG_EVENT, c->log, 0,
                       "write old buf t:%d f:%d %p, pos %p, size: %z "
                       "file: %O, size: %z",
                       cl->buf->temporary, cl->buf->in_file,
                       cl->buf->start, cl->buf->pos,
                       cl->buf->last - cl->buf->pos,
                       cl->buf->file_pos,
                       cl->buf->file_last - cl->buf->file_pos);

#if 1
        /* a zero-size non-special buffer indicates a broken upstream filter */
        if (ngx_buf_size(cl->buf) == 0 && !ngx_buf_special(cl->buf)) {
            ngx_log_error(NGX_LOG_ALERT, c->log, 0,
                          "zero size buf in writer "
                          "t:%d r:%d f:%d %p %p-%p %p %O-%O",
                          cl->buf->temporary,
                          cl->buf->recycled,
                          cl->buf->in_file,
                          cl->buf->start,
                          cl->buf->pos,
                          cl->buf->last,
                          cl->buf->file,
                          cl->buf->file_pos,
                          cl->buf->file_last);

            ngx_debug_point();
            return NGX_ERROR;
        }
#endif

        size += ngx_buf_size(cl->buf);

        if (cl->buf->flush || cl->buf->recycled) {
            flush = 1;
        }

        if (cl->buf->last_buf) {
            last = 1;
        }
    }

    /* add the new chain to the existent one */

    /*
     * Append the incoming buffers to r->out (new chain links, same bufs)
     * and keep accumulating the total pending size.
     */
    for (ln = in; ln; ln = ln->next) {
        cl = ngx_alloc_chain_link(r->pool);
        if (cl == NULL) {
            return NGX_ERROR;
        }

        cl->buf = ln->buf;
        *ll = cl;   /* ll still points at the tail of r->out */
        ll = &cl->next;

        ngx_log_debug7(NGX_LOG_DEBUG_EVENT, c->log, 0,
                       "write new buf t:%d f:%d %p, pos %p, size: %z "
                       "file: %O, size: %z",
                       cl->buf->temporary, cl->buf->in_file,
                       cl->buf->start, cl->buf->pos,
                       cl->buf->last - cl->buf->pos,
                       cl->buf->file_pos,
                       cl->buf->file_last - cl->buf->file_pos);

#if 1
        if (ngx_buf_size(cl->buf) == 0 && !ngx_buf_special(cl->buf)) {
            ngx_log_error(NGX_LOG_ALERT, c->log, 0,
                          "zero size buf in writer "
                          "t:%d r:%d f:%d %p %p-%p %p %O-%O",
                          cl->buf->temporary,
                          cl->buf->recycled,
                          cl->buf->in_file,
                          cl->buf->start,
                          cl->buf->pos,
                          cl->buf->last,
                          cl->buf->file,
                          cl->buf->file_pos,
                          cl->buf->file_last);

            ngx_debug_point();
            return NGX_ERROR;
        }
#endif

        size += ngx_buf_size(cl->buf);

        if (cl->buf->flush || cl->buf->recycled) {
            flush = 1;
        }

        if (cl->buf->last_buf) {
            last = 1;
        }
    }

    *ll = NULL;

    ngx_log_debug3(NGX_LOG_DEBUG_HTTP, c->log, 0,
                   "http write filter: l:%d f:%d s:%O", last, flush, size);

    /* loc-level configuration of ngx_http_core_module */
    clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module);

    /*
     * avoid the output if there are no last buf, no flush point,
     * there are the incoming bufs and the size of all bufs
     * is smaller than "postpone_output" directive
     */

    /*
     * Below the postpone_output threshold (set in nginx.conf) the data is
     * simply left queued in r->out for a later call.
     */
    if (!last && !flush && in && size < (off_t) clcf->postpone_output) {
        return NGX_OK;
    }

    /* output is currently rate-delayed: keep everything buffered */
    if (c->write->delayed) {
        c->buffered |= NGX_HTTP_WRITE_BUFFERED;
        return NGX_AGAIN;
    }

    if (size == 0
        && !(c->buffered & NGX_LOWLEVEL_BUFFERED)
        && !(last && c->need_last_buf))
    {
        /* nothing to send: release the chain links if this is a flush/last */
        if (last || flush) {
            for (cl = r->out; cl; /* void */) {
                ln = cl;
                cl = cl->next;
                ngx_free_chain(r->pool, ln);
            }

            r->out = NULL;
            c->buffered &= ~NGX_HTTP_WRITE_BUFFERED;

            return NGX_OK;
        }

        ngx_log_error(NGX_LOG_ALERT, c->log, 0,
                      "the http output chain is empty");

        ngx_debug_point();

        return NGX_ERROR;
    }

    /*
     * limit_rate > 0 means the response must not be sent faster than
     * limit_rate bytes per second; compute how much may be sent now.
     */
    if (r->limit_rate) {
        if (r->limit_rate_after == 0) {
            r->limit_rate_after = clcf->limit_rate_after;
        }

        /* allowed bytes = rate * elapsed seconds - already sent (after the
         * unthrottled limit_rate_after prefix) */
        limit = (off_t) r->limit_rate * (ngx_time() - r->start_sec + 1)
                - (c->sent - r->limit_rate_after);

        /*
         * Already over budget: mark the write event delayed, arm a timer for
         * when sending may resume, and report the output as buffered.
         */
        if (limit <= 0) {
            c->write->delayed = 1;
            ngx_add_timer(c->write,
                          (ngx_msec_t) (- limit * 1000 / r->limit_rate + 1));

            c->buffered |= NGX_HTTP_WRITE_BUFFERED;

            return NGX_AGAIN;
        }

        if (clcf->sendfile_max_chunk
            && (off_t) clcf->sendfile_max_chunk < limit)
        {
            limit = clcf->sendfile_max_chunk;
        }

    } else {
        limit = clcf->sendfile_max_chunk;
    }

    /* no throttling needed (or limit computed): send the response bytes */
    sent = c->sent;

    ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0,
                   "http write filter limit %O", limit);

    chain = c->send_chain(c, r->out, limit);

    ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0,
                   "http write filter %p", chain);

    if (chain == NGX_CHAIN_ERROR) {
        c->error = 1;
        return NGX_ERROR;
    }

    /* re-check the rate limit against what was actually sent */
    if (r->limit_rate) {

        nsent = c->sent;

        if (r->limit_rate_after) {

            sent -= r->limit_rate_after;
            if (sent < 0) {
                sent = 0;
            }

            nsent -= r->limit_rate_after;
            if (nsent < 0) {
                nsent = 0;
            }
        }

        /* milliseconds this send "cost" at the configured rate */
        delay = (ngx_msec_t) ((nsent - sent) * 1000 / r->limit_rate);

        /* sent too fast: delay further writes via a timer */
        if (delay > 0) {
            limit = 0;
            c->write->delayed = 1;
            ngx_add_timer(c->write, delay);
        }
    }

    /* sendfile_max_chunk nearly exhausted: yield briefly before continuing */
    if (limit
        && c->write->ready
        && c->sent - sent >= limit - (off_t) (2 * ngx_pagesize))
    {
        c->write->delayed = 1;
        ngx_add_timer(c->write, 1);
    }

    /* reclaim the chain links for buffers that were fully sent */
    for (cl = r->out; cl && cl != chain; /* void */) {
        ln = cl;
        cl = cl->next;
        ngx_free_chain(r->pool, ln);
    }

    /* whatever send_chain() could not send stays queued */
    r->out = chain;

    /* data remains: report buffered output */
    if (chain) {
        c->buffered |= NGX_HTTP_WRITE_BUFFERED;
        return NGX_AGAIN;
    }

    c->buffered &= ~NGX_HTTP_WRITE_BUFFERED;

    if ((c->buffered & NGX_LOWLEVEL_BUFFERED) && r->postponed == NULL) {
        return NGX_AGAIN;
    }

    /* everything was sent */
    return NGX_OK;
}
// 参数in实际上是ngx_http_request_body_length_filter里的out,即读取到的数据 // 从内存池里分配节点 // 拷贝in链表里的buf到rb->bufs里,不是直接连接 // 同样是指针操作,没有内存拷贝 // 如果要求写磁盘文件,那么调用ngx_http_write_request_body ngx_int_t ngx_http_request_body_save_filter(ngx_http_request_t *r, ngx_chain_t *in) { ngx_buf_t *b; ngx_chain_t *cl; ngx_http_request_body_t *rb; // 请求体数据的结构体 rb = r->request_body; #if (NGX_DEBUG) for (cl = rb->bufs; cl; cl = cl->next) { ngx_log_debug7(NGX_LOG_DEBUG_EVENT, r->connection->log, 0, "http body old buf t:%d f:%d %p, pos %p, size: %z " "file: %O, size: %O", cl->buf->temporary, cl->buf->in_file, cl->buf->start, cl->buf->pos, cl->buf->last - cl->buf->pos, cl->buf->file_pos, cl->buf->file_last - cl->buf->file_pos); } for (cl = in; cl; cl = cl->next) { ngx_log_debug7(NGX_LOG_DEBUG_EVENT, r->connection->log, 0, "http body new buf t:%d f:%d %p, pos %p, size: %z " "file: %O, size: %O", cl->buf->temporary, cl->buf->in_file, cl->buf->start, cl->buf->pos, cl->buf->last - cl->buf->pos, cl->buf->file_pos, cl->buf->file_last - cl->buf->file_pos); } #endif /* TODO: coalesce neighbouring buffers */ // 从内存池里分配节点 // 拷贝in链表里的buf到rb->bufs里,不是直接连接 // 同样是指针操作,没有内存拷贝 if (ngx_chain_add_copy(r->pool, &rb->bufs, in) != NGX_OK) { return NGX_HTTP_INTERNAL_SERVER_ERROR; } if (r->request_body_no_buffering) { return NGX_OK; } if (rb->rest > 0) { if (rb->buf && rb->buf->last == rb->buf->end && ngx_http_write_request_body(r) != NGX_OK) { return NGX_HTTP_INTERNAL_SERVER_ERROR; } return NGX_OK; } /* rb->rest == 0 */ // 如果要求写磁盘文件,那么调用ngx_http_write_request_body if (rb->temp_file || r->request_body_in_file_only) { if (ngx_http_write_request_body(r) != NGX_OK) { return NGX_HTTP_INTERNAL_SERVER_ERROR; } if (rb->temp_file->file.offset != 0) { cl = ngx_chain_get_free_buf(r->pool, &rb->free); if (cl == NULL) { return NGX_HTTP_INTERNAL_SERVER_ERROR; } b = cl->buf; ngx_memzero(b, sizeof(ngx_buf_t)); b->in_file = 1; b->file_last = rb->temp_file->file.offset; b->file = &rb->temp_file->file; rb->bufs = cl; } } return NGX_OK; }
static void ngx_signal_worker_processes(ngx_cycle_t *cycle, int signo) { ngx_int_t i; ngx_err_t err; ngx_channel_t ch; ngx_memzero(&ch, sizeof(ngx_channel_t)); #if (NGX_BROKEN_SCM_RIGHTS) ch.command = 0; #else switch (signo) { case ngx_signal_value(NGX_SHUTDOWN_SIGNAL): ch.command = NGX_CMD_QUIT; break; case ngx_signal_value(NGX_TERMINATE_SIGNAL): ch.command = NGX_CMD_TERMINATE; break; case ngx_signal_value(NGX_REOPEN_SIGNAL): ch.command = NGX_CMD_REOPEN; break; default: ch.command = 0; } #endif ch.fd = -1; for (i = 0; i < ngx_last_process; i++) { ngx_log_debug7(NGX_LOG_DEBUG_EVENT, cycle->log, 0, "child: %d %P e:%d t:%d d:%d r:%d j:%d", i, ngx_processes[i].pid, ngx_processes[i].exiting, ngx_processes[i].exited, ngx_processes[i].detached, ngx_processes[i].respawn, ngx_processes[i].just_spawn); if (ngx_processes[i].detached || ngx_processes[i].pid == -1) { continue; } if (ngx_processes[i].just_spawn) { ngx_processes[i].just_spawn = 0; continue; } if (ngx_processes[i].exiting && signo == ngx_signal_value(NGX_SHUTDOWN_SIGNAL)) { continue; } if (ch.command) { if (ngx_write_channel(ngx_processes[i].channel[0], &ch, sizeof(ngx_channel_t), cycle->log) == NGX_OK) { if (signo != ngx_signal_value(NGX_REOPEN_SIGNAL)) { ngx_processes[i].exiting = 1; } continue; } } ngx_log_debug2(NGX_LOG_DEBUG_CORE, cycle->log, 0, "kill (%P, %d)", ngx_processes[i].pid, signo); if (kill(ngx_processes[i].pid, signo) == -1) { err = ngx_errno; ngx_log_error(NGX_LOG_ALERT, cycle->log, err, "kill(%P, %d) failed", ngx_processes[i].pid, signo); if (err == NGX_ESRCH) { ngx_processes[i].exited = 1; ngx_processes[i].exiting = 0; ngx_reap = 1; } continue; } if (signo != ngx_signal_value(NGX_REOPEN_SIGNAL)) { ngx_processes[i].exiting = 1; } } }
/*
 * Clean up exited child processes after SIGCHLD.
 *
 * For each exited child: close its channel, tell the surviving workers to
 * close their copy of it, respawn the child when appropriate, and handle
 * the special case of the "new binary" process exiting during an executable
 * upgrade.  Returns 1 if any child is still alive, 0 otherwise.
 */
static ngx_uint_t
ngx_reap_children(ngx_cycle_t *cycle)
{
    ngx_int_t         i, n;
    ngx_uint_t        live;
    ngx_channel_t     ch;
    ngx_core_conf_t  *ccf;

    ngx_memzero(&ch, sizeof(ngx_channel_t));

    ch.command = NGX_CMD_CLOSE_CHANNEL;
    ch.fd = -1;

    live = 0;
    for (i = 0; i < ngx_last_process; i++) {

        ngx_log_debug7(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                       "child: %d %P e:%d t:%d d:%d r:%d j:%d",
                       i,
                       ngx_processes[i].pid,
                       ngx_processes[i].exiting,
                       ngx_processes[i].exited,
                       ngx_processes[i].detached,
                       ngx_processes[i].respawn,
                       ngx_processes[i].just_spawn);

        if (ngx_processes[i].pid == -1) {
            continue;
        }

        if (ngx_processes[i].exited) {

            if (!ngx_processes[i].detached) {
                /* close our end of the dead child's channel ... */
                ngx_close_channel(ngx_processes[i].channel, cycle->log);

                ngx_processes[i].channel[0] = -1;
                ngx_processes[i].channel[1] = -1;

                ch.pid = ngx_processes[i].pid;
                ch.slot = i;

                /* ... and tell every other live worker to do the same */
                for (n = 0; n < ngx_last_process; n++) {
                    if (ngx_processes[n].exited
                        || ngx_processes[n].pid == -1
                        || ngx_processes[n].channel[0] == -1)
                    {
                        continue;
                    }

                    ngx_log_debug3(NGX_LOG_DEBUG_CORE, cycle->log, 0,
                                   "pass close channel s:%i pid:%P to:%P",
                                   ch.slot, ch.pid, ngx_processes[n].pid);

                    /* TODO: NGX_AGAIN */

                    ngx_write_channel(ngx_processes[n].channel[0],
                                      &ch, sizeof(ngx_channel_t), cycle->log);
                }
            }

            /* restart a crashed worker unless we are shutting down */
            if (ngx_processes[i].respawn
                && !ngx_processes[i].exiting
                && !ngx_terminate
                && !ngx_quit)
            {
                if (ngx_spawn_process(cycle, ngx_processes[i].proc,
                                      ngx_processes[i].data,
                                      ngx_processes[i].name, i)
                    == NGX_INVALID_PID)
                {
                    ngx_log_error(NGX_LOG_ALERT, cycle->log, 0,
                                  "could not respawn %s",
                                  ngx_processes[i].name);
                    continue;
                }

                /* announce the new worker's channel to its siblings */
                ch.command = NGX_CMD_OPEN_CHANNEL;
                ch.pid = ngx_processes[ngx_process_slot].pid;
                ch.slot = ngx_process_slot;
                ch.fd = ngx_processes[ngx_process_slot].channel[0];

                ngx_pass_open_channel(cycle, &ch);

                live = 1;

                continue;
            }

            /* the new binary from an in-place upgrade exited:
             * restore the old pid file name */
            if (ngx_processes[i].pid == ngx_new_binary) {

                ccf = (ngx_core_conf_t *) ngx_get_conf(cycle->conf_ctx,
                                                       ngx_core_module);

                if (ngx_rename_file((char *) ccf->oldpid.data,
                                    (char *) ccf->pid.data)
                    == NGX_FILE_ERROR)
                {
                    ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                                  ngx_rename_file_n " %s back to %s failed "
                                  "after the new binary process \"%s\" exited",
                                  ccf->oldpid.data, ccf->pid.data, ngx_argv[0]);
                }

                ngx_new_binary = 0;
                if (ngx_noaccepting) {
                    ngx_restart = 1;
                    ngx_noaccepting = 0;
                }
            }

            /* compact the process table when the last slot frees up */
            if (i == ngx_last_process - 1) {
                ngx_last_process--;

            } else {
                ngx_processes[i].pid = -1;
            }

        } else if (ngx_processes[i].exiting || !ngx_processes[i].detached) {
            live = 1;
        }
    }

    return live;
}
/*
 * TODO: This the simplest way to add this patch.  But it introduces
 * memory copy.  Change me.
 */

/*
 * Copy the request-body data accumulated in rb->bufs into the non-buffered
 * buffer list (nb->bufs), compacting it with memcpy.
 *
 * Returns NGX_OK when everything was copied (rb->bufs is then reset to
 * NULL), NGX_DECLINED when the non-buffered buffers ran out of space
 * (remaining data stays in rb->bufs), NGX_ERROR on allocation failure.
 *
 * NOTE(review): ngx_http_request_body_non_buffered_t and
 * ngx_http_request_body_get_buf() are patch-local; semantics assumed from
 * usage here -- confirm against the rest of the patch.
 */
static ngx_int_t
ngx_http_copy_non_buffered_request_body(ngx_http_request_t *r)
{
    ssize_t                                 size;
    ngx_int_t                               rc;
    ngx_chain_t                            *cl;
    ngx_http_request_body_t                *rb;
    ngx_http_request_body_non_buffered_t   *nb;

    rb = r->request_body;
    nb = rb->non_buffered;

    ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                   "http copy client request body, non buffered bufs %p",
                   rb->bufs);

    if (rb->bufs == NULL) {
        return NGX_OK;
    }

#if (NGX_DEBUG)
    for (cl = rb->bufs; cl; cl = cl->next) {
        ngx_log_debug7(NGX_LOG_DEBUG_EVENT, r->connection->log, 0,
                       "http rb->bufs before t:%d f:%d %p, pos %p, size: %z "
                       "file: %O, size: %z",
                       cl->buf->temporary, cl->buf->in_file,
                       cl->buf->start, cl->buf->pos,
                       cl->buf->last - cl->buf->pos,
                       cl->buf->file_pos,
                       cl->buf->file_last - cl->buf->file_pos);
    }
#endif

    for (cl = rb->bufs; cl;) {

        /* skip empty, non-special buffers */
        if (ngx_buf_size(cl->buf) == 0 && !ngx_buf_special(cl->buf)) {
            cl = cl->next;
            continue;
        }

        /* need a (new) destination buffer when none exists or it is full */
        if ((nb->buf == NULL) || (nb->buf->end == nb->buf->last)) {
            rc = ngx_http_request_body_get_buf(r);
            if (rc == NGX_ERROR) {
                return NGX_ERROR;
            } else if (rc == NGX_DECLINED) {
                /* The buffers are full */
                return NGX_DECLINED;
            }
        }

        /* copy as much of this source buffer as fits */
        size = nb->buf->end - nb->buf->last;
        if (size > ngx_buf_size(cl->buf)) {
            size = ngx_buf_size(cl->buf);
        }

        if (size) {
            ngx_memcpy(nb->buf->last, cl->buf->pos, size);
            cl->buf->pos += size;
            nb->buf->last += size;
            nb->postpone_size += size;
        }

        /* source buffer exhausted: carry its last_buf flag and advance */
        if (cl->buf->pos == cl->buf->last) {
            /* TODO: honor other tags */
            nb->buf->last_buf = cl->buf->last_buf;
            cl = cl->next;
        }
    }

#if (NGX_DEBUG)
    for (cl = nb->bufs; cl; cl = cl->next) {
        ngx_log_debug7(NGX_LOG_DEBUG_EVENT, r->connection->log, 0,
                       "http nb->bufs t:%d f:%d %p, pos %p, size: %z "
                       "file: %O, size: %z",
                       cl->buf->temporary, cl->buf->in_file,
                       cl->buf->start, cl->buf->pos,
                       cl->buf->last - cl->buf->pos,
                       cl->buf->file_pos,
                       cl->buf->file_last - cl->buf->file_pos);
    }

    for (cl = rb->bufs; cl; cl = cl->next) {
        ngx_log_debug7(NGX_LOG_DEBUG_EVENT, r->connection->log, 0,
                       "http rb->bufs t:%d f:%d %p, pos %p, size: %z "
                       "file: %O, size: %z",
                       cl->buf->temporary, cl->buf->in_file,
                       cl->buf->start, cl->buf->pos,
                       cl->buf->last - cl->buf->pos,
                       cl->buf->file_pos,
                       cl->buf->file_last - cl->buf->file_pos);
    }
#endif

    /* all source data consumed */
    rb->bufs = NULL;

    return NGX_OK;
}
/*
 * Stream-module write filter: sends buffered data for a stream session in
 * either direction.
 *
 * s             - the stream session
 * in            - chain of buffers to send this call
 * from_upstream - nonzero: write to the client connection; zero: write to
 *                 the upstream peer connection (separate pending chains are
 *                 kept per direction in the module ctx)
 *
 * Returns NGX_OK when all queued data was sent, NGX_AGAIN when data remains
 * buffered, NGX_ERROR on failure.
 */
static ngx_int_t
ngx_stream_write_filter(ngx_stream_session_t *s, ngx_chain_t *in,
    ngx_uint_t from_upstream)
{
    off_t                           size;
    ngx_uint_t                      last, flush, sync;
    ngx_chain_t                    *cl, *ln, **ll, **out, *chain;
    ngx_connection_t               *c;
    ngx_stream_write_filter_ctx_t  *ctx;

    ctx = ngx_stream_get_module_ctx(s, ngx_stream_write_filter_module);

    /* lazily create the per-session ctx holding both pending chains */
    if (ctx == NULL) {
        ctx = ngx_pcalloc(s->connection->pool,
                          sizeof(ngx_stream_write_filter_ctx_t));
        if (ctx == NULL) {
            return NGX_ERROR;
        }

        ngx_stream_set_ctx(s, ctx, ngx_stream_write_filter_module);
    }

    /* pick the target connection and its pending chain by direction */
    if (from_upstream) {
        c = s->connection;
        out = &ctx->from_upstream;

    } else {
        c = s->upstream->peer.connection;
        out = &ctx->from_downstream;
    }

    if (c->error) {
        return NGX_ERROR;
    }

    size = 0;
    flush = 0;
    sync = 0;
    last = 0;
    ll = out;

    /* find the size, the flush point and the last link of the saved chain */

    for (cl = *out; cl; cl = cl->next) {
        ll = &cl->next;

        ngx_log_debug7(NGX_LOG_DEBUG_EVENT, c->log, 0,
                       "write old buf t:%d f:%d %p, pos %p, size: %z "
                       "file: %O, size: %O",
                       cl->buf->temporary, cl->buf->in_file,
                       cl->buf->start, cl->buf->pos,
                       cl->buf->last - cl->buf->pos,
                       cl->buf->file_pos,
                       cl->buf->file_last - cl->buf->file_pos);

#if 1
        /* a zero-size non-special buffer indicates a broken filter */
        if (ngx_buf_size(cl->buf) == 0 && !ngx_buf_special(cl->buf)) {
            ngx_log_error(NGX_LOG_ALERT, c->log, 0,
                          "zero size buf in writer "
                          "t:%d r:%d f:%d %p %p-%p %p %O-%O",
                          cl->buf->temporary,
                          cl->buf->recycled,
                          cl->buf->in_file,
                          cl->buf->start,
                          cl->buf->pos,
                          cl->buf->last,
                          cl->buf->file,
                          cl->buf->file_pos,
                          cl->buf->file_last);

            ngx_debug_point();
            return NGX_ERROR;
        }
#endif

        size += ngx_buf_size(cl->buf);

        if (cl->buf->flush || cl->buf->recycled) {
            flush = 1;
        }

        if (cl->buf->sync) {
            sync = 1;
        }

        if (cl->buf->last_buf) {
            last = 1;
        }
    }

    /* add the new chain to the existent one */

    for (ln = in; ln; ln = ln->next) {
        cl = ngx_alloc_chain_link(c->pool);
        if (cl == NULL) {
            return NGX_ERROR;
        }

        cl->buf = ln->buf;
        *ll = cl;
        ll = &cl->next;

        ngx_log_debug7(NGX_LOG_DEBUG_EVENT, c->log, 0,
                       "write new buf t:%d f:%d %p, pos %p, size: %z "
                       "file: %O, size: %O",
                       cl->buf->temporary, cl->buf->in_file,
                       cl->buf->start, cl->buf->pos,
                       cl->buf->last - cl->buf->pos,
                       cl->buf->file_pos,
                       cl->buf->file_last - cl->buf->file_pos);

#if 1
        if (ngx_buf_size(cl->buf) == 0 && !ngx_buf_special(cl->buf)) {
            ngx_log_error(NGX_LOG_ALERT, c->log, 0,
                          "zero size buf in writer "
                          "t:%d r:%d f:%d %p %p-%p %p %O-%O",
                          cl->buf->temporary,
                          cl->buf->recycled,
                          cl->buf->in_file,
                          cl->buf->start,
                          cl->buf->pos,
                          cl->buf->last,
                          cl->buf->file,
                          cl->buf->file_pos,
                          cl->buf->file_last);

            ngx_debug_point();
            return NGX_ERROR;
        }
#endif

        size += ngx_buf_size(cl->buf);

        if (cl->buf->flush || cl->buf->recycled) {
            flush = 1;
        }

        if (cl->buf->sync) {
            sync = 1;
        }

        if (cl->buf->last_buf) {
            last = 1;
        }
    }

    *ll = NULL;

    ngx_log_debug3(NGX_LOG_DEBUG_STREAM, c->log, 0,
                   "stream write filter: l:%ui f:%ui s:%O", last, flush, size);

    if (size == 0
        && !(c->buffered & NGX_LOWLEVEL_BUFFERED)
        && !(last && c->need_last_buf))
    {
        /* nothing to send: free the chain links on flush/last/sync */
        if (last || flush || sync) {
            for (cl = *out; cl; /* void */) {
                ln = cl;
                cl = cl->next;
                ngx_free_chain(c->pool, ln);
            }

            *out = NULL;
            c->buffered &= ~NGX_STREAM_WRITE_BUFFERED;

            return NGX_OK;
        }

        ngx_log_error(NGX_LOG_ALERT, c->log, 0,
                      "the stream output chain is empty");

        ngx_debug_point();

        return NGX_ERROR;
    }

    /* no rate limiting in the stream filter: send everything (limit 0) */
    chain = c->send_chain(c, *out, 0);

    ngx_log_debug1(NGX_LOG_DEBUG_STREAM, c->log, 0,
                   "stream write filter %p", chain);

    if (chain == NGX_CHAIN_ERROR) {
        c->error = 1;
        return NGX_ERROR;
    }

    /* reclaim the chain links for buffers that were fully sent */
    for (cl = *out; cl && cl != chain; /* void */) {
        ln = cl;
        cl = cl->next;
        ngx_free_chain(c->pool, ln);
    }

    *out = chain;

    if (chain) {
        /* a shared connection must never keep pending output */
        if (c->shared) {
            ngx_log_error(NGX_LOG_ALERT, c->log, 0,
                          "shared connection is busy");
            return NGX_ERROR;
        }

        c->buffered |= NGX_STREAM_WRITE_BUFFERED;
        return NGX_AGAIN;
    }

    c->buffered &= ~NGX_STREAM_WRITE_BUFFERED;

    if (c->buffered & NGX_LOWLEVEL_BUFFERED) {
        return NGX_AGAIN;
    }

    return NGX_OK;
}
static ngx_uint_t ngx_reap_childs(ngx_cycle_t *cycle) { ngx_int_t i, n; ngx_uint_t live; ngx_channel_t ch; ch.command = NGX_CMD_CLOSE_CHANNEL; ch.fd = -1; live = 0; for (i = 0; i < ngx_last_process; i++) { ngx_log_debug7(NGX_LOG_DEBUG_EVENT, cycle->log, 0, "child: %d " PID_T_FMT " e:%d t:%d d:%d r:%d j:%d", i, ngx_processes[i].pid, ngx_processes[i].exiting, ngx_processes[i].exited, ngx_processes[i].detached, ngx_processes[i].respawn, ngx_processes[i].just_respawn); if (ngx_processes[i].pid == -1) { continue; } if (ngx_processes[i].exited) { if (!ngx_processes[i].detached) { ngx_close_channel(ngx_processes[i].channel, cycle->log); ngx_processes[i].channel[0] = -1; ngx_processes[i].channel[1] = -1; ch.pid = ngx_processes[i].pid; ch.slot = i; for (n = 0; n < ngx_last_process; n++) { if (ngx_processes[n].exited || ngx_processes[n].pid == -1 || ngx_processes[n].channel[0] == -1) { continue; } ngx_log_debug3(NGX_LOG_DEBUG_CORE, cycle->log, 0, "pass close channel s:%d pid:" PID_T_FMT " to:" PID_T_FMT, ch.slot, ch.pid, ngx_processes[n].pid); /* TODO: NGX_AGAIN */ ngx_write_channel(ngx_processes[n].channel[0], &ch, sizeof(ngx_channel_t), cycle->log); } } if (ngx_processes[i].respawn && !ngx_processes[i].exiting && !ngx_terminate && !ngx_quit) { if (ngx_spawn_process(cycle, ngx_processes[i].proc, ngx_processes[i].data, ngx_processes[i].name, i) == NGX_ERROR) { ngx_log_error(NGX_LOG_ALERT, cycle->log, 0, "can not respawn %s", ngx_processes[i].name); continue; } live = 1; continue; } if (ngx_processes[i].pid == ngx_new_binary) { ngx_new_binary = 0; if (ngx_noaccepting) { ngx_restart = 1; ngx_noaccepting = 0; } } if (i == ngx_last_process - 1) { ngx_last_process--; } else { ngx_processes[i].pid = -1; } } else if (ngx_processes[i].exiting || !ngx_processes[i].detached) { live = 1; } } return live; }