/*
 * Header filter implementing HTTP byte-range support (RFC 7233).
 *
 * Decides whether the response can be served as 206 Partial Content:
 * only main requests with status 200, HTTP/1.0+, a known content length
 * and allow_ranges set are eligible.  Honors If-Range (either an entity
 * tag or a Last-Modified date) before parsing the Range header.
 *
 * Returns the next header filter's status, NGX_ERROR on allocation
 * failure, or the result of the single/multipart header builders.
 */
static ngx_int_t
ngx_http_range_header_filter(ngx_http_request_t *r)
{
    time_t                        if_range_time;
    ngx_str_t                    *if_range, *etag;
    ngx_http_core_loc_conf_t     *clcf;
    ngx_http_range_filter_ctx_t  *ctx;

    /* ranges make no sense for subrequests, unknown lengths or non-200 */
    if (r->http_version < NGX_HTTP_VERSION_10
        || r->headers_out.status != NGX_HTTP_OK
        || r != r->main
        || r->headers_out.content_length_n == -1
        || !r->allow_ranges)
    {
        return ngx_http_next_header_filter(r);
    }

    clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module);

    /* max_ranges 0 disables range processing entirely */
    if (clcf->max_ranges == 0) {
        return ngx_http_next_header_filter(r);
    }

    /* a usable Range header must start with "bytes=" (>= 7 chars total) */
    if (r->headers_in.range == NULL
        || r->headers_in.range->value.len < 7
        || ngx_strncasecmp(r->headers_in.range->value.data,
                           (u_char *) "bytes=", 6)
           != 0)
    {
        goto next_filter;
    }

    if (r->headers_in.if_range) {

        if_range = &r->headers_in.if_range->value;

        /* a trailing '"' means If-Range carries an entity tag */
        if (if_range->len >= 2 && if_range->data[if_range->len - 1] == '"') {

            if (r->headers_out.etag == NULL) {
                goto next_filter;
            }

            etag = &r->headers_out.etag->value;

            ngx_log_debug2(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                           "http ir:%V etag:%V", if_range, etag);

            if (if_range->len != etag->len
                || ngx_strncmp(if_range->data, etag->data, etag->len) != 0)
            {
                goto next_filter;
            }

            goto parse;
        }

        /* otherwise If-Range is a date; compare against Last-Modified */

        if (r->headers_out.last_modified_time == (time_t) -1) {
            goto next_filter;
        }

        if_range_time = ngx_http_parse_time(if_range->data, if_range->len);

        /*
         * %T is the nginx log specifier for time_t; the previous %d
         * mismatched the 64-bit time_t arguments on LP64/LLP64 builds
         */
        ngx_log_debug2(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                       "http ir:%T lm:%T",
                       if_range_time, r->headers_out.last_modified_time);

        if (if_range_time != r->headers_out.last_modified_time) {
            goto next_filter;
        }
    }

parse:

    ctx = ngx_pcalloc(r->pool, sizeof(ngx_http_range_filter_ctx_t));
    if (ctx == NULL) {
        return NGX_ERROR;
    }

    if (ngx_array_init(&ctx->ranges, r->pool, 1, sizeof(ngx_http_range_t))
        != NGX_OK)
    {
        return NGX_ERROR;
    }

    switch (ngx_http_range_parse(r, ctx, clcf->max_ranges)) {

    case NGX_OK:
        ngx_http_set_ctx(r, ctx, ngx_http_range_body_filter_module);

        r->headers_out.status = NGX_HTTP_PARTIAL_CONTENT;
        r->headers_out.status_line.len = 0;

        if (ctx->ranges.nelts == 1) {
            return ngx_http_range_singlepart_header(r, ctx);
        }

        return ngx_http_range_multipart_header(r, ctx);

    case NGX_HTTP_RANGE_NOT_SATISFIABLE:
        return ngx_http_range_not_satisfiable(r);

    case NGX_ERROR:
        return NGX_ERROR;

    default: /* NGX_DECLINED */
        break;
    }

next_filter:

    /* advertise range support even when this request gets a full body */
    r->headers_out.accept_ranges = ngx_list_push(&r->headers_out.headers);
    if (r->headers_out.accept_ranges == NULL) {
        return NGX_ERROR;
    }

    r->headers_out.accept_ranges->hash = 1;
    ngx_str_set(&r->headers_out.accept_ranges->key, "Accept-Ranges");
    ngx_str_set(&r->headers_out.accept_ranges->value, "bytes");

    return ngx_http_next_header_filter(r);
}
/*
 * Serialize the response status line and headers into a single buffer
 * so they can be stored in the cache ahead of the body.
 *
 * Works in two passes: first computes the exact byte count (len), then
 * allocates one temp buffer and writes the same fields in the same
 * order.  The two passes MUST stay in lockstep: the b->last != b->end
 * check at the bottom catches any accounting drift.
 *
 * On success the serialized header chain is placed in
 * ctx->body_to_cache and its length added to ctx->response_length.
 * Returns NGX_OK, or NGX_ERROR on allocation/accounting failure.
 */
ngx_int_t
ngx_http_srcache_store_response_header(ngx_http_request_t *r,
    ngx_http_srcache_ctx_t *ctx)
{
    ngx_chain_t                  *cl;
    size_t                        len;
    ngx_buf_t                    *b;
    ngx_uint_t                    status;
    ngx_uint_t                    i;
    ngx_str_t                    *status_line;
    ngx_list_part_t              *part;
    ngx_table_elt_t              *header;

    /* exactly large enough for an IMF-fixdate, no NUL terminator */
    u_char                        buf[sizeof("Mon, 28 Sep 1970 06:00:00 GMT")
                                      - 1];

    ngx_http_srcache_loc_conf_t  *conf;

    conf = ngx_http_get_module_loc_conf(r, ngx_http_srcache_filter_module);

    dd("request: %p, uri: %.*s", r, (int) r->uri.len, r->uri.data);

    /* --- pass 1: size the buffer --- */

    len = sizeof("HTTP/1.x ") - 1 + sizeof(CRLF) - 1
          /* the end of the header */
          + sizeof(CRLF) - 1;

    if (r->headers_out.status_line.len) {
        dd("status line defined");

        len += r->headers_out.status_line.len;
        status_line = &r->headers_out.status_line;
        status = 0;

    } else {
        dd("status line not defined");

        status = r->headers_out.status;

        /*
         * map the numeric status onto ngx_http_status_lines[], which
         * packs the 2XX, 3XX, 4XX and 5XX ranges back to back
         */
        if (status >= NGX_HTTP_OK
            && status < NGX_HTTP_LAST_LEVEL_200)
        {
            /* 2XX */
            status -= NGX_HTTP_OK;
            dd("status: %d", (int) status);
            status_line = &ngx_http_status_lines[status];
            len += ngx_http_status_lines[status].len;

        } else if (status >= NGX_HTTP_MOVED_PERMANENTLY
                   && status < NGX_HTTP_LAST_LEVEL_300)
        {
            /* 3XX */

            if (status == NGX_HTTP_NOT_MODIFIED) {
                r->header_only = 1;
            }

            status = status - NGX_HTTP_MOVED_PERMANENTLY + NGX_HTTP_LEVEL_200;
            status_line = &ngx_http_status_lines[status];
            len += ngx_http_status_lines[status].len;

        } else if (status >= NGX_HTTP_BAD_REQUEST
                   && status < NGX_HTTP_LAST_LEVEL_400)
        {
            /* 4XX */
            status = status - NGX_HTTP_BAD_REQUEST
                     + NGX_HTTP_LEVEL_200
                     + NGX_HTTP_LEVEL_300;

            status_line = &ngx_http_status_lines[status];
            len += ngx_http_status_lines[status].len;

        } else if (status >= NGX_HTTP_INTERNAL_SERVER_ERROR
                   && status < NGX_HTTP_LAST_LEVEL_500)
        {
            /* 5XX */
            status = status - NGX_HTTP_INTERNAL_SERVER_ERROR
                     + NGX_HTTP_LEVEL_200
                     + NGX_HTTP_LEVEL_300
                     + NGX_HTTP_LEVEL_400;

            status_line = &ngx_http_status_lines[status];
            len += ngx_http_status_lines[status].len;

        } else {
            /* no canned status line; will be printed numerically */
            len += NGX_INT_T_LEN;
            status_line = NULL;
        }
    }

    if (!conf->hide_content_type && r->headers_out.content_type.len) {
        len += sizeof("Content-Type: ") - 1
               + r->headers_out.content_type.len + 2;

        /*
         * content_type_len == content_type.len means no charset was
         * appended yet, so one may need to be added here
         */
        if (r->headers_out.content_type_len == r->headers_out.content_type.len
            && r->headers_out.charset.len)
        {
            len += sizeof("; charset=") - 1 + r->headers_out.charset.len;
        }
    }

    if (!conf->hide_last_modified) {
        if (r->headers_out.last_modified_time != -1) {
            /* only these statuses may legitimately carry Last-Modified */
            if (r->headers_out.status != NGX_HTTP_OK
                && r->headers_out.status != NGX_HTTP_PARTIAL_CONTENT
                && r->headers_out.status != NGX_HTTP_NOT_MODIFIED
                && r->headers_out.status != NGX_HTTP_NO_CONTENT)
            {
                r->headers_out.last_modified_time = -1;
                r->headers_out.last_modified = NULL;
            }
        }

        dd("last modified time: %d", (int) r->headers_out.last_modified_time);

        if (r->headers_out.last_modified == NULL
            && r->headers_out.last_modified_time != -1)
        {
            /* format the date now; 'buf' is reused in pass 2 */
            (void) ngx_http_time(buf, r->headers_out.last_modified_time);

            len += sizeof("Last-Modified: ") - 1 + sizeof(buf) + 2;
        }
    }

    if (r->allow_ranges) {
        len += sizeof("X-SRCache-Allow-Ranges: 1") - 1 + 2;
    }

    /* walk headers_out.headers (an ngx_list_t of parts) to size them */
    part = &r->headers_out.headers.part;
    header = part->elts;

    for (i = 0; /* void */; i++) {

        if (i >= part->nelts) {
            if (part->next == NULL) {
                break;
            }

            part = part->next;
            header = part->elts;
            i = 0;
        }

        /* hash == 0 marks a deleted header entry */
        if (header[i].hash == 0) {
            continue;
        }

        /* skip headers the user configured to hide */
        if (ngx_hash_find(&conf->hide_headers_hash, header[i].hash,
                          header[i].lowcase_key, header[i].key.len))
        {
            continue;
        }

        len += header[i].key.len + sizeof(": ") - 1 + header[i].value.len
               + sizeof(CRLF) - 1;
    }

    /* --- pass 2: write exactly 'len' bytes --- */

    b = ngx_create_temp_buf(r->pool, len);
    if (b == NULL) {
        return NGX_ERROR;
    }

    /* "HTTP/1.x " */
    b->last = ngx_cpymem(b->last, "HTTP/1.1 ", sizeof("HTTP/1.x ") - 1);

    /* status line */
    if (status_line) {
        b->last = ngx_copy(b->last, status_line->data, status_line->len);

    } else {
        b->last = ngx_sprintf(b->last, "%ui", status);
    }

    *b->last++ = CR; *b->last++ = LF;

    if (!conf->hide_content_type && r->headers_out.content_type.len) {
        b->last = ngx_cpymem(b->last, "Content-Type: ",
                             sizeof("Content-Type: ") - 1);

        b->last = ngx_copy(b->last, r->headers_out.content_type.data,
                           r->headers_out.content_type.len);

        if (r->headers_out.content_type_len == r->headers_out.content_type.len
            && r->headers_out.charset.len)
        {
            b->last = ngx_cpymem(b->last, "; charset=",
                                 sizeof("; charset=") - 1);

            b->last = ngx_copy(b->last, r->headers_out.charset.data,
                               r->headers_out.charset.len);
        }

        *b->last++ = CR; *b->last++ = LF;
    }

    if (!conf->hide_last_modified
        && r->headers_out.last_modified == NULL
        && r->headers_out.last_modified_time != -1)
    {
        b->last = ngx_cpymem(b->last, "Last-Modified: ",
                             sizeof("Last-Modified: ") - 1);

        /* 'buf' was filled by ngx_http_time() during pass 1 */
        b->last = ngx_cpymem(b->last, buf, sizeof(buf));

        *b->last++ = CR; *b->last++ = LF;
    }

    if (r->allow_ranges) {
        b->last = ngx_cpymem(b->last, "X-SRCache-Allow-Ranges: 1\r\n",
                             sizeof("X-SRCache-Allow-Ranges: 1\r\n") - 1);
    }

    /* same list walk as pass 1, now copying key: value CRLF */
    part = &r->headers_out.headers.part;
    header = part->elts;

    for (i = 0; /* void */; i++) {

        if (i >= part->nelts) {
            if (part->next == NULL) {
                break;
            }

            part = part->next;
            header = part->elts;
            i = 0;
        }

        if (header[i].hash == 0) {
            continue;
        }

        dd("header hash: %lu, hash lc: %lu", (unsigned long) header[i].hash,
           (unsigned long) ngx_hash_key_lc(header[i].key.data,
                                           header[i].key.len));

        if (ngx_hash_find(&conf->hide_headers_hash, header[i].hash,
                          header[i].lowcase_key, header[i].key.len))
        {
            dd("skipped header key: %.*s", (int) header[i].key.len,
               header[i].key.data);
            continue;
        }

        dd("header not skipped");

        b->last = ngx_copy(b->last, header[i].key.data, header[i].key.len);
        *b->last++ = ':'; *b->last++ = ' ';

        b->last = ngx_copy(b->last, header[i].value.data,
                           header[i].value.len);
        *b->last++ = CR; *b->last++ = LF;
    }

    ngx_log_debug2(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                   "srcache store header %*s",
                   (size_t) (b->last - b->pos), b->pos);

    /* the end of HTTP header */
    *b->last++ = CR; *b->last++ = LF;

    /*
     * NOTE(review): this fires when pass 1 and pass 2 disagree.  The
     * message is labeled "srcache_fetch" although this is the store
     * path, and (b->last - b->end) is negative when bytes are "left";
     * presumably (b->end - b->last) was intended — confirm upstream.
     */
    if (b->last != b->end) {
        ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
                      "srcache_fetch: buffer error when serializing the "
                      "response header: %O left",
                      (off_t) (b->last - b->end));

        return NGX_ERROR;
    }

    cl = ngx_alloc_chain_link(r->pool);
    if (cl == NULL) {
        return NGX_ERROR;
    }

    cl->buf = b;
    cl->next = NULL;

    /* hand the serialized header off to the store machinery */
    ctx->body_to_cache = cl;

    ctx->response_length += len;

    return NGX_OK;
}
/*
 * Build the upstream request buffer for memcached:
 *
 *     get <escaped $memcached_key>CRLF
 *
 * The key comes from the $memcached_key variable; characters that are
 * unsafe in the memcached protocol are percent-escaped.  The resulting
 * buffer chain is attached to r->upstream->request_bufs and the final
 * (escaped) key is remembered in the module context for later lookup.
 *
 * Returns NGX_OK, or NGX_ERROR when the key variable is unset/empty or
 * an allocation fails.
 */
static ngx_int_t
ngx_http_memcached_create_request(ngx_http_request_t *r)
{
    size_t                          size;
    uintptr_t                       escaped;
    ngx_buf_t                      *buf;
    ngx_chain_t                    *chain;
    ngx_http_memcached_ctx_t       *mctx;
    ngx_http_variable_value_t      *key_val;
    ngx_http_memcached_loc_conf_t  *conf;

    conf = ngx_http_get_module_loc_conf(r, ngx_http_memcached_module);

    key_val = ngx_http_get_indexed_variable(r, conf->index);

    if (key_val == NULL || key_val->not_found || key_val->len == 0) {
        ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
                      "the \"$memcached_key\" variable is not set");
        return NGX_ERROR;
    }

    /* each escaped byte expands to "%XX", i.e. two extra characters */
    escaped = 2 * ngx_escape_uri(NULL, key_val->data, key_val->len,
                                 NGX_ESCAPE_MEMCACHED);

    size = sizeof("get ") - 1 + key_val->len + escaped + sizeof(CRLF) - 1;

    buf = ngx_create_temp_buf(r->pool, size);
    if (buf == NULL) {
        return NGX_ERROR;
    }

    chain = ngx_alloc_chain_link(r->pool);
    if (chain == NULL) {
        return NGX_ERROR;
    }

    chain->buf = buf;
    chain->next = NULL;

    r->upstream->request_bufs = chain;

    /* command prefix */
    buf->last = ngx_cpymem(buf->last, "get ", sizeof("get ") - 1);

    mctx = ngx_http_get_module_ctx(r, ngx_http_memcached_module);

    /* remember where the key starts so its final form can be recorded */
    mctx->key.data = buf->last;

    if (escaped) {
        buf->last = (u_char *) ngx_escape_uri(buf->last, key_val->data,
                                              key_val->len,
                                              NGX_ESCAPE_MEMCACHED);

    } else {
        buf->last = ngx_copy(buf->last, key_val->data, key_val->len);
    }

    mctx->key.len = buf->last - mctx->key.data;

    ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                   "http memcached request: \"%V\"", &mctx->key);

    *buf->last++ = CR;
    *buf->last++ = LF;

    return NGX_OK;
}
/*
 * Implements udpsock:setpeername(host [, port]) for ngx.socket.udp.
 *
 * Parses/validates the arguments, creates or reuses the upstream
 * userdata stored in the socket object table, and either connects
 * immediately (address given literally) or kicks off an asynchronous
 * DNS resolution and yields the current Lua coroutine.
 *
 * Returns (on the Lua stack) 1 on success, or nil + error message;
 * raises a Lua error for programmer mistakes (bad argc, no request).
 */
static int
ngx_http_lua_socket_udp_setpeername(lua_State *L)
{
    ngx_http_request_t                  *r;
    ngx_http_lua_ctx_t                  *ctx;
    ngx_str_t                            host;
    int                                  port;
    ngx_resolver_ctx_t                  *rctx, temp;
    ngx_http_core_loc_conf_t            *clcf;
    int                                  saved_top;
    int                                  n;
    u_char                              *p;
    size_t                               len;
    ngx_url_t                            url;
    ngx_int_t                            rc;
    ngx_http_lua_loc_conf_t             *llcf;
    int                                  timeout;
    ngx_http_lua_co_ctx_t               *coctx;
    ngx_http_lua_udp_connection_t       *uc;
    ngx_http_lua_socket_udp_upstream_t  *u;

    /*
     * TODO: we should probably accept an extra argument to setpeername()
     * to allow the user bind the datagram unix domain socket himself,
     * which is necessary for systems without autobind support.
     */

    n = lua_gettop(L);
    if (n != 2 && n != 3) {
        return luaL_error(L, "ngx.socket.udp setpeername: expecting 2 or 3 "
                          "arguments (including the object), but seen %d", n);
    }

    r = ngx_http_lua_get_req(L);
    if (r == NULL) {
        return luaL_error(L, "no request found");
    }

    ctx = ngx_http_get_module_ctx(r, ngx_http_lua_module);
    if (ctx == NULL) {
        return luaL_error(L, "no ctx found");
    }

    ngx_http_lua_check_context(L, ctx, NGX_HTTP_LUA_CONTEXT_REWRITE
                               | NGX_HTTP_LUA_CONTEXT_ACCESS
                               | NGX_HTTP_LUA_CONTEXT_CONTENT
                               | NGX_HTTP_LUA_CONTEXT_TIMER
                               | NGX_HTTP_LUA_CONTEXT_SSL_CERT);

    luaL_checktype(L, 1, LUA_TTABLE);

    p = (u_char *) luaL_checklstring(L, 2, &len);

    /* copy the host into the request pool, NUL-terminated for ngx_parse_url */
    host.data = ngx_palloc(r->pool, len + 1);
    if (host.data == NULL) {
        return luaL_error(L, "no memory");
    }

    host.len = len;

    ngx_memcpy(host.data, p, len);
    host.data[len] = '\0';

    if (n == 3) {
        port = luaL_checkinteger(L, 3);

        /* valid UDP port numbers are 0..65535; was "> 65536" (off by one),
         * which let the invalid port 65536 through */
        if (port < 0 || port > 65535) {
            lua_pushnil(L);
            lua_pushfstring(L, "bad port number: %d", port);
            return 2;
        }

    } else { /* n == 2 */
        port = 0;
    }

    /* fetch any previously created upstream userdata from the object table */
    lua_rawgeti(L, 1, SOCKET_CTX_INDEX);
    u = lua_touserdata(L, -1);
    lua_pop(L, 1);

    if (u) {
        if (u->request && u->request != r) {
            return luaL_error(L, "bad request");
        }

        if (u->waiting) {
            lua_pushnil(L);
            lua_pushliteral(L, "socket busy");
            return 2;
        }

        if (u->udp_connection.connection) {
            ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                           "lua udp socket reconnect without shutting down");

            ngx_http_lua_socket_udp_finalize(r, u);
        }

        ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                       "lua reuse socket upstream ctx");

    } else {
        u = lua_newuserdata(L, sizeof(ngx_http_lua_socket_udp_upstream_t));
        if (u == NULL) {
            return luaL_error(L, "no memory");
        }

#if 1
        lua_pushlightuserdata(L, &ngx_http_lua_udp_udata_metatable_key);
        lua_rawget(L, LUA_REGISTRYINDEX);
        lua_setmetatable(L, -2);
#endif

        lua_rawseti(L, 1, SOCKET_CTX_INDEX);
    }

    ngx_memzero(u, sizeof(ngx_http_lua_socket_udp_upstream_t));

    u->request = r; /* set the controlling request */
    llcf = ngx_http_get_module_loc_conf(r, ngx_http_lua_module);

    u->conf = llcf;

    uc = &u->udp_connection;

    uc->log = *r->connection->log;

    dd("lua peer connection log: %p", &uc->log);

    /* per-socket timeout (set via settimeout()) overrides the conf default */
    lua_rawgeti(L, 1, SOCKET_TIMEOUT_INDEX);
    timeout = (ngx_int_t) lua_tointeger(L, -1);
    lua_pop(L, 1);

    if (timeout > 0) {
        u->read_timeout = (ngx_msec_t) timeout;

    } else {
        u->read_timeout = u->conf->read_timeout;
    }

    ngx_memzero(&url, sizeof(ngx_url_t));

    url.url.len = host.len;
    url.url.data = host.data;
    url.default_port = (in_port_t) port;
    url.no_resolve = 1;

    if (ngx_parse_url(r->pool, &url) != NGX_OK) {
        lua_pushnil(L);

        if (url.err) {
            lua_pushfstring(L, "failed to parse host name \"%s\": %s",
                            host.data, url.err);

        } else {
            lua_pushfstring(L, "failed to parse host name \"%s\"", host.data);
        }

        return 2;
    }

    u->resolved = ngx_pcalloc(r->pool, sizeof(ngx_http_upstream_resolved_t));
    if (u->resolved == NULL) {
        return luaL_error(L, "no memory");
    }

    if (url.addrs && url.addrs[0].sockaddr) {
        /* host was a literal address: no DNS needed */
        ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                       "lua udp socket network address given directly");

        u->resolved->sockaddr = url.addrs[0].sockaddr;
        u->resolved->socklen = url.addrs[0].socklen;
        u->resolved->naddrs = 1;
        u->resolved->host = url.addrs[0].name;

    } else {
        u->resolved->host = host;
        u->resolved->port = (in_port_t) port;
    }

    if (u->resolved->sockaddr) {
        rc = ngx_http_lua_socket_resolve_retval_handler(r, u, L);
        if (rc == NGX_AGAIN) {
            return lua_yield(L, 0);
        }

        return rc;
    }

    /* start asynchronous name resolution */

    clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module);

    temp.name = host;
    rctx = ngx_resolve_start(clcf->resolver, &temp);
    if (rctx == NULL) {
        u->ft_type |= NGX_HTTP_LUA_SOCKET_FT_RESOLVER;
        lua_pushnil(L);
        lua_pushliteral(L, "failed to start the resolver");
        return 2;
    }

    if (rctx == NGX_NO_RESOLVER) {
        u->ft_type |= NGX_HTTP_LUA_SOCKET_FT_RESOLVER;
        lua_pushnil(L);
        lua_pushfstring(L, "no resolver defined to resolve \"%s\"", host.data);
        return 2;
    }

    rctx->name = host;
#if !defined(nginx_version) || nginx_version < 1005008
    rctx->type = NGX_RESOLVE_A;
#endif
    rctx->handler = ngx_http_lua_socket_resolve_handler;
    rctx->data = u;
    rctx->timeout = clcf->resolver_timeout;

    u->co_ctx = ctx->cur_co_ctx;
    u->resolved->ctx = rctx;

    /* remember the stack top: the resolve handler may run synchronously
     * and push its return values before ngx_resolve_name() returns */
    saved_top = lua_gettop(L);

    coctx = ctx->cur_co_ctx;
    ngx_http_lua_cleanup_pending_operation(coctx);
    coctx->cleanup = ngx_http_lua_udp_resolve_cleanup;

    if (ngx_resolve_name(rctx) != NGX_OK) {
        ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                       "lua udp socket fail to run resolver immediately");

        u->ft_type |= NGX_HTTP_LUA_SOCKET_FT_RESOLVER;

        u->resolved->ctx = NULL;
        lua_pushnil(L);
        lua_pushfstring(L, "%s could not be resolved", host.data);

        return 2;
    }

    if (u->waiting == 1) {
        /* resolved and already connecting */
        return lua_yield(L, 0);
    }

    n = lua_gettop(L) - saved_top;
    if (n) {
        /* errors occurred during resolving or connecting
         * or already connected */
        return n;
    }

    /* still resolving */

    u->waiting = 1;
    u->prepare_retvals = ngx_http_lua_socket_resolve_retval_handler;

    coctx->data = u;

    if (ctx->entered_content_phase) {
        r->write_event_handler = ngx_http_lua_content_wev_handler;

    } else {
        r->write_event_handler = ngx_http_core_run_phases;
    }

    return lua_yield(L, 0);
}
/*
 * Implements udpsock:receive([size]) for ngx.socket.udp.
 *
 * Attempts a non-blocking read of one datagram; when the socket is not
 * readable yet it registers a read handler and yields the current Lua
 * coroutine, to be resumed by the event loop once data arrives.
 *
 * Returns (on the Lua stack) the datagram as a string, or
 * nil + error message ("closed", "socket busy", timeout, ...).
 */
static int
ngx_http_lua_socket_udp_receive(lua_State *L)
{
    ngx_http_request_t                  *r;
    ngx_http_lua_socket_udp_upstream_t  *u;
    ngx_int_t                            rc;
    ngx_http_lua_ctx_t                  *ctx;
    ngx_http_lua_co_ctx_t               *coctx;
    size_t                               size;
    int                                  nargs;
    ngx_http_lua_loc_conf_t             *llcf;

    nargs = lua_gettop(L);
    if (nargs != 1 && nargs != 2) {
        return luaL_error(L, "expecting 1 or 2 arguments "
                          "(including the object), but got %d", nargs);
    }

    r = ngx_http_lua_get_req(L);
    if (r == NULL) {
        return luaL_error(L, "no request found");
    }

    ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                   "lua udp socket calling receive() method");

    luaL_checktype(L, 1, LUA_TTABLE);

    /* the upstream userdata lives in the socket object table */
    lua_rawgeti(L, 1, SOCKET_CTX_INDEX);
    u = lua_touserdata(L, -1);
    lua_pop(L, 1);

    if (u == NULL || u->udp_connection.connection == NULL) {
        llcf = ngx_http_get_module_loc_conf(r, ngx_http_lua_module);

        if (llcf->log_socket_errors) {
            ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
                          "attempt to receive data on a closed socket: u:%p, "
                          "c:%p", u, u ? u->udp_connection.connection : NULL);
        }

        lua_pushnil(L);
        lua_pushliteral(L, "closed");
        return 2;
    }

    /* sockets must not be shared across requests */
    if (u->request != r) {
        return luaL_error(L, "bad request");
    }

    /* clear any failure flags left over from a previous operation */
    if (u->ft_type) {
        u->ft_type = 0;
    }

#if 1
    /* another coroutine is already blocked on this socket */
    if (u->waiting) {
        lua_pushnil(L);
        lua_pushliteral(L, "socket busy");
        return 2;
    }
#endif

    ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                   "lua udp socket read timeout: %M", u->read_timeout);

    /* optional size argument, clamped to the max UDP datagram size */
    size = (size_t) luaL_optnumber(L, 2, UDP_MAX_DATAGRAM_SIZE);
    size = ngx_min(size, UDP_MAX_DATAGRAM_SIZE);

    u->recv_buf_size = size;

    ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                   "lua udp socket receive buffer size: %uz",
                   u->recv_buf_size);

    rc = ngx_http_lua_socket_udp_read(r, u);

    if (rc == NGX_ERROR) {
        dd("read failed: %d", (int) u->ft_type);
        rc = ngx_http_lua_socket_udp_receive_retval_handler(r, u, L);
        dd("udp receive retval returned: %d", (int) rc);
        return rc;
    }

    if (rc == NGX_OK) {
        ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                       "lua udp socket receive done in a single run");

        return ngx_http_lua_socket_udp_receive_retval_handler(r, u, L);
    }

    /* n == NGX_AGAIN */

    /* no data yet: park the coroutine until the read event fires */
    u->read_event_handler = ngx_http_lua_socket_udp_read_handler;

    ctx = ngx_http_get_module_ctx(r, ngx_http_lua_module);
    if (ctx == NULL) {
        return luaL_error(L, "no request ctx found");
    }

    coctx = ctx->cur_co_ctx;

    ngx_http_lua_cleanup_pending_operation(coctx);
    coctx->cleanup = ngx_http_lua_udp_socket_cleanup;
    coctx->data = u;

    if (ctx->entered_content_phase) {
        r->write_event_handler = ngx_http_lua_content_wev_handler;

    } else {
        r->write_event_handler = ngx_http_core_run_phases;
    }

    u->co_ctx = coctx;
    u->waiting = 1;
    u->prepare_retvals = ngx_http_lua_socket_udp_receive_retval_handler;

    return lua_yield(L, 0);
}
/*
 * Main request handler for combined publisher/subscriber locations.
 *
 * Sets up the per-request context, enforces the configured CORS origin,
 * then dispatches on transport (websocket vs. plain HTTP) and method:
 * GET subscribes (eventsource / chunked / multipart / poll / raw stream
 * / longpoll, in that priority order), POST/PUT/DELETE publish, and
 * OPTIONS answers CORS preflight.  Anything not permitted by the
 * location config gets a 403.
 *
 * Returns NGX_DONE for asynchronous subscriber responses, NGX_OK when a
 * response has been produced, or an HTTP error status.
 */
ngx_int_t nchan_pubsub_handler(ngx_http_request_t *r) {
  nchan_loc_conf_t       *cf = ngx_http_get_module_loc_conf(r, ngx_nchan_module);
  ngx_str_t              *channel_id;
  subscriber_t           *sub;
  nchan_msg_id_t         *msg_id;
  ngx_int_t               rc = NGX_DONE;
  nchan_request_ctx_t    *ctx;
  ngx_str_t              *origin_header;

#if NCHAN_BENCHMARK
  /* record the arrival time as early as possible */
  struct timeval          tv;
  ngx_gettimeofday(&tv);
#endif

  if((ctx = ngx_pcalloc(r->pool, sizeof(nchan_request_ctx_t))) == NULL) {
    return NGX_HTTP_INTERNAL_SERVER_ERROR;
  }
  ngx_http_set_ctx(r, ctx, ngx_nchan_module);

#if NCHAN_BENCHMARK
  ctx->start_tv = tv;
#endif

  //X-Accel-Redirected requests get their method mangled to GET. De-mangle it if necessary
  if(r->upstream && r->upstream->headers_in.x_accel_redirect) {
    //yep, we got x-accel-redirected. what was the original method?...
    nchan_recover_x_accel_redirected_request_method(r);
  }

  if((origin_header = nchan_get_header_value(r, NCHAN_HEADER_ORIGIN)) != NULL) {
    ctx->request_origin_header = *origin_header;
    /* "*" in the config allows any Origin */
    if(!(cf->allow_origin.len == 1 && cf->allow_origin.data[0] == '*')) {
      /*
       * NOTE(review): this match compares lengths plus a substring
       * search (ngx_strnstr) rather than an exact byte comparison —
       * presumably intended as an exact match; confirm upstream.
       */
      if(!(origin_header->len == cf->allow_origin.len && ngx_strnstr(origin_header->data, (char *)cf->allow_origin.data, origin_header->len) != NULL)) {
        //CORS origin match failed! return a 403 forbidden
        goto forbidden;
      }
    }
  }
  else {
    ctx->request_origin_header.len=0;
    ctx->request_origin_header.data=NULL;
  }

  if((channel_id = nchan_get_channel_id(r, SUB, 1)) == NULL) {
    //just get the subscriber_channel_id for now. the publisher one is handled elsewhere
    /* a status already set means the id-extraction produced a response */
    return r->headers_out.status ? NGX_OK : NGX_HTTP_INTERNAL_SERVER_ERROR;
  }

  if(nchan_detect_websocket_request(r)) {
    //want websocket?
    if(cf->sub.websocket) {
      //we prefer to subscribe
#if FAKESHARD
      memstore_sub_debug_start();
#endif
      if((msg_id = nchan_subscriber_get_msg_id(r)) == NULL) {
        goto bad_msgid;
      }
      if((sub = websocket_subscriber_create(r, msg_id)) == NULL) {
        ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, "unable to create websocket subscriber");
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
      }
      sub->fn->subscribe(sub, channel_id);
#if FAKESHARD
      memstore_sub_debug_end();
#endif
    }
    else if(cf->pub.websocket) {
      //no need to subscribe, but keep a connection open for publishing
      //not yet implemented
      nchan_create_websocket_publisher(r);
    }
    else goto forbidden;
    return NGX_DONE;
  }
  else {
    /* plain HTTP: pick a subscriber factory based on config + request */
    subscriber_t *(*sub_create)(ngx_http_request_t *r, nchan_msg_id_t *msg_id) = NULL;

    switch(r->method) {
      case NGX_HTTP_GET:
        if(cf->sub.eventsource && nchan_detect_eventsource_request(r)) {
          sub_create = eventsource_subscriber_create;
        }
        else if(cf->sub.http_chunked && nchan_detect_chunked_subscriber_request(r)) {
          sub_create = http_chunked_subscriber_create;
        }
        else if(cf->sub.http_multipart && nchan_detect_multipart_subscriber_request(r)) {
          sub_create = http_multipart_subscriber_create;
        }
        else if(cf->sub.poll) {
          sub_create = intervalpoll_subscriber_create;
        }
        else if(cf->sub.http_raw_stream) {
          sub_create = http_raw_stream_subscriber_create;
        }
        else if(cf->sub.longpoll) {
          sub_create = longpoll_subscriber_create;
        }
        else if(cf->pub.http) {
          /* GET on a publisher-only location: treat as a publisher request */
          nchan_http_publisher_handler(r);
        }
        else {
          goto forbidden;
        }

        if(sub_create) {
#if FAKESHARD
          memstore_sub_debug_start();
#endif
          if((msg_id = nchan_subscriber_get_msg_id(r)) == NULL) {
            goto bad_msgid;
          }
          if((sub = sub_create(r, msg_id)) == NULL) {
            ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, "unable to create subscriber");
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
          }

          sub->fn->subscribe(sub, channel_id);
#if FAKESHARD
          memstore_sub_debug_end();
#endif
        }
        break;

      case NGX_HTTP_POST:
      case NGX_HTTP_PUT:
        if(cf->pub.http) {
          nchan_http_publisher_handler(r);
        }
        else goto forbidden;
        break;

      case NGX_HTTP_DELETE:
        if(cf->pub.http) {
          nchan_http_publisher_handler(r);
        }
        else goto forbidden;
        break;

      case NGX_HTTP_OPTIONS:
        /* CORS preflight: advertise publisher or subscriber capabilities */
        if(cf->pub.http) {
          nchan_OPTIONS_respond(r, &cf->allow_origin, &NCHAN_ACCESS_CONTROL_ALLOWED_PUBLISHER_HEADERS, &NCHAN_ALLOW_GET_POST_PUT_DELETE);
        }
        else if(cf->sub.poll || cf->sub.longpoll || cf->sub.eventsource || cf->sub.websocket) {
          nchan_OPTIONS_respond(r, &cf->allow_origin, &NCHAN_ACCESS_CONTROL_ALLOWED_SUBSCRIBER_HEADERS, &NCHAN_ALLOW_GET);
        }
        else goto forbidden;
        break;
    }
  }

  /* rc stays NGX_DONE: subscriber/publisher handlers finish asynchronously */
  return rc;

forbidden:
  nchan_respond_status(r, NGX_HTTP_FORBIDDEN, NULL, 0);
  return NGX_OK;

bad_msgid:
  nchan_respond_cstring(r, NGX_HTTP_BAD_REQUEST, &NCHAN_CONTENT_TYPE_TEXT_PLAIN, "Message ID invalid", 0);
  return NGX_OK;
}
/*
 * Log-phase handler that accumulates per-zone request statistics.
 *
 * For every monitor node attached to this request it bumps counters for
 * connections, requests, bytes in/out, status-code classes, request
 * time, and (when an upstream was used) upstream request time/tries.
 *
 * Returns NGX_OK (stats are best-effort), NGX_ERROR only when the
 * per-request store cannot be created.
 */
static ngx_int_t
ngx_http_reqstat_log_handler(ngx_http_request_t *r)
{
    ngx_uint_t                    i, j, status, utries;
    ngx_time_t                   *tp;
    ngx_msec_int_t                ms, total_ms;
    ngx_http_reqstat_conf_t      *slcf;
    ngx_http_reqstat_rbnode_t    *fnode, **fnode_store;
    ngx_http_upstream_state_t    *state;
    ngx_http_reqstat_store_t     *store;

    slcf = ngx_http_get_module_loc_conf(r, ngx_http_reqstat_module);

    /* nothing to monitor at this location */
    if (slcf->monitor == NULL) {
        return NGX_OK;
    }

    store = ngx_http_get_module_ctx(r, ngx_http_reqstat_module);

    if (store == NULL) {
        store = ngx_http_reqstat_create_store(r, slcf);
        if (store == NULL) {
            return NGX_ERROR;
        }
    }

    if (store->bypass) {
        return NGX_OK;
    }

    fnode_store = store->monitor_index.elts;

    for (i = 0; i < store->monitor_index.nelts; i++) {
        fnode = fnode_store[i];

        /* first request on this connection: count the connection itself */
        if (r->connection->requests == 1) {
            ngx_http_reqstat_count(fnode, NGX_HTTP_REQSTAT_CONN_TOTAL, 1);
        }

        ngx_http_reqstat_count(fnode, NGX_HTTP_REQSTAT_REQ_TOTAL, 1);

        /* byte counters are deltas against the snapshot taken when the
         * store was created, so keep-alive connections are not
         * double-counted */
        ngx_http_reqstat_count(fnode, NGX_HTTP_REQSTAT_BYTES_IN,
                               r->connection->received
                                   - (store ? store->recv : 0));
        ngx_http_reqstat_count(fnode, NGX_HTTP_REQSTAT_BYTES_OUT,
                               r->connection->sent
                                   - (store ? store->sent : 0));

        /* pick the most specific status available */
        if (r->err_status) {
            status = r->err_status;

        } else if (r->headers_out.status) {
            status = r->headers_out.status;

        } else if (r->http_version == NGX_HTTP_VERSION_9) {
            /* HTTP/0.9 has no status line */
            status = 9;

        } else {
            status = 0;
        }

        if (status >= 200 && status < 300) {
            ngx_http_reqstat_count(fnode, NGX_HTTP_REQSTAT_2XX, 1);

        } else if (status >= 300 && status < 400) {
            ngx_http_reqstat_count(fnode, NGX_HTTP_REQSTAT_3XX, 1);

        } else if (status >= 400 && status < 500) {
            ngx_http_reqstat_count(fnode, NGX_HTTP_REQSTAT_4XX, 1);

        } else if (status >= 500 && status < 600) {
            ngx_http_reqstat_count(fnode, NGX_HTTP_REQSTAT_5XX, 1);

        } else {
            ngx_http_reqstat_count(fnode, NGX_HTTP_REQSTAT_OTHER_STATUS, 1);
        }

        /* wall-clock request time in milliseconds, clamped at zero */
        tp = ngx_timeofday();

        ms = (ngx_msec_int_t) ((tp->sec - r->start_sec) * 1000
                               + (tp->msec - r->start_msec));
        ms = ngx_max(ms, 0);
        ngx_http_reqstat_count(fnode, NGX_HTTP_REQSTAT_RT, ms);

        if (r->upstream_states != NULL && r->upstream_states->nelts > 0) {
            ngx_http_reqstat_count(fnode, NGX_HTTP_REQSTAT_UPS_REQ, 1);

            j = 0;
            total_ms = 0;
            utries = 0;
            state = r->upstream_states->elts;

            /*
             * walk upstream_states; entries with peer == NULL separate
             * the states of distinct upstream blocks (the same idiom
             * nginx's log module uses), hence the extra ++j skip.
             */
            for ( ;; ) {
                utries++;

#if nginx_version <= 1009000
                ms = (ngx_msec_int_t) (state[j].response_sec * 1000
                                       + state[j].response_msec);
#else
                ms = (ngx_msec_int_t) state[j].response_time;
#endif
                ms = ngx_max(ms, 0);
                total_ms += ms;

                if (++j == r->upstream_states->nelts) {
                    break;
                }

                if (state[j].peer == NULL) {
                    if (++j == r->upstream_states->nelts) {
                        break;
                    }
                }
            }

            ngx_http_reqstat_count(fnode, NGX_HTTP_REQSTAT_UPS_RT,
                                   total_ms);
            ngx_http_reqstat_count(fnode, NGX_HTTP_REQSTAT_UPS_TRIES,
                                   utries);
        }
    }

    return NGX_OK;
}
/*
 * Content handler demonstrating the nginx upstream mechanism (book
 * example): proxies the request to a hard-coded backend and hands
 * control to ngx_http_upstream_init().
 *
 * NOTE(review): gethostbyname() is a blocking call and will stall the
 * whole worker process; production code should use nginx's resolver.
 * Also inet_ntoa() returns a pointer to a static buffer, and
 * backendSockAddr itself is static — both are overwritten by later
 * requests, so resolved addresses must not outlive this request's
 * upstream connection attempt.  Acceptable for a teaching example only.
 */
static ngx_int_t
ngx_http_mytest_handler(ngx_http_request_t *r)
{
    //First, fetch or create this module's http context struct ngx_http_mytest_ctx_t
    ngx_http_mytest_ctx_t* myctx = ngx_http_get_module_ctx(r, ngx_http_mytest_module);
    if (myctx == NULL)
    {
        myctx = ngx_palloc(r->pool, sizeof(ngx_http_mytest_ctx_t));
        if (myctx == NULL)
        {
            return NGX_ERROR;
        }
        //Associate the newly created context with the request
        ngx_http_set_ctx(r, myctx, ngx_http_mytest_module);
    }

    //For every request that uses upstream, ngx_http_upstream_create must be
    //called exactly once; it initializes the r->upstream member
    if (ngx_http_upstream_create(r) != NGX_OK)
    {
        ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
                      "ngx_http_upstream_create() failed");
        return NGX_ERROR;
    }

    //Fetch the module configuration struct ngx_http_mytest_conf_t
    ngx_http_mytest_conf_t *mycf = (ngx_http_mytest_conf_t *) ngx_http_get_module_loc_conf(r, ngx_http_mytest_module);
    ngx_http_upstream_t *u = r->upstream;
    //Point r->upstream->conf at the upstream settings from the config file
    u->conf = &mycf->upstream;
    //Controls which buffers are used when relaying the response body
    u->buffering = mycf->upstream.buffering;

    //Initialize the resolved struct, which holds the upstream server address
    u->resolved = (ngx_http_upstream_resolved_t*) ngx_pcalloc(r->pool, sizeof(ngx_http_upstream_resolved_t));
    if (u->resolved == NULL)
    {
        ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
                      "ngx_pcalloc resolved error. %s.", strerror(errno));
        return NGX_ERROR;
    }

    //The upstream server here is www.google.com
    static struct sockaddr_in backendSockAddr;
    struct hostent *pHost = gethostbyname((char*) "www.google.com");
    if (pHost == NULL)
    {
        ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
                      "gethostbyname fail. %s", strerror(errno));

        return NGX_ERROR;
    }

    //Connect to port 80 of the upstream server
    backendSockAddr.sin_family = AF_INET;
    backendSockAddr.sin_port = htons((in_port_t) 80);
    char* pDmsIP = inet_ntoa(*(struct in_addr*) (pHost->h_addr_list[0]));
    backendSockAddr.sin_addr.s_addr = inet_addr(pDmsIP);
    myctx->backendServer.data = (u_char*)pDmsIP;
    myctx->backendServer.len = strlen(pDmsIP);

    //Store the address in the resolved member
    u->resolved->sockaddr = (struct sockaddr *)&backendSockAddr;
    u->resolved->socklen = sizeof(struct sockaddr_in);
    u->resolved->naddrs = 1;

    //Set the three mandatory callbacks (implemented in sections 5.3.3
    //through 5.3.5 of the book)
    u->create_request = mytest_upstream_create_request;
    u->process_header = mytest_process_status_line;
    u->finalize_request = mytest_upstream_finalize_request;

    //The reference count must be incremented here; see section 5.1.5
    r->main->count++;
    //Kick off the upstream state machine
    ngx_http_upstream_init(r);
    //Must return NGX_DONE so nginx keeps the request alive
    return NGX_DONE;
}
/*
 * Completion callback for one range-fetch fiber (subrequest).
 *
 * Recycles the fiber, validates the upstream response (status / chunk
 * length), forwards the chunk to the parent request in order, and starts
 * new fibers when possible.
 *
 * Fixes vs. the original:
 *  - the "unexpected status" error log had a %ui specifier with no
 *    matching argument (read garbage from the va_list);
 *  - "1 << fiber->chunk_index" shifted a plain int although chunk_index
 *    is 64-bit (logged with %uL) — UB for indexes >= 31; the shift is now
 *    done in uint64_t.
 *
 * Returns NGX_OK, an HTTP error status, or NGX_HTTP_INTERNAL_SERVER_ERROR.
 */
static ngx_int_t
ngx_http_parallel_handle_request_complete(
	ngx_http_parallel_ctx_t *ctx,
	ngx_http_request_t *r,
	ngx_http_parallel_fiber_ctx_t* fiber)
{
	ngx_http_parallel_loc_conf_t *conf;
	ngx_http_upstream_t* u;
	ngx_http_request_t* pr;
	ngx_buf_t* b;
	ngx_int_t rc;
	off_t expected_size;

	ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
		"ngx_http_parallel_handle_request_complete: "
		"fiber finished %uL", fiber->chunk_index);

	ctx->active_fibers--;

	// add the fiber to the free list
	fiber->next = ctx->free_fibers;
	ctx->free_fibers = fiber;

	// a final result was already decided for the parent request
	if (ctx->error_code != NGX_AGAIN) {
		return NGX_OK;
	}

	if (r->headers_out.status == NGX_HTTP_RANGE_NOT_SATISFIABLE &&
		fiber->chunk_index != 0)
	{
		if (ctx->chunks == NULL) {
			// don't have the response length yet, flag this chunk as missing;
			// 64-bit shift: chunk_index may exceed 31
			ctx->missing_chunks |= ((uint64_t) 1 << fiber->chunk_index);
		}
		else
		{
			if (fiber->chunk_index < ctx->chunk_count)
			{
				// a chunk inside the known response length must not 416
				ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
					"ngx_http_parallel_handle_request_complete: "
					"got 416 error for a required chunk %uL/%uL",
					fiber->chunk_index, ctx->chunk_count);
				return NGX_HTTP_BAD_GATEWAY;
			}
		}

		return NGX_OK;
	}

	pr = r->parent;

	if (r->headers_out.status != NGX_HTTP_PARTIAL_CONTENT &&
		pr->header_sent)
	{
		// headers already sent based on an earlier 206; can't recover
		ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
			"ngx_http_parallel_handle_request_complete: "
			"unexpected status %ui after getting 206",
			r->headers_out.status);
		return NGX_HTTP_BAD_GATEWAY;
	}

	// update the buffer pointers from the upstream buffer
	u = r->upstream;
	if (u == NULL)
	{
		ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
			"ngx_http_parallel_handle_request_complete: "
			"unexpected, subrequest has no upstream");
		return NGX_HTTP_INTERNAL_SERVER_ERROR;
	}

	b = fiber->cl->buf;
	*b = u->buffer;
	b->last = b->pos + u->state->response_length;

	conf = ngx_http_get_module_loc_conf(pr, ngx_http_parallel_module);

	if (r->headers_out.status != NGX_HTTP_PARTIAL_CONTENT ||
		ctx->fiber_count == 1)
	{
		// single-response path: the whole body arrived in one piece

		// save the content length to cache
		if (r->headers_out.status != NGX_HTTP_PARTIAL_CONTENT &&
			ctx->cached_response_length != r->headers_out.content_length_n &&
			conf->content_length_cache_zone != NULL)
		{
			if (!ctx->key_inited)
			{
				ngx_http_parallel_calculate_key(ctx->key, r);
			}

			ngx_fixed_buffer_cache_store(
				conf->content_length_cache_zone,
				ctx->key,
				(u_char*)&r->headers_out.content_length_n,
				1);
		}

		// copy the response headers from upstream
		pr->headers_out = r->headers_out;

		rc = ngx_http_send_header(pr);
		if (rc == NGX_ERROR || rc > NGX_OK)
		{
			ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
				"ngx_http_parallel_handle_request_complete: "
				"ngx_http_send_header failed %i", rc);
			return rc;
		}

		// send the buffer
		b->last_buf = 1;
		ctx->error_code = NGX_OK;

		if (ngx_buf_size(b) == 0)
		{
			// empty buffers must not be flagged as holding data
			b->temporary = b->memory = 0;
		}

		rc = ngx_http_output_filter(pr, fiber->cl);
		if (rc != NGX_OK && rc != NGX_AGAIN)
		{
			ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
				"ngx_http_parallel_handle_request_complete: "
				"ngx_http_output_filter failed %i", rc);
			return rc;
		}

		return NGX_OK;
	}

	// initialize the chunks array (on first completed chunk)
	if (ctx->chunks == NULL)
	{
		rc = ngx_http_parallel_init_chunks(ctx, pr, &r->headers_out);
		if (rc != NGX_OK)
		{
			return rc;
		}
	}
	else
	{
		// later chunks must agree with the headers of the first one
		if (!ngx_http_parallel_check_consistency(ctx, r, conf))
		{
			return NGX_HTTP_BAD_GATEWAY;
		}
	}

	if (fiber->chunk_index >= ctx->chunk_count)
	{
		ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
			"ngx_http_parallel_handle_request_complete: "
			"unexpected, chunk index %uL exceeds chunk count %uL",
			fiber->chunk_index, ctx->chunk_count);
		return NGX_HTTP_BAD_GATEWAY;
	}

	// validate content length: last chunk, initial-wave chunk, or steady-state chunk
	if (fiber->chunk_index + 1 == ctx->chunk_count)
	{
		expected_size = ctx->last_chunk_size;
	}
	else if (fiber->chunk_index < ctx->fiber_count)
	{
		expected_size = ctx->initial_chunk_size;
	}
	else
	{
		expected_size = ctx->chunk_size;
	}

	if (u->state->response_length != expected_size)
	{
		ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
			"ngx_http_parallel_handle_request_complete: "
			"unexpected response length %O in chunk %uL/%uL, expected %O",
			u->state->response_length, fiber->chunk_index,
			ctx->chunk_count, expected_size);
		return NGX_HTTP_BAD_GATEWAY;
	}

	if (fiber->chunk_index == ctx->next_send_chunk)
	{
		// send the buffer and any queued buffers
		rc = ngx_http_parallel_send_buffer(ctx, pr, fiber->cl);
		if (rc != NGX_OK)
		{
			return rc;
		}
	}
	else
	{
		// save the buffer to be sent later, once earlier chunks arrive
		ctx->chunks[fiber->chunk_index] = fiber->cl;
	}

	// start fibers if possible
	rc = ngx_http_parallel_start_fibers(ctx, pr, conf);
	if (rc != NGX_OK)
	{
		return NGX_HTTP_INTERNAL_SERVER_ERROR;
	}

	return NGX_OK;
}
/*
 * Send the HTTP response header.
 *
 * Two-pass serializer: first it computes the exact byte length of the
 * status line plus every header that will be emitted, then it allocates
 * one buffer of that size and writes the same items in the same order.
 * The two passes MUST stay in sync — every "len +=" below has a matching
 * write further down.
 *
 * Returns NGX_OK if the header was already sent or does not apply,
 * NGX_ERROR on allocation failure, otherwise the result of
 * ngx_http_write_filter() (NGX_OK / NGX_AGAIN / NGX_ERROR).
 */
static ngx_int_t
ngx_http_header_filter(ngx_http_request_t *r)
{
    u_char                    *p;
    size_t                     len;
    ngx_str_t                  host, *status_line;
    ngx_buf_t                 *b;
    ngx_uint_t                 status, i, port;
    ngx_chain_t                out;
    ngx_list_part_t           *part;
    ngx_table_elt_t           *header;
    ngx_connection_t          *c;
    ngx_http_core_loc_conf_t  *clcf;
    ngx_http_core_srv_conf_t  *cscf;
    struct sockaddr_in        *sin;
#if (NGX_HAVE_INET6)
    struct sockaddr_in6       *sin6;
#endif
    u_char                     addr[NGX_SOCKADDR_STRLEN];

    /* the header was already sent for this request — nothing to do */
    if (r->header_sent) {
        return NGX_OK;
    }

    /* mark the header as sent before building it */
    r->header_sent = 1;

    /* only the main request sends a header, never a subrequest */
    if (r != r->main) {
        return NGX_OK;
    }

    /* HTTP/0.9 has no response header at all */
    if (r->http_version < NGX_HTTP_VERSION_10) {
        return NGX_OK;
    }

    /* HEAD responses carry headers only, no body */
    if (r->method == NGX_HTTP_HEAD) {
        r->header_only = 1;
    }

    /* Last-Modified is only meaningful on 200/206/304 responses */
    if (r->headers_out.last_modified_time != -1) {
        if (r->headers_out.status != NGX_HTTP_OK
            && r->headers_out.status != NGX_HTTP_PARTIAL_CONTENT
            && r->headers_out.status != NGX_HTTP_NOT_MODIFIED)
        {
            r->headers_out.last_modified_time = -1;
            r->headers_out.last_modified = NULL;
        }
    }

    /*
     * Pass 1: compute the number of bytes needed to serialize the status
     * line and all response headers.
     */
    len = sizeof("HTTP/1.x ") - 1 + sizeof(CRLF) - 1
          /* the end of the header */
          + sizeof(CRLF) - 1;

    /* status line */

    if (r->headers_out.status_line.len) {
        /* an explicit status line string was provided */
        len += r->headers_out.status_line.len;
        status_line = &r->headers_out.status_line;
#if (NGX_SUPPRESS_WARN)
        status = 0;
#endif

    } else {
        /* derive the status line from the numeric status code */
        status = r->headers_out.status;

        if (status >= NGX_HTTP_OK
            && status < NGX_HTTP_LAST_2XX)
        {
            /* 2XX */

            if (status == NGX_HTTP_NO_CONTENT) {
                /* 204 must not carry a body or entity headers */
                r->header_only = 1;
                ngx_str_null(&r->headers_out.content_type);
                r->headers_out.last_modified_time = -1;
                r->headers_out.last_modified = NULL;
                r->headers_out.content_length = NULL;
                r->headers_out.content_length_n = -1;
            }

            status -= NGX_HTTP_OK;
            status_line = &ngx_http_status_lines[status];
            len += ngx_http_status_lines[status].len;

        } else if (status >= NGX_HTTP_MOVED_PERMANENTLY
                   && status < NGX_HTTP_LAST_3XX)
        {
            /* 3XX */

            if (status == NGX_HTTP_NOT_MODIFIED) {
                /* 304 has no body */
                r->header_only = 1;
            }

            status = status - NGX_HTTP_MOVED_PERMANENTLY + NGX_HTTP_OFF_3XX;
            status_line = &ngx_http_status_lines[status];
            len += ngx_http_status_lines[status].len;

        } else if (status >= NGX_HTTP_BAD_REQUEST
                   && status < NGX_HTTP_LAST_4XX)
        {
            /* 4XX */

            status = status - NGX_HTTP_BAD_REQUEST + NGX_HTTP_OFF_4XX;
            status_line = &ngx_http_status_lines[status];
            len += ngx_http_status_lines[status].len;

        } else if (status >= NGX_HTTP_INTERNAL_SERVER_ERROR
                   && status < NGX_HTTP_LAST_5XX)
        {
            /* 5XX */

            status = status - NGX_HTTP_INTERNAL_SERVER_ERROR
                            + NGX_HTTP_OFF_5XX;
            status_line = &ngx_http_status_lines[status];
            len += ngx_http_status_lines[status].len;

        } else {
            /* unknown code: emit the bare number */
            len += NGX_INT_T_LEN + 1 /* SP */;
            status_line = NULL;
        }

        /* a gap in the status-line table: fall back to the bare number */
        if (status_line && status_line->len == 0) {
            status = r->headers_out.status;
            len += NGX_INT_T_LEN + 1 /* SP */;
            status_line = NULL;
        }
    }

    clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module);

    /* "Server:" — full version string or bare name per server_tokens */
    if (r->headers_out.server == NULL) {
        len += clcf->server_tokens ? sizeof(ngx_http_server_full_string) - 1:
                                     sizeof(ngx_http_server_string) - 1;
    }

    if (r->headers_out.date == NULL) {
        len += sizeof("Date: Mon, 28 Sep 1970 06:00:00 GMT" CRLF) - 1;
    }

    if (r->headers_out.content_type.len) {
        len += sizeof("Content-Type: ") - 1
               + r->headers_out.content_type.len + 2;

        /* charset is appended only for non-truncated content types */
        if (r->headers_out.content_type_len == r->headers_out.content_type.len
            && r->headers_out.charset.len)
        {
            len += sizeof("; charset=") - 1 + r->headers_out.charset.len;
        }
    }

    if (r->headers_out.content_length == NULL
        && r->headers_out.content_length_n >= 0)
    {
        len += sizeof("Content-Length: ") - 1 + NGX_OFF_T_LEN + 2;
    }

    if (r->headers_out.last_modified == NULL
        && r->headers_out.last_modified_time != -1)
    {
        len += sizeof("Last-Modified: Mon, 28 Sep 1970 06:00:00 GMT" CRLF) - 1;
    }

    c = r->connection;

    /*
     * A relative Location (starts with '/') is rewritten to an absolute
     * URL: pick the host, then the port of the local socket.
     */
    if (r->headers_out.location
        && r->headers_out.location->value.len
        && r->headers_out.location->value.data[0] == '/')
    {
        /* hash 0 excludes the raw header from the generic header loop below */
        r->headers_out.location->hash = 0;

        if (clcf->server_name_in_redirect) {
            cscf = ngx_http_get_module_srv_conf(r, ngx_http_core_module);
            host = cscf->server_name;

        } else if (r->headers_in.server.len) {
            host = r->headers_in.server;

        } else {
            /* fall back to the local socket address */
            host.len = NGX_SOCKADDR_STRLEN;
            host.data = addr;

            if (ngx_connection_local_sockaddr(c, &host, 0) != NGX_OK) {
                return NGX_ERROR;
            }
        }

        switch (c->local_sockaddr->sa_family) {

#if (NGX_HAVE_INET6)
        case AF_INET6:
            sin6 = (struct sockaddr_in6 *) c->local_sockaddr;
            port = ntohs(sin6->sin6_port);
            break;
#endif

#if (NGX_HAVE_UNIX_DOMAIN)
        case AF_UNIX:
            port = 0;
            break;
#endif

        default: /* AF_INET */
            sin = (struct sockaddr_in *) c->local_sockaddr;
            port = ntohs(sin->sin_port);
            break;
        }

        len += sizeof("Location: https://") - 1
               + host.len
               + r->headers_out.location->value.len + 2;

        if (clcf->port_in_redirect) {

            /* omit the port when it is the scheme default */
#if (NGX_HTTP_SSL)
            if (c->ssl)
                port = (port == 443) ? 0 : port;
            else
#endif
                port = (port == 80) ? 0 : port;

        } else {
            port = 0;
        }

        if (port) {
            len += sizeof(":65535") - 1;
        }

    } else {
        ngx_str_null(&host);
        port = 0;
    }

    if (r->chunked) {
        len += sizeof("Transfer-Encoding: chunked" CRLF) - 1;
    }

    if (r->headers_out.status == NGX_HTTP_SWITCHING_PROTOCOLS) {
        len += sizeof("Connection: upgrade" CRLF) - 1;

    } else if (r->keepalive) {
        len += sizeof("Connection: keep-alive" CRLF) - 1;

        /*
         * MSIE and Opera ignore the "Keep-Alive: timeout=<N>" header.
         * MSIE keeps the connection alive for about 60-65 seconds.
         * Opera keeps the connection alive very long.
         * Mozilla keeps the connection alive for N plus about 1-10 seconds.
         * Konqueror keeps the connection alive for about N seconds.
         */

        if (clcf->keepalive_header) {
            len += sizeof("Keep-Alive: timeout=") - 1 + NGX_TIME_T_LEN + 2;
        }

    } else {
        len += sizeof("Connection: close" CRLF) - 1;
    }

#if (NGX_HTTP_GZIP)
    if (r->gzip_vary) {
        if (clcf->gzip_vary) {
            len += sizeof("Vary: Accept-Encoding" CRLF) - 1;
        } else {
            r->gzip_vary = 0;
        }
    }
#endif

    /* account for every header stored in the headers_out list */
    part = &r->headers_out.headers.part;
    header = part->elts;

    for (i = 0; /* void */; i++) {

        if (i >= part->nelts) {
            if (part->next == NULL) {
                break;
            }

            part = part->next;
            header = part->elts;
            i = 0;
        }

        /* hash 0 marks a header as deleted/handled elsewhere */
        if (header[i].hash == 0) {
            continue;
        }

        len += header[i].key.len + sizeof(": ") - 1 + header[i].value.len
               + sizeof(CRLF) - 1;
    }

    /* Pass 2: allocate the buffer and serialize everything counted above */
    b = ngx_create_temp_buf(r->pool, len);
    if (b == NULL) {
        return NGX_ERROR;
    }

    /* "HTTP/1.x " */
    b->last = ngx_cpymem(b->last, "HTTP/1.1 ", sizeof("HTTP/1.x ") - 1);

    /* status line */
    if (status_line) {
        b->last = ngx_copy(b->last, status_line->data, status_line->len);

    } else {
        b->last = ngx_sprintf(b->last, "%03ui ", status);
    }
    *b->last++ = CR; *b->last++ = LF;

    if (r->headers_out.server == NULL) {
        if (clcf->server_tokens) {
            p = (u_char *) ngx_http_server_full_string;
            len = sizeof(ngx_http_server_full_string) - 1;

        } else {
            p = (u_char *) ngx_http_server_string;
            len = sizeof(ngx_http_server_string) - 1;
        }

        b->last = ngx_cpymem(b->last, p, len);
    }

    if (r->headers_out.date == NULL) {
        b->last = ngx_cpymem(b->last, "Date: ", sizeof("Date: ") - 1);
        b->last = ngx_cpymem(b->last, ngx_cached_http_time.data,
                             ngx_cached_http_time.len);

        *b->last++ = CR; *b->last++ = LF;
    }

    if (r->headers_out.content_type.len) {
        b->last = ngx_cpymem(b->last, "Content-Type: ",
                             sizeof("Content-Type: ") - 1);
        p = b->last;
        b->last = ngx_copy(b->last, r->headers_out.content_type.data,
                           r->headers_out.content_type.len);

        if (r->headers_out.content_type_len == r->headers_out.content_type.len
            && r->headers_out.charset.len)
        {
            b->last = ngx_cpymem(b->last, "; charset=",
                                 sizeof("; charset=") - 1);
            b->last = ngx_copy(b->last, r->headers_out.charset.data,
                               r->headers_out.charset.len);

            /* update r->headers_out.content_type for possible logging */

            r->headers_out.content_type.len = b->last - p;
            r->headers_out.content_type.data = p;
        }

        *b->last++ = CR; *b->last++ = LF;
    }

    if (r->headers_out.content_length == NULL
        && r->headers_out.content_length_n >= 0)
    {
        b->last = ngx_sprintf(b->last, "Content-Length: %O" CRLF,
                              r->headers_out.content_length_n);
    }

    if (r->headers_out.last_modified == NULL
        && r->headers_out.last_modified_time != -1)
    {
        b->last = ngx_cpymem(b->last, "Last-Modified: ",
                             sizeof("Last-Modified: ") - 1);
        b->last = ngx_http_time(b->last, r->headers_out.last_modified_time);

        *b->last++ = CR; *b->last++ = LF;
    }

    /* host.data is non-NULL only when a relative Location was detected */
    if (host.data) {

        p = b->last + sizeof("Location: ") - 1;

        b->last = ngx_cpymem(b->last, "Location: http",
                             sizeof("Location: http") - 1);

#if (NGX_HTTP_SSL)
        if (c->ssl) {
            *b->last++ ='s';
        }
#endif

        *b->last++ = ':'; *b->last++ = '/'; *b->last++ = '/';
        b->last = ngx_copy(b->last, host.data, host.len);

        if (port) {
            b->last = ngx_sprintf(b->last, ":%ui", port);
        }

        b->last = ngx_copy(b->last, r->headers_out.location->value.data,
                           r->headers_out.location->value.len);

        /* update r->headers_out.location->value for possible logging */

        r->headers_out.location->value.len = b->last - p;
        r->headers_out.location->value.data = p;
        ngx_str_set(&r->headers_out.location->key, "Location");

        *b->last++ = CR; *b->last++ = LF;
    }

    if (r->chunked) {
        b->last = ngx_cpymem(b->last, "Transfer-Encoding: chunked" CRLF,
                             sizeof("Transfer-Encoding: chunked" CRLF) - 1);
    }

    if (r->headers_out.status == NGX_HTTP_SWITCHING_PROTOCOLS) {
        b->last = ngx_cpymem(b->last, "Connection: upgrade" CRLF,
                             sizeof("Connection: upgrade" CRLF) - 1);

    } else if (r->keepalive) {
        b->last = ngx_cpymem(b->last, "Connection: keep-alive" CRLF,
                             sizeof("Connection: keep-alive" CRLF) - 1);

        if (clcf->keepalive_header) {
            b->last = ngx_sprintf(b->last, "Keep-Alive: timeout=%T" CRLF,
                                  clcf->keepalive_header);
        }

    } else {
        b->last = ngx_cpymem(b->last, "Connection: close" CRLF,
                             sizeof("Connection: close" CRLF) - 1);
    }

#if (NGX_HTTP_GZIP)
    if (r->gzip_vary) {
        b->last = ngx_cpymem(b->last, "Vary: Accept-Encoding" CRLF,
                             sizeof("Vary: Accept-Encoding" CRLF) - 1);
    }
#endif

    /* serialize the headers_out list in the same order as pass 1 */
    part = &r->headers_out.headers.part;
    header = part->elts;

    for (i = 0; /* void */; i++) {

        if (i >= part->nelts) {
            if (part->next == NULL) {
                break;
            }

            part = part->next;
            header = part->elts;
            i = 0;
        }

        if (header[i].hash == 0) {
            continue;
        }

        b->last = ngx_copy(b->last, header[i].key.data, header[i].key.len);
        *b->last++ = ':'; *b->last++ = ' ';

        b->last = ngx_copy(b->last, header[i].value.data, header[i].value.len);
        *b->last++ = CR; *b->last++ = LF;
    }

    ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0,
                   "%*s", (size_t) (b->last - b->pos), b->pos);

    /* the end of HTTP header */
    *b->last++ = CR; *b->last++ = LF;

    r->header_size = b->last - b->pos;

    if (r->header_only) {
        b->last_buf = 1;
    }

    /*
     * Chain the header buffer into 'out' and hand it to the write filter.
     * If the header cannot be sent in one pass, ngx_http_write_filter()
     * returns NGX_AGAIN and keeps the remainder queued; the framework
     * resumes sending it later (e.g. from ngx_http_finalize_request).
     */
    out.buf = b;
    out.next = NULL;

    return ngx_http_write_filter(r, &out);
}
/*
 * Access-phase handler: applies every configured limit_req rule to the
 * request.  Each rule is looked up in its shared-memory zone; the rule
 * producing the largest excess determines whether (and how long) the
 * request is delayed.  NGX_BUSY aborts immediately (reject or redirect
 * to forbid_action).
 *
 * Fix vs. the original: the delay was computed with ctx->rate where ctx
 * was whatever rule the loop touched LAST, not the rule that produced the
 * winning excess (delay_position).  With multiple zones at different
 * rates this yielded a wrong delay; ctx is now re-resolved from
 * delay_position before computing delay_time.
 */
static ngx_int_t
ngx_http_limit_req_handler(ngx_http_request_t *r)
{
    size_t                      n, total_len;
    uint32_t                    hash;
    ngx_int_t                   rc;
    ngx_msec_t                  delay_time;
    ngx_uint_t                  excess, delay_excess, delay_position,
                                nodelay, i;
    ngx_time_t                 *tp;
    ngx_rbtree_node_t          *node;
    ngx_http_limit_req_t       *limit_req;
    ngx_http_limit_req_ctx_t   *ctx;
    ngx_http_limit_req_node_t  *lr;
    ngx_http_limit_req_conf_t  *lrcf;

    delay_excess = 0;
    excess = 0;
    delay_position = 0;
    nodelay = 0;
    ctx = NULL;
    rc = NGX_DECLINED;

    /* each request is rate-limited at most once */
    if (r->main->limit_req_set) {
        return NGX_DECLINED;
    }

    lrcf = ngx_http_get_module_loc_conf(r, ngx_http_limit_req_module);
    if (lrcf->rules == NULL) {
        return NGX_DECLINED;
    }

    if (!lrcf->enable) {
        return NGX_DECLINED;
    }

    /* filter whitelist */
    if (ngx_http_limit_req_ip_filter(r, lrcf) == NGX_OK) {
        return NGX_DECLINED;
    }

    /* try to match every limit_req rule */
    limit_req = lrcf->rules->elts;
    for (i = 0; i < lrcf->rules->nelts; i++) {
        ctx = limit_req[i].shm_zone->data;

        /* hash the rule's key variables; empty key -> rule does not apply */
        ngx_crc32_init(hash);
        total_len = ngx_http_limit_req_copy_variables(r, &hash, ctx, NULL);
        if (total_len == 0) {
            continue;
        }
        ngx_crc32_final(hash);

        ngx_shmtx_lock(&ctx->shpool->mutex);

        /* lazily expire stale nodes while we hold the lock */
        ngx_http_limit_req_expire(r, ctx, 1);

        rc = ngx_http_limit_req_lookup(r, &limit_req[i], hash, &excess);

        ngx_log_debug5(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                       "limit_req module: %i %ui.%03ui "
                       "hash is %D total_len is %uz",
                       rc, excess / 1000, excess % 1000, hash, total_len);

        /* first request for this key: insert a fresh node */
        if (rc == NGX_DECLINED) {

            n = offsetof(ngx_rbtree_node_t, color)
                + offsetof(ngx_http_limit_req_node_t, data)
                + total_len;

            node = ngx_slab_alloc_locked(ctx->shpool, n);
            if (node == NULL) {
                /* out of shared memory: force-expire and retry once */
                ngx_http_limit_req_expire(r, ctx, 0);
                node = ngx_slab_alloc_locked(ctx->shpool, n);
                if (node == NULL) {
                    ngx_shmtx_unlock(&ctx->shpool->mutex);
                    return lrcf->status_code;
                }
            }

            lr = (ngx_http_limit_req_node_t *) &node->color;

            node->key = hash;
            /* NOTE(review): total_len is truncated to u_char here — assumes
             * key variables never exceed 255 bytes; confirm upstream */
            lr->len = (u_char) total_len;

            tp = ngx_timeofday();
            lr->last = (ngx_msec_t) (tp->sec * 1000 + tp->msec);
            lr->excess = 0;

            ngx_http_limit_req_copy_variables(r, &hash, ctx, lr);

            ngx_queue_insert_head(&ctx->sh->queue, &lr->queue);
            ngx_rbtree_insert(&ctx->sh->rbtree, node);

            ngx_shmtx_unlock(&ctx->shpool->mutex);
            continue;
        }

        ngx_shmtx_unlock(&ctx->shpool->mutex);

        if (rc == NGX_BUSY || rc == NGX_ERROR) {
            break;
        }

        /* NGX_AGAIN or NGX_OK: remember the rule with the largest excess */
        if (delay_excess < excess) {
            delay_excess = excess;
            nodelay = limit_req[i].nodelay;
            delay_position = i;
        }
    }

    r->main->limit_req_set = 1;

    if (rc == NGX_BUSY || rc == NGX_ERROR) {

        if (rc == NGX_BUSY) {
            ngx_log_error(lrcf->limit_log_level, r->connection->log, 0,
                          "limiting requests, excess: %ui.%03ui by zone \"%V\"",
                          excess / 1000, excess % 1000,
                          &limit_req[i].shm_zone->shm.name);
        }

        if (rc == NGX_ERROR || limit_req[i].forbid_action.len == 0) {
            /* plain rejection with the configured status code */
            return lrcf->status_code;

        } else if (limit_req[i].forbid_action.data[0] == '@') {
            /* forbid_action names a location: jump to it */
            ngx_log_error(lrcf->limit_log_level, r->connection->log, 0,
                          "limiting requests, forbid_action is %V",
                          &limit_req[i].forbid_action);
            (void) ngx_http_named_location(r, &limit_req[i].forbid_action);

        } else {
            /* forbid_action is a URI: internal redirect */
            ngx_log_error(lrcf->limit_log_level, r->connection->log, 0,
                          "limiting requests, forbid_action is %V",
                          &limit_req[i].forbid_action);
            (void) ngx_http_internal_redirect(r, &limit_req[i].forbid_action,
                                              &r->args);
        }

        ngx_http_finalize_request(r, NGX_DONE);
        return NGX_DONE;
    }

    /* rc = NGX_AGAIN: at least one rule wants the request delayed */
    if (delay_excess != 0) {

        if (nodelay) {
            return NGX_DECLINED;
        }

        /* use the rate of the rule that produced the winning excess,
         * not whichever rule the loop examined last */
        ctx = limit_req[delay_position].shm_zone->data;
        delay_time = (ngx_msec_t) delay_excess * 1000 / ctx->rate;

        ngx_log_error(lrcf->delay_log_level, r->connection->log, 0,
                      "delaying request,"
                      "excess: %ui.%03ui, by zone \"%V\", delay \"%M\" s",
                      delay_excess / 1000, delay_excess % 1000,
                      &limit_req[delay_position].shm_zone->shm.name,
                      delay_time);

        if (ngx_handle_read_event(r->connection->read, 0) != NGX_OK) {
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }

        /* park the request until the delay timer fires */
        r->read_event_handler = ngx_http_test_reading;
        r->write_event_handler = ngx_http_limit_req_delay;
        ngx_add_timer(r->connection->write, delay_time);

        return NGX_AGAIN;
    }

    /* rc == NGX_OK or rc == NGX_DECLINED */
    return NGX_DECLINED;
}
/*
 * Header filter: decides whether the response is eligible for brotli
 * compression, and if so installs the module context, disables gzip,
 * and replaces the Content-Encoding header.
 */
static ngx_int_t
ngx_http_brotli_header_filter(ngx_http_request_t *r)
{
    ngx_table_elt_t         *h, *ae;
    ngx_http_brotli_ctx_t   *ctx;
    ngx_http_brotli_conf_t  *conf;

    ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                   "http brotli header filter");

    conf = ngx_http_get_module_loc_conf(r, ngx_http_brotli_filter_module);

    /* guard clauses — same eligibility checks, same evaluation order */

    if (!conf->enable) {
        return ngx_http_next_header_filter(r);
    }

    if (r->headers_out.status != NGX_HTTP_OK
        && r->headers_out.status != NGX_HTTP_FORBIDDEN
        && r->headers_out.status != NGX_HTTP_NOT_FOUND)
    {
        return ngx_http_next_header_filter(r);
    }

    if (r->headers_out.content_encoding
        && r->headers_out.content_encoding->value.len)
    {
        /* already encoded by someone else */
        return ngx_http_next_header_filter(r);
    }

    if (r->headers_out.content_length_n != -1
        && r->headers_out.content_length_n < conf->min_length)
    {
        /* too small to be worth compressing */
        return ngx_http_next_header_filter(r);
    }

    if (ngx_http_test_content_type(r, &conf->types) == NULL
        || r->header_only)
    {
        return ngx_http_next_header_filter(r);
    }

    /*
     * Check that brotli is supported.  We do not check a possible q value;
     * if brotli is supported it takes precedence over gzip when the size
     * exceeds brotli_min_length.  Since there is no reason for the "br"
     * string to be present unless brotli is accepted as either "br" or
     * "brotli", we just scan for "br".
     */
    ae = r->headers_in.accept_encoding;
    if (ae == NULL) {
        return ngx_http_next_header_filter(r);
    }

    if (ngx_strstrn(ae->value.data, "br", 1) == NULL) {
        return ngx_http_next_header_filter(r);
    }

    ctx = ngx_pcalloc(r->pool, sizeof(ngx_http_brotli_ctx_t));
    if (ctx == NULL) {
        return NGX_ERROR;
    }

    /* client sent the legacy "brotli" token -> answer with "brotli",
     * otherwise answer with the standard "br" */
    ctx->br = ngx_strstrn(ae->value.data, "brotli", 5) ? 0 : 1;

#if (NGX_HTTP_GZIP)
    r->gzip_vary = 1;
    /* make sure the gzip filter does not run on this response */
    r->gzip_tested = 1;
    r->gzip_ok = 0;
#endif

    ngx_http_set_ctx(r, ctx, ngx_http_brotli_filter_module);
    ctx->request = r;

    h = ngx_list_push(&r->headers_out.headers);
    if (h == NULL) {
        return NGX_ERROR;
    }

    h->hash = 1;
    ngx_str_set(&h->key, "Content-Encoding");

    if (ctx->br) {
        ngx_str_set(&h->value, "br");
    } else {
        ngx_str_set(&h->value, "brotli");
    }

    r->headers_out.content_encoding = h;

    r->main_filter_need_in_memory = 1;

    /* the compressed length is unknown; ranges no longer line up */
    ngx_http_clear_content_length(r);
    ngx_http_clear_accept_ranges(r);
    ngx_http_weak_etag(r);

    return ngx_http_next_header_filter(r);
}
/*
 * Variable handler that validates a signed (and optionally expiring)
 * cookie value.
 *
 * Input format (from conf->variable): "<base64-md5>[,<base64-expiry-time>]".
 * The MD5 of the evaluated conf->md5 expression must match the decoded
 * hash.  The variable evaluates to:
 *   "1" — signature valid and not expired,
 *   "0" — signature valid but expired,
 *   not_found — malformed input or signature mismatch.
 */
static ngx_int_t
ngx_http_secure_cookie_variable(ngx_http_request_t *r,
    ngx_http_variable_value_t *v, uintptr_t data)
{
    u_char                          hash_buf[16], md5_buf[16], time_buf[64];
    u_char                         *p, *last;
    ngx_str_t                       val, hash, time_dst, time_src;
    time_t                          expires;
    ngx_md5_t                       md5;
    ngx_http_secure_cookie_conf_t  *conf;

    conf = ngx_http_get_module_loc_conf(r, ngx_http_secure_cookie_module);

    /* both the value expression and the md5 expression must be configured */
    if (conf->variable == NULL || conf->md5 == NULL) {
        goto not_found;
    }

    if (ngx_http_complex_value(r, conf->variable, &val) != NGX_OK) {
        return NGX_ERROR;
    }

    ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                   "secure cookie: \"%V\"", &val);

    last = val.data + val.len;

    /* split "<hash>,<time>" at the first comma; the time part is optional */
    p = ngx_strlchr(val.data, last, ',');
    expires = 0;

    if (p) {
        /* shrink val to the hash part; p now points at the time part */
        val.len = p++ - val.data;

        time_src.data = p;
        time_src.len = last - p;

        /* base64 input longer than 64 cannot fit the 64-byte time_buf */
        if (time_src.len > 64) {
            goto not_found;
        }

        time_dst.data = time_buf;
        time_dst.len = 64;

        if (ngx_decode_base64(&time_dst, &time_src) != NGX_OK) {
            goto not_found;
        }

        ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                       "secure time: \"%V\"", &time_dst);

        /* ngx_http_parse_time returns NGX_ERROR (-1) on bad input */
        expires = ngx_http_parse_time(time_dst.data, time_dst.len);
        if (expires <= 0) {
            goto not_found;
        }
    }

    /* 16 raw bytes base64-encode to exactly 24 characters */
    if (val.len > 24) {
        goto not_found;
    }

    hash.len = 16;
    hash.data = hash_buf;

    if (ngx_decode_base64(&hash, &val) != NGX_OK) {
        goto not_found;
    }

    if (hash.len != 16) {
        goto not_found;
    }

    /* compute the expected MD5 over the configured expression */
    if (ngx_http_complex_value(r, conf->md5, &val) != NGX_OK) {
        return NGX_ERROR;
    }

    ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                   "secure cookie md5: \"%V\"", &val);

    ngx_md5_init(&md5);
    ngx_md5_update(&md5, val.data, val.len);
    ngx_md5_final(md5_buf, &md5);

    /* NOTE(review): ngx_memcmp is not constant-time; whether that matters
     * here depends on the deployment's threat model — confirm */
    if (ngx_memcmp(hash_buf, md5_buf, 16) != 0) {
        goto not_found;
    }

    /* valid signature: "0" if expired, "1" otherwise */
    v->data = (u_char *) ((expires && expires < ngx_time()) ? "0" : "1");
    v->len = 1;
    v->valid = 1;
    v->no_cacheable = 0;
    v->not_found = 0;

    return NGX_OK;

not_found:

    v->not_found = 1;

    return NGX_OK;
}
/*
** [ENTRY POINT] does : this is the function called by nginx :
** - Set up the context for the request
** - Check if the job is done and we're called again
** - if it's a PATCH/POST/PUT request, setup hook for body data
** - call dummy_data_parse
** - check our context struct (with scores & stuff) against custom check rules
** - check if the request should be denied
**
** Returns NGX_DECLINED to let the request continue, NGX_DONE while
** waiting for the body / after finalizing, NGX_ERROR on failure, or a
** special response code produced by the forbidden-page generator.
*/
static ngx_int_t
ngx_http_dummy_access_handler(ngx_http_request_t *r)
{
  ngx_http_request_ctx_t     *ctx;
  ngx_int_t                   rc;
  ngx_http_dummy_loc_conf_t  *cf;
  struct tms                  tmsstart, tmsend;
  clock_t                     start, end;
  ngx_http_variable_value_t  *lookup;

  /* names of the runtime-override nginx variables (set once per worker) */
  static ngx_str_t learning_flag = ngx_string(RT_LEARNING);
  static ngx_str_t enable_flag = ngx_string(RT_ENABLE);
  static ngx_str_t post_action_flag = ngx_string(RT_POST_ACTION);
  static ngx_str_t extensive_log_flag = ngx_string(RT_EXTENSIVE_LOG);
  static ngx_str_t libinjection_sql_flag = ngx_string(RT_LIBINJECTION_SQL);
  static ngx_str_t libinjection_xss_flag = ngx_string(RT_LIBINJECTION_XSS);

  ctx = ngx_http_get_module_ctx(r, ngx_http_naxsi_module);
  cf = ngx_http_get_module_loc_conf(r, ngx_http_naxsi_module);

  /* request already fully processed on a previous pass */
  if (ctx && ctx->over)
    return (NGX_DECLINED);

  /* body read still in progress: tell nginx to call us back */
  if (ctx && ctx->wait_for_body) {
    NX_DEBUG(_debug_mechanics, NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
             "naxsi:NGX_AGAIN");
    return (NGX_DONE);
  }

  if (!cf)
    return (NGX_ERROR);

  /* the module is not enabled here */
  /* if enable directive is not present at all in the location,
     don't try to do dynamic lookup for "live" enabled naxsi,
     this would be very rude. */
  if (!cf->enabled)
    return (NGX_DECLINED);

  /* On the other hand, if naxsi has been explicitly disabled
     in this location (using naxsi directive), user is probably
     trying to do something. */
  if (cf->force_disabled) {
    /* Look if the user did not try to enable naxsi dynamically */
    lookup = ngx_http_get_variable(r, &enable_flag, cf->flag_enable_h);
    if (lookup && !lookup->not_found && lookup->len > 0) {
      ngx_log_debug(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                    "live enable is present %d", lookup->data[0] - '0');
      if (lookup->data[0] - '0' != 1) {
        return (NGX_DECLINED);
      }
    }
    else
      return (NGX_DECLINED);
  }

  /* don't process internal requests. */
  if (r->internal) {
    NX_DEBUG(_debug_mechanics, NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
             "XX-DON'T PROCESS (%V)|CTX:%p|ARGS:%V|METHOD=%s|INTERNAL:%d",
             &(r->uri), ctx, &(r->args),
             r->method == NGX_HTTP_PATCH ? "PATCH" :
             r->method == NGX_HTTP_POST ? "POST" :
             r->method == NGX_HTTP_PUT ? "PUT" :
             r->method == NGX_HTTP_GET ? "GET" : "UNKNOWN!!",
             r->internal);
    return (NGX_DECLINED);
  }

  NX_DEBUG(_debug_mechanics, NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
           "XX-processing (%V)|CTX:%p|ARGS:%V|METHOD=%s|INTERNAL:%d",
           &(r->uri), ctx, &(r->args),
           r->method == NGX_HTTP_PATCH ? "PATCH" :
           r->method == NGX_HTTP_POST ? "POST" :
           r->method == NGX_HTTP_PUT ? "PUT" :
           r->method == NGX_HTTP_GET ? "GET" : "UNKNOWN!!",
           r->internal);

  /* first pass for this request: build the context and resolve every
     per-request runtime override (config value, then dynamic variable) */
  if (!ctx) {
    ctx = ngx_pcalloc(r->pool, sizeof(ngx_http_request_ctx_t));
    if (ctx == NULL)
      return NGX_ERROR;
    ngx_http_set_ctx(r, ctx, ngx_http_naxsi_module);

    NX_DEBUG(_debug_modifier, NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
             "XX-dummy : orig learning : %d", cf->learning ? 1 : 0);

    /* it seems that nginx will - in some cases - have a variable with
       empty content but with lookup->not_found set to 0, so check len
       as well */

    /* LEARNING: config default, optionally overridden by variable */
    ctx->learning = cf->learning;
    lookup = ngx_http_get_variable(r, &learning_flag, cf->flag_learning_h);
    if (lookup && !lookup->not_found && lookup->len > 0) {
      ctx->learning = lookup->data[0] - '0';
      NX_DEBUG(_debug_modifier, NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
               "XX-dummy : override learning : %d (raw=%d)",
               ctx->learning ? 1 : 0, lookup->len);
    }

    NX_DEBUG( _debug_modifier, NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
              "XX-dummy : [final] learning : %d", ctx->learning ? 1 : 0);

    /* ENABLED: config default, optionally overridden by variable */
    ctx->enabled = cf->enabled;

    NX_DEBUG( _debug_modifier, NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
              "XX-dummy : orig enabled : %d", ctx->enabled ? 1 : 0);

    lookup = ngx_http_get_variable(r, &enable_flag, cf->flag_enable_h);
    if (lookup && !lookup->not_found && lookup->len > 0) {
      ctx->enabled = lookup->data[0] - '0';
      NX_DEBUG( _debug_modifier, NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                "XX-dummy : override enable : %d", ctx->enabled ? 1 : 0);
    }

    NX_DEBUG( _debug_modifier, NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
              "XX-dummy : [final] enabled : %d", ctx->enabled ? 1 : 0);

    /*
    ** LIBINJECTION_SQL
    */
    ctx->libinjection_sql = cf->libinjection_sql_enabled;

    NX_DEBUG( _debug_modifier, NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
              "XX-dummy : orig libinjection_sql : %d",
              ctx->libinjection_sql ? 1 : 0);

    lookup = ngx_http_get_variable(r, &libinjection_sql_flag,
                                   cf->flag_libinjection_sql_h);
    if (lookup && !lookup->not_found && lookup->len > 0) {
      ctx->libinjection_sql = lookup->data[0] - '0';
      NX_DEBUG( _debug_modifier, NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                "XX-dummy : override libinjection_sql : %d",
                ctx->libinjection_sql ? 1 : 0);
    }

    NX_DEBUG( _debug_modifier, NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
              "XX-dummy : [final] libinjection_sql : %d",
              ctx->libinjection_sql ? 1 : 0);

    /*
    ** LIBINJECTION_XSS
    */
    ctx->libinjection_xss = cf->libinjection_xss_enabled;

    NX_DEBUG( _debug_modifier, NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
              "XX-dummy : orig libinjection_xss : %d",
              ctx->libinjection_xss ? 1 : 0);

    lookup = ngx_http_get_variable(r, &libinjection_xss_flag,
                                   cf->flag_libinjection_xss_h);
    if (lookup && !lookup->not_found && lookup->len > 0) {
      ctx->libinjection_xss = lookup->data[0] - '0';
      NX_DEBUG( _debug_modifier, NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                "XX-dummy : override libinjection_xss : %d",
                ctx->libinjection_xss ? 1 : 0);
    }

    NX_DEBUG( _debug_modifier, NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
              "XX-dummy : [final] libinjection_xss : %d",
              ctx->libinjection_xss ? 1 : 0);

    /* post_action is off by default. */
    ctx->post_action = 0;

    NX_DEBUG( _debug_modifier , NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
              "XX-dummy : orig post_action : %d", ctx->post_action ? 1 : 0);

    lookup = ngx_http_get_variable(r, &post_action_flag,
                                   cf->flag_post_action_h);
    if (lookup && !lookup->not_found && lookup->len > 0) {
      ctx->post_action = lookup->data[0] - '0';
      NX_DEBUG( _debug_modifier, NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                "XX-dummy : override post_action : %d",
                ctx->post_action ? 1 : 0);
    }

    NX_DEBUG( _debug_modifier, NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
              "XX-dummy : [final] post_action : %d", ctx->post_action ? 1 : 0);

    /* EXTENSIVE_LOG: override only (ctx is pcalloc'd, so default is 0) */
    NX_DEBUG( _debug_modifier , NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
              "XX-dummy : orig extensive_log : %d",
              ctx->extensive_log ? 1 : 0);

    lookup = ngx_http_get_variable(r, &extensive_log_flag,
                                   cf->flag_extensive_log_h);
    if (lookup && !lookup->not_found && lookup->len > 0) {
      ctx->extensive_log = lookup->data[0] - '0';
      NX_DEBUG( _debug_modifier, NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                "XX-dummy : override extensive_log : %d",
                ctx->extensive_log ? 1 : 0);
    }

    NX_DEBUG( _debug_modifier, NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
              "XX-dummy : [final] extensive_log : %d",
              ctx->extensive_log ? 1 : 0);

    /* the module is not enabled here */
    if (!ctx->enabled)
      return (NGX_DECLINED);

    /* methods with a body: register the body handler before parsing */
    if ((r->method == NGX_HTTP_PATCH || r->method == NGX_HTTP_POST ||
         r->method == NGX_HTTP_PUT) && !ctx->ready) {
      NX_DEBUG( _debug_mechanics, NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                "XX-dummy : body_request : before !");

      rc = ngx_http_read_client_request_body(r, ngx_http_dummy_payload_handler);

      /* this might happen quite often, especially with big files /
      ** low network speed. our handler is called when headers are read,
      ** but, often, the full body request hasn't yet, so
      ** read client request body will return ngx_again. Then we need
      ** to return ngx_done, wait for our handler to be called once
      ** body request arrived, and let him call core_run_phases
      ** to be able to process the request.
      */
      if (rc == NGX_AGAIN) {
        ctx->wait_for_body = 1;
        NX_DEBUG( _debug_mechanics, NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                  "XX-dummy : body_request : NGX_AGAIN !");
        return (NGX_DONE);
      }
      else if (rc >= NGX_HTTP_SPECIAL_RESPONSE) {
        /*
        ** might happen but never saw it, let the debug print.
        */
        ngx_log_debug(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                      "XX-dummy : SPECIAL RESPONSE !!!!");
        return rc;
      }
    }
    else
      /* bodyless methods can be processed immediately */
      ctx->ready = 1;
  }

  /* body (if any) is available: run the actual rule matching once */
  if (ctx && ctx->ready && !ctx->over) {

    /* wall-clock the parse; times() returns clock ticks, used here only
       for a coarse slowness warning */
    if ((start = times(&tmsstart)) == (clock_t)-1)
      ngx_log_debug(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                    "XX-dummy : Failed to get time");

    ngx_http_dummy_data_parse(ctx, r);
    cf->request_processed++;

    if ((end = times(&tmsend)) == (clock_t)-1)
      ngx_log_debug(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                    "XX-dummy : Failed to get time");

    /* NOTE(review): threshold of 10 is in clock ticks, not milliseconds —
       the "10MS" label below assumes a 100 Hz tick; confirm */
    if (end - start > 10)
      ngx_log_debug(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                    "[MORE THAN 10MS] times : start:%l end:%l diff:%l",
                    start, end, (end-start));

    ctx->over = 1;

    /* blocking (or dropping) verdict: emit the forbidden page / action */
    if (ctx->block || ctx->drop) {
      cf->request_blocked++;
      rc = ngx_http_output_forbidden_page(ctx, r);
      //nothing:  return (NGX_OK);
      //redirect : return (NGX_HTTP_OK);
      return rc;
    }
    /* log-only verdict: emit the report but let the request continue */
    else if (ctx->log)
      rc = ngx_http_output_forbidden_page(ctx, r);
  }

  NX_DEBUG(_debug_mechanics, NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
           "NGX_FINISHED !");

  return NGX_DECLINED;
}
/*
 * Resolve a channel id to a channel pointer (NULL id or a failed lookup
 * yields a NULL channel) and delegate the actual response generation to
 * ngx_http_push_response_channel_ptr_info().
 */
static ngx_int_t
ngx_http_push_response_channel_info(ngx_str_t *channel_id,
    ngx_http_request_t *r, ngx_int_t status_code)
{
    ngx_http_push_channel_t   *found = NULL;
    ngx_http_push_loc_conf_t  *cf;

    cf = ngx_http_get_module_loc_conf(r, ngx_http_push_module);

    if (channel_id != NULL) {
        found = ngx_http_push_store->find_channel(channel_id,
                                                  cf->channel_timeout, NULL);
    }

    return ngx_http_push_response_channel_ptr_info(found, r, status_code);
}
/*
 * Content handler of the parallel module: splits a GET request into several
 * range subrequests ("fibers") issued concurrently against an upstream.
 * Returns NGX_AGAIN once the fibers are started (completion is driven by the
 * subrequest handlers), or an HTTP error / rc on early failure.
 */
static ngx_int_t
ngx_http_parallel_handler(ngx_http_request_t *r)
{
    ngx_http_parallel_loc_conf_t  *conf;
    ngx_http_parallel_ctx_t       *ctx;
    ngx_http_range_t               range = { 0, 0, 0 };
    ngx_uint_t                     header_in_count;
    ngx_uint_t                     fiber_count;
    ngx_uint_t                     i;
    ngx_flag_t                     key_inited = 0;
    ngx_int_t                      rc;
    u_char                         key[NGX_FIXED_BUFFER_CACHE_KEY_SIZE];
    size_t                         initial_chunk_size;
    u_char                        *p;
    off_t                          cached_response_length = -1;
    /* -1 = length unknown; stays negative unless the request carries a
       bounded range or the content-length cache has an entry */
    off_t                          expected_response_length = -1;

    /* validate method - only GET / HEAD are supported */
    if (!(r->method & (NGX_HTTP_GET | NGX_HTTP_HEAD)))
    {
        ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
            "ngx_http_parallel_handler: "
            "unsupported method %ui", r->method);
        return NGX_HTTP_NOT_ALLOWED;
    }

    /* discard request body, since we don't need it here */
    rc = ngx_http_discard_request_body(r);
    if (rc != NGX_OK)
    {
        ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
            "ngx_http_parallel_handler: "
            "ngx_http_discard_request_body failed %i", rc);
        return rc;
    }

    /* get fiber count and initial chunk size from configuration */
    conf = ngx_http_get_module_loc_conf(r, ngx_http_parallel_module);
    fiber_count = conf->fiber_count;
    initial_chunk_size = conf->min_chunk_size;

    if (r->method == NGX_HTTP_HEAD)
    {
        /* no body to parallelize - a single fiber is enough */
        fiber_count = 1;
    }
    else if (r->headers_in.range != NULL)
    {
        rc = ngx_http_parallel_range_parse(r, &range);
        if (rc != NGX_OK)
        {
            /* unparsable range - fall back to a single pass-through fiber */
            ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                "ngx_http_parallel_handler: "
                "ngx_http_parallel_range_parse failed \"%V\", "
                "proxying the request as is",
                &r->headers_in.range->value);
            fiber_count = 1;
        }
        else
        {
            range.is_range_request = 1;
            if (range.end != 0)
            {
                /* bounded range gives us the expected length up front */
                expected_response_length = range.end - range.start;
            }
        }
    }

    /* no known length yet - try the shared content-length cache */
    if (expected_response_length < 0 &&
        fiber_count != 1 &&
        conf->content_length_cache_zone != NULL)
    {
        key_inited = 1;
        ngx_http_parallel_calculate_key(key, r);
        ngx_fixed_buffer_cache_fetch(
            conf->content_length_cache_zone,
            key,
            (u_char*)&cached_response_length);
        /* note: stays -1 when the cache has no entry */
        expected_response_length = cached_response_length;
    }

    if (expected_response_length >= 0)
    {
        /* optimize the initial chunk size according to the response length:
           clamp to [min_chunk_size, max_chunk_size], otherwise divide the
           length evenly across the fibers */
        if (expected_response_length <=
            (off_t)(fiber_count * conf->min_chunk_size))
        {
            initial_chunk_size = conf->min_chunk_size;
        }
        else if (expected_response_length >=
            (off_t)(fiber_count * conf->max_chunk_size))
        {
            initial_chunk_size = conf->max_chunk_size;
        }
        else
        {
            initial_chunk_size =
                DIV_CEIL(expected_response_length, fiber_count);
        }

        /* optimize the fiber count according to the response length */
        if (expected_response_length == 0)
        {
            fiber_count = 1;
        }
        else if (expected_response_length <
            (off_t)(fiber_count * initial_chunk_size))
        {
            fiber_count =
                DIV_CEIL(expected_response_length, initial_chunk_size);
        }
    }

    /* allocate context (ctx struct already embeds one fiber slot, hence
       fiber_count - 1 extra slots) */
    ctx = ngx_pcalloc(r->pool, sizeof(*ctx) +
        sizeof(ngx_http_parallel_fiber_ctx_t) * (fiber_count - 1));
    if (ctx == NULL)
    {
        ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
            "ngx_http_parallel_handler: ngx_pcalloc failed");
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }

    ctx->error_code = NGX_AGAIN;
    ctx->initial_chunk_size = initial_chunk_size;
    ctx->chunk_size = initial_chunk_size;
    ctx->range = range;
    ctx->fiber_count = fiber_count;
    ctx->initial_requested_size = fiber_count * initial_chunk_size;
    ctx->cached_response_length = cached_response_length;
    if (key_inited)
    {
        /* keep the key so the cache can be updated after the real length
           is learned */
        ngx_memcpy(ctx->key, key, sizeof(ctx->key));
        ctx->key_inited = 1;
    }

    ngx_http_set_ctx(r, ctx, ngx_http_parallel_module);

    /* count the number of input headers */
    header_in_count =
        ngx_http_parallel_list_get_count(&r->headers_in.headers);

    /* saved so headers_in can be restored after each fiber swap-in */
    ctx->original_headers_in = r->headers_in;

    /* build the subrequest uri: uri_prefix + r->uri, NUL terminated */
    ctx->sr_uri.data = ngx_pnalloc(
        r->pool, conf->uri_prefix.len + r->uri.len + 1);
    if (ctx->sr_uri.data == NULL)
    {
        ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
            "ngx_http_parallel_handler: ngx_pnalloc failed");
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }
    p = ngx_copy(ctx->sr_uri.data, conf->uri_prefix.data,
        conf->uri_prefix.len);
    p = ngx_copy(p, r->uri.data, r->uri.len);
    *p = '\0';
    ctx->sr_uri.len = p - ctx->sr_uri.data;

    /* init and start the fibers */
    for (i = 0; i < fiber_count; i++)
    {
        rc = ngx_http_parallel_init_fiber(
            r, header_in_count, fiber_count == 1, &ctx->fibers[i]);
        if (rc != NGX_OK)
        {
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }

        rc = ngx_http_parallel_start_fiber(r, &ctx->fibers[i], i);
        if (rc != NGX_OK)
        {
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }
    }

    /* fibers 0..fiber_count-1 were handed out above; next chunk follows */
    ctx->next_request_chunk = fiber_count;

    return NGX_AGAIN;
}
/*
 * The terminal output filter: appends the incoming chain "in" to the
 * request's pending chain r->out, applies postpone_output buffering and
 * limit_rate throttling, then pushes data to the client via c->send_chain().
 * Returns NGX_OK when everything pending was sent, NGX_AGAIN when output
 * remains buffered, NGX_ERROR on connection error.
 */
ngx_int_t
ngx_http_write_filter(ngx_http_request_t *r, ngx_chain_t *in)
{
    off_t                      size, sent, nsent, limit;
    ngx_uint_t                 last, flush;
    ngx_msec_t                 delay;
    ngx_chain_t               *cl, *ln, **ll, *chain;
    ngx_connection_t          *c;
    ngx_http_core_loc_conf_t  *clcf;

    c = r->connection;

    if (c->error) {
        return NGX_ERROR;
    }

    size = 0;
    flush = 0;
    last = 0;
    ll = &r->out;

    /* find the size, the flush point and the last link of the saved chain */

    for (cl = r->out; cl; cl = cl->next) {
        ll = &cl->next;

        ngx_log_debug7(NGX_LOG_DEBUG_EVENT, c->log, 0,
                       "write old buf t:%d f:%d %p, pos %p, size: %z "
                       "file: %O, size: %z",
                       cl->buf->temporary, cl->buf->in_file,
                       cl->buf->start, cl->buf->pos,
                       cl->buf->last - cl->buf->pos,
                       cl->buf->file_pos,
                       cl->buf->file_last - cl->buf->file_pos);

#if 1
        /* zero-size non-special bufs indicate a bug in an upstream filter */
        if (ngx_buf_size(cl->buf) == 0 && !ngx_buf_special(cl->buf)) {
            ngx_log_error(NGX_LOG_ALERT, c->log, 0,
                          "zero size buf in writer "
                          "t:%d r:%d f:%d %p %p-%p %p %O-%O",
                          cl->buf->temporary,
                          cl->buf->recycled,
                          cl->buf->in_file,
                          cl->buf->start,
                          cl->buf->pos,
                          cl->buf->last,
                          cl->buf->file,
                          cl->buf->file_pos,
                          cl->buf->file_last);

            ngx_debug_point();
            return NGX_ERROR;
        }
#endif

        size += ngx_buf_size(cl->buf);

        if (cl->buf->flush || cl->buf->recycled) {
            flush = 1;
        }

        if (cl->buf->last_buf) {
            last = 1;
        }
    }

    /* add the new chain to the existent one */

    for (ln = in; ln; ln = ln->next) {
        cl = ngx_alloc_chain_link(r->pool);
        if (cl == NULL) {
            return NGX_ERROR;
        }

        cl->buf = ln->buf;
        *ll = cl;
        ll = &cl->next;

        ngx_log_debug7(NGX_LOG_DEBUG_EVENT, c->log, 0,
                       "write new buf t:%d f:%d %p, pos %p, size: %z "
                       "file: %O, size: %z",
                       cl->buf->temporary, cl->buf->in_file,
                       cl->buf->start, cl->buf->pos,
                       cl->buf->last - cl->buf->pos,
                       cl->buf->file_pos,
                       cl->buf->file_last - cl->buf->file_pos);

#if 1
        if (ngx_buf_size(cl->buf) == 0 && !ngx_buf_special(cl->buf)) {
            ngx_log_error(NGX_LOG_ALERT, c->log, 0,
                          "zero size buf in writer "
                          "t:%d r:%d f:%d %p %p-%p %p %O-%O",
                          cl->buf->temporary,
                          cl->buf->recycled,
                          cl->buf->in_file,
                          cl->buf->start,
                          cl->buf->pos,
                          cl->buf->last,
                          cl->buf->file,
                          cl->buf->file_pos,
                          cl->buf->file_last);

            ngx_debug_point();
            return NGX_ERROR;
        }
#endif

        size += ngx_buf_size(cl->buf);

        if (cl->buf->flush || cl->buf->recycled) {
            flush = 1;
        }

        if (cl->buf->last_buf) {
            last = 1;
        }
    }

    *ll = NULL;

    ngx_log_debug3(NGX_LOG_DEBUG_HTTP, c->log, 0,
                   "http write filter: l:%d f:%d s:%O", last, flush, size);

    clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module);

    /*
     * avoid the output if there are no last buf, no flush point,
     * there are the incoming bufs and the size of all bufs
     * is smaller than "postpone_output" directive
     */

    if (!last && !flush && in && size < (off_t) clcf->postpone_output) {
        return NGX_OK;
    }

    if (c->write->delayed) {
        /* rate-limit timer still pending; keep the data buffered */
        c->buffered |= NGX_HTTP_WRITE_BUFFERED;
        return NGX_AGAIN;
    }

    if (size == 0 && !(c->buffered & NGX_LOWLEVEL_BUFFERED)) {
        if (last || flush) {
            /* only special bufs remained - release the whole chain */
            for (cl = r->out; cl; /* void */) {
                ln = cl;
                cl = cl->next;
                ngx_free_chain(r->pool, ln);
            }

            r->out = NULL;
            c->buffered &= ~NGX_HTTP_WRITE_BUFFERED;

            return NGX_OK;
        }

        ngx_log_error(NGX_LOG_ALERT, c->log, 0,
                      "the http output chain is empty");

        ngx_debug_point();

        return NGX_ERROR;
    }

    if (r->limit_rate) {
        if (r->limit_rate_after == 0) {
            r->limit_rate_after = clcf->limit_rate_after;
        }

        /* bytes we are still allowed to send in the elapsed time window */
        limit = (off_t) r->limit_rate * (ngx_time() - r->start_sec + 1)
                - (c->sent - r->limit_rate_after);

        if (limit <= 0) {
            c->write->delayed = 1;
            ngx_add_timer(c->write,
                          (ngx_msec_t) (- limit * 1000 / r->limit_rate + 1));

            c->buffered |= NGX_HTTP_WRITE_BUFFERED;

            return NGX_AGAIN;
        }

        if (clcf->sendfile_max_chunk
            && (off_t) clcf->sendfile_max_chunk < limit)
        {
            limit = clcf->sendfile_max_chunk;
        }

    } else {
        limit = clcf->sendfile_max_chunk;
    }

    sent = c->sent;

    ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0,
                   "http write filter limit %O", limit);

    chain = c->send_chain(c, r->out, limit);

    ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0,
                   "http write filter %p", chain);

    if (chain == NGX_CHAIN_ERROR) {
        c->error = 1;
        return NGX_ERROR;
    }

    if (r->limit_rate) {

        nsent = c->sent;

        if (r->limit_rate_after) {

            sent -= r->limit_rate_after;
            if (sent < 0) {
                sent = 0;
            }

            nsent -= r->limit_rate_after;
            if (nsent < 0) {
                nsent = 0;
            }
        }

        delay = (ngx_msec_t) ((nsent - sent) * 1000 / r->limit_rate);

        if (delay > 0) {
            limit = 0;
            c->write->delayed = 1;
            ngx_add_timer(c->write, delay);
        }
    }

    /* a short write near the limit: re-arm the write event shortly */
    if (limit
        && c->write->ready
        && c->sent - sent >= limit - (off_t) (2 * ngx_pagesize))
    {
        c->write->delayed = 1;
        ngx_add_timer(c->write, 1);
    }

    /* free the links that send_chain fully consumed */
    for (cl = r->out; cl && cl != chain; /* void */) {
        ln = cl;
        cl = cl->next;
        ngx_free_chain(r->pool, ln);
    }

    r->out = chain;

    if (chain) {
        c->buffered |= NGX_HTTP_WRITE_BUFFERED;
        return NGX_AGAIN;
    }

    c->buffered &= ~NGX_HTTP_WRITE_BUFFERED;

    if ((c->buffered & NGX_LOWLEVEL_BUFFERED) && r->postponed == NULL) {
        return NGX_AGAIN;
    }

    return NGX_OK;
}
/*
 * Starts one fiber: grabs (or allocates) a response buffer, computes the
 * byte range for chunk_index, temporarily swaps the fiber's headers_in into
 * the main request, and fires an in-memory subrequest against ctx->sr_uri.
 * Returns NGX_OK / NGX_ERROR / subrequest rc.
 */
static ngx_int_t
ngx_http_parallel_start_fiber(
    ngx_http_request_t *r,
    ngx_http_parallel_fiber_ctx_t* fiber,
    uint64_t chunk_index)
{
    ngx_http_parallel_loc_conf_t  *conf;
    ngx_http_parallel_ctx_t       *ctx;
    ngx_http_request_t            *sr;
    ngx_str_t                      args = ngx_null_string;
    ngx_table_elt_t               *h;
    ngx_int_t                      rc;
    size_t                         alloc_size;
    off_t                          start_offset;
    off_t                          end_offset;
    ngx_chain_t                   *cl;
    ngx_buf_t                     *b;

    ctx = ngx_http_get_module_ctx(r, ngx_http_parallel_module);
    conf = ngx_http_get_module_loc_conf(r, ngx_http_parallel_module);

    /* get a buffer for the response (reused from ctx->free when possible) */
    cl = ngx_chain_get_free_buf(r->pool, &ctx->free);
    if (cl == NULL)
    {
        ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
            "ngx_http_parallel_start_fiber: ngx_chain_get_free_buf failed");
        return NGX_ERROR;
    }

    b = cl->buf;

    if (cl->buf->start == NULL)
    {
        /* fresh link - allocate room for upstream headers + one chunk */
        alloc_size = conf->max_headers_size + conf->max_chunk_size;

        b->start = ngx_palloc(r->pool, alloc_size);
        if (b->start == NULL)
        {
            ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                "ngx_http_parallel_start_fiber: ngx_palloc failed");
            return NGX_ERROR;
        }

        b->pos = b->start;
        b->last = b->start;
        b->end = b->last + alloc_size;
        b->temporary = 1;
        b->tag = (ngx_buf_tag_t)&ngx_http_parallel_module;
        ctx->allocated_count++;
    }

    /* allocate a list for the input headers */
    if (ngx_list_init(&fiber->upstream_headers, r->pool, 8,
        sizeof(ngx_table_elt_t)) != NGX_OK)
    {
        ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
            "ngx_http_parallel_start_fiber: ngx_list_init failed");
        return NGX_ERROR;
    }

    /* set the range header for this chunk (skipped in single-fiber mode,
       where the original request is proxied as is) */
    if (ctx->fiber_count != 1)
    {
        start_offset = ctx->range.start;
        if (chunk_index < ctx->fiber_count)
        {
            /* one of the initial, equally sized chunks */
            start_offset += chunk_index * ctx->chunk_size;
        }
        else
        {
            /* follow-up chunk, laid out after the initial batch */
            start_offset += ctx->initial_requested_size +
                (chunk_index - ctx->fiber_count) * ctx->chunk_size;
        }

        end_offset = start_offset + ctx->chunk_size;
        if (ctx->range.end != 0 && ctx->range.end < end_offset)
        {
            end_offset = ctx->range.end;
        }

        /* write the Range value in place into the fiber's header buffer */
        h = fiber->headers_in.range;
        h->value.len = ngx_sprintf(
            h->value.data,
            RANGE_FORMAT,
            start_offset,
            end_offset - 1) - h->value.data;
        h->value.data[h->value.len] = '\0';
    }

    /* start the request - swap in the fiber's headers only for the
       duration of ngx_http_subrequest, then restore the originals */
    r->headers_in = fiber->headers_in;

    rc = ngx_http_subrequest(r, &ctx->sr_uri, &args, &sr, fiber->psr,
        NGX_HTTP_SUBREQUEST_WAITED | NGX_HTTP_SUBREQUEST_IN_MEMORY);

    r->headers_in = ctx->original_headers_in;

    if (rc != NGX_OK)
    {
        ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
            "ngx_http_parallel_start_fiber: ngx_http_subrequest failed %i",
            rc);
        return rc;
    }

    ngx_http_set_ctx(sr, fiber, ngx_http_parallel_module);

    ctx->active_fibers++;

    sr->write_event_handler = ngx_http_parallel_fiber_initial_wev_handler;

    sr->method = r->method;            // copy the method to the subrequest
    sr->method_name = r->method_name;  // (ngx_http_subrequest always uses GET)
    sr->header_in = r->header_in;

    // fix the last pointer in headers_in (from echo-nginx-module)
    if (fiber->headers_in.headers.last == &fiber->headers_in.headers.part)
    {
        sr->headers_in.headers.last = &sr->headers_in.headers.part;
    }

    fiber->chunk_index = chunk_index;
    fiber->cl = cl;
    fiber->sr = sr;

    ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
        "ngx_http_parallel_start_fiber: started fiber %uL", chunk_index);

    return NGX_OK;
}
/*
 * Publish a channel-event message (subscribe/unsubscribe/publish/delete
 * notifications) into the configured "meta" events channel, if one is
 * configured. No-op (NGX_OK) when channel_events_channel_id is unset.
 */
ngx_int_t nchan_maybe_send_channel_event_message(ngx_http_request_t *r, channel_event_type_t event_type) {
  /* a private, lazily initialized loc_conf used only for publishing
     event messages (shared across requests and workers' requests) */
  static nchan_loc_conf_t     evcf_data;
  static nchan_loc_conf_t    *evcf = NULL;

  static ngx_str_t   group = ngx_string("meta");

  static ngx_str_t   evt_sub_enqueue = ngx_string("subscriber_enqueue");
  static ngx_str_t   evt_sub_dequeue = ngx_string("subscriber_dequeue");
  static ngx_str_t   evt_sub_recvmsg = ngx_string("subscriber_receive_message");
  static ngx_str_t   evt_sub_recvsts = ngx_string("subscriber_receive_status");
  static ngx_str_t   evt_chan_publish= ngx_string("channel_publish");
  static ngx_str_t   evt_chan_delete = ngx_string("channel_delete");

  struct timeval             tv;
  nchan_loc_conf_t          *cf = ngx_http_get_module_loc_conf(r, ngx_nchan_module);
  ngx_http_complex_value_t  *cv = cf->channel_events_channel_id;
  if(cv==NULL) {
    //nothing to send
    return NGX_OK;
  }

  /* NOTE(review): ctx is dereferenced below without a NULL check -
     presumably every request reaching this point has a module ctx; verify
     against the callers */
  nchan_request_ctx_t       *ctx = ngx_http_get_module_ctx(r, ngx_nchan_module);
  ngx_str_t                  tmpid;
  size_t                     sz;
  ngx_str_t                 *id;
  u_char                    *cur;
  ngx_str_t                  evstr;
  ngx_buf_t                  buf;
  nchan_msg_t                msg;

  /* map the event type to its human-readable event name (also recorded in
     the request ctx for later use) */
  switch(event_type) {
    case SUB_ENQUEUE:
      ctx->channel_event_name = &evt_sub_enqueue;
      break;
    case SUB_DEQUEUE:
      ctx->channel_event_name = &evt_sub_dequeue;
      break;
    case SUB_RECEIVE_MESSAGE:
      ctx->channel_event_name = &evt_sub_recvmsg;
      break;
    case SUB_RECEIVE_STATUS:
      ctx->channel_event_name = &evt_sub_recvsts;
      break;
    case CHAN_PUBLISH:
      ctx->channel_event_name = &evt_chan_publish;
      break;
    case CHAN_DELETE:
      ctx->channel_event_name = &evt_chan_delete;
      break;
  }

  //the id: "meta/" + evaluated channel id, in one allocation with the
  //ngx_str_t header
  /* NOTE(review): the ngx_http_complex_value return value is ignored here
     and below - on failure tmpid/evstr would be used uninitialized; confirm
     whether evaluation can fail for these values */
  ngx_http_complex_value(r, cv, &tmpid);
  sz = group.len + 1 + tmpid.len;
  if((id = ngx_palloc(r->pool, sizeof(*id) + sz)) == NULL) {
    ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, "nchan: can't allocate space for legacy channel id");
    return NGX_ERROR;
  }
  id->len = sz;
  id->data = (u_char *)&id[1];
  cur = id->data;
  ngx_memcpy(cur, group.data, group.len);
  cur += group.len;
  cur[0]='/';
  cur++;
  ngx_memcpy(cur, tmpid.data, tmpid.len);

  //the event message
  ngx_http_complex_value(r, cf->channel_event_string, &evstr);
  ngx_memzero(&buf, sizeof(buf)); //do we really need this?...
  buf.temporary = 1;
  buf.memory = 1;
  buf.last_buf = 1;
  buf.pos = evstr.data;
  buf.last = evstr.data + evstr.len;
  buf.start = buf.pos;
  buf.end = buf.last;

  ngx_memzero(&msg, sizeof(msg));
  ngx_gettimeofday(&tv);
  msg.id.time = tv.tv_sec;
  msg.id.tagcount = 1;
  msg.buf = &buf;

  if(evcf == NULL) {
    /* one-time setup of the events-channel publishing configuration */
    evcf = &evcf_data;
    ngx_memzero(evcf, sizeof(*evcf));
    evcf->buffer_timeout = 10;
    evcf->max_messages = NGX_MAX_INT_T_VALUE;
    evcf->subscriber_first_message = 0;
    evcf->channel_timeout = 30;
  }
  /* storage engine / redis settings follow the current location config */
  evcf->storage_engine = cf->storage_engine;
  evcf->redis = cf->redis;

  evcf->storage_engine->publish(id, &msg, evcf, NULL, NULL);

  return NGX_OK;
}
/*
 * Called once the first upstream response arrives: derives the full
 * instance length from its Content-Range header, updates the length cache,
 * computes the final chunk size/count, validates already-received chunks,
 * saves ETag / Last-Modified for consistency checks, and sends the client
 * response headers (206 for range requests, 200 otherwise).
 */
static ngx_int_t
ngx_http_parallel_init_chunks(
    ngx_http_parallel_ctx_t *ctx,
    ngx_http_request_t *r,
    ngx_http_headers_out_t* headers_out)
{
    ngx_http_parallel_loc_conf_t  *conf;
    ngx_table_elt_t               *h;
    ngx_str_t                     *content_range;
    uint64_t                       missing_chunks_mask;
    off_t                          instance_length;
    off_t                          remaining_length;
    off_t                          content_length;
    ngx_int_t                      rc;

    /* find the instance length (total entity size from Content-Range) */
    content_range = ngx_http_parallel_header_get_value(
        &headers_out->headers, &content_range_name, content_range_hash);
    if (content_range == NULL)
    {
        ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
            "ngx_http_parallel_init_chunks: failed to get content-range header");
        return NGX_HTTP_BAD_GATEWAY;
    }

    instance_length = ngx_http_parallel_get_instance_length(
        content_range, r->connection->log);
    if (instance_length < 0)
    {
        return NGX_HTTP_BAD_GATEWAY;
    }

    conf = ngx_http_get_module_loc_conf(r, ngx_http_parallel_module);

    /* update cache when the real length differs from the cached guess */
    if (conf->content_length_cache_zone != NULL &&
        instance_length != ctx->cached_response_length)
    {
        if (!ctx->key_inited)
        {
            ngx_http_parallel_calculate_key(ctx->key, r);
        }

        ngx_fixed_buffer_cache_store(
            conf->content_length_cache_zone,
            ctx->key,
            (u_char*)&instance_length,
            1);
    }

    /* find the content length - clip the instance length to the requested
       range and make it relative to range.start */
    content_length = instance_length;
    if (ctx->range.end != 0 && ctx->range.end < content_length)
    {
        content_length = ctx->range.end;
    }

    if (content_length < ctx->range.start)
    {
        ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
            "ngx_http_parallel_init_chunks: "
            "unexpected, content length %O less than range start %O",
            content_length, ctx->range.start);
        return NGX_HTTP_BAD_GATEWAY;
    }

    content_length -= ctx->range.start;

    /* find the chunk size and count */
    if (content_length <= (off_t)ctx->initial_requested_size)
    {
        /* everything fits in the chunks already requested */
        ctx->chunk_count = DIV_CEIL(content_length, ctx->chunk_size);
        ctx->last_chunk_size = content_length + ctx->chunk_size -
            ctx->chunk_count * ctx->chunk_size;
    }
    else
    {
        /* size the remaining chunks: clamp to [min, max], else split
           evenly across the fibers */
        remaining_length = content_length - ctx->initial_requested_size;

        if (remaining_length <= (off_t)(ctx->fiber_count * conf->min_chunk_size))
        {
            ctx->chunk_size = conf->min_chunk_size;
        }
        else if (remaining_length >= (off_t)(ctx->fiber_count * conf->max_chunk_size))
        {
            ctx->chunk_size = conf->max_chunk_size;
        }
        else
        {
            ctx->chunk_size = DIV_CEIL(remaining_length, ctx->fiber_count);
        }

        ctx->chunk_count = DIV_CEIL(remaining_length, ctx->chunk_size);
        ctx->last_chunk_size = remaining_length + ctx->chunk_size -
            ctx->chunk_count * ctx->chunk_size;
        ctx->chunk_count += ctx->fiber_count;
    }

    ngx_log_debug3(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
        "ngx_http_parallel_init_chunks: "
        "chunk count %uL chunk size %uz last chunk size %uz",
        ctx->chunk_count, ctx->chunk_size, ctx->last_chunk_size);

    /* check for missing chunks - every bit of missing_chunks below
       chunk_count must be clear */
    missing_chunks_mask = ULLONG_MAX;
    if (ctx->chunk_count < 64)
    {
        missing_chunks_mask = ((1ULL << ctx->chunk_count) - 1);
    }

    if ((ctx->missing_chunks & missing_chunks_mask) != 0)
    {
        ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
            "ngx_http_parallel_init_chunks: "
            "missing chunks 0x%uxL chunk count %uL",
            ctx->missing_chunks, ctx->chunk_count);
        return NGX_HTTP_BAD_GATEWAY;
    }

    /* initialize the chunks array (null terminated) */
    ctx->chunks = ngx_pcalloc(r->pool,
        sizeof(ctx->chunks[0]) * (ctx->chunk_count + 1));
    if (ctx->chunks == NULL)
    {
        ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
            "ngx_http_parallel_init_chunks: ngx_pcalloc failed");
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }

    /* save headers for consistency check against later chunk responses */
    if (conf->consistency_check_etag)
    {
        h = headers_out->etag;
        if (h != NULL)
        {
            ctx->etag.data = ngx_pstrdup(r->pool, &h->value);
            if (ctx->etag.data == NULL)
            {
                ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                    "ngx_http_parallel_init_chunks: ngx_pstrdup failed (1)");
                return NGX_HTTP_INTERNAL_SERVER_ERROR;
            }
            ctx->etag.len = h->value.len;
        }
    }

    if (conf->consistency_check_last_modified)
    {
        h = headers_out->last_modified;
        if (h != NULL)
        {
            ctx->last_modified.data = ngx_pstrdup(r->pool, &h->value);
            if (ctx->last_modified.data == NULL)
            {
                ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                    "ngx_http_parallel_init_chunks: ngx_pstrdup failed (2)");
                return NGX_HTTP_INTERNAL_SERVER_ERROR;
            }
            ctx->last_modified.len = h->value.len;
        }
    }

    /* build the response headers from the upstream's */
    r->headers_out = *headers_out;

    if (ctx->range.is_range_request)
    {
        /* leave the status as 206 and update the content range to reflect
           the full (merged) range being returned */
        content_range->data = ngx_pnalloc(
            r->pool, sizeof(CONTENT_RANGE_FORMAT) + 3 * NGX_OFF_T_LEN);
        if (content_range->data == NULL)
        {
            ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                "ngx_http_parallel_init_chunks: ngx_pnalloc failed");
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }

        content_range->len = ngx_sprintf(content_range->data,
            CONTENT_RANGE_FORMAT,
            ctx->range.start,
            ctx->range.start + content_length - 1,
            instance_length) - content_range->data;
    }
    else
    {
        /* change status to 200 and clear the content range */
        r->headers_out.status = NGX_HTTP_OK;
        r->headers_out.status_line.len = 0;

        ngx_http_parallel_header_clear_value(
            &r->headers_out.headers, &content_range_name, content_range_hash);
    }

    ngx_http_clear_content_length(r);
    r->headers_out.content_length_n = content_length;

    /* send the response headers */
    rc = ngx_http_send_header(r);
    if (rc == NGX_ERROR || rc > NGX_OK)
    {
        ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
            "ngx_http_parallel_init_chunks: ngx_http_send_header failed %i",
            rc);
        return rc;
    }

    return NGX_OK;
}
/*
 * Status-page handler: streams the per-key statistics of every configured
 * shared zone as CSV-like lines ("key,field1,field2,...\n"), one 512-byte
 * buffer per record, followed by a last_buf to terminate the response.
 */
static ngx_int_t
ngx_http_reqstat_show_handler(ngx_http_request_t *r)
{
    ngx_int_t                    rc;
    ngx_buf_t                   *b;
    ngx_uint_t                   i, j;
    ngx_array_t                 *display;
    ngx_chain_t                 *tl, *free, *busy;
    ngx_queue_t                 *q;
    ngx_shm_zone_t             **shm_zone;
    ngx_http_reqstat_ctx_t      *ctx;
    ngx_http_reqstat_conf_t     *slcf;
    ngx_http_reqstat_conf_t     *smcf;
    ngx_http_reqstat_rbnode_t   *node;

    slcf = ngx_http_get_module_loc_conf(r, ngx_http_reqstat_module);
    smcf = ngx_http_get_module_main_conf(r, ngx_http_reqstat_module);

    /* location-level display list overrides the main-level monitor list */
    display = slcf->display == NULL ? smcf->monitor : slcf->display;
    if (display == NULL) {
        r->headers_out.status = NGX_HTTP_NO_CONTENT;
        return ngx_http_send_header(r);
    }

    /* unknown total size - stream with no Content-Length */
    r->headers_out.status = NGX_HTTP_OK;
    ngx_http_clear_content_length(r);

    rc = ngx_http_send_header(r);
    if (rc == NGX_ERROR || rc > NGX_OK || r->header_only) {
        return rc;
    }

    shm_zone = display->elts;

    for (free = busy = NULL, i = 0; i < display->nelts; i++) {

        ctx = shm_zone[i]->data;

        /* walk every tracked key in this zone's LRU queue */
        for (q = ngx_queue_head(&ctx->sh->queue);
             q != ngx_queue_sentinel(&ctx->sh->queue);
             q = ngx_queue_next(q))
        {
            node = ngx_queue_data(q, ngx_http_reqstat_rbnode_t, queue);

            tl = ngx_chain_get_free_buf(r->pool, &free);
            if (tl == NULL) {
                return NGX_HTTP_INTERNAL_SERVER_ERROR;
            }

            b = tl->buf;
            if (b->start == NULL) {
                b->start = ngx_pcalloc(r->pool, 512);
                if (b->start == NULL) {
                    return NGX_HTTP_INTERNAL_SERVER_ERROR;
                }

                b->end = b->start + 512;
            }

            b->last = b->pos = b->start;
            b->memory = 1;
            b->temporary = 1;

            b->last = ngx_slprintf(b->last, b->end, "%*s,",
                                   (size_t) node->len, node->data);

            for (j = 0;
                 j < sizeof(ngx_http_reqstat_fields) / sizeof(off_t);
                 j++)
            {
                b->last = ngx_slprintf(b->last, b->end, "%uA,",
                                       *REQ_FIELD(node,
                                           ngx_http_reqstat_fields[j]));
            }

            /* turn the trailing comma into the record terminator */
            *(b->last - 1) = '\n';

            if (ngx_http_output_filter(r, tl) == NGX_ERROR) {
                return NGX_HTTP_INTERNAL_SERVER_ERROR;
            }

#if nginx_version >= 1002000
            ngx_chain_update_chains(r->pool, &free, &busy, &tl,
                                    (ngx_buf_tag_t) &ngx_http_reqstat_module);
#else
            ngx_chain_update_chains(&free, &busy, &tl,
                                    (ngx_buf_tag_t) &ngx_http_reqstat_module);
#endif
        }
    }

    /* terminate the response with an empty last buffer */
    tl = ngx_chain_get_free_buf(r->pool, &free);
    if (tl == NULL) {
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }

    b = tl->buf;
    b->last_buf = 1;

    return ngx_http_output_filter(r, tl);
}
/*
 * Create the per-(sub)request XRLT module context.
 *
 * For the main request (id == 0) a fresh xrltContext is built from the
 * configured sheet and the configured parameters (converted to a
 * NULL-terminated xmlChar* name/value array), and a pool cleanup is
 * registered to destroy it. For subrequests (id != 0) the main request's
 * context is shared. Returns NULL on allocation failure or when the main
 * context is missing.
 *
 * Fix: the original dereferenced the ngx_palloc'ed xrltparams array
 * without checking for NULL; on pool exhaustion that was a NULL-pointer
 * write. All sibling allocations in this function are checked.
 */
ngx_inline static ngx_http_xrlt_ctx_t *
ngx_http_xrlt_create_ctx(ngx_http_request_t *r, size_t id)
{
    ngx_http_xrlt_ctx_t       *ctx;
    ngx_http_xrlt_loc_conf_t  *conf;
    ngx_uint_t                 i, j;
    ngx_http_xrlt_param_t     *params;
    xmlChar                  **xrltparams;

    conf = ngx_http_get_module_loc_conf(r, ngx_http_xrlt_module);

    ctx = ngx_pcalloc(r->pool, sizeof(ngx_http_xrlt_ctx_t));
    if (ctx == NULL) {
        return NULL;
    }

    if (id == 0) {
        /* main request: build a new XRLT context */
        ngx_pool_cleanup_t  *cln;

        cln = ngx_pool_cleanup_add(r->pool, 0);
        if (cln == NULL) {
            return NULL;
        }

        dd("XRLT context creation");

        if (conf->params != NULL && conf->params->nelts > 0) {
            params = conf->params->elts;

            /* flat array of name/value pairs, NULL terminated */
            xrltparams = ngx_palloc(
                r->pool, sizeof(xmlChar *) * (conf->params->nelts * 2 + 1)
            );
            if (xrltparams == NULL) {
                /* FIX: previously unchecked - would crash on OOM */
                return NULL;
            }

            j = 0;

            for (i = 0; i < conf->params->nelts; i++) {
                /* NOTE(review): xmlStrndup may return NULL on OOM;
                   presumably xrltContextCreate tolerates NULL entries -
                   confirm against the libxrlt API */
                xrltparams[j++] = xmlStrndup(params[i].name.data,
                                             params[i].name.len);
                xrltparams[j++] = xmlStrndup(params[i].value.value.data,
                                             params[i].value.value.len);
            }

            xrltparams[j] = NULL;
        } else {
            xrltparams = NULL;
        }

        ctx->xctx = xrltContextCreate(conf->sheet, xrltparams);
        ctx->id = 0;
        ctx->main_ctx = ctx;

        if (ctx->xctx == NULL) {
            /* kept non-fatal as in the original: the cleanup handler is
               still registered (with NULL data) and ctx is returned */
            ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
                          "Failed to create XRLT context");
        }

        cln->handler = ngx_http_xrlt_cleanup_context;
        cln->data = ctx->xctx;
    } else {
        /* subrequest: share the main request's XRLT context */
        ngx_http_xrlt_ctx_t  *main_ctx;

        main_ctx = ngx_http_get_module_ctx(r->main, ngx_http_xrlt_module);
        if (main_ctx == NULL || main_ctx->xctx == NULL) {
            return NULL;
        }

        ctx->xctx = main_ctx->xctx;
        ctx->id = id;
        ctx->main_ctx = main_ctx;
    }

    return ctx;
}
/*
 * Lua C function: sock:send(data) for UDP cosockets.
 * Accepts a string, number, or array table as payload; copies it into a
 * Lua userdata buffer and sends it on the underlying UDP connection.
 * Returns 1 on success, or nil plus an error string on failure.
 */
static int
ngx_http_lua_socket_udp_send(lua_State *L)
{
    ssize_t                              n;
    ngx_http_request_t                  *r;
    u_char                              *p;
    size_t                               len;
    ngx_http_lua_socket_udp_upstream_t  *u;
    int                                  type;
    const char                          *msg;
    ngx_str_t                            query;
    ngx_http_lua_loc_conf_t             *llcf;

    if (lua_gettop(L) != 2) {
        return luaL_error(L, "expecting 2 arguments (including the object), "
                          "but got %d", lua_gettop(L));
    }

    r = ngx_http_lua_get_req(L);
    if (r == NULL) {
        return luaL_error(L, "request object not found");
    }

    /* retrieve the upstream ctx stored in the socket object table */
    luaL_checktype(L, 1, LUA_TTABLE);

    lua_rawgeti(L, 1, SOCKET_CTX_INDEX);
    u = lua_touserdata(L, -1);
    lua_pop(L, 1);

    if (u == NULL || u->udp_connection.connection == NULL) {
        llcf = ngx_http_get_module_loc_conf(r, ngx_http_lua_module);

        if (llcf->log_socket_errors) {
            ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
                          "attempt to send data on a closed socket: u:%p, c:%p",
                          u, u ? u->udp_connection.connection : NULL);
        }

        lua_pushnil(L);
        lua_pushliteral(L, "closed");
        return 2;
    }

    if (u->request != r) {
        return luaL_error(L, "bad request");
    }

    /* clear any stale failure flags from a previous operation */
    if (u->ft_type) {
        u->ft_type = 0;
    }

    if (u->waiting) {
        lua_pushnil(L);
        lua_pushliteral(L, "socket busy");
        return 2;
    }

    /* first pass: compute the payload length */
    type = lua_type(L, 2);
    switch (type) {
        case LUA_TNUMBER:
        case LUA_TSTRING:
            lua_tolstring(L, 2, &len);
            break;

        case LUA_TTABLE:
            len = ngx_http_lua_calc_strlen_in_table(L, 2, 2, 1 /* strict */);
            break;

        default:
            /* NOTE(review): the message advertises boolean and nil but the
               switch does not accept them - confirm intended behavior */
            msg = lua_pushfstring(L, "string, number, boolean, nil, "
                                  "or array table expected, got %s",
                                  lua_typename(L, type));

            return luaL_argerror(L, 2, msg);
    }

    /* userdata buffer lives on the Lua stack for the duration of the call */
    query.data = lua_newuserdata(L, len);
    query.len = len;

    /* second pass: copy the payload into the buffer */
    switch (type) {
        case LUA_TNUMBER:
        case LUA_TSTRING:
            p = (u_char *) lua_tolstring(L, 2, &len);
            ngx_memcpy(query.data, (u_char *) p, len);
            break;

        case LUA_TTABLE:
            (void) ngx_http_lua_copy_str_in_table(L, 2, query.data);
            break;

        default:
            return luaL_error(L, "impossible to reach here");
    }

    u->ft_type = 0;

    /* mimic ngx_http_upstream_init_request here */

#if 1
    u->waiting = 0;
#endif

    dd("sending query %.*s", (int) query.len, query.data);

    n = ngx_send(u->udp_connection.connection, query.data, query.len);

    dd("ngx_send returns %d (query len %d)", (int) n, (int) query.len);

    if (n == NGX_ERROR || n == NGX_AGAIN) {
        u->socket_errno = ngx_socket_errno;

        return ngx_http_lua_socket_error_retval_handler(r, u, L);
    }

    if (n != (ssize_t) query.len) {
        dd("not the while query was sent");

        u->ft_type |= NGX_HTTP_LUA_SOCKET_FT_PARTIALWRITE;
        return ngx_http_lua_socket_error_retval_handler(r, u, L);
    }

    dd("n == len");

    lua_pushinteger(L, 1);
    return 1;
}
/*
 * Access-phase handler of the srcache module: decides whether to serve the
 * request from cache (via a fetch subrequest), bypass caching, or fall
 * through.  On a completed fetch subrequest it replays the cached header
 * and body through the next filters and finalizes the request.
 *
 * Fixes vs. the original:
 *   - the three ctx allocations used sizeof(ngx_http_srcache_filter_module)
 *     (the size of the ngx_module_t descriptor) instead of the intended
 *     sizeof(ngx_http_srcache_ctx_t);
 *   - ngx_alloc_chain_link()/ngx_calloc_buf() in the empty-body cache path
 *     were dereferenced without NULL checks.
 */
ngx_int_t
ngx_http_srcache_access_handler(ngx_http_request_t *r)
{
    ngx_str_t                       skip;
    ngx_int_t                       rc;
    ngx_http_srcache_loc_conf_t    *conf;
    ngx_http_srcache_main_conf_t   *smcf;
    ngx_http_srcache_ctx_t         *ctx;
    ngx_chain_t                    *cl;
    size_t                          len;
    unsigned                        no_store;

    /* subrequests are never cached by this handler */
    if (r != r->main) {
        return NGX_DECLINED;
    }

    /* being the main request */

    conf = ngx_http_get_module_loc_conf(r, ngx_http_srcache_filter_module);

    if (conf->fetch == NULL && conf->store == NULL) {
        dd("bypass: %.*s", (int) r->uri.len, r->uri.data);
        return NGX_DECLINED;
    }

    dd("store defined? %p", conf->store);

    dd("req method: %lu", (unsigned long) r->method);
    dd("cache methods: %lu", (unsigned long) conf->cache_methods);

    if (!(r->method & conf->cache_methods)) {
        ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                       "srcache_fetch and srcache_store skipped due to request "
                       "method %V", &r->method_name);

        return NGX_DECLINED;
    }

    if (conf->req_cache_control
        && ngx_http_srcache_request_no_cache(r, &no_store) == NGX_OK)
    {
        ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                       "srcache_fetch skipped due to request headers "
                       "\"Cache-Control: no-cache\" or \"Pragma: no-cache\"");

        if (!no_store) {
            /* register a ctx to give a chance to srcache_store to run */

            ctx = ngx_pcalloc(r->pool, sizeof(ngx_http_srcache_ctx_t));
            if (ctx == NULL) {
                return NGX_HTTP_INTERNAL_SERVER_ERROR;
            }

            ngx_http_set_ctx(r, ctx, ngx_http_srcache_filter_module);

        } else {
            ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                           "srcache_store skipped due to request header "
                           "\"Cache-Control: no-store\"");
        }

        return NGX_DECLINED;
    }

    if (conf->fetch_skip != NULL
        && ngx_http_complex_value(r, conf->fetch_skip, &skip) == NGX_OK
        && skip.len
        && (skip.len != 1 || skip.data[0] != '0'))
    {
        ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                       "srcache_fetch skipped due to the true value fed into "
                       "srcache_fetch_skip: \"%V\"", &skip);

        /* register a ctx to give a chance to srcache_store to run */

        ctx = ngx_pcalloc(r->pool, sizeof(ngx_http_srcache_ctx_t));
        if (ctx == NULL) {
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }

        ngx_http_set_ctx(r, ctx, ngx_http_srcache_filter_module);

        return NGX_DECLINED;
    }

    ctx = ngx_http_get_module_ctx(r, ngx_http_srcache_filter_module);

    if (ctx != NULL) {
        /*
        if (ctx->fetch_error) {
            return NGX_DECLINED;
        }
        */

        if (ctx->waiting_subrequest) {
            dd("waiting subrequest");
            return NGX_AGAIN;
        }

        if (ctx->request_done) {
            dd("request done");

            /* re-post the main request so the response can be written */
            if (
#if defined(nginx_version) && nginx_version >= 8012
                ngx_http_post_request(r, NULL)
#else
                ngx_http_post_request(r)
#endif
                != NGX_OK)
            {
                return NGX_ERROR;
            }

            if (!ctx->from_cache) {
                return NGX_DECLINED;
            }

            dd("sending header");

            if (ctx->body_from_cache && !(r->method & NGX_HTTP_HEAD)) {
                /* sum up the cached body length and mark the last buf */
                len = 0;

                for (cl = ctx->body_from_cache; cl->next; cl = cl->next) {
                    len += ngx_buf_size(cl->buf);
                }

                len += ngx_buf_size(cl->buf);

                cl->buf->last_buf = 1;

                r->headers_out.content_length_n = len;

                rc = ngx_http_srcache_next_header_filter(r);

                if (rc == NGX_ERROR || rc >= NGX_HTTP_SPECIAL_RESPONSE) {
                    return rc;
                }

                rc = ngx_http_srcache_next_body_filter(r,
                                                       ctx->body_from_cache);

                if (rc == NGX_ERROR || rc >= NGX_HTTP_SPECIAL_RESPONSE) {
                    return rc;
                }

                dd("sent body from cache: %d", (int) rc);

            } else {
                /* empty body (or HEAD): send header + a bare last_buf */
                r->headers_out.content_length_n = 0;

                rc = ngx_http_srcache_next_header_filter(r);

                if (rc == NGX_ERROR || rc >= NGX_HTTP_SPECIAL_RESPONSE) {
                    return rc;
                }

                dd("sent header from cache: %d", (int) rc);

                dd("send last buf for the main request");

                cl = ngx_alloc_chain_link(r->pool);
                if (cl == NULL) {
                    return NGX_ERROR;
                }

                cl->buf = ngx_calloc_buf(r->pool);
                if (cl->buf == NULL) {
                    return NGX_ERROR;
                }

                cl->buf->last_buf = 1;

                rc = ngx_http_srcache_next_body_filter(r, cl);

                if (rc == NGX_ERROR || rc >= NGX_HTTP_SPECIAL_RESPONSE) {
                    return rc;
                }

                dd("sent last buf from cache: %d", (int) rc);
            }

            dd("finalize from here...");

            ngx_http_finalize_request(r, NGX_OK);

            /* dd("r->main->count (post): %d", (int) r->main->count); */
            return NGX_DONE;
        }

    } else {
        ctx = ngx_pcalloc(r->pool, sizeof(ngx_http_srcache_ctx_t));
        if (ctx == NULL) {
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }

        ngx_http_set_ctx(r, ctx, ngx_http_srcache_filter_module);
    }

    smcf = ngx_http_get_module_main_conf(r, ngx_http_srcache_filter_module);

    if (! smcf->postponed_to_access_phase_end) {
        /* move this handler to the very end of the access phase so other
           access handlers run first (one-time phase-engine mutation) */
        ngx_http_core_main_conf_t       *cmcf;
        ngx_http_phase_handler_t         tmp;
        ngx_http_phase_handler_t        *ph;
        ngx_http_phase_handler_t        *cur_ph;
        ngx_http_phase_handler_t        *last_ph;

        smcf->postponed_to_access_phase_end = 1;

        cmcf = ngx_http_get_module_main_conf(r, ngx_http_core_module);

        ph = cmcf->phase_engine.handlers;
        cur_ph = &ph[r->phase_handler];
        last_ph = &ph[cur_ph->next - 1];

        if (cur_ph < last_ph) {
            dd("swaping the contents of cur_ph and last_ph...");

            tmp = *cur_ph;

            memmove(cur_ph, cur_ph + 1,
                    (last_ph - cur_ph) * sizeof (ngx_http_phase_handler_t));

            *last_ph = tmp;

            r->phase_handler--; /* redo the current ph */

            return NGX_DECLINED;
        }
    }

    if (conf->fetch == NULL) {
        dd("fetch is not defined");
        return NGX_DECLINED;
    }

    dd("running phase handler...");

    /* issue a subrequest to fetch cached stuff (if any) */

    rc = ngx_http_srcache_fetch_subrequest(r, conf, ctx);

    if (rc != NGX_OK) {
        return rc;
    }

    ctx->waiting_subrequest = 1;

    dd("quit");

    return NGX_AGAIN;
}
/*
 * Access-phase handler: enforces per-key concurrent-connection limits.
 *
 * For every configured limit zone it evaluates the complex-value key,
 * looks the key up in the zone's shared-memory rbtree and either inserts
 * a new node (conn = 1) or increments the existing counter.  When a
 * counter would exceed its configured limit, all counters taken so far
 * for this request are rolled back (ngx_http_limit_conn_cleanup_all) and
 * the configured status code is returned.  Decrementing on request end
 * is delegated to a pool cleanup handler registered per zone.
 *
 * Returns NGX_DECLINED to let the request proceed, lccf->status_code to
 * reject it, or NGX_HTTP_INTERNAL_SERVER_ERROR on allocation/eval
 * failure.
 */
static ngx_int_t
ngx_http_limit_conn_handler(ngx_http_request_t *r)
{
    size_t                          n;
    uint32_t                        hash;
    ngx_str_t                       key;
    ngx_uint_t                      i;
    ngx_slab_pool_t                *shpool;
    ngx_rbtree_node_t              *node;
    ngx_pool_cleanup_t             *cln;
    ngx_http_limit_conn_ctx_t      *ctx;
    ngx_http_limit_conn_node_t     *lc;
    ngx_http_limit_conn_conf_t     *lccf;
    ngx_http_limit_conn_limit_t    *limits;
    ngx_http_limit_conn_conf_t     *lccf_unused_alias;  /* NOTE(review): not present; see below */
    ngx_http_limit_conn_cleanup_t  *lccln;

    /* process each request (r->main) only once, even across subrequests */
    if (r->main->limit_conn_set) {
        return NGX_DECLINED;
    }

    lccf = ngx_http_get_module_loc_conf(r, ngx_http_limit_conn_module);
    limits = lccf->limits.elts;

    for (i = 0; i < lccf->limits.nelts; i++) {
        ctx = limits[i].shm_zone->data;

        if (ngx_http_complex_value(r, &ctx->key, &key) != NGX_OK) {
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }

        /* an empty key means "do not limit by this zone" */
        if (key.len == 0) {
            continue;
        }

        /* node stores the key length in a u_char, hence the 255 cap */
        if (key.len > 255) {
            ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
                          "the value of the \"%V\" key "
                          "is more than 255 bytes: \"%V\"",
                          &ctx->key.value, &key);
            continue;
        }

        r->main->limit_conn_set = 1;

        hash = ngx_crc32_short(key.data, key.len);

        shpool = (ngx_slab_pool_t *) limits[i].shm_zone->shm.addr;

        ngx_shmtx_lock(&shpool->mutex);

        node = ngx_http_limit_conn_lookup(ctx->rbtree, &key, hash);

        if (node == NULL) {

            /*
             * the limit_conn node is embedded in the rbtree node's
             * "color" tail, followed by the variable-length key bytes
             */
            n = offsetof(ngx_rbtree_node_t, color)
                + offsetof(ngx_http_limit_conn_node_t, data)
                + key.len;

            node = ngx_slab_alloc_locked(shpool, n);

            if (node == NULL) {
                /* shared zone exhausted: undo earlier zones and reject */
                ngx_shmtx_unlock(&shpool->mutex);
                ngx_http_limit_conn_cleanup_all(r->pool);
                return lccf->status_code;
            }

            lc = (ngx_http_limit_conn_node_t *) &node->color;

            node->key = hash;
            lc->len = (u_char) key.len;
            lc->conn = 1;
            ngx_memcpy(lc->data, key.data, key.len);

            ngx_rbtree_insert(ctx->rbtree, node);

        } else {

            lc = (ngx_http_limit_conn_node_t *) &node->color;

            if ((ngx_uint_t) lc->conn >= limits[i].conn) {

                ngx_shmtx_unlock(&shpool->mutex);

                ngx_log_error(lccf->log_level, r->connection->log, 0,
                              "limiting connections by zone \"%V\"",
                              &limits[i].shm_zone->shm.name);

                ngx_http_limit_conn_cleanup_all(r->pool);
                return lccf->status_code;
            }

            lc->conn++;
        }

        ngx_log_debug2(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                       "limit conn: %08XD %d", node->key, lc->conn);

        ngx_shmtx_unlock(&shpool->mutex);

        /* register a cleanup so the counter is decremented when the
         * request pool is destroyed */
        cln = ngx_pool_cleanup_add(r->pool,
                                   sizeof(ngx_http_limit_conn_cleanup_t));
        if (cln == NULL) {
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }

        cln->handler = ngx_http_limit_conn_cleanup;
        lccln = cln->data;

        lccln->shm_zone = limits[i].shm_zone;
        lccln->node = node;
    }

    return NGX_DECLINED;
}
/*
 * Lua API: ngx.req.init_body([size]) — (re)initializes the request body
 * buffers so Lua code can append body data itself.
 *
 * Accepts 0 or 1 argument: an optional positive buffer size.  Without an
 * argument the size is derived from client_body_buffer_size (plus 25%
 * headroom), capped by the declared Content-Length.  Any previously
 * spooled body temp file is cleaned up and the in-memory buffer chain is
 * reset to a single fresh buffer.
 *
 * Raises a Lua error on bad arguments, on a missing/discarded/unread
 * request body, or on allocation failure.  Returns 0 values on success.
 */
static int
ngx_http_lua_ngx_req_init_body(lua_State *L)
{
    ngx_http_request_t          *r;
    int                          n;
    ngx_http_request_body_t     *rb;
    size_t                       size;
    lua_Integer                  num;
#if 1
    ngx_temp_file_t             *tf;
#endif
    ngx_http_core_loc_conf_t    *clcf;

    n = lua_gettop(L);

    if (n != 1 && n != 0) {
        return luaL_error(L, "expecting 0 or 1 argument but seen %d", n);
    }

    lua_pushlightuserdata(L, &ngx_http_lua_request_key);
    lua_rawget(L, LUA_GLOBALSINDEX);
    r = lua_touserdata(L, -1);
    lua_pop(L, 1);

    /*
     * FIX: lua_touserdata() returns NULL when this Lua state has no
     * associated request (e.g. called from an init context); the
     * original code dereferenced r unconditionally and would crash.
     */
    if (r == NULL) {
        return luaL_error(L, "no request found");
    }

    if (r->discard_body) {
        return luaL_error(L, "request body already discarded asynchronously");
    }

    if (r->request_body == NULL) {
        return luaL_error(L, "request body not read yet");
    }

    if (n == 1) {
        num = luaL_checkinteger(L, 1);

        if (num <= 0) {
            return luaL_error(L, "bad size argument: %d", (int) num);
        }

        size = (size_t) num;

    } else {
        clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module);
        size = clcf->client_body_buffer_size;
        size += size >> 2;   /* 25% headroom over the configured size */

        /*
         * avoid allocating an unnecessarily large buffer; note that a
         * negative content_length_n (no Content-Length header) casts to
         * a huge size_t, so the cap is effectively skipped then
         */
        if (size > (size_t) r->headers_in.content_length_n) {
            size = r->headers_in.content_length_n;
        }
    }

    rb = r->request_body;

#if 1
    /* drop any temp file the body was previously spooled into */
    tf = rb->temp_file;

    if (tf) {
        if (tf->file.fd != NGX_INVALID_FILE) {

            dd("cleaning temp file %.*s",
               (int) tf->file.name.len, tf->file.name.data);

            ngx_http_lua_pool_cleanup_file(r->pool, tf->file.fd);

            ngx_memzero(tf, sizeof(ngx_temp_file_t));
            tf->file.fd = NGX_INVALID_FILE;

            dd("temp file cleaned: %.*s",
               (int) tf->file.name.len, tf->file.name.data);
        }

        rb->temp_file = NULL;
    }
#endif

    r->request_body_in_clean_file = 1;

    /* the body is being rebuilt from scratch; length grows via append */
    r->headers_in.content_length_n = 0;

    rb->buf = ngx_create_temp_buf(r->pool, size);
    if (rb->buf == NULL) {
        return luaL_error(L, "out of memory");
    }

    rb->bufs = ngx_alloc_chain_link(r->pool);
    if (rb->bufs == NULL) {
        return luaL_error(L, "out of memory");
    }

    rb->bufs->buf = rb->buf;
    rb->bufs->next = NULL;

    return 0;
}
/*
 * Content handler for memcached_pass locations.
 *
 * Creates and configures an upstream object (schema, callbacks, input
 * filters), discards the client request body, and hands the request to
 * the upstream machinery.  Only GET and HEAD are allowed.
 *
 * Returns NGX_DONE after starting the upstream (the reference count on
 * the main request is bumped so it stays alive), NGX_HTTP_NOT_ALLOWED
 * for other methods, or an error status on failure.
 */
static ngx_int_t
ngx_http_memcached_handler(ngx_http_request_t *r)
{
    ngx_int_t                        rc;
    ngx_http_upstream_t             *upstream;
    ngx_http_memcached_ctx_t        *mctx;
    ngx_http_memcached_loc_conf_t   *conf;

    /* memcached lookups only make sense for GET/HEAD */
    if (!(r->method & (NGX_HTTP_GET|NGX_HTTP_HEAD))) {
        return NGX_HTTP_NOT_ALLOWED;
    }

    rc = ngx_http_discard_request_body(r);
    if (rc != NGX_OK) {
        return rc;
    }

    /* resolve the response Content-Type from the location config */
    if (ngx_http_set_content_type(r) != NGX_OK) {
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }

    /* allocate r->upstream */
    if (ngx_http_upstream_create(r) != NGX_OK) {
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }

    upstream = r->upstream;

    ngx_str_set(&upstream->schema, "memcached://");
    upstream->output.tag = (ngx_buf_tag_t) &ngx_http_memcached_module;

    /* the upstream conf is the one registered by memcached_pass */
    conf = ngx_http_get_module_loc_conf(r, ngx_http_memcached_module);
    upstream->conf = &conf->upstream;

    /* protocol callbacks driving the request/response exchange */
    upstream->create_request   = ngx_http_memcached_create_request;
    upstream->reinit_request   = ngx_http_memcached_reinit_request;
    upstream->process_header   = ngx_http_memcached_process_header;
    upstream->abort_request    = ngx_http_memcached_abort_request;
    upstream->finalize_request = ngx_http_memcached_finalize_request;

    /* per-request module context, shared with the input filters */
    mctx = ngx_palloc(r->pool, sizeof(ngx_http_memcached_ctx_t));
    if (mctx == NULL) {
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }

    mctx->request = r;
    ngx_http_set_ctx(r, mctx, ngx_http_memcached_module);

    /* body filters, used mainly for the non-buffered mode */
    upstream->input_filter_init = ngx_http_memcached_filter_init;
    upstream->input_filter      = ngx_http_memcached_filter;
    upstream->input_filter_ctx  = mctx;

    /* keep the main request alive while the upstream runs */
    r->main->count++;

    ngx_http_upstream_init(r);

    return NGX_DONE;
}
/*
 * Request handler: builds a GraphicsMagick "convert" argv from the
 * request (POST body temp file, or a source URI + options parsed from
 * the query/header command string), runs the conversion into a temp
 * file, then serves that file back as image/jpeg.
 *
 * Fixes relative to the original:
 *   - ngx_open_file_info_t is now zeroed before use: only three fields
 *     were set previously, leaving min_uses/valid/etc. uninitialized
 *     (undefined behavior in ngx_open_cached_file()).
 *   - ngx_pcalloc() results for uri/source are checked.
 *   - POST path guards against a missing request body temp file (a
 *     small body may be kept in memory only → NULL deref before).
 *   - ngx_http_send_header() result is checked before emitting a body.
 */
static void
ngx_http_graphicsmagick_command_handler(ngx_http_request_t *r)
{
    ngx_str_t                 *source;
    ngx_str_t                 *dest;
    ngx_str_t                 *ai;
    ngx_str_t                 *cmd;
    ngx_str_t                 *uri;
    ngx_array_t               *tokens;
    ngx_int_t                  rc;
    ngx_uint_t                 i;
    ngx_log_t                 *log;
    ngx_buf_t                 *b;
    ngx_chain_t                out;
    ngx_fd_t                   fd;
    ngx_open_file_info_t       of;
    ngx_http_core_loc_conf_t  *clcf;
    size_t                     argc;
    char                     **argv;
    u_char                    *cp;
    u_char                    *last;
    size_t                     root;
    ngx_temp_file_t           *tf;
    unsigned int               status;

    log = r->connection->log;

    tokens = ngx_array_create(r->pool, 10, sizeof(ngx_str_t));
    if (tokens == NULL) {
        ngx_http_graphicsmagick_server_error(r);
        return;
    }

    /* argv[0]: the gm sub-command name */
    ai = ngx_array_push(tokens);
    if (ai == NULL) {
        ngx_http_graphicsmagick_server_error(r);
        return;
    }
    ai->data = (u_char *) "convert";
    ai->len = 7;

    /* get command from HTTP headers or queryString */
    cmd = ngx_http_graphicsmagick_get_command(r);
    if (cmd == NULL) {
        ngx_http_graphicsmagick_server_error(r);
        return;
    }

    /* NOTE(review): this looks like leftover debug output logged at
     * ERR level; kept as-is to preserve observable behavior */
    ngx_log_error(NGX_LOG_ERR, log, 0,
                  "graphicsmagick convert command: \"%s\"", cmd->data);

    clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module);

    if (r->method & NGX_HTTP_POST) {
        /*
         * FIX: the body may not have been spooled to a temp file (small
         * bodies stay in memory buffers); guard before dereferencing.
         */
        if (r->request_body == NULL || r->request_body->temp_file == NULL) {
            ngx_http_graphicsmagick_server_error(r);
            return;
        }

        /* convert the uploaded file in place */
        source = dest = &r->request_body->temp_file->file.name;

    } else {
        uri = ngx_pcalloc(r->pool, sizeof(ngx_str_t));
        source = ngx_pcalloc(r->pool, sizeof(ngx_str_t));

        /* FIX: allocation results were not checked */
        if (uri == NULL || source == NULL) {
            ngx_http_graphicsmagick_server_error(r);
            return;
        }

        /* the command starts with "<uri> <options...>": split on the
         * first space */
        cp = cmd->data;
        while (cp < cmd->data + cmd->len) {
            if (*cp == ' ') {
                uri->data = cmd->data;
                uri->len = cp - cmd->data;
                cmd->data = cp + 1;
                cmd->len = cmd->len - uri->len - 1;
                break;
            }
            cp++;
        }

        if (uri->len == 0) {
            ngx_http_graphicsmagick_server_error(r);
            return;
        }

        /* resolve the source image path under the document root */
        last = ngx_http_graphicsmagickd_map_uri_to_path(r, uri, source,
                                                        &root, 0);
        if (last == NULL) {
            ngx_http_finalize_request(r, NGX_HTTP_INTERNAL_SERVER_ERROR);
            return;
        }
        source->len = last - source->data;

        /* the converted output goes into a fresh temp file */
        tf = ngx_pcalloc(r->pool, sizeof(ngx_temp_file_t));
        if (tf == NULL) {
            ngx_http_finalize_request(r, NGX_HTTP_INTERNAL_SERVER_ERROR);
            return;
        }

        tf->file.fd = NGX_INVALID_FILE;
        tf->file.log = r->connection->log;
        tf->path = clcf->client_body_temp_path;
        tf->pool = r->pool;
        tf->log_level = r->request_body_file_log_level;
        tf->persistent = r->request_body_in_persistent_file;
        tf->clean = 1;

        if (r->request_body_file_group_access) {
            tf->access = 0660;
        }

        if (ngx_create_temp_file(&tf->file, tf->path, tf->pool,
                                 tf->persistent, tf->clean, tf->access)
            != NGX_OK)
        {
            ngx_http_graphicsmagick_server_error(r);
            return;
        }

        dest = &tf->file.name;
    }

    /* push source file name into tokens */
    ai = ngx_array_push(tokens);
    if (ai == NULL) {
        ngx_http_graphicsmagick_server_error(r);
        return;
    }
    *ai = *source;

    /* tokenize the remaining command options into the argv array */
    rc = ngx_http_graphicsmagick_tokenize_command(r, cmd, tokens);
    if (rc == NGX_ERROR) {
        ngx_http_graphicsmagick_server_error(r);
        return;
    }

    /* force JPEG compression on the output */
    ai = ngx_array_push_n(tokens, 2);
    if (ai == NULL) {
        ngx_http_graphicsmagick_server_error(r);
        return;
    }
    ai->data = (u_char *) "-compress";
    ai->len = 9;
    ai++;
    ai->data = (u_char *) "JPEG";
    ai->len = 4;

    /* dest filename last: convert writes the thumbnail there */
    ai = ngx_array_push(tokens);
    if (ai == NULL) {
        ngx_http_graphicsmagick_server_error(r);
        return;
    }
    *ai = *dest;

    /* flatten the token array into a char* argv */
    argc = tokens->nelts;
    argv = ngx_palloc(r->pool, argc * sizeof(char*));
    if (argv == NULL) {
        ngx_http_graphicsmagick_server_error(r);
        return;
    }

    ai = tokens->elts;
    for (i = 0; i < argc; i++) {
        argv[i] = (char *) ai[i].data;
        ngx_log_error(NGX_LOG_ERR, log, 0, "current[%d]: %s", i, argv[i]);
    }

    ngx_array_destroy(tokens);

    /* run the GraphicsMagick conversion; 0 means failure */
    status = ngx_http_graphicsmagick_convert(argv, argc);
    if (status == 0) {
        ngx_http_graphicsmagick_server_error(r);
        return;
    }

    /*
     * FIX: zero the whole struct before setting fields;
     * ngx_open_cached_file() reads members (min_uses, valid, ...) that
     * were previously left uninitialized.
     */
    ngx_memzero(&of, sizeof(ngx_open_file_info_t));

    of.test_dir = 0;
    of.errors = clcf->open_file_cache_errors;
    of.events = clcf->open_file_cache_events;

    rc = ngx_open_cached_file(clcf->open_file_cache, dest, &of, r->pool);
    if (rc == NGX_ERROR) {
        ngx_log_error(NGX_LOG_ERR, log, of.err,
                      "failed to open file \"%s\"", dest->data);
        ngx_http_graphicsmagick_server_error(r);
        return;
    }

    fd = of.fd;

    log->action = "sending response to client";

    r->headers_out.status = NGX_HTTP_OK;
    r->headers_out.content_type.len = sizeof("image/jpeg") - 1;
    r->headers_out.content_type.data = (u_char *) "image/jpeg";
    r->headers_out.content_length_n = of.size;
    r->headers_out.last_modified_time = of.mtime;

    /* empty file in a subrequest: header only */
    if (r != r->main && of.size == 0) {
        rc = ngx_http_send_header(r);
        ngx_http_finalize_request(r, rc);
        return;
    }

    b = ngx_pcalloc(r->pool, sizeof(ngx_buf_t));
    if (b == NULL) {
        ngx_http_graphicsmagick_server_error(r);
        return;
    }

    b->file = ngx_pcalloc(r->pool, sizeof(ngx_file_t));
    if (b->file == NULL) {
        ngx_http_graphicsmagick_server_error(r);
        return;
    }

    rc = ngx_http_send_header(r);

    /* FIX: stop here if the header failed or no body is wanted */
    if (rc == NGX_ERROR || rc > NGX_OK || r->header_only) {
        ngx_http_finalize_request(r, rc);
        return;
    }

    b->file_pos = 0;
    b->file_last = of.size;

    b->in_file = b->file_last ? 1 : 0;
    b->last_buf = (r == r->main) ? 1 : 0;
    b->last_in_chain = 1;

    b->file->fd = fd;
    b->file->name = *dest;
    b->file->log = log;

    out.buf = b;
    out.next = NULL;

    rc = ngx_http_output_filter(r, &out);
    ngx_http_finalize_request(r, rc);
    return;
}
/*
 * Parses the first response line from memcached.
 *
 * Expects either "VALUE <key> <flags> <length>\r\n" (hit) or
 * "END\r\n" (miss).  On a hit, verifies the echoed key, optionally maps
 * the memcached flags to a Content-Encoding: gzip header (gzip_flag),
 * extracts the body length, and advances u->buffer.pos past the header
 * line.  Returns NGX_AGAIN until a full line (ending in LF) is buffered,
 * NGX_OK on a parsed hit/miss, NGX_HTTP_UPSTREAM_INVALID_HEADER on a
 * malformed response.
 */
static ngx_int_t
ngx_http_memcached_process_header(ngx_http_request_t *r)
{
    u_char                         *p, *start;
    ngx_str_t                       line;
    ngx_uint_t                      flags;
    ngx_table_elt_t                *h;
    ngx_http_upstream_t            *u;
    ngx_http_memcached_ctx_t       *ctx;
    ngx_http_memcached_loc_conf_t  *mlcf;

    u = r->upstream;

    /* wait until a whole line (terminated by LF) is in the buffer */
    for (p = u->buffer.pos; p < u->buffer.last; p++) {
        if (*p == LF) {
            goto found;
        }
    }

    return NGX_AGAIN;

found:

    line.data = u->buffer.pos;
    line.len = p - u->buffer.pos;

    /* the line must end with CRLF */
    if (line.len == 0 || *(p - 1) != CR) {
        goto no_valid;
    }

    /* overwrite the LF so the line can be scanned as a C string; the
     * trailing CR is dropped from line.len for logging */
    *p = '\0';
    line.len--;

    ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                   "memcached: \"%V\"", &line);

    p = u->buffer.pos;

    ctx = ngx_http_get_module_ctx(r, ngx_http_memcached_module);
    mlcf = ngx_http_get_module_loc_conf(r, ngx_http_memcached_module);

    if (ngx_strncmp(p, "VALUE ", sizeof("VALUE ") - 1) == 0) {

        p += sizeof("VALUE ") - 1;

        /* memcached echoes the key back; it must match what we sent */
        if (ngx_strncmp(p, ctx->key.data, ctx->key.len) != 0) {
            ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
                          "memcached sent invalid key in response \"%V\" "
                          "for key \"%V\"",
                          &line, &ctx->key);

            return NGX_HTTP_UPSTREAM_INVALID_HEADER;
        }

        p += ctx->key.len;

        if (*p++ != ' ') {
            goto no_valid;
        }

        /* flags */

        start = p;

        /* scan the flags field up to the next space; only parse its
         * value when gzip_flag is configured */
        while (*p) {
            if (*p++ == ' ') {
                if (mlcf->gzip_flag) {
                    goto flags;
                } else {
                    goto length;
                }
            }
        }

        goto no_valid;

    flags:

        flags = ngx_atoi(start, p - start - 1);

        if (flags == (ngx_uint_t) NGX_ERROR) {
            ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
                          "memcached sent invalid flags in response \"%V\" "
                          "for key \"%V\"",
                          &line, &ctx->key);
            return NGX_HTTP_UPSTREAM_INVALID_HEADER;
        }

        /* the configured flag bit marks the value as gzip-compressed */
        if (flags & mlcf->gzip_flag) {
            h = ngx_list_push(&r->headers_out.headers);
            if (h == NULL) {
                return NGX_ERROR;
            }

            h->hash = 1;
            ngx_str_set(&h->key, "Content-Encoding");
            ngx_str_set(&h->value, "gzip");
            r->headers_out.content_encoding = h;
        }

    length:

        /* the rest of the line is the body length in bytes */
        start = p;
        p = line.data + line.len;

        u->headers_in.content_length_n = ngx_atoof(start, p - start);
        if (u->headers_in.content_length_n == NGX_ERROR) {
            ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
                          "memcached sent invalid length in response \"%V\" "
                          "for key \"%V\"",
                          &line, &ctx->key);
            return NGX_HTTP_UPSTREAM_INVALID_HEADER;
        }

        u->headers_in.status_n = 200;
        u->state->status = 200;
        /* skip the CRLF to position at the start of the value body */
        u->buffer.pos = p + sizeof(CRLF) - 1;

        return NGX_OK;
    }

    /* the LF was replaced with '\0' above, so a miss line compares
     * equal to "END\r" */
    if (ngx_strcmp(p, "END\x0d") == 0) {
        ngx_log_error(NGX_LOG_INFO, r->connection->log, 0,
                      "key: \"%V\" was not found by memcached", &ctx->key);

        u->headers_in.content_length_n = 0;
        u->headers_in.status_n = 404;
        u->state->status = 404;
        u->buffer.pos = p + sizeof("END" CRLF) - 1;
        u->keepalive = 1;

        return NGX_OK;
    }

no_valid:

    ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
                  "memcached sent invalid response: \"%V\"", &line);

    return NGX_HTTP_UPSTREAM_INVALID_HEADER;
}
/*
 * Content handler: generates a fancy directory listing.
 *
 * Builds the listing body, then prepends/appends either the built-in
 * header/footer buffers or the output of configured header/footer
 * subrequests.  Up to three chain links are used: [header][content]
 * [footer].
 *
 * FIX: the original guard read
 *     if ((rc = make_content_buf(r, &out[0].buf, alcf) != NGX_OK))
 * — due to operator precedence, rc received the result of the
 * comparison (0 or 1), so on failure the handler returned 1 instead of
 * the actual nginx status code from make_content_buf().
 */
static ngx_int_t
ngx_http_fancyindex_handler(ngx_http_request_t *r)
{
    ngx_http_request_t             *sr;
    ngx_str_t                      *sr_uri;
    ngx_str_t                       rel_uri;
    ngx_int_t                       rc;
    ngx_http_fancyindex_loc_conf_t *alcf;
    ngx_chain_t out[3] = { { NULL, NULL }, { NULL, NULL}, { NULL, NULL }};

    /* only directory requests (trailing slash) are handled */
    if (r->uri.data[r->uri.len - 1] != '/') {
        return NGX_DECLINED;
    }

    /* TODO: Win32 */

    if (!(r->method & (NGX_HTTP_GET|NGX_HTTP_HEAD))) {
        return NGX_DECLINED;
    }

    alcf = ngx_http_get_module_loc_conf(r, ngx_http_fancyindex_module);

    if (!alcf->enable) {
        return NGX_DECLINED;
    }

    /* FIX: assign first, compare second (see header comment) */
    rc = make_content_buf(r, &out[0].buf, alcf);
    if (rc != NGX_OK) {
        return rc;
    }

    out[0].buf->last_in_chain = 1;

    r->headers_out.status = NGX_HTTP_OK;
    r->headers_out.content_type_len = ngx_sizeof_ssz("text/html");
    r->headers_out.content_type.len = ngx_sizeof_ssz("text/html");
    r->headers_out.content_type.data = (u_char *) "text/html";

    rc = ngx_http_send_header(r);
    if (rc == NGX_ERROR || rc > NGX_OK || r->header_only) {
        return rc;
    }

    if (alcf->header.len > 0) {
        /*
         * A header URI is configured: let nginx produce it with a
         * subrequest.
         */
        sr_uri = &alcf->header;

        if (*sr_uri->data != '/') {
            /* relative path: prefix it with the current URI */
            rel_uri.len = r->uri.len + alcf->header.len;
            rel_uri.data = ngx_palloc(r->pool, rel_uri.len);
            if (rel_uri.data == NULL) {
                return NGX_HTTP_INTERNAL_SERVER_ERROR;
            }
            ngx_memcpy(ngx_cpymem(rel_uri.data, r->uri.data, r->uri.len),
                       alcf->header.data, alcf->header.len);
            sr_uri = &rel_uri;
        }

        ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                       "http fancyindex: header subrequest \"%V\"", sr_uri);

        rc = ngx_http_subrequest(r, sr_uri, NULL, &sr, NULL, 0);
        if (rc == NGX_ERROR || rc == NGX_DONE) {
            ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                           "http fancyindex: header subrequest for \"%V\" failed",
                           sr_uri);
            return rc;
        }

        ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                       "http fancyindex: header subrequest status = %i",
                       sr->headers_out.status);

        if (sr->headers_out.status != NGX_HTTP_OK) {
            /*
             * XXX: Should we write a message to the error log just in
             * case we get something different from a 404?
             */
            goto add_builtin_header;
        }

    } else {
add_builtin_header:
        ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                       "http fancyindex: adding built-in header");
        /* shift the content buffer to out[1] to make room in front */
        out[1].next = out[0].next;
        out[1].buf  = out[0].buf;
        /* chain the built-in header buffer first */
        out[0].next = &out[1];
        out[0].buf  = make_header_buf(r);
    }

    /* no custom footer configured: chain up the built-in one and send */
    if (alcf->footer.len == 0) {
        /*
         * NOTE(review): "last" assumes the built-in header was added iff
         * header.len == 0; when a configured header subrequest failed and
         * the built-in one was used instead, the index is off by one —
         * preserved from the original, confirm against upstream.
         */
        ngx_uint_t last = (alcf->header.len == 0) ? 2 : 1;

        ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                       "http fancyindex: adding built-in footer at %i", last);

        out[last-1].next = &out[last];
        out[last].buf = make_footer_buf(r);

        out[last-1].buf->last_in_chain = 0;
        out[last].buf->last_in_chain = 1;
        out[last].buf->last_buf = 1;

        /* send everything with a single call */
        return ngx_http_output_filter(r, &out[0]);
    }

    /*
     * A custom footer was requested: partially send whatever is chained
     * in out[0], then emit the footer via a subrequest, falling back to
     * the built-in footer if that fails.
     */
    rc = ngx_http_output_filter(r, &out[0]);
    if (rc != NGX_OK && rc != NGX_AGAIN) {
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }

    sr_uri = &alcf->footer;

    if (*sr_uri->data != '/') {
        /* relative path: prefix it with the current URI */
        rel_uri.len = r->uri.len + alcf->footer.len;
        rel_uri.data = ngx_palloc(r->pool, rel_uri.len);
        if (rel_uri.data == NULL) {
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }
        ngx_memcpy(ngx_cpymem(rel_uri.data, r->uri.data, r->uri.len),
                   alcf->footer.data, alcf->footer.len);
        sr_uri = &rel_uri;
    }

    ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                   "http fancyindex: footer subrequest \"%V\"", sr_uri);

    rc = ngx_http_subrequest(r, sr_uri, NULL, &sr, NULL, 0);
    if (rc == NGX_ERROR || rc == NGX_DONE) {
        ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                       "http fancyindex: footer subrequest for \"%V\" failed",
                       sr_uri);
        return rc;
    }

    ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                   "http fancyindex: header subrequest status = %i",
                   sr->headers_out.status);

    if (sr->headers_out.status != NGX_HTTP_OK) {
        /*
         * XXX: Should we write a message to the error log just in case
         * we get something different from a 404?
         */
        out[0].next = NULL;
        out[0].buf = make_footer_buf(r);
        out[0].buf->last_in_chain = 1;
        out[0].buf->last_buf = 1;
        /* directly send out the built-in footer */
        return ngx_http_output_filter(r, &out[0]);
    }

    return (r != r->main) ? rc : ngx_http_send_special(r, NGX_HTTP_LAST);
}