static ngx_int_t
ngx_child_request_finished_handler(
	ngx_http_request_t *r, 
	void *data, 
	ngx_int_t rc)
{
	ngx_http_request_t          *pr;
	ngx_child_request_context_t* ctx;

	ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
		"ngx_child_request_finished_handler: error code %ui", rc);

	// make sure we are not called twice for the same request
	r->post_subrequest = NULL;

	// save the completed upstream and error code in the context for the write event handler
	ctx = ngx_http_get_module_ctx(r, ngx_http_vod_module);
	if (ctx == NULL)
	{
		ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
			"ngx_child_request_finished_handler: unexpected, context is null");
		return NGX_ERROR;
	}

	ctx->upstream = r->upstream;
	ctx->error_code = rc;

	if (ctx->original_write_event_handler != NULL)
	{
		ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
			"ngx_child_request_finished_handler: "
			"unexpected original_write_event_handler not null");
		return NGX_ERROR;
	}

	// replace the parent write event handler
	pr = r->parent;

	ctx->original_write_event_handler = pr->write_event_handler;
	pr->write_event_handler = ngx_child_request_wev_handler;

	// temporarily replace the parent context
	ctx->original_context = ngx_http_get_module_ctx(pr, ngx_http_vod_module);
	ngx_http_set_ctx(pr, ctx, ngx_http_vod_module);
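	// (ngx_child_request_wev_handler runs on the parent and restores both the
	// saved write event handler and the original module context)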

	// work-around issues in nginx's event module (from echo-nginx-module)
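	// (if this subrequest is not the connection's active request and still has
	// postponed output, the parent may never be re-posted by nginx itself, so
	// post it here to make sure its write event handler gets invoked)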
	if (r != r->connection->data
		&& r->postponed
		&& (r->main->posted_requests == NULL
		|| r->main->posted_requests->request != pr))
	{
#if defined(nginx_version) && nginx_version >= 8012
		ngx_http_post_request(pr, NULL);
#else
		ngx_http_post_request(pr);
#endif
	}

	return NGX_OK;
}
static void
ngx_http_parallel_wev_handler(ngx_http_request_t *r)
{
	ngx_http_parallel_fiber_ctx_t* fiber;
	ngx_http_parallel_ctx_t *ctx;
	ngx_int_t rc;

	ctx = ngx_http_get_module_ctx(r, ngx_http_parallel_module);

	// restore the write event handler
	r->write_event_handler = ctx->original_write_event_handler;
	ctx->original_write_event_handler = NULL;

	// get the fiber
	fiber = ctx->completed_fiber;
	ctx->completed_fiber = NULL;

	if (fiber == NULL)
	{
		ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
			"ngx_http_parallel_wev_handler: unexpected, fiber is null");
		return;
	}

	// code taken from echo-nginx-module to work around nginx subrequest issues
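	// if this request is the connection's active request but still has entries
	// on its postponed list, hand the connection over to the next postponed
	// subrequest (and post it), or flush the postponed output buffers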
	if (r == r->connection->data && r->postponed) {

		if (r->postponed->request) {
			r->connection->data = r->postponed->request;

#if defined(nginx_version) && nginx_version >= 8012
			ngx_http_post_request(r->postponed->request, NULL);
#else
			ngx_http_post_request(r->postponed->request);
#endif

		}
		else {
			ngx_http_output_filter(r, NULL);
		}
	}

	rc = ngx_http_parallel_handle_request_complete(ctx, fiber->sr, fiber);
	if (rc != NGX_OK && ctx->error_code == NGX_AGAIN)
	{
		ctx->error_code = rc;
	}
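	// (ctx->error_code is presumably initialized to NGX_AGAIN, so only the
	// first failure is recorded and NGX_AGAIN doubles as "still running")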

	if (ctx->error_code != NGX_AGAIN && ctx->active_fibers == 0)
	{
		ngx_http_finalize_request(r, ctx->error_code);
	}
}
ngx_int_t
ngx_http_echo_post_subrequest(ngx_http_request_t *r,
    void *data, ngx_int_t rc)
{
    ngx_http_echo_ctx_t         *ctx = data;
    ngx_http_request_t          *pr;
    ngx_http_echo_ctx_t         *pr_ctx;

    dd("echo post_subrequest: %.*s", (int) r->uri.len, r->uri.data);

    if (ctx->run_post_subrequest) {
        dd("already run post_subrequest: %p: %.*s", ctx,
           (int) r->uri.len, r->uri.data);

        return rc;
    }

    dd("setting run_post_subrequest to 1 for %p for %.*s", ctx,
       (int) r->uri.len, r->uri.data);

    ctx->run_post_subrequest = 1;

    pr = r->parent;

    pr_ctx = ngx_http_get_module_ctx(pr, ngx_http_echo_module);
    if (pr_ctx == NULL) {
        return NGX_ERROR;
    }

    dd("mark ready %d", (int) pr_ctx->next_handler_cmd);

    pr_ctx->waiting = 0;
    pr_ctx->done = 1;

    pr->write_event_handler = ngx_http_echo_wev_handler;

    /* work-around issues in nginx's event module */

    if (r != r->connection->data
        && r->postponed
        && (r->main->posted_requests == NULL
            || r->main->posted_requests->request != pr))
    {
#if defined(nginx_version) && nginx_version >= 8012
        ngx_http_post_request(pr, NULL);
#else
        ngx_http_post_request(pr);
#endif
    }

    return rc;
}
static ngx_int_t
ngx_http_parallel_subrequest_finished_handler(
	ngx_http_request_t *r, 
	void *data, 
	ngx_int_t rc)
{
	ngx_http_request_t          *pr;
	ngx_http_parallel_ctx_t *ctx;

	// make sure we are not called twice for the same request
	r->post_subrequest = NULL;

	pr = r->parent;

	// save the completed fiber in the context for the write event handler
	ctx = ngx_http_get_module_ctx(pr, ngx_http_parallel_module);

	ctx->completed_fiber = data;
	if (rc != NGX_OK && ctx->error_code == NGX_AGAIN)
	{
		ctx->error_code = rc;
	}

	if (ctx->original_write_event_handler != NULL)
	{
		ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
			"ngx_http_parallel_subrequest_finished_handler: "
			"unexpected original_write_event_handler not null");
		return NGX_ERROR;
	}

	ctx->original_write_event_handler = pr->write_event_handler;
	pr->write_event_handler = ngx_http_parallel_wev_handler;
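	// from now on, when the parent request is run from the posted requests
	// queue, ngx_http_run_posted_requests invokes ngx_http_parallel_wev_handler
	// instead of the saved handler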

	/* work-around issues in nginx's event module */

	if (r != r->connection->data
		&& r->postponed
		&& (r->main->posted_requests == NULL
		|| r->main->posted_requests->request != pr))
	{
#if defined(nginx_version) && nginx_version >= 8012
		ngx_http_post_request(pr, NULL);
#else
		ngx_http_post_request(pr);
#endif
	}

	return NGX_OK;
}
void
ngx_http_echo_wev_handler(ngx_http_request_t *r)
{
    ngx_int_t                    rc;
    ngx_http_echo_ctx_t         *ctx;

    dd_enter();

    ctx = ngx_http_get_module_ctx(r, ngx_http_echo_module);

    if (ctx == NULL) {
        ngx_http_finalize_request(r, NGX_ERROR);
        return;
    }

    if (ctx->waiting && ! ctx->done) {
        if (r->main->posted_requests
                && r->main->posted_requests->request != r)
        {
            dd("HOT SPIN");

#if defined(nginx_version) && nginx_version >= 8012
            ngx_http_post_request(r, NULL);
#else
            ngx_http_post_request(r);
#endif

            return;
        }
    }

    ctx->done = 0;

    ctx->next_handler_cmd++;

    rc = ngx_http_echo_run_cmds(r);

    dd("rc: %d", (int) rc);

    if (rc == NGX_DONE) {
        return;
    }

    if (rc == NGX_AGAIN) {
        dd("mark busy %d", (int) ctx->next_handler_cmd);
        ctx->waiting = 1;
        ctx->done = 0;

    } else {
        dd("mark ready %d", (int) ctx->next_handler_cmd);
        ctx->waiting = 0;
        ctx->done = 1;

        dd("finalizing with rc %d", (int) rc);

        dd("finalize request %.*s with %d", (int) r->uri.len, r->uri.data,
                (int) rc);

        ngx_http_finalize_request(r, rc);
    }
}
void
ngx_http_echo_wev_handler(ngx_http_request_t *r)
{
    ngx_int_t                    rc;
    ngx_http_echo_ctx_t         *ctx;

    dd("wev handler");

    ctx = ngx_http_get_module_ctx(r, ngx_http_echo_module);

    if (ctx == NULL) {
        ngx_http_finalize_request(r, NGX_ERROR);
        return;
    }

    dd("waiting: %d, done: %d", (int) ctx->waiting, (int) ctx->done);

    if (ctx->waiting && ! ctx->done) {

        if (r == r->connection->data && r->postponed) {

            if (r->postponed->request) {
                r->connection->data = r->postponed->request;

#if defined(nginx_version) && nginx_version >= 8012
                ngx_http_post_request(r->postponed->request, NULL);
#else
                ngx_http_post_request(r->postponed->request);
#endif

            } else {
                ngx_http_echo_flush_postponed_outputs(r);
            }
        }

        return;
    }

    ctx->done = 0;

    ctx->next_handler_cmd++;

    rc = ngx_http_echo_run_cmds(r);

    dd("rc: %d", (int) rc);

    if (rc == NGX_DONE) {
        ngx_http_finalize_request(r, rc);
        return;
    }

    if (rc == NGX_AGAIN) {
        dd("mark busy %d for %.*s", (int) ctx->next_handler_cmd,
           (int) r->uri.len,
           r->uri.data);

        ctx->waiting = 1;
        ctx->done = 0;

    } else {
        dd("mark ready %d", (int) ctx->next_handler_cmd);
        ctx->waiting = 0;
        ctx->done = 1;

        dd("finalizing with rc %d", (int) rc);

        dd("finalize request %.*s with %d", (int) r->uri.len, r->uri.data,
                (int) rc);

        ngx_http_finalize_request(r, rc);
    }
}
static ngx_int_t
ngx_http_postpone_filter(ngx_http_request_t *r, ngx_chain_t *in)
{
    ngx_connection_t              *c;
    ngx_http_postponed_request_t  *pr;

    c = r->connection;

    ngx_log_debug3(NGX_LOG_DEBUG_HTTP, c->log, 0,
                   "http postpone filter \"%V?%V\" %p", &r->uri, &r->args, in);

    if (r != c->data) {

        if (in) {
            ngx_http_postpone_filter_add(r, in);
            return NGX_OK;
        }

#if 0
        /* TODO: SSI may pass NULL */
        ngx_log_error(NGX_LOG_ALERT, c->log, 0,
                      "http postpone filter NULL inactive request",
                      &r->uri, &r->args);
#endif

        return NGX_OK;
    }

    if (r->postponed == NULL) {

        if (in || c->buffered) {
            return ngx_http_next_body_filter(r->main, in);
        }

        return NGX_OK;
    }

    if (in) {
        ngx_http_postpone_filter_add(r, in);
    }

    do {
        pr = r->postponed;

        if (pr->request) {

            ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0,
                           "http postpone filter wake \"%V?%V\"",
                           &pr->request->uri, &pr->request->args);

            r->postponed = pr->next;

            c->data = pr->request;

            return ngx_http_post_request(pr->request, NULL);
        }

        if (pr->out == NULL) {
            ngx_log_error(NGX_LOG_ALERT, c->log, 0,
                          "http postpone filter NULL output",
                          &r->uri, &r->args);

        } else {
            ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0,
                           "http postpone filter output \"%V?%V\"",
                           &r->uri, &r->args);

            if (ngx_http_next_body_filter(r->main, pr->out) == NGX_ERROR) {
                return NGX_ERROR;
            }
        }

        r->postponed = pr->next;

    } while (r->postponed);

    return NGX_OK;
}
ngx_int_t
ngx_http_srcache_access_handler(ngx_http_request_t *r)
{
    ngx_str_t                       skip;
    ngx_int_t                       rc;
    ngx_http_srcache_loc_conf_t    *conf;
    ngx_http_srcache_main_conf_t   *smcf;
    ngx_http_srcache_ctx_t         *ctx;
    ngx_chain_t                    *cl;
    size_t                          len;
    unsigned                        no_store;

    /* access phase handlers are skipped in subrequests,
     * so the current request must be a main request */

    conf = ngx_http_get_module_loc_conf(r, ngx_http_srcache_filter_module);

    if (conf->fetch == NULL && conf->store == NULL) {
        dd("bypass: %.*s", (int) r->uri.len, r->uri.data);
        return NGX_DECLINED;
    }

    dd("store defined? %p", conf->store);

    dd("req method: %lu", (unsigned long) r->method);
    dd("cache methods: %lu", (unsigned long) conf->cache_methods);

    if (!(r->method & conf->cache_methods)) {
        ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                       "srcache_fetch and srcache_store skipped due to request "
                       "method %V", &r->method_name);

        return NGX_DECLINED;
    }

    if (conf->req_cache_control
        && ngx_http_srcache_request_no_cache(r, &no_store) == NGX_OK)
    {
        ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                       "srcache_fetch skipped due to request headers "
                       "\"Cache-Control: no-cache\" or \"Pragma: no-cache\"");

        if (!no_store) {
            /* register a ctx to give a chance to srcache_store to run */

            ctx = ngx_pcalloc(r->pool, sizeof(ngx_http_srcache_ctx_t));

            if (ctx == NULL) {
                return NGX_ERROR;
            }

            ngx_http_set_ctx(r, ctx, ngx_http_srcache_filter_module);

        } else {
            ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                           "srcache_store skipped due to request header "
                           "\"Cache-Control: no-store\"");
        }

        return NGX_DECLINED;
    }

    if (conf->fetch_skip != NULL
        && ngx_http_complex_value(r, conf->fetch_skip, &skip) == NGX_OK
        && skip.len
        && (skip.len != 1 || skip.data[0] != '0'))
    {
        ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                       "srcache_fetch skipped due to the true value fed into "
                       "srcache_fetch_skip: \"%V\"", &skip);

        /* register a ctx to give a chance to srcache_store to run */

        ctx = ngx_pcalloc(r->pool, sizeof(ngx_http_srcache_ctx_t));

        if (ctx == NULL) {
            return NGX_ERROR;
        }

        ngx_http_set_ctx(r, ctx, ngx_http_srcache_filter_module);

        return NGX_DECLINED;
    }

    ctx = ngx_http_get_module_ctx(r, ngx_http_srcache_filter_module);

    if (ctx != NULL) {
        /*
        if (ctx->fetch_error) {
            return NGX_DECLINED;
        }
        */

        if (ctx->waiting_subrequest) {
            dd("waiting subrequest");
            return NGX_AGAIN;
        }

        if (ctx->waiting_request_body) {
            return NGX_AGAIN;
        }

        if (ctx->request_body_done == 1) {
            ctx->request_body_done = 0;
            goto do_fetch_subrequest;
        }

        if (ctx->request_done) {
            dd("request done");

            if (ngx_http_post_request(r, NULL) != NGX_OK) {
                return NGX_ERROR;
            }

            if (!ctx->from_cache) {
                return NGX_DECLINED;
            }

            dd("sending header");

            if (ctx->body_from_cache) {
                len = 0;

                for (cl = ctx->body_from_cache; cl->next; cl = cl->next) {
                    len += ngx_buf_size(cl->buf);
                }

                len += ngx_buf_size(cl->buf);

                cl->buf->last_buf = 1;

                r->headers_out.content_length_n = len;

                rc = ngx_http_send_header(r);

                dd("srcache fetch header returned %d", (int) rc);

                if (rc == NGX_ERROR || rc > NGX_OK) {
                    return rc;
                }

#if 1
                if (r->header_only) {
                    return NGX_HTTP_OK;
                }
#endif

                if (!r->filter_finalize) {
                    rc = ngx_http_output_filter(r, ctx->body_from_cache);
                    if (rc == NGX_ERROR || rc > NGX_OK) {
                        return rc;
                    }
                }

                dd("sent body from cache: %d", (int) rc);
                dd("finalize from here...");

                ngx_http_finalize_request(r, rc);

                /* dd("r->main->count (post): %d", (int) r->main->count); */
                return NGX_DONE;
            }

            return NGX_DECLINED;
        }

    } else {
        ctx = ngx_pcalloc(r->pool, sizeof(ngx_http_srcache_ctx_t));

        if (ctx == NULL) {
            return NGX_ERROR;
        }

        ngx_http_set_ctx(r, ctx, ngx_http_srcache_filter_module);
    }

    smcf = ngx_http_get_module_main_conf(r, ngx_http_srcache_filter_module);

    if (!smcf->postponed_to_access_phase_end) {
        ngx_http_core_main_conf_t       *cmcf;
        ngx_http_phase_handler_t         tmp;
        ngx_http_phase_handler_t        *ph;
        ngx_http_phase_handler_t        *cur_ph;
        ngx_http_phase_handler_t        *last_ph;

        smcf->postponed_to_access_phase_end = 1;
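        /* rotate this handler to the end of the ACCESS phase (just before the
           post_access handler, hence cur_ph->next - 2) so that other access
           phase handlers get to run first; decrementing r->phase_handler below
           makes the phase engine re-run the handler shifted into this slot */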

        cmcf = ngx_http_get_module_main_conf(r, ngx_http_core_module);

        ph = cmcf->phase_engine.handlers;
        cur_ph = &ph[r->phase_handler];

        /* we should skip the post_access phase handler here too */
        last_ph = &ph[cur_ph->next - 2];

        if (cur_ph < last_ph) {
            dd("swaping the contents of cur_ph and last_ph...");

            tmp = *cur_ph;

            memmove(cur_ph, cur_ph + 1,
                    (last_ph - cur_ph) * sizeof (ngx_http_phase_handler_t));

            *last_ph = tmp;

            r->phase_handler--; /* redo the current ph */

            return NGX_DECLINED;
        }
    }

    if (conf->fetch == NULL) {
        dd("fetch is not defined");
        return NGX_DECLINED;
    }

    dd("running phase handler...");

    if (!r->request_body) {
        dd("reading request body: ctx = %p", ctx);

        rc = ngx_http_read_client_request_body(r,
                                               ngx_http_srcache_post_read_body);
        if (rc == NGX_ERROR || rc > NGX_OK) {
#if (nginx_version < 1002006)                                               \
    || (nginx_version >= 1003000 && nginx_version < 1003009)
            r->main->count--;
#endif
            return rc;
        }

        if (rc == NGX_AGAIN) {
            ctx->waiting_request_body = 1;
            return NGX_AGAIN;
        }

        /* rc == NGX_OK */
    }

do_fetch_subrequest:
    /* issue a subrequest to fetch cached stuff (if any) */

    rc = ngx_http_srcache_fetch_subrequest(r, conf, ctx);

    if (rc != NGX_OK) {
        return rc;
    }

    ctx->waiting_subrequest = 1;

    dd("quit");

    return NGX_AGAIN;
}
static void
ngx_child_request_wev_handler(ngx_http_request_t *r)
{
	ngx_child_request_context_t* ctx;
	ngx_http_upstream_t *u;
	ngx_int_t rc;
	off_t content_length;

	ctx = ngx_http_get_module_ctx(r, ngx_http_vod_module);

	// restore the write event handler
	r->write_event_handler = ctx->original_write_event_handler;
	ctx->original_write_event_handler = NULL;

	// restore the original context
	ngx_http_set_ctx(r, ctx->original_context, ngx_http_vod_module);

	// get the completed upstream
	u = ctx->upstream;
	ctx->upstream = NULL;

	if (u == NULL)
	{
		ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
			"ngx_child_request_wev_handler: unexpected, upstream is null");
		return;
	}

	// code taken from echo-nginx-module to work around nginx subrequest issues
	if (r == r->connection->data && r->postponed) {

		if (r->postponed->request) {
			r->connection->data = r->postponed->request;

#if defined(nginx_version) && nginx_version >= 8012
			ngx_http_post_request(r->postponed->request, NULL);
#else
			ngx_http_post_request(r->postponed->request);
#endif

		}
		else {
			ngx_http_output_filter(r, NULL);
		}
	}

	// get the final error code
	rc = ctx->error_code;
	if (rc == NGX_OK && is_in_memory(ctx))
	{
		if (u->headers_in.status_n != NGX_HTTP_OK && u->headers_in.status_n != NGX_HTTP_PARTIAL_CONTENT)
		{
			if (u->headers_in.status_n != 0)
			{
				ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
					"ngx_child_request_wev_handler: upstream returned a bad status %ui", u->headers_in.status_n);
			}
			else
			{
				ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
					"ngx_child_request_wev_handler: failed to get upstream status");
			}
			rc = NGX_HTTP_BAD_GATEWAY;
		}
		else if (u->length != 0 && u->length != -1 && !u->headers_in.chunked)
		{
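			// u->length holds the number of response bytes still expected from
			// the upstream (-1 means read until the connection closes), so a
			// positive value here means the upstream closed the connection early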
			ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
				"ngx_child_request_wev_handler: upstream connection was closed with %O bytes left to read", u->length);
			rc = NGX_HTTP_BAD_GATEWAY;
		}
	}
	else if (rc == NGX_ERROR)
	{
		ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
			"ngx_child_request_wev_handler: got error -1, changing to 502");
		rc = NGX_HTTP_BAD_GATEWAY;
	}

	if (ctx->send_header_result != NGX_OK)
	{
		rc = ctx->send_header_result;
	}

	// get the content length
	if (is_in_memory(ctx))
	{
		content_length = u->buffer.last - u->buffer.pos;
	}
	else if (u->state != NULL)
	{
		content_length = u->state->response_length;
	}
	else
	{
		content_length = 0;
	}

	if (ctx->callback != NULL)
	{
		// notify the caller
		ctx->callback(ctx->callback_context, rc, &u->buffer, content_length);
	}
	else
	{
		if (r->header_sent || ctx->dont_send_header)
		{
			// flush the buffer and close the request
			ngx_http_send_special(r, NGX_HTTP_LAST);
			ngx_http_finalize_request(r, NGX_OK);
		}
		else
		{
			// finalize the request
			ngx_http_finalize_request(r, rc);
		}
	}
}
ngx_int_t
ngx_http_lua_wev_handler(ngx_http_request_t *r)
{
    ngx_int_t                    rc;
    ngx_http_lua_ctx_t          *ctx;
    ngx_http_lua_main_conf_t    *lmcf;
    lua_State                   *cc;
    ngx_str_t                   *body_str;
    ngx_http_headers_out_t      *sr_headers;
    ngx_list_part_t             *part;
    ngx_table_elt_t             *header;
    ngx_uint_t                   i, index;

    dd("wev handler %.*s %.*s a:%d, postponed:%p",
            (int) r->uri.len, r->uri.data,
            (int) ngx_cached_err_log_time.len,
            ngx_cached_err_log_time.data,
            r == r->connection->data,
            r->postponed);
#if 0
    ngx_http_lua_dump_postponed(r);
#endif

    ctx = ngx_http_get_module_ctx(r, ngx_http_lua_module);
    if (ctx == NULL) {
        goto error;
    }

    dd("ctx = %p", ctx);
    dd("request done: %d", (int) r->done);
    dd("cleanup done: %p", ctx->cleanup);

    if (ctx->cleanup == NULL) {
        /* already done */
        dd("cleanup is null: %.*s", (int) r->uri.len, r->uri.data);

        if (ctx->entered_content_phase) {
            ngx_http_finalize_request(r,
                    ngx_http_lua_flush_postponed_outputs(r));
        }

        return NGX_DONE;
    }

    dd("waiting: %d, done: %d", (int) ctx->waiting,
            ctx->done);

    if (ctx->waiting && ! ctx->done) {
        dd("%.*s waiting and not done", (int) r->uri.len, r->uri.data);

#if 0
        ngx_http_lua_dump_postponed(r);
#endif

        if (r == r->connection->data && r->postponed) {
            if (r->postponed->request) {
                r->connection->data = r->postponed->request;

#if defined(nginx_version) && nginx_version >= 8012
                ngx_http_post_request(r->postponed->request, NULL);
#else
                ngx_http_post_request(r->postponed->request);
#endif

            } else {
                ngx_http_lua_flush_postponed_outputs(r);
            }
        }

        return NGX_DONE;
    }

    ctx->done = 0;

    dd("nsubreqs: %d", (int) ctx->nsubreqs);

    for (index = 0; index < ctx->nsubreqs; index++) {
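        /* build a Lua table (status, body, header) for this captured
           subrequest and leave it on the coroutine stack; these presumably
           become the return values of the capture call that issued the
           subrequests */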
        dd("summary: reqs %d, subquery %d, waiting %d, req %.*s",
                (int) ctx->nsubreqs,
                (int) index,
                (int) ctx->waiting,
                (int) r->uri.len, r->uri.data);

        cc = ctx->cc;

        /*  {{{ construct ret value */
        lua_newtable(cc);

        /*  copy captured status */
        lua_pushinteger(cc, ctx->sr_statuses[index]);
        lua_setfield(cc, -2, "status");

        /*  copy captured body */

        body_str = &ctx->sr_bodies[index];

        lua_pushlstring(cc, (char *) body_str->data, body_str->len);
        lua_setfield(cc, -2, "body");

        if (body_str->data) {
            dd("free body buffer ASAP");
            ngx_pfree(r->pool, body_str->data);
        }

        /* copy captured headers */

        lua_newtable(cc); /* res.header */

        sr_headers = ctx->sr_headers[index];

        if (sr_headers->content_length == NULL
            && sr_headers->content_length_n >= 0)
        {
            lua_pushliteral(cc, "Content-Length"); /* header key */

            lua_pushnumber(cc, sr_headers->content_length_n);
                /* head key value */

            lua_rawset(cc, -3); /* head */
        }

        if (sr_headers->content_type.len) {
            lua_pushliteral(cc, "Content-Type"); /* header key */
            lua_pushlstring(cc, (char *) sr_headers->content_type.data,
                    sr_headers->content_type.len); /* head key value */
            lua_rawset(cc, -3); /* head */
        }

        dd("saving subrequest response headers");

        part = &sr_headers->headers.part;
        header = part->elts;

        for (i = 0; /* void */; i++) {

            if (i >= part->nelts) {
                if (part->next == NULL) {
                    break;
                }

                part = part->next;
                header = part->elts;
                i = 0;
            }

            dd("checking sr header %.*s", (int) header[i].key.len,
                    header[i].key.data);

#if 1
            if (header[i].hash == 0) {
                continue;
            }
#endif

            dd("pushing sr header %.*s", (int) header[i].key.len,
                    header[i].key.data);

            lua_pushlstring(cc, (char *) header[i].key.data,
                    header[i].key.len); /* header key */
            lua_pushvalue(cc, -1); /* stack: table key key */

            /* check if header already exists */
            lua_rawget(cc, -3); /* stack: table key value */

            if (lua_isnil(cc, -1)) {
                lua_pop(cc, 1); /* stack: table key */

                lua_pushlstring(cc, (char *) header[i].value.data,
                        header[i].value.len); /* stack: table key value */

                lua_rawset(cc, -3); /* stack: table */

            } else {
                if (! lua_istable(cc, -1)) { /* already inserted one value */
                    lua_createtable(cc, 4, 0);
                        /* stack: table key value table */

                    lua_insert(cc, -2); /* stack: table key table value */
                    lua_rawseti(cc, -2, 1); /* stack: table key table */

                    lua_pushlstring(cc, (char *) header[i].value.data,
                            header[i].value.len);
                        /* stack: table key table value */

                    lua_rawseti(cc, -2, lua_objlen(cc, -2) + 1);
                        /* stack: table key table */

                    lua_rawset(cc, -3); /* stack: table */

                } else {
                    lua_pushlstring(cc, (char *) header[i].value.data,
                            header[i].value.len);
                        /* stack: table key table value */

                    lua_rawseti(cc, -2, lua_objlen(cc, -2) + 1);
                        /* stack: table key table */

                    lua_pop(cc, 2); /* stack: table */
                }
            }
        }

        lua_setfield(cc, -2, "header");

        /*  }}} */
    }

    dd("free sr_statues/headers/bodies memory ASAP");

#if 1
    ngx_pfree(r->pool, ctx->sr_statuses);

    ctx->sr_statuses = NULL;
    ctx->sr_headers = NULL;
    ctx->sr_bodies = NULL;
#endif

    lmcf = ngx_http_get_module_main_conf(r, ngx_http_lua_module);

    dd("about to run thread for %.*s...", (int) r->uri.len, r->uri.data);

    rc = ngx_http_lua_run_thread(lmcf->lua, r, ctx, ctx->nsubreqs);

    dd("already run thread for %.*s...", (int) r->uri.len, r->uri.data);

    if (rc == NGX_AGAIN || rc == NGX_DONE) {
        return NGX_DONE;
    }

    dd("entered content phase: %d", (int) ctx->entered_content_phase);

    if (ctx->entered_content_phase) {
        ngx_http_finalize_request(r, rc);
    }

    if (rc == NGX_OK) {
        return NGX_DECLINED;
    }

    return rc;

error:
    /* ctx may be NULL here (the first goto error fires before it is set up) */
    if (ctx && ctx->entered_content_phase) {
        ngx_http_finalize_request(r,
                ctx->headers_sent ? NGX_ERROR: NGX_HTTP_INTERNAL_SERVER_ERROR);
    }

    return NGX_ERROR;
}
static ngx_int_t
ngx_http_postpone_filter(ngx_http_request_t *r, ngx_chain_t *in)
{
    ngx_connection_t              *c;
    ngx_http_postponed_request_t  *pr;

    c = r->connection;

    ngx_log_debug3(NGX_LOG_DEBUG_HTTP, c->log, 0, "http postpone filter \"%V?%V\" %p", &r->uri, &r->args, in);

	// the current request is not the one allowed to send output, so its data
	// (if any) must not be written to the out chain; instead,
	// ngx_http_postpone_filter_add appends it to this request's postponed list
    if (r != c->data)
	{
        if (in)
		{
            ngx_http_postpone_filter_add(r, in);
            return NGX_OK;
        }

#if 0
        /* TODO: SSI may pass NULL */
        ngx_log_error(NGX_LOG_ALERT, c->log, 0, "http postpone filter NULL inactive request");
#endif

        return NGX_OK;
    }

	// if r->postponed is empty, nothing is queued ahead of this request's data
    /* at this point the current request may send data to the out chain; if its
       postponed list holds neither pending subrequests nor buffered data, send
       the newly produced data (in) directly, or keep flushing whatever is still
       buffered on the connection */
    if (r->postponed == NULL) 
	{

        if (in || c->buffered) 
		{
            return ngx_http_next_body_filter(r->main, in);
        }
		// the current request has no data to send
        return NGX_OK;
    }
	
	// any data the current request just produced has to go to the back of the queue
	/* the postponed list already contains nodes waiting to be processed, so wrap
	   the newly produced data (in) in a new node and append it to the tail of
	   the postponed list */
    if (in) 
	{
        ngx_http_postpone_filter_add(r, in);
    }

	/* process the nodes of the postponed list */
    do 
	{
        pr = r->postponed;

	/* if the node holds a subrequest, add it to the main request's posted_requests
	   list, so that the next call to ngx_http_run_posted_requests processes it */
        if (pr->request)
		{

            ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0, "http postpone filter wake \"%V?%V\"", &pr->request->uri, &pr->request->args);

            r->postponed = pr->next;

			/* post-order traversal: the current request still has an unfinished
			   subrequest that must complete before the nodes after it can be
			   handled, so make that subrequest the one allowed to send data to
			   the out chain */
            c->data = pr->request;

            return ngx_http_post_request(pr->request, NULL);
        }
		  /* if the node holds data, it can be handled right away by sending it to the out chain */
        if (pr->out == NULL)
		{
            ngx_log_error(NGX_LOG_ALERT, c->log, 0, "http postpone filter NULL output");
        } 
		else 
		{
            ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0, "http postpone filter output \"%V?%V\"", &r->uri, &r->args);

            if (ngx_http_next_body_filter(r->main, pr->out) == NGX_ERROR) 
			{
                return NGX_ERROR;
            }
        }

        r->postponed = pr->next;

    } 
	while (r->postponed);

    return NGX_OK;
}
ngx_int_t
ngx_http_srcache_access_handler(ngx_http_request_t *r)
{
    ngx_str_t                       skip;
    ngx_int_t                       rc;
    ngx_http_srcache_loc_conf_t    *conf;
    ngx_http_srcache_main_conf_t   *smcf;
    ngx_http_srcache_ctx_t         *ctx;
    ngx_chain_t                    *cl;
    size_t                          len;
    unsigned                        no_store;

    if (r != r->main) {
        return NGX_DECLINED;
    }

    /* being the main request */

    conf = ngx_http_get_module_loc_conf(r, ngx_http_srcache_filter_module);

    if (conf->fetch == NULL && conf->store == NULL) {
        dd("bypass: %.*s", (int) r->uri.len, r->uri.data);
        return NGX_DECLINED;
    }

    dd("store defined? %p", conf->store);

    dd("req method: %lu", (unsigned long) r->method);
    dd("cache methods: %lu", (unsigned long) conf->cache_methods);

    if (!(r->method & conf->cache_methods)) {
        ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                "srcache_fetch and srcache_store skipped due to request "
                "method %V", &r->method_name);

        return NGX_DECLINED;
    }

    if (conf->req_cache_control
        && ngx_http_srcache_request_no_cache(r, &no_store) == NGX_OK)
    {
        ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                "srcache_fetch skipped due to request headers "
                "\"Cache-Control: no-cache\" or \"Pragma: no-cache\"");

        if (!no_store) {
            /* register a ctx to give a chance to srcache_store to run */

            ctx = ngx_pcalloc(r->pool, sizeof(ngx_http_srcache_ctx_t));

            if (ctx == NULL) {
                return NGX_HTTP_INTERNAL_SERVER_ERROR;
            }

            ngx_http_set_ctx(r, ctx, ngx_http_srcache_filter_module);

        } else {
            ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                    "srcache_store skipped due to request header "
                    "\"Cache-Control: no-store\"");
        }

        return NGX_DECLINED;
    }

    if (conf->fetch_skip != NULL
            && ngx_http_complex_value(r, conf->fetch_skip, &skip) == NGX_OK
            && skip.len
            && (skip.len != 1 || skip.data[0] != '0'))
    {
        ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                "srcache_fetch skipped due to the true value fed into "
                "srcache_fetch_skip: \"%V\"", &skip);

        /* register a ctx to give a chance to srcache_store to run */

        ctx = ngx_pcalloc(r->pool, sizeof(ngx_http_srcache_ctx_t));

        if (ctx == NULL) {
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }

        ngx_http_set_ctx(r, ctx, ngx_http_srcache_filter_module);

        return NGX_DECLINED;
    }

    ctx = ngx_http_get_module_ctx(r, ngx_http_srcache_filter_module);

    if (ctx != NULL) {
        /*
        if (ctx->fetch_error) {
            return NGX_DECLINED;
        }
        */

        if (ctx->waiting_subrequest) {
            dd("waiting subrequest");
            return NGX_AGAIN;
        }

        if (ctx->request_done) {
            dd("request done");

            if (
#if defined(nginx_version) && nginx_version >= 8012
                ngx_http_post_request(r, NULL)
#else
                ngx_http_post_request(r)
#endif
                    != NGX_OK)
            {
                return NGX_ERROR;
            }

            if (!ctx->from_cache) {
                return NGX_DECLINED;
            }

            dd("sending header");

            if (ctx->body_from_cache && !(r->method & NGX_HTTP_HEAD)) {
                len = 0;

                for (cl = ctx->body_from_cache; cl->next; cl = cl->next) {
                    len += ngx_buf_size(cl->buf);
                }

                len += ngx_buf_size(cl->buf);

                cl->buf->last_buf = 1;

                r->headers_out.content_length_n = len;

                rc = ngx_http_srcache_next_header_filter(r);

                if (rc == NGX_ERROR || rc >= NGX_HTTP_SPECIAL_RESPONSE) {
                    return rc;
                }

                rc = ngx_http_srcache_next_body_filter(r, ctx->body_from_cache);

                if (rc == NGX_ERROR || rc >= NGX_HTTP_SPECIAL_RESPONSE) {
                    return rc;
                }

                dd("sent body from cache: %d", (int) rc);

            } else {
                r->headers_out.content_length_n = 0;

                rc = ngx_http_srcache_next_header_filter(r);

                if (rc == NGX_ERROR || rc >= NGX_HTTP_SPECIAL_RESPONSE) {
                    return rc;
                }

                dd("sent header from cache: %d", (int) rc);

                dd("send last buf for the main request");

                cl = ngx_alloc_chain_link(r->pool);
                cl->buf = ngx_calloc_buf(r->pool);
                cl->next = NULL;
                cl->buf->last_buf = 1;

                rc = ngx_http_srcache_next_body_filter(r, cl);

                if (rc == NGX_ERROR || rc >= NGX_HTTP_SPECIAL_RESPONSE) {
                    return rc;
                }

                dd("sent last buf from cache: %d", (int) rc);
            }

            dd("finalize from here...");
            ngx_http_finalize_request(r, NGX_OK);
            /* dd("r->main->count (post): %d", (int) r->main->count); */
            return NGX_DONE;
        }

    } else {
        ctx = ngx_pcalloc(r->pool, sizeof(ngx_http_srcache_ctx_t));

        if (ctx == NULL) {
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }

        ngx_http_set_ctx(r, ctx, ngx_http_srcache_filter_module);
    }

    smcf = ngx_http_get_module_main_conf(r, ngx_http_srcache_filter_module);

    if (! smcf->postponed_to_access_phase_end) {
        ngx_http_core_main_conf_t       *cmcf;
        ngx_http_phase_handler_t         tmp;
        ngx_http_phase_handler_t        *ph;
        ngx_http_phase_handler_t        *cur_ph;
        ngx_http_phase_handler_t        *last_ph;

        smcf->postponed_to_access_phase_end = 1;

        cmcf = ngx_http_get_module_main_conf(r, ngx_http_core_module);

        ph = cmcf->phase_engine.handlers;
        cur_ph = &ph[r->phase_handler];
        last_ph = &ph[cur_ph->next - 1];

        if (cur_ph < last_ph) {
            dd("swaping the contents of cur_ph and last_ph...");

            tmp = *cur_ph;

            memmove(cur_ph, cur_ph + 1,
                (last_ph - cur_ph) * sizeof (ngx_http_phase_handler_t));

            *last_ph = tmp;

            r->phase_handler--; /* redo the current ph */

            return NGX_DECLINED;
        }
    }

    if (conf->fetch == NULL) {
        dd("fetch is not defined");
        return NGX_DECLINED;
    }

    dd("running phase handler...");

    /* issue a subrequest to fetch cached stuff (if any) */

    rc = ngx_http_srcache_fetch_subrequest(r, conf, ctx);

    if (rc != NGX_OK) {
        return rc;
    }

    ctx->waiting_subrequest = 1;

    dd("quit");

    return NGX_AGAIN;
}
static ngx_int_t
ngx_http_postpone_filter(ngx_http_request_t *r, ngx_chain_t *in)
{
    ngx_connection_t              *c;
    ngx_http_postponed_request_t  *pr;

    c = r->connection;                          // get the connection object

    ngx_log_debug3(NGX_LOG_DEBUG_HTTP, c->log, 0,
                   "http postpone filter \"%V?%V\" %p", &r->uri, &r->args, in);

    if (r != c->data) {                         // r is not the request currently allowed to send output

        if (in) {
            ngx_http_postpone_filter_add(r, in);    // queue the data on this request's postponed list
            return NGX_OK;                          // stop the filter chain here
        }
        }

#if 0
        /* TODO: SSI may pass NULL */
        ngx_log_error(NGX_LOG_ALERT, c->log, 0,
                      "http postpone filter NULL inactive request",
                      &r->uri, &r->args);
#endif

        return NGX_OK;
    }

    if (r->postponed == NULL) {                 // nothing postponed ahead of this data

        if (in || c->buffered) {                // send through the main request
            return ngx_http_next_filter(r->main, in);
        }
        }

        return NGX_OK;
    }

    if (in) {                                   // new data must wait behind the postponed entries
        ngx_http_postpone_filter_add(r, in);    // append it to this request's postponed list
    }

    do {                                        // drain the postponed list
        pr = r->postponed;                      // take the head node

        if (pr->request) {                      // the node is a subrequest

            ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0,
                           "http postpone filter wake \"%V?%V\"",
                           &pr->request->uri, &pr->request->args);

            r->postponed = pr->next;            // remove the head node

            c->data = pr->request;              // it becomes the connection's active request

            return ngx_http_post_request(pr->request, NULL);    // add it to the posted requests queue
        }

        if (pr->out == NULL) {
            ngx_log_error(NGX_LOG_ALERT, c->log, 0,
                          "http postpone filter NULL output",
                          &r->uri, &r->args);

        } else {
            ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0,
                           "http postpone filter output \"%V?%V\"",
                           &r->uri, &r->args);

            if (ngx_http_next_filter(r->main, pr->out) == NGX_ERROR) {
                return NGX_ERROR;
            }
        }

        r->postponed = pr->next;                // move on to the next node

    } while (r->postponed);                     // until all data nodes have been processed

    return NGX_OK;
}
void
ngx_http_lua_wev_handler(ngx_http_request_t *r)
{
    ngx_int_t                    rc;
    ngx_http_lua_ctx_t          *ctx;
    ngx_http_lua_main_conf_t    *lmcf;
    lua_State                   *cc;
    ngx_chain_t                    *cl;
    size_t                       len;
    u_char                      *pos, *last;

    ctx = ngx_http_get_module_ctx(r, ngx_http_lua_module);
    if (ctx == NULL) {
        goto error;
    }

    dd("request done: %d", (int) r->done);
    dd("cleanup done: %p", ctx->cleanup);

#if 1
    if (ctx->cleanup == NULL) {
        return;
    }
#endif

    if (ctx->waiting && ! ctx->done) {
        if (r->main->posted_requests
                && r->main->posted_requests->request != r)
        {
            dd("postpone our wev handler");

#if defined(nginx_version) && nginx_version >= 8012
            ngx_http_post_request(r, NULL);
#else
            ngx_http_post_request(r);
#endif

            return;
        }
    }

    ctx->done = 0;

    len = 0;
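    /* gather the captured subrequest body: first compute the total size of the
       in-memory buffers, then copy them into one contiguous block for Lua */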
    for (cl = ctx->sr_body; cl; cl = cl->next) {
        /*  ignore all non-memory buffers */
        len += cl->buf->last - cl->buf->pos;
    }

    if (len == 0) {
        pos = NULL;

    } else {
        last = pos = ngx_palloc(r->pool, len);
        if (pos == NULL) {
            goto error;
        }

        for (cl = ctx->sr_body; cl; cl = cl->next) {
            /*  ignore all non-memory buffers */
            last = ngx_copy(last, cl->buf->pos, cl->buf->last - cl->buf->pos);
        }
    }

    cc = ctx->cc;

    /*  {{{ construct ret value */
    lua_newtable(cc);

    /*  copy captured status */
    lua_pushinteger(cc, ctx->sr_status);
    lua_setfield(cc, -2, "status");

    /*  copy captured body */
    lua_pushlstring(cc, (const char*)pos, len);
    lua_setfield(cc, -2, "body");
    /*  }}} */

    lmcf = ngx_http_get_module_main_conf(r, ngx_http_lua_module);

    dd("about to run thread...");

    rc = ngx_http_lua_run_thread(lmcf->lua, r, ctx, 1);

    dd("already run thread...");

    if (rc == NGX_AGAIN || rc == NGX_DONE) {
        ctx->waiting = 1;
        ctx->done = 0;

    } else {
        ctx->waiting = 0;
        ctx->done = 1;

        ngx_http_finalize_request(r, rc);
    }

    return;

error:
    /* ctx may be NULL here (the first goto error fires before it is set up) */
    ngx_http_finalize_request(r,
            ctx && ctx->headers_sent ? NGX_ERROR : NGX_HTTP_INTERNAL_SERVER_ERROR);

    return;
}