/* Hook: decide whether to switch this connection to the given protocol.
 * Returns DONE when 'protocol' is one of ours (h2/h2c, per TLS-ness of the
 * connection) and the switch has been performed, DECLINED otherwise.
 *
 * When the switch happens mid-request (r != NULL), the HTTP/1 input/output
 * filters are removed and the whole connection is processed as h2 right here.
 */
static int h2_protocol_switch(conn_rec *c, request_rec *r, server_rec *s,
                              const char *protocol)
{
    int found = 0;
    /* Pick the candidate protocol list based on whether this is a TLS
     * connection ("h2") or cleartext ("h2c"). */
    const char **protos = h2_h2_is_tls(c)? h2_tls_protos : h2_clear_protos;
    const char **p = protos;

    (void)s;
    while (*p) {
        if (!strcmp(*p, protocol)) {
            found = 1;
            break;
        }
        p++;
    }

    if (found) {
        h2_ctx *ctx = h2_ctx_get(c);

        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
                      "switching protocol to '%s'", protocol);
        h2_ctx_protocol_set(ctx, protocol);
        h2_ctx_server_set(ctx, s);

        if (r != NULL) {
            apr_status_t status;
            /* Switching in the middle of a request means that
             * we have to send out the response to this one in h2
             * format. So we need to take over the connection
             * right away.
             */
            ap_remove_input_filter_byhandle(r->input_filters, "http_in");
            ap_remove_input_filter_byhandle(r->input_filters, "reqtimeout");
            ap_remove_output_filter_byhandle(r->output_filters, "HTTP_HEADER");

            /* Ok, start an h2_conn on this one. */
            status = h2_conn_process(r->connection, r, r->server);
            if (status != DONE) {
                /* Nothing really to do about this. */
                /* Fixed typo: "proessed" -> "processed". */
                ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, r,
                              "session processed, unexpected status");
            }
        }
        return DONE;
    }
    return DECLINED;
}
/* Execute one stream's work on a worker thread.
 *
 * Copies the needed fields of 'task' into a stack-local h2_task_env so the
 * task object itself can be torn down independently: ownership of task->mplx
 * and task->c is transferred into env (the task pointers are NULLed), and
 * the request line/headers are cloned into a fresh subpool.
 *
 * NOTE(review): cleanup ordering below is deliberate — input/output are
 * destroyed before the task is marked finished, the worker condition is
 * signalled before the pool is destroyed, and h2_mplx_task_done() runs
 * last. Do not reorder without checking the stream/mplx teardown paths.
 */
apr_status_t h2_task_do(h2_task *task, h2_worker *worker)
{
    apr_status_t status = APR_SUCCESS;
    /* Read config via the master connection before mplx ownership moves. */
    h2_config *cfg = h2_config_get(task->mplx->c);
    h2_task_env env;

    AP_DEBUG_ASSERT(task);

    memset(&env, 0, sizeof(env));

    env.id = task->id;
    env.stream_id = task->stream_id;
    /* Take over the mplx reference; the task no longer owns it. */
    env.mplx = task->mplx;
    task->mplx = NULL;

    env.input_eos = task->input_eos;
    env.serialize_headers = !!h2_config_geti(cfg, H2_CONF_SER_HEADERS);

    /* Create a subpool from the worker one to be used for all things
     * with life-time of this task_env execution. */
    apr_pool_create(&env.pool, h2_worker_get_pool(worker));

    /* Link the env to the worker which provides useful things such
     * as mutex, a socket etc. */
    env.io = h2_worker_get_cond(worker);

    /* Clone fields, so that lifetimes become (more) independent. */
    env.method = apr_pstrdup(env.pool, task->method);
    env.path = apr_pstrdup(env.pool, task->path);
    env.authority = apr_pstrdup(env.pool, task->authority);
    env.headers = apr_table_clone(env.pool, task->headers);

    /* Setup the pseudo connection to use our own pool and bucket_alloc */
    if (task->c) {
        /* Task brought its own connection: copy it by value and take over. */
        env.c = *task->c;
        task->c = NULL;
        status = h2_conn_setup(&env, worker);
    }
    else {
        status = h2_conn_init(&env, worker);
    }

    /* save in connection that this one is a pseudo connection, prevents
     * other hooks from messing with it. */
    h2_ctx_create_for(&env.c, &env);

    if (status == APR_SUCCESS) {
        env.input = h2_task_input_create(&env, env.pool, env.c.bucket_alloc);
        env.output = h2_task_output_create(&env, env.pool, env.c.bucket_alloc);
        /* Run the HTTP/1 engine over the pseudo connection. */
        status = h2_conn_process(&env.c, h2_worker_get_socket(worker));
        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, &env.c,
                      "h2_task(%s): processing done", env.id);
    }
    else {
        ap_log_cerror(APLOG_MARK, APLOG_WARNING, status, &env.c,
                      "h2_task(%s): error setting up h2_task_env", env.id);
    }

    if (env.input) {
        h2_task_input_destroy(env.input);
        env.input = NULL;
    }

    if (env.output) {
        /* Close flushes any pending response data before destruction. */
        h2_task_output_close(env.output);
        h2_task_output_destroy(env.output);
        env.output = NULL;
    }

    /* Mark finished and wake anyone waiting on the worker condition
     * BEFORE the pool goes away. */
    h2_task_set_finished(task);
    if (env.io) {
        apr_thread_cond_signal(env.io);
    }

    if (env.pool) {
        apr_pool_destroy(env.pool);
        env.pool = NULL;
    }

    /* env.c.id != 0 is used as "pseudo connection was set up" marker here —
     * presumably assigned in h2_conn_setup/init; TODO confirm. */
    if (env.c.id) {
        h2_conn_post(&env.c, worker);
    }

    /* Tell the multiplexer this stream's task is done (mplx ownership was
     * transferred to env above, so env.mplx is still valid here). */
    h2_mplx_task_done(env.mplx, env.stream_id);

    return status;
}
/* Connection hook: decide whether this connection is handled by h2.
 *
 * Declines for our own stream pseudo-connections. If no protocol has been
 * negotiated yet and the connection still speaks HTTP/1.1 with H2Direct
 * enabled, peeks at the first 24 bytes for the HTTP/2 connection preface
 * (H2_MAGIC_TOKEN) and, on match, marks the connection as h2/h2c. If the
 * context ends up active, takes over and processes the whole connection.
 *
 * Returns the result of h2_conn_process() when taking over, DECLINED
 * otherwise.
 */
int h2_h2_process_conn(conn_rec* c)
{
    h2_ctx *ctx = h2_ctx_get(c);

    ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_h2, process_conn");
    if (h2_ctx_is_task(ctx)) {
        /* our stream pseudo connection */
        return DECLINED;
    }

    if (h2_ctx_protocol_get(c)) {
        /* Something has been negotiated */
    }
    else if (!strcmp(AP_PROTOCOL_HTTP1, ap_get_protocol(c))
             && h2_allows_h2_direct(c)
             && h2_is_acceptable_connection(c, 1)) {
        /* connection still is on http/1.1 and H2Direct is enabled.
         * Otherwise connection is in a fully acceptable state.
         * -> peek at the first 24 incoming bytes
         */
        apr_bucket_brigade *temp;
        apr_status_t status;
        char *s = NULL;
        apr_size_t slen;

        temp = apr_brigade_create(c->pool, c->bucket_alloc);
        status = ap_get_brigade(c->input_filters, temp,
                                AP_MODE_SPECULATIVE, APR_BLOCK_READ, 24);
        if (status != APR_SUCCESS) {
            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c,
                          "h2_h2, error reading 24 bytes speculative");
            apr_brigade_destroy(temp);
            return DECLINED;
        }

        apr_brigade_pflatten(temp, &s, &slen, c->pool);
        if ((slen >= 24) && !memcmp(H2_MAGIC_TOKEN, s, 24)) {
            ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
                          "h2_h2, direct mode detected");
            h2_ctx_protocol_set(ctx, h2_h2_is_tls(c)? "h2" : "h2c");
        }
        else {
            /* apr_brigade_pflatten() does NOT NUL-terminate the buffer,
             * so bound the print with %.*s instead of a bare %s to avoid
             * reading past the flattened data. */
            ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
                          "h2_h2, not detected in %d bytes: %.*s",
                          (int)slen, (int)slen, s);
        }

        apr_brigade_destroy(temp);
    }
    else {
        /* the connection is not HTTP/1.1 or not for us, don't touch it */
        return DECLINED;
    }

    /* If "h2" was selected as protocol (by whatever mechanism), take over
     * the connection.
     */
    if (h2_ctx_is_active(ctx)) {
        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
                      "h2_h2, connection, h2 active");
        return h2_conn_process(c, NULL, ctx->server);
    }

    ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_h2, declined");
    return DECLINED;
}
/* Execute one stream's work on a worker thread (variant using a prepared
 * h2_conn taken over from the task).
 *
 * Unlike the pool-cloning variant, request fields are shared with the task
 * by pointer, relying on the stream not being destroyed while the task runs
 * (see comment below). Ownership of task->conn moves into env.
 *
 * NOTE(review): on h2_conn_prep() failure, h2_ctx_create_for() and the
 * trace log still dereference env.conn->c — assumes env.conn and its c are
 * valid even when prep fails; TODO confirm.
 */
apr_status_t h2_task_do(h2_task *task, h2_worker *worker)
{
    apr_status_t status = APR_SUCCESS;
    h2_config *cfg = h2_config_get(task->mplx->c);
    h2_task_env env;

    AP_DEBUG_ASSERT(task);

    memset(&env, 0, sizeof(env));

    env.id = task->id;
    env.stream_id = task->stream_id;
    env.mplx = task->mplx;

    /* Not cloning these task fields:
     * If the stream is destroyed before the task is done, this might
     * be a problem. However that should never happen as stream destruction
     * explicitly checks if task processing has finished.
     */
    env.method = task->method;
    env.path = task->path;
    env.authority = task->authority;
    env.headers = task->headers;
    env.input_eos = task->input_eos;

    /* Share the worker condition with the task so waiters can be woken. */
    task->io = env.io = h2_worker_get_cond(worker);

    /* Take over the pseudo connection; the task no longer owns it. */
    env.conn = task->conn;
    task->conn = NULL;

    env.serialize_headers = !!h2_config_geti(cfg, H2_CONF_SER_HEADERS);

    status = h2_conn_prep(env.conn, task->mplx->c, worker);

    /* save in connection that this one is for us, prevents
     * other hooks from messing with it. */
    h2_ctx_create_for(env.conn->c, &env);

    if (status == APR_SUCCESS) {
        apr_pool_t *pool = env.conn->pool;
        apr_bucket_alloc_t *bucket_alloc = env.conn->bucket_alloc;
        env.input = h2_task_input_create(&env, pool, bucket_alloc);
        env.output = h2_task_output_create(&env, pool, bucket_alloc);
        /* Run the HTTP/1 engine over the pseudo connection. */
        status = h2_conn_process(env.conn);
    }
    ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, env.conn->c,
                  "h2_task(%s):processing done", task->id);

    if (env.input) {
        h2_task_input_destroy(env.input);
        env.input = NULL;
    }

    /* Return the connection to the worker before closing the output —
     * NOTE(review): this runs before output close/destroy; presumably
     * h2_conn_post does not invalidate the output's resources — confirm. */
    if (env.conn) {
        h2_conn_post(env.conn, worker);
        env.conn = NULL;
    }

    if (env.output) {
        /* Close flushes any pending response data before destruction. */
        h2_task_output_close(env.output);
        h2_task_output_destroy(env.output);
        env.output = NULL;
    }

    /* Mark finished, then wake anyone waiting on the worker condition. */
    h2_task_set_finished(task);
    if (env.io) {
        apr_thread_cond_signal(env.io);
    }

    return status;
}
/* Execute one stream's work on a worker thread (variant that stores all
 * execution state directly on the task, in a subpool of the worker pool).
 *
 * NOTE(review): cleanup ordering is deliberate — input/output destroyed,
 * condition signalled, pool destroyed, connection posted back, and
 * h2_mplx_task_done() last. Do not reorder without checking the
 * stream/mplx teardown paths.
 */
apr_status_t h2_task_do(h2_task *task, h2_worker *worker)
{
    apr_status_t status = APR_SUCCESS;
    h2_config *cfg = h2_config_get(task->mplx->c);

    AP_DEBUG_ASSERT(task);

    task->serialize_headers = h2_config_geti(cfg, H2_CONF_SER_HEADERS);

    /* Create a subpool from the worker one to be used for all things
     * with life-time of this task execution. */
    apr_pool_create(&task->pool, h2_worker_get_pool(worker));

    /* Link the task to the worker which provides useful things such
     * as mutex, a socket etc. */
    task->io = h2_worker_get_cond(worker);

    status = h2_conn_setup(task, worker);

    /* save in connection that this one is a pseudo connection, prevents
     * other hooks from messing with it. */
    h2_ctx_create_for(task->c, task);

    if (status == APR_SUCCESS) {
        task->input = h2_task_input_create(task, task->pool,
                                           task->c->bucket_alloc);
        task->output = h2_task_output_create(task, task->pool,
                                             task->c->bucket_alloc);
        /* Run the HTTP/1 engine over the pseudo connection. */
        status = h2_conn_process(task->c, h2_worker_get_socket(worker));
        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, task->c,
                      "h2_task(%s): processing done", task->id);
    }
    else {
        /* NOTE(review): logs via task->c even though h2_conn_setup failed —
         * assumes task->c is still valid in that case; TODO confirm. */
        ap_log_cerror(APLOG_MARK, APLOG_WARNING, status, task->c,
                      APLOGNO(02957) "h2_task(%s): error setting up h2_task",
                      task->id);
    }

    if (task->input) {
        h2_task_input_destroy(task->input);
        task->input = NULL;
    }

    if (task->output) {
        /* Close flushes any pending response data before destruction. */
        h2_task_output_close(task->output);
        h2_task_output_destroy(task->output);
        task->output = NULL;
    }

    /* Wake anyone waiting on the worker condition BEFORE the pool goes
     * away. */
    if (task->io) {
        apr_thread_cond_signal(task->io);
    }

    if (task->pool) {
        apr_pool_destroy(task->pool);
        task->pool = NULL;
    }

    /* task->c->id != 0 is used as "pseudo connection was set up" marker —
     * presumably assigned in h2_conn_setup; TODO confirm. */
    if (task->c->id) {
        h2_conn_post(task->c, worker);
    }

    /* Tell the multiplexer this stream's task is done. */
    h2_mplx_task_done(task->mplx, task->stream_id);

    return status;
}