/* Create the input object for a task. Depending on the task settings, the
 * request line and headers are serialized into the brigade up front, or an
 * empty brigade is prepared for later body data, or no brigade at all when
 * EOS has already been seen and nothing needs serializing. */
h2_task_input *h2_task_input_create(h2_task *task, apr_pool_t *pool,
                                    apr_bucket_alloc_t *bucket_alloc)
{
    h2_task_input *input = apr_pcalloc(pool, sizeof(h2_task_input));

    if (!input) {
        return NULL;
    }
    input->task = task;
    input->bb = NULL;

    if (task->serialize_headers) {
        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
                      "h2_task_input(%s): serialize request %s %s",
                      task->id, task->request->method, task->request->path);
        /* Write the HTTP/1.1 request line, then all headers, then the
         * empty line terminating the header section. */
        input->bb = apr_brigade_create(pool, bucket_alloc);
        apr_brigade_printf(input->bb, NULL, NULL, "%s %s HTTP/1.1\r\n",
                           task->request->method, task->request->path);
        apr_table_do(ser_header, input, task->request->headers, NULL);
        apr_brigade_puts(input->bb, NULL, NULL, "\r\n");
        if (input->task->input_eos) {
            APR_BRIGADE_INSERT_TAIL(input->bb,
                                    apr_bucket_eos_create(bucket_alloc));
        }
    }
    else if (!input->task->input_eos) {
        /* More input expected later; provide an empty brigade for it. */
        input->bb = apr_brigade_create(pool, bucket_alloc);
    }
    /* else: not serializing and eos already seen — no brigade needed. */

    return input;
}
/* Add request body data to the h1 conversion.
 * If no content-length was announced, the data is wrapped in HTTP/1.1
 * chunk framing; otherwise the remaining expected length is tracked and
 * a warning is logged when more data arrives than was announced.
 * Returns the status of the brigade operations. */
apr_status_t h2_to_h1_add_data(h2_to_h1 *to_h1, const char *data, size_t len)
{
    ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, h2_mplx_get_conn(to_h1->m),
                  "h2_to_h1(%ld-%d): add %ld data bytes",
                  h2_mplx_get_id(to_h1->m), to_h1->stream_id, (long)len);

    if (to_h1->chunked) {
        /* if input may have a body and we have not seen any
         * content-length header, we need to chunk the input data.
         * Cast len: "%lx" expects unsigned long, len is size_t. */
        apr_status_t status = apr_brigade_printf(to_h1->bb, NULL, NULL,
                                                 "%lx\r\n",
                                                 (unsigned long)len);
        if (status == APR_SUCCESS) {
            status = h2_to_h1_add_data_raw(to_h1, data, len);
            if (status == APR_SUCCESS) {
                /* chunk trailer */
                status = apr_brigade_puts(to_h1->bb, NULL, NULL, "\r\n");
            }
        }
        return status;
    }
    else {
        to_h1->remain_len -= len;
        if (to_h1->remain_len < 0) {
            /* first %ld is the excess byte count, second the announced
             * content-length (arguments were previously swapped) */
            ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0,
                          h2_mplx_get_conn(to_h1->m),
                          "h2_to_h1(%ld-%d): got %ld more content bytes than announced "
                          "in content-length header: %ld",
                          h2_mplx_get_id(to_h1->m), to_h1->stream_id,
                          -(long)to_h1->remain_len, (long)to_h1->content_len);
        }
        return h2_to_h1_add_data_raw(to_h1, data, len);
    }
}
/* apr_table_do callback: log one key/value pair at DEBUG level and append
 * it to the context's bucket brigade. Always returns 1 to keep iterating. */
static int dump_table(void *data, const char *key, const char *value)
{
    struct ctx_t *ctx = data;

    ap_log_rerror(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, ctx->r,
                  "%s => %s", key, value);
    apr_brigade_printf(ctx->bb, NULL, NULL, "\t%s => %s\n", key, value);
    return 1; /* continue table iteration */
}
/* Write the directory-sync status as a JSON document into the query's
 * output brigade: per-directory progress, the db status object, and any
 * accumulated error messages. Always returns 0. */
int output_dirsync_status(music_query_t* music_query)
{
    apr_bucket_brigade* bb = music_query->output->bucket_brigade;
    int idx;

    /* Allow cross-origin reads and mark the response as JSON. */
    apr_table_add(music_query->output->headers,
                  "Access-Control-Allow-Origin", "*");
    apr_cpystrn((char*)music_query->output->content_type,
                "application/json", 255);

    apr_brigade_puts(bb, NULL, NULL, "{\n");

    /* Per-directory sync progress, only when directories are configured. */
    if (music_query->globals->music_dirs != NULL) {
        apr_brigade_puts(bb, NULL, NULL, "\t\"dir_sync_status\" : {\n");
        for (idx = 0; idx < music_query->globals->music_dirs->nelts; idx++) {
            dir_t* dir =
                &(((dir_t*)music_query->globals->music_dirs->elts)[idx]);

            apr_brigade_printf(bb, NULL, NULL, "\t\t\"%s\" : {\n", dir->path);
            apr_brigade_printf(bb, NULL, NULL,
                               "\t\t\"Progress\" : \"%.2f\",\n",
                               dir->stats->sync_progress);
            apr_brigade_printf(bb, NULL, NULL,
                               "\t\t\"Files Scanned\" : \"%d\"\n",
                               dir->stats->files_scanned);
            apr_brigade_puts(bb, NULL, NULL, "\t\t}");
            /* comma between entries, none after the last */
            if (idx + 1 < music_query->globals->music_dirs->nelts) {
                apr_brigade_puts(bb, NULL, NULL, ",");
            }
        }
        apr_brigade_puts(bb, NULL, NULL, "\t},\n");
    }

    apr_brigade_puts(bb, NULL, NULL, "\t\"db_status\" : ");
    output_db_result_json(music_query->results, music_query->db_query,
                          music_query->output);
    apr_brigade_puts(bb, NULL, NULL, "\n,");
    print_error_messages(music_query->pool, bb, music_query->error_messages);
    apr_brigade_puts(bb, NULL, NULL, "\n}\n");
    return 0;
}
/* Append body data to the stream's input brigade. When chunked is set, the
 * data is framed as an HTTP/1.1 chunk (size line, payload, CRLF trailer);
 * otherwise it is written verbatim. Returns the first failing status. */
static apr_status_t input_add_data(h2_stream *stream, const char *data,
                                   size_t len, int chunked)
{
    apr_status_t rv;

    if (!chunked) {
        /* raw pass-through, no framing */
        return apr_brigade_write(stream->bbin, input_flush, stream,
                                 data, len);
    }

    /* chunk size line */
    rv = apr_brigade_printf(stream->bbin, input_flush, stream,
                            "%lx\r\n", (unsigned long)len);
    if (rv == APR_SUCCESS) {
        /* chunk payload */
        rv = apr_brigade_write(stream->bbin, input_flush, stream, data, len);
    }
    if (rv == APR_SUCCESS) {
        /* chunk trailer */
        rv = apr_brigade_puts(stream->bbin, input_flush, stream, "\r\n");
    }
    return rv;
}
/* Create the input for a task environment. Serializes the request line and
 * headers into a brigade when requested, or prepares an empty brigade when
 * more input is expected; with eos already seen and no serialization, no
 * brigade is created at all. At DEBUG level, logs the serialized request. */
h2_task_input *h2_task_input_create(h2_task_env *env, apr_pool_t *pool,
                                    apr_bucket_alloc_t *bucket_alloc)
{
    h2_task_input *input = apr_pcalloc(pool, sizeof(h2_task_input));
    if (input) {
        input->env = env;
        input->bb = NULL;

        if (env->serialize_headers) {
            /* request line + headers + terminating empty line */
            input->bb = apr_brigade_create(pool, bucket_alloc);
            apr_brigade_printf(input->bb, NULL, NULL, "%s %s HTTP/1.1\r\n",
                               env->method, env->path);
            apr_table_do(ser_header, input, env->headers, NULL);
            apr_brigade_puts(input->bb, NULL, NULL, "\r\n");
            if (input->env->input_eos) {
                APR_BRIGADE_INSERT_TAIL(input->bb,
                                        apr_bucket_eos_create(bucket_alloc));
            }
        }
        else if (!input->env->input_eos) {
            input->bb = apr_brigade_create(pool, bucket_alloc);
        }
        else {
            /* We do not serialize and have eos already, no need to
             * create a bucket brigade. */
        }

        if (APLOGcdebug(&env->c)) {
            char buffer[1024];
            apr_size_t len = 0;
            if (input->bb) {
                len = sizeof(buffer) - 1;
                apr_brigade_flatten(input->bb, buffer, &len);
            }
            /* BUGFIX: previously len stayed sizeof(buffer)-1 when no
             * brigade existed, so an uninitialized buffer was logged.
             * Now an empty brigade logs as an empty string. */
            buffer[len] = 0;
            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, &env->c,
                          "h2_task_input(%s): request is: %s",
                          env->id, buffer);
        }
    }
    return input;
}
/* apr_table_do callback: serialize one header as "name: value\r\n" into the
 * task's input brigade. Always returns 1 so iteration continues. */
static int input_ser_header(void *ctx, const char *name, const char *value)
{
    h2_task *t = ctx;

    apr_brigade_printf(t->input.bb, NULL, NULL, "%s: %s\r\n", name, value);
    return 1; /* keep iterating */
}
/* Run an h2 task on a worker thread: set up a unique slave connection id,
 * create the output beam, install the h2 context and pre-connection state,
 * optionally serialize the request into the input brigade, then hand the
 * slave connection to ap_run_process_connection().
 * Returns APR_EAGAIN for a frozen task (cleanup delayed), APR_ENOMEM when
 * the output beam cannot be created, else the result of output_finish(). */
apr_status_t h2_task_do(h2_task *task, apr_thread_t *thread, int worker_id)
{
    conn_rec *c;

    ap_assert(task);
    c = task->c;
    /* bookkeeping: mark the task as started on a worker */
    task->worker_started = 1;
    task->started_at = apr_time_now();

    if (c->master) {
        /* Each conn_rec->id is supposed to be unique at a point in time. Since
         * some modules (and maybe external code) uses this id as an identifier
         * for the request_rec they handle, it needs to be unique for slave
         * connections also.
         * The connection id is generated by the MPM and most MPMs use the formula
         *    id := (child_num * max_threads) + thread_num
         * which means that there is a maximum id of about
         *    idmax := max_child_count * max_threads
         * If we assume 2024 child processes with 2048 threads max, we get
         *    idmax ~= 2024 * 2048 = 2 ** 22
         * On 32 bit systems, we have not much space left, but on 64 bit systems
         * (and higher?) we can use the upper 32 bits without fear of collision.
         * 32 bits is just what we need, since a connection can only handle so
         * many streams.
         */
        int slave_id, free_bits;

        task->id = apr_psprintf(task->pool, "%ld-%d", c->master->id,
                                task->stream_id);
        if (sizeof(unsigned long) >= 8) {
            free_bits = 32;
            slave_id = task->stream_id;
        }
        else {
            /* Assume we have a more limited number of threads/processes
             * and h2 workers on a 32-bit system. Use the worker instead
             * of the stream id. */
            free_bits = 8;
            slave_id = worker_id;
        }
        /* combine master id and slave id into a unique conn_rec->id */
        task->c->id = (c->master->id << free_bits)^slave_id;
        c->keepalive = AP_CONN_KEEPALIVE;
    }

    /* Create the output beam that transports response data to the master. */
    h2_beam_create(&task->output.beam, c->pool, task->stream_id, "output",
                   H2_BEAM_OWNER_SEND, 0, task->timeout);
    if (!task->output.beam) {
        return APR_ENOMEM;
    }

    h2_beam_buffer_size_set(task->output.beam, task->output.max_buffer);
    h2_beam_send_from(task->output.beam, task->pool);

    /* attach h2 context and task id to the slave connection */
    h2_ctx_create_for(c, task);
    apr_table_setn(c->notes, H2_TASK_ID_NOTE, task->id);

    if (task->input.beam) {
        h2_beam_mutex_enable(task->input.beam);
    }

    h2_slave_run_pre_connection(c, ap_get_conn_socket(c));

    task->input.bb = apr_brigade_create(task->pool, c->bucket_alloc);
    if (task->request->serialize) {
        /* write request line + headers as HTTP/1.1 text into the input
         * brigade so the core protocol handler can parse them */
        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
                      "h2_task(%s): serialize request %s %s",
                      task->id, task->request->method, task->request->path);
        apr_brigade_printf(task->input.bb, NULL, NULL, "%s %s HTTP/1.1\r\n",
                           task->request->method, task->request->path);
        apr_table_do(input_ser_header, task, task->request->headers, NULL);
        apr_brigade_puts(task->input.bb, NULL, NULL, "\r\n");
    }

    ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
                  "h2_task(%s): process connection", task->id);
    task->c->current_thread = thread;
    ap_run_process_connection(c);

    if (task->frozen) {
        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
                      "h2_task(%s): process_conn returned frozen task",
                      task->id);
        /* cleanup delayed */
        return APR_EAGAIN;
    }
    else {
        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
                      "h2_task(%s): processing done", task->id);
        return output_finish(task);
    }
}
/* apr_table_do callback: append one "name: value\r\n" header line to the
 * task input brigade. Always returns 1 to continue iteration. */
static int ser_header(void *ctx, const char *name, const char *value)
{
    h2_task_input *input = ctx;

    apr_brigade_printf(input->bb, NULL, NULL, "%s: %s\r\n", name, value);
    return 1; /* keep walking the table */
}