Example #1
static int
php_apache_sapi_read_post(char *buf, uint count_bytes)
{
	apr_size_t len;
	php_struct *ctx = SG(server_context);
	apr_bucket_brigade *brigade;
	apr_bucket *partition;

	brigade = ctx->post_data;
	len = count_bytes;

	switch (apr_brigade_partition(ctx->post_data, count_bytes, &partition)) {
	case APR_SUCCESS:
		apr_brigade_flatten(ctx->post_data, buf, &len);
		brigade = apr_brigade_split(ctx->post_data, partition);
		apr_brigade_destroy(ctx->post_data);
		ctx->post_data = brigade;
		break;
	case APR_INCOMPLETE:
		apr_brigade_flatten(ctx->post_data, buf, &len);
		apr_brigade_cleanup(ctx->post_data);
		break;
	}

	return len;
}
Example #2
APU_DECLARE(apr_status_t) apr_brigade_pflatten(apr_bucket_brigade *bb,
                                               char **c,
                                               apr_size_t *len,
                                               apr_pool_t *pool)
{
    apr_off_t actual;
    apr_size_t total;
    apr_status_t rv;

    apr_brigade_length(bb, 1, &actual);
    
    /* XXX: This is dangerous beyond belief.  At least in the
     * apr_brigade_flatten case, the user explicitly stated their
     * buffer length - so we don't up and palloc 4GB for a single
     * file bucket.  This API must grow a useful max boundary,
     * either compiled-in or preset via the *len value.
     *
     * Shouldn't both fn's grow an additional return value for 
     * the case that the brigade couldn't be flattened into the
     * provided or allocated buffer (such as APR_EMOREDATA?)
     * Not a failure, simply an advisory result.
     */
    total = (apr_size_t)actual;

    *c = apr_palloc(pool, total);
    
    rv = apr_brigade_flatten(bb, *c, &total);

    if (rv != APR_SUCCESS) {
        return rv;
    }

    *len = total;
    return APR_SUCCESS;
}
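The implementation above sizes its destination from apr_brigade_length() and flattens into a single pool allocation. A minimal caller sketch, assuming a brigade bb and a pool already exist (the names are illustrative, not taken from the examples):

    char *body = NULL;
    apr_size_t body_len = 0;
    apr_status_t rv;

    /* Flatten the whole brigade into one pool-allocated buffer. */
    rv = apr_brigade_pflatten(bb, &body, &body_len, pool);
    if (rv == APR_SUCCESS) {
        /* body now holds body_len bytes; it is not NUL-terminated. */
    }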
Example #3
Params *getPostParms(request_rec *r, apr_off_t * postSize) {
    apr_array_header_t *pairs = NULL;
    apr_off_t len;
    apr_size_t size;
    int res;
    int i = 0;
    char *buffer;
    Params *params = NULL;
    
    res = ap_parse_form_data(r, NULL, &pairs, -1, HUGE_STRING_LEN);
    if (res != OK || !pairs) return NULL; 
    params = apr_pcalloc(r->pool, sizeof(Params) * (pairs->nelts + 1));
    while (pairs && !apr_is_empty_array(pairs)) {
        ap_form_pair_t *pair = (ap_form_pair_t *) apr_array_pop(pairs);
        apr_brigade_length(pair->value, 1, &len);
        size = (apr_size_t) len;
        buffer = apr_palloc(r->pool, size + 1);
        apr_brigade_flatten(pair->value, buffer, &size);
        buffer[len] = 0;
        params[i].key = apr_pstrdup(r->pool, pair->name);
        params[i].val = buffer;
        params[i].length = strlen(buffer);
        //ap_rprintf(r,"key : val : len: is %s : %s : %d ===", params[i].key, params[i].val, params[i].length);
        i++;
    }
    *postSize = i;

    return params;
}
Example #4
static int brigade_flatten(lua_State*L)
{
	apr_bucket_brigade *bb = (apr_bucket_brigade*)CHECK_BUCKETBRIGADE_OBJECT(1);
	apr_off_t off = 0;
	apr_status_t rc = apr_brigade_length(bb, 1, &off);
	apr_size_t len = (apr_size_t)off;
	
	if(rc==APR_SUCCESS)
	{
		char* buf = apr_bucket_alloc(len, bb->bucket_alloc);
		rc = apr_brigade_flatten(bb, buf, &len);
		if(rc==APR_SUCCESS)
		{
			lua_pushlstring(L,buf, len);
		}else
		{
			lua_pushnil(L);
		}
		apr_bucket_free(buf);
	}else
	{
		lua_pushnil(L);
	}
	lua_pushinteger(L,rc);
	return 2;
}
Example #5
static llzr_conn* get_post_data(request_rec* r)
{
    GTable *request_post_data = g_hash_table_new(g_str_hash, g_str_equal);
    apr_array_header_t *pairs = NULL;
    apr_off_t len;
    apr_size_t size;
    int res;
    char *buffer;

    res = ap_parse_form_data(r, NULL, &pairs, -1, HUGE_STRING_LEN);
    if (res != OK || !pairs) return NULL; /* Return NULL if we failed or if there is no POST data */

    while (pairs && !apr_is_empty_array(pairs)) {
        ap_form_pair_t *pair = (ap_form_pair_t *) apr_array_pop(pairs);
        apr_brigade_length(pair->value, 1, &len);
        size = (apr_size_t) len;
        buffer = apr_palloc(r->pool, size + 1);
        apr_brigade_flatten(pair->value, buffer, &size);
        buffer[len] = 0;

        g_hash_table_insert( request_post_data, apr_pstrdup(r->pool, pair->name), buffer);
    }

    return request_post_data;
}
Example #6
static apr_size_t
php_apache_sapi_read_post(char *buf, size_t count_bytes)
{
	apr_size_t len, tlen=0;
	php_struct *ctx = SG(server_context);
	request_rec *r;
	apr_bucket_brigade *brigade;

	r = ctx->r;
	brigade = ctx->brigade;
	len = count_bytes;

	/*
	 * This loop is needed because ap_get_brigade() can return us partial data
	 * which would cause premature termination of request read. Therefore we
	 * need to make sure that if data is available we fill the buffer completely.
	 */

	while (ap_get_brigade(r->input_filters, brigade, AP_MODE_READBYTES, APR_BLOCK_READ, len) == APR_SUCCESS) {
		apr_brigade_flatten(brigade, buf, &len);
		apr_brigade_cleanup(brigade);
		tlen += len;
		if (tlen == count_bytes || !len) {
			break;
		}
		buf += len;
		len = count_bytes - tlen;
	}

	return tlen;
}
Example #7
static void bind_post(int *count, lily_parse_state *parser, request_rec *r)
{
    if (*count == -1)
        return;

    lily_var *post_var = bind_hash_str_str_var(parser->symtab, "post");
    lily_hash_val *hash_val = post_var->value.hash;

    apr_array_header_t *pairs;
    apr_off_t len;
    apr_size_t size;
    char *buffer;
    char *sipkey = parser->vm->sipkey;
    lily_class *string_cls = lily_class_by_id(parser->symtab, SYM_CLASS_STRING);
    lily_sig *string_sig = string_cls->sig;

    /* Credit: I found out how to use this by reading httpd 2.4's mod_lua
       (specifically req_parsebody of lua_request.c). */
    int res = ap_parse_form_data(r, NULL, &pairs, -1, 1024 * 8);
    if (res == OK) {
        while (pairs && !apr_is_empty_array(pairs)) {
            ap_form_pair_t *pair = (ap_form_pair_t *) apr_array_pop(pairs);
            apr_brigade_length(pair->value, 1, &len);
            size = (apr_size_t) len;
            buffer = lily_malloc(size + 1);
            if (buffer == NULL) {
                *count = -1;
                return;
            }

            apr_brigade_flatten(pair->value, buffer, &size);
            buffer[len] = 0;

            lily_value *elem_key = bind_string(string_sig, pair->name);
            /* Give the buffer to the value to save memory. */
            lily_value *elem_value = bind_string_and_buffer(string_sig, buffer);
            lily_hash_elem *new_elem = bind_hash_elem_with_values(sipkey,
                    elem_key, elem_value);

            if (elem_key == NULL || elem_value == NULL || new_elem == NULL) {
                lily_free(new_elem);
                deref_destroy_value(elem_key);
                deref_destroy_value(elem_value);
                *count = -1;
                return;
            }

            new_elem->next = hash_val->elem_chain;
            hash_val->elem_chain = new_elem;
        }
    }

    (*count)++;
}
Example #8
/**
var post: Hash[String, Tainted[String]]

This contains key+value pairs that were sent to the server as POST variables.
Any pair that has a key or a value that is not valid utf-8 will not be present.
*/
static lily_value *load_var_post(lily_options *options, uint16_t *unused)
{
    lily_value *v = lily_new_empty_value();
    lily_move_hash_f(MOVE_DEREF_NO_GC, v, lily_new_hash_val());
    lily_hash_val *hash_val = v->value.hash;
    request_rec *r = (request_rec *)options->data;

    apr_array_header_t *pairs;
    apr_off_t len;
    apr_size_t size;
    char *buffer;

    /* Credit: I found out how to use this by reading httpd 2.4's mod_lua
       (specifically req_parsebody of lua_request.c). */
    int res = ap_parse_form_data(r, NULL, &pairs, -1, 1024 * 8);
    if (res == OK) {
        while (pairs && !apr_is_empty_array(pairs)) {
            ap_form_pair_t *pair = (ap_form_pair_t *) apr_array_pop(pairs);
            if (lily_is_valid_utf8(pair->name) == 0)
                continue;

            apr_brigade_length(pair->value, 1, &len);
            size = (apr_size_t) len;
            buffer = lily_malloc(size + 1);

            apr_brigade_flatten(pair->value, buffer, &size);
            buffer[len] = 0;

            /* The buffer has to be filled and terminated before it can be
               checked for valid utf-8. */
            if (lily_is_valid_utf8(buffer) == 0) {
                lily_free(buffer);
                continue;
            }

            lily_value *elem_key = lily_new_string(pair->name);
            /* Give the buffer to the value to save memory. */
            lily_value *elem_raw_value = lily_new_string_take(buffer);
            lily_value *elem_value = bind_tainted_of(elem_raw_value);

            apache_add_unique_hash_entry(options->sipkey, hash_val, elem_key,
                    elem_value);
        }
    }

    return v;
}
Example #9
/* tests that 'bb' flattens to string 'expect'. */
static void flatten_match(abts_case *tc, const char *ctx,
                          apr_bucket_brigade *bb,
                          const char *expect)
{
    apr_size_t elen = strlen(expect);
    char *buf = malloc(elen);
    apr_size_t len = elen;
    char msg[200];

    sprintf(msg, "%s: flatten brigade", ctx);
    apr_assert_success(tc, msg, apr_brigade_flatten(bb, buf, &len));
    sprintf(msg, "%s: length match (%ld not %ld)", ctx,
            (long)len, (long)elen);
    ABTS_ASSERT(tc, msg, len == elen);
    sprintf(msg, "%s: result match", msg);
    ABTS_STR_NEQUAL(tc, expect, buf, len);
    free(buf);
}
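A sketch of how the helper above might be driven from a test case; the pool p, bucket allocator ba, and abts_case tc are assumed to come from the surrounding test fixture:

    apr_bucket_brigade *bb = apr_brigade_create(p, ba);

    /* Two heap buckets that should flatten back into a single string. */
    apr_brigade_puts(bb, NULL, NULL, "hello ");
    apr_brigade_puts(bb, NULL, NULL, "world");
    flatten_match(tc, "two buckets", bb, "hello world");
    apr_brigade_destroy(bb);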
Example #10
static void
read_post_data(sl_vm_t* vm, sl_request_opts_t* opts, request_rec* r)
{
    apr_bucket_brigade* brigade = apr_brigade_create(r->pool, r->connection->bucket_alloc);
    size_t len = 1024;
    opts->post_length = 0;
    opts->post_data = NULL;
    while(ap_get_brigade(r->input_filters, brigade, AP_MODE_READBYTES, APR_BLOCK_READ, len) == APR_SUCCESS) {
        opts->post_data = sl_realloc(vm->arena, opts->post_data, opts->post_length + len);
        apr_brigade_flatten(brigade, opts->post_data + opts->post_length, &len);
        apr_brigade_cleanup(brigade);
        opts->post_length += len;
        if(!len) {
            break;
        }
        len = 1024;
    }
}
Example #11
static apr_size_t
php_apache_sapi_read_post(char *buf, size_t count_bytes)
{
	apr_size_t len, tlen=0;
	php_struct *ctx = SG(server_context);
	request_rec *r;
	apr_bucket_brigade *brigade;
	apr_status_t ret;

	r = ctx->r;
	brigade = ctx->brigade;
	len = count_bytes;

	/*
	 * This loop is needed because ap_get_brigade() can return us partial data
	 * which would cause premature termination of request read. Therefore we
	 * need to make sure that if data is available we fill the buffer completely.
	 */

	while ((ret=ap_get_brigade(r->input_filters, brigade, AP_MODE_READBYTES, APR_BLOCK_READ, len)) == APR_SUCCESS) {
		apr_brigade_flatten(brigade, buf, &len);
		apr_brigade_cleanup(brigade);
		tlen += len;
		if (tlen == count_bytes || !len) {
			break;
		}
		buf += len;
		len = count_bytes - tlen;
	}

	if (ret != APR_SUCCESS) {
		if (APR_STATUS_IS_TIMEUP(ret)) {
			SG(sapi_headers).http_response_code = php_ap_map_http_request_error(ret, HTTP_REQUEST_TIME_OUT);
		} else {
			SG(sapi_headers).http_response_code = php_ap_map_http_request_error(ret, HTTP_BAD_REQUEST);
		}
	}

	return tlen;
}
Example #12
h2_task_input *h2_task_input_create(h2_task_env *env, apr_pool_t *pool, 
                                    apr_bucket_alloc_t *bucket_alloc)
{
    h2_task_input *input = apr_pcalloc(pool, sizeof(h2_task_input));
    if (input) {
        input->env = env;
        input->bb = NULL;
        
        if (env->serialize_headers) {
            input->bb = apr_brigade_create(pool, bucket_alloc);
            apr_brigade_printf(input->bb, NULL, NULL, "%s %s HTTP/1.1\r\n", 
                               env->method, env->path);
            apr_table_do(ser_header, input, env->headers, NULL);
            apr_brigade_puts(input->bb, NULL, NULL, "\r\n");
            if (input->env->input_eos) {
                APR_BRIGADE_INSERT_TAIL(input->bb, apr_bucket_eos_create(bucket_alloc));
            }
        }
        else if (!input->env->input_eos) {
            input->bb = apr_brigade_create(pool, bucket_alloc);
        }
        else {
            /* We do not serialize and have eos already, no need to
             * create a bucket brigade. */
        }
        
        if (APLOGcdebug(&env->c)) {
            char buffer[1024];
            apr_size_t len = sizeof(buffer)-1;
            if (input->bb) {
                apr_brigade_flatten(input->bb, buffer, &len);
            }
            buffer[len] = 0;
            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, &env->c,
                          "h2_task_input(%s): request is: %s", 
                          env->id, buffer);
        }
    }
    return input;
}
Example #13
char* parse_form_from_POST(request_rec *r, int *in_size_) {
    char *buf;
    int in_size;
    apr_status_t rv;
    apr_bucket_brigade *bbin;
    apr_size_t bbin_size;
    const char *clen = apr_table_get(r->headers_in, "Content-Length");
    if (clen != NULL) {
        in_size = strtol(clen, NULL, 0);
        if (in_size >= WEBGFFARM_MAX_POST_SIZE) {
            ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "Content-Length too big. Content-Length: %d bytes; limit: %d", in_size, WEBGFFARM_MAX_POST_SIZE);
            *in_size_ = 0;
            return NULL;
        }
    } else {
        in_size = WEBGFFARM_MAX_POST_SIZE;
    }

    bbin = apr_brigade_create(r->pool, r->connection->bucket_alloc);
    rv = ap_get_brigade(r->input_filters, bbin, AP_MODE_READBYTES, APR_BLOCK_READ, in_size);
    if (rv != APR_SUCCESS) {
        ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, "[parse_form_from_POST] ap_get_brigade returns some error");
        return NULL;
    }
    bbin_size = in_size;
    buf = apr_palloc(r->pool, bbin_size);
    rv = apr_brigade_flatten(bbin, buf, &bbin_size);
    if (rv != APR_SUCCESS) {
        ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, "[parse_form_from_POST] apr_brigade_flatten returns some error");
        return NULL;
    }
    apr_brigade_destroy(bbin);
    *in_size_ = bbin_size;

    if (in_size != bbin_size) {
        ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, "[parse_form_from_POST] in_size is incorrect(in: %d, out:%d)", in_size, (int) bbin_size);
    }
    return buf;
}
Example #14
static ngx_inline ngx_int_t
ngx_http_modsecurity_save_request_body(ngx_http_request_t *r)
{
    ngx_http_modsecurity_ctx_t    *ctx;
    apr_off_t                      content_length;
    ngx_buf_t                     *buf;
    ngx_http_core_srv_conf_t      *cscf;
    size_t                         size;
    ngx_http_connection_t         *hc;

    ctx = ngx_http_get_module_ctx(r, ngx_http_modsecurity);

    apr_brigade_length(ctx->brigade, 0, &content_length);

    if (r->header_in->end - r->header_in->last >= content_length) {
        /* use r->header_in */

        if (ngx_buf_size(r->header_in)) {
            /* move to the end */
            ngx_memmove(r->header_in->pos + content_length,
                        r->header_in->pos,
                        ngx_buf_size(r->header_in));
        }

        if (apr_brigade_flatten(ctx->brigade,
                                (char *)r->header_in->pos,
                                (apr_size_t *)&content_length) != APR_SUCCESS) {
            return NGX_ERROR;
        }

        apr_brigade_cleanup(ctx->brigade);

        r->header_in->last += content_length;

        return NGX_OK;
    }

    if (ngx_buf_size(r->header_in)) {

        /*
         * ngx_http_set_keepalive will reuse r->header_in if
         * (r->header_in != c->buffer && r->header_in.last != r->header_in.end),
         * so we need this code block.
         * see ngx_http_set_keepalive, ngx_http_alloc_large_header_buffer
         */
        cscf = ngx_http_get_module_srv_conf(r, ngx_http_core_module);

        size = ngx_max(cscf->large_client_header_buffers.size,
                       (size_t)content_length + ngx_buf_size(r->header_in));

        hc = r->http_connection;

        if (hc->nfree && size == cscf->large_client_header_buffers.size) {

            buf = hc->free[--hc->nfree];

            ngx_log_debug2(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                           "ModSecurity: use http free large header buffer: %p %uz",
                           buf->pos, buf->end - buf->last);

        } else if (hc->nbusy < cscf->large_client_header_buffers.num) {

            if (hc->busy == NULL) {
                hc->busy = ngx_palloc(r->connection->pool,
                                      cscf->large_client_header_buffers.num * sizeof(ngx_buf_t *));
            }

            if (hc->busy == NULL) {
                return NGX_ERROR;
            } else {
                buf = ngx_create_temp_buf(r->connection->pool, size);
            }
        } else {
            /* TODO: how to deal this case ? */
            return NGX_ERROR;
        }

    } else {

        buf = ngx_create_temp_buf(r->pool, (size_t) content_length);
    }

    if (buf == NULL) {
        return NGX_ERROR;
    }

    if (apr_brigade_flatten(ctx->brigade, (char *)buf->pos,
                            (apr_size_t *)&content_length) != APR_SUCCESS) {
        return NGX_ERROR;
    }

    apr_brigade_cleanup(ctx->brigade);
    buf->last += content_length;

    ngx_memcpy(buf->last, r->header_in->pos, ngx_buf_size(r->header_in));
    buf->last += ngx_buf_size(r->header_in);

    r->header_in = buf;

    return NGX_OK;
}
Example #15
apr_status_t h2_task_input_read(h2_task_input *input,
                                ap_filter_t* f,
                                apr_bucket_brigade* bb,
                                ap_input_mode_t mode,
                                apr_read_type_e block,
                                apr_off_t readbytes)
{
    apr_status_t status = APR_SUCCESS;
    apr_off_t bblen = 0;
    
    ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
                  "h2_task_input(%s): read, block=%d, mode=%d, readbytes=%ld", 
                  input->task->id, block, mode, (long)readbytes);
    
    if (mode == AP_MODE_INIT) {
        return ap_get_brigade(f->c->input_filters, bb, mode, block, readbytes);
    }
    
    if (is_aborted(f)) {
        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
                      "h2_task_input(%s): is aborted", input->task->id);
        return APR_ECONNABORTED;
    }
    
    if (input->bb) {
        status = apr_brigade_length(input->bb, 1, &bblen);
        if (status != APR_SUCCESS) {
            ap_log_cerror(APLOG_MARK, APLOG_WARNING, status, f->c,
                          APLOGNO(02958) "h2_task_input(%s): brigade length fail", 
                          input->task->id);
            return status;
        }
    }
    
    if ((bblen == 0) && input->task->input_eos) {
        return APR_EOF;
    }
    
    while ((bblen == 0) || (mode == AP_MODE_READBYTES && bblen < readbytes)) {
        /* Get more data for our stream from mplx.
         */
        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
                      "h2_task_input(%s): get more data from mplx, block=%d, "
                      "readbytes=%ld, queued=%ld",
                      input->task->id, block, 
                      (long)readbytes, (long)bblen);
        
        /* Although we sometimes get called with APR_NONBLOCK_READs, 
         we seem to  fill our buffer blocking. Otherwise we get EAGAIN,
         return that to our caller and everyone throws up their hands,
         never calling us again. */
        status = h2_mplx_in_read(input->task->mplx, APR_BLOCK_READ,
                                 input->task->stream_id, input->bb, 
                                 input->task->io);
        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
                      "h2_task_input(%s): mplx in read returned",
                      input->task->id);
        if (status != APR_SUCCESS) {
            return status;
        }
        status = apr_brigade_length(input->bb, 1, &bblen);
        if (status != APR_SUCCESS) {
            return status;
        }
        if ((bblen == 0) && (block == APR_NONBLOCK_READ)) {
            return h2_util_has_eos(input->bb, -1)? APR_EOF : APR_EAGAIN;
        }
        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
                      "h2_task_input(%s): mplx in read, %ld bytes in brigade",
                      input->task->id, (long)bblen);
    }
    
    ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
                  "h2_task_input(%s): read, mode=%d, block=%d, "
                  "readbytes=%ld, queued=%ld",
                  input->task->id, mode, block, 
                  (long)readbytes, (long)bblen);
           
    if (!APR_BRIGADE_EMPTY(input->bb)) {
        if (mode == AP_MODE_EXHAUSTIVE) {
            /* return all we have */
            return h2_util_move(bb, input->bb, readbytes, NULL, 
                                "task_input_read(exhaustive)");
        }
        else if (mode == AP_MODE_READBYTES) {
            return h2_util_move(bb, input->bb, readbytes, NULL, 
                                "task_input_read(readbytes)");
        }
        else if (mode == AP_MODE_SPECULATIVE) {
            /* return not more than was asked for */
            return h2_util_copy(bb, input->bb, readbytes,  
                                "task_input_read(speculative)");
        }
        else if (mode == AP_MODE_GETLINE) {
            /* we are reading a single LF line, e.g. the HTTP headers */
            status = apr_brigade_split_line(bb, input->bb, block, 
                                            HUGE_STRING_LEN);
            if (APLOGctrace1(f->c)) {
                char buffer[1024];
                apr_size_t len = sizeof(buffer)-1;
                apr_brigade_flatten(bb, buffer, &len);
                buffer[len] = 0;
                ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
                              "h2_task_input(%s): getline: %s",
                              input->task->id, buffer);
            }
            return status;
        }
        else {
            /* Hmm, well. There is mode AP_MODE_EATCRLF, but we chose not
             * to support it. Seems to work. */
            ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOTIMPL, f->c,
                          APLOGNO(02942) 
                          "h2_task_input, unsupported READ mode %d", mode);
            return APR_ENOTIMPL;
        }
    }
    
    if (is_aborted(f)) {
        return APR_ECONNABORTED;
    }
    
    return (block == APR_NONBLOCK_READ)? APR_EAGAIN : APR_EOF;
}
Example #16
static int pgasp_handler (request_rec * r)
{
   char cursor_string[256];
   pgasp_config* config = (pgasp_config*) ap_get_module_config(r->server->module_config, &pgasp_module ) ;
   pgasp_dir_config* dir_config = (pgasp_dir_config*) ap_get_module_config(r->per_dir_config, &pgasp_module ) ;
   apr_table_t * GET = NULL, *GETargs = NULL;
   apr_array_header_t * POST;
   PGconn * pgc;
   PGresult * pgr;
   int i, j, allowed_to_serve, filename_length = 0;
   int field_count, tuple_count;
   char * requested_file;
   char *basename;
   params_t params;

   /* PQexecParams doesn't seem to like zero-length strings, so we feed it a dummy */
   const char * dummy_get = "nothing";
   const char * dummy_user = "******";

   const char * cursor_values[2] = { r -> args ? apr_pstrdup(r->pool, r -> args) : dummy_get, r->user ? r->user : dummy_user };
   int cursor_value_lengths[2] = { strlen(cursor_values[0]), strlen(cursor_values[1]) };
   int cursor_value_formats[2] = { 0, 0 };

   if (!r -> handler || strcmp (r -> handler, "pgasp-handler") ) return DECLINED;
   if (!r -> method || (strcmp (r -> method, "GET") && strcmp (r -> method, "POST")) ) return DECLINED;

   if (config->is_enabled != true) return OK; /* pretending we have responded, may return DECLINED in the future */

   requested_file = apr_pstrdup (r -> pool, r -> path_info /*filename*/);
   i = strlen(requested_file) - 1;

   while (i > 0)
   {
     if (requested_file[i] == '.') filename_length = i;
     if (requested_file[i] == '/') break;
     i--;
   }

   if (i >= 0) {
     requested_file += i+1; /* now pointing to foo.pgasp instead of /var/www/.../foo.pgasp */
     if (filename_length > i) filename_length -= i+1;
   }

   allowed_to_serve = false;

   for (i = 0; i < config->allowed_count; i++)
   {
      if (!strcmp(config->allowed[i], requested_file))
      {
         allowed_to_serve = true;
         break;
      }
   }
   if (config->allowed_count == 0) allowed_to_serve = true;

   if (!allowed_to_serve)
   {
      ap_set_content_type(r, "text/plain");
      ap_rprintf(r, "Hello there\nThis is PGASP\nEnabled: %s\n", config->is_enabled ? "On" : "Off");
      ap_rprintf(r, "Requested: %s\n", requested_file);
      ap_rprintf(r, "Allowed: %s\n", allowed_to_serve ? "Yes" : "No");

      return OK; /* pretending we have served the file, may return HTTP_FORBIDDEN in the future */
   }

   if (filename_length == 0) {
     basename = requested_file;
   } else {
     basename = apr_pstrndup(r->pool, requested_file, filename_length);
   }

   ap_args_to_table(r, &GETargs);
   if (OK != ap_parse_form_data(r, NULL, &POST, -1, (~((apr_size_t)0)))) {
     __(r->server, " ** ap_parse_form_data is NOT OK");
   }
   GET = (NULL == GET) ? GETargs : apr_table_overlay(r->pool, GETargs, GET);

   // move all POST parameters into GET table
   {
     ap_form_pair_t *pair;
     char *buffer;
     apr_off_t len;
     apr_size_t size;
     while (NULL != (pair = apr_array_pop(POST))) {
       apr_brigade_length(pair->value, 1, &len);
       size = (apr_size_t) len;
       buffer = apr_palloc(r->pool, size + 1);
       apr_brigade_flatten(pair->value, buffer, &size);
       buffer[len] = 0;
       apr_table_setn(GET, apr_pstrdup(r->pool, pair->name), buffer); //should name and value be ap_unescape_url() -ed?
       //       __(r->server, "POST[%s]: %s", pair->name, buffer);
     }
   }

   params.r = r;
   params.args = NULL;
   apr_table_do(tab_args, &params, GET, NULL);
   params.args = apr_pstrcat(r->pool, "&", params.args, "&", NULL);

   cursor_values[0] = params.args;
   cursor_value_lengths[0] = strlen(cursor_values[0]);

   /* set response content type according to configuration or to default value */
   ap_set_content_type(r, dir_config->content_type_set ? dir_config->content_type : "text/html");

   /* now connecting to Postgres, getting function output, and printing it */

   pgc = pgasp_pool_open (r->server);

   if (PQstatus(pgc) != CONNECTION_OK)
   {
      spit_pg_error ("connect");
      pgasp_pool_close(r->server, pgc);
      return OK;
   }

   /* removing extension (.pgasp or other) from the file name and adding "f_" for the function name, i.e. foo.pgasp becomes f_foo() */
   snprintf(cursor_string,
	    sizeof(cursor_string),
	    "select * from f_%s($1::varchar)",
	    basename);

   /* passing GET as first (and only) parameter */
   if (0 == PQsendQueryParams (pgc, cursor_string, 1, NULL, cursor_values, cursor_value_lengths, cursor_value_formats, 0)) {
      spit_pg_error ("sending async query with params");
      return clean_up_connection(r->server);
   }

   if (0 == PQsetSingleRowMode(pgc)) {
     ap_log_error(APLOG_MARK, APLOG_WARNING, 0, r->server, "cannot switch to single-row mode to fetch data");
   }

   while (NULL != (pgr = PQgetResult(pgc))) {

     if (PQresultStatus(pgr) != PGRES_TUPLES_OK && PQresultStatus(pgr) != PGRES_SINGLE_TUPLE) {
       spit_pg_error ("fetch data");
       return clean_up_connection(r->server);
     }

     /* the following counts and for-loop may seem excessive as it's just 1 row/1 field, but might need it in the future */

     field_count = PQnfields(pgr);
     tuple_count = PQntuples(pgr);

     for (i = 0; i < tuple_count; i++)
       {
	 for (j = 0; j < field_count; j++) ap_rprintf(r, "%s", PQgetvalue(pgr, i, j));
	 ap_rprintf(r, "\n");
       }
     PQclear (pgr);
   }
   pgasp_pool_close(r->server, pgc);

   return OK;
}
Example #17
static ngx_inline ngx_int_t
ngx_http_modsecurity_save_request_body(ngx_http_request_t *r)
{
    ngx_http_modsecurity_ctx_t    *ctx;
    apr_off_t                      content_length;
    ngx_buf_t                     *buf;
    ngx_http_core_srv_conf_t      *cscf;
    size_t                         size;
    ngx_http_connection_t         *hc;

    ctx = ngx_http_get_module_ctx(r, ngx_http_modsecurity);

    apr_brigade_length(ctx->brigade, 0, &content_length);

    if (r->header_in->end - r->header_in->last >= content_length) {
        /* use r->header_in */

        if (ngx_buf_size(r->header_in)) {
            /* move to the end */
            ngx_memmove(r->header_in->pos + content_length,
                        r->header_in->pos,
                        ngx_buf_size(r->header_in));
        }

        if (apr_brigade_flatten(ctx->brigade,
                                (char *)r->header_in->pos,
                                (apr_size_t *)&content_length) != APR_SUCCESS) {
            return NGX_ERROR;
        }

        apr_brigade_cleanup(ctx->brigade);

        r->header_in->last += content_length;

        return NGX_OK;
    }

    if (ngx_buf_size(r->header_in)) {

        /*
         * ngx_http_set_keepalive will reuse r->header_in if
         * (r->header_in != c->buffer && r->header_in.last != r->header_in.end),
         * so we need this code block.
         * see ngx_http_set_keepalive, ngx_http_alloc_large_header_buffer
         */
        cscf = ngx_http_get_module_srv_conf(r, ngx_http_core_module);

        size = ngx_max(cscf->large_client_header_buffers.size,
                       (size_t)content_length + ngx_buf_size(r->header_in));

        hc = r->http_connection;

#if defined(nginx_version) && nginx_version >= 1011011
        if (hc->free && size == cscf->large_client_header_buffers.size) {

            buf = hc->free->buf;
#else
        if (hc->nfree && size == cscf->large_client_header_buffers.size) {

            buf = hc->free[--hc->nfree];
#endif

            ngx_log_debug2(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                           "ModSecurity: use http free large header buffer: %p %uz",
                           buf->pos, buf->end - buf->last);

        } else if (hc->nbusy < cscf->large_client_header_buffers.num) {

            if (hc->busy == NULL) {
                hc->busy = ngx_palloc(r->connection->pool,
                                      cscf->large_client_header_buffers.num * sizeof(ngx_buf_t *));
            }

            if (hc->busy == NULL) {
                return NGX_ERROR;
            } else {
                buf = ngx_create_temp_buf(r->connection->pool, size);
            }
        } else {
            /* TODO: how to deal this case ? */
            return NGX_ERROR;
        }

    } else {

        buf = ngx_create_temp_buf(r->pool, (size_t) content_length);
    }

    if (buf == NULL) {
        return NGX_ERROR;
    }

    if (apr_brigade_flatten(ctx->brigade, (char *)buf->pos,
                            (apr_size_t *)&content_length) != APR_SUCCESS) {
        return NGX_ERROR;
    }

    apr_brigade_cleanup(ctx->brigade);
    buf->last += content_length;

    ngx_memcpy(buf->last, r->header_in->pos, ngx_buf_size(r->header_in));
    buf->last += ngx_buf_size(r->header_in);

    r->header_in = buf;

    return NGX_OK;
}


static ngx_inline ngx_int_t
ngx_http_modsecurity_load_headers_out(ngx_http_request_t *r)
{

    ngx_http_modsecurity_ctx_t  *ctx;
    char                        *data;
    request_rec                 *req;
    ngx_http_variable_value_t   *vv;
    ngx_list_part_t             *part;
    ngx_table_elt_t             *h;
    ngx_uint_t                   i;
    char                        *key, *value;
    u_char                      *buf = NULL;
    size_t                       size = 0;

    ctx = ngx_http_get_module_ctx(r, ngx_http_modsecurity);
    req = ctx->req;

    req->status = r->headers_out.status;
    req->status_line = (char *)ngx_pstrdup0(r->pool, &r->headers_out.status_line);

    /* deep copy */
    part = &r->headers_out.headers.part;
    h = part->elts;

    for (i = 0; ; i++) {
        if (i >= part->nelts) {
            if (part->next == NULL)
                break;

            part = part->next;
            h = part->elts;
            i = 0;
        }
        size += h[i].key.len + h[i].value.len + 2;

        buf = ngx_palloc(r->pool, size);

        if (buf == NULL) {
            return NGX_ERROR;
        }

        key = (char *)buf;
        buf = ngx_cpymem(buf, h[i].key.data, h[i].key.len);
        *buf++ = '\0';

        value = (char *)buf;
        buf = ngx_cpymem(buf, h[i].value.data, h[i].value.len);
        *buf++ = '\0';

        apr_table_addn(req->headers_out, key, value);
        ngx_log_debug2(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                       "ModSecurity: load headers out: \"%V: %V\"",
                       &h[i].key, &h[i].value);

    }

    for (i = 0; special_headers_out[i].name; i++) {

        vv = ngx_http_get_variable(r, &special_headers_out[i].variable_name,
                                   ngx_hash_key(special_headers_out[i].variable_name.data,
                                                special_headers_out[i].variable_name.len));

        if (vv && !vv->not_found) {

            data = ngx_palloc(r->pool, vv->len + 1);
            if (data == NULL) {
                return NGX_ERROR;
            }

            ngx_memcpy(data,vv->data, vv->len);
            data[vv->len] = '\0';

            apr_table_setn(req->headers_out, special_headers_out[i].name, data);
            ngx_log_debug2(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                           "ModSecurity: load headers out: \"%s: %s\"",
                           special_headers_out[i].name, data);
        }
    }

    req->content_type = apr_table_get(ctx->req->headers_out, "Content-Type");
    req->content_encoding = apr_table_get(ctx->req->headers_out, "Content-Encoding");

    data = (char *)apr_table_get(ctx->req->headers_out, "Content-Languages");

    if(data != NULL)
    {
        ctx->req->content_languages = apr_array_make(ctx->req->pool, 1, sizeof(const char *));
        *(const char **)apr_array_push(ctx->req->content_languages) = data;
    }

    /* req->chunked = r->chunked; may be useless */
    req->clength = r->headers_out.content_length_n;
    req->mtime = apr_time_make(r->headers_out.last_modified_time, 0);

    ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                   "ModSecurity: load headers out done");

    return NGX_OK;
}
Example #18
static apr_status_t readfile_heartbeats(const char *path, apr_hash_t *servers,
                                    apr_pool_t *pool)
{
    apr_finfo_t fi;
    apr_status_t rv;
    apr_file_t *fp;

    if (!path) {
        return APR_SUCCESS;
    }

    rv = apr_file_open(&fp, path, APR_READ|APR_BINARY|APR_BUFFERED,
                       APR_OS_DEFAULT, pool);

    if (rv) {
        return rv;
    }

    rv = apr_file_info_get(&fi, APR_FINFO_SIZE, fp);

    if (rv) {
        return rv;
    }

    {
        char *t;
        int lineno = 0;
        apr_bucket_alloc_t *ba = apr_bucket_alloc_create(pool);
        apr_bucket_brigade *bb = apr_brigade_create(pool, ba);
        apr_bucket_brigade *tmpbb = apr_brigade_create(pool, ba);
        apr_table_t *hbt = apr_table_make(pool, 10);

        apr_brigade_insert_file(bb, fp, 0, fi.size, pool);

        do {
            hb_server_t *server;
            char buf[4096];
            apr_size_t bsize = sizeof(buf);
            const char *ip;

            apr_brigade_cleanup(tmpbb);

            if (APR_BRIGADE_EMPTY(bb)) {
                break;
            }

            rv = apr_brigade_split_line(tmpbb, bb,
                                        APR_BLOCK_READ, sizeof(buf));
            lineno++;

            if (rv) {
                return rv;
            }

            apr_brigade_flatten(tmpbb, buf, &bsize);

            if (bsize == 0) {
                break;
            }

            buf[bsize - 1] = 0;

            /* comment */
            if (buf[0] == '#') {
                continue;
            }

            /* line format: <IP> <query_string>\n */
            t = strchr(buf, ' ');
            if (!t) {
                continue;
            }

            ip = apr_pstrmemdup(pool, buf, t - buf);
            t++;

            server = apr_hash_get(servers, ip, APR_HASH_KEY_STRING);

            if (server == NULL) {
                server = apr_pcalloc(pool, sizeof(hb_server_t));
                server->ip = ip;
                server->port = 80;
                server->seen = -1;

                apr_hash_set(servers, server->ip, APR_HASH_KEY_STRING, server);
            }

            apr_table_clear(hbt);

            argstr_to_table(pool, apr_pstrdup(pool, t), hbt);

            if (apr_table_get(hbt, "busy")) {
                server->busy = atoi(apr_table_get(hbt, "busy"));
            }

            if (apr_table_get(hbt, "ready")) {
                server->ready = atoi(apr_table_get(hbt, "ready"));
            }

            if (apr_table_get(hbt, "lastseen")) {
                server->seen = atoi(apr_table_get(hbt, "lastseen"));
            }

            if (apr_table_get(hbt, "port")) {
                server->port = atoi(apr_table_get(hbt, "port"));
            }

            if (server->busy == 0 && server->ready != 0) {
                /* Server has zero threads active, but lots of them ready,
                 * it likely just started up, so lets /4 the number ready,
                 * to prevent us from completely flooding it with all new
                 * requests.
                 */
                server->ready = server->ready / 4;
            }

        } while (1);
    }

    return APR_SUCCESS;
}
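The line-reading core of the function above, reduced to a sketch; bb, tmpbb and rv are assumed to be declared and set up as in the example, and this is not a drop-in replacement:

    char line[4096];
    apr_size_t n = sizeof(line);

    apr_brigade_cleanup(tmpbb);
    /* Move one LF-terminated line from bb into tmpbb, then flatten it. */
    rv = apr_brigade_split_line(tmpbb, bb, APR_BLOCK_READ, sizeof(line));
    if (rv == APR_SUCCESS) {
        apr_brigade_flatten(tmpbb, line, &n);
        if (n > 0) {
            line[n - 1] = '\0';   /* overwrite the trailing newline, as above */
        }
    }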
Example #19
static int parse_form_from_POST(request_rec* r, apr_hash_t** form) {
  int bytes, eos = 0;
  apr_size_t count;
  apr_status_t rv;
  apr_bucket_brigade *bb;
  apr_bucket_brigade *bbin;
  char *buf;
  apr_bucket *b;
  apr_bucket *nextb;
  const char *clen = apr_table_get(r->headers_in, "Content-Length");
  if(clen != NULL) {
    bytes = strtol(clen, NULL, 0);
    if(bytes >= MAX_SIZE) {
      ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
		    "Request too big (%d bytes; limit %d)",
		    bytes, MAX_SIZE);
      return HTTP_REQUEST_ENTITY_TOO_LARGE;
    }
  } else {
    bytes = MAX_SIZE;
  }

  bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
  bbin = apr_brigade_create(r->pool, r->connection->bucket_alloc);
  count = 0;

  do {
    rv = ap_get_brigade(r->input_filters, bbin, AP_MODE_READBYTES,
			APR_BLOCK_READ, bytes);
    if(rv != APR_SUCCESS) {
      ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
		    "failed to read from input");
      return HTTP_INTERNAL_SERVER_ERROR;
    }
    for (b = APR_BRIGADE_FIRST(bbin);
	 b != APR_BRIGADE_SENTINEL(bbin);
	 b = nextb ) {
      nextb = APR_BUCKET_NEXT(b);
      if(APR_BUCKET_IS_EOS(b) ) {
	eos = 1;
      }
      if (!APR_BUCKET_IS_METADATA(b)) {
	if(b->length != (apr_size_t)(-1)) {
	  count += b->length;
	  if(count > MAX_SIZE) {
	    /* This is more data than we accept, so we're
	     * going to kill the request. But we have to
	     * mop it up first.
	     */
	    apr_bucket_delete(b);
	  }
	}
      }
      if(count <= MAX_SIZE) {
	APR_BUCKET_REMOVE(b);
	APR_BRIGADE_INSERT_TAIL(bb, b);
      }
    }
  } while(!eos);

  /* OK, done with the data. Kill the request if we got too much data. */
  if(count > MAX_SIZE) {
    ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
		  "Request too big (%d bytes; limit %d)",
		  bytes, MAX_SIZE);
    return HTTP_REQUEST_ENTITY_TOO_LARGE;
  }

  /* We've got all the data. Now put it in a buffer and parse it. */
  buf = apr_palloc(r->pool, count+1);
  rv = apr_brigade_flatten(bb, buf, &count);
  if(rv != APR_SUCCESS) {
    ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
		  "Error (flatten) reading from data");
    return HTTP_INTERNAL_SERVER_ERROR;
  }
  buf[count] = '\0';
  *form = parse_form_from_string(r, buf);
  
  return OK;

}
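A hypothetical handler fragment calling the function above; parse_form_from_string and any lookups into the resulting hash are assumed to be defined elsewhere in the module:

    apr_hash_t *form = NULL;
    int ret = parse_form_from_POST(r, &form);

    if (ret != OK) {
        return ret;   /* HTTP_REQUEST_ENTITY_TOO_LARGE or HTTP_INTERNAL_SERVER_ERROR */
    }
    /* form now maps field names to the values parsed from the POST body. */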
Example #20
static apr_status_t dispatch(proxy_conn_rec *conn, proxy_dir_conf *conf,
                             request_rec *r, apr_pool_t *setaside_pool,
                             apr_uint16_t request_id, const char **err,
                             int *bad_request, int *has_responded)
{
    apr_bucket_brigade *ib, *ob;
    int seen_end_of_headers = 0, done = 0, ignore_body = 0;
    apr_status_t rv = APR_SUCCESS;
    int script_error_status = HTTP_OK;
    conn_rec *c = r->connection;
    struct iovec vec[2];
    ap_fcgi_header header;
    unsigned char farray[AP_FCGI_HEADER_LEN];
    apr_pollfd_t pfd;
    int header_state = HDR_STATE_READING_HEADERS;
    char stack_iobuf[AP_IOBUFSIZE];
    apr_size_t iobuf_size = AP_IOBUFSIZE;
    char *iobuf = stack_iobuf;

    *err = NULL;
    if (conn->worker->s->io_buffer_size_set) {
        iobuf_size = conn->worker->s->io_buffer_size;
        iobuf = apr_palloc(r->pool, iobuf_size);
    }

    pfd.desc_type = APR_POLL_SOCKET;
    pfd.desc.s = conn->sock;
    pfd.p = r->pool;
    pfd.reqevents = APR_POLLIN | APR_POLLOUT;

    ib = apr_brigade_create(r->pool, c->bucket_alloc);
    ob = apr_brigade_create(r->pool, c->bucket_alloc);

    while (! done) {
        apr_interval_time_t timeout;
        apr_size_t len;
        int n;

        /* We need SOME kind of timeout here, or virtually anything will
         * cause timeout errors. */
        apr_socket_timeout_get(conn->sock, &timeout);

        rv = apr_poll(&pfd, 1, &n, timeout);
        if (rv != APR_SUCCESS) {
            if (APR_STATUS_IS_EINTR(rv)) {
                continue;
            }
            *err = "polling";
            break;
        }

        if (pfd.rtnevents & APR_POLLOUT) {
            apr_size_t to_send, writebuflen;
            int last_stdin = 0;
            char *iobuf_cursor;

            rv = ap_get_brigade(r->input_filters, ib,
                                AP_MODE_READBYTES, APR_BLOCK_READ,
                                iobuf_size);
            if (rv != APR_SUCCESS) {
                *err = "reading input brigade";
                *bad_request = 1;
                break;
            }

            if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(ib))) {
                last_stdin = 1;
            }

            writebuflen = iobuf_size;

            rv = apr_brigade_flatten(ib, iobuf, &writebuflen);

            apr_brigade_cleanup(ib);

            if (rv != APR_SUCCESS) {
                *err = "flattening brigade";
                break;
            }

            to_send = writebuflen;
            iobuf_cursor = iobuf;
            while (to_send > 0) {
                int nvec = 0;
                apr_size_t write_this_time;

                write_this_time =
                    to_send < AP_FCGI_MAX_CONTENT_LEN ? to_send : AP_FCGI_MAX_CONTENT_LEN;

                ap_fcgi_fill_in_header(&header, AP_FCGI_STDIN, request_id,
                                       (apr_uint16_t)write_this_time, 0);
                ap_fcgi_header_to_array(&header, farray);

                vec[nvec].iov_base = (void *)farray;
                vec[nvec].iov_len = sizeof(farray);
                ++nvec;
                if (writebuflen) {
                    vec[nvec].iov_base = iobuf_cursor;
                    vec[nvec].iov_len = write_this_time;
                    ++nvec;
                }

                rv = send_data(conn, vec, nvec, &len);
                if (rv != APR_SUCCESS) {
                    *err = "sending stdin";
                    break;
                }

                to_send -= write_this_time;
                iobuf_cursor += write_this_time;
            }
            if (rv != APR_SUCCESS) {
                break;
            }

            if (last_stdin) {
                pfd.reqevents = APR_POLLIN; /* Done with input data */

                /* signal EOF (empty FCGI_STDIN) */
                ap_fcgi_fill_in_header(&header, AP_FCGI_STDIN, request_id,
                                       0, 0);
                ap_fcgi_header_to_array(&header, farray);

                vec[0].iov_base = (void *)farray;
                vec[0].iov_len = sizeof(farray);

                rv = send_data(conn, vec, 1, &len);
                if (rv != APR_SUCCESS) {
                    *err = "sending empty stdin";
                    break;
                }
            }
        }

        if (pfd.rtnevents & APR_POLLIN) {
            apr_size_t readbuflen;
            apr_uint16_t clen, rid;
            apr_bucket *b;
            unsigned char plen;
            unsigned char type, version;

            /* First, we grab the header... */
            rv = get_data_full(conn, (char *) farray, AP_FCGI_HEADER_LEN);
            if (rv != APR_SUCCESS) {
                ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01067)
                              "Failed to read FastCGI header");
                break;
            }

            ap_log_rdata(APLOG_MARK, APLOG_TRACE8, r, "FastCGI header",
                         farray, AP_FCGI_HEADER_LEN, 0);

            ap_fcgi_header_fields_from_array(&version, &type, &rid,
                                             &clen, &plen, farray);

            if (version != AP_FCGI_VERSION_1) {
                ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01068)
                              "Got bogus version %d", (int)version);
                rv = APR_EINVAL;
                break;
            }

            if (rid != request_id) {
                ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01069)
                              "Got bogus rid %d, expected %d",
                              rid, request_id);
                rv = APR_EINVAL;
                break;
            }

recv_again:
            if (clen > iobuf_size) {
                readbuflen = iobuf_size;
            } else {
                readbuflen = clen;
            }

            /* Now get the actual data.  Yes it sucks to do this in a second
             * recv call, this will eventually change when we move to real
             * nonblocking recv calls. */
            if (readbuflen != 0) {
                rv = get_data(conn, iobuf, &readbuflen);
                if (rv != APR_SUCCESS) {
                    *err = "reading response body";
                    break;
                }
            }

            switch (type) {
            case AP_FCGI_STDOUT:
                if (clen != 0) {
                    b = apr_bucket_transient_create(iobuf,
                                                    readbuflen,
                                                    c->bucket_alloc);

                    APR_BRIGADE_INSERT_TAIL(ob, b);

                    if (! seen_end_of_headers) {
                        int st = handle_headers(r, &header_state,
                                                iobuf, readbuflen);

                        if (st == 1) {
                            int status;
                            seen_end_of_headers = 1;

                            status = ap_scan_script_header_err_brigade_ex(r, ob,
                                NULL, APLOG_MODULE_INDEX);
                            /* suck in all the rest */
                            if (status != OK) {
                                apr_bucket *tmp_b;
                                apr_brigade_cleanup(ob);
                                tmp_b = apr_bucket_eos_create(c->bucket_alloc);
                                APR_BRIGADE_INSERT_TAIL(ob, tmp_b);

                                *has_responded = 1;
                                r->status = status;
                                rv = ap_pass_brigade(r->output_filters, ob);
                                if (rv != APR_SUCCESS) {
                                    *err = "passing headers brigade to output filters";
                                }
                                else if (status == HTTP_NOT_MODIFIED) {
                                    /* The 304 response MUST NOT contain
                                     * a message-body, ignore it. */
                                    ignore_body = 1;
                                }
                                else {
                                    ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01070)
                                                    "Error parsing script headers");
                                    rv = APR_EINVAL;
                                }
                                break;
                            }

                            if (conf->error_override &&
                                ap_is_HTTP_ERROR(r->status)) {
                                /*
                                 * set script_error_status to discard
                                 * everything after the headers
                                 */
                                script_error_status = r->status;
                                /*
                                 * prevent ap_die() from treating this as a
                                 * recursive error, initially:
                                 */
                                r->status = HTTP_OK;
                            }

                            if (script_error_status == HTTP_OK
                                && !APR_BRIGADE_EMPTY(ob) && !ignore_body) {
                                /* Send the part of the body that we read while
                                 * reading the headers.
                                 */
                                *has_responded = 1;
                                rv = ap_pass_brigade(r->output_filters, ob);
                                if (rv != APR_SUCCESS) {
                                    *err = "passing brigade to output filters";
                                    break;
                                }
                            }
                            apr_brigade_cleanup(ob);

                            apr_pool_clear(setaside_pool);
                        }
                        else {
                            /* We're still looking for the end of the
                             * headers, so this part of the data will need
                             * to persist. */
                            apr_bucket_setaside(b, setaside_pool);
                        }
                    } else {
                        /* we've already passed along the headers, so now pass
                         * through the content.  we could simply continue to
                         * setaside the content and not pass until we see the
                         * 0 content-length (below, where we append the EOS),
                         * but that could be a huge amount of data; so we pass
                         * along smaller chunks
                         */
                        if (script_error_status == HTTP_OK && !ignore_body) {
                            *has_responded = 1;
                            rv = ap_pass_brigade(r->output_filters, ob);
                            if (rv != APR_SUCCESS) {
                                *err = "passing brigade to output filters";
                                break;
                            }
                        }
                        apr_brigade_cleanup(ob);
                    }

                    /* If we didn't read all the data, go back and get the
                     * rest of it. */
                    if (clen > readbuflen) {
                        clen -= readbuflen;
                        goto recv_again;
                    }
                } else {
                    /* XXX what if we haven't seen end of the headers yet? */

                    if (script_error_status == HTTP_OK) {
                        b = apr_bucket_eos_create(c->bucket_alloc);
                        APR_BRIGADE_INSERT_TAIL(ob, b);

                        *has_responded = 1;
                        rv = ap_pass_brigade(r->output_filters, ob);
                        if (rv != APR_SUCCESS) {
                            *err = "passing brigade to output filters";
                            break;
                        }
                    }

                    /* XXX Why don't we cleanup here?  (logic from AJP) */
                }
                break;

            case AP_FCGI_STDERR:
                /* TODO: Should probably clean up this logging a bit... */
                if (clen) {
                    ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01071)
                                  "Got error '%.*s'", (int)readbuflen, iobuf);
                }

                if (clen > readbuflen) {
                    clen -= readbuflen;
                    goto recv_again;
                }
                break;

            case AP_FCGI_END_REQUEST:
                done = 1;
                break;

            default:
                ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01072)
                              "Got bogus record %d", type);
                break;
            }
            /* Leave on above switch's inner error. */
            if (rv != APR_SUCCESS) {
                break;
            }

            if (plen) {
                rv = get_data_full(conn, iobuf, plen);
                if (rv != APR_SUCCESS) {
                    ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(02537)
                                  "Error occurred reading padding");
                    break;
                }
            }
        }
    }

    apr_brigade_destroy(ib);
    apr_brigade_destroy(ob);

    if (script_error_status != HTTP_OK) {
        ap_die(script_error_status, r); /* send ErrorDocument */
        *has_responded = 1;
    }

    return rv;
}
Example #21
/* read post body. code taken from "The apache modules book, Nick Kew" */
static void read_post_body(mapcache_context_apache_request *ctx, mapcache_request_proxy *p) {
  request_rec *r = ctx->request;
  mapcache_context *mctx = (mapcache_context*)ctx;
  int bytes, eos = 0;
  apr_bucket_brigade *bb, *bbin;
  apr_bucket *b;
  apr_status_t rv;
  const char *clen = apr_table_get(r->headers_in, "Content-Length");
  if(clen) {
    bytes = strtol(clen, NULL, 0);
    if(bytes >= p->rule->max_post_len) {
      mctx->set_error(mctx, HTTP_REQUEST_ENTITY_TOO_LARGE, "post request too big");
      return;
    }
  } else {
    bytes = p->rule->max_post_len;
  } 

  bb = apr_brigade_create(mctx->pool, r->connection->bucket_alloc);
  bbin = apr_brigade_create(mctx->pool, r->connection->bucket_alloc);
  p->post_len = 0;

  do {
    rv = ap_get_brigade(r->input_filters, bbin, AP_MODE_READBYTES, APR_BLOCK_READ, bytes);
    if(rv != APR_SUCCESS) {
      mctx->set_error(mctx, 500, "failed to read form input");
      return;
    }
    for(b = APR_BRIGADE_FIRST(bbin); b != APR_BRIGADE_SENTINEL(bbin); b = APR_BUCKET_NEXT(b)) {
      if(APR_BUCKET_IS_EOS(b)) {
        eos = 1;
      }
    }
    if(!APR_BUCKET_IS_METADATA(b)) {
      if(b->length != (apr_size_t)(-1)) {
        p->post_len += b->length;
        if(p->post_len > p->rule->max_post_len) {
          apr_bucket_delete(b);
        }
      }
    }
    if(p->post_len <= p->rule->max_post_len) {
      APR_BUCKET_REMOVE(b);
      APR_BRIGADE_INSERT_TAIL(bb, b);
    }
  } while (!eos);

  if(p->post_len > p->rule->max_post_len) {
    mctx->set_error(mctx, HTTP_REQUEST_ENTITY_TOO_LARGE, "request too big");
    return;
  }

  p->post_buf = apr_palloc(mctx->pool, p->post_len+1);

  rv = apr_brigade_flatten(bb, p->post_buf, &(p->post_len));
  if(rv != APR_SUCCESS) {
    mctx->set_error(mctx, 500, "error (flatten) reading form data");
    return;
  }
  p->post_buf[p->post_len] = 0;
}
Example #22
static apr_status_t h2_filter_slave_in(ap_filter_t* f,
                                       apr_bucket_brigade* bb,
                                       ap_input_mode_t mode,
                                       apr_read_type_e block,
                                       apr_off_t readbytes)
{
    h2_task *task;
    apr_status_t status = APR_SUCCESS;
    apr_bucket *b, *next;
    apr_off_t bblen;
    const int trace1 = APLOGctrace1(f->c);
    apr_size_t rmax = ((readbytes <= APR_SIZE_MAX)? 
                       (apr_size_t)readbytes : APR_SIZE_MAX);
    
    task = h2_ctx_cget_task(f->c);
    ap_assert(task);

    if (trace1) {
        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
                      "h2_slave_in(%s): read, mode=%d, block=%d, readbytes=%ld", 
                      task->id, mode, block, (long)readbytes);
    }
    
    if (mode == AP_MODE_INIT) {
        return ap_get_brigade(f->c->input_filters, bb, mode, block, readbytes);
    }
    
    if (f->c->aborted) {
        return APR_ECONNABORTED;
    }
    
    if (!task->input.bb) {
        return APR_EOF;
    }
    
    /* Cleanup brigades from those nasty 0 length non-meta buckets
     * that apr_brigade_split_line() sometimes produces. */
    for (b = APR_BRIGADE_FIRST(task->input.bb);
         b != APR_BRIGADE_SENTINEL(task->input.bb); b = next) {
        next = APR_BUCKET_NEXT(b);
        if (b->length == 0 && !APR_BUCKET_IS_METADATA(b)) {
            apr_bucket_delete(b);
        } 
    }
    
    while (APR_BRIGADE_EMPTY(task->input.bb)) {
        /* Get more input data for our request. */
        if (trace1) {
            ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
                          "h2_slave_in(%s): get more data from mplx, block=%d, "
                          "readbytes=%ld", task->id, block, (long)readbytes);
        }
        if (task->input.beam) {
            status = h2_beam_receive(task->input.beam, task->input.bb, block, 
                                     128*1024);
        }
        else {
            status = APR_EOF;
        }
        
        if (trace1) {
            ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, f->c,
                          "h2_slave_in(%s): read returned", task->id);
        }
        if (APR_STATUS_IS_EAGAIN(status) 
            && (mode == AP_MODE_GETLINE || block == APR_BLOCK_READ)) {
            /* chunked input handling does not seem to like it if we
             * return with APR_EAGAIN from a GETLINE read... 
             * upload 100k test on test-ser.example.org hangs */
            status = APR_SUCCESS;
        }
        else if (APR_STATUS_IS_EOF(status)) {
            break;
        }
        else if (status != APR_SUCCESS) {
            return status;
        }

        if (trace1) {
            h2_util_bb_log(f->c, task->stream_id, APLOG_TRACE2, 
                        "input.beam recv raw", task->input.bb);
        }
        if (h2_task_logio_add_bytes_in) {
            apr_brigade_length(bb, 0, &bblen);
            h2_task_logio_add_bytes_in(f->c, bblen);
        }
    }
    
    /* Nothing there, no more data to get. Return APR_EAGAIN on
     * speculative reads, this is ap_check_pipeline()'s trick to
     * see if the connection needs closing. */
    if (status == APR_EOF && APR_BRIGADE_EMPTY(task->input.bb)) {
        return (mode == AP_MODE_SPECULATIVE)? APR_EAGAIN : APR_EOF;
    }

    if (trace1) {
        h2_util_bb_log(f->c, task->stream_id, APLOG_TRACE2, 
                    "task_input.bb", task->input.bb);
    }
           
    if (APR_BRIGADE_EMPTY(task->input.bb)) {
        if (trace1) {
            ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
                          "h2_slave_in(%s): no data", task->id);
        }
        return (block == APR_NONBLOCK_READ)? APR_EAGAIN : APR_EOF;
    }
    
    if (mode == AP_MODE_EXHAUSTIVE) {
        /* return all we have */
        APR_BRIGADE_CONCAT(bb, task->input.bb);
    }
    else if (mode == AP_MODE_READBYTES) {
        status = h2_brigade_concat_length(bb, task->input.bb, rmax);
    }
    else if (mode == AP_MODE_SPECULATIVE) {
        status = h2_brigade_copy_length(bb, task->input.bb, rmax);
    }
    else if (mode == AP_MODE_GETLINE) {
        /* We are reading a single LF line, e.g. the HTTP headers.
         * This has the nasty side effect of splitting the bucket, even
         * though it ends with CRLF, and creates a 0 length bucket. */
        status = apr_brigade_split_line(bb, task->input.bb, block, 
                                        HUGE_STRING_LEN);
        if (APLOGctrace1(f->c)) {
            char buffer[1024];
            apr_size_t len = sizeof(buffer)-1;
            apr_brigade_flatten(bb, buffer, &len);
            buffer[len] = 0;
            if (trace1) {
                ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
                              "h2_slave_in(%s): getline: %s",
                              task->id, buffer);
            }
        }
    }
    else {
        /* Hmm, well. There is mode AP_MODE_EATCRLF, but we chose not
         * to support it. Seems to work. */
        ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOTIMPL, f->c,
                      APLOGNO(03472) 
                      "h2_slave_in(%s), unsupported READ mode %d", 
                      task->id, mode);
        status = APR_ENOTIMPL;
    }
    
    if (trace1) {
        apr_brigade_length(bb, 0, &bblen);
        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
                      "h2_slave_in(%s): %ld data bytes", task->id, (long)bblen);
    }
    return status;
}
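The GETLINE branch above flattens the line it just split off into a stack buffer purely for trace logging. A minimal sketch of that idiom in isolation; the helper name log_brigade_prefix is an illustrative assumption.

/* Minimal sketch: copy at most sizeof(buffer)-1 bytes of a brigade into a
 * stack buffer so its contents can be logged. apr_brigade_flatten() only
 * reads the buckets; it does not remove them from the brigade. */
static void log_brigade_prefix(conn_rec *c, const char *tag,
                               apr_bucket_brigade *bb)
{
    char buffer[1024];
    apr_size_t len = sizeof(buffer) - 1;

    if (apr_brigade_flatten(bb, buffer, &len) == APR_SUCCESS) {
        buffer[len] = '\0';
        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "%s: %s", tag, buffer);
    }
}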
Example #23
0
/*
 * process the request and write the response.
 */
static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r,
                                proxy_conn_rec *conn,
                                conn_rec *origin,
                                proxy_dir_conf *conf,
                                apr_uri_t *uri,
                                char *url, char *server_portstr)
{
    apr_status_t status;
    int result;
    apr_bucket *e;
    apr_bucket_brigade *input_brigade;
    apr_bucket_brigade *output_brigade;
    ajp_msg_t *msg;
    apr_size_t bufsiz = 0;
    char *buff;
    char *send_body_chunk_buff;
    apr_uint16_t size;
    apr_byte_t conn_reuse = 0;
    const char *tenc;
    int havebody = 1;
    int output_failed = 0;
    int backend_failed = 0;
    apr_off_t bb_len;
    int data_sent = 0;
    int request_ended = 0;
    int headers_sent = 0;
    int rv = 0;
    apr_int32_t conn_poll_fd;
    apr_pollfd_t *conn_poll;
    proxy_server_conf *psf =
    ap_get_module_config(r->server->module_config, &proxy_module);
    apr_size_t maxsize = AJP_MSG_BUFFER_SZ;
    int send_body = 0;
    apr_off_t content_length = 0;
    int original_status = r->status;
    const char *original_status_line = r->status_line;

    if (psf->io_buffer_size_set)
       maxsize = psf->io_buffer_size;
    if (maxsize > AJP_MAX_BUFFER_SZ)
       maxsize = AJP_MAX_BUFFER_SZ;
    else if (maxsize < AJP_MSG_BUFFER_SZ)
       maxsize = AJP_MSG_BUFFER_SZ;
    maxsize = APR_ALIGN(maxsize, 1024);
       
    /*
     * Send the AJP request to the remote server
     */

    /* send request headers */
    status = ajp_send_header(conn->sock, r, maxsize, uri);
    if (status != APR_SUCCESS) {
        conn->close++;
        ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server,
                     "proxy: AJP: request failed to %pI (%s)",
                     conn->worker->cp->addr,
                     conn->worker->hostname);
        if (status == AJP_EOVERFLOW)
            return HTTP_BAD_REQUEST;
        else if  (status == AJP_EBAD_METHOD) {
            return HTTP_NOT_IMPLEMENTED;
        } else {
            /*
             * This is only non fatal when the method is idempotent. In this
             * case we can dare to retry it with a different worker if we are
             * a balancer member.
             */
            if (is_idempotent(r) == METHOD_IDEMPOTENT) {
                return HTTP_SERVICE_UNAVAILABLE;
            }
            return HTTP_INTERNAL_SERVER_ERROR;
        }
    }

    /* allocate an AJP message to store the data of the buckets */
    bufsiz = maxsize;
    status = ajp_alloc_data_msg(r->pool, &buff, &bufsiz, &msg);
    if (status != APR_SUCCESS) {
        /* We had a failure: Close connection to backend */
        conn->close++;
        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                     "proxy: ajp_alloc_data_msg failed");
        return HTTP_INTERNAL_SERVER_ERROR;
    }

    /* read the first block of data */
    input_brigade = apr_brigade_create(p, r->connection->bucket_alloc);
    tenc = apr_table_get(r->headers_in, "Transfer-Encoding");
    if (tenc && (strcasecmp(tenc, "chunked") == 0)) {
        /* The AJP protocol does not want body data yet */
        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                     "proxy: request is chunked");
    } else {
        /* Get client provided Content-Length header */
        content_length = get_content_length(r);
        status = ap_get_brigade(r->input_filters, input_brigade,
                                AP_MODE_READBYTES, APR_BLOCK_READ,
                                maxsize - AJP_HEADER_SZ);

        if (status != APR_SUCCESS) {
            /* We had a failure: Close connection to backend */
            conn->close++;
            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                         "proxy: ap_get_brigade failed");
            apr_brigade_destroy(input_brigade);
            return HTTP_BAD_REQUEST;
        }

        /* have something */
        if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) {
            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                         "proxy: APR_BUCKET_IS_EOS");
        }

        /* Try to send something */
        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                     "proxy: data to read (max %" APR_SIZE_T_FMT
                     " at %" APR_SIZE_T_FMT ")", bufsiz, msg->pos);

        status = apr_brigade_flatten(input_brigade, buff, &bufsiz);
        if (status != APR_SUCCESS) {
            /* We had a failure: Close connection to backend */
            conn->close++;
            apr_brigade_destroy(input_brigade);
            ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server,
                         "proxy: apr_brigade_flatten");
            return HTTP_INTERNAL_SERVER_ERROR;
        }
        apr_brigade_cleanup(input_brigade);

        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                     "proxy: got %" APR_SIZE_T_FMT " bytes of data", bufsiz);
        if (bufsiz > 0) {
            status = ajp_send_data_msg(conn->sock, msg, bufsiz);
            if (status != APR_SUCCESS) {
                /* We had a failure: Close connection to backend */
                conn->close++;
                apr_brigade_destroy(input_brigade);
                ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server,
                             "proxy: send failed to %pI (%s)",
                             conn->worker->cp->addr,
                             conn->worker->hostname);
                /*
                 * It is fatal when we failed to send a (part) of the request
                 * body.
                 */
                return HTTP_INTERNAL_SERVER_ERROR;
            }
            conn->worker->s->transferred += bufsiz;
            send_body = 1;
        }
        else if (content_length > 0) {
            ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server,
                         "proxy: read zero bytes, expecting"
                         " %" APR_OFF_T_FMT " bytes",
                         content_length);
            /*
             * We can only get here if the client closed the connection
             * to us without sending the body.
             * Now the connection is in the wrong state on the backend.
             * Sending an empty data msg doesn't help either, as it does
             * not move this connection into the correct state on the backend
             * for later reuse by the next request.
             * Close it to clean things up.
             */
            conn->close++;
            return HTTP_BAD_REQUEST;
        }
    }

    /* read the response */
    conn->data = NULL;
    status = ajp_read_header(conn->sock, r, maxsize,
                             (ajp_msg_t **)&(conn->data));
    if (status != APR_SUCCESS) {
        /* We had a failure: Close connection to backend */
        conn->close++;
        apr_brigade_destroy(input_brigade);
        ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server,
                     "proxy: read response failed from %pI (%s)",
                     conn->worker->cp->addr,
                     conn->worker->hostname);

        /* If we had a successful cping/cpong and then a timeout,
         * we assume the request caused a back-end timeout
         * that does not affect the whole worker.
         */
        if (APR_STATUS_IS_TIMEUP(status) && conn->worker->ping_timeout_set) {
            return HTTP_GATEWAY_TIME_OUT;
        }

        /*
         * This is only non fatal when we have not sent (parts) of a possible
         * request body so far (we do not store it and thus cannot send it
         * again) and the method is idempotent. In this case we can dare to
         * retry it with a different worker if we are a balancer member.
         */
        if (!send_body && (is_idempotent(r) == METHOD_IDEMPOTENT)) {
            return HTTP_SERVICE_UNAVAILABLE;
        }
        return HTTP_INTERNAL_SERVER_ERROR;
    }
    /* parse the response */
    result = ajp_parse_type(r, conn->data);
    output_brigade = apr_brigade_create(p, r->connection->bucket_alloc);

    /*
     * Prepare apr_pollfd_t struct for possible later check if there is currently
     * data available from the backend (do not flush response to client)
     * or not (flush response to client)
     */
    conn_poll = apr_pcalloc(p, sizeof(apr_pollfd_t));
    conn_poll->reqevents = APR_POLLIN;
    conn_poll->desc_type = APR_POLL_SOCKET;
    conn_poll->desc.s = conn->sock;

    bufsiz = maxsize;
    for (;;) {
        switch (result) {
            case CMD_AJP13_GET_BODY_CHUNK:
                if (havebody) {
                    if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) {
                        /* This is the end */
                        bufsiz = 0;
                        havebody = 0;
                        ap_log_error(APLOG_MARK, APLOG_DEBUG, status, r->server,
                                     "proxy: APR_BUCKET_IS_EOS");
                    } else {
                        status = ap_get_brigade(r->input_filters, input_brigade,
                                                AP_MODE_READBYTES,
                                                APR_BLOCK_READ,
                                                maxsize - AJP_HEADER_SZ);
                        if (status != APR_SUCCESS) {
                            ap_log_error(APLOG_MARK, APLOG_DEBUG, status,
                                         r->server,
                                         "ap_get_brigade failed");
                            output_failed = 1;
                            break;
                        }
                        bufsiz = maxsize;
                        status = apr_brigade_flatten(input_brigade, buff,
                                                     &bufsiz);
                        apr_brigade_cleanup(input_brigade);
                        if (status != APR_SUCCESS) {
                            ap_log_error(APLOG_MARK, APLOG_DEBUG, status,
                                         r->server,
                                         "apr_brigade_flatten failed");
                            output_failed = 1;
                            break;
                        }
                    }

                    ajp_msg_reset(msg);
                    /* will go in ajp_send_data_msg */
                    status = ajp_send_data_msg(conn->sock, msg, bufsiz);
                    if (status != APR_SUCCESS) {
                        ap_log_error(APLOG_MARK, APLOG_DEBUG, status, r->server,
                                     "ajp_send_data_msg failed");
                        backend_failed = 1;
                        break;
                    }
                    conn->worker->s->transferred += bufsiz;
                } else {
                    /*
                     * something is wrong: TC asks for more body, but we are
                     * already at the end of the body data
                     */
                    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                                 "ap_proxy_ajp_request error read after end");
                    backend_failed = 1;
                }
                break;
            case CMD_AJP13_SEND_HEADERS:
                if (headers_sent) {
                    /* Do not send anything to the client.
                     * The backend already sent us the headers.
                     */
                    backend_failed = 1;
                    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                                 "proxy: Backend sent headers twice.");
                    break;
                }
                /* AJP13_SEND_HEADERS: process them */
                status = ajp_parse_header(r, conf, conn->data);
                if (status != APR_SUCCESS) {
                    backend_failed = 1;
                }
                else if ((r->status == 401) && psf->error_override) {
                    const char *buf;
                    const char *wa = "WWW-Authenticate";
                    if ((buf = apr_table_get(r->headers_out, wa))) {
                        apr_table_set(r->err_headers_out, wa, buf);
                    } else {
                        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                                     "ap_proxy_ajp_request: origin server "
                                     "sent 401 without WWW-Authenticate header");
                    }
                }
                headers_sent = 1;
                break;
            case CMD_AJP13_SEND_BODY_CHUNK:
                /* AJP13_SEND_BODY_CHUNK: piece of data */
                status = ajp_parse_data(r, conn->data, &size, &send_body_chunk_buff);
                if (status == APR_SUCCESS) {
                    /* If we are overriding the errors, we can't put the content
                     * of the page into the brigade.
                     */
                    if (!psf->error_override || !ap_is_HTTP_ERROR(r->status)) {
                        /* AJP13_SEND_BODY_CHUNK with zero length
                         * is an explicit flush message
                         */
                        if (size == 0) {
                            if (headers_sent) {
                                e = apr_bucket_flush_create(r->connection->bucket_alloc);
                                APR_BRIGADE_INSERT_TAIL(output_brigade, e);
                            }
                            else {
                                ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                                     "Ignoring flush message received before headers");
                            }
                        }
                        else {
                            apr_status_t rv;

                            /* Handle the case where the error document is itself reverse
                             * proxied and was successful. We must maintain any previous
                             * error status so that an underlying error (e.g. HTTP_NOT_FOUND)
                             * doesn't become an HTTP_OK.
                             */
                            if (psf->error_override && !ap_is_HTTP_ERROR(r->status)
                                    && ap_is_HTTP_ERROR(original_status)) {
                                r->status = original_status;
                                r->status_line = original_status_line;
                            }

                            e = apr_bucket_transient_create(send_body_chunk_buff, size,
                                                            r->connection->bucket_alloc);
                            APR_BRIGADE_INSERT_TAIL(output_brigade, e);

                            if ((conn->worker->flush_packets == flush_on) ||
                                ((conn->worker->flush_packets == flush_auto) &&
                                 ((rv = apr_poll(conn_poll, 1, &conn_poll_fd,
                                                 conn->worker->flush_wait))
                                                 != APR_SUCCESS) &&
                                 APR_STATUS_IS_TIMEUP(rv))) {
                                e = apr_bucket_flush_create(r->connection->bucket_alloc);
                                APR_BRIGADE_INSERT_TAIL(output_brigade, e);
                            }
                            apr_brigade_length(output_brigade, 0, &bb_len);
                            if (bb_len != -1)
                                conn->worker->s->read += bb_len;
                        }
                        if (headers_sent) {
                            if (ap_pass_brigade(r->output_filters,
                                                output_brigade) != APR_SUCCESS) {
                                ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
                                              "proxy: error processing body.%s",
                                              r->connection->aborted ?
                                              " Client aborted connection." : "");
                                output_failed = 1;
                            }
                            data_sent = 1;
                            apr_brigade_cleanup(output_brigade);
                        }
                    }
                }
                else {
                    backend_failed = 1;
                }
                break;
            case CMD_AJP13_END_RESPONSE:
                status = ajp_parse_reuse(r, conn->data, &conn_reuse);
                if (status != APR_SUCCESS) {
                    backend_failed = 1;
                }
                /* If we are overriding the errors, we must not send anything to
                 * the client, especially as the brigade already contains headers.
                 * So do nothing here, and it will be cleaned up below.
                 */
                if (!psf->error_override || !ap_is_HTTP_ERROR(r->status)) {
                    e = apr_bucket_eos_create(r->connection->bucket_alloc);
                    APR_BRIGADE_INSERT_TAIL(output_brigade, e);
                    if (ap_pass_brigade(r->output_filters,
                                        output_brigade) != APR_SUCCESS) {
                        ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
                                      "proxy: error processing end");
                        output_failed = 1;
                    }
                    /* XXX: what about flush here? See mod_jk */
                    data_sent = 1;
                }
                request_ended = 1;
                break;
            default:
                backend_failed = 1;
                break;
        }

        /*
         * If connection has been aborted by client: Stop working.
         * Nevertheless, we regard our operation so far as a success:
         * So reset output_failed to 0 and set result to CMD_AJP13_END_RESPONSE
         * But: Close this connection to the backend.
         */
        if (r->connection->aborted) {
            conn->close++;
            output_failed = 0;
            result = CMD_AJP13_END_RESPONSE;
            request_ended = 1;
        }

        /*
         * We either have finished successfully or we failed.
         * So bail out
         */
        if ((result == CMD_AJP13_END_RESPONSE) || backend_failed
            || output_failed)
            break;

        /* read the response */
        status = ajp_read_header(conn->sock, r, maxsize,
                                 (ajp_msg_t **)&(conn->data));
        if (status != APR_SUCCESS) {
            backend_failed = 1;
            ap_log_error(APLOG_MARK, APLOG_DEBUG, status, r->server,
                         "ajp_read_header failed");
            break;
        }
        result = ajp_parse_type(r, conn->data);
    }
    apr_brigade_destroy(input_brigade);

    /*
     * Clear output_brigade to remove possible buckets that remained there
     * after an error.
     */
    apr_brigade_cleanup(output_brigade);

    if (backend_failed || output_failed) {
        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                     "proxy: Processing of request failed backend: %i, "
                     "output: %i", backend_failed, output_failed);
        /* We had a failure: Close connection to backend */
        conn->close++;
        /* Return DONE to avoid error messages being added to the stream */
        if (data_sent) {
            rv = DONE;
        }
    }
    else if (!request_ended) {
        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                     "proxy: Processing of request didn't terminate cleanly");
        /* We had a failure: Close connection to backend */
        conn->close++;
        backend_failed = 1;
        /* Return DONE to avoid error messages being added to the stream */
        if (data_sent) {
            rv = DONE;
        }
    }
    else if (!conn_reuse) {
        /* Our backend signalled connection close */
        conn->close++;
    }
    else {
        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                     "proxy: got response from %pI (%s)",
                     conn->worker->cp->addr,
                     conn->worker->hostname);

        if (psf->error_override && ap_is_HTTP_ERROR(r->status)) {
            /* clear r->status for override error, otherwise ErrorDocument
             * thinks that this is a recursive error, and doesn't find the
             * custom error page
             */
            rv = r->status;
            r->status = HTTP_OK;
        } else {
            rv = OK;
        }
    }

    if (backend_failed) {
        ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server,
                     "proxy: dialog to %pI (%s) failed",
                     conn->worker->cp->addr,
                     conn->worker->hostname);
        /*
         * If we have already sent data, signal a broken backend connection
         * upwards in the chain.
         */
        if (data_sent) {
            ap_proxy_backend_broke(r, output_brigade);
        } else if (!send_body && (is_idempotent(r) == METHOD_IDEMPOTENT)) {
            /*
             * This is only non fatal when we have not sent (parts) of a possible
             * request body so far (we do not store it and thus cannot send it
             * again) and the method is idempotent. In this case we can dare to
             * retry it with a different worker if we are a balancer member.
             */
            rv = HTTP_SERVICE_UNAVAILABLE;
        } else {
            rv = HTTP_INTERNAL_SERVER_ERROR;
        }
    }

    /*
     * Ensure that we send an EOS bucket through the filter chain if we have
     * already sent some data. ap_proxy_backend_broke may already have added
     * one to the brigade (making it non-empty), in which case we must not
     * add another.
     */
    if (data_sent && !r->eos_sent && APR_BRIGADE_EMPTY(output_brigade)) {
        e = apr_bucket_eos_create(r->connection->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(output_brigade, e);
    }

    /* If we have added something to the brigade above, send it */
    if (!APR_BRIGADE_EMPTY(output_brigade))
        ap_pass_brigade(r->output_filters, output_brigade);

    apr_brigade_destroy(output_brigade);

    return rv;
}
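The AJP example repeatedly reads a bounded chunk of the request body and flattens it into the preallocated message buffer before handing it to ajp_send_data_msg(). A reduced sketch of just that flatten-and-reset step; the helper name flatten_into_msg_buffer is illustrative, and the sketch relies on the caller limiting each ap_get_brigade() read to the buffer size, so the cleanup does not discard unread data.

/* Minimal sketch: copy at most *bufsiz bytes from the input brigade into the
 * preallocated AJP message buffer, then empty the brigade so the next
 * ap_get_brigade() call starts from a clean slate. On success, *bufsiz holds
 * the number of bytes actually copied. */
static apr_status_t flatten_into_msg_buffer(apr_bucket_brigade *input_brigade,
                                            char *buff, apr_size_t *bufsiz)
{
    apr_status_t rv = apr_brigade_flatten(input_brigade, buff, bufsiz);
    apr_brigade_cleanup(input_brigade); /* drop the buckets just copied */
    return rv;
}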