static apr_status_t apreq_output_filter_test(ap_filter_t *f, apr_bucket_brigade *bb)
{
    request_rec *r = f->r;
    apreq_handle_t *handle;
    apr_bucket_brigade *eos;
    struct ctx_t ctx = {r, bb};
    const apr_table_t *t;

    if (!APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(bb)))
        return ap_pass_brigade(f->next, bb);

    eos = apr_brigade_split(bb, APR_BRIGADE_LAST(bb));
    handle = apreq_handle_apache2(r);

    ap_log_rerror(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r, "appending parsed data");

    apr_brigade_puts(bb, NULL, NULL, "\n--APREQ OUTPUT FILTER--\nARGS:\n");

    apreq_args(handle, &t);
    if (t != NULL)
        apr_table_do(dump_table, &ctx, t, NULL);

    apreq_body(handle, &t);
    if (t != NULL) {
        apr_brigade_puts(bb, NULL, NULL, "BODY:\n");
        apr_table_do(dump_table, &ctx, t, NULL);
    }

    APR_BRIGADE_CONCAT(bb, eos);
    return ap_pass_brigade(f->next, bb);
}
apr_status_t h2_io_in_read(h2_io *io, apr_bucket_brigade *bb, apr_size_t maxlen)
{
    apr_off_t start_len = 0;
    apr_bucket *last;
    apr_status_t status;

    if (io->rst_error) {
        return APR_ECONNABORTED;
    }

    if (!io->bbin || APR_BRIGADE_EMPTY(io->bbin)) {
        return io->eos_in? APR_EOF : APR_EAGAIN;
    }

    apr_brigade_length(bb, 1, &start_len);
    last = APR_BRIGADE_LAST(bb);
    status = h2_util_move(bb, io->bbin, maxlen, NULL, "h2_io_in_read");
    if (status == APR_SUCCESS) {
        apr_bucket *nlast = APR_BRIGADE_LAST(bb);
        apr_off_t end_len = 0;
        apr_brigade_length(bb, 1, &end_len);
        if (last == nlast) {
            return APR_EAGAIN;
        }
        io->input_consumed += (end_len - start_len);
    }
    return status;
}
AP_DECLARE(apr_status_t) ap_pass_brigade(ap_filter_t *next, apr_bucket_brigade *bb)
{
    if (next) {
        apr_bucket *e;

        if ((e = APR_BRIGADE_LAST(bb)) && APR_BUCKET_IS_EOS(e) && next->r) {
            /* This is only safe because HTTP_HEADER filter is always in
             * the filter stack.  This ensures that there is ALWAYS a
             * request-based filter that we can attach this to.  If the
             * HTTP_FILTER is removed, and another filter is not put in its
             * place, then handlers like mod_cgi, which attach their own
             * EOS bucket to the brigade will be broken, because we will
             * get two EOS buckets on the same request.
             */
            next->r->eos_sent = 1;

            /* remember the eos for internal redirects, too */
            if (next->r->prev) {
                request_rec *prev = next->r->prev;

                while (prev) {
                    prev->eos_sent = 1;
                    prev = prev->prev;
                }
            }
        }
        return next->frec->filter_func.out_func(next, bb);
    }
    return AP_NOBODY_WROTE;
}
APU_DECLARE(apr_status_t) apr_brigade_puts(apr_bucket_brigade *bb,
                                           apr_brigade_flush flush, void *ctx,
                                           const char *str)
{
    apr_size_t len = strlen(str);
    apr_bucket *bkt = APR_BRIGADE_LAST(bb);

    if (!APR_BRIGADE_EMPTY(bb) && APR_BUCKET_IS_HEAP(bkt)) {
        /* If there is enough space available in a heap bucket
         * at the end of the brigade, copy the string directly
         * into the heap bucket
         */
        apr_bucket_heap *h = bkt->data;
        apr_size_t bytes_avail = h->alloc_len - bkt->length;

        if (bytes_avail >= len) {
            char *buf = h->base + bkt->start + bkt->length;
            memcpy(buf, str, len);
            bkt->length += len;
            return APR_SUCCESS;
        }
    }

    /* If the string could not be copied into an existing heap
     * bucket, delegate the work to apr_brigade_write(), which
     * knows how to grow the brigade
     */
    return apr_brigade_write(bb, flush, ctx, str, len);
}
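For context, here is a minimal caller-side sketch (not taken from the sources above) of how apr_brigade_puts() is typically used: successive writes are buffered in the tail heap bucket, and the brigade can then be flattened for inspection. The function and pool names are illustrative.

/* Hedged sketch: build a brigade with apr_brigade_puts() and read it back.
 * Not part of the examples above; names are illustrative. */
#include "apr_buckets.h"

static void brigade_puts_demo(apr_pool_t *pool)
{
    apr_bucket_alloc_t *ba = apr_bucket_alloc_create(pool);
    apr_bucket_brigade *bb = apr_brigade_create(pool, ba);
    char *flat;
    apr_size_t len;

    /* Successive puts land in the same heap bucket while space remains,
     * as the implementation above shows. */
    apr_brigade_puts(bb, NULL, NULL, "hello, ");
    apr_brigade_puts(bb, NULL, NULL, "world\n");

    /* Copy the whole brigade into a pool buffer for inspection. */
    apr_brigade_pflatten(bb, &flat, &len, pool);
    /* flat now holds the len bytes "hello, world\n" (not NUL-terminated) */

    apr_brigade_destroy(bb);
    apr_bucket_alloc_destroy(ba);
}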
static void test_partition(abts_case *tc, void *data)
{
    apr_bucket_alloc_t *ba = apr_bucket_alloc_create(p);
    apr_bucket_brigade *bb = apr_brigade_create(p, ba);
    apr_bucket *e;

    e = apr_bucket_immortal_create(hello, strlen(hello), ba);
    APR_BRIGADE_INSERT_HEAD(bb, e);

    apr_assert_success(tc, "partition brigade",
                       apr_brigade_partition(bb, 5, &e));

    test_bucket_content(tc, APR_BRIGADE_FIRST(bb), "hello", 5);
    test_bucket_content(tc, APR_BRIGADE_LAST(bb), ", world", 7);

    /* check the return value explicitly, as the assertion message promises */
    ABTS_ASSERT(tc, "partition returns APR_INCOMPLETE",
                apr_brigade_partition(bb, 8192, &e) == APR_INCOMPLETE);
    ABTS_ASSERT(tc, "APR_INCOMPLETE partition returned sentinel",
                e == APR_BRIGADE_SENTINEL(bb));

    apr_brigade_destroy(bb);
    apr_bucket_alloc_destroy(ba);
}
static triger_bucket_t *get_data_at_tail(ap_filter_t *f, apr_bucket_brigade *bb)
{
    const char *data;
    apr_bucket *b = APR_BRIGADE_LAST(bb);
    apr_size_t len = 0;
    triger_module_ctx_t *ctx = f->ctx;
    triger_bucket_t *rv = ctx->triger_bucket;

    rv->len = 0;
    rv->data = NULL;
    rv->b = NULL;
    rv->body_end_tag_pos = rv->body_start_tag_pos = rv->html_start_tag_pos =
        rv->head_start_tag_pos = rv->body_start_tag_pos = -1;

    /* Walk backwards past trailing metadata buckets (EOS, FLUSH) to find
     * the last data bucket in the brigade. */
    while (APR_BUCKET_IS_METADATA(b) && b != APR_BRIGADE_SENTINEL(bb))
        b = APR_BUCKET_PREV(b);
    if (APR_BUCKET_IS_METADATA(b) || b == APR_BRIGADE_SENTINEL(bb))
        return rv;

    apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
    rv->len = len;
    rv->data = data;
    rv->b = b;
    return rv;
}
APU_DECLARE(apr_status_t) apr_brigade_write(apr_bucket_brigade *b,
                                            apr_brigade_flush flush, void *ctx,
                                            const char *str, apr_size_t nbyte)
{
    apr_bucket *e = APR_BRIGADE_LAST(b);
    apr_size_t remaining = APR_BUCKET_BUFF_SIZE;
    char *buf = NULL;

    /*
     * If the last bucket is a heap bucket and its buffer is not shared with
     * another bucket, we may write into that bucket.
     */
    if (!APR_BRIGADE_EMPTY(b) && APR_BUCKET_IS_HEAP(e)
        && ((apr_bucket_heap *)(e->data))->refcount.refcount == 1) {
        apr_bucket_heap *h = e->data;

        /* HEAP bucket start offsets are always in-memory, safe to cast */
        remaining = h->alloc_len - (e->length + (apr_size_t)e->start);
        buf = h->base + e->start + e->length;
    }

    if (nbyte > remaining) {
        /* either a buffer bucket exists but is full,
         * or no buffer bucket exists and the data is too big
         * to buffer.  In either case, we should flush.  */
        if (flush) {
            e = apr_bucket_transient_create(str, nbyte, b->bucket_alloc);
            APR_BRIGADE_INSERT_TAIL(b, e);
            return flush(b, ctx);
        }
        else {
            e = apr_bucket_heap_create(str, nbyte, NULL, b->bucket_alloc);
            APR_BRIGADE_INSERT_TAIL(b, e);
            return APR_SUCCESS;
        }
    }
    else if (!buf) {
        /* we don't have a buffer, but the data is small enough
         * that we don't mind making a new buffer */
        buf = apr_bucket_alloc(APR_BUCKET_BUFF_SIZE, b->bucket_alloc);
        e = apr_bucket_heap_create(buf, APR_BUCKET_BUFF_SIZE,
                                   apr_bucket_free, b->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(b, e);
        e->length = 0;   /* We are writing into the brigade, and
                          * allocating more memory than we need.  This
                          * ensures that the bucket thinks it is empty just
                          * after we create it.  We'll fix the length
                          * once we put data in it below. */
    }

    /* there is a sufficiently big buffer bucket available now */
    memcpy(buf, str, nbyte);
    e->length += nbyte;

    return APR_SUCCESS;
}
static void test_insertfile(abts_case *tc, void *ctx)
{
    apr_bucket_alloc_t *ba = apr_bucket_alloc_create(p);
    apr_bucket_brigade *bb;
    const apr_off_t bignum = (APR_INT64_C(2) << 32) + 424242;
    apr_off_t count;
    apr_file_t *f;
    apr_bucket *e;

    ABTS_ASSERT(tc, "open test file",
                apr_file_open(&f, TIF_FNAME,
                              APR_FOPEN_WRITE | APR_FOPEN_TRUNCATE
                              | APR_FOPEN_CREATE | APR_FOPEN_SPARSE,
                              APR_OS_DEFAULT, p) == APR_SUCCESS);

    if (apr_file_trunc(f, bignum)) {
        apr_file_close(f);
        apr_file_remove(TIF_FNAME, p);
        ABTS_NOT_IMPL(tc, "Skipped: could not create large file");
        return;
    }

    bb = apr_brigade_create(p, ba);

    e = apr_brigade_insert_file(bb, f, 0, bignum, p);

    ABTS_ASSERT(tc, "inserted file was not at end of brigade",
                e == APR_BRIGADE_LAST(bb));

    /* check that the total size of inserted buckets is equal to the
     * total size of the file. */
    count = 0;

    for (e = APR_BRIGADE_FIRST(bb);
         e != APR_BRIGADE_SENTINEL(bb);
         e = APR_BUCKET_NEXT(e)) {
        ABTS_ASSERT(tc, "bucket size sane", e->length != (apr_size_t)-1);
        count += e->length;
    }

    ABTS_ASSERT(tc, "total size of buckets incorrect", count == bignum);

    apr_brigade_destroy(bb);

    /* Truncate the file to zero size before close() so that we don't
     * actually write out the large file if we are on a non-sparse file
     * system - like Mac OS X's HFS.  Otherwise, pity the poor user who
     * has to wait for the 8GB file to be written to disk.
     */
    apr_file_trunc(f, 0);
    apr_file_close(f);
    apr_bucket_alloc_destroy(ba);
    apr_file_remove(TIF_FNAME, p);
}
// Called when Apache outputs data:
static apr_status_t firstbyte_out_filter(ap_filter_t *f, apr_bucket_brigade *bb)
{
    firstbyte_config_t *cf = ap_get_module_config(f->c->conn_config,
                                                  &log_firstbyte_module);
    apr_bucket *b = APR_BRIGADE_LAST(bb);

    /* End of data, make sure we flush */
    if (APR_BUCKET_IS_EOS(b)) {
        APR_BRIGADE_INSERT_TAIL(bb,
                                apr_bucket_flush_create(f->c->bucket_alloc));
        APR_BUCKET_REMOVE(b);
        apr_bucket_destroy(b);
    }

    if (cf->first_out == 1) {
        cf->first_out_time = apr_time_now();
        cf->first_out = 0;
    }

    return ap_pass_brigade(f->next, bb);
}
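Several filters in this listing share the same basic shape: inspect the last bucket of the brigade, act only when it is EOS, and always pass the brigade on. A stripped-down sketch of that pattern follows; the filter name and the flush-before-EOS action are hypothetical, not taken from any of the modules above.

/* Hedged sketch of the common output-filter idiom: detect a complete
 * response via the EOS bucket at the brigade tail, then pass everything
 * downstream.  Names are illustrative. */
#include "httpd.h"
#include "util_filter.h"
#include "apr_buckets.h"

static apr_status_t example_out_filter(ap_filter_t *f, apr_bucket_brigade *bb)
{
    apr_bucket *last = APR_BRIGADE_LAST(bb);

    if (!APR_BRIGADE_EMPTY(bb) && APR_BUCKET_IS_EOS(last)) {
        /* The response is complete: e.g. record a timestamp or insert a
         * FLUSH bucket just before the EOS. */
        apr_bucket *flush = apr_bucket_flush_create(f->c->bucket_alloc);
        APR_BUCKET_INSERT_BEFORE(last, flush);
    }

    /* Always hand the brigade to the next filter, even when unchanged. */
    return ap_pass_brigade(f->next, bb);
}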
static int insert_html_fragment_at_tail(ap_filter_t *f,
                                        apr_bucket_brigade *bb,
                                        triger_conf_t *cfg)
{
    apr_bucket *tmp_b;
    int ret = 0;
    apr_size_t pos;
    apr_bucket *js;
    triger_module_ctx_t *ctx = f->ctx;

    if (ctx->find)
        goto last;

    js = apr_bucket_transient_create(cfg->js, (apr_size_t) strlen(cfg->js) + 1,
                                     f->r->connection->bucket_alloc);
    if (!js)
        goto last;

    if (ctx->triger_bucket->body_end_tag_pos == -1
        && ctx->triger_bucket->html_end_tag_pos == -1) {
        ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r,
                      "Neither </body> nor </html> tag found, insert at the end: %s",
                      cfg->js);
        tmp_b = APR_BRIGADE_LAST(bb);
        APR_BUCKET_INSERT_BEFORE(tmp_b, js);
    }
    else {
        tmp_b = ctx->triger_bucket->b;
        ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r,
                      "Either </body> or </html> tag was found, insert there: %s",
                      cfg->js);
        pos = ctx->triger_bucket->body_end_tag_pos != -1
                  ? ctx->triger_bucket->body_end_tag_pos
                  : ctx->triger_bucket->html_end_tag_pos;
        apr_bucket_split(tmp_b, pos);
        APR_BUCKET_INSERT_AFTER(tmp_b, js);
    }
    ctx->find = 1;

last:
    return ret;
}
static void test_split(abts_case *tc, void *data)
{
    apr_bucket_alloc_t *ba = apr_bucket_alloc_create(p);
    apr_bucket_brigade *bb, *bb2;
    apr_bucket *e;

    bb = make_simple_brigade(ba, "hello, ", "world");

    /* split at the "world" bucket */
    e = APR_BRIGADE_LAST(bb);
    bb2 = apr_brigade_split(bb, e);

    ABTS_ASSERT(tc, "split brigade contains one bucket",
                count_buckets(bb2) == 1);
    ABTS_ASSERT(tc, "original brigade contains one bucket",
                count_buckets(bb) == 1);

    flatten_match(tc, "match original brigade", bb, "hello, ");
    flatten_match(tc, "match split brigade", bb2, "world");

    apr_brigade_destroy(bb2);
    apr_brigade_destroy(bb);
    apr_bucket_alloc_destroy(ba);
}
static apr_status_t triger_filter(ap_filter_t * f, apr_bucket_brigade * bb) { apr_status_t rv = APR_SUCCESS; triger_conf_t *cfg; apr_bucket *b; triger_module_ctx_t *ctx = f->ctx; if (APR_BRIGADE_EMPTY(bb)) return APR_SUCCESS; cfg = ap_get_module_config(f->r->per_dir_config, &triger_module); if (!cfg) goto last; if (!cfg->enabled) goto last; if (!ctx) { f->ctx = ctx = apr_pcalloc(f->r->pool, sizeof(*ctx)); if (!ctx) goto last; ctx->times = 1; ctx->unknown_start_tag_find = 0; ctx->unknown_end_tag_find = 0; ctx->find = 0; ctx->no_tag_find = 1; ctx->doctype_tag_find = 0; ctx->html_start_tag_find = ctx->head_start_tag_find = ctx->body_start_tag_find = ctx->body_end_tag_find = ctx->html_end_tag_find = 0; ctx->triger_bucket = apr_pcalloc(f->r->pool, sizeof(triger_bucket_t)); if (!ctx->triger_bucket) goto last; ctx->triger_bucket->limit = cfg->chk_len; ctx->head_check = 0; apr_table_unset(f->r->headers_out, "Content-Length"); } else ctx->times++; if (!is_this_html(f->r) || ctx->find) goto last; ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r, "Enter this filter %u times", ctx->times); if (!cfg->full_chk) { ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r, "Only check the first and last data buckets"); if (!ctx->head_check) { ctx->head_check = 1; get_data_at_head(f, bb); where_to_insert_html_fragment_at_head(f); ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r, "Find the first data bucket. Content length: %d uri: %s path info: %s positions found: %d (<head>)", (int) ctx->triger_bucket->len, f->r->uri, f->r->path_info, (int) ctx->triger_bucket->head_start_tag_pos); insert_html_fragment_at_head(f, bb, cfg); } if (ctx->find || !APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(bb))) goto last; get_data_at_tail(f, bb); where_to_insert_html_fragment_at_tail(f); ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r, "Find the last data bucket. Content length: %d uri: %s path info: %s positions found: %d (</body>) %d (/html)", (int) ctx->triger_bucket->len, f->r->uri, f->r->path_info, (int) ctx->triger_bucket->body_end_tag_pos, (int) ctx->triger_bucket->html_end_tag_pos); insert_html_fragment_at_tail(f, bb, cfg); } else { ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r, "Check each data bucket"); for (b = APR_BRIGADE_FIRST(bb); b != APR_BRIGADE_SENTINEL(bb); b = APR_BUCKET_NEXT(b)) { if (!APR_BUCKET_IS_METADATA(b)) { get_triger_bucket(f, b); where_to_insert_html_fragment_at_head(f); insert_html_fragment_at_head(f, bb, cfg); } if (!ctx->find && APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(bb))) { get_data_at_tail(f, bb); where_to_insert_html_fragment_at_tail(f); insert_html_fragment_at_tail(f, bb, cfg); } } } last: rv = ap_pass_brigade(f->next, bb); return rv; }
/* * process the request and write the response. */ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r, proxy_conn_rec *conn, conn_rec *origin, proxy_dir_conf *conf, apr_uri_t *uri, char *url, char *server_portstr) { apr_status_t status; int result; apr_bucket *e; apr_bucket_brigade *input_brigade; apr_bucket_brigade *output_brigade; ajp_msg_t *msg; apr_size_t bufsiz = 0; char *buff; char *send_body_chunk_buff; apr_uint16_t size; apr_byte_t conn_reuse = 0; const char *tenc; int havebody = 1; int output_failed = 0; int backend_failed = 0; apr_off_t bb_len; int data_sent = 0; int request_ended = 0; int headers_sent = 0; int rv = 0; apr_int32_t conn_poll_fd; apr_pollfd_t *conn_poll; proxy_server_conf *psf = ap_get_module_config(r->server->module_config, &proxy_module); apr_size_t maxsize = AJP_MSG_BUFFER_SZ; int send_body = 0; apr_off_t content_length = 0; int original_status = r->status; const char *original_status_line = r->status_line; if (psf->io_buffer_size_set) maxsize = psf->io_buffer_size; if (maxsize > AJP_MAX_BUFFER_SZ) maxsize = AJP_MAX_BUFFER_SZ; else if (maxsize < AJP_MSG_BUFFER_SZ) maxsize = AJP_MSG_BUFFER_SZ; maxsize = APR_ALIGN(maxsize, 1024); /* * Send the AJP request to the remote server */ /* send request headers */ status = ajp_send_header(conn->sock, r, maxsize, uri); if (status != APR_SUCCESS) { conn->close++; ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server, "proxy: AJP: request failed to %pI (%s)", conn->worker->cp->addr, conn->worker->hostname); if (status == AJP_EOVERFLOW) return HTTP_BAD_REQUEST; else if (status == AJP_EBAD_METHOD) { return HTTP_NOT_IMPLEMENTED; } else { /* * This is only non fatal when the method is idempotent. In this * case we can dare to retry it with a different worker if we are * a balancer member. 
*/ if (is_idempotent(r) == METHOD_IDEMPOTENT) { return HTTP_SERVICE_UNAVAILABLE; } return HTTP_INTERNAL_SERVER_ERROR; } } /* allocate an AJP message to store the data of the buckets */ bufsiz = maxsize; status = ajp_alloc_data_msg(r->pool, &buff, &bufsiz, &msg); if (status != APR_SUCCESS) { /* We had a failure: Close connection to backend */ conn->close++; ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, "proxy: ajp_alloc_data_msg failed"); return HTTP_INTERNAL_SERVER_ERROR; } /* read the first bloc of data */ input_brigade = apr_brigade_create(p, r->connection->bucket_alloc); tenc = apr_table_get(r->headers_in, "Transfer-Encoding"); if (tenc && (strcasecmp(tenc, "chunked") == 0)) { /* The AJP protocol does not want body data yet */ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, "proxy: request is chunked"); } else { /* Get client provided Content-Length header */ content_length = get_content_length(r); status = ap_get_brigade(r->input_filters, input_brigade, AP_MODE_READBYTES, APR_BLOCK_READ, maxsize - AJP_HEADER_SZ); if (status != APR_SUCCESS) { /* We had a failure: Close connection to backend */ conn->close++; ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, "proxy: ap_get_brigade failed"); apr_brigade_destroy(input_brigade); return HTTP_BAD_REQUEST; } /* have something */ if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) { ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, "proxy: APR_BUCKET_IS_EOS"); } /* Try to send something */ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, "proxy: data to read (max %" APR_SIZE_T_FMT " at %" APR_SIZE_T_FMT ")", bufsiz, msg->pos); status = apr_brigade_flatten(input_brigade, buff, &bufsiz); if (status != APR_SUCCESS) { /* We had a failure: Close connection to backend */ conn->close++; apr_brigade_destroy(input_brigade); ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server, "proxy: apr_brigade_flatten"); return HTTP_INTERNAL_SERVER_ERROR; } apr_brigade_cleanup(input_brigade); ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, "proxy: got %" APR_SIZE_T_FMT " bytes of data", bufsiz); if (bufsiz > 0) { status = ajp_send_data_msg(conn->sock, msg, bufsiz); if (status != APR_SUCCESS) { /* We had a failure: Close connection to backend */ conn->close++; apr_brigade_destroy(input_brigade); ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server, "proxy: send failed to %pI (%s)", conn->worker->cp->addr, conn->worker->hostname); /* * It is fatal when we failed to send a (part) of the request * body. */ return HTTP_INTERNAL_SERVER_ERROR; } conn->worker->s->transferred += bufsiz; send_body = 1; } else if (content_length > 0) { ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server, "proxy: read zero bytes, expecting" " %" APR_OFF_T_FMT " bytes", content_length); /* * We can only get here if the client closed the connection * to us without sending the body. * Now the connection is in the wrong state on the backend. * Sending an empty data msg doesn't help either as it does * not move this connection to the correct state on the backend * for later resusage by the next request again. * Close it to clean things up. 
*/ conn->close++; return HTTP_BAD_REQUEST; } } /* read the response */ conn->data = NULL; status = ajp_read_header(conn->sock, r, maxsize, (ajp_msg_t **)&(conn->data)); if (status != APR_SUCCESS) { /* We had a failure: Close connection to backend */ conn->close++; apr_brigade_destroy(input_brigade); ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server, "proxy: read response failed from %pI (%s)", conn->worker->cp->addr, conn->worker->hostname); /* If we had a successful cping/cpong and then a timeout * we assume it is a request that cause a back-end timeout, * but doesn't affect the whole worker. */ if (APR_STATUS_IS_TIMEUP(status) && conn->worker->ping_timeout_set) { return HTTP_GATEWAY_TIME_OUT; } /* * This is only non fatal when we have not sent (parts) of a possible * request body so far (we do not store it and thus cannot sent it * again) and the method is idempotent. In this case we can dare to * retry it with a different worker if we are a balancer member. */ if (!send_body && (is_idempotent(r) == METHOD_IDEMPOTENT)) { return HTTP_SERVICE_UNAVAILABLE; } return HTTP_INTERNAL_SERVER_ERROR; } /* parse the reponse */ result = ajp_parse_type(r, conn->data); output_brigade = apr_brigade_create(p, r->connection->bucket_alloc); /* * Prepare apr_pollfd_t struct for possible later check if there is currently * data available from the backend (do not flush response to client) * or not (flush response to client) */ conn_poll = apr_pcalloc(p, sizeof(apr_pollfd_t)); conn_poll->reqevents = APR_POLLIN; conn_poll->desc_type = APR_POLL_SOCKET; conn_poll->desc.s = conn->sock; bufsiz = maxsize; for (;;) { switch (result) { case CMD_AJP13_GET_BODY_CHUNK: if (havebody) { if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) { /* This is the end */ bufsiz = 0; havebody = 0; ap_log_error(APLOG_MARK, APLOG_DEBUG, status, r->server, "proxy: APR_BUCKET_IS_EOS"); } else { status = ap_get_brigade(r->input_filters, input_brigade, AP_MODE_READBYTES, APR_BLOCK_READ, maxsize - AJP_HEADER_SZ); if (status != APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_DEBUG, status, r->server, "ap_get_brigade failed"); output_failed = 1; break; } bufsiz = maxsize; status = apr_brigade_flatten(input_brigade, buff, &bufsiz); apr_brigade_cleanup(input_brigade); if (status != APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_DEBUG, status, r->server, "apr_brigade_flatten failed"); output_failed = 1; break; } } ajp_msg_reset(msg); /* will go in ajp_send_data_msg */ status = ajp_send_data_msg(conn->sock, msg, bufsiz); if (status != APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_DEBUG, status, r->server, "ajp_send_data_msg failed"); backend_failed = 1; break; } conn->worker->s->transferred += bufsiz; } else { /* * something is wrong TC asks for more body but we are * already at the end of the body data */ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, "ap_proxy_ajp_request error read after end"); backend_failed = 1; } break; case CMD_AJP13_SEND_HEADERS: if (headers_sent) { /* Do not send anything to the client. * Backend already send us the headers. 
*/ backend_failed = 1; ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, "proxy: Backend sent headers twice."); break; } /* AJP13_SEND_HEADERS: process them */ status = ajp_parse_header(r, conf, conn->data); if (status != APR_SUCCESS) { backend_failed = 1; } else if ((r->status == 401) && psf->error_override) { const char *buf; const char *wa = "WWW-Authenticate"; if ((buf = apr_table_get(r->headers_out, wa))) { apr_table_set(r->err_headers_out, wa, buf); } else { ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, "ap_proxy_ajp_request: origin server " "sent 401 without WWW-Authenticate header"); } } headers_sent = 1; break; case CMD_AJP13_SEND_BODY_CHUNK: /* AJP13_SEND_BODY_CHUNK: piece of data */ status = ajp_parse_data(r, conn->data, &size, &send_body_chunk_buff); if (status == APR_SUCCESS) { /* If we are overriding the errors, we can't put the content * of the page into the brigade. */ if (!psf->error_override || !ap_is_HTTP_ERROR(r->status)) { /* AJP13_SEND_BODY_CHUNK with zero length * is explicit flush message */ if (size == 0) { if (headers_sent) { e = apr_bucket_flush_create(r->connection->bucket_alloc); APR_BRIGADE_INSERT_TAIL(output_brigade, e); } else { ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, "Ignoring flush message received before headers"); } } else { apr_status_t rv; /* Handle the case where the error document is itself reverse * proxied and was successful. We must maintain any previous * error status so that an underlying error (eg HTTP_NOT_FOUND) * doesn't become an HTTP_OK. */ if (psf->error_override && !ap_is_HTTP_ERROR(r->status) && ap_is_HTTP_ERROR(original_status)) { r->status = original_status; r->status_line = original_status_line; } e = apr_bucket_transient_create(send_body_chunk_buff, size, r->connection->bucket_alloc); APR_BRIGADE_INSERT_TAIL(output_brigade, e); if ((conn->worker->flush_packets == flush_on) || ((conn->worker->flush_packets == flush_auto) && ((rv = apr_poll(conn_poll, 1, &conn_poll_fd, conn->worker->flush_wait)) != APR_SUCCESS) && APR_STATUS_IS_TIMEUP(rv))) { e = apr_bucket_flush_create(r->connection->bucket_alloc); APR_BRIGADE_INSERT_TAIL(output_brigade, e); } apr_brigade_length(output_brigade, 0, &bb_len); if (bb_len != -1) conn->worker->s->read += bb_len; } if (headers_sent) { if (ap_pass_brigade(r->output_filters, output_brigade) != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, "proxy: error processing body.%s", r->connection->aborted ? " Client aborted connection." : ""); output_failed = 1; } data_sent = 1; apr_brigade_cleanup(output_brigade); } } } else { backend_failed = 1; } break; case CMD_AJP13_END_RESPONSE: status = ajp_parse_reuse(r, conn->data, &conn_reuse); if (status != APR_SUCCESS) { backend_failed = 1; } /* If we are overriding the errors, we must not send anything to * the client, especially as the brigade already contains headers. * So do nothing here, and it will be cleaned up below. */ if (!psf->error_override || !ap_is_HTTP_ERROR(r->status)) { e = apr_bucket_eos_create(r->connection->bucket_alloc); APR_BRIGADE_INSERT_TAIL(output_brigade, e); if (ap_pass_brigade(r->output_filters, output_brigade) != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, "proxy: error processing end"); output_failed = 1; } /* XXX: what about flush here? See mod_jk */ data_sent = 1; } request_ended = 1; break; default: backend_failed = 1; break; } /* * If connection has been aborted by client: Stop working. 
* Nevertheless, we regard our operation so far as a success: * So reset output_failed to 0 and set result to CMD_AJP13_END_RESPONSE * But: Close this connection to the backend. */ if (r->connection->aborted) { conn->close++; output_failed = 0; result = CMD_AJP13_END_RESPONSE; request_ended = 1; } /* * We either have finished successfully or we failed. * So bail out */ if ((result == CMD_AJP13_END_RESPONSE) || backend_failed || output_failed) break; /* read the response */ status = ajp_read_header(conn->sock, r, maxsize, (ajp_msg_t **)&(conn->data)); if (status != APR_SUCCESS) { backend_failed = 1; ap_log_error(APLOG_MARK, APLOG_DEBUG, status, r->server, "ajp_read_header failed"); break; } result = ajp_parse_type(r, conn->data); } apr_brigade_destroy(input_brigade); /* * Clear output_brigade to remove possible buckets that remained there * after an error. */ apr_brigade_cleanup(output_brigade); if (backend_failed || output_failed) { ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, "proxy: Processing of request failed backend: %i, " "output: %i", backend_failed, output_failed); /* We had a failure: Close connection to backend */ conn->close++; /* Return DONE to avoid error messages being added to the stream */ if (data_sent) { rv = DONE; } } else if (!request_ended) { ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, "proxy: Processing of request didn't terminate cleanly"); /* We had a failure: Close connection to backend */ conn->close++; backend_failed = 1; /* Return DONE to avoid error messages being added to the stream */ if (data_sent) { rv = DONE; } } else if (!conn_reuse) { /* Our backend signalled connection close */ conn->close++; } else { ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, "proxy: got response from %pI (%s)", conn->worker->cp->addr, conn->worker->hostname); if (psf->error_override && ap_is_HTTP_ERROR(r->status)) { /* clear r->status for override error, otherwise ErrorDocument * thinks that this is a recursive error, and doesn't find the * custom error page */ rv = r->status; r->status = HTTP_OK; } else { rv = OK; } } if (backend_failed) { ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server, "proxy: dialog to %pI (%s) failed", conn->worker->cp->addr, conn->worker->hostname); /* * If we already send data, signal a broken backend connection * upwards in the chain. */ if (data_sent) { ap_proxy_backend_broke(r, output_brigade); } else if (!send_body && (is_idempotent(r) == METHOD_IDEMPOTENT)) { /* * This is only non fatal when we have not sent (parts) of a possible * request body so far (we do not store it and thus cannot sent it * again) and the method is idempotent. In this case we can dare to * retry it with a different worker if we are a balancer member. */ rv = HTTP_SERVICE_UNAVAILABLE; } else { rv = HTTP_INTERNAL_SERVER_ERROR; } } /* * Ensure that we sent an EOS bucket thru the filter chain, if we already * have sent some data. Maybe ap_proxy_backend_broke was called and added * one to the brigade already (no longer making it empty). So we should * not do this in this case. */ if (data_sent && !r->eos_sent && APR_BRIGADE_EMPTY(output_brigade)) { e = apr_bucket_eos_create(r->connection->bucket_alloc); APR_BRIGADE_INSERT_TAIL(output_brigade, e); } /* If we have added something to the brigade above, sent it */ if (!APR_BRIGADE_EMPTY(output_brigade)) ap_pass_brigade(r->output_filters, output_brigade); apr_brigade_destroy(output_brigade); return rv; }
static apr_status_t dispatch(proxy_conn_rec *conn, proxy_dir_conf *conf, request_rec *r, apr_pool_t *setaside_pool, apr_uint16_t request_id, const char **err, int *bad_request, int *has_responded) { apr_bucket_brigade *ib, *ob; int seen_end_of_headers = 0, done = 0, ignore_body = 0; apr_status_t rv = APR_SUCCESS; int script_error_status = HTTP_OK; conn_rec *c = r->connection; struct iovec vec[2]; ap_fcgi_header header; unsigned char farray[AP_FCGI_HEADER_LEN]; apr_pollfd_t pfd; int header_state = HDR_STATE_READING_HEADERS; char stack_iobuf[AP_IOBUFSIZE]; apr_size_t iobuf_size = AP_IOBUFSIZE; char *iobuf = stack_iobuf; *err = NULL; if (conn->worker->s->io_buffer_size_set) { iobuf_size = conn->worker->s->io_buffer_size; iobuf = apr_palloc(r->pool, iobuf_size); } pfd.desc_type = APR_POLL_SOCKET; pfd.desc.s = conn->sock; pfd.p = r->pool; pfd.reqevents = APR_POLLIN | APR_POLLOUT; ib = apr_brigade_create(r->pool, c->bucket_alloc); ob = apr_brigade_create(r->pool, c->bucket_alloc); while (! done) { apr_interval_time_t timeout; apr_size_t len; int n; /* We need SOME kind of timeout here, or virtually anything will * cause timeout errors. */ apr_socket_timeout_get(conn->sock, &timeout); rv = apr_poll(&pfd, 1, &n, timeout); if (rv != APR_SUCCESS) { if (APR_STATUS_IS_EINTR(rv)) { continue; } *err = "polling"; break; } if (pfd.rtnevents & APR_POLLOUT) { apr_size_t to_send, writebuflen; int last_stdin = 0; char *iobuf_cursor; rv = ap_get_brigade(r->input_filters, ib, AP_MODE_READBYTES, APR_BLOCK_READ, iobuf_size); if (rv != APR_SUCCESS) { *err = "reading input brigade"; *bad_request = 1; break; } if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(ib))) { last_stdin = 1; } writebuflen = iobuf_size; rv = apr_brigade_flatten(ib, iobuf, &writebuflen); apr_brigade_cleanup(ib); if (rv != APR_SUCCESS) { *err = "flattening brigade"; break; } to_send = writebuflen; iobuf_cursor = iobuf; while (to_send > 0) { int nvec = 0; apr_size_t write_this_time; write_this_time = to_send < AP_FCGI_MAX_CONTENT_LEN ? to_send : AP_FCGI_MAX_CONTENT_LEN; ap_fcgi_fill_in_header(&header, AP_FCGI_STDIN, request_id, (apr_uint16_t)write_this_time, 0); ap_fcgi_header_to_array(&header, farray); vec[nvec].iov_base = (void *)farray; vec[nvec].iov_len = sizeof(farray); ++nvec; if (writebuflen) { vec[nvec].iov_base = iobuf_cursor; vec[nvec].iov_len = write_this_time; ++nvec; } rv = send_data(conn, vec, nvec, &len); if (rv != APR_SUCCESS) { *err = "sending stdin"; break; } to_send -= write_this_time; iobuf_cursor += write_this_time; } if (rv != APR_SUCCESS) { break; } if (last_stdin) { pfd.reqevents = APR_POLLIN; /* Done with input data */ /* signal EOF (empty FCGI_STDIN) */ ap_fcgi_fill_in_header(&header, AP_FCGI_STDIN, request_id, 0, 0); ap_fcgi_header_to_array(&header, farray); vec[0].iov_base = (void *)farray; vec[0].iov_len = sizeof(farray); rv = send_data(conn, vec, 1, &len); if (rv != APR_SUCCESS) { *err = "sending empty stdin"; break; } } } if (pfd.rtnevents & APR_POLLIN) { apr_size_t readbuflen; apr_uint16_t clen, rid; apr_bucket *b; unsigned char plen; unsigned char type, version; /* First, we grab the header... 
*/ rv = get_data_full(conn, (char *) farray, AP_FCGI_HEADER_LEN); if (rv != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01067) "Failed to read FastCGI header"); break; } ap_log_rdata(APLOG_MARK, APLOG_TRACE8, r, "FastCGI header", farray, AP_FCGI_HEADER_LEN, 0); ap_fcgi_header_fields_from_array(&version, &type, &rid, &clen, &plen, farray); if (version != AP_FCGI_VERSION_1) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01068) "Got bogus version %d", (int)version); rv = APR_EINVAL; break; } if (rid != request_id) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01069) "Got bogus rid %d, expected %d", rid, request_id); rv = APR_EINVAL; break; } recv_again: if (clen > iobuf_size) { readbuflen = iobuf_size; } else { readbuflen = clen; } /* Now get the actual data. Yes it sucks to do this in a second * recv call, this will eventually change when we move to real * nonblocking recv calls. */ if (readbuflen != 0) { rv = get_data(conn, iobuf, &readbuflen); if (rv != APR_SUCCESS) { *err = "reading response body"; break; } } switch (type) { case AP_FCGI_STDOUT: if (clen != 0) { b = apr_bucket_transient_create(iobuf, readbuflen, c->bucket_alloc); APR_BRIGADE_INSERT_TAIL(ob, b); if (! seen_end_of_headers) { int st = handle_headers(r, &header_state, iobuf, readbuflen); if (st == 1) { int status; seen_end_of_headers = 1; status = ap_scan_script_header_err_brigade_ex(r, ob, NULL, APLOG_MODULE_INDEX); /* suck in all the rest */ if (status != OK) { apr_bucket *tmp_b; apr_brigade_cleanup(ob); tmp_b = apr_bucket_eos_create(c->bucket_alloc); APR_BRIGADE_INSERT_TAIL(ob, tmp_b); *has_responded = 1; r->status = status; rv = ap_pass_brigade(r->output_filters, ob); if (rv != APR_SUCCESS) { *err = "passing headers brigade to output filters"; } else if (status == HTTP_NOT_MODIFIED) { /* The 304 response MUST NOT contain * a message-body, ignore it. */ ignore_body = 1; } else { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01070) "Error parsing script headers"); rv = APR_EINVAL; } break; } if (conf->error_override && ap_is_HTTP_ERROR(r->status)) { /* * set script_error_status to discard * everything after the headers */ script_error_status = r->status; /* * prevent ap_die() from treating this as a * recursive error, initially: */ r->status = HTTP_OK; } if (script_error_status == HTTP_OK && !APR_BRIGADE_EMPTY(ob) && !ignore_body) { /* Send the part of the body that we read while * reading the headers. */ *has_responded = 1; rv = ap_pass_brigade(r->output_filters, ob); if (rv != APR_SUCCESS) { *err = "passing brigade to output filters"; break; } } apr_brigade_cleanup(ob); apr_pool_clear(setaside_pool); } else { /* We're still looking for the end of the * headers, so this part of the data will need * to persist. */ apr_bucket_setaside(b, setaside_pool); } } else { /* we've already passed along the headers, so now pass * through the content. we could simply continue to * setaside the content and not pass until we see the * 0 content-length (below, where we append the EOS), * but that could be a huge amount of data; so we pass * along smaller chunks */ if (script_error_status == HTTP_OK && !ignore_body) { *has_responded = 1; rv = ap_pass_brigade(r->output_filters, ob); if (rv != APR_SUCCESS) { *err = "passing brigade to output filters"; break; } } apr_brigade_cleanup(ob); } /* If we didn't read all the data, go back and get the * rest of it. */ if (clen > readbuflen) { clen -= readbuflen; goto recv_again; } } else { /* XXX what if we haven't seen end of the headers yet? 
*/ if (script_error_status == HTTP_OK) { b = apr_bucket_eos_create(c->bucket_alloc); APR_BRIGADE_INSERT_TAIL(ob, b); *has_responded = 1; rv = ap_pass_brigade(r->output_filters, ob); if (rv != APR_SUCCESS) { *err = "passing brigade to output filters"; break; } } /* XXX Why don't we cleanup here? (logic from AJP) */ } break; case AP_FCGI_STDERR: /* TODO: Should probably clean up this logging a bit... */ if (clen) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01071) "Got error '%.*s'", (int)readbuflen, iobuf); } if (clen > readbuflen) { clen -= readbuflen; goto recv_again; } break; case AP_FCGI_END_REQUEST: done = 1; break; default: ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01072) "Got bogus record %d", type); break; } /* Leave on above switch's inner error. */ if (rv != APR_SUCCESS) { break; } if (plen) { rv = get_data_full(conn, iobuf, plen); if (rv != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(02537) "Error occurred reading padding"); break; } } } } apr_brigade_destroy(ib); apr_brigade_destroy(ob); if (script_error_status != HTTP_OK) { ap_die(script_error_status, r); /* send ErrorDocument */ *has_responded = 1; } return rv; }
apr_status_t ap_core_output_filter(ap_filter_t *f, apr_bucket_brigade *b) { apr_status_t rv; apr_bucket_brigade *more; conn_rec *c = f->c; core_net_rec *net = f->ctx; core_output_filter_ctx_t *ctx = net->out_ctx; apr_read_type_e eblock = APR_NONBLOCK_READ; apr_pool_t *input_pool = b->p; /* Fail quickly if the connection has already been aborted. */ if (c->aborted) { apr_brigade_cleanup(b); return APR_ECONNABORTED; } if (ctx == NULL) { ctx = apr_pcalloc(c->pool, sizeof(*ctx)); net->out_ctx = ctx; } /* If we have a saved brigade, concatenate the new brigade to it */ if (ctx->b) { APR_BRIGADE_CONCAT(ctx->b, b); b = ctx->b; ctx->b = NULL; } /* Perform multiple passes over the brigade, sending batches of output to the connection. */ while (b && !APR_BRIGADE_EMPTY(b)) { apr_size_t nbytes = 0; apr_bucket *last_e = NULL; /* initialized for debugging */ apr_bucket *e; /* one group of iovecs per pass over the brigade */ apr_size_t nvec = 0; apr_size_t nvec_trailers = 0; struct iovec vec[MAX_IOVEC_TO_WRITE]; struct iovec vec_trailers[MAX_IOVEC_TO_WRITE]; /* one file per pass over the brigade */ apr_file_t *fd = NULL; apr_size_t flen = 0; apr_off_t foffset = 0; /* keep track of buckets that we've concatenated * to avoid small writes */ apr_bucket *last_merged_bucket = NULL; /* tail of brigade if we need another pass */ more = NULL; /* Iterate over the brigade: collect iovecs and/or a file */ for (e = APR_BRIGADE_FIRST(b); e != APR_BRIGADE_SENTINEL(b); e = APR_BUCKET_NEXT(e)) { /* keep track of the last bucket processed */ last_e = e; if (APR_BUCKET_IS_EOS(e) || AP_BUCKET_IS_EOC(e)) { break; } else if (APR_BUCKET_IS_FLUSH(e)) { if (e != APR_BRIGADE_LAST(b)) { more = apr_brigade_split(b, APR_BUCKET_NEXT(e)); } break; } /* It doesn't make any sense to use sendfile for a file bucket * that represents 10 bytes. */ else if (APR_BUCKET_IS_FILE(e) && (e->length >= AP_MIN_SENDFILE_BYTES)) { apr_bucket_file *a = e->data; /* We can't handle more than one file bucket at a time * so we split here and send the file we have already * found. */ if (fd) { more = apr_brigade_split(b, e); break; } fd = a->fd; flen = e->length; foffset = e->start; } else { const char *str; apr_size_t n; rv = apr_bucket_read(e, &str, &n, eblock); if (APR_STATUS_IS_EAGAIN(rv)) { /* send what we have so far since we shouldn't expect more * output for a while... next time we read, block */ more = apr_brigade_split(b, e); eblock = APR_BLOCK_READ; break; } eblock = APR_NONBLOCK_READ; if (n) { if (!fd) { if (nvec == MAX_IOVEC_TO_WRITE) { /* woah! too many. buffer them up, for use later. */ apr_bucket *temp, *next; apr_bucket_brigade *temp_brig; if (nbytes >= AP_MIN_BYTES_TO_WRITE) { /* We have enough data in the iovec * to justify doing a writev */ more = apr_brigade_split(b, e); break; } /* Create a temporary brigade as a means * of concatenating a bunch of buckets together */ temp_brig = apr_brigade_create(f->c->pool, f->c->bucket_alloc); if (last_merged_bucket) { /* If we've concatenated together small * buckets already in a previous pass, * the initial buckets in this brigade * are heap buckets that may have extra * space left in them (because they * were created by apr_brigade_write()). * We can take advantage of this by * building the new temp brigade out of * these buckets, so that the content * in them doesn't have to be copied again. 
*/ APR_BRIGADE_PREPEND(b, temp_brig); brigade_move(temp_brig, b, APR_BUCKET_NEXT(last_merged_bucket)); } temp = APR_BRIGADE_FIRST(b); while (temp != e) { apr_bucket *d; rv = apr_bucket_read(temp, &str, &n, APR_BLOCK_READ); apr_brigade_write(temp_brig, NULL, NULL, str, n); d = temp; temp = APR_BUCKET_NEXT(temp); apr_bucket_delete(d); } nvec = 0; nbytes = 0; temp = APR_BRIGADE_FIRST(temp_brig); APR_BUCKET_REMOVE(temp); APR_BRIGADE_INSERT_HEAD(b, temp); apr_bucket_read(temp, &str, &n, APR_BLOCK_READ); vec[nvec].iov_base = (char*) str; vec[nvec].iov_len = n; nvec++; /* Just in case the temporary brigade has * multiple buckets, recover the rest of * them and put them in the brigade that * we're sending. */ for (next = APR_BRIGADE_FIRST(temp_brig); next != APR_BRIGADE_SENTINEL(temp_brig); next = APR_BRIGADE_FIRST(temp_brig)) { APR_BUCKET_REMOVE(next); APR_BUCKET_INSERT_AFTER(temp, next); temp = next; apr_bucket_read(next, &str, &n, APR_BLOCK_READ); vec[nvec].iov_base = (char*) str; vec[nvec].iov_len = n; nvec++; } apr_brigade_destroy(temp_brig); last_merged_bucket = temp; e = temp; last_e = e; } else { vec[nvec].iov_base = (char*) str; vec[nvec].iov_len = n; nvec++; } } else { /* The bucket is a trailer to a file bucket */ if (nvec_trailers == MAX_IOVEC_TO_WRITE) { /* woah! too many. stop now. */ more = apr_brigade_split(b, e); break; } vec_trailers[nvec_trailers].iov_base = (char*) str; vec_trailers[nvec_trailers].iov_len = n; nvec_trailers++; } nbytes += n; } } } /* Completed iterating over the brigade, now determine if we want * to buffer the brigade or send the brigade out on the network. * * Save if we haven't accumulated enough bytes to send, the connection * is not about to be closed, and: * * 1) we didn't see a file, we don't have more passes over the * brigade to perform, AND we didn't stop at a FLUSH bucket. * (IOW, we will save plain old bytes such as HTTP headers) * or * 2) we hit the EOS and have a keep-alive connection * (IOW, this response is a bit more complex, but we save it * with the hope of concatenating with another response) */ if (nbytes + flen < AP_MIN_BYTES_TO_WRITE && !AP_BUCKET_IS_EOC(last_e) && ((!fd && !more && !APR_BUCKET_IS_FLUSH(last_e)) || (APR_BUCKET_IS_EOS(last_e) && c->keepalive == AP_CONN_KEEPALIVE))) { /* NEVER save an EOS in here. If we are saving a brigade with * an EOS bucket, then we are doing keepalive connections, and * we want to process to second request fully. */ if (APR_BUCKET_IS_EOS(last_e)) { apr_bucket *bucket; int file_bucket_saved = 0; apr_bucket_delete(last_e); for (bucket = APR_BRIGADE_FIRST(b); bucket != APR_BRIGADE_SENTINEL(b); bucket = APR_BUCKET_NEXT(bucket)) { /* Do a read on each bucket to pull in the * data from pipe and socket buckets, so * that we don't leave their file descriptors * open indefinitely. Do the same for file * buckets, with one exception: allow the * first file bucket in the brigade to remain * a file bucket, so that we don't end up * doing an mmap+memcpy every time a client * requests a <8KB file over a keepalive * connection. 
*/ if (APR_BUCKET_IS_FILE(bucket) && !file_bucket_saved) { file_bucket_saved = 1; } else { const char *buf; apr_size_t len = 0; rv = apr_bucket_read(bucket, &buf, &len, APR_BLOCK_READ); if (rv != APR_SUCCESS) { ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, c, "core_output_filter:" " Error reading from bucket."); return HTTP_INTERNAL_SERVER_ERROR; } } } } if (!ctx->deferred_write_pool) { apr_pool_create(&ctx->deferred_write_pool, c->pool); apr_pool_tag(ctx->deferred_write_pool, "deferred_write"); } ap_save_brigade(f, &ctx->b, &b, ctx->deferred_write_pool); return APR_SUCCESS; } if (fd) { apr_hdtr_t hdtr; apr_size_t bytes_sent; #if APR_HAS_SENDFILE apr_int32_t flags = 0; #endif memset(&hdtr, '\0', sizeof(hdtr)); if (nvec) { hdtr.numheaders = nvec; hdtr.headers = vec; } if (nvec_trailers) { hdtr.numtrailers = nvec_trailers; hdtr.trailers = vec_trailers; } #if APR_HAS_SENDFILE if (apr_file_flags_get(fd) & APR_SENDFILE_ENABLED) { if (c->keepalive == AP_CONN_CLOSE && APR_BUCKET_IS_EOS(last_e)) { /* Prepare the socket to be reused */ flags |= APR_SENDFILE_DISCONNECT_SOCKET; } rv = sendfile_it_all(net, /* the network information */ fd, /* the file to send */ &hdtr, /* header and trailer iovecs */ foffset, /* offset in the file to begin sending from */ flen, /* length of file */ nbytes + flen, /* total length including headers */ &bytes_sent, /* how many bytes were sent */ flags); /* apr_sendfile flags */ } else #endif { rv = emulate_sendfile(net, fd, &hdtr, foffset, flen, &bytes_sent); } if (logio_add_bytes_out && bytes_sent > 0) logio_add_bytes_out(c, bytes_sent); fd = NULL; } else { apr_size_t bytes_sent; rv = writev_it_all(net->client_socket, vec, nvec, nbytes, &bytes_sent); if (logio_add_bytes_out && bytes_sent > 0) logio_add_bytes_out(c, bytes_sent); } apr_brigade_cleanup(b); /* drive cleanups for resources which were set aside * this may occur before or after termination of the request which * created the resource */ if (ctx->deferred_write_pool) { if (more && more->p == ctx->deferred_write_pool) { /* "more" belongs to the deferred_write_pool, * which is about to be cleared. */ if (APR_BRIGADE_EMPTY(more)) { more = NULL; } else { /* uh oh... change more's lifetime * to the input brigade's lifetime */ apr_bucket_brigade *tmp_more = more; more = NULL; ap_save_brigade(f, &more, &tmp_more, input_pool); } } apr_pool_clear(ctx->deferred_write_pool); } if (rv != APR_SUCCESS) { ap_log_cerror(APLOG_MARK, APLOG_INFO, rv, c, "core_output_filter: writing data to the network"); if (more) apr_brigade_cleanup(more); /* No need to check for SUCCESS, we did that above. */ if (!APR_STATUS_IS_EAGAIN(rv)) { c->aborted = 1; return APR_ECONNABORTED; } return APR_SUCCESS; } b = more; more = NULL; } /* end while () */ return APR_SUCCESS; }
apr_status_t ap_http_chunk_filter(ap_filter_t *f, apr_bucket_brigade *b) { #define ASCII_CRLF "\015\012" #define ASCII_ZERO "\060" conn_rec *c = f->r->connection; apr_bucket_brigade *more; apr_bucket *e; apr_status_t rv; for (more = NULL; b; b = more, more = NULL) { apr_off_t bytes = 0; apr_bucket *eos = NULL; apr_bucket *flush = NULL; /* XXX: chunk_hdr must remain at this scope since it is used in a * transient bucket. */ char chunk_hdr[20]; /* enough space for the snprintf below */ for (e = APR_BRIGADE_FIRST(b); e != APR_BRIGADE_SENTINEL(b); e = APR_BUCKET_NEXT(e)) { if (APR_BUCKET_IS_EOS(e)) { /* there shouldn't be anything after the eos */ eos = e; break; } if (AP_BUCKET_IS_ERROR(e) && (((ap_bucket_error *)(e->data))->status == HTTP_BAD_GATEWAY)) { /* * We had a broken backend. Memorize this in the filter * context. */ f->ctx = &bad_gateway_seen; continue; } if (APR_BUCKET_IS_FLUSH(e)) { flush = e; if (e != APR_BRIGADE_LAST(b)) { more = apr_brigade_split(b, APR_BUCKET_NEXT(e)); } break; } else if (e->length == (apr_size_t)-1) { /* unknown amount of data (e.g. a pipe) */ const char *data; apr_size_t len; rv = apr_bucket_read(e, &data, &len, APR_BLOCK_READ); if (rv != APR_SUCCESS) { return rv; } if (len > 0) { /* * There may be a new next bucket representing the * rest of the data stream on which a read() may * block so we pass down what we have so far. */ bytes += len; more = apr_brigade_split(b, APR_BUCKET_NEXT(e)); break; } else { /* If there was nothing in this bucket then we can * safely move on to the next one without pausing * to pass down what we have counted up so far. */ continue; } } else { bytes += e->length; } } /* * XXX: if there aren't very many bytes at this point it may * be a good idea to set them aside and return for more, * unless we haven't finished counting this brigade yet. */ /* if there are content bytes, then wrap them in a chunk */ if (bytes > 0) { apr_size_t hdr_len; /* * Insert the chunk header, specifying the number of bytes in * the chunk. */ hdr_len = apr_snprintf(chunk_hdr, sizeof(chunk_hdr), "%" APR_UINT64_T_HEX_FMT CRLF, (apr_uint64_t)bytes); ap_xlate_proto_to_ascii(chunk_hdr, hdr_len); e = apr_bucket_transient_create(chunk_hdr, hdr_len, c->bucket_alloc); APR_BRIGADE_INSERT_HEAD(b, e); /* * Insert the end-of-chunk CRLF before an EOS or * FLUSH bucket, or appended to the brigade */ e = apr_bucket_immortal_create(ASCII_CRLF, 2, c->bucket_alloc); if (eos != NULL) { APR_BUCKET_INSERT_BEFORE(eos, e); } else if (flush != NULL) { APR_BUCKET_INSERT_BEFORE(flush, e); } else { APR_BRIGADE_INSERT_TAIL(b, e); } } /* RFC 2616, Section 3.6.1 * * If there is an EOS bucket, then prefix it with: * 1) the last-chunk marker ("0" CRLF) * 2) the trailer * 3) the end-of-chunked body CRLF * * We only do this if we have not seen an error bucket with * status HTTP_BAD_GATEWAY. We have memorized an * error bucket that we had seen in the filter context. * The error bucket with status HTTP_BAD_GATEWAY indicates that the * connection to the backend (mod_proxy) broke in the middle of the * response. In order to signal the client that something went wrong * we do not create the last-chunk marker and set c->keepalive to * AP_CONN_CLOSE in the core output filter. * * XXX: it would be nice to combine this with the end-of-chunk * marker above, but this is a bit more straight-forward for * now. */ if (eos && !f->ctx) { /* XXX: (2) trailers ... 
does not yet exist */ e = apr_bucket_immortal_create(ASCII_ZERO ASCII_CRLF /* <trailers> */ ASCII_CRLF, 5, c->bucket_alloc); APR_BUCKET_INSERT_BEFORE(eos, e); } /* pass the brigade to the next filter. */ rv = ap_pass_brigade(f->next, b); if (rv != APR_SUCCESS || eos != NULL) { return rv; } } return APR_SUCCESS; }
APU_DECLARE(apr_status_t) apr_brigade_writev(apr_bucket_brigade *b, apr_brigade_flush flush, void *ctx, const struct iovec *vec, apr_size_t nvec) { apr_bucket *e; apr_size_t total_len; apr_size_t i; char *buf; /* Compute the total length of the data to be written. */ total_len = 0; for (i = 0; i < nvec; i++) { total_len += vec[i].iov_len; } /* If the data to be written is very large, try to convert * the iovec to transient buckets rather than copying. */ if (total_len > APR_BUCKET_BUFF_SIZE) { if (flush) { for (i = 0; i < nvec; i++) { e = apr_bucket_transient_create(vec[i].iov_base, vec[i].iov_len, b->bucket_alloc); APR_BRIGADE_INSERT_TAIL(b, e); } return flush(b, ctx); } else { for (i = 0; i < nvec; i++) { e = apr_bucket_heap_create((const char *) vec[i].iov_base, vec[i].iov_len, NULL, b->bucket_alloc); APR_BRIGADE_INSERT_TAIL(b, e); } return APR_SUCCESS; } } i = 0; /* If there is a heap bucket at the end of the brigade * already, and its refcount is 1, copy into the existing bucket. */ e = APR_BRIGADE_LAST(b); if (!APR_BRIGADE_EMPTY(b) && APR_BUCKET_IS_HEAP(e) && ((apr_bucket_heap *)(e->data))->refcount.refcount == 1) { apr_bucket_heap *h = e->data; apr_size_t remaining = h->alloc_len - (e->length + (apr_size_t)e->start); buf = h->base + e->start + e->length; if (remaining >= total_len) { /* Simple case: all the data will fit in the * existing heap bucket */ for (; i < nvec; i++) { apr_size_t len = vec[i].iov_len; memcpy(buf, (const void *) vec[i].iov_base, len); buf += len; } e->length += total_len; return APR_SUCCESS; } else { /* More complicated case: not all of the data * will fit in the existing heap bucket. The * total data size is <= APR_BUCKET_BUFF_SIZE, * so we'll need only one additional bucket. */ const char *start_buf = buf; for (; i < nvec; i++) { apr_size_t len = vec[i].iov_len; if (len > remaining) { break; } memcpy(buf, (const void *) vec[i].iov_base, len); buf += len; remaining -= len; } e->length += (buf - start_buf); total_len -= (buf - start_buf); if (flush) { apr_status_t rv = flush(b, ctx); if (rv != APR_SUCCESS) { return rv; } } /* Now fall through into the case below to * allocate another heap bucket and copy the * rest of the array. (Note that i is not * reset to zero here; it holds the index * of the first vector element to be * written to the new bucket.) */ } } /* Allocate a new heap bucket, and copy the data into it. * The checks above ensure that the amount of data to be * written here is no larger than APR_BUCKET_BUFF_SIZE. */ buf = apr_bucket_alloc(APR_BUCKET_BUFF_SIZE, b->bucket_alloc); e = apr_bucket_heap_create(buf, APR_BUCKET_BUFF_SIZE, apr_bucket_free, b->bucket_alloc); for (; i < nvec; i++) { apr_size_t len = vec[i].iov_len; memcpy(buf, (const void *) vec[i].iov_base, len); buf += len; } e->length = total_len; APR_BRIGADE_INSERT_TAIL(b, e); return APR_SUCCESS; }
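A minimal caller-side sketch of apr_brigade_writev() (not from the sources above): for data smaller than APR_BUCKET_BUFF_SIZE both iovec elements end up buffered in a single heap bucket, as the implementation above shows. The function name and the literal strings are illustrative.

/* Hedged sketch: write two iovec elements into a brigade with no flush
 * callback, so small data is simply buffered.  Names are illustrative. */
#define APR_WANT_IOVEC
#include "apr_want.h"
#include "apr_buckets.h"

static apr_status_t writev_demo(apr_bucket_brigade *bb)
{
    struct iovec vec[2];

    vec[0].iov_base = (void *)"Status: ";
    vec[0].iov_len  = 8;
    vec[1].iov_base = (void *)"200 OK\r\n";
    vec[1].iov_len  = 8;

    return apr_brigade_writev(bb, NULL, NULL, vec, 2);
}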
static apr_status_t store_body(cache_handle_t *h, request_rec *r,
                               apr_bucket_brigade *bb)
{
    apr_bucket *e;
    apr_status_t rv;
    disk_cache_object_t *dobj = (disk_cache_object_t *) h->cache_obj->vobj;
    disk_cache_conf *conf = ap_get_module_config(r->server->module_config,
                                                 &disk_cache_module);

    /* We write to a temp file and then atomically rename the file over
     * in file_cache_el_final().
     */
    if (!dobj->tfd) {
        rv = apr_file_mktemp(&dobj->tfd, dobj->tempfile,
                             APR_CREATE | APR_WRITE | APR_BINARY
                             | APR_BUFFERED | APR_EXCL, r->pool);
        if (rv != APR_SUCCESS) {
            return rv;
        }
        dobj->file_size = 0;
    }

    for (e = APR_BRIGADE_FIRST(bb);
         e != APR_BRIGADE_SENTINEL(bb);
         e = APR_BUCKET_NEXT(e))
    {
        const char *str;
        apr_size_t length, written;

        rv = apr_bucket_read(e, &str, &length, APR_BLOCK_READ);
        if (rv != APR_SUCCESS) {
            ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
                         "cache_disk: Error when reading bucket for URL %s",
                         h->cache_obj->key);
            /* Remove the intermediate cache file and return non-APR_SUCCESS */
            file_cache_errorcleanup(dobj, r);
            return rv;
        }

        rv = apr_file_write_full(dobj->tfd, str, length, &written);
        if (rv != APR_SUCCESS) {
            ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
                         "cache_disk: Error when writing cache file for URL %s",
                         h->cache_obj->key);
            /* Remove the intermediate cache file and return non-APR_SUCCESS */
            file_cache_errorcleanup(dobj, r);
            return rv;
        }

        dobj->file_size += written;
        if (dobj->file_size > conf->maxfs) {
            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                         "cache_disk: URL %s failed the size check "
                         "(%" APR_OFF_T_FMT " > %" APR_OFF_T_FMT ")",
                         h->cache_obj->key, dobj->file_size, conf->maxfs);
            /* Remove the intermediate cache file and return non-APR_SUCCESS */
            file_cache_errorcleanup(dobj, r);
            return APR_EGENERAL;
        }
    }

    /* Was this the final bucket? If yes, close the temp file and perform
     * sanity checks.
     */
    if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(bb))) {
        if (r->connection->aborted || r->no_cache) {
            ap_log_error(APLOG_MARK, APLOG_INFO, 0, r->server,
                         "disk_cache: Discarding body for URL %s "
                         "because connection has been aborted.",
                         h->cache_obj->key);
            /* Remove the intermediate cache file and return non-APR_SUCCESS */
            file_cache_errorcleanup(dobj, r);
            return APR_EGENERAL;
        }
        if (dobj->file_size < conf->minfs) {
            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                         "cache_disk: URL %s failed the size check "
                         "(%" APR_OFF_T_FMT " < %" APR_OFF_T_FMT ")",
                         h->cache_obj->key, dobj->file_size, conf->minfs);
            /* Remove the intermediate cache file and return non-APR_SUCCESS */
            file_cache_errorcleanup(dobj, r);
            return APR_EGENERAL;
        }

        /* All checks were fine. Move tempfile to final destination */
        /* Link to the perm file, and close the descriptor */
        file_cache_el_final(dobj, r);
        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                     "disk_cache: Body for URL %s cached.", dobj->name);
    }

    return APR_SUCCESS;
}
static apr_status_t rate_limit_filter(ap_filter_t *f, apr_bucket_brigade *bb) { apr_status_t rv = APR_SUCCESS; rl_ctx_t *ctx = f->ctx; apr_bucket_alloc_t *ba = f->r->connection->bucket_alloc; /* Set up our rl_ctx_t on first use */ if (ctx == NULL) { const char *rl = NULL; int ratelimit; int burst = 0; /* no subrequests. */ if (f->r->main != NULL) { ap_remove_output_filter(f); return ap_pass_brigade(f->next, bb); } /* Configuration: rate limit */ rl = apr_table_get(f->r->subprocess_env, "rate-limit"); if (rl == NULL) { ap_remove_output_filter(f); return ap_pass_brigade(f->next, bb); } /* rl is in kilo bytes / second */ ratelimit = atoi(rl) * 1024; if (ratelimit <= 0) { /* remove ourselves */ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r, APLOGNO(03488) "rl: disabling: rate-limit = %s (too high?)", rl); ap_remove_output_filter(f); return ap_pass_brigade(f->next, bb); } /* Configuration: optional initial burst */ rl = apr_table_get(f->r->subprocess_env, "rate-initial-burst"); if (rl != NULL) { burst = atoi(rl) * 1024; if (burst <= 0) { ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r, APLOGNO(03489) "rl: disabling burst: rate-initial-burst = %s (too high?)", rl); burst = 0; } } /* Set up our context */ ctx = apr_palloc(f->r->pool, sizeof(rl_ctx_t)); f->ctx = ctx; ctx->state = RATE_LIMIT; ctx->speed = ratelimit; ctx->burst = burst; ctx->do_sleep = 0; /* calculate how many bytes / interval we want to send */ /* speed is bytes / second, so, how many (speed / 1000 % interval) */ ctx->chunk_size = (ctx->speed / (1000 / RATE_INTERVAL_MS)); ctx->tmpbb = apr_brigade_create(f->r->pool, ba); ctx->holdingbb = apr_brigade_create(f->r->pool, ba); } else { APR_BRIGADE_PREPEND(bb, ctx->holdingbb); } while (!APR_BRIGADE_EMPTY(bb)) { apr_bucket *e; if (ctx->state == RATE_FULLSPEED) { /* Find where we 'stop' going full speed. */ for (e = APR_BRIGADE_FIRST(bb); e != APR_BRIGADE_SENTINEL(bb); e = APR_BUCKET_NEXT(e)) { if (AP_RL_BUCKET_IS_END(e)) { apr_brigade_split_ex(bb, e, ctx->holdingbb); ctx->state = RATE_LIMIT; break; } } e = apr_bucket_flush_create(ba); APR_BRIGADE_INSERT_TAIL(bb, e); rv = ap_pass_brigade(f->next, bb); apr_brigade_cleanup(bb); if (rv != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, f->r, APLOGNO(01455) "rl: full speed brigade pass failed."); return rv; } } else { for (e = APR_BRIGADE_FIRST(bb); e != APR_BRIGADE_SENTINEL(bb); e = APR_BUCKET_NEXT(e)) { if (AP_RL_BUCKET_IS_START(e)) { apr_brigade_split_ex(bb, e, ctx->holdingbb); ctx->state = RATE_FULLSPEED; break; } } while (!APR_BRIGADE_EMPTY(bb)) { apr_off_t len = ctx->chunk_size + ctx->burst; APR_BRIGADE_CONCAT(ctx->tmpbb, bb); /* * Pull next chunk of data; the initial amount is our * burst allotment (if any) plus a chunk. All subsequent * iterations are just chunks with whatever remaining * burst amounts we have left (in case not done in the * first bucket). */ rv = apr_brigade_partition(ctx->tmpbb, len, &e); if (rv != APR_SUCCESS && rv != APR_INCOMPLETE) { ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, f->r, APLOGNO(01456) "rl: partition failed."); return rv; } /* Send next metadata now if any */ while (e != APR_BRIGADE_SENTINEL(ctx->tmpbb) && APR_BUCKET_IS_METADATA(e)) { e = APR_BUCKET_NEXT(e); } if (e != APR_BRIGADE_SENTINEL(ctx->tmpbb)) { apr_brigade_split_ex(ctx->tmpbb, e, bb); } else { apr_brigade_length(ctx->tmpbb, 1, &len); } /* * Adjust the burst amount depending on how much * we've done up to now. 
*/ if (ctx->burst) { ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r, APLOGNO(03485) "rl: burst %d; len %"APR_OFF_T_FMT, ctx->burst, len); if (len < ctx->burst) { ctx->burst -= len; } else { ctx->burst = 0; } } e = APR_BRIGADE_LAST(ctx->tmpbb); if (APR_BUCKET_IS_EOS(e)) { ap_remove_output_filter(f); } else if (!APR_BUCKET_IS_FLUSH(e)) { if (APR_BRIGADE_EMPTY(bb)) { /* Wait for more (or next call) */ break; } e = apr_bucket_flush_create(ba); APR_BRIGADE_INSERT_TAIL(ctx->tmpbb, e); } #if defined(RLFDEBUG) brigade_dump(f->r, ctx->tmpbb); brigade_dump(f->r, bb); #endif /* RLFDEBUG */ if (ctx->do_sleep) { apr_sleep(RATE_INTERVAL_MS * 1000); } else { ctx->do_sleep = 1; } rv = ap_pass_brigade(f->next, ctx->tmpbb); apr_brigade_cleanup(ctx->tmpbb); if (rv != APR_SUCCESS) { /* Most often, user disconnects from stream */ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, f->r, APLOGNO(01457) "rl: brigade pass failed."); return rv; } } } if (!APR_BRIGADE_EMPTY(ctx->holdingbb)) { /* Any rate-limited data in tmpbb is sent unlimited along * with the rest. */ APR_BRIGADE_CONCAT(bb, ctx->tmpbb); APR_BRIGADE_CONCAT(bb, ctx->holdingbb); } } #if defined(RLFDEBUG) brigade_dump(f->r, ctx->tmpbb); #endif /* RLFDEBUG */ /* Save remaining tmpbb with the correct lifetime for the next call */ return ap_save_brigade(f, &ctx->holdingbb, &ctx->tmpbb, f->r->pool); }
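The rate_limit_filter() above repeatedly carves fixed-size chunks off the incoming brigade with APR_BRIGADE_CONCAT, apr_brigade_partition(), and apr_brigade_split_ex(). A reduced, hedged sketch of that idiom follows; the helper name and variable names are illustrative and not taken from the module.

/* Hedged sketch of the partition-then-split idiom used by the rate limiter:
 * move at most `chunk` bytes into tmpbb and leave the rest in bb. */
static apr_status_t take_chunk(apr_bucket_brigade *tmpbb,
                               apr_bucket_brigade *bb, apr_off_t chunk)
{
    apr_bucket *after;
    apr_status_t rv;

    APR_BRIGADE_CONCAT(tmpbb, bb);            /* tmpbb now owns all the data */

    rv = apr_brigade_partition(tmpbb, chunk, &after);
    if (rv != APR_SUCCESS && rv != APR_INCOMPLETE) {
        return rv;                            /* a real error, not "too short" */
    }

    if (after != APR_BRIGADE_SENTINEL(tmpbb)) {
        /* More than `chunk` bytes were available: move the excess back. */
        apr_brigade_split_ex(tmpbb, after, bb);
    }
    return APR_SUCCESS;
}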