static void test_partition(abts_case *tc, void *data)
{
    apr_bucket_alloc_t *ba = apr_bucket_alloc_create(p);
    apr_bucket_brigade *bb = apr_brigade_create(p, ba);
    apr_bucket *e;

    e = apr_bucket_immortal_create(hello, strlen(hello), ba);
    APR_BRIGADE_INSERT_HEAD(bb, e);

    apr_assert_success(tc, "partition brigade",
                       apr_brigade_partition(bb, 5, &e));
    test_bucket_content(tc, APR_BRIGADE_FIRST(bb), "hello", 5);
    test_bucket_content(tc, APR_BRIGADE_LAST(bb), ", world", 7);

    ABTS_ASSERT(tc, "partition returns APR_INCOMPLETE",
                apr_brigade_partition(bb, 8192, &e) == APR_INCOMPLETE);
    ABTS_ASSERT(tc, "APR_INCOMPLETE partition returned sentinel",
                e == APR_BRIGADE_SENTINEL(bb));

    apr_brigade_destroy(bb);
    apr_bucket_alloc_destroy(ba);
}
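
/* Standalone sketch of the same apr_brigade_partition() behaviour outside the
 * test harness. This is illustrative only and assumes APR and APR-util are
 * available; the local "hello" string stands in for the test fixture. After
 * the partition, a bucket boundary exists exactly at byte 5. */
#include <stdio.h>
#include <string.h>
#include <apr_general.h>
#include <apr_buckets.h>

int main(void)
{
    apr_pool_t *pool;
    apr_bucket_alloc_t *ba;
    apr_bucket_brigade *bb;
    apr_bucket *e;
    const char *hello = "hello, world";

    apr_initialize();
    apr_pool_create(&pool, NULL);

    ba = apr_bucket_alloc_create(pool);
    bb = apr_brigade_create(pool, ba);

    /* One immortal bucket holding the whole string. */
    e = apr_bucket_immortal_create(hello, strlen(hello), ba);
    APR_BRIGADE_INSERT_HEAD(bb, e);

    /* Split buckets in place so that offset 5 falls on a bucket boundary:
     * the first bucket is then "hello", the remainder ", world". */
    if (apr_brigade_partition(bb, 5, &e) == APR_SUCCESS) {
        const char *data;
        apr_size_t len;
        apr_bucket_read(APR_BRIGADE_FIRST(bb), &data, &len, APR_BLOCK_READ);
        printf("first bucket: %.*s (%lu bytes)\n",
               (int)len, data, (unsigned long)len);
    }

    apr_brigade_destroy(bb);
    apr_bucket_alloc_destroy(ba);
    apr_pool_destroy(pool);
    apr_terminate();
    return 0;
}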
static ssize_t write_flush(mgs_handle_t * ctxt)
{
    apr_bucket *e;

    if (!(ctxt->output_blen || ctxt->output_length)) {
        ctxt->output_rc = APR_SUCCESS;
        return 1;
    }

    if (ctxt->output_blen) {
        e = apr_bucket_transient_create(ctxt->output_buffer,
                                        ctxt->output_blen,
                                        ctxt->output_bb->bucket_alloc);
        /* we filled this buffer first so add it to the
         * head of the brigade */
        APR_BRIGADE_INSERT_HEAD(ctxt->output_bb, e);
        ctxt->output_blen = 0;
    }

    ctxt->output_length = 0;
    e = apr_bucket_flush_create(ctxt->output_bb->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(ctxt->output_bb, e);

    ctxt->output_rc = ap_pass_brigade(ctxt->output_filter->next,
                                      ctxt->output_bb);
    /* clear the brigade to be ready for next time */
    apr_brigade_cleanup(ctxt->output_bb);

    return (ctxt->output_rc == APR_SUCCESS) ? 1 : -1;
}
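
/* Sketch only (hypothetical helper name): the flush pattern used above,
 * isolated. A FLUSH bucket at the tail tells downstream filters to push
 * everything accumulated so far onto the wire; the brigade is then cleaned
 * up so it can be reused for the next write. */
static apr_status_t flush_output(ap_filter_t *next, apr_bucket_brigade *bb)
{
    apr_bucket *e = apr_bucket_flush_create(bb->bucket_alloc);
    apr_status_t rv;

    APR_BRIGADE_INSERT_TAIL(bb, e);
    rv = ap_pass_brigade(next, bb);
    apr_brigade_cleanup(bb);    /* brigade is empty and reusable afterwards */
    return rv;
}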
static int process_fortune_connection(conn_rec *c)
{
    apr_status_t rv;
    apr_procattr_t *pattr;
    apr_pool_t *p = c->pool;
    apr_bucket *b;
    apr_bucket_brigade *bb;
    const char *err_msg = "200 OK\n";

    fortune_conf_t *fconf =
        ap_get_module_config(c->base_server->module_config, &fortune_module);

    if (!fconf->enabled) {
        return DECLINED;
    }

    bb = apr_brigade_create(p, c->bucket_alloc);

    /* prepare process attribute */
    if ((rv = apr_procattr_create(&pattr, c->pool)) != APR_SUCCESS) {
        goto error;
    }
    if ((rv = apr_procattr_io_set(pattr, APR_NO_PIPE, APR_FULL_BLOCK,
                                  APR_NO_PIPE)) != APR_SUCCESS) {
        goto error;
    }
    /* default value: APR_PROGRAM */
    if ((rv = apr_procattr_cmdtype_set(pattr, APR_PROGRAM_ENV)) != APR_SUCCESS) {
        goto error;
    }

    /* run the program and read the output from the pipe */
    if ((rv = fortune_process(c, pattr, bb)) != APR_SUCCESS) {
        apr_brigade_cleanup(bb);
    }

  error:
    if (rv != APR_SUCCESS) {
        err_msg = "500 ERROR\n";
    }

    b = apr_bucket_pool_create(err_msg, strlen(err_msg), p, c->bucket_alloc);
    APR_BRIGADE_INSERT_HEAD(bb, b);
    b = apr_bucket_flush_create(c->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(bb, b);

    rv = ap_pass_brigade(c->output_filters, bb);

    return OK;
}
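
/* Illustrative sketch, not the module's actual fortune_process(): one way a
 * helper like it could feed the brigade above is to launch the child with
 * its stdout on a pipe (APR_FULL_BLOCK was requested for stdout in the
 * caller) and wrap that pipe in a pipe bucket so downstream filters read it
 * lazily. The program path and function name are assumptions. */
static apr_status_t run_into_brigade(conn_rec *c, apr_procattr_t *pattr,
                                     apr_bucket_brigade *bb)
{
    apr_proc_t proc;
    const char * const args[] = { "/usr/games/fortune", NULL };  /* assumed */
    apr_status_t rv;

    rv = apr_proc_create(&proc, args[0], args, NULL, pattr, c->pool);
    if (rv != APR_SUCCESS) {
        return rv;
    }
    /* proc.out is the read end of the child's stdout pipe. */
    APR_BRIGADE_INSERT_TAIL(bb, apr_bucket_pipe_create(proc.out,
                                                       c->bucket_alloc));
    return APR_SUCCESS;
}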
static apr_status_t recall_body(cache_handle_t *h, apr_pool_t *p,
                                apr_bucket_brigade *bb)
{
    apr_bucket *e;
    disk_cache_object_t *dobj = (disk_cache_object_t *) h->cache_obj->vobj;

    e = apr_bucket_file_create(dobj->fd, 0, (apr_size_t) dobj->file_size, p,
                               bb->bucket_alloc);
    APR_BRIGADE_INSERT_HEAD(bb, e);

    e = apr_bucket_eos_create(bb->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(bb, e);

    return APR_SUCCESS;
}
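
/* A minimal sketch of the same "file bucket at the head, EOS at the tail"
 * pattern in an ordinary httpd handler, assuming the file has already been
 * opened into fd with a known length. The function name and surrounding
 * module boilerplate are hypothetical. */
#include "httpd.h"
#include "util_filter.h"
#include "apr_buckets.h"

static int serve_file_sketch(request_rec *r, apr_file_t *fd, apr_off_t len)
{
    apr_bucket_brigade *bb = apr_brigade_create(r->pool,
                                                r->connection->bucket_alloc);
    apr_bucket *e;

    /* The body itself: a single file bucket covering the whole file. */
    e = apr_bucket_file_create(fd, 0, (apr_size_t)len, r->pool,
                               bb->bucket_alloc);
    APR_BRIGADE_INSERT_HEAD(bb, e);

    /* Terminate the response so downstream filters know it is complete. */
    e = apr_bucket_eos_create(bb->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(bb, e);

    return ap_pass_brigade(r->output_filters, bb) == APR_SUCCESS
           ? OK : HTTP_INTERNAL_SERVER_ERROR;
}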
apr_status_t winnt_insert_network_bucket(conn_rec *c, apr_bucket_brigade *bb,
                                         apr_socket_t *socket)
{
    apr_bucket *e;
    winnt_conn_ctx_t *context = ap_get_module_config(c->conn_config,
                                                     &mpm_winnt_module);

    if (context == NULL || (e = context->overlapped.Pointer) == NULL)
        return AP_DECLINED;

    /* seed the brigade with AcceptEx read heap bucket */
    APR_BRIGADE_INSERT_HEAD(bb, e);
    /* also seed the brigade with the client socket. */
    e = apr_bucket_socket_create(socket, c->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(bb, e);

    return APR_SUCCESS;
}
static void test_simple(abts_case *tc, void *data)
{
    apr_bucket_alloc_t *ba;
    apr_bucket_brigade *bb;
    apr_bucket *fb, *tb;

    ba = apr_bucket_alloc_create(p);
    bb = apr_brigade_create(p, ba);

    fb = APR_BRIGADE_FIRST(bb);
    ABTS_ASSERT(tc, "first bucket of empty brigade is sentinel",
                fb == APR_BRIGADE_SENTINEL(bb));

    fb = apr_bucket_flush_create(ba);
    APR_BRIGADE_INSERT_HEAD(bb, fb);

    ABTS_ASSERT(tc, "first bucket of brigade is flush",
                APR_BRIGADE_FIRST(bb) == fb);
    ABTS_ASSERT(tc, "bucket after flush is sentinel",
                APR_BUCKET_NEXT(fb) == APR_BRIGADE_SENTINEL(bb));

    tb = apr_bucket_transient_create("aaa", 3, ba);
    APR_BUCKET_INSERT_BEFORE(fb, tb);

    ABTS_ASSERT(tc, "bucket before flush now transient",
                APR_BUCKET_PREV(fb) == tb);
    ABTS_ASSERT(tc, "bucket after transient is flush",
                APR_BUCKET_NEXT(tb) == fb);
    ABTS_ASSERT(tc, "bucket before transient is sentinel",
                APR_BUCKET_PREV(tb) == APR_BRIGADE_SENTINEL(bb));

    apr_brigade_cleanup(bb);
    ABTS_ASSERT(tc, "cleaned up brigade was empty", APR_BRIGADE_EMPTY(bb));

    apr_brigade_destroy(bb);
    apr_bucket_alloc_destroy(ba);
}
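
/* Illustrative sketch only (hypothetical function name): the sentinel/NEXT
 * macros exercised above are the usual way to walk a brigade. If buckets may
 * be deleted during the walk, the next pointer has to be taken before the
 * delete, as below. */
static void drop_metadata_buckets(apr_bucket_brigade *bb)
{
    apr_bucket *e = APR_BRIGADE_FIRST(bb);

    while (e != APR_BRIGADE_SENTINEL(bb)) {
        apr_bucket *next = APR_BUCKET_NEXT(e);  /* save before deleting */
        if (APR_BUCKET_IS_METADATA(e)) {
            apr_bucket_delete(e);               /* unlink and destroy */
        }
        e = next;
    }
}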
/**
 * Handles outgoing data. If the filter state indicates that a cross-domain
 * policy should be sent, then it is added to the outgoing brigade of data. If
 * a policy request was not detected, then this filter makes no changes to
 * the outgoing data.
 *
 * @param f the output filter.
 * @param bb the outgoing brigade of data.
 *
 * @return APR_SUCCESS on success, some other status on error.
 */
static int fsp_output_filter(ap_filter_t* f, apr_bucket_brigade* bb)
{
    apr_status_t rval = APR_SUCCESS;
    filter_state* state = f->ctx;

    if(state->found) {
        // found policy-file-request, add response bucket
        // bucket is immortal because the data is stored in the configuration
        // and doesn't need to be copied
        apr_bucket* head = apr_bucket_immortal_create(
            state->cfg->policy, state->cfg->policy_length, bb->bucket_alloc);
        APR_BRIGADE_INSERT_HEAD(bb, head);
    }

    if(rval == APR_SUCCESS) {
        // pass brigade to next filter
        rval = ap_pass_brigade(f->next, bb);
    }

    return rval;
}
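
/* A minimal sketch of the same output-filter shape: prepend a fixed bucket
 * once, then hand the brigade to the next filter. The filter name, the
 * banner text, and the register_hooks wiring are hypothetical; the module
 * struct and configuration are omitted. */
#include "httpd.h"
#include "util_filter.h"
#include "apr_buckets.h"

static const char banner[] = "[banner]\n";

static apr_status_t banner_out_filter(ap_filter_t *f, apr_bucket_brigade *bb)
{
    if (f->ctx == NULL) {               /* only on the first invocation */
        apr_bucket *b = apr_bucket_immortal_create(banner, sizeof(banner) - 1,
                                                   bb->bucket_alloc);
        APR_BRIGADE_INSERT_HEAD(bb, b);
        f->ctx = (void *)1;             /* mark as done */
    }
    return ap_pass_brigade(f->next, bb);
}

static void banner_register_hooks(apr_pool_t *p)
{
    ap_register_output_filter("BANNER_OUT", banner_out_filter, NULL,
                              AP_FTYPE_RESOURCE);
}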
apr_status_t ap_http_chunk_filter(ap_filter_t *f, apr_bucket_brigade *b)
{
#define ASCII_CRLF  "\015\012"
#define ASCII_ZERO  "\060"
    conn_rec *c = f->r->connection;
    apr_bucket_brigade *more;
    apr_bucket *e;
    apr_status_t rv;

    for (more = NULL; b; b = more, more = NULL) {
        apr_off_t bytes = 0;
        apr_bucket *eos = NULL;
        apr_bucket *flush = NULL;
        /* XXX: chunk_hdr must remain at this scope since it is used in a
         *      transient bucket.
         */
        char chunk_hdr[20]; /* enough space for the snprintf below */

        for (e = APR_BRIGADE_FIRST(b);
             e != APR_BRIGADE_SENTINEL(b);
             e = APR_BUCKET_NEXT(e))
        {
            if (APR_BUCKET_IS_EOS(e)) {
                /* there shouldn't be anything after the eos */
                eos = e;
                break;
            }
            if (AP_BUCKET_IS_ERROR(e)
                && (((ap_bucket_error *)(e->data))->status
                    == HTTP_BAD_GATEWAY)) {
                /*
                 * We had a broken backend. Memorize this in the filter
                 * context.
                 */
                f->ctx = &bad_gateway_seen;
                continue;
            }
            if (APR_BUCKET_IS_FLUSH(e)) {
                flush = e;
                more = apr_brigade_split(b, APR_BUCKET_NEXT(e));
                break;
            }
            else if (e->length == (apr_size_t)-1) {
                /* unknown amount of data (e.g. a pipe) */
                const char *data;
                apr_size_t len;

                rv = apr_bucket_read(e, &data, &len, APR_BLOCK_READ);
                if (rv != APR_SUCCESS) {
                    return rv;
                }
                if (len > 0) {
                    /*
                     * There may be a new next bucket representing the
                     * rest of the data stream on which a read() may
                     * block so we pass down what we have so far.
                     */
                    bytes += len;
                    more = apr_brigade_split(b, APR_BUCKET_NEXT(e));
                    break;
                }
                else {
                    /* If there was nothing in this bucket then we can
                     * safely move on to the next one without pausing
                     * to pass down what we have counted up so far.
                     */
                    continue;
                }
            }
            else {
                bytes += e->length;
            }
        }

        /*
         * XXX: if there aren't very many bytes at this point it may
         * be a good idea to set them aside and return for more,
         * unless we haven't finished counting this brigade yet.
         */

        /* if there are content bytes, then wrap them in a chunk */
        if (bytes > 0) {
            apr_size_t hdr_len;
            /*
             * Insert the chunk header, specifying the number of bytes in
             * the chunk.
             */
            hdr_len = apr_snprintf(chunk_hdr, sizeof(chunk_hdr),
                                   "%" APR_UINT64_T_HEX_FMT CRLF,
                                   (apr_uint64_t)bytes);
            ap_xlate_proto_to_ascii(chunk_hdr, hdr_len);
            e = apr_bucket_transient_create(chunk_hdr, hdr_len,
                                            c->bucket_alloc);
            APR_BRIGADE_INSERT_HEAD(b, e);

            /*
             * Insert the end-of-chunk CRLF before an EOS or
             * FLUSH bucket, or appended to the brigade
             */
            e = apr_bucket_immortal_create(ASCII_CRLF, 2, c->bucket_alloc);
            if (eos != NULL) {
                APR_BUCKET_INSERT_BEFORE(eos, e);
            }
            else if (flush != NULL) {
                APR_BUCKET_INSERT_BEFORE(flush, e);
            }
            else {
                APR_BRIGADE_INSERT_TAIL(b, e);
            }
        }

        /* RFC 2616, Section 3.6.1
         *
         * If there is an EOS bucket, then prefix it with:
         *   1) the last-chunk marker ("0" CRLF)
         *   2) the trailer
         *   3) the end-of-chunked body CRLF
         *
         * We only do this if we have not seen an error bucket with
         * status HTTP_BAD_GATEWAY. We have memorized an
         * error bucket that we had seen in the filter context.
         * The error bucket with status HTTP_BAD_GATEWAY indicates that the
         * connection to the backend (mod_proxy) broke in the middle of the
         * response. In order to signal the client that something went wrong
         * we do not create the last-chunk marker and set c->keepalive to
         * AP_CONN_CLOSE in the core output filter.
         *
         * XXX: it would be nice to combine this with the end-of-chunk
         * marker above, but this is a bit more straight-forward for
         * now.
         */
        if (eos && !f->ctx) {
            /* XXX: (2) trailers ... does not yet exist */
            e = apr_bucket_immortal_create(ASCII_ZERO ASCII_CRLF
                                           /* <trailers> */
                                           ASCII_CRLF, 5, c->bucket_alloc);
            APR_BUCKET_INSERT_BEFORE(eos, e);
        }

        /* pass the brigade to the next filter. */
        rv = ap_pass_brigade(f->next, b);
        if (rv != APR_SUCCESS || eos != NULL) {
            return rv;
        }
    }

    return APR_SUCCESS;
}
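
/* Sketch only, not taken from the filter: it just prints the framing the
 * filter emits around a single chunk, assuming APR is available. The chunk
 * header is the payload size in hex followed by CRLF; ap_xlate_proto_to_ascii
 * matters only on EBCDIC platforms and is omitted here. */
#include <stdio.h>
#include <apr_general.h>
#include <apr_strings.h>

int main(void)
{
    char chunk_hdr[20];
    int hdr_len;

    apr_initialize();
    /* A 26-byte chunk is framed as "1a" CRLF, 26 payload bytes, CRLF;
     * the body ends with the last-chunk marker "0" CRLF CRLF. */
    hdr_len = apr_snprintf(chunk_hdr, sizeof(chunk_hdr),
                           "%" APR_UINT64_T_HEX_FMT "\r\n",
                           (apr_uint64_t)26);
    printf("chunk header (%d bytes): %s", hdr_len, chunk_hdr);
    apr_terminate();
    return 0;
}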
/**
 * Process input stream
 */
static apr_status_t helocon_filter_in(ap_filter_t *f, apr_bucket_brigade *b,
                                      ap_input_mode_t mode,
                                      apr_read_type_e block,
                                      apr_off_t readbytes)
{
    conn_rec *c = f->c;
    my_ctx *ctx = f->ctx;

    // Fail quickly if the connection has already been aborted.
    if (c->aborted) {
        apr_brigade_cleanup(b);
        return APR_ECONNABORTED;
    }

    // Fast passthrough
    if (ctx->phase == PHASE_DONE) {
        return ap_get_brigade(f->next, b, mode, block, readbytes);
    }

    // Process Head
    do {
#ifdef DEBUG
        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME
                     "::helocon_filter_in from: %s:%d to port=%d need=%"
                     APR_OFF_T_FMT " phase=%d (1)",
                     _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port,
                     ctx->need, ctx->phase);
#endif
        if (APR_BRIGADE_EMPTY(b)) {
#ifdef DEBUG
            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME
                         "::helocon_filter_in from: %s:%d to port=%d need=%"
                         APR_OFF_T_FMT " phase=%d (2)",
                         _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port,
                         ctx->need, ctx->phase);
#endif
            apr_status_t s = ap_get_brigade(f->next, b, ctx->mode,
                                            APR_BLOCK_READ, ctx->need);
            if (s != APR_SUCCESS) {
#ifdef DEBUG
                ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME
                             "::helocon_filter_in from: %s:%d to port=%d need=%"
                             APR_OFF_T_FMT " phase=%d (fail)(1)",
                             _CLIENT_IP, _CLIENT_ADDR->port,
                             c->local_addr->port, ctx->need, ctx->phase);
#endif
                return s;
            }
        }
        if (ctx->phase == PHASE_DONE) {
            return APR_SUCCESS;
        }
        if (APR_BRIGADE_EMPTY(b)) {
#ifdef DEBUG
            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME
                         "::helocon_filter_in from: %s:%d to port=%d need=%"
                         APR_OFF_T_FMT " phase=%d (empty)",
                         _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port,
                         ctx->need, ctx->phase);
#endif
            return APR_SUCCESS;
        }

        apr_bucket *e = NULL;
        for (e = APR_BRIGADE_FIRST(b);
             e != APR_BRIGADE_SENTINEL(b);
             e = APR_BUCKET_NEXT(e)) {
            if (e->type == NULL) {
#ifdef DEBUG
                ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME
                             "::helocon_filter_in from: %s:%d to port=%d need=%"
                             APR_OFF_T_FMT " phase=%d (type=NULL)",
                             _CLIENT_IP, _CLIENT_ADDR->port,
                             c->local_addr->port, ctx->need, ctx->phase);
#endif
                return APR_SUCCESS;
            }

            // We need more data
            if (ctx->need > 0) {
                const char *str = NULL;
                apr_size_t length = 0;
                apr_status_t s = apr_bucket_read(e, &str, &length,
                                                 APR_BLOCK_READ);
                if (s != APR_SUCCESS) {
#ifdef DEBUG
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME
                                 "::helocon_filter_in from: %s:%d to port=%d"
                                 " need=%" APR_OFF_T_FMT
                                 " recv=%" APR_OFF_T_FMT
                                 " phase=%d readed=%" APR_SIZE_T_FMT
                                 " (fail)(2)",
                                 _CLIENT_IP, _CLIENT_ADDR->port,
                                 c->local_addr->port, ctx->need, ctx->recv,
                                 ctx->phase, length);
#endif
                    return s;
                }
#ifdef DEBUG
                ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME
                             "::helocon_filter_in from: %s:%d to port=%d"
                             " need=%" APR_OFF_T_FMT " recv=%" APR_OFF_T_FMT
                             " phase=%d readed=%" APR_SIZE_T_FMT " (3)",
                             _CLIENT_IP, _CLIENT_ADDR->port,
                             c->local_addr->port, ctx->need, ctx->recv,
                             ctx->phase, length);
#endif
                if (length > 0) {
                    if ((ctx->offset + length) > PROXY_MAX_LENGTH) {
                        // Overflow
                        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL,
                                     MODULE_NAME "::helocon_filter_in ERROR: "
                                     "PROXY protocol header overflow from=%s "
                                     "to port=%d length=%" APR_OFF_T_FMT,
                                     _CLIENT_IP, c->local_addr->port,
                                     (ctx->offset + length));
                        goto ABORT_CONN2;
                    }
                    memcpy(ctx->buf + ctx->offset, str, length);
                    if (ctx->pad != ctx->magic) {
                        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL,
                                     MODULE_NAME "::helocon_filter_in padding "
                                     "magic fail (bad=%d vs good=%d)",
                                     ctx->pad, ctx->magic);
                        goto ABORT_CONN;
                    }
                    ctx->offset += length;
                    ctx->recv += length;
                    ctx->need -= length;
                    ctx->buf[ctx->offset] = 0;

                    // delete HEAD
                    if (e->length > length) {
                        apr_bucket_split(e, length);
                    }
                }
                apr_bucket_delete(e);
                if (length == 0) {
#ifdef DEBUG
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME
                                 "::helocon_filter_in DEBUG bucket flush=%d "
                                 "meta=%d",
                                 APR_BUCKET_IS_FLUSH(e) ? 1 : 0,
                                 APR_BUCKET_IS_METADATA(e) ? 1 : 0);
#endif
                    continue;
                }
            }

            // Handle GETLINE mode
            if (ctx->mode == AP_MODE_GETLINE) {
                if ((ctx->need > 0) && (ctx->recv > 2)) {
                    char *end = memchr(ctx->buf, '\r', ctx->offset - 1);
                    if (end) {
#ifdef DEBUG
                        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL,
                                     MODULE_NAME "::helocon_filter_in DEBUG: "
                                     "GETLINE OK");
#endif
                        if ((end[0] == '\r') && (end[1] == '\n')) {
                            ctx->need = 0;
                        }
                    }
                }
            }

            if (ctx->need <= 0) {
#ifdef DEBUG
                ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME
                             "::helocon_filter_in from: %s:%d to port=%d"
                             " need=%" APR_OFF_T_FMT " recv=%" APR_OFF_T_FMT
                             " phase=%d (4)",
                             _CLIENT_IP, _CLIENT_ADDR->port,
                             c->local_addr->port, ctx->need, ctx->recv,
                             ctx->phase);
#endif
                switch (ctx->phase) {
                case PHASE_WANT_HEAD: {
#ifdef DEBUG
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME
                                 "::helocon_filter_in from: %s:%d to port=%d"
                                 " phase=%d checking=%s buf=%s",
                                 _CLIENT_IP, _CLIENT_ADDR->port,
                                 c->local_addr->port, ctx->phase, "HEAD",
                                 ctx->buf);
#endif
                    // TEST Command
#ifdef DEBUG
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME
                                 "::helocon_filter_in DEBUG: CMD=TEST CHECK");
#endif
                    if (strncmp(TEST, ctx->buf, 4) == 0) {
                        apr_socket_t *csd = ap_get_module_config(c->conn_config,
                                                                 &core_module);
                        apr_size_t length = strlen(TEST_RES_OK);
                        apr_socket_send(csd, TEST_RES_OK, &length);
                        apr_socket_shutdown(csd, APR_SHUTDOWN_WRITE);
                        apr_socket_close(csd);
#ifdef DEBUG
                        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL,
                                     MODULE_NAME "::helocon_filter_in DEBUG: "
                                     "CMD=TEST OK");
#endif
                        // No need to check for SUCCESS, we did that above
                        c->aborted = 1;
                        apr_brigade_cleanup(b);
                        return APR_ECONNABORTED;
                    }

                    // HELO Command
#ifdef DEBUG
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME
                                 "::helocon_filter_in DEBUG: CMD=HELO CHECK");
#endif
                    if (strncmp(HELO, ctx->buf, 4) == 0) {
#ifdef DEBUG
                        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL,
                                     MODULE_NAME "::helocon_filter_in DEBUG: "
                                     "CMD=HELO OK");
#endif
                        ctx->phase = PHASE_WANT_BINIP;
                        ctx->mode = AP_MODE_READBYTES;
                        ctx->need = 4;
                        ctx->recv = 0;
                        break;
                    }

                    // PROXY Command
#ifdef DEBUG
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME
                                 "::helocon_filter_in DEBUG: CMD=PROXY CHECK");
#endif
                    if (strncmp(PROXY, ctx->buf, 4) == 0) {
#ifdef DEBUG
                        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL,
                                     MODULE_NAME "::helocon_filter_in DEBUG: "
                                     "CMD=PROXY OK");
#endif
                        ctx->phase = PHASE_WANT_LINE;
                        ctx->mode = AP_MODE_GETLINE;
                        ctx->need = PROXY_MAX_LENGTH - ctx->offset;
                        ctx->recv = 0;
                        break;
                    }

                    // ELSE... GET / POST / etc
                    ctx->phase = PHASE_DONE;
#ifdef DEBUG
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME
                                 "::helocon_filter_in DEBUG from: %s:%d to"
                                 " port=%d newBucket (1) size=%" APR_OFF_T_FMT,
                                 _CLIENT_IP, _CLIENT_ADDR->port,
                                 c->local_addr->port, ctx->offset);
#endif
                    // Restore original data
                    if (ctx->offset) {
                        e = apr_bucket_heap_create(ctx->buf, ctx->offset, NULL,
                                                   c->bucket_alloc);
                        APR_BRIGADE_INSERT_HEAD(b, e);
                        goto END_CONN;
                    }
                    break;
                }
                case PHASE_WANT_BINIP: {
#ifdef DEBUG
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME
                                 "::helocon_filter_in from: %s:%d to port=%d"
                                 " phase=%d checking=%s",
                                 _CLIENT_IP, _CLIENT_ADDR->port,
                                 c->local_addr->port, ctx->phase, "BINIP");
#endif
                    // REWRITE CLIENT IP
                    const char *new_ip = fromBinIPtoString(c->pool,
                                                           ctx->buf + 4);
                    if (!new_ip) {
                        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL,
                                     MODULE_NAME "::helocon_filter_in ERROR: "
                                     "HELO+IP invalid");
                        goto ABORT_CONN;
                    }
                    apr_table_set(c->notes, NOTE_REWRITE_IP, new_ip);
                    ctx->phase = PHASE_DONE;
#ifdef DEBUG
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME
                                 "::helocon_filter_in DEBUG from: %s:%d to"
                                 " port=%d newip=%s",
                                 _CLIENT_IP, _CLIENT_ADDR->port,
                                 c->local_addr->port, new_ip);
#endif
                    break;
                }
                case PHASE_WANT_LINE: {
#ifdef DEBUG
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME
                                 "::helocon_filter_in from: %s:%d to port=%d"
                                 " phase=%d checking=%s buf=%s",
                                 _CLIENT_IP, _CLIENT_ADDR->port,
                                 c->local_addr->port, ctx->phase, "LINE",
                                 ctx->buf);
#endif
                    ctx->phase = PHASE_DONE;
                    char *end = memchr(ctx->buf, '\r', ctx->offset - 1);
                    if (!end) {
                        goto ABORT_CONN;
                    }
                    if ((end[0] != '\r') || (end[1] != '\n')) {
                        goto ABORT_CONN;
                    }
                    if (!process_proxy_header(f)) {
                        goto ABORT_CONN;
                    }

                    // Restore original data
                    int count = (ctx->offset - ((end - ctx->buf) + 2));
#ifdef DEBUG
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME
                                 "::helocon_filter_in DEBUG from: %s:%d to"
                                 " port=%d newBucket (2) size=%d rest=%s",
                                 _CLIENT_IP, _CLIENT_ADDR->port,
                                 c->local_addr->port, count, end + 2);
#endif
                    if (count > 0) {
                        e = apr_bucket_heap_create(end + 2, count, NULL,
                                                   c->bucket_alloc);
                        APR_BRIGADE_INSERT_HEAD(b, e);
                        goto END_CONN;
                    }
                    break;
                }
                }

                if (ctx->phase == PHASE_DONE) {
#ifdef DEBUG
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME
                                 "::helocon_filter_in from: %s:%d to port=%d"
                                 " phase=%d (DONE)",
                                 _CLIENT_IP, _CLIENT_ADDR->port,
                                 c->local_addr->port, ctx->phase);
#endif
                    ctx->mode = mode;
                    ctx->need = 0;
                    ctx->recv = 0;
                }
                break;
            }
        }
    } while (ctx->phase != PHASE_DONE);

  END_CONN:
    return ap_get_brigade(f->next, b, mode, block, readbytes);

  ABORT_CONN:
    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME
                 "::helocon_filter_in ERROR: PROXY protocol header invalid "
                 "from=%s to port=%d",
                 _CLIENT_IP, c->local_addr->port);
  ABORT_CONN2:
    c->aborted = 1;
    apr_brigade_cleanup(b);
    return APR_ECONNABORTED;
}
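
/* Sketch (hypothetical helper): the "Restore original data" step used above,
 * isolated. Bytes that were read for protocol sniffing but not consumed are
 * pushed back onto the head of the brigade as a heap bucket; passing NULL as
 * the free function makes apr_bucket_heap_create() copy the buffer, so the
 * bytes survive after the caller's scratch buffer goes away. */
static void push_back_bytes(apr_bucket_brigade *bb, conn_rec *c,
                            const char *buf, apr_size_t len)
{
    if (len > 0) {
        apr_bucket *e = apr_bucket_heap_create(buf, len, NULL,
                                               c->bucket_alloc);
        APR_BRIGADE_INSERT_HEAD(bb, e);
    }
}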
apr_status_t ap_core_output_filter(ap_filter_t *f, apr_bucket_brigade *b)
{
    apr_status_t rv;
    apr_bucket_brigade *more;
    conn_rec *c = f->c;
    core_net_rec *net = f->ctx;
    core_output_filter_ctx_t *ctx = net->out_ctx;
    apr_read_type_e eblock = APR_NONBLOCK_READ;
    apr_pool_t *input_pool = b->p;

    /* Fail quickly if the connection has already been aborted. */
    if (c->aborted) {
        apr_brigade_cleanup(b);
        return APR_ECONNABORTED;
    }

    if (ctx == NULL) {
        ctx = apr_pcalloc(c->pool, sizeof(*ctx));
        net->out_ctx = ctx;
    }

    /* If we have a saved brigade, concatenate the new brigade to it */
    if (ctx->b) {
        APR_BRIGADE_CONCAT(ctx->b, b);
        b = ctx->b;
        ctx->b = NULL;
    }

    /* Perform multiple passes over the brigade, sending batches of output
       to the connection. */
    while (b && !APR_BRIGADE_EMPTY(b)) {
        apr_size_t nbytes = 0;
        apr_bucket *last_e = NULL; /* initialized for debugging */
        apr_bucket *e;

        /* one group of iovecs per pass over the brigade */
        apr_size_t nvec = 0;
        apr_size_t nvec_trailers = 0;
        struct iovec vec[MAX_IOVEC_TO_WRITE];
        struct iovec vec_trailers[MAX_IOVEC_TO_WRITE];

        /* one file per pass over the brigade */
        apr_file_t *fd = NULL;
        apr_size_t flen = 0;
        apr_off_t foffset = 0;

        /* keep track of buckets that we've concatenated
         * to avoid small writes
         */
        apr_bucket *last_merged_bucket = NULL;

        /* tail of brigade if we need another pass */
        more = NULL;

        /* Iterate over the brigade: collect iovecs and/or a file */
        for (e = APR_BRIGADE_FIRST(b);
             e != APR_BRIGADE_SENTINEL(b);
             e = APR_BUCKET_NEXT(e))
        {
            /* keep track of the last bucket processed */
            last_e = e;
            if (APR_BUCKET_IS_EOS(e) || AP_BUCKET_IS_EOC(e)) {
                break;
            }
            else if (APR_BUCKET_IS_FLUSH(e)) {
                if (e != APR_BRIGADE_LAST(b)) {
                    more = apr_brigade_split(b, APR_BUCKET_NEXT(e));
                }
                break;
            }

            /* It doesn't make any sense to use sendfile for a file bucket
             * that represents 10 bytes.
             */
            else if (APR_BUCKET_IS_FILE(e)
                     && (e->length >= AP_MIN_SENDFILE_BYTES)) {
                apr_bucket_file *a = e->data;

                /* We can't handle more than one file bucket at a time
                 * so we split here and send the file we have already
                 * found.
                 */
                if (fd) {
                    more = apr_brigade_split(b, e);
                    break;
                }

                fd = a->fd;
                flen = e->length;
                foffset = e->start;
            }
            else {
                const char *str;
                apr_size_t n;

                rv = apr_bucket_read(e, &str, &n, eblock);
                if (APR_STATUS_IS_EAGAIN(rv)) {
                    /* send what we have so far since we shouldn't expect more
                     * output for a while...  next time we read, block
                     */
                    more = apr_brigade_split(b, e);
                    eblock = APR_BLOCK_READ;
                    break;
                }
                eblock = APR_NONBLOCK_READ;
                if (n) {
                    if (!fd) {
                        if (nvec == MAX_IOVEC_TO_WRITE) {
                            /* woah! too many. buffer them up, for use later. */
                            apr_bucket *temp, *next;
                            apr_bucket_brigade *temp_brig;

                            if (nbytes >= AP_MIN_BYTES_TO_WRITE) {
                                /* We have enough data in the iovec
                                 * to justify doing a writev
                                 */
                                more = apr_brigade_split(b, e);
                                break;
                            }

                            /* Create a temporary brigade as a means
                             * of concatenating a bunch of buckets together
                             */
                            temp_brig = apr_brigade_create(f->c->pool,
                                                           f->c->bucket_alloc);
                            if (last_merged_bucket) {
                                /* If we've concatenated together small
                                 * buckets already in a previous pass,
                                 * the initial buckets in this brigade
                                 * are heap buckets that may have extra
                                 * space left in them (because they
                                 * were created by apr_brigade_write()).
                                 * We can take advantage of this by
                                 * building the new temp brigade out of
                                 * these buckets, so that the content
                                 * in them doesn't have to be copied again.
                                 */
                                APR_BRIGADE_PREPEND(b, temp_brig);
                                brigade_move(temp_brig, b,
                                        APR_BUCKET_NEXT(last_merged_bucket));
                            }

                            temp = APR_BRIGADE_FIRST(b);
                            while (temp != e) {
                                apr_bucket *d;
                                rv = apr_bucket_read(temp, &str, &n,
                                                     APR_BLOCK_READ);
                                apr_brigade_write(temp_brig, NULL, NULL,
                                                  str, n);
                                d = temp;
                                temp = APR_BUCKET_NEXT(temp);
                                apr_bucket_delete(d);
                            }

                            nvec = 0;
                            nbytes = 0;
                            temp = APR_BRIGADE_FIRST(temp_brig);
                            APR_BUCKET_REMOVE(temp);
                            APR_BRIGADE_INSERT_HEAD(b, temp);
                            apr_bucket_read(temp, &str, &n, APR_BLOCK_READ);
                            vec[nvec].iov_base = (char*) str;
                            vec[nvec].iov_len = n;
                            nvec++;

                            /* Just in case the temporary brigade has
                             * multiple buckets, recover the rest of
                             * them and put them in the brigade that
                             * we're sending.
                             */
                            for (next = APR_BRIGADE_FIRST(temp_brig);
                                 next != APR_BRIGADE_SENTINEL(temp_brig);
                                 next = APR_BRIGADE_FIRST(temp_brig)) {
                                APR_BUCKET_REMOVE(next);
                                APR_BUCKET_INSERT_AFTER(temp, next);
                                temp = next;
                                apr_bucket_read(next, &str, &n,
                                                APR_BLOCK_READ);
                                vec[nvec].iov_base = (char*) str;
                                vec[nvec].iov_len = n;
                                nvec++;
                            }

                            apr_brigade_destroy(temp_brig);

                            last_merged_bucket = temp;
                            e = temp;
                            last_e = e;
                        }
                        else {
                            vec[nvec].iov_base = (char*) str;
                            vec[nvec].iov_len = n;
                            nvec++;
                        }
                    }
                    else {
                        /* The bucket is a trailer to a file bucket */
                        if (nvec_trailers == MAX_IOVEC_TO_WRITE) {
                            /* woah! too many. stop now. */
                            more = apr_brigade_split(b, e);
                            break;
                        }

                        vec_trailers[nvec_trailers].iov_base = (char*) str;
                        vec_trailers[nvec_trailers].iov_len = n;
                        nvec_trailers++;
                    }

                    nbytes += n;
                }
            }
        }

        /* Completed iterating over the brigade, now determine if we want
         * to buffer the brigade or send the brigade out on the network.
         *
         * Save if we haven't accumulated enough bytes to send, the connection
         * is not about to be closed, and:
         *
         *   1) we didn't see a file, we don't have more passes over the
         *      brigade to perform, AND we didn't stop at a FLUSH bucket.
         *      (IOW, we will save plain old bytes such as HTTP headers)
         * or
         *   2) we hit the EOS and have a keep-alive connection
         *      (IOW, this response is a bit more complex, but we save it
         *       with the hope of concatenating with another response)
         */
        if (nbytes + flen < AP_MIN_BYTES_TO_WRITE
            && !AP_BUCKET_IS_EOC(last_e)
            && ((!fd && !more && !APR_BUCKET_IS_FLUSH(last_e))
                || (APR_BUCKET_IS_EOS(last_e)
                    && c->keepalive == AP_CONN_KEEPALIVE))) {

            /* NEVER save an EOS in here.  If we are saving a brigade with
             * an EOS bucket, then we are doing keepalive connections, and
             * we want to process to second request fully.
             */
            if (APR_BUCKET_IS_EOS(last_e)) {
                apr_bucket *bucket;
                int file_bucket_saved = 0;
                apr_bucket_delete(last_e);
                for (bucket = APR_BRIGADE_FIRST(b);
                     bucket != APR_BRIGADE_SENTINEL(b);
                     bucket = APR_BUCKET_NEXT(bucket)) {

                    /* Do a read on each bucket to pull in the
                     * data from pipe and socket buckets, so
                     * that we don't leave their file descriptors
                     * open indefinitely.  Do the same for file
                     * buckets, with one exception: allow the
                     * first file bucket in the brigade to remain
                     * a file bucket, so that we don't end up
                     * doing an mmap+memcpy every time a client
                     * requests a <8KB file over a keepalive
                     * connection.
                     */
                    if (APR_BUCKET_IS_FILE(bucket) && !file_bucket_saved) {
                        file_bucket_saved = 1;
                    }
                    else {
                        const char *buf;
                        apr_size_t len = 0;
                        rv = apr_bucket_read(bucket, &buf, &len,
                                             APR_BLOCK_READ);
                        if (rv != APR_SUCCESS) {
                            ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, c,
                                          "core_output_filter:"
                                          " Error reading from bucket.");
                            return HTTP_INTERNAL_SERVER_ERROR;
                        }
                    }
                }
            }
            if (!ctx->deferred_write_pool) {
                apr_pool_create(&ctx->deferred_write_pool, c->pool);
                apr_pool_tag(ctx->deferred_write_pool, "deferred_write");
            }
            ap_save_brigade(f, &ctx->b, &b, ctx->deferred_write_pool);

            return APR_SUCCESS;
        }

        if (fd) {
            apr_hdtr_t hdtr;
            apr_size_t bytes_sent;
#if APR_HAS_SENDFILE
            apr_int32_t flags = 0;
#endif

            memset(&hdtr, '\0', sizeof(hdtr));
            if (nvec) {
                hdtr.numheaders = nvec;
                hdtr.headers = vec;
            }

            if (nvec_trailers) {
                hdtr.numtrailers = nvec_trailers;
                hdtr.trailers = vec_trailers;
            }

#if APR_HAS_SENDFILE
            if (apr_file_flags_get(fd) & APR_SENDFILE_ENABLED) {

                if (c->keepalive == AP_CONN_CLOSE
                    && APR_BUCKET_IS_EOS(last_e)) {
                    /* Prepare the socket to be reused */
                    flags |= APR_SENDFILE_DISCONNECT_SOCKET;
                }

                rv = sendfile_it_all(net,       /* the network information   */
                                     fd,        /* the file to send          */
                                     &hdtr,     /* header and trailer iovecs */
                                     foffset,   /* offset in the file to begin
                                                   sending from              */
                                     flen,      /* length of file            */
                                     nbytes + flen, /* total length including
                                                       headers               */
                                     &bytes_sent,   /* how many bytes were
                                                       sent                  */
                                     flags);    /* apr_sendfile flags        */
            }
            else
#endif
            {
                rv = emulate_sendfile(net, fd, &hdtr, foffset, flen,
                                      &bytes_sent);
            }

            if (logio_add_bytes_out && bytes_sent > 0)
                logio_add_bytes_out(c, bytes_sent);

            fd = NULL;
        }
        else {
            apr_size_t bytes_sent;

            rv = writev_it_all(net->client_socket,
                               vec, nvec,
                               nbytes, &bytes_sent);

            if (logio_add_bytes_out && bytes_sent > 0)
                logio_add_bytes_out(c, bytes_sent);
        }

        apr_brigade_cleanup(b);

        /* drive cleanups for resources which were set aside
         * this may occur before or after termination of the request which
         * created the resource
         */
        if (ctx->deferred_write_pool) {
            if (more && more->p == ctx->deferred_write_pool) {
                /* "more" belongs to the deferred_write_pool,
                 * which is about to be cleared.
                 */
                if (APR_BRIGADE_EMPTY(more)) {
                    more = NULL;
                }
                else {
                    /* uh oh... change more's lifetime
                     * to the input brigade's lifetime
                     */
                    apr_bucket_brigade *tmp_more = more;
                    more = NULL;
                    ap_save_brigade(f, &more, &tmp_more, input_pool);
                }
            }
            apr_pool_clear(ctx->deferred_write_pool);
        }

        if (rv != APR_SUCCESS) {
            ap_log_cerror(APLOG_MARK, APLOG_INFO, rv, c,
                          "core_output_filter: writing data to the network");

            if (more)
                apr_brigade_cleanup(more);

            /* No need to check for SUCCESS, we did that above. */
            if (!APR_STATUS_IS_EAGAIN(rv)) {
                c->aborted = 1;
                return APR_ECONNABORTED;
            }

            return APR_SUCCESS;
        }

        b = more;
        more = NULL;
    }  /* end while () */

    return APR_SUCCESS;
}
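
/* Sketch only: the heart of the writev path above, reduced to its shape.
 * Buckets are read non-destructively into an iovec array and sent with a
 * single apr_socket_sendv() call. Partial-write retries, file buckets and
 * flow control are all omitted; the function name and vector size are
 * hypothetical. */
#include "apr_network_io.h"
#include "apr_buckets.h"

#define SKETCH_MAX_IOVEC 16

static apr_status_t send_brigade_once(apr_socket_t *sock,
                                      apr_bucket_brigade *bb)
{
    struct iovec vec[SKETCH_MAX_IOVEC];
    apr_int32_t nvec = 0;
    apr_size_t bytes_written;
    apr_bucket *e;

    for (e = APR_BRIGADE_FIRST(bb);
         e != APR_BRIGADE_SENTINEL(bb) && nvec < SKETCH_MAX_IOVEC;
         e = APR_BUCKET_NEXT(e)) {
        const char *data;
        apr_size_t len;
        apr_status_t rv;

        if (APR_BUCKET_IS_METADATA(e)) {
            continue;                      /* EOS/FLUSH carry no bytes */
        }
        rv = apr_bucket_read(e, &data, &len, APR_BLOCK_READ);
        if (rv != APR_SUCCESS) {
            return rv;
        }
        vec[nvec].iov_base = (void *)data;
        vec[nvec].iov_len = len;
        nvec++;
    }

    return nvec ? apr_socket_sendv(sock, vec, nvec, &bytes_written)
                : APR_SUCCESS;
}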
static apr_status_t xlate_in_filter(ap_filter_t *f, apr_bucket_brigade *bb,
                                    ap_input_mode_t mode, apr_read_type_e block,
                                    apr_off_t readbytes)
{
    apr_status_t rv;
    charset_req_t *reqinfo = ap_get_module_config(f->r->request_config,
                                                  &charset_lite_module);
    charset_dir_t *dc = ap_get_module_config(f->r->per_dir_config,
                                             &charset_lite_module);
    charset_filter_ctx_t *ctx = f->ctx;
    apr_size_t buffer_size;
    int hit_eos;

    if (!ctx) {
        /* this is SetInputFilter path; grab the preallocated context,
         * if any; note that if we decided not to do anything in an earlier
         * handler, we won't even have a reqinfo
         */
        if (reqinfo) {
            ctx = f->ctx = reqinfo->input_ctx;
            reqinfo->input_ctx = NULL; /* prevent SNAFU if user coded us twice
                                        * in the filter chain; we can't have two
                                        * instances using the same context
                                        */
        }
        if (!ctx) {
            /* no idea how to translate; don't do anything */
            ctx = f->ctx = apr_pcalloc(f->r->pool,
                                       sizeof(charset_filter_ctx_t));
            ctx->dc = dc;
            ctx->noop = 1;
        }
    }

    ap_log_rerror(APLOG_MARK, APLOG_TRACE6, 0, f->r,
                  "xlate_in_filter() - "
                  "charset_source: %s charset_default: %s",
                  dc && dc->charset_source ? dc->charset_source : "(none)",
                  dc && dc->charset_default ? dc->charset_default : "(none)");

    if (!ctx->ran) {  /* filter never ran before */
        chk_filter_chain(f);
        ctx->ran = 1;
        if (!ctx->noop && !ctx->is_sb
            && apr_table_get(f->r->headers_in, "Content-Length")) {
            /* A Content-Length header is present, but it won't be valid after
             * conversion because we're not converting between two single-byte
             * charsets.  This will affect most CGI scripts and may affect
             * some modules.
             * Content-Length can't be unset here because that would break
             * being able to read the request body.
             * Processing of chunked request bodies is not impacted by this
             * filter since the length was not declared anyway.
             */
            ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, f->r,
                          "Request body length may change, resulting in "
                          "misprocessing by some modules or scripts");
        }
    }

    if (ctx->noop) {
        return ap_get_brigade(f->next, bb, mode, block, readbytes);
    }

    if (APR_BRIGADE_EMPTY(ctx->bb)) {
        if ((rv = ap_get_brigade(f->next, bb, mode, block,
                                 readbytes)) != APR_SUCCESS) {
            return rv;
        }
    }
    else {
        APR_BRIGADE_PREPEND(bb, ctx->bb); /* first use the leftovers */
    }

    buffer_size = INPUT_XLATE_BUF_SIZE;
    rv = xlate_brigade(ctx, bb, ctx->tmp, &buffer_size, &hit_eos);
    if (rv == APR_SUCCESS) {
        if (!hit_eos) {
            /* move anything leftover into our context for next time;
             * we don't currently "set aside" since the data came from
             * down below, but I suspect that for long-term we need to
             * do that
             */
            APR_BRIGADE_CONCAT(ctx->bb, bb);
        }

        if (buffer_size < INPUT_XLATE_BUF_SIZE) { /* do we have output? */
            apr_bucket *e;

            e = apr_bucket_heap_create(ctx->tmp,
                                       INPUT_XLATE_BUF_SIZE - buffer_size,
                                       NULL, f->r->connection->bucket_alloc);
            /* make sure we insert at the head, because there may be
             * an eos bucket already there, and the eos bucket should
             * come after the data
             */
            APR_BRIGADE_INSERT_HEAD(bb, e);
        }
        else {
            /* XXX need to get some more data... what if the last brigade
             * we got had only the first byte of a multibyte char?  we need
             * to grab more data from the network instead of returning an
             * empty brigade
             */
        }

        /* If we have any metadata at the head of ctx->bb, go ahead and move it
         * onto the end of bb to be returned to our caller.
         */
        if (!APR_BRIGADE_EMPTY(ctx->bb)) {
            apr_bucket *b = APR_BRIGADE_FIRST(ctx->bb);
            while (b != APR_BRIGADE_SENTINEL(ctx->bb)
                   && APR_BUCKET_IS_METADATA(b)) {
                APR_BUCKET_REMOVE(b);
                APR_BRIGADE_INSERT_TAIL(bb, b);
                b = APR_BRIGADE_FIRST(ctx->bb);
            }
        }
    }
    else {
        log_xlate_error(f, rv);
    }

    return rv;
}
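
/* Sketch (hypothetical helper names, assuming the module's ctx type): the
 * leftover-carrying pattern used by the translation filter, isolated. Data
 * that cannot be returned this time is parked in a context brigade and
 * prepended on the next invocation; both macros leave the source brigade
 * empty. */
static void stash_leftovers(charset_filter_ctx_t *ctx, apr_bucket_brigade *bb)
{
    APR_BRIGADE_CONCAT(ctx->bb, bb);      /* bb is now empty */
}

static void reuse_leftovers(charset_filter_ctx_t *ctx, apr_bucket_brigade *bb)
{
    APR_BRIGADE_PREPEND(bb, ctx->bb);     /* ctx->bb is now empty */
}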
/*
 * HTTP/1.1 chunked transfer encoding filter.
 */
static apr_status_t chunk_filter(ap_filter_t *f, apr_bucket_brigade *b)
{
#define ASCII_CRLF  "\015\012"
#define ASCII_ZERO  "\060"
    conn_rec *c = f->r->connection;
    apr_bucket_brigade *more;
    apr_bucket *e;
    apr_status_t rv;

    for (more = NULL; b; b = more, more = NULL) {
        apr_off_t bytes = 0;
        apr_bucket *eos = NULL;
        apr_bucket *flush = NULL;
        /* XXX: chunk_hdr must remain at this scope since it is used in a
         *      transient bucket.
         */
        char chunk_hdr[20]; /* enough space for the snprintf below */

        APR_BRIGADE_FOREACH(e, b) {
            if (APR_BUCKET_IS_EOS(e)) {
                /* there shouldn't be anything after the eos */
                eos = e;
                break;
            }
            if (APR_BUCKET_IS_FLUSH(e)) {
                flush = e;
                more = apr_brigade_split(b, APR_BUCKET_NEXT(e));
                break;
            }
            else if (e->length == (apr_size_t)-1) {
                /* unknown amount of data (e.g. a pipe) */
                const char *data;
                apr_size_t len;

                rv = apr_bucket_read(e, &data, &len, APR_BLOCK_READ);
                if (rv != APR_SUCCESS) {
                    return rv;
                }
                if (len > 0) {
                    /*
                     * There may be a new next bucket representing the
                     * rest of the data stream on which a read() may
                     * block so we pass down what we have so far.
                     */
                    bytes += len;
                    more = apr_brigade_split(b, APR_BUCKET_NEXT(e));
                    break;
                }
                else {
                    /* If there was nothing in this bucket then we can
                     * safely move on to the next one without pausing
                     * to pass down what we have counted up so far.
                     */
                    continue;
                }
            }
            else {
                bytes += e->length;
            }
        }

        /*
         * XXX: if there aren't very many bytes at this point it may
         * be a good idea to set them aside and return for more,
         * unless we haven't finished counting this brigade yet.
         */

        /* if there are content bytes, then wrap them in a chunk */
        if (bytes > 0) {
            apr_size_t hdr_len;
            /*
             * Insert the chunk header, specifying the number of bytes in
             * the chunk.
             */
            /* XXX might be nice to have APR_OFF_T_FMT_HEX */
            hdr_len = apr_snprintf(chunk_hdr, sizeof(chunk_hdr),
                                   "%qx" CRLF, (apr_uint64_t)bytes);
            ap_xlate_proto_to_ascii(chunk_hdr, hdr_len);
            e = apr_bucket_transient_create(chunk_hdr, hdr_len,
                                            c->bucket_alloc);
            APR_BRIGADE_INSERT_HEAD(b, e);

            /*
             * Insert the end-of-chunk CRLF before an EOS or
             * FLUSH bucket, or appended to the brigade
             */
            e = apr_bucket_immortal_create(ASCII_CRLF, 2, c->bucket_alloc);
            if (eos != NULL) {
                APR_BUCKET_INSERT_BEFORE(eos, e);
            }
            else if (flush != NULL) {
                APR_BUCKET_INSERT_BEFORE(flush, e);
            }
            else {
                APR_BRIGADE_INSERT_TAIL(b, e);
            }
        }

        /* RFC 2616, Section 3.6.1
         *
         * If there is an EOS bucket, then prefix it with:
         *   1) the last-chunk marker ("0" CRLF)
         *   2) the trailer
         *   3) the end-of-chunked body CRLF
         *
         * If there is no EOS bucket, then do nothing.
         *
         * XXX: it would be nice to combine this with the end-of-chunk
         * marker above, but this is a bit more straight-forward for
         * now.
         */
        if (eos != NULL) {
            /* XXX: (2) trailers ... does not yet exist */
            e = apr_bucket_immortal_create(ASCII_ZERO ASCII_CRLF
                                           /* <trailers> */
                                           ASCII_CRLF, 5, c->bucket_alloc);
            APR_BUCKET_INSERT_BEFORE(eos, e);
        }

        /* pass the brigade to the next filter. */
        rv = ap_pass_brigade(f->next, b);
        if (rv != APR_SUCCESS || eos != NULL) {
            return rv;
        }
    }

    return APR_SUCCESS;
}
static int xlate_in_filter(ap_filter_t *f, apr_bucket_brigade *bb,
                           ap_input_mode_t mode, apr_read_type_e block,
                           apr_off_t readbytes)
{
    apr_status_t rv;
    charset_req_t *reqinfo = ap_get_module_config(f->r->request_config,
                                                  &charset_lite_module);
    charset_dir_t *dc = ap_get_module_config(f->r->per_dir_config,
                                             &charset_lite_module);
    charset_filter_ctx_t *ctx = f->ctx;
    apr_size_t buffer_size;
    int hit_eos;

    if (!ctx) {
        /* this is SetInputFilter path; grab the preallocated context,
         * if any; note that if we decided not to do anything in an earlier
         * handler, we won't even have a reqinfo
         */
        if (reqinfo) {
            ctx = f->ctx = reqinfo->input_ctx;
            reqinfo->input_ctx = NULL; /* prevent SNAFU if user coded us twice
                                        * in the filter chain; we can't have two
                                        * instances using the same context
                                        */
        }
        if (!ctx) {
            /* no idea how to translate; don't do anything */
            ctx = f->ctx = apr_pcalloc(f->r->pool,
                                       sizeof(charset_filter_ctx_t));
            ctx->dc = dc;
            ctx->noop = 1;
        }
    }

    if (dc->debug >= DBGLVL_GORY) {
        ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r,
                      "xlate_in_filter() - "
                      "charset_source: %s charset_default: %s",
                      dc && dc->charset_source ? dc->charset_source : "(none)",
                      dc && dc->charset_default ? dc->charset_default
                                                : "(none)");
    }

    if (!ctx->ran) {  /* filter never ran before */
        chk_filter_chain(f);
        ctx->ran = 1;
    }

    if (ctx->noop) {
        return ap_get_brigade(f->next, bb, mode, block, readbytes);
    }

    if (APR_BRIGADE_EMPTY(ctx->bb)) {
        if ((rv = ap_get_brigade(f->next, bb, mode, block,
                                 readbytes)) != APR_SUCCESS) {
            return rv;
        }
    }
    else {
        APR_BRIGADE_PREPEND(bb, ctx->bb); /* first use the leftovers */
    }

    buffer_size = INPUT_XLATE_BUF_SIZE;
    rv = xlate_brigade(ctx, bb, ctx->tmp, &buffer_size, &hit_eos);
    if (rv == APR_SUCCESS) {
        if (!hit_eos) {
            /* move anything leftover into our context for next time;
             * we don't currently "set aside" since the data came from
             * down below, but I suspect that for long-term we need to
             * do that
             */
            APR_BRIGADE_CONCAT(ctx->bb, bb);
        }

        if (buffer_size < INPUT_XLATE_BUF_SIZE) { /* do we have output? */
            apr_bucket *e;

            e = apr_bucket_heap_create(ctx->tmp,
                                       INPUT_XLATE_BUF_SIZE - buffer_size,
                                       NULL, f->r->connection->bucket_alloc);
            /* make sure we insert at the head, because there may be
             * an eos bucket already there, and the eos bucket should
             * come after the data
             */
            APR_BRIGADE_INSERT_HEAD(bb, e);
        }
        else {
            /* XXX need to get some more data... what if the last brigade
             * we got had only the first byte of a multibyte char?  we need
             * to grab more data from the network instead of returning an
             * empty brigade
             */
        }
    }
    else {
        log_xlate_error(f, rv);
    }

    return rv;
}