/*
 * filter.flush() implementation: append a FLUSH bucket to the filter's
 * output brigade and, for output filters, pass the brigade down the
 * chain immediately.
 *
 * Returns Py_None on success; raises IOError if the downstream pass
 * fails (self->rc carries the APR status).
 */
static PyObject *filter_flush(filterobject *self, PyObject *args)
{
    conn_rec *c = self->request_obj->request_rec->connection;

    /* does the output brigade exist? */
    if (!self->bb_out) {
        self->bb_out = apr_brigade_create(self->f->r->pool, c->bucket_alloc);
    }

    APR_BRIGADE_INSERT_TAIL(self->bb_out,
                            apr_bucket_flush_create(c->bucket_alloc));

    if (!self->is_input) {

        Py_BEGIN_ALLOW_THREADS;
        self->rc = ap_pass_brigade(self->f->next, self->bb_out);
        apr_brigade_destroy(self->bb_out);
        /* BUGFIX: reset the pointer after destroying the brigade so the
         * next flush() recreates it instead of inserting into freed
         * memory (the !self->bb_out guard above would otherwise be
         * satisfied by a dangling pointer). */
        self->bb_out = NULL;
        Py_END_ALLOW_THREADS;

        if (self->rc != APR_SUCCESS) {
            PyErr_SetString(PyExc_IOError, "Flush failed.");
            return NULL;
        }
    }

    Py_INCREF(Py_None);
    return Py_None;
}
/*
 * Push any buffered output, plus a FLUSH bucket, down the output filter
 * chain.  The APR status of the pass is recorded in ctxt->output_rc.
 *
 * Returns 1 on success (including the nothing-to-do case), -1 when the
 * downstream pass failed.
 */
static ssize_t write_flush(mgs_handle_t * ctxt)
{
    apr_bucket *bucket;

    /* Nothing buffered and nothing pending: report success right away. */
    if (!(ctxt->output_blen || ctxt->output_length)) {
        ctxt->output_rc = APR_SUCCESS;
        return 1;
    }

    if (ctxt->output_blen) {
        bucket = apr_bucket_transient_create(ctxt->output_buffer,
                                             ctxt->output_blen,
                                             ctxt->output_bb->bucket_alloc);
        /* we filled this buffer first so add it to the head of the
         * brigade */
        APR_BRIGADE_INSERT_HEAD(ctxt->output_bb, bucket);
        ctxt->output_blen = 0;
    }

    ctxt->output_length = 0;

    bucket = apr_bucket_flush_create(ctxt->output_bb->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(ctxt->output_bb, bucket);

    ctxt->output_rc = ap_pass_brigade(ctxt->output_filter->next,
                                      ctxt->output_bb);

    /* clear the brigade to be ready for next time */
    apr_brigade_cleanup(ctxt->output_bb);

    if (ctxt->output_rc == APR_SUCCESS) {
        return 1;
    }
    return -1;
}
/**
 * This works right now because all timers are invoked in the single listener
 * thread in the Event MPM -- the same thread that serf callbacks are made
 * from, so we don't technically need a mutex yet, but with the Simple MPM,
 * invocations are made from worker threads, and we need to figure out locking
 */
static void timed_cleanup_callback(void *baton)
{
    s_baton_t *ctx = baton;

    /* Causes all serf connections to unregister from the event mpm */
    if (ctx->rstatus) {
        /* serf reported an error: log it, tear down the serf pool and
         * let ap_die() generate the error response. */
        ap_log_rerror(APLOG_MARK, APLOG_ERR, ctx->rstatus, ctx->r,
                      APLOGNO(01119)
                      "serf: request returned: %d", ctx->rstatus);
        ctx->r->status = HTTP_OK;
        apr_pool_destroy(ctx->serf_pool);
        /* NOTE(review): ctx->r (and ctx itself) are used after
         * serf_pool is destroyed -- this assumes neither is allocated
         * from serf_pool; confirm against the baton setup code. */
        ap_die(ctx->rstatus, ctx->r);
    }
    else {
        apr_bucket *e;

        /* Success: terminate the response with FLUSH + EOS buckets and
         * run the normal end-of-request processing. */
        apr_brigade_cleanup(ctx->tmpbb);
        e = apr_bucket_flush_create(ctx->r->connection->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(ctx->tmpbb, e);
        e = apr_bucket_eos_create(ctx->r->connection->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(ctx->tmpbb, e);

        /* TODO: return code? bleh */
        ap_pass_brigade(ctx->r->output_filters, ctx->tmpbb);

        apr_pool_destroy(ctx->serf_pool);
        ap_finalize_request_protocol(ctx->r);
        ap_process_request_after_handler(ctx->r);
        return;
    }
}
/*
 * Flush the connection io: wrap any bytes accumulated in io->buffer in
 * a transient bucket, append a FLUSH bucket, and send the brigade out
 * through the installed filters (e.g. TLS) to the client.
 */
apr_status_t h2_conn_io_flush(h2_conn_io *io)
{
    apr_status_t status;
    apr_bucket *b;

    if (!io->unflushed) {
        return APR_SUCCESS;
    }

    if (io->buflen > 0) {
        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, io->connection,
                      "h2_conn_io: flush, flushing %ld bytes",
                      (long)io->buflen);
        b = apr_bucket_transient_create(io->buffer, io->buflen,
                                        io->output->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(io->output, b);
        io->buflen = 0;
    }

    /* Append flush. */
    APR_BRIGADE_INSERT_TAIL(io->output,
                            apr_bucket_flush_create(io->output->bucket_alloc));

    /* Send it out through installed filters (TLS) to the client */
    status = flush_out(io->output, io);

    if (status == APR_SUCCESS
        || APR_STATUS_IS_ECONNABORTED(status)
        || APR_STATUS_IS_EPIPE(status)) {
        /* These are all fine and no reason for concern. Everything else
         * is interesting. */
        io->unflushed = 0;
    }
    else {
        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, io->connection,
                      "h2_conn_io: flush error");
    }

    return status;
}
/*
 * Internal flush: move buffered bytes into the output brigade and pass
 * the brigade out.  A FLUSH bucket is only appended when 'force' is set.
 */
static apr_status_t h2_conn_io_flush_int(h2_conn_io *io, int force)
{
    if (!io->unflushed && !force) {
        return APR_SUCCESS;
    }

    if (io->buflen > 0) {
        /* something in the buffer, put it in the output brigade */
        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, io->connection,
                      "h2_conn_io: flush, flushing %ld bytes",
                      (long)io->buflen);
        bucketeer_buffer(io);
        io->buflen = 0;
    }

    if (force) {
        APR_BRIGADE_INSERT_TAIL(io->output,
                                apr_bucket_flush_create(io->output->bucket_alloc));
    }

    ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, io->connection,
                  "h2_conn_io: flush");

    /* Send it out */
    io->unflushed = 0;
    return pass_out(io->output, io);
    /* no more access after this, as we might have flushed an EOC bucket
     * that de-allocated us all. */
}
/*
 * Flush the connection io: buffered bytes are bucketeer'd into the
 * output brigade, a FLUSH bucket is appended, and everything is passed
 * out.  io->unflushed is only cleared when the pass succeeded.
 */
apr_status_t h2_conn_io_flush(h2_conn_io *io)
{
    apr_status_t rv;

    if (!io->unflushed) {
        return APR_SUCCESS;
    }

    if (io->buflen > 0) {
        /* something in the buffer, put it in the output brigade */
        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, io->connection,
                      "h2_conn_io: flush, flushing %ld bytes",
                      (long)io->buflen);
        bucketeer_buffer(io);
        io->buflen = 0;
    }

    APR_BRIGADE_INSERT_TAIL(io->output,
                            apr_bucket_flush_create(io->output->bucket_alloc));

    /* Send it out */
    rv = pass_out(io->output, io);
    if (rv != APR_SUCCESS) {
        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, io->connection,
                      "h2_conn_io: flush");
        return rv;
    }

    io->unflushed = 0;
    return APR_SUCCESS;
}
/*
 * Append a FLUSH bucket to the input filter's outgoing brigade.
 * Always succeeds; the bucket is delivered when bb_out is passed on.
 */
MP_INLINE static apr_status_t send_input_flush(modperl_filter_t *filter)
{
    apr_bucket_alloc_t *alloc = filter->f->c->bucket_alloc;
    apr_bucket *flush = apr_bucket_flush_create(alloc);

    APR_BRIGADE_INSERT_TAIL(filter->bb_out, flush);

    MP_TRACE_f(MP_FUNC, MP_FILTER_NAME_FORMAT
               "write out: FLUSH bucket",
               MP_FILTER_NAME(filter->f));

    return APR_SUCCESS;
}
/*
 * Connection handler: run the external fortune program and write its
 * output to the client, preceded by a "200 OK" status line (or
 * "500 ERROR" when any step failed) and terminated by a FLUSH bucket.
 */
static int process_fortune_connection(conn_rec *c)
{
    apr_status_t rv;
    apr_procattr_t *attr;
    apr_pool_t *p = c->pool;
    apr_bucket *bkt;
    apr_bucket_brigade *bb;
    const char *err_msg = "200 OK\n";
    fortune_conf_t *fconf =
        ap_get_module_config(c->base_server->module_config, &fortune_module);

    if (!fconf->enabled) {
        return DECLINED;
    }

    bb = apr_brigade_create(p, c->bucket_alloc);

    /* prepare process attribute */
    rv = apr_procattr_create(&attr, c->pool);
    if (rv != APR_SUCCESS) {
        goto error;
    }
    rv = apr_procattr_io_set(attr, APR_NO_PIPE, APR_FULL_BLOCK, APR_NO_PIPE);
    if (rv != APR_SUCCESS) {
        goto error;
    }
    /* default value: APR_PROGRAM */
    rv = apr_procattr_cmdtype_set(attr, APR_PROGRAM_ENV);
    if (rv != APR_SUCCESS) {
        goto error;
    }

    /* run the program and read the output from the pipe */
    rv = fortune_process(c, attr, bb);
    if (rv != APR_SUCCESS) {
        apr_brigade_cleanup(bb);
    }

  error:
    if (rv != APR_SUCCESS) {
        err_msg = "500 ERROR\n";
    }

    /* the status line goes in front of whatever the brigade holds */
    bkt = apr_bucket_pool_create(err_msg, strlen(err_msg), p,
                                 c->bucket_alloc);
    APR_BRIGADE_INSERT_HEAD(bb, bkt);

    /* make sure everything reaches the client */
    bkt = apr_bucket_flush_create(c->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(bb, bkt);

    rv = ap_pass_brigade(c->output_filters, bb);
    return OK;
}
/*
 * Create a one-bucket brigade holding a FLUSH bucket and pass it down
 * the output filter chain starting at f.
 */
MP_INLINE static apr_status_t send_output_flush(ap_filter_t *f)
{
    apr_bucket_alloc_t *alloc = f->c->bucket_alloc;
    apr_bucket_brigade *flush_bb = apr_brigade_create(MP_FILTER_POOL(f), alloc);

    APR_BRIGADE_INSERT_TAIL(flush_bb, apr_bucket_flush_create(alloc));

    MP_TRACE_f(MP_FUNC, MP_FILTER_NAME_FORMAT
               "write out: FLUSH bucket in separate bb",
               MP_FILTER_NAME(f));

    return ap_pass_brigade(f, flush_bb);
}
/*
 * Close the connection io: flush buffered data, then send an
 * end-of-connection bucket for 'session' followed by a FLUSH bucket.
 * After the pass the EOC may have torn everything down, so nothing is
 * touched afterwards.
 */
apr_status_t h2_conn_io_close(h2_conn_io *io, void *session)
{
    apr_bucket *bkt;

    /* Send out anything in our buffers */
    h2_conn_io_flush_int(io, 0);

    bkt = h2_bucket_eoc_create(io->connection->bucket_alloc, session);
    APR_BRIGADE_INSERT_TAIL(io->output, bkt);

    bkt = apr_bucket_flush_create(io->connection->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(io->output, bkt);

    return ap_pass_brigade(io->connection->output_filters, io->output);
    /* and all is gone */
}
/*
 * Flush a connection: send a FLUSH bucket followed by an
 * End-Of-Connection bucket down the connection's output filters.
 * The pass result is deliberately ignored -- the connection is going
 * away either way.
 */
AP_CORE_DECLARE(void) ap_flush_conn(conn_rec *c)
{
    apr_bucket_brigade *bb = apr_brigade_create(c->pool, c->bucket_alloc);

    /* FLUSH bucket */
    APR_BRIGADE_INSERT_TAIL(bb, apr_bucket_flush_create(c->bucket_alloc));

    /* End Of Connection bucket */
    APR_BRIGADE_INSERT_TAIL(bb, ap_bucket_eoc_create(c->bucket_alloc));

    ap_pass_brigade(c->output_filters, bb);
}
/**
 * @internal
 *
 * "Sniffs" the output (response) data from the connection stream.
 */
static int ironbee_output_filter (ap_filter_t *f, apr_bucket_brigade *bb)
{
    apr_bucket *b;
#if 0
    /* Disabled scaffolding for a future buffered-response mode; kept
     * here so the intended shape is not lost. */
    conn_rec *c = f->c;
    ironbee_conn_context *ctx = f->ctx;
    ib_conn_t *iconn = ctx->iconn;
    ib_core_cfg_t *corecfg;
    int buffering = 0;

    /* Configure. */
    ib_context_module_config(iconn->ctx, ib_core_module(),
                             (void *)&corecfg);
    if (corecfg != NULL) {
        buffering = (int)corecfg->buffer_res;
    }
#endif

    /* Hand every bucket to the inspection engine, then pass the whole
     * brigade through unmodified. */
    for (b = APR_BRIGADE_FIRST(bb);
         b != APR_BRIGADE_SENTINEL(bb);
         b = APR_BUCKET_NEXT(b))
    {
#if 0
        /// @todo Should this be done? Maybe only for proxy?
        if (APR_BUCKET_IS_EOS(b)) {
            /// @todo Do we need to do this? Maybe only for proxy.
            apr_bucket *flush = apr_bucket_flush_create(f->c->bucket_alloc);
            APR_BUCKET_INSERT_BEFORE(b, flush);
        }

        if (buffering) {
            /// @todo setaside into our own pool to destroy later???
            apr_bucket_setaside(b, c->pool);
            process_bucket(f, b);
            APR_BUCKET_REMOVE(b);
        }
        else {
#endif
        process_bucket(f, b);
#if 0
        }
#endif
    }

    return ap_pass_brigade(f->next, bb);
}
/*
 * Output filter: dump every outgoing bucket via dumpit().  Before any
 * EOS a FLUSH bucket is inserted so the logged data actually reaches
 * the client.
 */
static int dumpio_output_filter (ap_filter_t *f, apr_bucket_brigade *bb)
{
    conn_rec *c = f->c;
    apr_bucket *e;

    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                 "mod_dumpio: %s", f->frec->name) ;

    for (e = APR_BRIGADE_FIRST(bb);
         e != APR_BRIGADE_SENTINEL(bb);
         e = APR_BUCKET_NEXT(e)) {

        /*
         * If we ever see an EOS, make sure to FLUSH.
         */
        if (APR_BUCKET_IS_EOS(e)) {
            apr_bucket *fb = apr_bucket_flush_create(f->c->bucket_alloc);
            APR_BUCKET_INSERT_BEFORE(e, fb);
        }

        dumpit(f, e);
    }

    return ap_pass_brigade(f->next, bb) ;
}
// Called when Apache outputs data: static apr_status_t firstbyte_out_filter(ap_filter_t *f, apr_bucket_brigade *bb) { firstbyte_config_t *cf = ap_get_module_config(f->c->conn_config, &log_firstbyte_module); apr_bucket *b = APR_BRIGADE_LAST(bb); /* End of data, make sure we flush */ if (APR_BUCKET_IS_EOS(b)) { APR_BRIGADE_INSERT_TAIL(bb, apr_bucket_flush_create(f->c->bucket_alloc)); APR_BUCKET_REMOVE(b); apr_bucket_destroy(b); } if (cf->first_out==1) { cf->first_out_time = apr_time_now(); cf->first_out = 0; } return ap_pass_brigade(f->next, bb); }
/*
 * Exercise basic brigade operations: sentinel behaviour of an empty
 * brigade, INSERT_HEAD, INSERT_BEFORE ordering, and cleanup.
 */
static void test_simple(abts_case *tc, void *data)
{
    apr_bucket_alloc_t *alloc;
    apr_bucket_brigade *bb;
    apr_bucket *flush, *trans;

    alloc = apr_bucket_alloc_create(p);
    bb = apr_brigade_create(p, alloc);

    /* an empty brigade's first "bucket" is its own sentinel */
    flush = APR_BRIGADE_FIRST(bb);
    ABTS_ASSERT(tc, "first bucket of empty brigade is sentinel",
                flush == APR_BRIGADE_SENTINEL(bb));

    flush = apr_bucket_flush_create(alloc);
    APR_BRIGADE_INSERT_HEAD(bb, flush);
    ABTS_ASSERT(tc, "first bucket of brigade is flush",
                APR_BRIGADE_FIRST(bb) == flush);
    ABTS_ASSERT(tc, "bucket after flush is sentinel",
                APR_BUCKET_NEXT(flush) == APR_BRIGADE_SENTINEL(bb));

    trans = apr_bucket_transient_create("aaa", 3, alloc);
    APR_BUCKET_INSERT_BEFORE(flush, trans);
    ABTS_ASSERT(tc, "bucket before flush now transient",
                APR_BUCKET_PREV(flush) == trans);
    ABTS_ASSERT(tc, "bucket after transient is flush",
                APR_BUCKET_NEXT(trans) == flush);
    ABTS_ASSERT(tc, "bucket before transient is sentinel",
                APR_BUCKET_PREV(trans) == APR_BRIGADE_SENTINEL(bb));

    apr_brigade_cleanup(bb);
    ABTS_ASSERT(tc, "cleaned up brigade was empty", APR_BRIGADE_EMPTY(bb));

    apr_brigade_destroy(bb);
    apr_bucket_alloc_destroy(alloc);
}
/*
 * conn.write(string): copy the string into the connection pool, wrap
 * it in a pool bucket and send it, followed by a FLUSH bucket so the
 * data is not held back by buffering.  Returns None.
 */
static PyObject * conn_write(connobject *self, PyObject *args)
{
    conn_rec *c = self->conn;
    PyObject *s;

    if (! PyArg_ParseTuple(args, "O", &s))
        return NULL;

    if (! PyString_Check(s)) {
        PyErr_SetString(PyExc_TypeError, "Argument to write() must be a string");
        return NULL;
    }

    /* PYTHON 2.5: 'PyString_Size' uses Py_ssize_t for return values
     * (may need overflow check) */
    {
        int len = PyString_Size(s);

        if (len) {
            apr_bucket_brigade *bb;
            apr_bucket *b;
            char *buff = apr_pmemdup(c->pool, PyString_AS_STRING(s), len);

            bb = apr_brigade_create(c->pool, c->bucket_alloc);
            b = apr_bucket_pool_create(buff, len, c->pool, c->bucket_alloc);
            APR_BRIGADE_INSERT_TAIL(bb, b);

            /* Make sure the data is flushed to the client */
            b = apr_bucket_flush_create(c->bucket_alloc);
            APR_BRIGADE_INSERT_TAIL(bb, b);

            ap_pass_brigade(c->output_filters, bb);
        }
    }

    Py_INCREF(Py_None);
    return Py_None;
}
/*
 * Send a (request body) brigade to the origin server, optionally
 * appending a FLUSH bucket first.  Transferred bytes are accounted on
 * the worker; a failed pass is mapped onto an HTTP proxy error code.
 * Returns OK on success.
 */
PROXY_DECLARE(int) ap_proxy_pass_brigade(apr_bucket_alloc_t *bucket_alloc,
                                         request_rec *r, proxy_conn_rec *p_conn,
                                         conn_rec *origin, apr_bucket_brigade *bb,
                                         int flush)
{
    apr_status_t rv;
    apr_off_t transferred;

    if (flush) {
        apr_bucket *fb = apr_bucket_flush_create(bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(bb, fb);
    }

    /* account for the bytes about to leave (-1 means length unknown) */
    apr_brigade_length(bb, 0, &transferred);
    if (transferred != -1)
        p_conn->worker->s->transferred += transferred;

    rv = ap_pass_brigade(origin->output_filters, bb);
    if (rv != APR_SUCCESS) {
        const char *ssl_note;

        ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, "AH01084: "
                      "pass request body failed to %pI (%s)",
                      p_conn->addr, p_conn->hostname);
        if (!origin->aborted) {
            return HTTP_BAD_REQUEST;
        }
        /* the origin dropped us: distinguish an SSL handshake failure
         * from a plain timeout / broken gateway */
        if (((ssl_note = apr_table_get(origin->notes, "SSL_connect_rv"))
             != NULL) && (strcmp(ssl_note, "err") == 0)) {
            return ap_proxyerror(r, HTTP_INTERNAL_SERVER_ERROR,
                                 "Error during SSL Handshake with"
                                 " remote server");
        }
        return APR_STATUS_IS_TIMEUP(rv) ?
               HTTP_GATEWAY_TIME_OUT : HTTP_BAD_GATEWAY;
    }

    apr_brigade_cleanup(bb);
    return OK;
}
/*
 * Force pending response data out to the client and mark the
 * connection as non-keepalive.  Returns OK, or the failing status of
 * the brigade pass.
 */
static int standard_flush(request_rec *r)
{
    apr_bucket_brigade *bb;
    int status;

    r->connection->keepalive = AP_CONN_CLOSE;

    bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(bb,
        apr_bucket_flush_create(r->connection->bucket_alloc));

    status = ap_pass_brigade(r->output_filters, bb);
    if (status != OK) {
        ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01156)
                      "ap_pass_brigade failed:");
        return status;
    }
    return OK;
}
static void php_apache_sapi_flush(void *server_context) { php_struct *ctx; apr_bucket_brigade *bb; apr_bucket_alloc_t *ba; apr_bucket *b; ap_filter_t *f; /* output filters */ ctx = server_context; /* If we haven't registered a server_context yet, * then don't bother flushing. */ if (!server_context) return; sapi_send_headers(); ctx->r->status = SG(sapi_headers).http_response_code; SG(headers_sent) = 1; f = ctx->f; /* Send a flush bucket down the filter chain. The current default * handler seems to act on the first flush bucket, but ignores * all further flush buckets. */ ba = ctx->r->connection->bucket_alloc; bb = apr_brigade_create(ctx->r->pool, ba); b = apr_bucket_flush_create(ba); APR_BRIGADE_INSERT_TAIL(bb, b); if (ap_pass_brigade(f->next, bb) != APR_SUCCESS || ctx->r->connection->aborted) { php_handle_aborted_connection(); } }
/*
 * SUBSTITUTE output filter: scan the response body line by line and
 * apply the configured pattern substitutions (via do_pattmatch).
 * Partial lines are accumulated in ctx->linebb across invocations;
 * complete, substituted output is queued in ctx->passbb and passed on.
 */
static apr_status_t substitute_filter(ap_filter_t *f, apr_bucket_brigade *bb)
{
    apr_size_t bytes;
    apr_size_t len;
    apr_size_t fbytes;
    const char *buff;
    const char *nl = NULL;
    char *bflat;
    apr_bucket *b;
    apr_bucket *tmp_b;
    apr_bucket_brigade *tmp_bb = NULL;
    apr_status_t rv;
    subst_dir_conf *cfg =
        (subst_dir_conf *) ap_get_module_config(f->r->per_dir_config,
                                                &substitute_module);
    substitute_module_ctx *ctx = f->ctx;

    /*
     * First time around? Create the saved bb that we used for each pass
     * through. Note that we can also get here when we explicitly clear ctx,
     * for error handling
     */
    if (!ctx) {
        f->ctx = ctx = apr_pcalloc(f->r->pool, sizeof(*ctx));
        /*
         * Create all the temporary brigades we need and reuse them to avoid
         * creating them over and over again from r->pool which would cost a
         * lot of memory in some cases.
         */
        ctx->linebb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
        ctx->linesbb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
        ctx->pattbb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
        /*
         * Everything to be passed to the next filter goes in
         * here, our pass brigade.
         */
        ctx->passbb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
        /* Create our temporary pool only once */
        apr_pool_create(&(ctx->tpool), f->r->pool);
        /* body length will change, so the original length is invalid */
        apr_table_unset(f->r->headers_out, "Content-Length");
    }

    /*
     * Shortcircuit processing
     */
    if (APR_BRIGADE_EMPTY(bb))
        return APR_SUCCESS;

    /*
     * Here's the concept:
     *  Read in the data and look for newlines. Once we
     *  find a full "line", add it to our working brigade.
     *  If we've finished reading the brigade and we have
     *  any left over data (not a "full" line), store that
     *  for the next pass.
     *
     * Note: anything stored in ctx->linebb for sure does not have
     * a newline char, so we don't concat that bb with the
     * new bb, since we would spending time searching for the newline
     * in data we know it doesn't exist. So instead, we simply scan
     * our current bb and, if we see a newline, prepend ctx->linebb
     * to the front of it. This makes the code much less straight-
     * forward (otherwise we could APR_BRIGADE_CONCAT(ctx->linebb, bb)
     * and just scan for newlines and not bother with needing to know
     * when ctx->linebb needs to be reset) but also faster. We'll take
     * the speed.
     *
     * Note: apr_brigade_split_line would be nice here, but we
     * really can't use it since we need more control and we want
     * to re-use already read bucket data.
     *
     * See mod_include if still confused :)
     */
    while ((b = APR_BRIGADE_FIRST(bb)) && (b != APR_BRIGADE_SENTINEL(bb))) {
        if (APR_BUCKET_IS_EOS(b)) {
            /*
             * if we see the EOS, then we need to pass along everything we
             * have. But if the ctx->linebb isn't empty, then we need to add
             * that to the end of what we'll be passing.
             */
            if (!APR_BRIGADE_EMPTY(ctx->linebb)) {
                rv = apr_brigade_pflatten(ctx->linebb, &bflat,
                                          &fbytes, ctx->tpool);
                if (rv != APR_SUCCESS)
                    goto err;
                if (fbytes > cfg->max_line_length) {
                    rv = APR_ENOMEM;
                    goto err;
                }
                tmp_b = apr_bucket_transient_create(bflat, fbytes,
                                                f->r->connection->bucket_alloc);
                rv = do_pattmatch(f, tmp_b, ctx->pattbb, ctx->tpool);
                if (rv != APR_SUCCESS)
                    goto err;
                APR_BRIGADE_CONCAT(ctx->passbb, ctx->pattbb);
                apr_brigade_cleanup(ctx->linebb);
            }
            APR_BUCKET_REMOVE(b);
            APR_BRIGADE_INSERT_TAIL(ctx->passbb, b);
        }
        /*
         * No need to handle FLUSH buckets separately as we call
         * ap_pass_brigade anyway at the end of the loop.
         */
        else if (APR_BUCKET_IS_METADATA(b)) {
            APR_BUCKET_REMOVE(b);
            APR_BRIGADE_INSERT_TAIL(ctx->passbb, b);
        }
        else {
            /*
             * We have actual "data" so read in as much as we can and start
             * scanning and splitting from our read buffer
             */
            rv = apr_bucket_read(b, &buff, &bytes, APR_BLOCK_READ);
            if (rv != APR_SUCCESS || bytes == 0) {
                apr_bucket_delete(b);
            }
            else {
                int num = 0;
                while (bytes > 0) {
                    nl = memchr(buff, APR_ASCII_LF, bytes);
                    if (nl) {
                        len = (apr_size_t) (nl - buff) + 1;
                        /* split *after* the newline */
                        apr_bucket_split(b, len);
                        /*
                         * We've likely read more data, so bypass rereading
                         * bucket data and continue scanning through this
                         * buffer
                         */
                        bytes -= len;
                        buff += len;
                        /*
                         * we need b to be updated for future potential
                         * splitting
                         */
                        tmp_b = APR_BUCKET_NEXT(b);
                        APR_BUCKET_REMOVE(b);
                        /*
                         * Hey, we found a newline! Don't forget the old
                         * stuff that needs to be added to the front. So we
                         * add the split bucket to the end, flatten the whole
                         * bb, morph the whole shebang into a bucket which is
                         * then added to the tail of the newline bb.
                         */
                        if (!APR_BRIGADE_EMPTY(ctx->linebb)) {
                            APR_BRIGADE_INSERT_TAIL(ctx->linebb, b);
                            rv = apr_brigade_pflatten(ctx->linebb, &bflat,
                                                      &fbytes, ctx->tpool);
                            if (rv != APR_SUCCESS)
                                goto err;
                            if (fbytes > cfg->max_line_length) {
                                /* Avoid pflattening further lines, we will
                                 * abort later on anyway.
                                 */
                                rv = APR_ENOMEM;
                                goto err;
                            }
                            b = apr_bucket_transient_create(bflat, fbytes,
                                                f->r->connection->bucket_alloc);
                            apr_brigade_cleanup(ctx->linebb);
                        }
                        rv = do_pattmatch(f, b, ctx->pattbb, ctx->tpool);
                        if (rv != APR_SUCCESS)
                            goto err;
                        /*
                         * Count how many buckets we have in ctx->passbb
                         * so far. Yes, this is correct we count ctx->passbb
                         * and not ctx->pattbb as we do not reset num on every
                         * iteration.
                         */
                        for (b = APR_BRIGADE_FIRST(ctx->pattbb);
                             b != APR_BRIGADE_SENTINEL(ctx->pattbb);
                             b = APR_BUCKET_NEXT(b)) {
                            num++;
                        }
                        APR_BRIGADE_CONCAT(ctx->passbb, ctx->pattbb);
                        /*
                         * If the number of buckets in ctx->passbb reaches an
                         * "insane" level, we consume much memory for all the
                         * buckets as such. So lets flush them down the chain
                         * in this case and thus clear ctx->passbb. This frees
                         * the buckets memory for further processing.
                         * Usually this condition should not become true, but
                         * it is a safety measure for edge cases.
                         */
                        if (num > AP_MAX_BUCKETS) {
                            b = apr_bucket_flush_create(
                                                f->r->connection->bucket_alloc);
                            APR_BRIGADE_INSERT_TAIL(ctx->passbb, b);
                            rv = ap_pass_brigade(f->next, ctx->passbb);
                            apr_brigade_cleanup(ctx->passbb);
                            num = 0;
                            apr_pool_clear(ctx->tpool);
                            if (rv != APR_SUCCESS)
                                goto err;
                        }
                        b = tmp_b;
                    }
                    else {
                        /*
                         * no newline in whatever is left of this buffer so
                         * tuck data away and get next bucket
                         */
                        APR_BUCKET_REMOVE(b);
                        APR_BRIGADE_INSERT_TAIL(ctx->linebb, b);
                        bytes = 0;
                    }
                }
            }
        }
        if (!APR_BRIGADE_EMPTY(ctx->passbb)) {
            rv = ap_pass_brigade(f->next, ctx->passbb);
            apr_brigade_cleanup(ctx->passbb);
            if (rv != APR_SUCCESS)
                goto err;
        }
        apr_pool_clear(ctx->tpool);
    }

    /* Anything left we want to save/setaside for the next go-around */
    if (!APR_BRIGADE_EMPTY(ctx->linebb)) {
        /*
         * Provide ap_save_brigade with an existing empty brigade
         * (ctx->linesbb) to avoid creating a new one.
         */
        ap_save_brigade(f, &(ctx->linesbb), &(ctx->linebb), f->r->pool);
        tmp_bb = ctx->linebb;
        ctx->linebb = ctx->linesbb;
        ctx->linesbb = tmp_bb;
    }

    return APR_SUCCESS;
err:
    if (rv == APR_ENOMEM)
        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, f->r, APLOGNO(01328)
                      "Line too long, URI %s", f->r->uri);
    apr_pool_clear(ctx->tpool);
    return rv;
}
/*
 * RATE_LIMIT output filter: throttle the response to the speed given by
 * the "rate-limit" subprocess environment variable (kB/s).  Data
 * bracketed by AP_RL start/end marker buckets goes out at full speed;
 * everything else is carved into chunk_size pieces released once per
 * RATE_INTERVAL_MS, each followed by a FLUSH bucket.
 */
static apr_status_t rate_limit_filter(ap_filter_t *f, apr_bucket_brigade *input_bb)
{
    apr_status_t rv = APR_SUCCESS;
    rl_ctx_t *ctx = f->ctx;
    apr_bucket *fb;
    int do_sleep = 0;
    apr_bucket_alloc_t *ba = f->r->connection->bucket_alloc;
    apr_bucket_brigade *bb = input_bb;

    if (f->c->aborted) {
        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, f->r, APLOGNO(01454)
                      "rl: conn aborted");
        apr_brigade_cleanup(bb);
        return APR_ECONNABORTED;
    }

    if (ctx == NULL) {
        const char *rl = NULL;
        int ratelimit;

        /* no subrequests. */
        if (f->r->main != NULL) {
            ap_remove_output_filter(f);
            return ap_pass_brigade(f->next, bb);
        }

        rl = apr_table_get(f->r->subprocess_env, "rate-limit");

        if (rl == NULL) {
            ap_remove_output_filter(f);
            return ap_pass_brigade(f->next, bb);
        }

        /* rl is in kilo bytes / second */
        ratelimit = atoi(rl) * 1024;
        if (ratelimit <= 0) {
            /* remove ourselves */
            ap_remove_output_filter(f);
            return ap_pass_brigade(f->next, bb);
        }

        /* first run, init stuff */
        ctx = apr_palloc(f->r->pool, sizeof(rl_ctx_t));
        f->ctx = ctx;
        ctx->state = RATE_LIMIT;
        ctx->speed = ratelimit;

        /* calculate how many bytes / interval we want to send */
        /* speed is bytes / second, so, how many  (speed / 1000 % interval) */
        ctx->chunk_size = (ctx->speed / (1000 / RATE_INTERVAL_MS));
        ctx->tmpbb = apr_brigade_create(f->r->pool, ba);
        ctx->holdingbb = apr_brigade_create(f->r->pool, ba);
    }

    while (ctx->state != RATE_ERROR &&
           (!APR_BRIGADE_EMPTY(bb) || !APR_BRIGADE_EMPTY(ctx->holdingbb))) {
        apr_bucket *e;

        /* resume with whatever was held over from the previous state */
        if (!APR_BRIGADE_EMPTY(ctx->holdingbb)) {
            APR_BRIGADE_CONCAT(bb, ctx->holdingbb);
        }

        while (ctx->state == RATE_FULLSPEED && !APR_BRIGADE_EMPTY(bb)) {
            /* Find where we 'stop' going full speed. */
            for (e = APR_BRIGADE_FIRST(bb);
                 e != APR_BRIGADE_SENTINEL(bb); e = APR_BUCKET_NEXT(e)) {
                if (AP_RL_BUCKET_IS_END(e)) {
                    /* move the END marker and everything after it to
                     * holdingbb; it will be sent rate-limited */
                    apr_bucket *f;
                    f = APR_RING_LAST(&bb->list);
                    APR_RING_UNSPLICE(e, f, link);
                    APR_RING_SPLICE_TAIL(&ctx->holdingbb->list, e, f,
                                         apr_bucket, link);
                    ctx->state = RATE_LIMIT;
                    break;
                }
            }

            if (f->c->aborted) {
                apr_brigade_cleanup(bb);
                ctx->state = RATE_ERROR;
                break;
            }

            fb = apr_bucket_flush_create(ba);
            APR_BRIGADE_INSERT_TAIL(bb, fb);
            rv = ap_pass_brigade(f->next, bb);

            if (rv != APR_SUCCESS) {
                ctx->state = RATE_ERROR;
                ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, f->r, APLOGNO(01455)
                              "rl: full speed brigade pass failed.");
            }
        }

        while (ctx->state == RATE_LIMIT && !APR_BRIGADE_EMPTY(bb)) {
            /* a START marker switches everything after it back to
             * full-speed delivery via holdingbb */
            for (e = APR_BRIGADE_FIRST(bb);
                 e != APR_BRIGADE_SENTINEL(bb); e = APR_BUCKET_NEXT(e)) {
                if (AP_RL_BUCKET_IS_START(e)) {
                    apr_bucket *f;
                    f = APR_RING_LAST(&bb->list);
                    APR_RING_UNSPLICE(e, f, link);
                    APR_RING_SPLICE_TAIL(&ctx->holdingbb->list, e, f,
                                         apr_bucket, link);
                    ctx->state = RATE_FULLSPEED;
                    break;
                }
            }

            while (!APR_BRIGADE_EMPTY(bb)) {
                apr_bucket *stop_point;
                apr_off_t len = 0;

                if (f->c->aborted) {
                    apr_brigade_cleanup(bb);
                    ctx->state = RATE_ERROR;
                    break;
                }

                /* sleep before every chunk except the very first one */
                if (do_sleep) {
                    apr_sleep(RATE_INTERVAL_MS * 1000);
                }
                else {
                    do_sleep = 1;
                }

                apr_brigade_length(bb, 1, &len);

                rv = apr_brigade_partition(bb, ctx->chunk_size, &stop_point);
                if (rv != APR_SUCCESS && rv != APR_INCOMPLETE) {
                    ctx->state = RATE_ERROR;
                    ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, f->r, APLOGNO(01456)
                                  "rl: partition failed.");
                    break;
                }

                /* move one chunk (up to stop_point) into tmpbb */
                if (stop_point != APR_BRIGADE_SENTINEL(bb)) {
                    apr_bucket *f;
                    apr_bucket *e = APR_BUCKET_PREV(stop_point);
                    f = APR_RING_FIRST(&bb->list);
                    APR_RING_UNSPLICE(f, e, link);
                    APR_RING_SPLICE_HEAD(&ctx->tmpbb->list, f, e,
                                         apr_bucket, link);
                }
                else {
                    APR_BRIGADE_CONCAT(ctx->tmpbb, bb);
                }

                fb = apr_bucket_flush_create(ba);

                APR_BRIGADE_INSERT_TAIL(ctx->tmpbb, fb);

#if 0
                brigade_dump(f->r, ctx->tmpbb);
                brigade_dump(f->r, bb);
#endif

                rv = ap_pass_brigade(f->next, ctx->tmpbb);
                apr_brigade_cleanup(ctx->tmpbb);

                if (rv != APR_SUCCESS) {
                    ctx->state = RATE_ERROR;
                    ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, f->r, APLOGNO(01457)
                                  "rl: brigade pass failed.");
                    break;
                }
            }
        }
    }

    return rv;
}
/*
 * Connection handler implementing a line-based echo protocol: read one
 * line at a time from the client and write it straight back, followed
 * by a FLUSH bucket, until the client disconnects or times out.
 * Returns OK (or DECLINED when the module is disabled).
 */
static int process_echo_connection(conn_rec *c)
{
    apr_bucket_brigade *bb;
    apr_bucket *b;
    apr_socket_t *csd = NULL;
    EchoConfig *pConfig = ap_get_module_config(c->base_server->module_config,
                                               &echo_module);

    if (!pConfig->bEnabled) {
        return DECLINED;
    }

    ap_time_process_request(c->sbh, START_PREQUEST);
    update_echo_child_status(c->sbh, SERVER_BUSY_READ, c, NULL);
    bb = apr_brigade_create(c->pool, c->bucket_alloc);

    for ( ; ; ) {
        apr_status_t rv;

        /* Get a single line of input from the client */
        if (((rv = ap_get_brigade(c->input_filters, bb, AP_MODE_GETLINE,
                                  APR_BLOCK_READ, 0)) != APR_SUCCESS)) {
            apr_brigade_cleanup(bb);
            /* EOF and timeouts are normal ways for the client to leave;
             * only log other failures */
            if (!APR_STATUS_IS_EOF(rv) && ! APR_STATUS_IS_TIMEUP(rv))
                ap_log_error(APLOG_MARK, APLOG_INFO, rv, c->base_server,
                             APLOGNO(01611)
                             "ProtocolEcho: Failure reading from %s",
                             c->client_ip);
            break;
        }

        /* Something horribly wrong happened.  Someone didn't block! */
        if (APR_BRIGADE_EMPTY(bb)) {
            apr_brigade_cleanup(bb);
            ap_log_error(APLOG_MARK, APLOG_INFO, rv, c->base_server,
                         APLOGNO(01612)
                         "ProtocolEcho: Error - read empty brigade from %s!",
                         c->client_ip);
            break;
        }

        if (!csd) {
            /* first line read: switch the socket to the keep-alive
             * timeout for subsequent reads */
            csd = ap_get_conn_socket(c);
            apr_socket_timeout_set(csd, c->base_server->keep_alive_timeout);
        }

        update_echo_child_status(c->sbh, SERVER_BUSY_WRITE, NULL, bb);

        /* Make sure the data is flushed to the client */
        b = apr_bucket_flush_create(c->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(bb, b);
        rv = ap_pass_brigade(c->output_filters, bb);
        if (rv != APR_SUCCESS) {
            ap_log_error(APLOG_MARK, APLOG_INFO, rv, c->base_server,
                         APLOGNO(01613)
                         "ProtocolEcho: Failure writing to %s",
                         c->client_ip);
            break;
        }
        apr_brigade_cleanup(bb);

        /* Announce our intent to loop */
        update_echo_child_status(c->sbh, SERVER_BUSY_KEEPALIVE, NULL, NULL);
    }
    apr_brigade_destroy(bb);
    ap_time_process_request(c->sbh, STOP_PREQUEST);
    update_echo_child_status(c->sbh, SERVER_CLOSING, c, NULL);
    return OK;
}
/*
 * Move buckets from brigade 'from' to brigade 'to', up to maxlen bytes,
 * honouring a budget of transferable file handles
 * (*pfile_handles_allowed, decremented per FILE bucket moved).  When
 * both brigades share a bucket allocator the buckets are relinked
 * directly; otherwise data is deep-copied (or the FILE handle set
 * aside into to->p) so the receiver never references the sender's
 * memory.  'msg' is a label used only for trace logging.
 */
apr_status_t h2_util_move(apr_bucket_brigade *to, apr_bucket_brigade *from,
                          apr_size_t maxlen, int *pfile_handles_allowed,
                          const char *msg)
{
    apr_status_t status = APR_SUCCESS;
    int same_alloc;

    AP_DEBUG_ASSERT(to);
    AP_DEBUG_ASSERT(from);
    same_alloc = (to->bucket_alloc == from->bucket_alloc);

    if (!FILE_MOVE) {
        pfile_handles_allowed = NULL;
    }

    if (!APR_BRIGADE_EMPTY(from)) {
        apr_bucket *b, *end;

        /* find the first bucket (exclusive) that would exceed maxlen */
        status = last_not_included(from, maxlen, same_alloc,
                                   pfile_handles_allowed, &end);
        if (status != APR_SUCCESS) {
            return status;
        }

        while (!APR_BRIGADE_EMPTY(from) && status == APR_SUCCESS) {
            b = APR_BRIGADE_FIRST(from);
            if (b == end) {
                break;
            }

            if (same_alloc || (b->list == to->bucket_alloc)) {
                /* both brigades use the same bucket_alloc and auto-cleanups
                 * have the same life time. It's therefore safe to just move
                 * directly. */
                APR_BUCKET_REMOVE(b);
                APR_BRIGADE_INSERT_TAIL(to, b);
#if LOG_BUCKETS
                ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p,
                              "h2_util_move: %s, passed bucket(same bucket_alloc) "
                              "%ld-%ld, type=%s",
                              msg, (long)b->start, (long)b->length,
                              APR_BUCKET_IS_METADATA(b)?
                              (APR_BUCKET_IS_EOS(b)? "EOS":
                               (APR_BUCKET_IS_FLUSH(b)? "FLUSH" : "META")) :
                              (APR_BUCKET_IS_FILE(b)? "FILE" : "DATA"));
#endif
            }
            else if (DEEP_COPY) {
                /* we have not managed the magic of passing buckets from
                 * one thread to another. Any attempts result in
                 * cleanup of pools scrambling memory.
                 */
                if (APR_BUCKET_IS_METADATA(b)) {
                    /* metadata carries no payload: recreate the bucket
                     * on the receiver's allocator */
                    if (APR_BUCKET_IS_EOS(b)) {
                        APR_BRIGADE_INSERT_TAIL(to, apr_bucket_eos_create(
                            to->bucket_alloc));
                    }
                    else if (APR_BUCKET_IS_FLUSH(b)) {
                        APR_BRIGADE_INSERT_TAIL(to, apr_bucket_flush_create(
                            to->bucket_alloc));
                    }
                    else {
                        /* ignore */
                    }
                }
                else if (pfile_handles_allowed
                         && *pfile_handles_allowed > 0
                         && APR_BUCKET_IS_FILE(b)) {
                    /* We do not want to read files when passing buckets, if
                     * we can avoid it. However, what we've come up so far
                     * is not working corrently, resulting either in crashes or
                     * too many open file descriptors.
                     */
                    apr_bucket_file *f = (apr_bucket_file *)b->data;
                    apr_file_t *fd = f->fd;
                    int setaside = (f->readpool != to->p);
#if LOG_BUCKETS
                    ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p,
                                  "h2_util_move: %s, moving FILE bucket %ld-%ld "
                                  "from=%lx(p=%lx) to=%lx(p=%lx), setaside=%d",
                                  msg, (long)b->start, (long)b->length,
                                  (long)from, (long)from->p,
                                  (long)to, (long)to->p, setaside);
#endif
                    if (setaside) {
                        status = apr_file_setaside(&fd, fd, to->p);
                        if (status != APR_SUCCESS) {
                            ap_log_perror(APLOG_MARK, APLOG_ERR, status, to->p,
                                          APLOGNO(02947)
                                          "h2_util: %s, setaside FILE", msg);
                            return status;
                        }
                    }
                    apr_brigade_insert_file(to, fd, b->start, b->length,
                                            to->p);
                    --(*pfile_handles_allowed);
                }
                else {
                    /* plain data: read it and copy the bytes across */
                    const char *data;
                    apr_size_t len;

                    status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
                    if (status == APR_SUCCESS && len > 0) {
                        status = apr_brigade_write(to, NULL, NULL, data, len);
#if LOG_BUCKETS
                        ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p,
                                      "h2_util_move: %s, copied bucket %ld-%ld "
                                      "from=%lx(p=%lx) to=%lx(p=%lx)",
                                      msg, (long)b->start, (long)b->length,
                                      (long)from, (long)from->p,
                                      (long)to, (long)to->p);
#endif
                    }
                }
                apr_bucket_delete(b);
            }
            else {
                apr_bucket_setaside(b, to->p);
                APR_BUCKET_REMOVE(b);
                APR_BRIGADE_INSERT_TAIL(to, b);
#if LOG_BUCKETS
                ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p,
                              "h2_util_move: %s, passed setaside bucket %ld-%ld "
                              "from=%lx(p=%lx) to=%lx(p=%lx)",
                              msg, (long)b->start, (long)b->length,
                              (long)from, (long)from->p,
                              (long)to, (long)to->p);
#endif
            }
        }
    }

    return status;
}
/*
 * Pass 'len' bytes at 'buf' down the current filter chain in a single
 * brigade, optionally followed by a FLUSH bucket.  When header parsing
 * is enabled (CGI-style output) the headers are split off and fed to
 * modperl_cgi_header_parse first; only the remaining body is sent.
 */
MP_INLINE apr_status_t modperl_wbucket_pass(modperl_wbucket_t *wb,
                                            const char *buf, apr_size_t len,
                                            int add_flush_bucket)
{
    apr_bucket_alloc_t *ba = (*wb->filters)->c->bucket_alloc;
    apr_bucket_brigade *bb;
    apr_bucket *bucket;

    /* reset the counter to 0 as early as possible and in one place,
     * since this function will always either pass the data out (and
     * it has 'len' already) or return an error. */
    wb->outcnt = 0;

    if (wb->header_parse) {
        request_rec *r = wb->r;
        const char *body;
        int status;

        MP_TRACE_f(MP_FUNC, "parsing headers: %db [%s]", len,
                   MP_TRACE_STR_TRUNC(wb->pool, buf, len));

        status = modperl_cgi_header_parse(r, (char *)buf, &len, &body);

        wb->header_parse = 0; /* only once per-request */

        if (status == HTTP_MOVED_TEMPORARILY) {
            return APR_SUCCESS; /* XXX: HTTP_MOVED_TEMPORARILY ? */
        }
        else if (status != OK) {
            ap_log_error(APLOG_MARK, APLOG_WARNING,
                         0, r->server, "%s did not send an HTTP header",
                         r->uri);
            r->status = status;
            /* XXX: body == NULL here */
            return APR_SUCCESS;
        }
        else if (!len) {
            /* headers only, nothing left to send */
            return APR_SUCCESS;
        }

        buf = body;
    }

    /* this is a note for filter writers who may decide that there is
     * a bug in mod_perl. We send a transient bucket. That means that
     * this bucket can't be stored inside a filter without copying the
     * data in it. This is done automatically by apr_bucket_setaside,
     * which is written exactly for the purpose to make setaside
     * operation transparent to the kind of bucket.
     */
    bucket = apr_bucket_transient_create(buf, len, ba);
    bb = apr_brigade_create(wb->pool, ba);
    APR_BRIGADE_INSERT_TAIL(bb, bucket);

    if (add_flush_bucket) {
        /* append the flush bucket rather then calling ap_rflush, to
         * prevent a creation of yet another bb, which will cause an
         * extra call for each filter in the chain */
        apr_bucket *bucket = apr_bucket_flush_create(ba);
        APR_BRIGADE_INSERT_TAIL(bb, bucket);
    }

    MP_TRACE_f(MP_FUNC, "\n\n\twrite out: %db [%s]"
               "\t\tfrom %s\n\t\tto %s filter handler",
               len,
               MP_TRACE_STR_TRUNC(wb->pool, buf, len),
               ((wb->r && wb->filters == &wb->r->output_filters)
                   ? "response handler" : "current filter handler"),
               MP_FILTER_NAME(*(wb->filters)));

    return ap_pass_brigade(*(wb->filters), bb);
}
/*
 * Output filter that re-buckets text/* response bodies on configured
 * delimiter characters: bucketdelimiter splits data into separate buckets,
 * flushdelimiter additionally inserts a FLUSH bucket, and passdelimiter
 * passes the accumulated brigade downstream immediately.  Buckets are
 * collected in ctx->bb until EOS (or a pass delimiter) sends them on.
 *
 * Fix: the return value of apr_bucket_read() was ignored; on a failed
 * blocking read 'data' and 'len' are indeterminate, and the delimiter
 * scan would read garbage.  The status is now checked and propagated.
 */
static apr_status_t bucketeer_out_filter(ap_filter_t *f,
                                         apr_bucket_brigade *bb)
{
    apr_bucket *e;
    request_rec *r = f->r;
    bucketeer_ctx_t *ctx = f->ctx;
    bucketeer_filter_config_t *c;

    c = ap_get_module_config(r->server->module_config, &bucketeer_module);

    /* If have a context, it means we've done this before successfully. */
    if (!ctx) {
        /* Only filter text/* responses; everything else passes through
         * untouched and the filter removes itself. */
        if (!r->content_type || strncmp(r->content_type, "text/", 5)) {
            ap_remove_output_filter(f);
            return ap_pass_brigade(f->next, bb);
        }

        /* We're cool with filtering this. */
        ctx = f->ctx = apr_pcalloc(f->r->pool, sizeof(*ctx));
        ctx->bb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);

        /* The body is re-bucketed, so any original length is wrong. */
        apr_table_unset(f->r->headers_out, "Content-Length");
    }

    for (e = APR_BRIGADE_FIRST(bb);
         e != APR_BRIGADE_SENTINEL(bb);
         e = APR_BUCKET_NEXT(e)) {
        const char *data;
        apr_size_t len, i, lastpos;
        apr_status_t rv;

        if (APR_BUCKET_IS_EOS(e)) {
            APR_BUCKET_REMOVE(e);
            APR_BRIGADE_INSERT_TAIL(ctx->bb, e);

            /* Okay, we've seen the EOS.
             * Time to pass it along down the chain.
             */
            return ap_pass_brigade(f->next, ctx->bb);
        }

        if (APR_BUCKET_IS_FLUSH(e)) {
            /*
             * Ignore flush buckets for the moment..
             * we decide what to stream
             */
            continue;
        }

        if (APR_BUCKET_IS_METADATA(e)) {
            /* metadata bucket */
            apr_bucket *cpy;
            apr_bucket_copy(e, &cpy);
            APR_BRIGADE_INSERT_TAIL(ctx->bb, cpy);
            continue;
        }

        /* read; on failure data/len are indeterminate, so bail out
         * instead of scanning them. */
        rv = apr_bucket_read(e, &data, &len, APR_BLOCK_READ);
        if (rv != APR_SUCCESS) {
            return rv;
        }

        if (len > 0) {
            lastpos = 0;
            for (i = 0; i < len; i++) {
                if (data[i] == c->flushdelimiter
                    || data[i] == c->bucketdelimiter
                    || data[i] == c->passdelimiter) {
                    apr_bucket *p;

                    /* emit the data accumulated since the last delimiter */
                    if (i - lastpos > 0) {
                        p = apr_bucket_pool_create(apr_pmemdup(f->r->pool,
                                                               &data[lastpos],
                                                               i - lastpos),
                                                   i - lastpos,
                                                   f->r->pool,
                                                   f->c->bucket_alloc);
                        APR_BRIGADE_INSERT_TAIL(ctx->bb, p);
                    }
                    lastpos = i + 1;
                    if (data[i] == c->flushdelimiter) {
                        p = apr_bucket_flush_create(f->c->bucket_alloc);
                        APR_BRIGADE_INSERT_TAIL(ctx->bb, p);
                    }
                    if (data[i] == c->passdelimiter) {
                        apr_status_t rv2;

                        rv2 = ap_pass_brigade(f->next, ctx->bb);
                        if (rv2) {
                            return rv2;
                        }
                    }
                }
            }
            /* XXX: really should append this to the next 'real' bucket */
            if (lastpos < i) {
                apr_bucket *p;
                p = apr_bucket_pool_create(apr_pmemdup(f->r->pool,
                                                       &data[lastpos],
                                                       i - lastpos),
                                           i - lastpos,
                                           f->r->pool,
                                           f->c->bucket_alloc);
                lastpos = i;
                APR_BRIGADE_INSERT_TAIL(ctx->bb, p);
            }
        }
    }

    return APR_SUCCESS;
}
apr_status_t h2_util_copy(apr_bucket_brigade *to, apr_bucket_brigade *from, apr_size_t maxlen, const char *msg) { apr_status_t status = APR_SUCCESS; int same_alloc; (void)msg; AP_DEBUG_ASSERT(to); AP_DEBUG_ASSERT(from); same_alloc = (to->bucket_alloc == from->bucket_alloc); if (!APR_BRIGADE_EMPTY(from)) { apr_bucket *b, *end, *cpy; status = last_not_included(from, maxlen, 0, 0, &end); if (status != APR_SUCCESS) { return status; } for (b = APR_BRIGADE_FIRST(from); b != APR_BRIGADE_SENTINEL(from) && b != end; b = APR_BUCKET_NEXT(b)) { if (same_alloc) { status = apr_bucket_copy(b, &cpy); if (status != APR_SUCCESS) { break; } APR_BRIGADE_INSERT_TAIL(to, cpy); } else { if (APR_BUCKET_IS_METADATA(b)) { if (APR_BUCKET_IS_EOS(b)) { APR_BRIGADE_INSERT_TAIL(to, apr_bucket_eos_create(to->bucket_alloc)); } else if (APR_BUCKET_IS_FLUSH(b)) { APR_BRIGADE_INSERT_TAIL(to, apr_bucket_flush_create(to->bucket_alloc)); } else { /* ignore */ } } else { const char *data; apr_size_t len; status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ); if (status == APR_SUCCESS && len > 0) { status = apr_brigade_write(to, NULL, NULL, data, len); #if LOG_BUCKETS ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p, "h2_util_copy: %s, copied bucket %ld-%ld " "from=%lx(p=%lx) to=%lx(p=%lx)", msg, (long)b->start, (long)b->length, (long)from, (long)from->p, (long)to, (long)to->p); #endif } } } } } return status; }
/**
 * @internal
 *
 * "Sniffs" the input (request) data from the connection stream and tries
 * to determine who closed a connection and why.
 *
 * Input filter that feeds request data through IronBee.  In buffering
 * mode raw data is removed from the brigade, handed to IronBee, and only
 * IronBee-processed data is re-injected; in non-buffering mode data is
 * inspected in place and passed through.  Unhandled read modes are
 * delegated straight to the next filter.
 */
static int ironbee_input_filter(ap_filter_t *f, apr_bucket_brigade *bb,
                                ap_input_mode_t mode, apr_read_type_e block,
                                apr_off_t readbytes)
{
    conn_rec *c = f->c;
    ironbee_conn_context *ctx = f->ctx;
    ib_conn_t *iconn = ctx->iconn;
    ib_core_cfg_t *corecfg;
    ib_stream_t *istream;
    apr_bucket *b;
    apr_status_t rc;
    int buffering = 0;

    /* Any mode not handled just gets passed through. */
    if ((mode != AP_MODE_GETLINE) && (mode != AP_MODE_READBYTES)) {
        return ap_get_brigade(f->next, bb, mode, block, readbytes);
    }

    /* Configure. */
    ib_context_module_config(iconn->ctx, ib_core_module(), (void *)&corecfg);
    if (corecfg != NULL) {
        buffering = (int)corecfg->buffer_req;
    }

    /* When buffering, data is removed from the brigade and handed
     * to IronBee. The filter must not return an empty brigade in this
     * case and keeps reading until there is processed data that comes
     * back from IronBee.
     */
    do {
        ib_tx_t *itx = iconn->tx;

        /* If there is any processed data, then send it now. */
        if (buffering && (itx != NULL)) {
            ib_sdata_t *sdata;

            /* Take any data from the drain (processed data) and
             * inject it back into the filter brigade. */
            ib_fctl_drain(itx->fctl, &istream);
            if ((istream != NULL) && (istream->nelts > 0)) {
                int done = 0;

                while (!done) {
                    apr_bucket *ibucket = NULL;

                    /// @todo Handle multi-bucket lines
                    if (mode == AP_MODE_GETLINE) {
                        /* GETLINE: deliver at most one pulled item */
                        done = 1;
                    }

                    ib_stream_pull(istream, &sdata);
                    if (sdata == NULL) {
                        /* No more data left. */
                        break;
                    }

                    /* Map each IronBee stream item to an APR bucket
                     * (or to nothing, for EOH/EOB markers). */
                    switch (sdata->type) {
                        case IB_STREAM_DATA:
#ifdef IB_DEBUG
                            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                                         IB_PRODUCT_NAME ": DATA[%d]: %.*s",
                                         (int)sdata->dlen,
                                         (int)sdata->dlen, (char *)sdata->data);
#endif

                            /// @todo Is this creating a copy?  Just need a reference.
                            ibucket = apr_bucket_heap_create(sdata->data,
                                                             sdata->dlen,
                                                             NULL,
                                                             bb->bucket_alloc);
                            break;

                        case IB_STREAM_FLUSH:
#ifdef IB_DEBUG
                            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                                         IB_PRODUCT_NAME ": FLUSH");
#endif
                            ibucket = apr_bucket_flush_create(bb->bucket_alloc);
                            break;

                        case IB_STREAM_EOH:
#ifdef IB_DEBUG
                            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                                         IB_PRODUCT_NAME ": EOH");
#endif
                            /// @todo Do something here???
                            break;

                        case IB_STREAM_EOB:
#ifdef IB_DEBUG
                            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                                         IB_PRODUCT_NAME ": EOB");
#endif
                            /// @todo Do something here???
                            break;

                        case IB_STREAM_EOS:
#ifdef IB_DEBUG
                            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                                         IB_PRODUCT_NAME ": EOS");
#endif
                            ibucket = apr_bucket_eos_create(bb->bucket_alloc);
                            break;

                        default:
                            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                                         IB_PRODUCT_NAME ": UNKNOWN stream data type %d",
                                         sdata->type);
                    }

                    if (ibucket != NULL) {
                        APR_BRIGADE_INSERT_TAIL(bb, ibucket);
                    }
                }

                /* Need to send any processed data to avoid deadlock. */
                if (!APR_BRIGADE_EMPTY(bb)) {
                    return APR_SUCCESS;
                }
            }
        }

        /* Fetch data from the next filter. */
        if (buffering) {
            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                         "FETCH BRIGADE (buffering)");

            /* Normally Apache will request the headers line-by-line, but
             * IronBee does not require this.  So, here the request is
             * fetched with READBYTES and IronBee will then break
             * it back up into lines when it is injected back into
             * the brigade after the data is processed. */
            rc = ap_get_brigade(f->next, bb, AP_MODE_READBYTES, block,
                                HUGE_STRING_LEN);
        }
        else {
            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                         "FETCH BRIGADE (non-buffering)");
            rc = ap_get_brigade(f->next, bb, mode, block, readbytes);
        }

        /* Check for any timeouts/disconnects/errors. */
        if (APR_STATUS_IS_TIMEUP(rc)) {
            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                         IB_PRODUCT_NAME ": %s server closed connection (%d)",
                         f->frec->name, rc);

            ap_remove_input_filter(f);
            return rc;
        }
        else if (APR_STATUS_IS_EOF(rc) || apr_get_os_error() == ECONNRESET) {
            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                         IB_PRODUCT_NAME ": %s client closed connection (%d)",
                         f->frec->name, rc);

            ap_remove_input_filter(f);
            return rc;
        }
        else if (rc != APR_SUCCESS) {
            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, c->base_server,
                         IB_PRODUCT_NAME ": %s returned %d (0x%08x) - %s",
                         f->frec->name, rc, rc, strerror(apr_get_os_error()));

            return rc;
        }

        /* Process data. */
        for (b = APR_BRIGADE_FIRST(bb);
             b != APR_BRIGADE_SENTINEL(bb);
             b = APR_BUCKET_NEXT(b)) {
            if (buffering) {
                /// @todo setaside into our own pool to destroy later???
                apr_bucket_setaside(b, c->pool);
                process_bucket(f, b);
                /* remove raw data; only processed data is returned */
                APR_BUCKET_REMOVE(b);
            }
            else {
                process_bucket(f, b);
            }
        }
    } while (buffering);

    return APR_SUCCESS;
}
/*
 * mod_ratelimit output filter: paces the response body at the speed set
 * in the "rate-limit" subprocess_env variable (KiB/s), with an optional
 * one-time "rate-initial-burst" allotment.  AP_RL_BUCKET_IS_START/END
 * marker buckets switch between RATE_FULLSPEED and RATE_LIMIT states.
 * Data is cut into per-interval chunks in ctx->tmpbb, each pass followed
 * by a sleep of RATE_INTERVAL_MS; unsent data is saved in ctx->holdingbb
 * for the next invocation.
 */
static apr_status_t rate_limit_filter(ap_filter_t *f, apr_bucket_brigade *bb)
{
    apr_status_t rv = APR_SUCCESS;
    rl_ctx_t *ctx = f->ctx;
    apr_bucket_alloc_t *ba = f->r->connection->bucket_alloc;

    /* Set up our rl_ctx_t on first use */
    if (ctx == NULL) {
        const char *rl = NULL;
        int ratelimit;
        int burst = 0;

        /* no subrequests. */
        if (f->r->main != NULL) {
            ap_remove_output_filter(f);
            return ap_pass_brigade(f->next, bb);
        }

        /* Configuration: rate limit */
        rl = apr_table_get(f->r->subprocess_env, "rate-limit");

        if (rl == NULL) {
            ap_remove_output_filter(f);
            return ap_pass_brigade(f->next, bb);
        }

        /* rl is in kilo bytes / second */
        ratelimit = atoi(rl) * 1024;
        if (ratelimit <= 0) {
            /* remove ourselves */
            ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r, APLOGNO(03488)
                          "rl: disabling: rate-limit = %s (too high?)", rl);
            ap_remove_output_filter(f);
            return ap_pass_brigade(f->next, bb);
        }

        /* Configuration: optional initial burst */
        rl = apr_table_get(f->r->subprocess_env, "rate-initial-burst");
        if (rl != NULL) {
            burst = atoi(rl) * 1024;
            if (burst <= 0) {
                ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r, APLOGNO(03489)
                              "rl: disabling burst: rate-initial-burst = %s (too high?)",
                              rl);
                burst = 0;
            }
        }

        /* Set up our context */
        ctx = apr_palloc(f->r->pool, sizeof(rl_ctx_t));
        f->ctx = ctx;
        ctx->state = RATE_LIMIT;
        ctx->speed = ratelimit;
        ctx->burst = burst;
        ctx->do_sleep = 0;

        /* calculate how many bytes / interval we want to send */
        /* speed is bytes / second, so, how many (speed / 1000 % interval) */
        ctx->chunk_size = (ctx->speed / (1000 / RATE_INTERVAL_MS));
        ctx->tmpbb = apr_brigade_create(f->r->pool, ba);
        ctx->holdingbb = apr_brigade_create(f->r->pool, ba);
    }
    else {
        /* resume with whatever was held over from the previous call */
        APR_BRIGADE_PREPEND(bb, ctx->holdingbb);
    }

    while (!APR_BRIGADE_EMPTY(bb)) {
        apr_bucket *e;

        if (ctx->state == RATE_FULLSPEED) {
            /* Find where we 'stop' going full speed. */
            for (e = APR_BRIGADE_FIRST(bb);
                 e != APR_BRIGADE_SENTINEL(bb); e = APR_BUCKET_NEXT(e)) {
                if (AP_RL_BUCKET_IS_END(e)) {
                    /* everything after the END marker is rate-limited */
                    apr_brigade_split_ex(bb, e, ctx->holdingbb);
                    ctx->state = RATE_LIMIT;
                    break;
                }
            }

            e = apr_bucket_flush_create(ba);
            APR_BRIGADE_INSERT_TAIL(bb, e);
            rv = ap_pass_brigade(f->next, bb);
            apr_brigade_cleanup(bb);

            if (rv != APR_SUCCESS) {
                ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, f->r, APLOGNO(01455)
                              "rl: full speed brigade pass failed.");
                return rv;
            }
        }
        else {
            for (e = APR_BRIGADE_FIRST(bb);
                 e != APR_BRIGADE_SENTINEL(bb); e = APR_BUCKET_NEXT(e)) {
                if (AP_RL_BUCKET_IS_START(e)) {
                    /* everything after the START marker is full speed */
                    apr_brigade_split_ex(bb, e, ctx->holdingbb);
                    ctx->state = RATE_FULLSPEED;
                    break;
                }
            }

            while (!APR_BRIGADE_EMPTY(bb)) {
                apr_off_t len = ctx->chunk_size + ctx->burst;

                APR_BRIGADE_CONCAT(ctx->tmpbb, bb);

                /*
                 * Pull next chunk of data; the initial amount is our
                 * burst allotment (if any) plus a chunk.  All subsequent
                 * iterations are just chunks with whatever remaining
                 * burst amounts we have left (in case not done in the
                 * first bucket).
                 */
                rv = apr_brigade_partition(ctx->tmpbb, len, &e);
                if (rv != APR_SUCCESS && rv != APR_INCOMPLETE) {
                    ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, f->r, APLOGNO(01456)
                                  "rl: partition failed.");
                    return rv;
                }

                /* Send next metadata now if any */
                while (e != APR_BRIGADE_SENTINEL(ctx->tmpbb)
                       && APR_BUCKET_IS_METADATA(e)) {
                    e = APR_BUCKET_NEXT(e);
                }
                if (e != APR_BRIGADE_SENTINEL(ctx->tmpbb)) {
                    apr_brigade_split_ex(ctx->tmpbb, e, bb);
                }
                else {
                    apr_brigade_length(ctx->tmpbb, 1, &len);
                }

                /*
                 * Adjust the burst amount depending on how much
                 * we've done up to now.
                 */
                if (ctx->burst) {
                    ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r, APLOGNO(03485)
                                  "rl: burst %d; len %"APR_OFF_T_FMT,
                                  ctx->burst, len);
                    if (len < ctx->burst) {
                        ctx->burst -= len;
                    }
                    else {
                        ctx->burst = 0;
                    }
                }

                e = APR_BRIGADE_LAST(ctx->tmpbb);
                if (APR_BUCKET_IS_EOS(e)) {
                    /* end of response: our job is done */
                    ap_remove_output_filter(f);
                }
                else if (!APR_BUCKET_IS_FLUSH(e)) {
                    if (APR_BRIGADE_EMPTY(bb)) {
                        /* Wait for more (or next call) */
                        break;
                    }
                    e = apr_bucket_flush_create(ba);
                    APR_BRIGADE_INSERT_TAIL(ctx->tmpbb, e);
                }

#if defined(RLFDEBUG)
                brigade_dump(f->r, ctx->tmpbb);
                brigade_dump(f->r, bb);
#endif /* RLFDEBUG */

                /* first chunk goes out immediately; every later chunk
                 * is preceded by one interval's sleep */
                if (ctx->do_sleep) {
                    apr_sleep(RATE_INTERVAL_MS * 1000);
                }
                else {
                    ctx->do_sleep = 1;
                }

                rv = ap_pass_brigade(f->next, ctx->tmpbb);
                apr_brigade_cleanup(ctx->tmpbb);

                if (rv != APR_SUCCESS) {
                    /* Most often, user disconnects from stream */
                    ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, f->r, APLOGNO(01457)
                                  "rl: brigade pass failed.");
                    return rv;
                }
            }
        }

        if (!APR_BRIGADE_EMPTY(ctx->holdingbb)) {
            /* Any rate-limited data in tmpbb is sent unlimited along
             * with the rest. */
            APR_BRIGADE_CONCAT(bb, ctx->tmpbb);
            APR_BRIGADE_CONCAT(bb, ctx->holdingbb);
        }
    }

#if defined(RLFDEBUG)
    brigade_dump(f->r, ctx->tmpbb);
#endif /* RLFDEBUG */

    /* Save remaining tmpbb with the correct lifetime for the next call */
    return ap_save_brigade(f, &ctx->holdingbb, &ctx->tmpbb, f->r->pool);
}
/*
 * process the request and write the response.
 *
 * Drives a complete AJP13 exchange with the backend: sends the request
 * headers and (non-chunked) body, then loops on backend messages --
 * GET_BODY_CHUNK (feed more body), SEND_HEADERS, SEND_BODY_CHUNK
 * (stream to the client), END_RESPONSE -- until the response ends or
 * either side fails.  Returns an HTTP status, OK, or DONE (DONE when
 * data was already sent to the client so no error page may be added).
 */
static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r,
                                proxy_conn_rec *conn,
                                conn_rec *origin,
                                proxy_dir_conf *conf,
                                apr_uri_t *uri,
                                char *url, char *server_portstr)
{
    apr_status_t status;
    int result;
    apr_bucket *e;
    apr_bucket_brigade *input_brigade;
    apr_bucket_brigade *output_brigade;
    ajp_msg_t *msg;
    apr_size_t bufsiz = 0;
    char *buff;
    char *send_body_chunk_buff;
    apr_uint16_t size;
    apr_byte_t conn_reuse = 0;
    const char *tenc;
    int havebody = 1;             /* more request body to forward? */
    int output_failed = 0;        /* failure on the client side */
    int backend_failed = 0;       /* failure on the backend side */
    apr_off_t bb_len;
    int data_sent = 0;            /* anything passed to the client yet? */
    int request_ended = 0;
    int headers_sent = 0;
    int rv = 0;
    apr_int32_t conn_poll_fd;
    apr_pollfd_t *conn_poll;
    proxy_server_conf *psf =
        ap_get_module_config(r->server->module_config, &proxy_module);
    apr_size_t maxsize = AJP_MSG_BUFFER_SZ;
    int send_body = 0;            /* (part of) body already sent to backend? */
    apr_off_t content_length = 0;
    int original_status = r->status;
    const char *original_status_line = r->status_line;

    /* clamp the configured io buffer size to the AJP message limits */
    if (psf->io_buffer_size_set)
        maxsize = psf->io_buffer_size;
    if (maxsize > AJP_MAX_BUFFER_SZ)
        maxsize = AJP_MAX_BUFFER_SZ;
    else if (maxsize < AJP_MSG_BUFFER_SZ)
        maxsize = AJP_MSG_BUFFER_SZ;
    maxsize = APR_ALIGN(maxsize, 1024);

    /*
     * Send the AJP request to the remote server
     */

    /* send request headers */
    status = ajp_send_header(conn->sock, r, maxsize, uri);
    if (status != APR_SUCCESS) {
        conn->close++;
        ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server,
                     "proxy: AJP: request failed to %pI (%s)",
                     conn->worker->cp->addr,
                     conn->worker->hostname);
        if (status == AJP_EOVERFLOW)
            return HTTP_BAD_REQUEST;
        else if (status == AJP_EBAD_METHOD) {
            return HTTP_NOT_IMPLEMENTED;
        } else {
            /*
             * This is only non fatal when the method is idempotent. In this
             * case we can dare to retry it with a different worker if we are
             * a balancer member.
             */
            if (is_idempotent(r) == METHOD_IDEMPOTENT) {
                return HTTP_SERVICE_UNAVAILABLE;
            }
            return HTTP_INTERNAL_SERVER_ERROR;
        }
    }

    /* allocate an AJP message to store the data of the buckets */
    bufsiz = maxsize;
    status = ajp_alloc_data_msg(r->pool, &buff, &bufsiz, &msg);
    if (status != APR_SUCCESS) {
        /* We had a failure: Close connection to backend */
        conn->close++;
        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                     "proxy: ajp_alloc_data_msg failed");
        return HTTP_INTERNAL_SERVER_ERROR;
    }

    /* read the first block of data */
    input_brigade = apr_brigade_create(p, r->connection->bucket_alloc);
    tenc = apr_table_get(r->headers_in, "Transfer-Encoding");
    if (tenc && (strcasecmp(tenc, "chunked") == 0)) {
        /* The AJP protocol does not want body data yet */
        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                     "proxy: request is chunked");
    } else {
        /* Get client provided Content-Length header */
        content_length = get_content_length(r);
        status = ap_get_brigade(r->input_filters, input_brigade,
                                AP_MODE_READBYTES, APR_BLOCK_READ,
                                maxsize - AJP_HEADER_SZ);
        if (status != APR_SUCCESS) {
            /* We had a failure: Close connection to backend */
            conn->close++;
            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                         "proxy: ap_get_brigade failed");
            apr_brigade_destroy(input_brigade);
            return HTTP_BAD_REQUEST;
        }

        /* have something */
        if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) {
            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                         "proxy: APR_BUCKET_IS_EOS");
        }

        /* Try to send something */
        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                     "proxy: data to read (max %" APR_SIZE_T_FMT
                     " at %" APR_SIZE_T_FMT ")", bufsiz, msg->pos);

        status = apr_brigade_flatten(input_brigade, buff, &bufsiz);
        if (status != APR_SUCCESS) {
            /* We had a failure: Close connection to backend */
            conn->close++;
            apr_brigade_destroy(input_brigade);
            ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server,
                         "proxy: apr_brigade_flatten");
            return HTTP_INTERNAL_SERVER_ERROR;
        }
        apr_brigade_cleanup(input_brigade);

        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                     "proxy: got %" APR_SIZE_T_FMT " bytes of data", bufsiz);
        if (bufsiz > 0) {
            status = ajp_send_data_msg(conn->sock, msg, bufsiz);
            if (status != APR_SUCCESS) {
                /* We had a failure: Close connection to backend */
                conn->close++;
                apr_brigade_destroy(input_brigade);
                ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server,
                             "proxy: send failed to %pI (%s)",
                             conn->worker->cp->addr,
                             conn->worker->hostname);
                /*
                 * It is fatal when we failed to send a (part) of the request
                 * body.
                 */
                return HTTP_INTERNAL_SERVER_ERROR;
            }
            conn->worker->s->transferred += bufsiz;
            send_body = 1;
        }
        else if (content_length > 0) {
            ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server,
                         "proxy: read zero bytes, expecting"
                         " %" APR_OFF_T_FMT " bytes",
                         content_length);
            /*
             * We can only get here if the client closed the connection
             * to us without sending the body.
             * Now the connection is in the wrong state on the backend.
             * Sending an empty data msg doesn't help either as it does
             * not move this connection to the correct state on the backend
             * for later reuse by the next request again.
             * Close it to clean things up.
             */
            conn->close++;
            return HTTP_BAD_REQUEST;
        }
    }

    /* read the response */
    conn->data = NULL;
    status = ajp_read_header(conn->sock, r, maxsize,
                             (ajp_msg_t **)&(conn->data));
    if (status != APR_SUCCESS) {
        /* We had a failure: Close connection to backend */
        conn->close++;
        apr_brigade_destroy(input_brigade);
        ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server,
                     "proxy: read response failed from %pI (%s)",
                     conn->worker->cp->addr,
                     conn->worker->hostname);

        /* If we had a successful cping/cpong and then a timeout
         * we assume it is a request that cause a back-end timeout,
         * but doesn't affect the whole worker.
         */
        if (APR_STATUS_IS_TIMEUP(status) && conn->worker->ping_timeout_set) {
            return HTTP_GATEWAY_TIME_OUT;
        }

        /*
         * This is only non fatal when we have not sent (parts) of a possible
         * request body so far (we do not store it and thus cannot sent it
         * again) and the method is idempotent. In this case we can dare to
         * retry it with a different worker if we are a balancer member.
         */
        if (!send_body && (is_idempotent(r) == METHOD_IDEMPOTENT)) {
            return HTTP_SERVICE_UNAVAILABLE;
        }
        return HTTP_INTERNAL_SERVER_ERROR;
    }
    /* parse the response */
    result = ajp_parse_type(r, conn->data);
    output_brigade = apr_brigade_create(p, r->connection->bucket_alloc);

    /*
     * Prepare apr_pollfd_t struct for possible later check if there is
     * currently data available from the backend (do not flush response to
     * client) or not (flush response to client)
     */
    conn_poll = apr_pcalloc(p, sizeof(apr_pollfd_t));
    conn_poll->reqevents = APR_POLLIN;
    conn_poll->desc_type = APR_POLL_SOCKET;
    conn_poll->desc.s = conn->sock;

    bufsiz = maxsize;
    for (;;) {
        switch (result) {
            case CMD_AJP13_GET_BODY_CHUNK:
                /* backend wants the next piece of the request body */
                if (havebody) {
                    if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(input_brigade))) {
                        /* This is the end */
                        bufsiz = 0;
                        havebody = 0;
                        ap_log_error(APLOG_MARK, APLOG_DEBUG, status, r->server,
                                     "proxy: APR_BUCKET_IS_EOS");
                    } else {
                        status = ap_get_brigade(r->input_filters, input_brigade,
                                                AP_MODE_READBYTES,
                                                APR_BLOCK_READ,
                                                maxsize - AJP_HEADER_SZ);
                        if (status != APR_SUCCESS) {
                            ap_log_error(APLOG_MARK, APLOG_DEBUG, status,
                                         r->server,
                                         "ap_get_brigade failed");
                            output_failed = 1;
                            break;
                        }
                        bufsiz = maxsize;
                        status = apr_brigade_flatten(input_brigade, buff,
                                                     &bufsiz);
                        apr_brigade_cleanup(input_brigade);
                        if (status != APR_SUCCESS) {
                            ap_log_error(APLOG_MARK, APLOG_DEBUG, status,
                                         r->server,
                                         "apr_brigade_flatten failed");
                            output_failed = 1;
                            break;
                        }
                    }

                    ajp_msg_reset(msg); /* will go in ajp_send_data_msg */
                    status = ajp_send_data_msg(conn->sock, msg, bufsiz);
                    if (status != APR_SUCCESS) {
                        ap_log_error(APLOG_MARK, APLOG_DEBUG, status, r->server,
                                     "ajp_send_data_msg failed");
                        backend_failed = 1;
                        break;
                    }
                    conn->worker->s->transferred += bufsiz;
                } else {
                    /*
                     * something is wrong TC asks for more body but we are
                     * already at the end of the body data
                     */
                    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                                 "ap_proxy_ajp_request error read after end");
                    backend_failed = 1;
                }
                break;
            case CMD_AJP13_SEND_HEADERS:
                if (headers_sent) {
                    /* Do not send anything to the client.
                     * Backend already send us the headers.
                     */
                    backend_failed = 1;
                    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                                 "proxy: Backend sent headers twice.");
                    break;
                }
                /* AJP13_SEND_HEADERS: process them */
                status = ajp_parse_header(r, conf, conn->data);
                if (status != APR_SUCCESS) {
                    backend_failed = 1;
                }
                else if ((r->status == 401) && psf->error_override) {
                    /* preserve WWW-Authenticate when overriding a 401 */
                    const char *buf;
                    const char *wa = "WWW-Authenticate";
                    if ((buf = apr_table_get(r->headers_out, wa))) {
                        apr_table_set(r->err_headers_out, wa, buf);
                    } else {
                        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                                     "ap_proxy_ajp_request: origin server "
                                     "sent 401 without WWW-Authenticate header");
                    }
                }
                headers_sent = 1;
                break;
            case CMD_AJP13_SEND_BODY_CHUNK:
                /* AJP13_SEND_BODY_CHUNK: piece of data */
                status = ajp_parse_data(r, conn->data, &size,
                                        &send_body_chunk_buff);
                if (status == APR_SUCCESS) {
                    /* If we are overriding the errors, we can't put the
                     * content of the page into the brigade. */
                    if (!psf->error_override || !ap_is_HTTP_ERROR(r->status)) {
                        /* AJP13_SEND_BODY_CHUNK with zero length
                         * is explicit flush message */
                        if (size == 0) {
                            if (headers_sent) {
                                e = apr_bucket_flush_create(r->connection->bucket_alloc);
                                APR_BRIGADE_INSERT_TAIL(output_brigade, e);
                            }
                            else {
                                ap_log_error(APLOG_MARK, APLOG_DEBUG, 0,
                                             r->server,
                                             "Ignoring flush message received before headers");
                            }
                        }
                        else {
                            apr_status_t rv;

                            /* Handle the case where the error document is
                             * itself reverse proxied and was successful. We
                             * must maintain any previous error status so that
                             * an underlying error (eg HTTP_NOT_FOUND) doesn't
                             * become an HTTP_OK. */
                            if (psf->error_override
                                && !ap_is_HTTP_ERROR(r->status)
                                && ap_is_HTTP_ERROR(original_status)) {
                                r->status = original_status;
                                r->status_line = original_status_line;
                            }

                            e = apr_bucket_transient_create(send_body_chunk_buff,
                                                            size,
                                                            r->connection->bucket_alloc);
                            APR_BRIGADE_INSERT_TAIL(output_brigade, e);

                            /* flush eagerly when configured, or (flush_auto)
                             * when no more backend data arrives within
                             * flush_wait */
                            if ((conn->worker->flush_packets == flush_on)
                                || ((conn->worker->flush_packets == flush_auto)
                                    && ((rv = apr_poll(conn_poll, 1,
                                                       &conn_poll_fd,
                                                       conn->worker->flush_wait))
                                        != APR_SUCCESS)
                                    && APR_STATUS_IS_TIMEUP(rv))) {
                                e = apr_bucket_flush_create(r->connection->bucket_alloc);
                                APR_BRIGADE_INSERT_TAIL(output_brigade, e);
                            }
                            apr_brigade_length(output_brigade, 0, &bb_len);
                            if (bb_len != -1)
                                conn->worker->s->read += bb_len;
                        }
                        if (headers_sent) {
                            if (ap_pass_brigade(r->output_filters,
                                                output_brigade) != APR_SUCCESS) {
                                ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
                                              "proxy: error processing body.%s",
                                              r->connection->aborted ?
                                              " Client aborted connection." : "");
                                output_failed = 1;
                            }
                            data_sent = 1;
                            apr_brigade_cleanup(output_brigade);
                        }
                    }
                }
                else {
                    backend_failed = 1;
                }
                break;
            case CMD_AJP13_END_RESPONSE:
                status = ajp_parse_reuse(r, conn->data, &conn_reuse);
                if (status != APR_SUCCESS) {
                    backend_failed = 1;
                }
                /* If we are overriding the errors, we must not send anything
                 * to the client, especially as the brigade already contains
                 * headers.  So do nothing here, and it will be cleaned up
                 * below. */
                if (!psf->error_override || !ap_is_HTTP_ERROR(r->status)) {
                    e = apr_bucket_eos_create(r->connection->bucket_alloc);
                    APR_BRIGADE_INSERT_TAIL(output_brigade, e);
                    if (ap_pass_brigade(r->output_filters,
                                        output_brigade) != APR_SUCCESS) {
                        ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
                                      "proxy: error processing end");
                        output_failed = 1;
                    }
                    /* XXX: what about flush here? See mod_jk */
                    data_sent = 1;
                }
                request_ended = 1;
                break;
            default:
                backend_failed = 1;
                break;
        }

        /*
         * If connection has been aborted by client: Stop working.
         * Nevertheless, we regard our operation so far as a success:
         * So reset output_failed to 0 and set result to CMD_AJP13_END_RESPONSE
         * But: Close this connection to the backend.
         */
        if (r->connection->aborted) {
            conn->close++;
            output_failed = 0;
            result = CMD_AJP13_END_RESPONSE;
            request_ended = 1;
        }

        /*
         * We either have finished successfully or we failed.
         * So bail out
         */
        if ((result == CMD_AJP13_END_RESPONSE)
            || backend_failed || output_failed)
            break;

        /* read the response */
        status = ajp_read_header(conn->sock, r, maxsize,
                                 (ajp_msg_t **)&(conn->data));
        if (status != APR_SUCCESS) {
            backend_failed = 1;
            ap_log_error(APLOG_MARK, APLOG_DEBUG, status, r->server,
                         "ajp_read_header failed");
            break;
        }
        result = ajp_parse_type(r, conn->data);
    }
    apr_brigade_destroy(input_brigade);

    /*
     * Clear output_brigade to remove possible buckets that remained there
     * after an error.
     */
    apr_brigade_cleanup(output_brigade);

    if (backend_failed || output_failed) {
        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                     "proxy: Processing of request failed backend: %i, "
                     "output: %i", backend_failed, output_failed);
        /* We had a failure: Close connection to backend */
        conn->close++;
        /* Return DONE to avoid error messages being added to the stream */
        if (data_sent) {
            rv = DONE;
        }
    }
    else if (!request_ended) {
        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                     "proxy: Processing of request didn't terminate cleanly");
        /* We had a failure: Close connection to backend */
        conn->close++;
        backend_failed = 1;
        /* Return DONE to avoid error messages being added to the stream */
        if (data_sent) {
            rv = DONE;
        }
    }
    else if (!conn_reuse) {
        /* Our backend signalled connection close */
        conn->close++;
    }
    else {
        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                     "proxy: got response from %pI (%s)",
                     conn->worker->cp->addr,
                     conn->worker->hostname);

        if (psf->error_override && ap_is_HTTP_ERROR(r->status)) {
            /* clear r->status for override error, otherwise ErrorDocument
             * thinks that this is a recursive error, and doesn't find the
             * custom error page */
            rv = r->status;
            r->status = HTTP_OK;
        }
        else {
            rv = OK;
        }
    }

    if (backend_failed) {
        ap_log_error(APLOG_MARK, APLOG_ERR, status, r->server,
                     "proxy: dialog to %pI (%s) failed",
                     conn->worker->cp->addr,
                     conn->worker->hostname);
        /*
         * If we already send data, signal a broken backend connection
         * upwards in the chain.
         */
        if (data_sent) {
            ap_proxy_backend_broke(r, output_brigade);
        } else if (!send_body && (is_idempotent(r) == METHOD_IDEMPOTENT)) {
            /*
             * This is only non fatal when we have not sent (parts) of a
             * possible request body so far (we do not store it and thus
             * cannot sent it again) and the method is idempotent. In this
             * case we can dare to retry it with a different worker if we
             * are a balancer member.
             */
            rv = HTTP_SERVICE_UNAVAILABLE;
        } else {
            rv = HTTP_INTERNAL_SERVER_ERROR;
        }
    }

    /*
     * Ensure that we sent an EOS bucket thru the filter chain, if we already
     * have sent some data. Maybe ap_proxy_backend_broke was called and added
     * one to the brigade already (no longer making it empty). So we should
     * not do this in this case.
     */
    if (data_sent && !r->eos_sent && APR_BRIGADE_EMPTY(output_brigade)) {
        e = apr_bucket_eos_create(r->connection->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(output_brigade, e);
    }

    /* If we have added something to the brigade above, sent it */
    if (!APR_BRIGADE_EMPTY(output_brigade))
        ap_pass_brigade(r->output_filters, output_brigade);

    apr_brigade_destroy(output_brigade);

    return rv;
}
/*
 * Called from the green (receiving) thread to take buckets from the beam
 * into brigade 'bb', up to 'readbytes' bytes (<= 0 means unlimited).
 * Buffered green buckets are moved first, then red buckets are converted:
 * EOS/FLUSH are recreated, FILE buckets are set aside into bb's pool and
 * inserted directly, and data buckets become h2 "beam" stand-in buckets
 * while the red originals are parked in beam->hold.  Blocks (when allowed)
 * until something transfers, the beam closes, or it aborts.
 *
 * Fix: in the FILE-bucket branch 'transferred' was incremented twice for
 * a single bucket; it is now counted once (it is only tested for
 * non-zero below, but the double count was a slip).
 * NOTE(review): the FILE path still skips the beam->received_bytes
 * accounting done in the common path -- confirm whether that is intended.
 */
apr_status_t h2_beam_receive(h2_bucket_beam *beam,
                             apr_bucket_brigade *bb,
                             apr_read_type_e block,
                             apr_off_t readbytes)
{
    h2_beam_lock bl;
    apr_bucket *bred, *bgreen, *ng;
    int transferred = 0;
    apr_status_t status = APR_SUCCESS;
    apr_off_t remain = readbytes;

    /* Called from the green thread to take buckets from the beam */
    if (enter_yellow(beam, &bl) == APR_SUCCESS) {
transfer:
        if (beam->aborted) {
            if (beam->green && !APR_BRIGADE_EMPTY(beam->green)) {
                apr_brigade_cleanup(beam->green);
            }
            status = APR_ECONNABORTED;
            goto leave;
        }

        /* transfer enough buckets from our green brigade, if we have one */
        while (beam->green
               && !APR_BRIGADE_EMPTY(beam->green)
               && (readbytes <= 0 || remain >= 0)) {
            bgreen = APR_BRIGADE_FIRST(beam->green);
            if (readbytes > 0 && bgreen->length > 0 && remain <= 0) {
                break;
            }
            APR_BUCKET_REMOVE(bgreen);
            APR_BRIGADE_INSERT_TAIL(bb, bgreen);
            remain -= bgreen->length;
            ++transferred;
        }

        /* transfer from our red brigade, transforming red buckets to
         * green ones until we have enough */
        while (!H2_BLIST_EMPTY(&beam->red)
               && (readbytes <= 0 || remain >= 0)) {
            bred = H2_BLIST_FIRST(&beam->red);
            bgreen = NULL;
            if (readbytes > 0 && bred->length > 0 && remain <= 0) {
                break;
            }

            if (APR_BUCKET_IS_METADATA(bred)) {
                if (APR_BUCKET_IS_EOS(bred)) {
                    bgreen = apr_bucket_eos_create(bb->bucket_alloc);
                    beam->close_sent = 1;
                }
                else if (APR_BUCKET_IS_FLUSH(bred)) {
                    bgreen = apr_bucket_flush_create(bb->bucket_alloc);
                }
                else {
                    /* put red into hold, no green sent out */
                }
            }
            else if (APR_BUCKET_IS_FILE(bred)) {
                /* This is set aside into the target brigade pool so that
                 * any read operation messes with that pool and not
                 * the red one. */
                apr_bucket_file *f = (apr_bucket_file *)bred->data;
                apr_file_t *fd = f->fd;
                int setaside = (f->readpool != bb->p);

                if (setaside) {
                    status = apr_file_setaside(&fd, fd, bb->p);
                    if (status != APR_SUCCESS) {
                        goto leave;
                    }
                    ++beam->files_beamed;
                }
                ng = apr_brigade_insert_file(bb, fd, bred->start,
                                             bred->length, bb->p);
#if APR_HAS_MMAP
                /* disable mmap handling as this leads to segfaults when
                 * the underlying file is changed while memory pointer has
                 * been handed out. See also PR 59348 */
                apr_bucket_file_enable_mmap(ng, 0);
#endif
                remain -= bred->length;
                APR_BUCKET_REMOVE(bred);
                H2_BLIST_INSERT_TAIL(&beam->hold, bred);
                /* count this bucket exactly once (was duplicated before) */
                ++transferred;
                continue;
            }
            else {
                /* create a "green" standin bucket. we took care about the
                 * underlying red bucket and its data when we placed it into
                 * the red brigade.
                 * the beam bucket will notify us on destruction that bred is
                 * no longer needed. */
                bgreen = h2_beam_bucket_create(beam, bred, bb->bucket_alloc,
                                               beam->buckets_sent++);
            }

            /* Place the red bucket into our hold, to be destroyed when no
             * green bucket references it any more. */
            APR_BUCKET_REMOVE(bred);
            H2_BLIST_INSERT_TAIL(&beam->hold, bred);
            beam->received_bytes += bred->length;
            if (bgreen) {
                APR_BRIGADE_INSERT_TAIL(bb, bgreen);
                remain -= bgreen->length;
                ++transferred;
            }
        }

        if (readbytes > 0 && remain < 0) {
            /* too much, put some back */
            remain = readbytes;
            for (bgreen = APR_BRIGADE_FIRST(bb);
                 bgreen != APR_BRIGADE_SENTINEL(bb);
                 bgreen = APR_BUCKET_NEXT(bgreen)) {
                remain -= bgreen->length;
                if (remain < 0) {
                    apr_bucket_split(bgreen, bgreen->length + remain);
                    beam->green = apr_brigade_split_ex(bb,
                                                       APR_BUCKET_NEXT(bgreen),
                                                       beam->green);
                    break;
                }
            }
        }

        if (beam->closed
            && (!beam->green || APR_BRIGADE_EMPTY(beam->green))
            && H2_BLIST_EMPTY(&beam->red)) {
            /* beam is closed and we have nothing more to receive */
            if (!beam->close_sent) {
                apr_bucket *b = apr_bucket_eos_create(bb->bucket_alloc);
                APR_BRIGADE_INSERT_TAIL(bb, b);
                beam->close_sent = 1;
                ++transferred;
                status = APR_SUCCESS;
            }
        }

        if (transferred) {
            status = APR_SUCCESS;
        }
        else if (beam->closed) {
            status = APR_EOF;
        }
        else if (block == APR_BLOCK_READ && bl.mutex && beam->m_cond) {
            /* nothing available yet: wait for the red side, then retry */
            status = wait_cond(beam, bl.mutex);
            if (status != APR_SUCCESS) {
                goto leave;
            }
            goto transfer;
        }
        else {
            status = APR_EAGAIN;
        }
leave:
        leave_yellow(beam, &bl);
    }
    return status;
}