static int php_apache_sapi_ub_write(const char *str, uint str_length TSRMLS_DC)
{
    apr_bucket *b;
    apr_bucket_brigade *bb;
    apr_bucket_alloc_t *ba;
    ap_filter_t *f; /* remaining output filters */
    php_struct *ctx;

    ctx = SG(server_context);
    f = ctx->f;

    if (str_length == 0) return 0;

    ba = f->c->bucket_alloc;
    bb = apr_brigade_create(ctx->r->pool, ba);

    b = apr_bucket_transient_create(str, str_length, ba);
    APR_BRIGADE_INSERT_TAIL(bb, b);

    if (ap_pass_brigade(f->next, bb) != APR_SUCCESS || ctx->r->connection->aborted) {
        php_handle_aborted_connection();
    }

    return str_length; /* we always consume all the data passed to us. */
}
/* drain_available_output():
 *
 * if any data is available from the filter, read it and append it
 * to the bucket brigade
 */
static apr_status_t drain_available_output(ap_filter_t *f, apr_bucket_brigade *bb)
{
    request_rec *r = f->r;
    conn_rec *c = r->connection;
    ef_ctx_t *ctx = f->ctx;
    apr_size_t len;
    char buf[4096];
    apr_status_t rv;
    apr_bucket *b;

    while (1) {
        int lvl = APLOG_TRACE5;
        len = sizeof(buf);
        rv = apr_file_read(ctx->proc->out, buf, &len);
        if (rv && !APR_STATUS_IS_EAGAIN(rv))
            lvl = APLOG_DEBUG;
        ap_log_rerror(APLOG_MARK, lvl, rv, r,
                      APLOGNO(01460) "apr_file_read(child output), len %"
                      APR_SIZE_T_FMT,
                      !rv ? len : -1);
        if (rv != APR_SUCCESS) {
            return rv;
        }
        b = apr_bucket_heap_create(buf, len, NULL, c->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(bb, b);
        return APR_SUCCESS;
    }
    /* we should never get here; if we do, a bogus error message would be
     * the least of our problems
     */
    return APR_ANONYMOUS;
}
static apr_status_t h2_conn_io_flush_int(h2_conn_io *io, int force)
{
    if (io->unflushed || force) {
        if (io->buflen > 0) {
            /* something in the buffer, put it in the output brigade */
            ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, io->connection,
                          "h2_conn_io: flush, flushing %ld bytes", (long)io->buflen);
            bucketeer_buffer(io);
            io->buflen = 0;
        }

        if (force) {
            APR_BRIGADE_INSERT_TAIL(io->output,
                                    apr_bucket_flush_create(io->output->bucket_alloc));
        }

        ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, io->connection, "h2_conn_io: flush");

        /* Send it out */
        io->unflushed = 0;
        return pass_out(io->output, io);
        /* no more access after this, as we might have flushed an EOC bucket
         * that de-allocated us all. */
    }
    return APR_SUCCESS;
}
static int php_input_filter(ap_filter_t *f, apr_bucket_brigade *bb,
                            ap_input_mode_t mode, apr_read_type_e block,
                            apr_off_t readbytes)
{
    php_struct *ctx;
    apr_status_t rv;

    if (f->r->proxyreq) {
        return ap_get_brigade(f->next, bb, mode, block, readbytes);
    }

    ctx = SG(server_context);
    if (ctx == NULL) {
        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, f->r,
                      "php failed to get server context");
        return HTTP_INTERNAL_SERVER_ERROR;
    }

    if ((rv = ap_get_brigade(f->next, bb, mode, block, readbytes)) != APR_SUCCESS) {
        return rv;
    }

    if (!ctx->post_data) {
        ctx->post_data = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
    }

    if ((rv = ap_save_brigade(f, &ctx->post_data, &bb, f->r->pool)) != APR_SUCCESS) {
        return rv;
    }

    apr_brigade_cleanup(bb);
    APR_BRIGADE_INSERT_TAIL(bb, apr_bucket_eos_create(bb->bucket_alloc));

    return APR_SUCCESS;
}
static apr_status_t read_complete_body(request_rec *r, apr_bucket_brigade *kept_body)
{
    apr_bucket_brigade *tmp_bb;
    apr_bucket *t_bucket1, *t_bucket2;
    unsigned short eos_seen = 0;
    apr_status_t status;

    tmp_bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);

    while (!eos_seen) {
        status = ap_get_brigade(r->input_filters,
                                tmp_bb,
                                AP_MODE_READBYTES,
                                APR_BLOCK_READ,
                                HUGE_STRING_LEN);

        /* This means a filter discovered an error.
         * Furthermore, the input filter has already handled the error and
         * sent something down the output chain.
         * For example, ap_http_filter does this if LimitRequestBody is reached.
         */
        if (status == AP_FILTER_ERROR) {
            apr_brigade_destroy(tmp_bb);
            return AP_FILTER_ERROR;
        }

        /* Cool, no need to search for the EOS bucket */
        if (APR_STATUS_IS_EOF(status)) {
            apr_brigade_destroy(tmp_bb);
            return APR_SUCCESS;
        }

        if (status != APR_SUCCESS) {
            apr_brigade_destroy(tmp_bb);
            return status;
        }

        ITER_BRIGADE(t_bucket1, tmp_bb) {
            apr_bucket_copy(t_bucket1, &t_bucket2);

            /* If SSL is used, TRANSIENT buckets are returned.
             * However, we need this bucket for longer than this function call,
             * hence 'setaside' the bucket.
             */
            if (APR_BUCKET_IS_TRANSIENT(t_bucket2)) {
                apr_bucket_setaside(t_bucket2, r->pool);
            }

            APR_BRIGADE_INSERT_TAIL(kept_body, t_bucket2);

            if (!eos_seen && APR_BUCKET_IS_EOS(t_bucket1)) {
                eos_seen = 1;
            }
        }
        apr_brigade_cleanup(tmp_bb);
    }

    /* EOS seen: the complete body has been copied into kept_body */
    apr_brigade_destroy(tmp_bb);
    return APR_SUCCESS;
}
AP_CORE_DECLARE(void) ap_flush_conn(conn_rec *c)
{
    apr_bucket_brigade *bb;
    apr_bucket *b;

    bb = apr_brigade_create(c->pool, c->bucket_alloc);

    /* FLUSH bucket */
    b = apr_bucket_flush_create(c->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(bb, b);

    /* End Of Connection bucket */
    b = ap_bucket_eoc_create(c->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(bb, b);

    ap_pass_brigade(c->output_filters, bb);
}
MP_INLINE static apr_status_t send_input_flush(modperl_filter_t *filter)
{
    apr_bucket_alloc_t *ba = filter->f->c->bucket_alloc;
    apr_bucket *b = apr_bucket_flush_create(ba);

    APR_BRIGADE_INSERT_TAIL(filter->bb_out, b);

    MP_TRACE_f(MP_FUNC, MP_FILTER_NAME_FORMAT "write out: FLUSH bucket",
               MP_FILTER_NAME(filter->f));

    return APR_SUCCESS;
}
apr_status_t h2_io_in_close(h2_io *io)
{
    if (io->bbin) {
        APR_BRIGADE_INSERT_TAIL(io->bbin,
                                apr_bucket_eos_create(io->bbin->bucket_alloc));
    }
    io->eos_in = 1;
    return APR_SUCCESS;
}
static int process_fortune_connection(conn_rec *c)
{
    apr_status_t rv;
    apr_procattr_t *pattr;
    apr_pool_t *p = c->pool;
    apr_bucket *b;
    apr_bucket_brigade *bb;
    const char *err_msg = "200 OK\n";

    fortune_conf_t *fconf =
        ap_get_module_config(c->base_server->module_config, &fortune_module);

    if (!fconf->enabled) {
        return DECLINED;
    }

    bb = apr_brigade_create(p, c->bucket_alloc);

    /* prepare process attribute */
    if ((rv = apr_procattr_create(&pattr, c->pool)) != APR_SUCCESS) {
        goto error;
    }
    if ((rv = apr_procattr_io_set(pattr, APR_NO_PIPE, APR_FULL_BLOCK,
                                  APR_NO_PIPE)) != APR_SUCCESS) {
        goto error;
    }
    /* default value: APR_PROGRAM */
    if ((rv = apr_procattr_cmdtype_set(pattr, APR_PROGRAM_ENV)) != APR_SUCCESS) {
        goto error;
    }

    /* run the program and read the output from the pipe */
    if ((rv = fortune_process(c, pattr, bb)) != APR_SUCCESS) {
        apr_brigade_cleanup(bb);
    }

  error:
    if (rv != APR_SUCCESS) {
        err_msg = "500 ERROR\n";
    }

    b = apr_bucket_pool_create(err_msg, strlen(err_msg), p, c->bucket_alloc);
    APR_BRIGADE_INSERT_HEAD(bb, b);

    b = apr_bucket_flush_create(c->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(bb, b);

    rv = ap_pass_brigade(c->output_filters, bb);

    return OK;
}
MP_INLINE static apr_status_t send_input_eos(modperl_filter_t *filter)
{
    apr_bucket_alloc_t *ba = filter->f->c->bucket_alloc;
    apr_bucket *b = apr_bucket_eos_create(ba);

    APR_BRIGADE_INSERT_TAIL(filter->bb_out, b);
    ((modperl_filter_ctx_t *)filter->f->ctx)->sent_eos = 1;

    MP_TRACE_f(MP_FUNC, MP_FILTER_NAME_FORMAT "write out: EOS bucket",
               MP_FILTER_NAME(filter->f));

    return APR_SUCCESS;
}
static apr_status_t fortune_process(conn_rec *c, apr_procattr_t *pattr,
                                    apr_bucket_brigade *bb)
{
    apr_status_t rv;
    int argc = 0;
    const char *argv[APP_MAX_ARGC];
    apr_proc_t proc;
    apr_bucket *b;
    apr_pool_t *p = c->pool;

    fortune_conf_t *fconf =
        ap_get_module_config(c->base_server->module_config, &fortune_module);

    argv[argc++] = fconf->progname;
    argv[argc++] = NULL;        /* argv must be NULL-terminated */

    if ((rv = apr_proc_create(&proc, fconf->progname,
                              (const char *const *)argv, NULL,
                              (apr_procattr_t *)pattr, p)) != APR_SUCCESS) {
        return rv;
    }

    while (TRUE) {
        char buf[BUFSIZE] = { 0, };

        /* read the command's output through the pipe */
        rv = apr_file_gets(buf, sizeof(buf), proc.out);
        if (APR_STATUS_IS_EOF(rv)) {
            break;
        }
        b = apr_bucket_pool_create(apr_pstrdup(p, buf), strlen(buf), p,
                                   c->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(bb, b);
    }
    apr_file_close(proc.out);

    {
        int st;
        apr_exit_why_e why;

        rv = apr_proc_wait(&proc, &st, &why, APR_WAIT);
        if (APR_STATUS_IS_CHILD_DONE(rv)) {
            ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, c,
                          "child done: why = %d, exit status = %d", why, st);
        }
        else {
            ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, c, "child not done");
            return APR_EGENERAL;
        }
    }

    return APR_SUCCESS;
}
static apr_status_t amagent_post_filter(ap_filter_t *f, apr_bucket_brigade *bucket_out,
                                        ap_input_mode_t emode, apr_read_type_e eblock,
                                        apr_off_t nbytes)
{
    request_rec *r = f->r;
    conn_rec *c = r->connection;
    apr_bucket *bucket;
    apr_size_t sz;
    char *clean;
    const char *data = apr_table_get(r->notes, amagent_post_filter_name);

    do {
        if (data == NULL) break;

        sz = strlen(data);
        clean = base64_decode(data, &sz);
        if (clean == NULL) break;

        apr_table_unset(r->notes, amagent_post_filter_name);

        LOG_R(APLOG_DEBUG, r, "amagent_post_filter(): reposting %ld bytes", sz);

        bucket = apr_bucket_heap_create((const char *)clean, sz, NULL,
                                        c->bucket_alloc);
        if (bucket == NULL) {
            free(clean);
            return APR_EGENERAL;
        }
        APR_BRIGADE_INSERT_TAIL(bucket_out, bucket);
        free(clean);

        bucket = apr_bucket_eos_create(c->bucket_alloc);
        if (bucket == NULL) {
            return APR_EGENERAL;
        }
        APR_BRIGADE_INSERT_TAIL(bucket_out, bucket);

        ap_remove_input_filter(f);
        return APR_SUCCESS;
    } while (0);

    apr_table_unset(r->notes, amagent_post_filter_name);
    ap_remove_input_filter(f);
    return ap_get_brigade(f->next, bucket_out, emode, eblock, nbytes);
}
//-------
void write_brigade(apr_bucket_brigade *bb_out, request_rec *r, char *data)
{
    APR_BRIGADE_INSERT_TAIL(bb_out,
                            apr_bucket_pool_create(data, strlen(data), r->pool,
                                                   r->connection->bucket_alloc));
}
MP_INLINE static apr_status_t send_output_flush(ap_filter_t *f)
{
    apr_bucket_alloc_t *ba = f->c->bucket_alloc;
    apr_bucket_brigade *bb = apr_brigade_create(MP_FILTER_POOL(f), ba);
    apr_bucket *b = apr_bucket_flush_create(ba);

    APR_BRIGADE_INSERT_TAIL(bb, b);

    MP_TRACE_f(MP_FUNC, MP_FILTER_NAME_FORMAT
               "write out: FLUSH bucket in separate bb", MP_FILTER_NAME(f));

    return ap_pass_brigade(f, bb);
}
apr_status_t h2_io_out_close(h2_io *io)
{
    if (io->rst_error) {
        return APR_ECONNABORTED;
    }
    if (!io->eos_out && !h2_util_has_eos(io->bbout, 0)) {
        APR_BRIGADE_INSERT_TAIL(io->bbout,
                                apr_bucket_eos_create(io->bbout->bucket_alloc));
    }
    return APR_SUCCESS;
}
static int mmap_handler(request_rec *r, a_file *file)
{
#if APR_HAS_MMAP
    conn_rec *c = r->connection;
    apr_bucket *b;
    apr_mmap_t *mm;
    apr_bucket_brigade *bb = apr_brigade_create(r->pool, c->bucket_alloc);

    apr_mmap_dup(&mm, file->mm, r->pool);

    b = apr_bucket_mmap_create(mm, 0, (apr_size_t)file->finfo.size,
                               c->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(bb, b);
    b = apr_bucket_eos_create(c->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(bb, b);

    if (ap_pass_brigade(r->output_filters, bb) != APR_SUCCESS)
        return AP_FILTER_ERROR;
#endif
    return OK;
}
static apr_status_t urlReplaceFilterOutFilter(ap_filter_t *f,
                                              apr_bucket_brigade *pbbIn)
{
    request_rec *r = f->r;
    conn_rec *c = r->connection;
    apr_bucket *pbktIn;
    apr_bucket_brigade *pbbOut;

    pbbOut = apr_brigade_create(r->pool, c->bucket_alloc);
    for (pbktIn = APR_BRIGADE_FIRST(pbbIn);
         pbktIn != APR_BRIGADE_SENTINEL(pbbIn);
         pbktIn = APR_BUCKET_NEXT(pbktIn))
    {
        const char *data;
        apr_size_t len;
        char *buf;
        apr_size_t n;
        apr_bucket *pbktOut;

        if (APR_BUCKET_IS_EOS(pbktIn)) {
            apr_bucket *pbktEOS = apr_bucket_eos_create(c->bucket_alloc);
            APR_BRIGADE_INSERT_TAIL(pbbOut, pbktEOS);
            continue;
        }

        /* read */
        apr_bucket_read(pbktIn, &data, &len, APR_BLOCK_READ);

        /* write */
        buf = apr_bucket_alloc(len, c->bucket_alloc);
        for (n = 0; n < len; ++n)
            buf[n] = apr_toupper(data[n]);

        pbktOut = apr_bucket_heap_create(buf, len, apr_bucket_free,
                                         c->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(pbbOut, pbktOut);
    }

    apr_brigade_cleanup(pbbIn);
    return ap_pass_brigade(f->next, pbbOut);
}
/* Bring the current buffer content into the output brigade, appropriately
 * chunked.
 */
static apr_status_t bucketeer_buffer(h2_conn_io *io)
{
    const char *data = io->buffer;
    apr_size_t remaining = io->buflen;
    apr_bucket *b;
    int bcount, i;

    if (io->write_size > WRITE_SIZE_INITIAL
        && (io->cooldown_usecs > 0)
        && (apr_time_now() - io->last_write) >= io->cooldown_usecs) {
        /* long time not written, reset write size */
        io->write_size = WRITE_SIZE_INITIAL;
        io->bytes_written = 0;
        ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, io->connection,
                      "h2_conn_io(%ld): timeout write size reset to %ld",
                      (long)io->connection->id, (long)io->write_size);
    }
    else if (io->write_size < WRITE_SIZE_MAX
             && io->bytes_written >= io->warmup_size) {
        /* connection is hot, use max size */
        io->write_size = WRITE_SIZE_MAX;
        ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, io->connection,
                      "h2_conn_io(%ld): threshold reached, write size now %ld",
                      (long)io->connection->id, (long)io->write_size);
    }

    bcount = (int)(remaining / io->write_size);
    for (i = 0; i < bcount; ++i) {
        b = apr_bucket_transient_create(data, io->write_size,
                                        io->output->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(io->output, b);
        data += io->write_size;
        remaining -= io->write_size;
    }

    if (remaining > 0) {
        b = apr_bucket_transient_create(data, remaining, io->output->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(io->output, b);
    }

    return APR_SUCCESS;
}
/*============================================================================
 *============================================================================
 * This is the beginning of the cgi filter code moved from mod_include. This
 * is the code required to handle the "exec" SSI directive.
 *============================================================================
 *============================================================================*/
static apr_status_t include_cgi(include_ctx_t *ctx, ap_filter_t *f,
                                apr_bucket_brigade *bb, char *s)
{
    request_rec *r = f->r;
    request_rec *rr = ap_sub_req_lookup_uri(s, r, f->next);
    int rr_status;

    if (rr->status != HTTP_OK) {
        ap_destroy_sub_req(rr);
        return APR_EGENERAL;
    }

    /* No hardwired path info or query allowed */
    if ((rr->path_info && rr->path_info[0]) || rr->args) {
        ap_destroy_sub_req(rr);
        return APR_EGENERAL;
    }

    if (rr->finfo.filetype != APR_REG) {
        ap_destroy_sub_req(rr);
        return APR_EGENERAL;
    }

    /* Script gets parameters of the *document*, for back compatibility */
    rr->path_info = r->path_info;       /* hard to get right; see mod_cgi.c */
    rr->args = r->args;

    /* Force sub_req to be treated as a CGI request, even if ordinary
     * typing rules would have called it something else.
     */
    ap_set_content_type(rr, CGI_MAGIC_TYPE);

    /* Run it. */
    rr_status = ap_run_sub_req(rr);
    if (ap_is_HTTP_REDIRECT(rr_status)) {
        const char *location = apr_table_get(rr->headers_out, "Location");

        if (location) {
            char *buffer;

            location = ap_escape_html(rr->pool, location);
            buffer = apr_pstrcat(ctx->pool, "<a href=\"", location, "\">",
                                 location, "</a>", NULL);

            APR_BRIGADE_INSERT_TAIL(bb,
                apr_bucket_pool_create(buffer, strlen(buffer), ctx->pool,
                                       f->c->bucket_alloc));
        }
    }

    ap_destroy_sub_req(rr);

    return APR_SUCCESS;
}
MP_INLINE static apr_status_t send_output_eos(ap_filter_t *f)
{
    apr_bucket_alloc_t *ba = f->c->bucket_alloc;
    apr_bucket_brigade *bb = apr_brigade_create(MP_FILTER_POOL(f), ba);
    apr_bucket *b = apr_bucket_eos_create(ba);

    APR_BRIGADE_INSERT_TAIL(bb, b);
    ((modperl_filter_ctx_t *)f->ctx)->sent_eos = 1;

    MP_TRACE_f(MP_FUNC, MP_FILTER_NAME_FORMAT
               "write out: EOS bucket in separate bb", MP_FILTER_NAME(f));

    return ap_pass_brigade(f->next, bb);
}
static apr_status_t recall_body(cache_handle_t *h, apr_pool_t *p,
                                apr_bucket_brigade *bb)
{
    apr_bucket *b;
    mem_cache_object_t *mobj = (mem_cache_object_t *) h->cache_obj->vobj;

    if (mobj->type == CACHE_TYPE_FILE) {
        /* CACHE_TYPE_FILE */
        apr_file_t *file;
        apr_os_file_put(&file, &mobj->fd, mobj->flags, p);
        b = apr_bucket_file_create(file, 0, mobj->m_len, p, bb->bucket_alloc);
    }
    else {
        /* CACHE_TYPE_HEAP */
        b = apr_bucket_immortal_create(mobj->m, mobj->m_len, bb->bucket_alloc);
    }
    APR_BRIGADE_INSERT_TAIL(bb, b);
    b = apr_bucket_eos_create(bb->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(bb, b);

    return APR_SUCCESS;
}
static apr_status_t bucketeer_buffer(h2_conn_io *io)
{
    const char *data = io->buffer;
    apr_size_t remaining = io->buflen;
    int bcount = (int)(remaining / H2_CONN_IO_SSL_WRITE_SIZE);
    apr_bucket *b;

    for (int i = 0; i < bcount; ++i) {
        b = apr_bucket_transient_create(data, H2_CONN_IO_SSL_WRITE_SIZE,
                                        io->output->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(io->output, b);
        data += H2_CONN_IO_SSL_WRITE_SIZE;
        remaining -= H2_CONN_IO_SSL_WRITE_SIZE;
    }

    if (remaining > 0) {
        b = apr_bucket_transient_create(data, remaining, io->output->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(io->output, b);
    }

    return APR_SUCCESS;
}
APU_DECLARE(apr_status_t) apr_brigade_split_line(apr_bucket_brigade *bbOut,
                                                 apr_bucket_brigade *bbIn,
                                                 apr_read_type_e block,
                                                 apr_off_t maxbytes)
{
    apr_off_t readbytes = 0;

    while (!APR_BRIGADE_EMPTY(bbIn)) {
        const char *pos;
        const char *str;
        apr_size_t len;
        apr_status_t rv;
        apr_bucket *e;

        e = APR_BRIGADE_FIRST(bbIn);
        rv = apr_bucket_read(e, &str, &len, block);

        if (rv != APR_SUCCESS) {
            return rv;
        }

        pos = memchr(str, APR_ASCII_LF, len);
        /* We found a match. */
        if (pos != NULL) {
            apr_bucket_split(e, pos - str + 1);
            APR_BUCKET_REMOVE(e);
            APR_BRIGADE_INSERT_TAIL(bbOut, e);
            return APR_SUCCESS;
        }
        APR_BUCKET_REMOVE(e);
        APR_BRIGADE_INSERT_TAIL(bbOut, e);
        readbytes += len;
        /* We didn't find an APR_ASCII_LF within the maximum line length. */
        if (readbytes >= maxbytes) {
            break;
        }
    }

    return APR_SUCCESS;
}
static apr_status_t recall_body(cache_handle_t *h, apr_pool_t *p,
                                apr_bucket_brigade *bb)
{
    apr_bucket *e;
    disk_cache_object_t *dobj = (disk_cache_object_t *) h->cache_obj->vobj;

    e = apr_bucket_file_create(dobj->fd, 0, (apr_size_t) dobj->file_size, p,
                               bb->bucket_alloc);
    APR_BRIGADE_INSERT_HEAD(bb, e);
    e = apr_bucket_eos_create(bb->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(bb, e);

    return APR_SUCCESS;
}
pstar_io::~pstar_io()
{
    apr_status_t rv;
    apr_bucket *b;
    static const char msg[] =
        "<h1>P* Error</h1><p>An error occurred. More information can be found "
        "in the web server log files.</p>";

    if (http_error_pending) {
        apr_table_set(r->headers_out, "Content-Type", "text/html");
        // apr_brigade_cleanup(bb);
        /* sizeof(msg) - 1 so the terminating NUL is not sent to the client */
        b = apr_bucket_immortal_create(msg, sizeof(msg) - 1, ba);
        APR_BRIGADE_INSERT_TAIL(bb, b);
    }

    b = apr_bucket_eos_create(ba);
    APR_BRIGADE_INSERT_TAIL(bb, b);

    rv = ap_pass_brigade(r->output_filters, bb);
    if (rv != APR_SUCCESS) {
        throw runtime_error("pstar_io::write(); Could not write to client");
    }
}
static apr_status_t send_bucket_downstream(ap_filter_t *f, apr_bucket *b)
{
    charset_filter_ctx_t *ctx = f->ctx;
    apr_status_t rv;

    APR_BRIGADE_INSERT_TAIL(ctx->tmpbb, b);
    rv = ap_pass_brigade(f->next, ctx->tmpbb);
    if (rv != APR_SUCCESS) {
        ctx->ees = EES_DOWNSTREAM;
    }
    apr_brigade_cleanup(ctx->tmpbb);
    return rv;
}
static PyObject *conn_write(connobject *self, PyObject *args)
{
    char *buff;
    int len;
    apr_bucket_brigade *bb;
    apr_bucket *b;
    PyObject *s;
    conn_rec *c = self->conn;

    if (! PyArg_ParseTuple(args, "O", &s))
        return NULL;

    if (! PyString_Check(s)) {
        PyErr_SetString(PyExc_TypeError, "Argument to write() must be a string");
        return NULL;
    }

    /* PYTHON 2.5: 'PyString_Size' uses Py_ssize_t for return values
     * (may need overflow check) */
    len = PyString_Size(s);

    if (len) {
        buff = apr_pmemdup(c->pool, PyString_AS_STRING(s), len);

        bb = apr_brigade_create(c->pool, c->bucket_alloc);

        b = apr_bucket_pool_create(buff, len, c->pool, c->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(bb, b);

        /* Make sure the data is flushed to the client */
        b = apr_bucket_flush_create(c->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(bb, b);

        ap_pass_brigade(c->output_filters, bb);
    }

    Py_INCREF(Py_None);
    return Py_None;
}
int output_finalize_bb(output_t *output, apr_table_t *out_headers)
{
    apr_status_t rv;
    apr_bucket *eos_bucket;

    eos_bucket = apr_bucket_eos_create(output->bucket_allocator);
    APR_BRIGADE_INSERT_TAIL(output->bucket_brigade, eos_bucket);

    apr_table_overlap(out_headers, output->headers, APR_OVERLAP_TABLES_SET);

    rv = apr_brigade_length(output->bucket_brigade, 1, &(output->length));
    return rv;
}
static apr_status_t include_cmd(include_ctx_t *ctx, ap_filter_t *f,
                                apr_bucket_brigade *bb, const char *command)
{
    cgi_exec_info_t e_info;
    const char **argv;
    apr_file_t *script_out = NULL, *script_in = NULL, *script_err = NULL;
    apr_status_t rv;
    request_rec *r = f->r;

    add_ssi_vars(r);

    e_info.process_cgi = 0;
    e_info.cmd_type    = APR_SHELLCMD;
    e_info.detached    = 0;
    e_info.in_pipe     = APR_NO_PIPE;
    e_info.out_pipe    = APR_FULL_BLOCK;
    e_info.err_pipe    = APR_NO_PIPE;
    e_info.prog_type   = RUN_AS_SSI;
    e_info.bb          = &bb;
    e_info.ctx         = ctx;
    e_info.next        = f->next;
    e_info.addrspace   = 0;

    if ((rv = cgi_build_command(&command, &argv, r, r->pool, &e_info)) != APR_SUCCESS) {
        ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
                      "don't know how to spawn cmd child process: %s",
                      r->filename);
        return rv;
    }

    /* run the script in its own process */
    if ((rv = run_cgi_child(&script_out, &script_in, &script_err,
                            command, argv, r, r->pool, &e_info)) != APR_SUCCESS) {
        ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
                      "couldn't spawn child process: %s", r->filename);
        return rv;
    }

    APR_BRIGADE_INSERT_TAIL(bb, apr_bucket_pipe_create(script_in,
                                                       f->c->bucket_alloc));
    ctx->flush_now = 1;

    /* We can't close the pipe here, because we may return before the
     * full CGI has been sent to the network. That's okay though,
     * because we can rely on the pool to close the pipe for us.
     */
    return APR_SUCCESS;
}
apr_status_t jxr_append_brigade(request_rec *r, apr_bucket_brigade *dest,
                                apr_bucket_brigade *bb, int *eos_seen)
{
    apr_size_t max_msglen = MAX_PACKET_SIZE - sizeof(Jaxer_Header);
    apr_status_t rv;

    while (!APR_BRIGADE_EMPTY(bb)) {
        apr_size_t readlen;
        const char *buffer;
        apr_bucket *e = APR_BRIGADE_FIRST(bb);

        if (APR_BUCKET_IS_EOS(e)) {
            apr_bucket_delete(e);
            if (eos_seen)
                *eos_seen = 1;
            continue;
        }
        if (APR_BUCKET_IS_METADATA(e)) {
            apr_bucket_delete(e);
            continue;
        }

        /* Read the bucket now */
        if ((rv = apr_bucket_read(e, &buffer, &readlen, APR_BLOCK_READ)) != APR_SUCCESS) {
            ap_log_perror(APLOG_MARK, APLOG_INFO, rv, r->pool,
                          "mod_jaxer: can't read data from handler");
            return rv;
        }

        if (readlen > max_msglen) {
            apr_bucket_split(e, max_msglen);
        }
        else {
            APR_BUCKET_REMOVE(e);
            APR_BRIGADE_INSERT_TAIL(dest, e);
        }
    }

    if ((rv = apr_brigade_destroy(bb)) != APR_SUCCESS) {
        ap_log_perror(APLOG_MARK, APLOG_INFO, rv, r->pool,
                      "mod_jaxer: failed to destroy brigade.");
        return rv;
    }

    return APR_SUCCESS;
}