/* Append the NUL-terminated string 'str' to brigade 'bb'.
 *
 * Fast path: if the last bucket is a heap bucket with enough free space
 * left in its buffer, copy the string directly into it.  Otherwise fall
 * back to apr_brigade_write(), which knows how to grow the brigade.
 */
APU_DECLARE(apr_status_t) apr_brigade_puts(apr_bucket_brigade *bb,
                                           apr_brigade_flush flush, void *ctx,
                                           const char *str)
{
    apr_size_t len = strlen(str);
    apr_bucket *bkt = APR_BRIGADE_LAST(bb);

    if (!APR_BRIGADE_EMPTY(bb) && APR_BUCKET_IS_HEAP(bkt)) {
        /* If there is enough space available in a heap bucket
         * at the end of the brigade, copy the string directly
         * into the heap bucket.
         */
        apr_bucket_heap *h = bkt->data;

        /* BUGFIX: the bucket's data occupies [start, start+length) within
         * h->base, and we write at h->base + start + length below.  The
         * original computed the free space as alloc_len - length, ignoring
         * the start offset; a bucket that has been split or partially
         * consumed has start > 0, so that overstated the space and the
         * memcpy() could run past the end of the allocation (heap buffer
         * overflow).  Account for the offset here.
         * (heap bucket start offsets are in-memory, safe to cast)
         * NOTE(review): a shared heap buffer (refcount > 1) would also be
         * unsafe to append to in place — confirm callers never pass one. */
        apr_size_t used = (apr_size_t)bkt->start + bkt->length;
        apr_size_t bytes_avail = (used <= h->alloc_len)
                                 ? h->alloc_len - used : 0;

        if (bytes_avail >= len) {
            char *buf = h->base + bkt->start + bkt->length;
            memcpy(buf, str, len);
            bkt->length += len;
            return APR_SUCCESS;
        }
    }

    /* If the string could not be copied into an existing heap
     * bucket, delegate the work to apr_brigade_write(), which
     * knows how to grow the brigade.
     */
    return apr_brigade_write(bb, flush, ctx, str, len);
}
/* vprintf-style formatting into a brigade: format into a stack buffer,
 * letting brigade_flush() drain it into 'b' whenever apr_vformatter()
 * fills it, then write whatever is left over. */
APU_DECLARE(apr_status_t) apr_brigade_vprintf(apr_bucket_brigade *b,
                                              apr_brigade_flush flush,
                                              void *ctx,
                                              const char *fmt, va_list va)
{
    struct brigade_vprintf_data_t vd;
    char scratch[APR_BUCKET_BUFF_SIZE];
    int rc;

    /* Wire the formatter window and the flush context to 'scratch'. */
    vd.vbuff.curpos = scratch;
    vd.vbuff.endpos = scratch + APR_BUCKET_BUFF_SIZE;
    vd.b = b;
    vd.flusher = &flush;
    vd.ctx = ctx;
    vd.cbuff = scratch;

    rc = apr_vformatter(brigade_flush, &vd.vbuff, fmt, va);
    if (rc == -1) {
        return -1;
    }

    /* write out what remains in the buffer */
    return apr_brigade_write(b, flush, ctx, scratch,
                             vd.vbuff.curpos - scratch);
}
/* Append body data to the stream's input brigade, optionally wrapped in
 * HTTP/1.1 chunked framing: "<hex-size>\r\n<data>\r\n". */
static apr_status_t input_add_data(h2_stream *stream, const char *data,
                                   size_t len, int chunked)
{
    apr_status_t rv;

    if (!chunked) {
        /* Plain body data: append verbatim. */
        return apr_brigade_write(stream->bbin, input_flush, stream,
                                 data, len);
    }

    rv = apr_brigade_printf(stream->bbin, input_flush, stream,
                            "%lx\r\n", (unsigned long)len);
    if (rv == APR_SUCCESS) {
        rv = apr_brigade_write(stream->bbin, input_flush, stream, data, len);
        if (rv == APR_SUCCESS) {
            rv = apr_brigade_puts(stream->bbin, input_flush, stream, "\r\n");
        }
    }
    return rv;
}
/* Lua binding: bb:write(str [, nbytes]) -> apr_status_t.
 * Writes 'str' (or its first 'nbytes' bytes if given) into the brigade. */
static int brigade_write(lua_State *L)
{
    apr_size_t len;
    apr_status_t rc;
    apr_bucket_brigade *bb =
        (apr_bucket_brigade *)CHECK_BUCKETBRIGADE_OBJECT(1);
    const char *data = luaL_checklstring(L, 2, &len);

    /* Optional third argument overrides the byte count. */
    len = luaL_optinteger(L, 3, len);
    rc = apr_brigade_write(bb, NULL, NULL, data, len);
    lua_pushinteger(L, rc);
    return 1;
}
// Appends the sub parameter to the bucket brigade. Sub parameters are given // 'rails style' with parent_param[sub_param_name]=sub_param_value apr_status_t porter_append_sub_parameter(apr_pool_t *pool, apr_bucket_brigade *bb, const char *parent_param, const char *sub_param, const char *sub_param_value, apr_size_t length) { const char *escaped_value = apreq_escape(pool, sub_param_value, length); const char *encoded_value = apr_pstrcat(pool, parent_param, "%5B", sub_param, "%5D=", escaped_value, "&", NULL); PORTER_LOG("appending sub param"); PORTER_LOG(sub_param); PORTER_LOG(escaped_value); return apr_brigade_write(bb, NULL, NULL, encoded_value, strlen(encoded_value)); }
/* Move one line (up to and including the first LF, or up to 'maxbytes')
 * from bbIn into bbOut.  Small data buckets are coalesced into bbOut via
 * apr_brigade_write(); metadata and large buckets pass through intact. */
APU_DECLARE(apr_status_t) apr_brigade_split_line(apr_bucket_brigade *bbOut,
                                                 apr_bucket_brigade *bbIn,
                                                 apr_read_type_e block,
                                                 apr_off_t maxbytes)
{
    apr_off_t consumed = 0;

    while (!APR_BRIGADE_EMPTY(bbIn)) {
        const char *data;
        const char *lf;
        apr_size_t avail;
        apr_status_t rv;
        apr_bucket *bkt = APR_BRIGADE_FIRST(bbIn);

        rv = apr_bucket_read(bkt, &data, &avail, block);
        if (rv != APR_SUCCESS) {
            return rv;
        }

        lf = memchr(data, APR_ASCII_LF, avail);
        if (lf != NULL) {
            /* Found the line end: keep everything up to and including
             * the LF in bbOut, leave the remainder in bbIn. */
            apr_bucket_split(bkt, lf - data + 1);
            APR_BUCKET_REMOVE(bkt);
            APR_BRIGADE_INSERT_TAIL(bbOut, bkt);
            return APR_SUCCESS;
        }

        APR_BUCKET_REMOVE(bkt);
        if (APR_BUCKET_IS_METADATA(bkt) || avail > APR_BUCKET_BUFF_SIZE/4) {
            /* Pass metadata and large buckets through untouched. */
            APR_BRIGADE_INSERT_TAIL(bbOut, bkt);
        }
        else {
            /* Coalesce small data buckets into bbOut. */
            if (avail > 0) {
                rv = apr_brigade_write(bbOut, NULL, NULL, data, avail);
                if (rv != APR_SUCCESS) {
                    return rv;
                }
            }
            apr_bucket_destroy(bkt);
        }

        consumed += avail;
        /* We didn't find an APR_ASCII_LF within the maximum line length. */
        if (consumed >= maxbytes) {
            break;
        }
    }

    return APR_SUCCESS;
}
/* Write 'length' bytes from 'buf' to the connection's output.
 * With H2_CONN_IO_USE_BUFFER the data is copied into io->buffer and
 * flushed whenever the buffer fills; otherwise it is written straight
 * into the output brigade with flush_out as the flush callback. */
apr_status_t h2_conn_io_write(h2_conn_io *io, const char *buf, size_t length)
{
    apr_status_t status = APR_SUCCESS;
    /* Mark that output is pending a flush. */
    io->unflushed = 1;
#if H2_CONN_IO_USE_BUFFER
    ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, io->connection,
                  "h2_conn_io: buffering %ld bytes", (long)length);
    /* Copy into io->buffer, flushing whenever it fills up. */
    while (length > 0 && (status == APR_SUCCESS)) {
        apr_size_t avail = io->bufsize - io->buflen;
        /* NOTE(review): avail is unsigned, so "<= 0" means "== 0" here. */
        if (avail <= 0) {
            /* Buffer full: turn it into buckets and push it out. */
            bucketeer_buffer(io);
            status = flush_out(io->output, io);
            io->buflen = 0;
        }
        else if (length > avail) {
            /* Fill the remaining space; the next pass flushes. */
            memcpy(io->buffer + io->buflen, buf, avail);
            io->buflen += avail;
            length -= avail;
            buf += avail;
        }
        else {
            /* Everything fits in the buffer. */
            memcpy(io->buffer + io->buflen, buf, length);
            io->buflen += length;
            length = 0;
            break;
        }
    }
#else /* H2_CONN_IO_USE_BUFFER */
    status = apr_brigade_write(io->output, flush_out, io, buf, length);
    if (status == APR_SUCCESS
        || APR_STATUS_IS_ECONNABORTED(status)
        || APR_STATUS_IS_EPIPE(status)) {
        /* These are all fine and no reason for concern. Everything else
         * is interesting. */
        status = APR_SUCCESS;
    }
    else {
        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, io->connection,
                      "h2_conn_io: write error");
    }
#endif /* H2_CONN_IO_USE_BUFFER (else) */
    return status;
}
/* Append raw request body data to the h1 conversion brigade and flush.
 * Only valid after the header section is complete (eoh) and before
 * end-of-stream (eos) has been seen. */
static apr_status_t h2_to_h1_add_data_raw(h2_to_h1 *to_h1, const char *data,
                                          size_t len)
{
    apr_status_t rv;

    h2_mplx_get_conn(to_h1->m);
    if (to_h1->eos || !to_h1->eoh) {
        return APR_EINVAL;
    }

    rv = apr_brigade_write(to_h1->bb, flush, to_h1, data, len);
    if (rv == APR_SUCCESS) {
        rv = h2_to_h1_flush(to_h1);
    }
    return rv;
}
// Handles a simple parameter value, simply concats the name and value to the output // stream apr_status_t porter_handle_parameter(porter_upload_request_t *ur, apreq_param_t *param) { const char *new_parameter_value; apr_status_t rv; new_parameter_value = apr_pstrcat(ur->pool, apreq_escape(ur->pool, param->v.name, strlen(param->v.name)), "=", apreq_escape(ur->pool, param->v.data, strlen(param->v.data)), "&", NULL); PORTER_LOG("Writing plain parameter"); PORTER_LOG(param->v.name); PORTER_LOG(new_parameter_value); PORTER_HANDLE_ERROR(apr_brigade_write(ur->bucket_brigade, NULL, NULL, new_parameter_value, strlen(new_parameter_value))); return APR_SUCCESS; }
/* Write 'length' bytes to the connection output.  When buffering is
 * configured (io->bufsize > 0) the data is accumulated in io->buffer and
 * passed out whenever the buffer fills; otherwise it goes straight into
 * the output brigade with pass_out as the flush callback. */
apr_status_t h2_conn_io_write(h2_conn_io *io, const char *buf, size_t length)
{
    apr_status_t status = APR_SUCCESS;

    io->unflushed = 1;
    if (io->bufsize > 0) {
        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, io->connection,
                      "h2_conn_io: buffering %ld bytes", (long)length);

        /* Drain anything already sitting in the output brigade first. */
        if (!APR_BRIGADE_EMPTY(io->output)) {
            status = h2_conn_io_flush(io);
            io->unflushed = 1;
        }

        while (length > 0 && (status == APR_SUCCESS)) {
            apr_size_t space = io->bufsize - io->buflen;
            if (space == 0) {
                /* Buffer is full: convert to buckets and pass out. */
                bucketeer_buffer(io);
                status = pass_out(io->output, io);
                io->buflen = 0;
            }
            else if (length > space) {
                /* Partially fits: top up the buffer, keep looping. */
                memcpy(io->buffer + io->buflen, buf, space);
                io->buflen += space;
                length -= space;
                buf += space;
            }
            else {
                /* Fits entirely. */
                memcpy(io->buffer + io->buflen, buf, length);
                io->buflen += length;
                length = 0;
                break;
            }
        }
    }
    else {
        ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, io->connection,
                      "h2_conn_io: writing %ld bytes to brigade",
                      (long)length);
        status = apr_brigade_write(io->output, pass_out, io, buf, length);
    }
    return status;
}
/* Append each string argument in 'va' to the brigade, stopping at the
 * NULL terminator.  Returns the first error from apr_brigade_write(). */
APU_DECLARE(apr_status_t) apr_brigade_vputstrs(apr_bucket_brigade *b,
                                               apr_brigade_flush flush,
                                               void *ctx, va_list va)
{
    const char *str;

    while ((str = va_arg(va, const char *)) != NULL) {
        apr_status_t rv = apr_brigade_write(b, flush, ctx, str, strlen(str));
        if (rv != APR_SUCCESS) {
            return rv;
        }
    }
    return APR_SUCCESS;
}
/* Write THESTR (including its NUL terminator — hence sizeof, not strlen)
 * COUNT times into a fresh brigade and verify the accumulated length. */
static void test_bwrite(abts_case *tc, void *data)
{
    apr_bucket_alloc_t *ba = apr_bucket_alloc_create(p);
    apr_bucket_brigade *bb = apr_brigade_create(p, ba);
    apr_off_t length;
    int i;

    for (i = 0; i < COUNT; i++) {
        apr_assert_success(tc, "brigade_write",
                           apr_brigade_write(bb, NULL, NULL,
                                             THESTR, sizeof THESTR));
    }

    apr_assert_success(tc, "determine brigade length",
                       apr_brigade_length(bb, 1, &length));
    ABTS_ASSERT(tc, "brigade has correct length",
                length == (COUNT * sizeof THESTR));

    apr_brigade_destroy(bb);
    apr_bucket_alloc_destroy(ba);
}
/* Callback passed to apr_vformatter(): invoked when the formatter's
 * buffer is full (curpos has reached endpos).  The buff argument is
 * really the vbuff member of a brigade_vprintf_data_t, so downcast. */
static apr_status_t brigade_flush(apr_vformatter_buff_t *buff)
{
    struct brigade_vprintf_data_t *vd =
        (struct brigade_vprintf_data_t *)buff;
    apr_status_t rv;

    /* Drain the full buffer into the brigade... */
    rv = apr_brigade_write(vd->b, *vd->flusher, vd->ctx, vd->cbuff,
                           APR_BUCKET_BUFF_SIZE);
    if (rv != APR_SUCCESS) {
        return -1;
    }

    /* ...and reset the formatter's window to the start of the buffer. */
    vd->vbuff.curpos = vd->cbuff;
    vd->vbuff.endpos = vd->cbuff + APR_BUCKET_BUFF_SIZE;
    return rv;
}
/* Content handler that reflects a POST request body back to the client,
 * optionally copying configured request headers into the response.
 * Returns DECLINED for other handlers, handles OPTIONS via
 * ap_send_http_options(), and rejects all other methods. */
static int reflector_handler(request_rec * r)
{
    apr_bucket_brigade *bbin, *bbout;
    reflector_cfg *conf;
    apr_status_t status;

    if (strcmp(r->handler, "reflector")) {
        return DECLINED;
    }

    conf = (reflector_cfg *) ap_get_module_config(r->per_dir_config,
                                                  &reflector_module);

    ap_allow_methods(r, 1, "POST", "OPTIONS", NULL);

    if (r->method_number == M_OPTIONS) {
        return ap_send_http_options(r);
    }

    else if (r->method_number == M_POST) {
        const char *content_length, *content_type;
        int seen_eos;

        /*
         * Sometimes we'll get in a state where the input handling has
         * detected an error where we want to drop the connection, so if
         * that's the case, don't read the data as that is what we're trying
         * to avoid.
         *
         * This function is also a no-op on a subrequest.
         */
        if (r->main || r->connection->keepalive == AP_CONN_CLOSE ||
            ap_status_drops_connection(r->status)) {
            return OK;
        }

        /* copy headers from in to out if configured */
        apr_table_do(header_do, r, conf->headers, NULL);

        /* last modified defaults to now, unless otherwise set on the way in */
        if (!apr_table_get(r->headers_out, "Last-Modified")) {
            ap_update_mtime(r, apr_time_now());
            ap_set_last_modified(r);
        }
        ap_set_accept_ranges(r);

        /* reflect the content length, if present */
        if ((content_length = apr_table_get(r->headers_in,
                                            "Content-Length"))) {
            apr_off_t offset;
            apr_strtoff(&offset, content_length, NULL, 10);
            ap_set_content_length(r, offset);
        }

        /* reflect the content type, if present */
        if ((content_type = apr_table_get(r->headers_in, "Content-Type"))) {
            ap_set_content_type(r, content_type);
        }

        bbin = apr_brigade_create(r->pool, r->connection->bucket_alloc);
        bbout = apr_brigade_create(r->pool, r->connection->bucket_alloc);

        seen_eos = 0;
        do {
            apr_bucket *bucket;

            /* Pull the next chunk of the request body through the input
             * filter chain. */
            status = ap_get_brigade(r->input_filters, bbin, AP_MODE_READBYTES,
                                    APR_BLOCK_READ, HUGE_STRING_LEN);

            if (status != APR_SUCCESS) {
                if (status == AP_FILTER_ERROR) {
                    apr_brigade_destroy(bbin);
                    return status;
                }
                else {
                    apr_brigade_destroy(bbin);
                    return HTTP_BAD_REQUEST;
                }
            }

            for (bucket = APR_BRIGADE_FIRST(bbin);
                 bucket != APR_BRIGADE_SENTINEL(bbin);
                 bucket = APR_BUCKET_NEXT(bucket)) {
                const char *data;
                apr_size_t len;

                if (APR_BUCKET_IS_EOS(bucket)) {
                    seen_eos = 1;
                    break;
                }

                /* These are metadata buckets. */
                if (bucket->length == 0) {
                    continue;
                }

                /*
                 * We MUST read because in case we have an unknown-length
                 * bucket or one that morphs, we want to exhaust it.
                 */
                status = apr_bucket_read(bucket, &data, &len,
                                         APR_BLOCK_READ);
                if (status != APR_SUCCESS) {
                    apr_brigade_destroy(bbin);
                    return HTTP_BAD_REQUEST;
                }

                /* Copy the data into the output brigade and send it on.
                 * NOTE(review): the apr_brigade_write() result is not
                 * checked here, and bbin is not destroyed on the
                 * ap_pass_brigade() error path — verify intent. */
                apr_brigade_write(bbout, NULL, NULL, data, len);

                status = ap_pass_brigade(r->output_filters, bbout);
                if (status != APR_SUCCESS) {
                    /* no way to know what type of error occurred */
                    ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, r,
                                  APLOGNO(01410)
                                  "reflector_handler: ap_pass_brigade returned %i",
                                  status);
                    return HTTP_INTERNAL_SERVER_ERROR;
                }
            }

            apr_brigade_cleanup(bbin);

        } while (!seen_eos);

        return OK;
    }

    else {
        return HTTP_METHOD_NOT_ALLOWED;
    }
}
/* Core network output filter: collects the brigade's buckets into iovecs
 * and/or a single file bucket per pass, decides whether to buffer small
 * output for later (deferred write) or send it now via sendfile/writev,
 * and loops until the brigade is drained. */
apr_status_t ap_core_output_filter(ap_filter_t *f, apr_bucket_brigade *b)
{
    apr_status_t rv;
    apr_bucket_brigade *more;
    conn_rec *c = f->c;
    core_net_rec *net = f->ctx;
    core_output_filter_ctx_t *ctx = net->out_ctx;
    apr_read_type_e eblock = APR_NONBLOCK_READ;
    apr_pool_t *input_pool = b->p;

    /* Fail quickly if the connection has already been aborted. */
    if (c->aborted) {
        apr_brigade_cleanup(b);
        return APR_ECONNABORTED;
    }

    if (ctx == NULL) {
        ctx = apr_pcalloc(c->pool, sizeof(*ctx));
        net->out_ctx = ctx;
    }

    /* If we have a saved brigade, concatenate the new brigade to it */
    if (ctx->b) {
        APR_BRIGADE_CONCAT(ctx->b, b);
        b = ctx->b;
        ctx->b = NULL;
    }

    /* Perform multiple passes over the brigade, sending batches of output
       to the connection. */
    while (b && !APR_BRIGADE_EMPTY(b)) {
        apr_size_t nbytes = 0;
        apr_bucket *last_e = NULL; /* initialized for debugging */
        apr_bucket *e;

        /* one group of iovecs per pass over the brigade */
        apr_size_t nvec = 0;
        apr_size_t nvec_trailers = 0;
        struct iovec vec[MAX_IOVEC_TO_WRITE];
        struct iovec vec_trailers[MAX_IOVEC_TO_WRITE];

        /* one file per pass over the brigade */
        apr_file_t *fd = NULL;
        apr_size_t flen = 0;
        apr_off_t foffset = 0;

        /* keep track of buckets that we've concatenated
         * to avoid small writes */
        apr_bucket *last_merged_bucket = NULL;

        /* tail of brigade if we need another pass */
        more = NULL;

        /* Iterate over the brigade: collect iovecs and/or a file */
        for (e = APR_BRIGADE_FIRST(b);
             e != APR_BRIGADE_SENTINEL(b);
             e = APR_BUCKET_NEXT(e)) {
            /* keep track of the last bucket processed */
            last_e = e;
            if (APR_BUCKET_IS_EOS(e) || AP_BUCKET_IS_EOC(e)) {
                break;
            }
            else if (APR_BUCKET_IS_FLUSH(e)) {
                if (e != APR_BRIGADE_LAST(b)) {
                    more = apr_brigade_split(b, APR_BUCKET_NEXT(e));
                }
                break;
            }

            /* It doesn't make any sense to use sendfile for a file bucket
             * that represents 10 bytes.
             */
            else if (APR_BUCKET_IS_FILE(e)
                     && (e->length >= AP_MIN_SENDFILE_BYTES)) {
                apr_bucket_file *a = e->data;

                /* We can't handle more than one file bucket at a time
                 * so we split here and send the file we have already
                 * found.
                 */
                if (fd) {
                    more = apr_brigade_split(b, e);
                    break;
                }

                fd = a->fd;
                flen = e->length;
                foffset = e->start;
            }
            else {
                const char *str;
                apr_size_t n;

                rv = apr_bucket_read(e, &str, &n, eblock);
                if (APR_STATUS_IS_EAGAIN(rv)) {
                    /* send what we have so far since we shouldn't expect more
                     * output for a while...  next time we read, block */
                    more = apr_brigade_split(b, e);
                    eblock = APR_BLOCK_READ;
                    break;
                }
                eblock = APR_NONBLOCK_READ;
                if (n) {
                    if (!fd) {
                        if (nvec == MAX_IOVEC_TO_WRITE) {
                            /* woah! too many. buffer them up, for use later. */
                            apr_bucket *temp, *next;
                            apr_bucket_brigade *temp_brig;

                            if (nbytes >= AP_MIN_BYTES_TO_WRITE) {
                                /* We have enough data in the iovec
                                 * to justify doing a writev
                                 */
                                more = apr_brigade_split(b, e);
                                break;
                            }

                            /* Create a temporary brigade as a means
                             * of concatenating a bunch of buckets together
                             */
                            temp_brig = apr_brigade_create(f->c->pool,
                                                           f->c->bucket_alloc);
                            if (last_merged_bucket) {
                                /* If we've concatenated together small
                                 * buckets already in a previous pass,
                                 * the initial buckets in this brigade
                                 * are heap buckets that may have extra
                                 * space left in them (because they
                                 * were created by apr_brigade_write()).
                                 * We can take advantage of this by
                                 * building the new temp brigade out of
                                 * these buckets, so that the content
                                 * in them doesn't have to be copied again.
                                 */
                                APR_BRIGADE_PREPEND(b, temp_brig);
                                brigade_move(temp_brig, b,
                                             APR_BUCKET_NEXT(last_merged_bucket));
                            }

                            /* Copy every bucket before 'e' into the temp
                             * brigade, coalescing them via
                             * apr_brigade_write(). */
                            temp = APR_BRIGADE_FIRST(b);
                            while (temp != e) {
                                apr_bucket *d;
                                rv = apr_bucket_read(temp, &str, &n,
                                                     APR_BLOCK_READ);
                                apr_brigade_write(temp_brig, NULL, NULL,
                                                  str, n);
                                d = temp;
                                temp = APR_BUCKET_NEXT(temp);
                                apr_bucket_delete(d);
                            }

                            /* Restart the iovec array from the merged
                             * bucket(s). */
                            nvec = 0;
                            nbytes = 0;
                            temp = APR_BRIGADE_FIRST(temp_brig);
                            APR_BUCKET_REMOVE(temp);
                            APR_BRIGADE_INSERT_HEAD(b, temp);
                            apr_bucket_read(temp, &str, &n, APR_BLOCK_READ);
                            vec[nvec].iov_base = (char*) str;
                            vec[nvec].iov_len = n;
                            nvec++;

                            /* Just in case the temporary brigade has
                             * multiple buckets, recover the rest of
                             * them and put them in the brigade that
                             * we're sending.
                             */
                            for (next = APR_BRIGADE_FIRST(temp_brig);
                                 next != APR_BRIGADE_SENTINEL(temp_brig);
                                 next = APR_BRIGADE_FIRST(temp_brig)) {
                                APR_BUCKET_REMOVE(next);
                                APR_BUCKET_INSERT_AFTER(temp, next);
                                temp = next;
                                apr_bucket_read(next, &str, &n,
                                                APR_BLOCK_READ);
                                vec[nvec].iov_base = (char*) str;
                                vec[nvec].iov_len = n;
                                nvec++;
                            }

                            apr_brigade_destroy(temp_brig);

                            last_merged_bucket = temp;
                            e = temp;
                            last_e = e;
                        }
                        else {
                            vec[nvec].iov_base = (char*) str;
                            vec[nvec].iov_len = n;
                            nvec++;
                        }
                    }
                    else {
                        /* The bucket is a trailer to a file bucket */
                        if (nvec_trailers == MAX_IOVEC_TO_WRITE) {
                            /* woah! too many. stop now. */
                            more = apr_brigade_split(b, e);
                            break;
                        }

                        vec_trailers[nvec_trailers].iov_base = (char*) str;
                        vec_trailers[nvec_trailers].iov_len = n;
                        nvec_trailers++;
                    }

                    nbytes += n;
                }
            }
        }

        /* Completed iterating over the brigade, now determine if we want
         * to buffer the brigade or send the brigade out on the network.
         *
         * Save if we haven't accumulated enough bytes to send, the connection
         * is not about to be closed, and:
         *
         *   1) we didn't see a file, we don't have more passes over the
         *      brigade to perform,  AND we didn't stop at a FLUSH bucket.
         *      (IOW, we will save plain old bytes such as HTTP headers)
         * or
         *   2) we hit the EOS and have a keep-alive connection
         *      (IOW, this response is a bit more complex, but we save it
         *       with the hope of concatenating with another response)
         */
        if (nbytes + flen < AP_MIN_BYTES_TO_WRITE
            && !AP_BUCKET_IS_EOC(last_e)
            && ((!fd && !more && !APR_BUCKET_IS_FLUSH(last_e))
                || (APR_BUCKET_IS_EOS(last_e)
                    && c->keepalive == AP_CONN_KEEPALIVE))) {

            /* NEVER save an EOS in here.  If we are saving a brigade with
             * an EOS bucket, then we are doing keepalive connections, and
             * we want to process to second request fully.
             */
            if (APR_BUCKET_IS_EOS(last_e)) {
                apr_bucket *bucket;
                int file_bucket_saved = 0;
                apr_bucket_delete(last_e);
                for (bucket = APR_BRIGADE_FIRST(b);
                     bucket != APR_BRIGADE_SENTINEL(b);
                     bucket = APR_BUCKET_NEXT(bucket)) {

                    /* Do a read on each bucket to pull in the
                     * data from pipe and socket buckets, so
                     * that we don't leave their file descriptors
                     * open indefinitely.  Do the same for file
                     * buckets, with one exception: allow the
                     * first file bucket in the brigade to remain
                     * a file bucket, so that we don't end up
                     * doing an mmap+memcpy every time a client
                     * requests a <8KB file over a keepalive
                     * connection.
                     */
                    if (APR_BUCKET_IS_FILE(bucket) && !file_bucket_saved) {
                        file_bucket_saved = 1;
                    }
                    else {
                        const char *buf;
                        apr_size_t len = 0;
                        rv = apr_bucket_read(bucket, &buf, &len,
                                             APR_BLOCK_READ);
                        if (rv != APR_SUCCESS) {
                            ap_log_cerror(APLOG_MARK, APLOG_ERR, rv,
                                          c, "core_output_filter:"
                                          " Error reading from bucket.");
                            return HTTP_INTERNAL_SERVER_ERROR;
                        }
                    }
                }
            }
            if (!ctx->deferred_write_pool) {
                apr_pool_create(&ctx->deferred_write_pool, c->pool);
                apr_pool_tag(ctx->deferred_write_pool, "deferred_write");
            }
            ap_save_brigade(f, &ctx->b, &b, ctx->deferred_write_pool);

            return APR_SUCCESS;
        }

        if (fd) {
            apr_hdtr_t hdtr;
            apr_size_t bytes_sent;
#if APR_HAS_SENDFILE
            apr_int32_t flags = 0;
#endif

            memset(&hdtr, '\0', sizeof(hdtr));
            if (nvec) {
                hdtr.numheaders = nvec;
                hdtr.headers = vec;
            }

            if (nvec_trailers) {
                hdtr.numtrailers = nvec_trailers;
                hdtr.trailers = vec_trailers;
            }

#if APR_HAS_SENDFILE
            if (apr_file_flags_get(fd) & APR_SENDFILE_ENABLED) {

                if (c->keepalive == AP_CONN_CLOSE
                    && APR_BUCKET_IS_EOS(last_e)) {
                    /* Prepare the socket to be reused */
                    flags |= APR_SENDFILE_DISCONNECT_SOCKET;
                }

                rv = sendfile_it_all(net,      /* the network information   */
                                     fd,       /* the file to send          */
                                     &hdtr,    /* header and trailer iovecs */
                                     foffset,  /* offset in the file to begin
                                                  sending from             */
                                     flen,     /* length of file            */
                                     nbytes + flen, /* total length including
                                                       headers             */
                                     &bytes_sent, /* how many bytes were
                                                     sent                  */
                                     flags);   /* apr_sendfile flags        */
            }
            else
#endif
            {
                rv = emulate_sendfile(net, fd, &hdtr, foffset, flen,
                                      &bytes_sent);
            }

            if (logio_add_bytes_out && bytes_sent > 0)
                logio_add_bytes_out(c, bytes_sent);

            fd = NULL;
        }
        else {
            apr_size_t bytes_sent;

            rv = writev_it_all(net->client_socket,
                               vec, nvec,
                               nbytes, &bytes_sent);

            if (logio_add_bytes_out && bytes_sent > 0)
                logio_add_bytes_out(c, bytes_sent);
        }

        apr_brigade_cleanup(b);

        /* drive cleanups for resources which were set aside
         * this may occur before or after termination of the request which
         * created the resource
         */
        if (ctx->deferred_write_pool) {
            if (more && more->p == ctx->deferred_write_pool) {
                /* "more" belongs to the deferred_write_pool,
                 * which is about to be cleared.
                 */
                if (APR_BRIGADE_EMPTY(more)) {
                    more = NULL;
                }
                else {
                    /* uh oh... change more's lifetime
                     * to the input brigade's lifetime
                     */
                    apr_bucket_brigade *tmp_more = more;
                    more = NULL;
                    ap_save_brigade(f, &more, &tmp_more, input_pool);
                }
            }
            apr_pool_clear(ctx->deferred_write_pool);
        }

        if (rv != APR_SUCCESS) {
            ap_log_cerror(APLOG_MARK, APLOG_INFO, rv, c,
                          "core_output_filter: writing data to the network");

            if (more)
                apr_brigade_cleanup(more);

            /* No need to check for SUCCESS, we did that above. */
            if (!APR_STATUS_IS_EAGAIN(rv)) {
                c->aborted = 1;
                return APR_ECONNABORTED;
            }

            return APR_SUCCESS;
        }

        b = more;
        more = NULL;
    }  /* end while () */

    return APR_SUCCESS;
}
/* Serve an FLV file from an optional byte offset (pseudo-streaming).
 * When seeking into the file (offset != 0) a synthetic FLV header is
 * prepended so the player can resume mid-file.  Returns DECLINED when the
 * file cannot be stat'ed so the core can handle the request. */
static int drive_flvx(request_rec *r)
{
    apr_finfo_t fi;
    apr_bucket_brigade *bb;
    apr_off_t offset = 0;
    apr_off_t file_bytes = 0;   /* bytes served from the file itself */
    apr_off_t length = 0;       /* total response length */
    apr_file_t *fp = NULL;
    apr_status_t rv = APR_SUCCESS;

    rv = apr_stat(&fi, r->filename, APR_FINFO_SIZE, r->pool);
    if (rv) {
        /* Let the core handle it. */
        return DECLINED;
    }

    /* Open the file (closed automatically via the pool cleanup). */
    rv = apr_file_open(&fp, r->filename, APR_READ, APR_OS_DEFAULT, r->pool);
    if (rv) {
        ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
                      "file permissions deny server access: %s", r->filename);
        return HTTP_FORBIDDEN;
    }

    offset = get_start(r);
    if (offset != 0 && offset < fi.size) {
        file_bytes = fi.size - offset;
    }
    else {
        file_bytes = fi.size;
        /* Offset should be reset if invalid mod by Artur Bodera */
        offset = 0;
    }
    length = file_bytes;

    bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);

    if (offset != 0) {
        length += FLVX_HEADER_LEN;
        rv = apr_brigade_write(bb, NULL, NULL, FLVX_HEADER, FLVX_HEADER_LEN);
        if (rv) {
            ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
                          "unable to write flv header in brigade");
            return HTTP_INTERNAL_SERVER_ERROR;
        }
    }

    /* BUGFIX: the file bucket must cover only the bytes actually present
     * in the file.  The original passed 'length' here, which already
     * included FLVX_HEADER_LEN when a header was prepended, making the
     * bucket extend FLVX_HEADER_LEN bytes past EOF. */
    apr_brigade_insert_file(bb, fp, offset, file_bytes, r->pool);

    ap_set_content_type(r, "video/x-flv");
    ap_set_content_length(r, length);

    /* Add last-modified headers mod by Artur Bodera */
    ap_update_mtime(r, r->finfo.mtime);
    ap_set_last_modified(r);

    return ap_pass_brigade(r->output_filters, bb);
}
/* Append the NUL-terminated string 'str' to the brigade: a put is just a
 * write of strlen(str) bytes. */
APU_DECLARE(apr_status_t) apr_brigade_puts(apr_bucket_brigade *bb,
                                           apr_brigade_flush flush, void *ctx,
                                           const char *str)
{
    apr_size_t len = strlen(str);
    return apr_brigade_write(bb, flush, ctx, str, len);
}
/* Append a single character to the brigade by writing it as a one-byte
 * buffer. */
APU_DECLARE(apr_status_t) apr_brigade_putc(apr_bucket_brigade *b,
                                           apr_brigade_flush flush,
                                           void *ctx, const char c)
{
    const char *one = &c;
    return apr_brigade_write(b, flush, ctx, one, 1);
}
/* serf response callback: reads the remote response, mirrors its status
 * and headers onto the local request once, and streams the body to the
 * client via a temporary brigade.  Returns APR_SUCCESS on EAGAIN (call
 * again later), APR_EOF when the response is fully consumed, or an error. */
static apr_status_t handle_response(serf_request_t *request,
                                    serf_bucket_t *response,
                                    void *vbaton,
                                    apr_pool_t *pool)
{
    apr_status_t rv;
    s_baton_t *ctx = vbaton;
    const char *data;
    apr_size_t len;
    serf_status_line sl;

    if (response == NULL) {
        ctx->rstatus = HTTP_INTERNAL_SERVER_ERROR;
        return APR_EGENERAL;
    }

    /* XXXXXXX: Create better error message. */
    rv = serf_bucket_response_status(response, &sl);
    if (rv) {
        if (APR_STATUS_IS_EAGAIN(rv)) {
            /* Status line not available yet; try again later. */
            return APR_SUCCESS;
        }

        ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, ctx->r, APLOGNO(01121)
                      "serf_bucket_response_status...");

        ctx->rstatus = HTTP_INTERNAL_SERVER_ERROR;

        if (mpm_supprts_serf) {
            ap_mpm_register_timed_callback(apr_time_from_msec(1),
                                           timed_cleanup_callback, ctx);
        }

        return rv;
    }

    /**
     * XXXXX: If I understood serf buckets better, it might be possible to not
     * copy all of the data here, and better stream it to the client.
     **/
    do {
        apr_brigade_cleanup(ctx->tmpbb);
        rv = serf_bucket_read(response, AP_IOBUFSIZE, &data, &len);

        if (SERF_BUCKET_READ_ERROR(rv)) {
            ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, ctx->r, APLOGNO(01122)
                          "serf_bucket_read(response)");
            return rv;
        }

        if (!ctx->done_headers) {
            serf_bucket_t *hdrs;
            serf_status_line line;

            /* TODO: improve */
            serf_bucket_response_status(response, &line);
            ctx->r->status = line.code;

            hdrs = serf_bucket_response_get_headers(response);
            serf_bucket_headers_do(hdrs, copy_headers_out, ctx);
            ctx->done_headers = 1;
        }

        if (len > 0) {
            /* TODO: make APR bucket <-> serf bucket stuff more magical. */
            apr_brigade_write(ctx->tmpbb, NULL, NULL, data, len);
        }

        if (APR_STATUS_IS_EOF(rv)) {
            ctx->keep_reading = 0;

            /* Flush the final chunk and schedule cleanup. */
            ctx->rstatus = ap_pass_brigade(ctx->r->output_filters,
                                           ctx->tmpbb);

            if (mpm_supprts_serf) {
                ap_mpm_register_timed_callback(apr_time_from_msec(1),
                                               timed_cleanup_callback, ctx);
            }

            return APR_EOF;
        }

        ctx->rstatus = ap_pass_brigade(ctx->r->output_filters, ctx->tmpbb);

        /* XXXX: Should we send a flush now? */
        if (APR_STATUS_IS_EAGAIN(rv)) {
            return APR_SUCCESS;
        }
    } while (1);
}
/* Move up to 'maxlen' bytes worth of buckets from 'from' to 'to'.
 * Buckets sharing the destination's bucket_alloc are moved directly;
 * otherwise (DEEP_COPY) metadata is recreated, FILE buckets may be
 * set aside and re-inserted (bounded by *pfile_handles_allowed), and
 * data buckets are copied via apr_brigade_write().  'msg' is only used
 * for logging. */
apr_status_t h2_util_move(apr_bucket_brigade *to, apr_bucket_brigade *from,
                          apr_size_t maxlen, int *pfile_handles_allowed,
                          const char *msg)
{
    apr_status_t status = APR_SUCCESS;
    int same_alloc;

    AP_DEBUG_ASSERT(to);
    AP_DEBUG_ASSERT(from);
    same_alloc = (to->bucket_alloc == from->bucket_alloc);

    if (!FILE_MOVE) {
        /* File-handle passing disabled at compile time. */
        pfile_handles_allowed = NULL;
    }

    if (!APR_BRIGADE_EMPTY(from)) {
        apr_bucket *b, *end;

        /* Find the first bucket NOT to transfer (maxlen boundary). */
        status = last_not_included(from, maxlen, same_alloc,
                                   pfile_handles_allowed, &end);
        if (status != APR_SUCCESS) {
            return status;
        }

        while (!APR_BRIGADE_EMPTY(from) && status == APR_SUCCESS) {
            b = APR_BRIGADE_FIRST(from);
            if (b == end) {
                break;
            }

            if (same_alloc || (b->list == to->bucket_alloc)) {
                /* both brigades use the same bucket_alloc and auto-cleanups
                 * have the same life time. It's therefore safe to just move
                 * directly. */
                APR_BUCKET_REMOVE(b);
                APR_BRIGADE_INSERT_TAIL(to, b);
#if LOG_BUCKETS
                ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p,
                              "h2_util_move: %s, passed bucket(same bucket_alloc) "
                              "%ld-%ld, type=%s",
                              msg, (long)b->start, (long)b->length,
                              APR_BUCKET_IS_METADATA(b)?
                              (APR_BUCKET_IS_EOS(b)? "EOS":
                               (APR_BUCKET_IS_FLUSH(b)? "FLUSH" : "META")) :
                              (APR_BUCKET_IS_FILE(b)? "FILE" : "DATA"));
#endif
            }
            else if (DEEP_COPY) {
                /* we have not managed the magic of passing buckets from
                 * one thread to another. Any attempts result in
                 * cleanup of pools scrambling memory. */
                if (APR_BUCKET_IS_METADATA(b)) {
                    /* Recreate EOS/FLUSH in the destination's allocator;
                     * other metadata is dropped. */
                    if (APR_BUCKET_IS_EOS(b)) {
                        APR_BRIGADE_INSERT_TAIL(to,
                            apr_bucket_eos_create(to->bucket_alloc));
                    }
                    else if (APR_BUCKET_IS_FLUSH(b)) {
                        APR_BRIGADE_INSERT_TAIL(to,
                            apr_bucket_flush_create(to->bucket_alloc));
                    }
                    else {
                        /* ignore */
                    }
                }
                else if (pfile_handles_allowed
                         && *pfile_handles_allowed > 0
                         && APR_BUCKET_IS_FILE(b)) {
                    /* We do not want to read files when passing buckets, if
                     * we can avoid it. However, what we've come up with so
                     * far is not working correctly, resulting either in
                     * crashes or too many open file descriptors.
                     */
                    apr_bucket_file *f = (apr_bucket_file *)b->data;
                    apr_file_t *fd = f->fd;
                    int setaside = (f->readpool != to->p);
#if LOG_BUCKETS
                    ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p,
                                  "h2_util_move: %s, moving FILE bucket %ld-%ld "
                                  "from=%lx(p=%lx) to=%lx(p=%lx), setaside=%d",
                                  msg, (long)b->start, (long)b->length,
                                  (long)from, (long)from->p,
                                  (long)to, (long)to->p, setaside);
#endif
                    if (setaside) {
                        /* Rebind the file handle to the destination pool. */
                        status = apr_file_setaside(&fd, fd, to->p);
                        if (status != APR_SUCCESS) {
                            ap_log_perror(APLOG_MARK, APLOG_ERR, status, to->p,
                                          APLOGNO(02947)
                                          "h2_util: %s, setaside FILE",
                                          msg);
                            return status;
                        }
                    }
                    apr_brigade_insert_file(to, fd, b->start, b->length,
                                            to->p);
                    --(*pfile_handles_allowed);
                }
                else {
                    /* Plain data: read and copy the bytes across. */
                    const char *data;
                    apr_size_t len;
                    status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
                    if (status == APR_SUCCESS && len > 0) {
                        status = apr_brigade_write(to, NULL, NULL, data, len);
#if LOG_BUCKETS
                        ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p,
                                      "h2_util_move: %s, copied bucket %ld-%ld "
                                      "from=%lx(p=%lx) to=%lx(p=%lx)",
                                      msg, (long)b->start, (long)b->length,
                                      (long)from, (long)from->p,
                                      (long)to, (long)to->p);
#endif
                    }
                }
                apr_bucket_delete(b);
            }
            else {
                /* No deep copy: set the bucket aside into the destination
                 * pool and move it. */
                apr_bucket_setaside(b, to->p);
                APR_BUCKET_REMOVE(b);
                APR_BRIGADE_INSERT_TAIL(to, b);
#if LOG_BUCKETS
                ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p,
                              "h2_util_move: %s, passed setaside bucket %ld-%ld "
                              "from=%lx(p=%lx) to=%lx(p=%lx)",
                              msg, (long)b->start, (long)b->length,
                              (long)from, (long)from->p,
                              (long)to, (long)to->p);
#endif
            }
        }
    }

    return status;
}
/* Copy up to 'maxlen' bytes worth of buckets from 'from' to 'to' without
 * consuming 'from'.  With a shared bucket_alloc, apr_bucket_copy() is
 * used; otherwise EOS/FLUSH metadata is recreated in the destination's
 * allocator and data buckets are read and re-written.  'msg' is only
 * used for logging. */
apr_status_t h2_util_copy(apr_bucket_brigade *to, apr_bucket_brigade *from,
                          apr_size_t maxlen, const char *msg)
{
    apr_status_t status = APR_SUCCESS;
    int same_alloc;

    (void)msg;
    AP_DEBUG_ASSERT(to);
    AP_DEBUG_ASSERT(from);
    same_alloc = (to->bucket_alloc == from->bucket_alloc);

    if (!APR_BRIGADE_EMPTY(from)) {
        apr_bucket *b, *end, *cpy;

        /* Find the first bucket NOT to copy (maxlen boundary). */
        status = last_not_included(from, maxlen, 0, 0, &end);
        if (status != APR_SUCCESS) {
            return status;
        }

        for (b = APR_BRIGADE_FIRST(from);
             b != APR_BRIGADE_SENTINEL(from) && b != end;
             b = APR_BUCKET_NEXT(b)) {
            if (same_alloc) {
                /* Cheap shallow copy within the same allocator. */
                status = apr_bucket_copy(b, &cpy);
                if (status != APR_SUCCESS) {
                    break;
                }
                APR_BRIGADE_INSERT_TAIL(to, cpy);
            }
            else {
                if (APR_BUCKET_IS_METADATA(b)) {
                    /* Recreate EOS/FLUSH; other metadata is dropped. */
                    if (APR_BUCKET_IS_EOS(b)) {
                        APR_BRIGADE_INSERT_TAIL(to,
                            apr_bucket_eos_create(to->bucket_alloc));
                    }
                    else if (APR_BUCKET_IS_FLUSH(b)) {
                        APR_BRIGADE_INSERT_TAIL(to,
                            apr_bucket_flush_create(to->bucket_alloc));
                    }
                    else {
                        /* ignore */
                    }
                }
                else {
                    /* Data bucket: read and deep-copy the bytes. */
                    const char *data;
                    apr_size_t len;
                    status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
                    if (status == APR_SUCCESS && len > 0) {
                        status = apr_brigade_write(to, NULL, NULL, data, len);
#if LOG_BUCKETS
                        ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p,
                                      "h2_util_copy: %s, copied bucket %ld-%ld "
                                      "from=%lx(p=%lx) to=%lx(p=%lx)",
                                      msg, (long)b->start, (long)b->length,
                                      (long)from, (long)from->p,
                                      (long)to, (long)to->p);
#endif
                    }
                }
            }
        }
    }

    return status;
}