static int insert_html_fragment_at_head(ap_filter_t * f, apr_bucket_brigade * bb, triger_conf_t * cfg) { triger_module_ctx_t *ctx = f->ctx; apr_bucket *tmp_b = ctx->triger_bucket->b; int ret = 0; apr_size_t pos; apr_bucket *js; if (ctx->find) goto last; /* use strlen(cfg->js) without the +1: including the terminating NUL would send a stray 0 byte to the client */ js = apr_bucket_transient_create(cfg->js, (apr_size_t) strlen(cfg->js), f->r->connection->bucket_alloc); if (!js) goto last; if (ctx->triger_bucket->head_start_tag_pos != -1) { ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r, "Found the <head> start tag, inserting after it: %s", cfg->js); pos = ctx->triger_bucket->head_start_tag_pos; /* split only when the tag does not already end the bucket; either way the fragment goes right after it */ if (pos + 1 < ctx->triger_bucket->len) { apr_bucket_split(tmp_b, pos + 1); } APR_BUCKET_INSERT_AFTER(tmp_b, js); ctx->find = 1; } last: return ret; }
static int file_make_mmap(apr_bucket *e, apr_size_t filelength, apr_off_t fileoffset, apr_pool_t *p) { apr_bucket_file *a = e->data; apr_mmap_t *mm; if (!a->can_mmap) { return 0; } if (filelength > APR_MMAP_LIMIT) { if (apr_mmap_create(&mm, a->fd, fileoffset, APR_MMAP_LIMIT, APR_MMAP_READ, p) != APR_SUCCESS) { return 0; } apr_bucket_split(e, APR_MMAP_LIMIT); filelength = APR_MMAP_LIMIT; } else if ((filelength < APR_MMAP_THRESHOLD) || (apr_mmap_create(&mm, a->fd, fileoffset, filelength, APR_MMAP_READ, p) != APR_SUCCESS)) { return 0; } apr_bucket_mmap_make(e, mm, 0, filelength); file_bucket_destroy(a); return 1; }
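For orientation, a minimal sketch of what triggers the helper above: it sits on the file bucket's read path, so an ordinary read of a large file bucket is enough to morph it in place (the brigade bb is assumed context, not part of the function above).

/* Reading a FILE bucket morphs it: into an MMAP bucket when
 * file_make_mmap succeeds, otherwise into a heap bucket holding the
 * first block, with any remainder split off as a new FILE bucket. */
apr_bucket *e = APR_BRIGADE_FIRST(bb);   /* assumed: a FILE bucket */
const char *data;
apr_size_t len;
apr_status_t rv = apr_bucket_read(e, &data, &len, APR_BLOCK_READ);
/* on success, e is no longer a FILE bucket */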
static apr_status_t sendfile_nonblocking(apr_socket_t *s, apr_bucket *bucket, apr_size_t *cumulative_bytes_written, conn_rec *c) { apr_status_t rv = APR_SUCCESS; apr_bucket_file *file_bucket; apr_file_t *fd; apr_size_t file_length; apr_off_t file_offset; apr_size_t bytes_written = 0; if (!APR_BUCKET_IS_FILE(bucket)) { ap_log_error(APLOG_MARK, APLOG_ERR, rv, c->base_server, APLOGNO(00006) "core_filter: sendfile_nonblocking: " "this should never happen"); return APR_EGENERAL; } file_bucket = (apr_bucket_file *)(bucket->data); fd = file_bucket->fd; file_length = bucket->length; file_offset = bucket->start; if (bytes_written < file_length) { apr_size_t n = file_length - bytes_written; apr_status_t arv; apr_interval_time_t old_timeout; arv = apr_socket_timeout_get(s, &old_timeout); if (arv != APR_SUCCESS) { return arv; } arv = apr_socket_timeout_set(s, 0); if (arv != APR_SUCCESS) { return arv; } rv = apr_socket_sendfile(s, fd, NULL, &file_offset, &n, 0); if (rv == APR_SUCCESS) { bytes_written += n; file_offset += n; } arv = apr_socket_timeout_set(s, old_timeout); if ((arv != APR_SUCCESS) && (rv == APR_SUCCESS)) { rv = arv; } } if ((ap__logio_add_bytes_out != NULL) && (bytes_written > 0)) { ap__logio_add_bytes_out(c, bytes_written); } *cumulative_bytes_written += bytes_written; if ((bytes_written < file_length) && (bytes_written > 0)) { apr_bucket_split(bucket, bytes_written); apr_bucket_delete(bucket); } else if (bytes_written == file_length) { apr_bucket_delete(bucket); } return rv; }
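A hedged sketch of how a caller might drive this helper; the socket s, connection c, and brigade bb are assumed context. On a short write the helper has already split and deleted, so the head of the brigade is exactly the unsent remainder.

apr_size_t total = 0;
apr_bucket *b = APR_BRIGADE_FIRST(bb);
if (APR_BUCKET_IS_FILE(b)) {
    apr_status_t rv = sendfile_nonblocking(s, b, &total, c);
    if (APR_STATUS_IS_EAGAIN(rv)) {
        /* socket not writable yet: retry later, bb holds what is left */
    }
}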
static apr_status_t last_not_included(apr_bucket_brigade *bb, apr_size_t maxlen, int same_alloc, int *pfile_buckets_allowed, apr_bucket **pend) { apr_bucket *b; apr_status_t status = APR_SUCCESS; int files_allowed = pfile_buckets_allowed? *pfile_buckets_allowed : 0; if (maxlen > 0) { /* Find the bucket, up to which we reach maxlen/mem bytes */ for (b = APR_BRIGADE_FIRST(bb); (b != APR_BRIGADE_SENTINEL(bb)); b = APR_BUCKET_NEXT(b)) { if (APR_BUCKET_IS_METADATA(b)) { /* included */ } else { if (maxlen == 0) { *pend = b; return status; } if (b->length == ((apr_size_t)-1)) { const char *ign; apr_size_t ilen; status = apr_bucket_read(b, &ign, &ilen, APR_BLOCK_READ); if (status != APR_SUCCESS) { return status; } } if (same_alloc && APR_BUCKET_IS_FILE(b)) { /* we like it move it, always */ } else if (files_allowed > 0 && APR_BUCKET_IS_FILE(b)) { /* this has no memory footprint really unless * it is read, disregard it in length count, * unless we do not move the file buckets */ --files_allowed; } else if (maxlen < b->length) { apr_bucket_split(b, maxlen); maxlen = 0; } else { maxlen -= b->length; } } } } *pend = APR_BRIGADE_SENTINEL(bb); return status; }
static apr_status_t h2_conn_io_bucket_read(h2_conn_io *io, apr_read_type_e block, h2_conn_io_on_read_cb on_read_cb, void *puser, int *pdone) { apr_status_t status = APR_SUCCESS; apr_size_t readlen = 0; *pdone = 0; while (status == APR_SUCCESS && !*pdone && !APR_BRIGADE_EMPTY(io->input)) { apr_bucket* bucket = APR_BRIGADE_FIRST(io->input); if (APR_BUCKET_IS_METADATA(bucket)) { /* we do nothing regarding any meta here */ } else { const char *bucket_data = NULL; apr_size_t bucket_length = 0; status = apr_bucket_read(bucket, &bucket_data, &bucket_length, block); if (status == APR_SUCCESS && bucket_length > 0) { if (APLOGctrace2(io->connection)) { char buffer[32]; h2_util_hex_dump(buffer, sizeof(buffer)/sizeof(buffer[0]), bucket_data, bucket_length); ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, io->connection, "h2_conn_io(%ld): read %d bytes: %s", io->connection->id, (int)bucket_length, buffer); } if (bucket_length > 0) { apr_size_t consumed = 0; status = on_read_cb(bucket_data, bucket_length, &consumed, pdone, puser); if (status == APR_SUCCESS && bucket_length > consumed) { /* We have data left in the bucket. Split it. */ status = apr_bucket_split(bucket, consumed); } readlen += consumed; } } } apr_bucket_delete(bucket); } if (readlen == 0 && status == APR_SUCCESS && block == APR_NONBLOCK_READ) { return APR_EAGAIN; } return status; }
APU_DECLARE(apr_status_t) apr_brigade_split_line(apr_bucket_brigade *bbOut, apr_bucket_brigade *bbIn, apr_read_type_e block, apr_off_t maxbytes) { apr_off_t readbytes = 0; while (!APR_BRIGADE_EMPTY(bbIn)) { const char *pos; const char *str; apr_size_t len; apr_status_t rv; apr_bucket *e; e = APR_BRIGADE_FIRST(bbIn); rv = apr_bucket_read(e, &str, &len, block); if (rv != APR_SUCCESS) { return rv; } pos = memchr(str, APR_ASCII_LF, len); /* We found a match. */ if (pos != NULL) { apr_bucket_split(e, pos - str + 1); APR_BUCKET_REMOVE(e); APR_BRIGADE_INSERT_TAIL(bbOut, e); return APR_SUCCESS; } APR_BUCKET_REMOVE(e); if (APR_BUCKET_IS_METADATA(e) || len > APR_BUCKET_BUFF_SIZE/4) { APR_BRIGADE_INSERT_TAIL(bbOut, e); } else { if (len > 0) { rv = apr_brigade_write(bbOut, NULL, NULL, str, len); if (rv != APR_SUCCESS) { return rv; } } apr_bucket_destroy(e); } readbytes += len; /* We didn't find an APR_ASCII_LF within the maximum line length. */ if (readbytes >= maxbytes) { break; } } return APR_SUCCESS; }
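A minimal caller sketch for the function above: pull one LF-terminated line out of bbin and flatten it into a caller buffer. The helper name, the 8K cap, and the pool/allocator parameters are illustrative assumptions.

/* On success, buf holds up to *len bytes of the line (including the LF). */
static apr_status_t read_one_line(apr_bucket_brigade *bbin, apr_pool_t *pool,
                                  apr_bucket_alloc_t *ba,
                                  char *buf, apr_size_t *len)
{
    apr_bucket_brigade *line = apr_brigade_create(pool, ba);
    apr_status_t rv = apr_brigade_split_line(line, bbin, APR_BLOCK_READ, 8192);
    if (rv == APR_SUCCESS) {
        /* *len is in/out: buffer size on entry, bytes written on exit */
        rv = apr_brigade_flatten(line, buf, len);
    }
    apr_brigade_destroy(line);
    return rv;
}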
apr_status_t jxr_append_brigade(request_rec *r, apr_bucket_brigade *dest, apr_bucket_brigade *bb, int *eos_seen) { apr_size_t max_msglen = MAX_PACKET_SIZE - sizeof(Jaxer_Header); apr_status_t rv; while (!APR_BRIGADE_EMPTY(bb)) { apr_size_t readlen; const char *buffer; apr_bucket *e = APR_BRIGADE_FIRST(bb); if (APR_BUCKET_IS_EOS(e) ) { apr_bucket_delete(e); if (eos_seen) *eos_seen = 1; continue; } if (APR_BUCKET_IS_METADATA(e)) { apr_bucket_delete(e); continue; } /* Read the bucket now */ if ((rv = apr_bucket_read(e, &buffer, &readlen, APR_BLOCK_READ)) != APR_SUCCESS) { ap_log_perror(APLOG_MARK, APLOG_INFO, rv, r->pool, "mod_jaxer: can't read data from handler"); return rv; } if (readlen > max_msglen) { apr_bucket_split(e, max_msglen); }else { APR_BUCKET_REMOVE(e); APR_BRIGADE_INSERT_TAIL(dest, e); } } if ((rv=apr_brigade_destroy(bb)) != APR_SUCCESS) { ap_log_perror(APLOG_MARK, APLOG_INFO, rv, r->pool, "mod_jaxer: failed to destroy brigade."); return rv; } return APR_SUCCESS; }
static int getsfunc_BRIGADE(char *buf, int len, void *arg) { apr_bucket_brigade *bb = (apr_bucket_brigade *)arg; const char *dst_end = buf + len - 1; /* leave room for terminating null */ char *dst = buf; apr_bucket *e = APR_BRIGADE_FIRST(bb); apr_status_t rv; int done = 0; while ((dst < dst_end) && !done && e != APR_BRIGADE_SENTINEL(bb) && !APR_BUCKET_IS_EOS(e)) { const char *bucket_data; apr_size_t bucket_data_len; const char *src; const char *src_end; apr_bucket * next; rv = apr_bucket_read(e, &bucket_data, &bucket_data_len, APR_BLOCK_READ); if (rv != APR_SUCCESS || (bucket_data_len == 0)) { *dst = '\0'; return APR_STATUS_IS_TIMEUP(rv) ? -1 : 0; } src = bucket_data; src_end = bucket_data + bucket_data_len; while ((src < src_end) && (dst < dst_end) && !done) { if (*src == '\n') { done = 1; } else if (*src != '\r') { *dst++ = *src; } src++; } if (src < src_end) { apr_bucket_split(e, src - bucket_data); } next = APR_BUCKET_NEXT(e); apr_bucket_delete(e); e = next; } *dst = 0; return done; }
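In httpd this reader is the gets-style callback handed to the script header scanner; a sketch of that hookup (the MAX_STRING_LEN-sized buffer is the conventional choice, assumed here).

char sbuf[MAX_STRING_LEN];
int status = ap_scan_script_header_err_core(r, sbuf, getsfunc_BRIGADE, bb);
if (status != OK) {
    /* the script emitted malformed headers; status is the error to return */
}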
static int insert_html_fragment_at_tail(ap_filter_t * f, apr_bucket_brigade * bb, triger_conf_t * cfg) { apr_bucket *tmp_b; int ret = 0; apr_size_t pos; apr_bucket *js; triger_module_ctx_t *ctx = f->ctx; if (ctx->find) goto last; /* as above, omit the trailing NUL from the bucket length */ js = apr_bucket_transient_create(cfg->js, (apr_size_t) strlen(cfg->js), f->r->connection->bucket_alloc); if (!js) goto last; if (ctx->triger_bucket->body_end_tag_pos == -1 && ctx->triger_bucket->html_end_tag_pos == -1) { ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r, "Neither </body> nor </html> tag found, inserting at the end: %s", cfg->js); tmp_b = APR_BRIGADE_LAST(bb); APR_BUCKET_INSERT_BEFORE(tmp_b, js); } else { tmp_b = ctx->triger_bucket->b; ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r, "Found </body> or </html>, inserting the fragment there: %s", cfg->js); pos = ctx->triger_bucket->body_end_tag_pos != -1 ? ctx->triger_bucket->body_end_tag_pos : ctx->triger_bucket->html_end_tag_pos; apr_bucket_split(tmp_b, pos); APR_BUCKET_INSERT_AFTER(tmp_b, js); } ctx->find = 1; last: return ret; }
static apr_status_t consume_brigade(h2_filter_cin *cin, apr_bucket_brigade *bb, apr_read_type_e block) { apr_status_t status = APR_SUCCESS; apr_size_t readlen = 0; while (status == APR_SUCCESS && !APR_BRIGADE_EMPTY(bb)) { apr_bucket* bucket = APR_BRIGADE_FIRST(bb); if (APR_BUCKET_IS_METADATA(bucket)) { /* we do nothing regarding any meta here */ } else { const char *bucket_data = NULL; apr_size_t bucket_length = 0; status = apr_bucket_read(bucket, &bucket_data, &bucket_length, block); if (status == APR_SUCCESS && bucket_length > 0) { apr_size_t consumed = 0; status = cin->cb(cin->cb_ctx, bucket_data, bucket_length, &consumed); if (status == APR_SUCCESS && bucket_length > consumed) { /* We have data left in the bucket. Split it. */ status = apr_bucket_split(bucket, consumed); } readlen += consumed; cin->start_read = apr_time_now(); } } apr_bucket_delete(bucket); } if (readlen == 0 && status == APR_SUCCESS && block == APR_NONBLOCK_READ) { return APR_EAGAIN; } return status; }
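The contract that makes the split above work: the callback may consume fewer bytes than offered, and the unconsumed tail is split off so it stays at the head of the brigade for the next pass. A hypothetical callback honoring that contract (the my_buffer type is invented for this sketch; the signature only mirrors the call site above).

typedef struct { char *mem; apr_size_t used; apr_size_t size; } my_buffer;

/* Copy whatever fits and report it via *consumed; returning less than
 * len makes consume_brigade split the bucket at the consumed offset. */
static apr_status_t copy_to_buffer(void *ctx, const char *data,
                                   apr_size_t len, apr_size_t *consumed)
{
    my_buffer *buf = ctx;
    apr_size_t space = buf->size - buf->used;
    apr_size_t n = (len < space) ? len : space;
    memcpy(buf->mem + buf->used, data, n);
    buf->used += n;
    *consumed = n;
    return APR_SUCCESS;
}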
static PyObject * _conn_read(conn_rec *c, ap_input_mode_t mode, long len) { apr_bucket *b; apr_bucket_brigade *bb; apr_status_t rc; long bytes_read; PyObject *result; char *buffer; long bufsize; bb = apr_brigade_create(c->pool, c->bucket_alloc); bufsize = len == 0 ? HUGE_STRING_LEN : len; while (APR_BRIGADE_EMPTY(bb)) { Py_BEGIN_ALLOW_THREADS; rc = ap_get_brigade(c->input_filters, bb, mode, APR_BLOCK_READ, bufsize); Py_END_ALLOW_THREADS; if (rc != APR_SUCCESS) { PyErr_SetObject(PyExc_IOError, PyString_FromString("Connection read error")); return NULL; } } /* * loop through the brigade reading buckets into the string */ b = APR_BRIGADE_FIRST(bb); if (APR_BUCKET_IS_EOS(b)) { apr_bucket_delete(b); Py_INCREF(Py_None); return Py_None; } /* PYTHON 2.5: 'PyString_FromStringAndSize' uses Py_ssize_t for input parameters */ result = PyString_FromStringAndSize(NULL, bufsize); /* possibly no more memory */ if (result == NULL) return PyErr_NoMemory(); buffer = PyString_AS_STRING((PyStringObject *) result); bytes_read = 0; while ((bytes_read < len || len == 0) && !(b == APR_BRIGADE_SENTINEL(bb) || APR_BUCKET_IS_EOS(b) || APR_BUCKET_IS_FLUSH(b))) { const char *data; apr_size_t size; apr_bucket *old; if (apr_bucket_read(b, &data, &size, APR_BLOCK_READ) != APR_SUCCESS) { PyErr_SetObject(PyExc_IOError, PyString_FromString("Connection read error")); return NULL; } if (bytes_read + size > bufsize) { apr_bucket_split(b, bufsize - bytes_read); size = bufsize - bytes_read; /* now the bucket is the exact size we need */ } memcpy(buffer, data, size); buffer += size; bytes_read += size; /* time to grow destination string? */ if (len == 0 && bytes_read == bufsize) { /* PYTHON 2.5: '_PyString_Resize' uses Py_ssize_t for input parameters */ _PyString_Resize(&result, bufsize + HUGE_STRING_LEN); buffer = PyString_AS_STRING((PyStringObject *) result); buffer += bufsize; bufsize += HUGE_STRING_LEN; } if (mode == AP_MODE_GETLINE || len == 0) { apr_bucket_delete(b); break; } old = b; b = APR_BUCKET_NEXT(b); apr_bucket_delete(old); } /* resize if necessary */ if (bytes_read < len || len == 0) /* PYTHON 2.5: '_PyString_Resize' uses Py_ssize_t for input parameters */ if(_PyString_Resize(&result, bytes_read)) return NULL; return result; }
static apr_status_t append_bucket(h2_bucket_beam *beam, apr_bucket *bred, apr_read_type_e block, apr_pool_t *pool, h2_beam_lock *pbl) { const char *data; apr_size_t len; apr_off_t space_left = 0; apr_status_t status; if (APR_BUCKET_IS_METADATA(bred)) { if (APR_BUCKET_IS_EOS(bred)) { beam->closed = 1; } APR_BUCKET_REMOVE(bred); H2_BLIST_INSERT_TAIL(&beam->red, bred); return APR_SUCCESS; } else if (APR_BUCKET_IS_FILE(bred)) { /* file bucket lengths do not really count */ } else { space_left = calc_space_left(beam); if (space_left > 0 && bred->length == ((apr_size_t)-1)) { const char *data; status = apr_bucket_read(bred, &data, &len, APR_BLOCK_READ); if (status != APR_SUCCESS) { return status; } } if (space_left < bred->length) { status = r_wait_space(beam, block, pbl, &space_left); if (status != APR_SUCCESS) { return status; } if (space_left <= 0) { return APR_EAGAIN; } } /* space available, maybe need bucket split */ } /* The fundamental problem is that reading a red bucket from * a green thread is a total NO GO, because the bucket might use * its pool/bucket_alloc from a foreign thread and that will * corrupt. */ status = APR_ENOTIMPL; if (beam->closed && bred->length > 0) { status = APR_EOF; } else if (APR_BUCKET_IS_TRANSIENT(bred)) { /* this takes care of transient buckets and converts them * into heap ones. Other bucket types might or might not be * affected by this. */ status = apr_bucket_setaside(bred, pool); } else if (APR_BUCKET_IS_HEAP(bred)) { /* For heap buckets read from a green thread is fine. The * data will be there and live until the bucket itself is * destroyed. */ status = APR_SUCCESS; } else if (APR_BUCKET_IS_POOL(bred)) { /* pool buckets are bastards that register at pool cleanup * to morph themselves into heap buckets. That may happen anytime, * even after the bucket data pointer has been read. So at * any time inside the green thread, the pool bucket memory * may disappear. yikes. */ status = apr_bucket_read(bred, &data, &len, APR_BLOCK_READ); if (status == APR_SUCCESS) { apr_bucket_heap_make(bred, data, len, NULL); } } else if (APR_BUCKET_IS_FILE(bred)) { /* For file buckets the problem is their internal readpool that * is used on the first read to allocate buffer/mmap. * Since setting aside a file bucket will de-register the * file cleanup function from the previous pool, we need to * call that from a red thread. * Additionally, we allow callbacks to prevent beaming file * handles across. The use case for this is to limit the number * of open file handles and rather use a less efficient beam * transport. */ apr_file_t *fd = ((apr_bucket_file *)bred->data)->fd; int can_beam = 1; if (beam->last_beamed != fd && beam->can_beam_fn) { can_beam = beam->can_beam_fn(beam->can_beam_ctx, beam, fd); } if (can_beam) { beam->last_beamed = fd; status = apr_bucket_setaside(bred, pool); } /* else: enter ENOTIMPL case below */ } if (status == APR_ENOTIMPL) { /* we have no knowledge about the internals of this bucket, * but hope that after read, its data stays immutable for the * lifetime of the bucket. (see pool bucket handling above for * a counter example). * We do the read while in a red thread, so that the bucket may * use pools/allocators safely. 
*/ if (space_left < APR_BUCKET_BUFF_SIZE) { space_left = APR_BUCKET_BUFF_SIZE; } if (space_left < bred->length) { apr_bucket_split(bred, space_left); } status = apr_bucket_read(bred, &data, &len, APR_BLOCK_READ); if (status == APR_SUCCESS) { status = apr_bucket_setaside(bred, pool); } } if (status != APR_SUCCESS && status != APR_ENOTIMPL) { return status; } APR_BUCKET_REMOVE(bred); H2_BLIST_INSERT_TAIL(&beam->red, bred); beam->sent_bytes += bred->length; return APR_SUCCESS; }
static apr_status_t writev_nonblocking(apr_socket_t *s, struct iovec *vec, apr_size_t nvec, apr_bucket_brigade *bb, apr_size_t *cumulative_bytes_written, conn_rec *c) { apr_status_t rv = APR_SUCCESS, arv; apr_size_t bytes_written = 0, bytes_to_write = 0; apr_size_t i, offset; apr_interval_time_t old_timeout; arv = apr_socket_timeout_get(s, &old_timeout); if (arv != APR_SUCCESS) { return arv; } arv = apr_socket_timeout_set(s, 0); if (arv != APR_SUCCESS) { return arv; } for (i = 0; i < nvec; i++) { bytes_to_write += vec[i].iov_len; } offset = 0; while (bytes_written < bytes_to_write) { apr_size_t n = 0; rv = apr_socket_sendv(s, vec + offset, nvec - offset, &n); if (n > 0) { bytes_written += n; for (i = offset; i < nvec; ) { apr_bucket *bucket = APR_BRIGADE_FIRST(bb); if (APR_BUCKET_IS_METADATA(bucket)) { apr_bucket_delete(bucket); } else if (n >= vec[i].iov_len) { apr_bucket_delete(bucket); offset++; n -= vec[i++].iov_len; } else { apr_bucket_split(bucket, n); apr_bucket_delete(bucket); vec[i].iov_len -= n; vec[i].iov_base = (char *) vec[i].iov_base + n; break; } } } if (rv != APR_SUCCESS) { break; } } if ((ap__logio_add_bytes_out != NULL) && (bytes_written > 0)) { ap__logio_add_bytes_out(c, bytes_written); } *cumulative_bytes_written += bytes_written; arv = apr_socket_timeout_set(s, old_timeout); if ((arv != APR_SUCCESS) && (rv == APR_SUCCESS)) { return arv; } else { return rv; } }
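For context, the vec/nvec pair this function consumes is typically filled straight from the same brigade; a hedged sketch follows (socket s, brigade bb, connection c are assumed context, and the 16-entry cap is an arbitrary choice for the sketch).

struct iovec vec[16];
int nvec = sizeof(vec) / sizeof(vec[0]);  /* in: capacity, out: entries filled */
apr_size_t bytes_written = 0;
apr_status_t rv = apr_brigade_to_iovec(bb, vec, &nvec);
if (rv == APR_SUCCESS && nvec > 0) {
    /* writev_nonblocking deletes fully written buckets and splits the
     * first partially written one, so bb always starts at unsent data */
    rv = writev_nonblocking(s, vec, nvec, bb, &bytes_written, c);
}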
apr_status_t dav_svn__location_body_filter(ap_filter_t *f, apr_bucket_brigade *bb) { request_rec *r = f->r; locate_ctx_t *ctx = f->ctx; apr_bucket *bkt; const char *master_uri, *root_dir, *canonicalized_uri; apr_uri_t uri; /* Don't filter if we're in a subrequest or we aren't setup to proxy anything. */ master_uri = dav_svn__get_master_uri(r); if (r->main || !master_uri) { ap_remove_output_filter(f); return ap_pass_brigade(f->next, bb); } /* And don't filter if our search-n-replace would be a noop anyway (that is, if our root path matches that of the master server). */ apr_uri_parse(r->pool, master_uri, &uri); root_dir = dav_svn__get_root_dir(r); canonicalized_uri = svn_urlpath__canonicalize(uri.path, r->pool); if (strcmp(canonicalized_uri, root_dir) == 0) { ap_remove_output_filter(f); return ap_pass_brigade(f->next, bb); } /* ### FIXME: GET and PROPFIND requests that make it here must be ### referring to data inside commit transactions-in-progress. ### We've got to be careful not to munge the versioned data ### they return in the process of trying to do URI fix-ups. ### See issue #3445 for details. */ /* We are url encoding the current url and the master url as incoming(from master) request body has it encoded already. */ canonicalized_uri = svn_path_uri_encode(canonicalized_uri, r->pool); root_dir = svn_path_uri_encode(root_dir, r->pool); if (!f->ctx) { ctx = f->ctx = apr_pcalloc(r->pool, sizeof(*ctx)); ctx->remotepath = canonicalized_uri; ctx->remotepath_len = strlen(ctx->remotepath); ctx->localpath = root_dir; ctx->localpath_len = strlen(ctx->localpath); ctx->pattern = apr_strmatch_precompile(r->pool, ctx->remotepath, 1); ctx->pattern_len = ctx->remotepath_len; } bkt = APR_BRIGADE_FIRST(bb); while (bkt != APR_BRIGADE_SENTINEL(bb)) { const char *data, *match; apr_size_t len; /* read */ apr_bucket_read(bkt, &data, &len, APR_BLOCK_READ); match = apr_strmatch(ctx->pattern, data, len); if (match) { apr_bucket *next_bucket; apr_bucket_split(bkt, match - data); next_bucket = APR_BUCKET_NEXT(bkt); apr_bucket_split(next_bucket, ctx->pattern_len); bkt = APR_BUCKET_NEXT(next_bucket); apr_bucket_delete(next_bucket); next_bucket = apr_bucket_pool_create(ctx->localpath, ctx->localpath_len, r->pool, bb->bucket_alloc); APR_BUCKET_INSERT_BEFORE(bkt, next_bucket); } else { bkt = APR_BUCKET_NEXT(bkt); } } return ap_pass_brigade(f->next, bb); }
apr_status_t dav_svn__location_in_filter(ap_filter_t *f, apr_bucket_brigade *bb, ap_input_mode_t mode, apr_read_type_e block, apr_off_t readbytes) { request_rec *r = f->r; locate_ctx_t *ctx = f->ctx; apr_status_t rv; apr_bucket *bkt; const char *master_uri, *root_dir, *canonicalized_uri; apr_uri_t uri; /* Don't filter if we're in a subrequest or we aren't setup to proxy anything. */ master_uri = dav_svn__get_master_uri(r); if (r->main || !master_uri) { ap_remove_input_filter(f); return ap_get_brigade(f->next, bb, mode, block, readbytes); } /* And don't filter if our search-n-replace would be a noop anyway (that is, if our root path matches that of the master server). */ apr_uri_parse(r->pool, master_uri, &uri); root_dir = dav_svn__get_root_dir(r); canonicalized_uri = svn_urlpath__canonicalize(uri.path, r->pool); if (strcmp(canonicalized_uri, root_dir) == 0) { ap_remove_input_filter(f); return ap_get_brigade(f->next, bb, mode, block, readbytes); } /* ### FIXME: While we want to fix up any locations in proxied XML ### requests, we do *not* want to be futzing with versioned (or ### to-be-versioned) data, such as the file contents present in ### PUT requests and properties in PROPPATCH requests. ### See issue #3445 for details. */ /* We are url encoding the current url and the master url as incoming(from client) request body has it encoded already. */ canonicalized_uri = svn_path_uri_encode(canonicalized_uri, r->pool); root_dir = svn_path_uri_encode(root_dir, r->pool); if (!f->ctx) { ctx = f->ctx = apr_pcalloc(r->pool, sizeof(*ctx)); ctx->remotepath = canonicalized_uri; ctx->remotepath_len = strlen(ctx->remotepath); ctx->localpath = root_dir; ctx->localpath_len = strlen(ctx->localpath); ctx->pattern = apr_strmatch_precompile(r->pool, ctx->localpath, 1); ctx->pattern_len = ctx->localpath_len; } rv = ap_get_brigade(f->next, bb, mode, block, readbytes); if (rv) { return rv; } bkt = APR_BRIGADE_FIRST(bb); while (bkt != APR_BRIGADE_SENTINEL(bb)) { const char *data, *match; apr_size_t len; if (APR_BUCKET_IS_METADATA(bkt)) { bkt = APR_BUCKET_NEXT(bkt); continue; } /* read */ apr_bucket_read(bkt, &data, &len, APR_BLOCK_READ); match = apr_strmatch(ctx->pattern, data, len); if (match) { apr_bucket *next_bucket; apr_bucket_split(bkt, match - data); next_bucket = APR_BUCKET_NEXT(bkt); apr_bucket_split(next_bucket, ctx->pattern_len); bkt = APR_BUCKET_NEXT(next_bucket); apr_bucket_delete(next_bucket); next_bucket = apr_bucket_pool_create(ctx->remotepath, ctx->remotepath_len, r->pool, bb->bucket_alloc); APR_BUCKET_INSERT_BEFORE(bkt, next_bucket); } else { bkt = APR_BUCKET_NEXT(bkt); } } return APR_SUCCESS; }
/** * Process input stream */ static apr_status_t helocon_filter_in(ap_filter_t *f, apr_bucket_brigade *b, ap_input_mode_t mode, apr_read_type_e block, apr_off_t readbytes) { conn_rec *c = f->c; my_ctx *ctx = f->ctx; // Fail quickly if the connection has already been aborted. if (c->aborted) { apr_brigade_cleanup(b); return APR_ECONNABORTED; } // Fast passthrough if (ctx->phase == PHASE_DONE) { return ap_get_brigade(f->next, b, mode, block, readbytes); } // Process Head do { #ifdef DEBUG ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d need=%" APR_OFF_T_FMT " phase=%d (1)", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->need, ctx->phase); #endif if (APR_BRIGADE_EMPTY(b)) { #ifdef DEBUG ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d need=%" APR_OFF_T_FMT " phase=%d (2)", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->need, ctx->phase); #endif apr_status_t s = ap_get_brigade(f->next, b, ctx->mode, APR_BLOCK_READ, ctx->need); if (s != APR_SUCCESS) { #ifdef DEBUG ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d need=%" APR_OFF_T_FMT " phase=%d (fail)(1)", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->need, ctx->phase); #endif return s; } } if (ctx->phase == PHASE_DONE) { return APR_SUCCESS; } if (APR_BRIGADE_EMPTY(b)) { #ifdef DEBUG ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d need=%" APR_OFF_T_FMT " phase=%d (empty)", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->need, ctx->phase); #endif return APR_SUCCESS; } apr_bucket *e = NULL; for (e = APR_BRIGADE_FIRST(b); e != APR_BRIGADE_SENTINEL(b); e = APR_BUCKET_NEXT(e)) { if (e->type == NULL) { #ifdef DEBUG ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d need=%" APR_OFF_T_FMT " phase=%d (type=NULL)", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->need, ctx->phase); #endif return APR_SUCCESS; } // We need more data if (ctx->need > 0) { const char *str = NULL; apr_size_t length = 0; apr_status_t s = apr_bucket_read(e, &str, &length, APR_BLOCK_READ); if (s != APR_SUCCESS) { #ifdef DEBUG ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d need=%" APR_OFF_T_FMT " recv=%" APR_OFF_T_FMT " phase=%d readed=%" APR_SIZE_T_FMT " (fail)(2)", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->need, ctx->recv, ctx->phase, length); #endif return s; } #ifdef DEBUG ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d need=%" APR_OFF_T_FMT " recv=%" APR_OFF_T_FMT " phase=%d readed=%" APR_SIZE_T_FMT " (3)", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->need, ctx->recv, ctx->phase, length); #endif if (length > 0) { if ((ctx->offset + length) > PROXY_MAX_LENGTH) { // Overflow ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in ERROR: PROXY protocol header overflow from=%s to port=%d length=%" APR_OFF_T_FMT, _CLIENT_IP, c->local_addr->port, (ctx->offset + length)); goto ABORT_CONN2; } memcpy(ctx->buf + ctx->offset, str, length); if (ctx->pad != ctx->magic) { ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in padding magic fail (bad=%d vs good=%d)", ctx->pad, ctx->magic); goto ABORT_CONN; } ctx->offset += length; ctx->recv += length; ctx->need -= 
length; ctx->buf[ctx->offset] = 0; // drop the consumed HEAD bytes; any excess stays in the brigade if (e->length > length) { apr_bucket_split(e, length); } } { /* capture the bucket's flags before deleting it: the previous code logged e after apr_bucket_delete(e), a use-after-free (note the for-loop increment above still touches e after deletion when the loop continues; a full fix would restructure that loop) */ #ifdef DEBUG int was_flush = APR_BUCKET_IS_FLUSH(e) ? 1 : 0; int was_meta = APR_BUCKET_IS_METADATA(e) ? 1 : 0; #endif apr_bucket_delete(e); if (length == 0) { #ifdef DEBUG ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG bucket flush=%d meta=%d", was_flush, was_meta); #endif continue; } } } // Handle GETLINE mode if (ctx->mode == AP_MODE_GETLINE) { if ((ctx->need > 0) && (ctx->recv > 2)) { char *end = memchr(ctx->buf, '\r', ctx->offset - 1); if (end) { #ifdef DEBUG ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG: GETLINE OK"); #endif if ((end[0] == '\r') && (end[1] == '\n')) { ctx->need = 0; } } } } if (ctx->need <= 0) { #ifdef DEBUG ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d need=%" APR_OFF_T_FMT " recv=%" APR_OFF_T_FMT " phase=%d (4)", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->need, ctx->recv, ctx->phase); #endif switch (ctx->phase) { case PHASE_WANT_HEAD: { #ifdef DEBUG ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d phase=%d checking=%s buf=%s", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->phase, "HEAD", ctx->buf); #endif // TEST Command #ifdef DEBUG ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG: CMD=TEST CHECK"); #endif if (strncmp(TEST, ctx->buf, 4) == 0) { apr_socket_t *csd = ap_get_module_config(c->conn_config, &core_module); apr_size_t length = strlen(TEST_RES_OK); apr_socket_send(csd, TEST_RES_OK, &length); apr_socket_shutdown(csd, APR_SHUTDOWN_WRITE); apr_socket_close(csd); #ifdef DEBUG ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG: CMD=TEST OK"); #endif // No need to check for SUCCESS, we did that above c->aborted = 1; apr_brigade_cleanup(b); return APR_ECONNABORTED; } // HELO Command #ifdef DEBUG ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG: CMD=HELO CHECK"); #endif if (strncmp(HELO, ctx->buf, 4) == 0) { #ifdef DEBUG ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG: CMD=HELO OK"); #endif ctx->phase = PHASE_WANT_BINIP; ctx->mode = AP_MODE_READBYTES; ctx->need = 4; ctx->recv = 0; break; } // PROXY Command #ifdef DEBUG ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG: CMD=PROXY CHECK"); #endif if (strncmp(PROXY, ctx->buf, 4) == 0) { #ifdef DEBUG ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG: CMD=PROXY OK"); #endif ctx->phase = PHASE_WANT_LINE; ctx->mode = AP_MODE_GETLINE; ctx->need = PROXY_MAX_LENGTH - ctx->offset; ctx->recv = 0; break; } // ELSE... 
GET / POST / etc ctx->phase = PHASE_DONE; #ifdef DEBUG ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG from: %s:%d to port=%d newBucket (1) size=%" APR_OFF_T_FMT, _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->offset); #endif // Restore original data if (ctx->offset) { e = apr_bucket_heap_create(ctx->buf, ctx->offset, NULL, c->bucket_alloc); APR_BRIGADE_INSERT_HEAD(b, e); goto END_CONN; } break; } case PHASE_WANT_BINIP: { #ifdef DEBUG ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d phase=%d checking=%s", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->phase, "BINIP"); #endif // REWRITE CLIENT IP const char *new_ip = fromBinIPtoString(c->pool, ctx->buf+4); if (!new_ip) { ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in ERROR: HELO+IP invalid"); goto ABORT_CONN; } apr_table_set(c->notes, NOTE_REWRITE_IP, new_ip); ctx->phase = PHASE_DONE; #ifdef DEBUG ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG from: %s:%d to port=%d newip=%s", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, new_ip); #endif break; } case PHASE_WANT_LINE: { #ifdef DEBUG ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d phase=%d checking=%s buf=%s", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->phase, "LINE", ctx->buf); #endif ctx->phase = PHASE_DONE; char *end = memchr(ctx->buf, '\r', ctx->offset - 1); if (!end) { goto ABORT_CONN; } if ((end[0] != '\r') || (end[1] != '\n')) { goto ABORT_CONN; } if (!process_proxy_header(f)) { goto ABORT_CONN; } // Restore original data int count = (ctx->offset - ((end - ctx->buf) + 2)); #ifdef DEBUG ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in DEBUG from: %s:%d to port=%d newBucket (2) size=%d rest=%s", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, count, end + 2); #endif if (count > 0) { e = apr_bucket_heap_create(end + 2, count, NULL, c->bucket_alloc); APR_BRIGADE_INSERT_HEAD(b, e); goto END_CONN; } break; } } if (ctx->phase == PHASE_DONE) { #ifdef DEBUG ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in from: %s:%d to port=%d phase=%d (DONE)", _CLIENT_IP, _CLIENT_ADDR->port, c->local_addr->port, ctx->phase); #endif ctx->mode = mode; ctx->need = 0; ctx->recv = 0; } break; } } } while (ctx->phase != PHASE_DONE); END_CONN: return ap_get_brigade(f->next, b, mode, block, readbytes); ABORT_CONN: ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL, MODULE_NAME "::helocon_filter_in ERROR: PROXY protocol header invalid from=%s to port=%d", _CLIENT_IP, c->local_addr->port); ABORT_CONN2: c->aborted = 1; apr_brigade_cleanup(b); return APR_ECONNABORTED; }
apr_status_t h2_beam_receive(h2_bucket_beam *beam, apr_bucket_brigade *bb, apr_read_type_e block, apr_off_t readbytes) { h2_beam_lock bl; apr_bucket *bred, *bgreen, *ng; int transferred = 0; apr_status_t status = APR_SUCCESS; apr_off_t remain = readbytes; /* Called from the green thread to take buckets from the beam */ if (enter_yellow(beam, &bl) == APR_SUCCESS) { transfer: if (beam->aborted) { if (beam->green && !APR_BRIGADE_EMPTY(beam->green)) { apr_brigade_cleanup(beam->green); } status = APR_ECONNABORTED; goto leave; } /* transfer enough buckets from our green brigade, if we have one */ while (beam->green && !APR_BRIGADE_EMPTY(beam->green) && (readbytes <= 0 || remain >= 0)) { bgreen = APR_BRIGADE_FIRST(beam->green); if (readbytes > 0 && bgreen->length > 0 && remain <= 0) { break; } APR_BUCKET_REMOVE(bgreen); APR_BRIGADE_INSERT_TAIL(bb, bgreen); remain -= bgreen->length; ++transferred; } /* transfer from our red brigade, transforming red buckets to * green ones until we have enough */ while (!H2_BLIST_EMPTY(&beam->red) && (readbytes <= 0 || remain >= 0)) { bred = H2_BLIST_FIRST(&beam->red); bgreen = NULL; if (readbytes > 0 && bred->length > 0 && remain <= 0) { break; } if (APR_BUCKET_IS_METADATA(bred)) { if (APR_BUCKET_IS_EOS(bred)) { bgreen = apr_bucket_eos_create(bb->bucket_alloc); beam->close_sent = 1; } else if (APR_BUCKET_IS_FLUSH(bred)) { bgreen = apr_bucket_flush_create(bb->bucket_alloc); } else { /* put red into hold, no green sent out */ } } else if (APR_BUCKET_IS_FILE(bred)) { /* This is set aside into the target brigade pool so that * any read operation messes with that pool and not * the red one. */ apr_bucket_file *f = (apr_bucket_file *)bred->data; apr_file_t *fd = f->fd; int setaside = (f->readpool != bb->p); if (setaside) { status = apr_file_setaside(&fd, fd, bb->p); if (status != APR_SUCCESS) { goto leave; } ++beam->files_beamed; } ng = apr_brigade_insert_file(bb, fd, bred->start, bred->length, bb->p); #if APR_HAS_MMAP /* disable mmap handling as this leads to segfaults when * the underlying file is changed while memory pointer has * been handed out. See also PR 59348 */ apr_bucket_file_enable_mmap(ng, 0); #endif remain -= bred->length; ++transferred; APR_BUCKET_REMOVE(bred); H2_BLIST_INSERT_TAIL(&beam->hold, bred); ++transferred; continue; } else { /* create a "green" standin bucket. we took care about the * underlying red bucket and its data when we placed it into * the red brigade. * the beam bucket will notify us on destruction that bred is * no longer needed. */ bgreen = h2_beam_bucket_create(beam, bred, bb->bucket_alloc, beam->buckets_sent++); } /* Place the red bucket into our hold, to be destroyed when no * green bucket references it any more. 
*/ APR_BUCKET_REMOVE(bred); H2_BLIST_INSERT_TAIL(&beam->hold, bred); beam->received_bytes += bred->length; if (bgreen) { APR_BRIGADE_INSERT_TAIL(bb, bgreen); remain -= bgreen->length; ++transferred; } } if (readbytes > 0 && remain < 0) { /* too much, put some back */ remain = readbytes; for (bgreen = APR_BRIGADE_FIRST(bb); bgreen != APR_BRIGADE_SENTINEL(bb); bgreen = APR_BUCKET_NEXT(bgreen)) { remain -= bgreen->length; if (remain < 0) { apr_bucket_split(bgreen, bgreen->length+remain); beam->green = apr_brigade_split_ex(bb, APR_BUCKET_NEXT(bgreen), beam->green); break; } } } if (beam->closed && (!beam->green || APR_BRIGADE_EMPTY(beam->green)) && H2_BLIST_EMPTY(&beam->red)) { /* beam is closed and we have nothing more to receive */ if (!beam->close_sent) { apr_bucket *b = apr_bucket_eos_create(bb->bucket_alloc); APR_BRIGADE_INSERT_TAIL(bb, b); beam->close_sent = 1; ++transferred; status = APR_SUCCESS; } } if (transferred) { status = APR_SUCCESS; } else if (beam->closed) { status = APR_EOF; } else if (block == APR_BLOCK_READ && bl.mutex && beam->m_cond) { status = wait_cond(beam, bl.mutex); if (status != APR_SUCCESS) { goto leave; } goto transfer; } else { status = APR_EAGAIN; } leave: leave_yellow(beam, &bl); } return status; }
APU_DECLARE(apr_status_t) apr_brigade_partition(apr_bucket_brigade *b, apr_off_t point, apr_bucket **after_point) { apr_bucket *e; const char *s; apr_size_t len; apr_status_t rv; if (point < 0) { /* this could cause weird (not necessarily SEGV) things to happen */ return APR_EINVAL; } if (point == 0) { *after_point = APR_BRIGADE_FIRST(b); return APR_SUCCESS; } APR_BRIGADE_CHECK_CONSISTENCY(b); for (e = APR_BRIGADE_FIRST(b); e != APR_BRIGADE_SENTINEL(b); e = APR_BUCKET_NEXT(e)) { if ((e->length == (apr_size_t)(-1)) && (point > (apr_size_t)(-1))) { /* point is too far out to simply split this bucket, * we must fix this bucket's size and keep going... */ rv = apr_bucket_read(e, &s, &len, APR_BLOCK_READ); if (rv != APR_SUCCESS) { *after_point = e; return rv; } } if ((point < e->length) || (e->length == (apr_size_t)(-1))) { /* We already checked e->length -1 above, so we now * trust e->length < MAX_APR_SIZE_T. * First try to split the bucket natively... */ if ((rv = apr_bucket_split(e, (apr_size_t)point)) != APR_ENOTIMPL) { *after_point = APR_BUCKET_NEXT(e); return rv; } /* if the bucket cannot be split, we must read from it, * changing its type to one that can be split */ rv = apr_bucket_read(e, &s, &len, APR_BLOCK_READ); if (rv != APR_SUCCESS) { *after_point = e; return rv; } /* this assumes that len == e->length, which is okay because e * might have been morphed by the apr_bucket_read() above, but * if it was, the length would have been adjusted appropriately */ if (point < e->length) { rv = apr_bucket_split(e, (apr_size_t)point); *after_point = APR_BUCKET_NEXT(e); return rv; } } if (point == e->length) { *after_point = APR_BUCKET_NEXT(e); return APR_SUCCESS; } point -= e->length; } *after_point = APR_BRIGADE_SENTINEL(b); return APR_INCOMPLETE; }
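The calling contract, as a sketch: after a successful partition every bucket before *after_point covers exactly the first point bytes, so a caller can move them wholesale (the helper name is illustrative, not from the function above).

/* Move the first `point` bytes of bb into head. Assumes point lies
 * inside the brigade, so APR_INCOMPLETE is treated as an error here. */
static apr_status_t take_head(apr_bucket_brigade *bb,
                              apr_bucket_brigade *head, apr_off_t point)
{
    apr_bucket *after, *e;
    apr_status_t rv = apr_brigade_partition(bb, point, &after);
    if (rv != APR_SUCCESS) {
        return rv;
    }
    while ((e = APR_BRIGADE_FIRST(bb)) != after) {
        APR_BUCKET_REMOVE(e);
        APR_BRIGADE_INSERT_TAIL(head, e);
    }
    return APR_SUCCESS;
}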
static apr_status_t google_analytics_out_filter(ap_filter_t *f, apr_bucket_brigade *bb) { request_rec *r = f->r; google_analytics_filter_ctx *ctx = f->ctx; google_analytics_filter_config *c; apr_bucket *b = APR_BRIGADE_FIRST(bb); apr_size_t bytes; apr_size_t fbytes; apr_size_t offs; const char *buf; const char *le = NULL; const char *le_n; const char *le_r; const char *bufp; const char *subs; apr_size_t match; apr_bucket *b1; char *fbuf; apr_status_t rv; apr_bucket_brigade *bbline; // do nothing for subrequests if (r->main) { ap_remove_output_filter(f); return ap_pass_brigade(f->next, bb); } c = ap_get_module_config(r->per_dir_config, &google_analytics_module); if (ctx == NULL) { ctx = f->ctx = apr_pcalloc(r->pool, sizeof(google_analytics_filter_ctx)); ctx->bbsave = apr_brigade_create(r->pool, f->c->bucket_alloc); } // the body length will change, so unset the length-related headers apr_table_unset(r->headers_out, "Content-Length"); apr_table_unset(r->headers_out, "Content-MD5"); apr_table_unset(r->headers_out, "Accept-Ranges"); apr_table_unset(r->headers_out, "ETag"); bbline = apr_brigade_create(r->pool, f->c->bucket_alloc); // re-split the brigade into one bucket per line while ( b != APR_BRIGADE_SENTINEL(bb) ) { if ( !APR_BUCKET_IS_METADATA(b) ) { if ( apr_bucket_read(b, &buf, &bytes, APR_BLOCK_READ) == APR_SUCCESS ) { if ( bytes == 0 ) { APR_BUCKET_REMOVE(b); } else { while ( bytes > 0 ) { le_n = memchr(buf, '\n', bytes); le_r = memchr(buf, '\r', bytes); if ( le_n != NULL ) { if ( le_r != NULL && le_n == le_r + 1 ) { le = le_n; } else if ( le_r != NULL && le_r < le_n ) { le = le_r; } else { le = le_n; } } else { le = le_r; } if ( le ) { /* plain pointer difference: the old (unsigned int) pointer casts truncate on 64-bit builds */ offs = (apr_size_t)(le - buf) + 1; apr_bucket_split(b, offs); bytes -= offs; buf += offs; b1 = APR_BUCKET_NEXT(b); APR_BUCKET_REMOVE(b); if ( !APR_BRIGADE_EMPTY(ctx->bbsave) ) { APR_BRIGADE_INSERT_TAIL(ctx->bbsave, b); rv = apr_brigade_pflatten(ctx->bbsave, &fbuf, &fbytes, r->pool); b = apr_bucket_pool_create(fbuf, fbytes, r->pool, r->connection->bucket_alloc); apr_brigade_cleanup(ctx->bbsave); } APR_BRIGADE_INSERT_TAIL(bbline, b); b = b1; } else { APR_BUCKET_REMOVE(b); APR_BRIGADE_INSERT_TAIL(ctx->bbsave, b); bytes = 0; } } /* while bytes > 0 */ } } else { APR_BUCKET_REMOVE(b); } } else if ( APR_BUCKET_IS_EOS(b) ) { if ( !APR_BRIGADE_EMPTY(ctx->bbsave) ) { rv = apr_brigade_pflatten(ctx->bbsave, &fbuf, &fbytes, r->pool); b1 = apr_bucket_pool_create(fbuf, fbytes, r->pool, r->connection->bucket_alloc); APR_BRIGADE_INSERT_TAIL(bbline, b1); } apr_brigade_cleanup(ctx->bbsave); f->ctx = NULL; APR_BUCKET_REMOVE(b); APR_BRIGADE_INSERT_TAIL(bbline, b); } else { apr_bucket_delete(b); } b = APR_BRIGADE_FIRST(bb); } // walk the per-line buckets and inject the replacement before </body> for ( b = APR_BRIGADE_FIRST(bbline); b != APR_BRIGADE_SENTINEL(bbline); b = APR_BUCKET_NEXT(b) ) { if ( !APR_BUCKET_IS_METADATA(b) && (apr_bucket_read(b, &buf, &bytes, APR_BLOCK_READ) == APR_SUCCESS)) { bufp = buf; if (ap_regexec(regex_tag_exists, bufp, 0, NULL, 0) == 0) { break; } subs = apr_strmatch(pattern_body_end_tag, bufp, bytes); if (subs != NULL) { match = (apr_size_t)(subs - bufp); bytes -= match; bufp += match; apr_bucket_split(b, match); b1 = APR_BUCKET_NEXT(b); apr_bucket_split(b1, body_end_tag_length); b = APR_BUCKET_NEXT(b1); apr_bucket_delete(b1); bytes -= body_end_tag_length; bufp += body_end_tag_length; b1 = apr_bucket_immortal_create(c->replace, strlen(c->replace), r->connection->bucket_alloc); APR_BUCKET_INSERT_BEFORE(b, b1); } } } rv = ap_pass_brigade(f->next, bbline); for ( b = APR_BRIGADE_FIRST(ctx->bbsave); b != 
APR_BRIGADE_SENTINEL(ctx->bbsave); b = APR_BUCKET_NEXT(b)) { apr_bucket_setaside(b, r->pool); } return rv; }
apr_status_t mgs_filter_output(ap_filter_t * f, apr_bucket_brigade * bb) { ssize_t ret; /* must be signed: gnutls_record_send/gnutls_bye return negative error codes, and the original apr_size_t made the ret < 0 test always false */ mgs_handle_t *ctxt = (mgs_handle_t *) f->ctx; apr_status_t status = APR_SUCCESS; apr_read_type_e rblock = APR_NONBLOCK_READ; if (f->c->aborted) { apr_brigade_cleanup(bb); return APR_ECONNABORTED; } if (ctxt->status == 0) { gnutls_do_handshake(ctxt); } if (ctxt->status < 0) { return ap_pass_brigade(f->next, bb); } while (!APR_BRIGADE_EMPTY(bb)) { apr_bucket *bucket = APR_BRIGADE_FIRST(bb); if (APR_BUCKET_IS_EOS(bucket)) { return ap_pass_brigade(f->next, bb); } else if (APR_BUCKET_IS_FLUSH(bucket)) { /* Try Flush */ if (write_flush(ctxt) < 0) { /* Flush Error */ return ctxt->output_rc; } /* cleanup! */ apr_bucket_delete(bucket); } else if (AP_BUCKET_IS_EOC(bucket)) { /* End Of Connection */ if (ctxt->session != NULL) { /* Try A Clean Shutdown */ do { ret = gnutls_bye(ctxt->session, GNUTLS_SHUT_WR); } while (ret == GNUTLS_E_INTERRUPTED || ret == GNUTLS_E_AGAIN); /* De-Initialize Session */ gnutls_deinit(ctxt->session); ctxt->session = NULL; } /* cleanup! */ apr_bucket_delete(bucket); /* Pass next brigade! */ return ap_pass_brigade(f->next, bb); } else { /* filter output */ const char *data; apr_size_t len; status = apr_bucket_read(bucket, &data, &len, rblock); if (APR_STATUS_IS_EAGAIN(status)) { /* No data available so Flush! */ if (write_flush(ctxt) < 0) { return ctxt->output_rc; } /* Try again with a blocking read. */ rblock = APR_BLOCK_READ; continue; } rblock = APR_NONBLOCK_READ; if (!APR_STATUS_IS_EOF(status) && (status != APR_SUCCESS)) { return status; } if (len > 0) { if (ctxt->session == NULL) { ret = GNUTLS_E_INVALID_REQUEST; } else { do { ret = gnutls_record_send (ctxt->session, data, len); } while (ret == GNUTLS_E_INTERRUPTED || ret == GNUTLS_E_AGAIN); } if (ret < 0) { /* error sending output */ ap_log_error(APLOG_MARK, APLOG_INFO, ctxt->output_rc, ctxt->c->base_server, "GnuTLS: Error writing data." " (%d) '%s'", (int) ret, gnutls_strerror(ret)); if (ctxt->output_rc == APR_SUCCESS) { ctxt->output_rc = APR_EGENERAL; return ctxt->output_rc; } } else if ((apr_size_t) ret != len) { /* Not able to send the entire bucket: split at the byte count actually sent and retry with the remainder. */ apr_bucket_split(bucket, ret); } } apr_bucket_delete(bucket); } } return status; }
static apr_status_t copy_brigade_range(apr_bucket_brigade *bb, apr_bucket_brigade *bbout, apr_off_t start, apr_off_t end) { apr_bucket *first = NULL, *last = NULL, *out_first = NULL, *e; apr_uint64_t pos = 0, off_first = 0, off_last = 0; apr_status_t rv; apr_uint64_t start64, end64; apr_off_t pofft = 0; /* * Once we know that start and end are >= 0 convert everything to apr_uint64_t. * See the comments in apr_brigade_partition why. * In short apr_off_t (for values >= 0) and apr_size_t fit into apr_uint64_t. */ start64 = (apr_uint64_t)start; end64 = (apr_uint64_t)end; if (start < 0 || end < 0 || start64 > end64) return APR_EINVAL; for (e = APR_BRIGADE_FIRST(bb); e != APR_BRIGADE_SENTINEL(bb); e = APR_BUCKET_NEXT(e)) { apr_uint64_t elen64; /* we know that no bucket has undefined length (-1) */ AP_DEBUG_ASSERT(e->length != (apr_size_t)(-1)); elen64 = (apr_uint64_t)e->length; if (!first && (elen64 + pos > start64)) { first = e; off_first = pos; } if (elen64 + pos > end64) { last = e; off_last = pos; break; } pos += elen64; } if (!first || !last) return APR_EINVAL; e = first; while (1) { apr_bucket *copy; AP_DEBUG_ASSERT(e != APR_BRIGADE_SENTINEL(bb)); rv = apr_bucket_copy(e, &copy); if (rv != APR_SUCCESS) { apr_brigade_cleanup(bbout); return rv; } APR_BRIGADE_INSERT_TAIL(bbout, copy); if (e == first) { if (off_first != start64) { rv = apr_bucket_split(copy, (apr_size_t)(start64 - off_first)); if (rv != APR_SUCCESS) { apr_brigade_cleanup(bbout); return rv; } out_first = APR_BUCKET_NEXT(copy); APR_BUCKET_REMOVE(copy); apr_bucket_destroy(copy); } else { out_first = copy; } } if (e == last) { if (e == first) { off_last += start64 - off_first; copy = out_first; } if (end64 - off_last != (apr_uint64_t)e->length) { rv = apr_bucket_split(copy, (apr_size_t)(end64 + 1 - off_last)); if (rv != APR_SUCCESS) { apr_brigade_cleanup(bbout); return rv; } copy = APR_BUCKET_NEXT(copy); if (copy != APR_BRIGADE_SENTINEL(bbout)) { APR_BUCKET_REMOVE(copy); apr_bucket_destroy(copy); } } break; } e = APR_BUCKET_NEXT(e); } AP_DEBUG_ASSERT(APR_SUCCESS == apr_brigade_length(bbout, 1, &pofft)); pos = (apr_uint64_t)pofft; AP_DEBUG_ASSERT(pos == end64 - start64 + 1); return APR_SUCCESS; }
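A hypothetical use of the helper above for a byte-range response; r, c, f, and bb are assumed request/filter context. Note the range is inclusive on both ends.

/* Copy bytes 100..199 of the body into a fresh brigade, leaving the
 * original buckets untouched (copy_brigade_range copies, never moves). */
apr_bucket_brigade *range = apr_brigade_create(r->pool, c->bucket_alloc);
apr_status_t rv = copy_brigade_range(bb, range, 100, 199);
if (rv == APR_SUCCESS) {
    rv = ap_pass_brigade(f->next, range);
}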
/* xlate_brigade() is used to filter request and response bodies * * we'll stop when one of the following occurs: * . we run out of buckets * . we run out of space in the output buffer * . we hit an error or metadata * * inputs: * bb: brigade to process * buffer: storage to hold the translated characters * buffer_avail: size of buffer * (and a few more uninteresting parms) * * outputs: * return value: APR_SUCCESS or some error code * bb: we've removed any buckets representing the * translated characters; the eos bucket, if * present, will be left in the brigade * buffer: filled in with translated characters * buffer_avail: updated with the bytes remaining * hit_eos: did we hit an EOS bucket? */ static apr_status_t xlate_brigade(charset_filter_ctx_t *ctx, apr_bucket_brigade *bb, char *buffer, apr_size_t *buffer_avail, int *hit_eos) { apr_bucket *b = NULL; /* set to NULL only to quiet some gcc */ apr_bucket *consumed_bucket; const char *bucket; apr_size_t bytes_in_bucket; /* total bytes read from current bucket */ apr_size_t bucket_avail; /* bytes left in current bucket */ apr_status_t rv = APR_SUCCESS; *hit_eos = 0; bucket_avail = 0; consumed_bucket = NULL; while (1) { if (!bucket_avail) { /* no bytes left to process in the current bucket... */ if (consumed_bucket) { apr_bucket_delete(consumed_bucket); consumed_bucket = NULL; } b = APR_BRIGADE_FIRST(bb); if (b == APR_BRIGADE_SENTINEL(bb) || APR_BUCKET_IS_METADATA(b)) { break; } rv = apr_bucket_read(b, &bucket, &bytes_in_bucket, APR_BLOCK_READ); if (rv != APR_SUCCESS) { ctx->ees = EES_BUCKET_READ; break; } bucket_avail = bytes_in_bucket; consumed_bucket = b; /* for axing when we're done reading it */ } if (bucket_avail) { /* We've got data, so translate it. */ if (ctx->saved) { /* Rats... we need to finish a partial character from the previous * bucket. * * Strangely, finish_partial_char() increments the input buffer * pointer but does not increment the output buffer pointer. */ apr_size_t old_buffer_avail = *buffer_avail; rv = finish_partial_char(ctx, &bucket, &bucket_avail, &buffer, buffer_avail); buffer += old_buffer_avail - *buffer_avail; } else { apr_size_t old_buffer_avail = *buffer_avail; apr_size_t old_bucket_avail = bucket_avail; rv = apr_xlate_conv_buffer(ctx->xlate, bucket, &bucket_avail, buffer, buffer_avail); buffer += old_buffer_avail - *buffer_avail; bucket += old_bucket_avail - bucket_avail; if (rv == APR_INCOMPLETE) { /* partial character at end of input */ /* We need to save the final byte(s) for next time; we can't * convert it until we look at the next bucket. */ rv = set_aside_partial_char(ctx, bucket, bucket_avail); bucket_avail = 0; } } if (rv != APR_SUCCESS) { /* bad input byte or partial char too big to store */ break; } if (*buffer_avail < XLATE_MIN_BUFF_LEFT) { /* if any data remains in the current bucket, split there */ if (bucket_avail) { apr_bucket_split(b, bytes_in_bucket - bucket_avail); } apr_bucket_delete(b); break; } } } if (!APR_BRIGADE_EMPTY(bb)) { b = APR_BRIGADE_FIRST(bb); if (APR_BUCKET_IS_EOS(b)) { /* Leave the eos bucket in the brigade for reporting to * subsequent filters. */ *hit_eos = 1; if (ctx->saved) { /* Oops... we have a partial char from the previous bucket * that won't be completed because there's no more data. */ rv = APR_INCOMPLETE; ctx->ees = EES_INCOMPLETE_CHAR; } } } return rv; }
static int zevent_process_connection(conn_state_t *cs) { /* * code for your app; this is just an example echo test. */ apr_bucket *b; char *msg; apr_size_t len=0; apr_size_t olen = 0; const char *buf; apr_status_t rv; cs->pfd->reqevents = APR_POLLIN; if(cs->pfd->rtnevents & APR_POLLIN){ len = 4096; msg = (char *)apr_bucket_alloc(len,cs->baout); if (msg == NULL) { return -1; } rv = apr_socket_recv(cs->pfd->desc.s,msg,&len); if(rv != APR_SUCCESS) { zevent_log_error(APLOG_MARK,NULL,"close socket!"); return -1; } /* msg is not NUL-terminated, so bound the width when logging it */ zevent_log_error(APLOG_MARK,NULL,"recv:%.*s",(int)len,msg); /* a NULL free function makes the heap bucket copy the data, so msg can be freed right away */ b = apr_bucket_heap_create(msg,len,NULL,cs->baout); apr_bucket_free(msg); APR_BRIGADE_INSERT_TAIL(cs->bbout,b); cs->pfd->reqevents |= APR_POLLOUT; } else { if(cs->bbout){ b = APR_BRIGADE_FIRST(cs->bbout); while (b != APR_BRIGADE_SENTINEL(cs->bbout)) { apr_bucket *next; apr_bucket_read(b,&buf,&len,APR_BLOCK_READ); olen = len; rv = apr_socket_send(cs->pfd->desc.s,buf,&len); if((rv == APR_SUCCESS) && (len >= olen)) { /* whole bucket sent: take the next pointer before deleting, instead of reading a freed bucket as the old loop did */ next = APR_BUCKET_NEXT(b); apr_bucket_delete(b); b = next; continue; } if(rv == APR_SUCCESS) { /* partial send: keep the unsent tail as the new head */ apr_bucket_split(b,len); next = APR_BUCKET_NEXT(b); apr_bucket_delete(b); b = next; } break; } if(b != APR_BRIGADE_SENTINEL(cs->bbout)) cs->pfd->reqevents |= APR_POLLOUT; } } apr_pollset_add(cs->pollset,cs->pfd); return 0; }
apr_status_t h2_util_bb_readx(apr_bucket_brigade *bb, h2_util_pass_cb *cb, void *ctx, apr_size_t *plen, int *peos) { apr_status_t status = APR_SUCCESS; int consume = (cb != NULL); apr_size_t written = 0; apr_size_t avail = *plen; apr_bucket *next, *b; /* Pass data in our brigade through the callback until the length * is satisfied or we encounter an EOS. */ *peos = 0; for (b = APR_BRIGADE_FIRST(bb); (status == APR_SUCCESS) && (b != APR_BRIGADE_SENTINEL(bb)); b = next) { if (APR_BUCKET_IS_METADATA(b)) { if (APR_BUCKET_IS_EOS(b)) { *peos = 1; } else { /* ignore */ } } else if (avail <= 0) { break; } else { const char *data = NULL; apr_size_t data_len; if (b->length == ((apr_size_t)-1)) { /* read to determine length */ status = apr_bucket_read(b, &data, &data_len, APR_NONBLOCK_READ); } else { data_len = b->length; } if (data_len > avail) { apr_bucket_split(b, avail); data_len = avail; } if (consume) { if (!data) { status = apr_bucket_read(b, &data, &data_len, APR_NONBLOCK_READ); } if (status == APR_SUCCESS) { status = cb(ctx, data, data_len); } } else { data_len = b->length; } avail -= data_len; written += data_len; } next = APR_BUCKET_NEXT(b); if (consume) { apr_bucket_delete(b); } } *plen = written; if (status == APR_SUCCESS && !*peos && !*plen) { return APR_EAGAIN; } return status; }
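A hypothetical pass callback matching the call shape above, cb(ctx, data, len); it only counts bytes, which is enough to show the consuming vs. non-consuming modes.

static apr_status_t count_cb(void *ctx, const char *data, apr_size_t len)
{
    apr_size_t *total = ctx;
    (void)data;              /* a real callback would copy or hash this */
    *total += len;
    return APR_SUCCESS;
}

/* usage sketch: apr_size_t n = 0, want = 1024; int eos = 0;
 * h2_util_bb_readx(bb, count_cb, &n, &want, &eos);
 * with cb == NULL the function only measures and deletes no buckets. */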
static void test_splits(abts_case *tc, void *ctx) { apr_bucket_alloc_t *ba = apr_bucket_alloc_create(p); apr_bucket_brigade *bb; apr_bucket *e; char *str = "alphabeta"; int n; bb = apr_brigade_create(p, ba); APR_BRIGADE_INSERT_TAIL(bb, apr_bucket_immortal_create(str, 9, ba)); APR_BRIGADE_INSERT_TAIL(bb, apr_bucket_transient_create(str, 9, ba)); APR_BRIGADE_INSERT_TAIL(bb, apr_bucket_heap_create(strdup(str), 9, free, ba)); APR_BRIGADE_INSERT_TAIL(bb, apr_bucket_pool_create(apr_pstrdup(p, str), 9, p, ba)); ABTS_ASSERT(tc, "four buckets inserted", count_buckets(bb) == 4); /* now split each of the buckets after byte 5 */ for (n = 0, e = APR_BRIGADE_FIRST(bb); n < 4; n++) { ABTS_ASSERT(tc, "reached end of brigade", e != APR_BRIGADE_SENTINEL(bb)); ABTS_ASSERT(tc, "split bucket OK", apr_bucket_split(e, 5) == APR_SUCCESS); e = APR_BUCKET_NEXT(e); ABTS_ASSERT(tc, "split OK", e != APR_BRIGADE_SENTINEL(bb)); e = APR_BUCKET_NEXT(e); } ABTS_ASSERT(tc, "four buckets split into eight", count_buckets(bb) == 8); for (n = 0, e = APR_BRIGADE_FIRST(bb); n < 4; n++) { const char *data; apr_size_t len; apr_assert_success(tc, "read alpha from bucket", apr_bucket_read(e, &data, &len, APR_BLOCK_READ)); ABTS_ASSERT(tc, "read 5 bytes", len == 5); ABTS_STR_NEQUAL(tc, "alpha", data, 5); e = APR_BUCKET_NEXT(e); apr_assert_success(tc, "read beta from bucket", apr_bucket_read(e, &data, &len, APR_BLOCK_READ)); ABTS_ASSERT(tc, "read 4 bytes", len == 4); /* compare only the 4 bytes the bucket holds; comparing 5 read past its end */ ABTS_STR_NEQUAL(tc, "beta", data, 4); e = APR_BUCKET_NEXT(e); } /* now delete the "alpha" buckets */ for (n = 0, e = APR_BRIGADE_FIRST(bb); n < 4; n++) { apr_bucket *f; ABTS_ASSERT(tc, "reached end of brigade", e != APR_BRIGADE_SENTINEL(bb)); f = APR_BUCKET_NEXT(e); apr_bucket_delete(e); e = APR_BUCKET_NEXT(f); } ABTS_ASSERT(tc, "eight buckets reduced to four", count_buckets(bb) == 4); flatten_match(tc, "flatten beta brigade", bb, "beta" "beta" "beta" "beta"); apr_brigade_destroy(bb); apr_bucket_alloc_destroy(ba); }
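The invariant the test exercises, stated standalone: a split turns one bucket into two adjacent buckets over the same storage, with no data copied for the four types used above (a minimal sketch assuming a brigade bb and allocator ba already exist).

apr_bucket *e = apr_bucket_immortal_create("alphabeta", 9, ba);
APR_BRIGADE_INSERT_TAIL(bb, e);
apr_bucket_split(e, 5);                 /* e now covers "alpha" (5 bytes) */
apr_bucket *rest = APR_BUCKET_NEXT(e);  /* rest covers "beta" (4 bytes)  */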
static PyObject *_filter_read(filterobject *self, PyObject *args, int readline) { apr_bucket *b; long bytes_read; PyObject *result; char *buffer; long bufsize; int newline = 0; long len = -1; conn_rec *c = self->request_obj->request_rec->connection; if (! PyArg_ParseTuple(args, "|l", &len)) return NULL; if (self->closed) { PyErr_SetString(PyExc_ValueError, "I/O operation on closed filter"); return NULL; } if (self->is_input) { /* does the output brigade exist? */ if (!self->bb_in) { self->bb_in = apr_brigade_create(self->f->r->pool, c->bucket_alloc); } Py_BEGIN_ALLOW_THREADS; self->rc = ap_get_brigade(self->f->next, self->bb_in, self->mode, APR_BLOCK_READ, self->readbytes); Py_END_ALLOW_THREADS; if (!APR_STATUS_IS_EAGAIN(self->rc) && !(self->rc == APR_SUCCESS)) { PyErr_SetObject(PyExc_IOError, PyString_FromString("Input filter read error")); return NULL; } } /* * loop through the brigade reading buckets into the string */ b = APR_BRIGADE_FIRST(self->bb_in); if (b == APR_BRIGADE_SENTINEL(self->bb_in)) return PyString_FromString(""); /* reached eos ? */ if (APR_BUCKET_IS_EOS(b)) { apr_bucket_delete(b); Py_INCREF(Py_None); return Py_None; } bufsize = len < 0 ? HUGE_STRING_LEN : len; /* PYTHON 2.5: 'PyString_FromStringAndSize' uses Py_ssize_t for input parameters */ result = PyString_FromStringAndSize(NULL, bufsize); /* possibly no more memory */ if (result == NULL) return PyErr_NoMemory(); buffer = PyString_AS_STRING((PyStringObject *) result); bytes_read = 0; while ((bytes_read < len || len == -1) && !(APR_BUCKET_IS_EOS(b) || APR_BUCKET_IS_FLUSH(b) || b == APR_BRIGADE_SENTINEL(self->bb_in))) { const char *data; apr_size_t size; apr_bucket *old; int i; if (apr_bucket_read(b, &data, &size, APR_BLOCK_READ) != APR_SUCCESS) { PyErr_SetObject(PyExc_IOError, PyString_FromString("Filter read error")); return NULL; } if (bytes_read + size > bufsize) { apr_bucket_split(b, bufsize - bytes_read); size = bufsize - bytes_read; /* now the bucket is the exact size we need */ } if (readline) { /* scan for newline */ for (i=0; i<size; i++) { if (data[i] == '\n') { if (i+1 != size) { /* (no need to split if we're at end of bucket) */ /* split after newline */ apr_bucket_split(b, i+1); size = i + 1; } newline = 1; break; } } } memcpy(buffer, data, size); buffer += size; bytes_read += size; /* time to grow destination string? */ if (newline == 0 && len < 0 && bytes_read == bufsize) { /* PYTHON 2.5: '_PyString_Resize' uses Py_ssize_t for input parameters */ _PyString_Resize(&result, bufsize + HUGE_STRING_LEN); buffer = PyString_AS_STRING((PyStringObject *) result); buffer += bytes_read; bufsize += HUGE_STRING_LEN; } if (readline && newline) { apr_bucket_delete(b); break; } old = b; b = APR_BUCKET_NEXT(b); apr_bucket_delete(old); /* if (self->is_input) { */ /* if (b == APR_BRIGADE_SENTINEL(self->bb_in)) { */ /* /\* brigade ended, but no EOS - get another */ /* brigade *\/ */ /* Py_BEGIN_ALLOW_THREADS; */ /* self->rc = ap_get_brigade(self->f->next, self->bb_in, self->mode, */ /* APR_BLOCK_READ, self->readbytes); */ /* Py_END_ALLOW_THREADS; */ /* if (! APR_STATUS_IS_SUCCESS(self->rc)) { */ /* PyErr_SetObject(PyExc_IOError, */ /* PyString_FromString("Input filter read error")); */ /* return NULL; */ /* } */ /* b = APR_BRIGADE_FIRST(self->bb_in); */ /* } */ /* } */ } /* resize if necessary */ if (bytes_read < len || len < 0) /* PYTHON 2.5: '_PyString_Resize' uses Py_ssize_t for input parameters */ if(_PyString_Resize(&result, bytes_read)) return NULL; return result; }
static apr_status_t line_edit_filter(ap_filter_t* f, apr_bucket_brigade* bb)
{
    int i, j;
    unsigned int match;
    unsigned int nmatch = 10;
    ap_regmatch_t pmatch[10];
    const char* bufp;
    const char* subs;
    apr_size_t bytes;
    apr_size_t fbytes;
    apr_size_t offs;
    const char* buf;
    const char* le = NULL;
    const char* le_n;
    const char* le_r;
    char* fbuf;
    apr_bucket* b = APR_BRIGADE_FIRST(bb);
    apr_bucket* b1;
    int found = 0;
    apr_status_t rv;

    apr_bucket_brigade* bbline;
    line_edit_cfg* cfg
        = ap_get_module_config(f->r->per_dir_config, &line_edit_module);
    rewriterule* rules = (rewriterule*) cfg->rewriterules->elts;
    rewriterule* newrule;

    line_edit_ctx* ctx = f->ctx;
    if (ctx == NULL) {

        /* check env to see if we're wanted, to give basic control with 2.0 */
        buf = apr_table_get(f->r->subprocess_env, "LineEdit");
        if (buf && f->r->content_type) {
            char* lcbuf = apr_pstrdup(f->r->pool, buf);
            char* lctype = apr_pstrdup(f->r->pool, f->r->content_type);
            char* c;

            /* lowercase both strings, and truncate the content-type at any
             * ';' so a charset suffix doesn't defeat the comparison */
            for (c = lcbuf; *c; ++c) {
                if (isupper((unsigned char)*c))
                    *c = tolower((unsigned char)*c);
            }
            for (c = lctype; *c; ++c) {
                if (isupper((unsigned char)*c))
                    *c = tolower((unsigned char)*c);
                else if (*c == ';') {
                    *c = 0;
                    break;
                }
            }

            if (!strstr(lcbuf, lctype)) {
                /* don't filter this content type */
                ap_filter_t* fnext = f->next;
                ap_remove_output_filter(f);
                return ap_pass_brigade(fnext, bb);
            }
        }

        ctx = f->ctx = apr_palloc(f->r->pool, sizeof(line_edit_ctx));
        ctx->bbsave = apr_brigade_create(f->r->pool, f->c->bucket_alloc);

        /* If we have any regex matches, we'll need to copy everything, so we
         * have null-terminated strings to parse.  That's a lot of memory if
         * we're streaming anything big.  So we'll use (and reuse) a local
         * subpool.  Fall back to the request pool if anything bad happens.
         */
        ctx->lpool = f->r->pool;
        for (i = 0; i < cfg->rewriterules->nelts; ++i) {
            if (rules[i].flags & M_REGEX) {
                if (apr_pool_create(&ctx->lpool, f->r->pool) != APR_SUCCESS) {
                    ctx->lpool = f->r->pool;
                }
                break;
            }
        }

        /* If we have env interpolation, we'll need a private copy of
         * our rewrite rules with this request's env.  Otherwise we can
         * save processing time by using the original.
         *
         * If one ENV is found, we also have to copy all previous and
         * subsequent rules, even those with no interpolation.
         */
        ctx->rewriterules = cfg->rewriterules;
        for (i = 0; i < cfg->rewriterules->nelts; ++i) {
            found |= (rules[i].flags & M_ENV);
            if (found) {
                if (ctx->rewriterules == cfg->rewriterules) {
                    ctx->rewriterules = apr_array_make(f->r->pool,
                        cfg->rewriterules->nelts, sizeof(rewriterule));
                    for (j = 0; j < i; ++j) {
                        newrule = apr_array_push(ctx->rewriterules);
                        newrule->from = rules[j].from;
                        newrule->to = rules[j].to;
                        newrule->flags = rules[j].flags;
                        newrule->length = rules[j].length;
                    }
                }
                /* this rule needs to be interpolated */
                newrule = apr_array_push(ctx->rewriterules);
                newrule->from = rules[i].from;
                if (rules[i].flags & M_ENV) {
                    newrule->to = interpolate_env(f->r, rules[i].to);
                }
                else {
                    newrule->to = rules[i].to;
                }
                newrule->flags = rules[i].flags;
                newrule->length = rules[i].length;
            }
        }

        /* for back-compatibility with Apache 2.0, set some protocol stuff */
        apr_table_unset(f->r->headers_out, "Content-Length");
        apr_table_unset(f->r->headers_out, "Content-MD5");
        apr_table_unset(f->r->headers_out, "Accept-Ranges");
    }

    /* by now our rules are in ctx->rewriterules */
    rules = (rewriterule*) ctx->rewriterules->elts;

    /* bbline is what goes to the next filter,
     * so we can have a new one each time.
     */
    bbline = apr_brigade_create(f->r->pool, f->c->bucket_alloc);

    /* first ensure we have no mid-line breaks that might be in the
     * middle of a search string causing us to miss it!  At the same
     * time we split into lines to avoid pattern-matching over big
     * chunks of memory.
     */
    while (b != APR_BRIGADE_SENTINEL(bb)) {
        if (!APR_BUCKET_IS_METADATA(b)) {
            if (apr_bucket_read(b, &buf, &bytes, APR_BLOCK_READ)
                    == APR_SUCCESS) {
                if (bytes == 0) {
                    /* delete (not just remove) the empty bucket, or it
                     * would leak */
                    apr_bucket_delete(b);
                }
                else while (bytes > 0) {
                    switch (cfg->lineend) {

                    case LINEEND_UNIX:
                        le = memchr(buf, '\n', bytes);
                        break;
                    case LINEEND_MAC:
                        le = memchr(buf, '\r', bytes);
                        break;
                    case LINEEND_DOS:
                        /* Edge-case issue: if a \r\n spans buckets it'll get
                         * missed.  Not a problem for present purposes, but
                         * would be an issue if we claimed to support pattern
                         * matching on the lineends.
                         */
                        found = 0;
                        le = memchr(buf+1, '\n', bytes-1);
                        while (le && !found) {
                            if (le[-1] == '\r') {
                                found = 1;
                            }
                            else {
                                le = memchr(le+1, '\n',
                                            bytes-1 - (le+1 - buf));
                            }
                        }
                        if (!found)
                            le = NULL;
                        break;
                    case LINEEND_ANY:
                    case LINEEND_UNSET:
                        /* Edge-case notabug: if a \r\n spans buckets it'll be
                         * seen as two line-ends, and the \n goes out as a
                         * one-byte bucket.
                         */
                        le_n = memchr(buf, '\n', bytes);
                        le_r = memchr(buf, '\r', bytes);
                        if (le_n != NULL && le_r != NULL) {
                            if (le_n == le_r + 1)
                                le = le_n;   /* \r\n pair: split after the \n */
                            else
                                le = (le_r < le_n) ? le_r : le_n;  /* earlier */
                        }
                        else {
                            le = le_n ? le_n : le_r;
                        }
                        break;
                    case LINEEND_NONE:
                        le = NULL;
                        break;
                    case LINEEND_CUSTOM:
                        le = memchr(buf, cfg->lechar, bytes);
                        break;
                    }
                    if (le) {
                        /* found a lineend in this bucket.  Use pointer
                         * subtraction here; casting pointers through
                         * unsigned int truncates on 64-bit platforms. */
                        offs = 1 + (apr_size_t)(le - buf);
                        apr_bucket_split(b, offs);
                        bytes -= offs;
                        buf += offs;
                        b1 = APR_BUCKET_NEXT(b);
                        APR_BUCKET_REMOVE(b);

                        /* Is there any previous unterminated content? */
                        if (!APR_BRIGADE_EMPTY(ctx->bbsave)) {
                            /* append this to any content waiting for a lineend */
                            APR_BRIGADE_INSERT_TAIL(ctx->bbsave, b);
                            rv = apr_brigade_pflatten(ctx->bbsave, &fbuf,
                                                      &fbytes, f->r->pool);
                            /* make b a new bucket of the flattened stuff */
                            b = apr_bucket_pool_create(fbuf, fbytes,
                                    f->r->pool, f->r->connection->bucket_alloc);
                            /* bbsave has been consumed, so clear it */
                            apr_brigade_cleanup(ctx->bbsave);
                        }
                        /* b now contains exactly one line */
                        APR_BRIGADE_INSERT_TAIL(bbline, b);
                        b = b1;
                    }
                    else {
                        /* no lineend found.  Remember the dangling content */
                        APR_BUCKET_REMOVE(b);
                        APR_BRIGADE_INSERT_TAIL(ctx->bbsave, b);
                        bytes = 0;
                    }
                }       /* while bytes > 0 */
            }
            else {
                /* bucket read failed - oops!  Delete it; a plain
                 * APR_BUCKET_REMOVE would leak the bucket. */
                apr_bucket_delete(b);
            }
        }
        else if (APR_BUCKET_IS_EOS(b)) {
            /* If there's data to pass, send it in one bucket */
            if (!APR_BRIGADE_EMPTY(ctx->bbsave)) {
                rv = apr_brigade_pflatten(ctx->bbsave, &fbuf, &fbytes,
                                          f->r->pool);
                b1 = apr_bucket_pool_create(fbuf, fbytes, f->r->pool,
                                            f->r->connection->bucket_alloc);
                APR_BRIGADE_INSERT_TAIL(bbline, b1);
            }
            apr_brigade_cleanup(ctx->bbsave);

            /* start again rather than segfault if a seriously buggy
             * filter in front of us sent a bogus EOS
             */
            f->ctx = NULL;

            /* move the EOS to the new brigade */
            APR_BUCKET_REMOVE(b);
            APR_BRIGADE_INSERT_TAIL(bbline, b);
        }
        else {
            /* chop flush or unknown metadata bucket types */
            apr_bucket_delete(b);
        }
        /* OK, reset pointer to what's left (since we're not in a for-loop) */
        b = APR_BRIGADE_FIRST(bb);
    }

    /* OK, now we have a bunch of complete lines in bbline,
     * so we can apply our edit rules */

    /* When we get a match, we split the line into before+match+after.
     * To flatten that back into one buf every time would be inefficient.
     * So we treat it as three separate bufs to apply future rules.
     *
     * We can only reasonably do that by looping over buckets *inside*
     * the loop over rules.
     *
     * That means concepts like one-match-per-line or start-of-line-only
     * won't work, except for the first rule.  So we won't pretend.
     */
    for (i = 0; i < ctx->rewriterules->nelts; ++i) {
        for (b = APR_BRIGADE_FIRST(bbline);
             b != APR_BRIGADE_SENTINEL(bbline);
             b = APR_BUCKET_NEXT(b)) {
            if (!APR_BUCKET_IS_METADATA(b)
                && (apr_bucket_read(b, &buf, &bytes, APR_BLOCK_READ)
                        == APR_SUCCESS)) {
                if (rules[i].flags & M_REGEX) {
                    bufp = apr_pstrmemdup(ctx->lpool, buf, bytes);
                    while (!ap_regexec(rules[i].from.r, bufp, nmatch,
                                       pmatch, 0)) {
                        match = pmatch[0].rm_so;
                        subs = ap_pregsub(f->r->pool, rules[i].to, bufp,
                                          nmatch, pmatch);
                        apr_bucket_split(b, match);
                        b1 = APR_BUCKET_NEXT(b);
                        apr_bucket_split(b1, pmatch[0].rm_eo - match);
                        b = APR_BUCKET_NEXT(b1);
                        apr_bucket_delete(b1);
                        b1 = apr_bucket_pool_create(subs, strlen(subs),
                                 f->r->pool, f->r->connection->bucket_alloc);
                        APR_BUCKET_INSERT_BEFORE(b, b1);
                        bufp += pmatch[0].rm_eo;
                    }
                }
                else {
                    bufp = buf;
                    while ((subs = apr_strmatch(rules[i].from.s, bufp,
                                                bytes)) != NULL) {
                        /* pointer subtraction, not unsigned int casts,
                         * which would truncate on 64-bit platforms */
                        match = (apr_size_t)(subs - bufp);
                        bytes -= match;
                        bufp += match;
                        apr_bucket_split(b, match);
                        b1 = APR_BUCKET_NEXT(b);
                        apr_bucket_split(b1, rules[i].length);
                        b = APR_BUCKET_NEXT(b1);
                        apr_bucket_delete(b1);
                        bytes -= rules[i].length;
                        bufp += rules[i].length;
                        b1 = apr_bucket_immortal_create(rules[i].to,
                                 strlen(rules[i].to),
                                 f->r->connection->bucket_alloc);
                        APR_BUCKET_INSERT_BEFORE(b, b1);
                    }
                }
            }
        }
        /* If we used a local pool, clear it now */
        if ((ctx->lpool != f->r->pool) && (rules[i].flags & M_REGEX)) {
            apr_pool_clear(ctx->lpool);
        }
    }

    /* now pass it down the chain */
    rv = ap_pass_brigade(f->next, bbline);

    /* if we have leftover data, don't risk it going out of scope */
    for (b = APR_BRIGADE_FIRST(ctx->bbsave);
         b != APR_BRIGADE_SENTINEL(ctx->bbsave);
         b = APR_BUCKET_NEXT(b)) {
        apr_bucket_setaside(b, f->r->pool);
    }

    return rv;
}
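The replacement step above is the classic three-way split: cut the bucket before the match, cut again after it, delete the middle, and splice in a replacement bucket. A minimal sketch of the idiom on its own (replace_span is our name; it assumes b is linked into a brigade, off+len lies within b, repl outlives the brigade as an immortal bucket requires, and real code would check the split return values):

#include <string.h>
#include "apr_buckets.h"

/* Replace the byte range [off, off+len) of bucket b with the string repl.
 * Hypothetical helper, for illustration only. */
static void replace_span(apr_bucket *b, apr_size_t off, apr_size_t len,
                         const char *repl, apr_bucket_alloc_t *ba)
{
    apr_bucket *mid, *after, *nb;

    apr_bucket_split(b, off);            /* b = bytes before the match   */
    mid = APR_BUCKET_NEXT(b);
    apr_bucket_split(mid, len);          /* mid = the matched bytes      */
    after = APR_BUCKET_NEXT(mid);
    apr_bucket_delete(mid);              /* drop the matched bytes       */

    nb = apr_bucket_immortal_create(repl, strlen(repl), ba);
    APR_BUCKET_INSERT_BEFORE(after, nb); /* splice in the replacement    */
}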
APU_DECLARE(apr_status_t) apr_brigade_partition(apr_bucket_brigade *b,
                                                apr_off_t point,
                                                apr_bucket **after_point)
{
    apr_bucket *e;
    const char *s;
    apr_size_t len;
    apr_uint64_t point64;
    apr_status_t rv;

    if (point < 0) {
        /* this could cause weird (not necessarily SEGV) things to happen */
        return APR_EINVAL;
    }
    if (point == 0) {
        *after_point = APR_BRIGADE_FIRST(b);
        return APR_SUCCESS;
    }

    /*
     * Try to reduce the following casting mess: We know that point will be
     * larger than or equal to 0 now and forever, and thus that point
     * (apr_off_t) and apr_size_t will fit into apr_uint64_t in any case.
     */
    point64 = (apr_uint64_t)point;

    APR_BRIGADE_CHECK_CONSISTENCY(b);

    for (e = APR_BRIGADE_FIRST(b);
         e != APR_BRIGADE_SENTINEL(b);
         e = APR_BUCKET_NEXT(e))
    {
        /* For an unknown length bucket, while 'point64' is beyond the
         * possible size contained in apr_size_t, read and continue...
         */
        if ((e->length == (apr_size_t)(-1))
            && (point64 > (apr_uint64_t)APR_SIZE_MAX)) {
            /* point64 is too far out to simply split this bucket,
             * we must fix this bucket's size and keep going... */
            rv = apr_bucket_read(e, &s, &len, APR_BLOCK_READ);
            if (rv != APR_SUCCESS) {
                *after_point = e;
                return rv;
            }
        }
        else if ((point64 < (apr_uint64_t)e->length)
                 || (e->length == (apr_size_t)(-1))) {
            /* We already consumed buckets where point64 is beyond
             * our interest ( point64 > APR_SIZE_MAX ), above.
             * Here point falls between 0 and APR_SIZE_MAX
             * and is within this bucket, or this bucket's len
             * is undefined, so now we are ready to split it.
             * First try to split the bucket natively... */
            if ((rv = apr_bucket_split(e, (apr_size_t)point64))
                    != APR_ENOTIMPL) {
                *after_point = APR_BUCKET_NEXT(e);
                return rv;
            }

            /* if the bucket cannot be split, we must read from it,
             * changing its type to one that can be split */
            rv = apr_bucket_read(e, &s, &len, APR_BLOCK_READ);
            if (rv != APR_SUCCESS) {
                *after_point = e;
                return rv;
            }

            /* this assumes that len == e->length, which is okay because e
             * might have been morphed by the apr_bucket_read() above, but
             * if it was, the length would have been adjusted appropriately */
            if (point64 < (apr_uint64_t)e->length) {
                rv = apr_bucket_split(e, (apr_size_t)point64);
                *after_point = APR_BUCKET_NEXT(e);
                return rv;
            }
        }
        if (point64 == (apr_uint64_t)e->length) {
            *after_point = APR_BUCKET_NEXT(e);
            return APR_SUCCESS;
        }
        point64 -= (apr_uint64_t)e->length;
    }
    *after_point = APR_BRIGADE_SENTINEL(b);
    return APR_INCOMPLETE;
}
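apr_brigade_partition() only arranges for a bucket boundary to fall at the requested byte offset; moving the resulting head segment somewhere is the caller's job. A hedged usage sketch (take_head is our name; it assumes both brigades already exist):

#include "apr_buckets.h"

/* Move the first n bytes of bb into head, bucket by bucket.  Illustrative
 * helper; apr_brigade_partition() returns APR_INCOMPLETE when bb holds
 * fewer than n bytes, in which case nothing is moved. */
static apr_status_t take_head(apr_bucket_brigade *bb, apr_off_t n,
                              apr_bucket_brigade *head)
{
    apr_bucket *after;
    apr_status_t rv = apr_brigade_partition(bb, n, &after);

    if (rv == APR_SUCCESS) {
        /* every bucket before 'after' belongs to the first n bytes */
        while (!APR_BRIGADE_EMPTY(bb) && APR_BRIGADE_FIRST(bb) != after) {
            apr_bucket *b = APR_BRIGADE_FIRST(bb);
            APR_BUCKET_REMOVE(b);
            APR_BRIGADE_INSERT_TAIL(head, b);
        }
    }
    return rv;
}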
static apr_status_t substitute_filter(ap_filter_t *f, apr_bucket_brigade *bb)
{
    apr_size_t bytes;
    apr_size_t len;
    apr_size_t fbytes;
    const char *buff;
    const char *nl = NULL;
    char *bflat;
    apr_bucket *b;
    apr_bucket *tmp_b;
    apr_bucket_brigade *tmp_bb = NULL;
    apr_status_t rv;

    subst_dir_conf *cfg =
        (subst_dir_conf *) ap_get_module_config(f->r->per_dir_config,
                                                &substitute_module);

    substitute_module_ctx *ctx = f->ctx;

    /*
     * First time around?  Create the saved bb that we use for each pass
     * through.  Note that we can also get here when we explicitly clear
     * ctx, for error handling.
     */
    if (!ctx) {
        f->ctx = ctx = apr_pcalloc(f->r->pool, sizeof(*ctx));
        /*
         * Create all the temporary brigades we need and reuse them to avoid
         * creating them over and over again from r->pool which would cost a
         * lot of memory in some cases.
         */
        ctx->linebb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
        ctx->linesbb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
        ctx->pattbb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
        /*
         * Everything to be passed to the next filter goes in
         * here, our pass brigade.
         */
        ctx->passbb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
        /* Create our temporary pool only once */
        apr_pool_create(&(ctx->tpool), f->r->pool);
        apr_table_unset(f->r->headers_out, "Content-Length");
    }

    /*
     * Short-circuit processing
     */
    if (APR_BRIGADE_EMPTY(bb))
        return APR_SUCCESS;

    /*
     * Here's the concept:
     *  Read in the data and look for newlines.  Once we
     *  find a full "line", add it to our working brigade.
     *  If we've finished reading the brigade and we have
     *  any left-over data (not a "full" line), store that
     *  for the next pass.
     *
     * Note: anything stored in ctx->linebb for sure does not have
     * a newline char, so we don't concat that bb with the new bb,
     * since we would spend time searching for a newline in data we
     * know doesn't contain one.  So instead, we simply scan our
     * current bb and, if we see a newline, prepend ctx->linebb to
     * the front of it.  This makes the code much less straight-
     * forward (otherwise we could APR_BRIGADE_CONCAT(ctx->linebb, bb)
     * and just scan for newlines and not bother with needing to know
     * when ctx->linebb needs to be reset) but also faster.  We'll
     * take the speed.
     *
     * Note: apr_brigade_split_line would be nice here, but we
     * really can't use it since we need more control and we want
     * to re-use already read bucket data.
     *
     * See mod_include if still confused :)
     */

    while ((b = APR_BRIGADE_FIRST(bb)) && (b != APR_BRIGADE_SENTINEL(bb))) {
        if (APR_BUCKET_IS_EOS(b)) {
            /*
             * if we see the EOS, then we need to pass along everything we
             * have.  But if the ctx->linebb isn't empty, then we need to
             * add that to the end of what we'll be passing.
             */
            if (!APR_BRIGADE_EMPTY(ctx->linebb)) {
                rv = apr_brigade_pflatten(ctx->linebb, &bflat,
                                          &fbytes, ctx->tpool);
                if (rv != APR_SUCCESS)
                    goto err;
                if (fbytes > cfg->max_line_length) {
                    rv = APR_ENOMEM;
                    goto err;
                }
                tmp_b = apr_bucket_transient_create(bflat, fbytes,
                                        f->r->connection->bucket_alloc);
                rv = do_pattmatch(f, tmp_b, ctx->pattbb, ctx->tpool);
                if (rv != APR_SUCCESS)
                    goto err;
                APR_BRIGADE_CONCAT(ctx->passbb, ctx->pattbb);
                apr_brigade_cleanup(ctx->linebb);
            }
            APR_BUCKET_REMOVE(b);
            APR_BRIGADE_INSERT_TAIL(ctx->passbb, b);
        }
        /*
         * No need to handle FLUSH buckets separately as we call
         * ap_pass_brigade anyway at the end of the loop.
         */
        else if (APR_BUCKET_IS_METADATA(b)) {
            APR_BUCKET_REMOVE(b);
            APR_BRIGADE_INSERT_TAIL(ctx->passbb, b);
        }
        else {
            /*
             * We have actual "data" so read in as much as we can and start
             * scanning and splitting from our read buffer
             */
            rv = apr_bucket_read(b, &buff, &bytes, APR_BLOCK_READ);
            if (rv != APR_SUCCESS || bytes == 0) {
                apr_bucket_delete(b);
            }
            else {
                int num = 0;
                while (bytes > 0) {
                    nl = memchr(buff, APR_ASCII_LF, bytes);
                    if (nl) {
                        len = (apr_size_t) (nl - buff) + 1;
                        /* split *after* the newline */
                        apr_bucket_split(b, len);
                        /*
                         * We've likely read more data, so bypass rereading
                         * bucket data and continue scanning through this
                         * buffer
                         */
                        bytes -= len;
                        buff += len;
                        /*
                         * we need b to be updated for future potential
                         * splitting
                         */
                        tmp_b = APR_BUCKET_NEXT(b);
                        APR_BUCKET_REMOVE(b);
                        /*
                         * Hey, we found a newline!  Don't forget the old
                         * stuff that needs to be added to the front.  So we
                         * add the split bucket to the end, flatten the whole
                         * bb, morph the whole shebang into a bucket which is
                         * then added to the tail of the newline bb.
                         */
                        if (!APR_BRIGADE_EMPTY(ctx->linebb)) {
                            APR_BRIGADE_INSERT_TAIL(ctx->linebb, b);
                            rv = apr_brigade_pflatten(ctx->linebb, &bflat,
                                                      &fbytes, ctx->tpool);
                            if (rv != APR_SUCCESS)
                                goto err;
                            if (fbytes > cfg->max_line_length) {
                                /* Avoid pflattening further lines, we will
                                 * abort later on anyway.
                                 */
                                rv = APR_ENOMEM;
                                goto err;
                            }
                            b = apr_bucket_transient_create(bflat, fbytes,
                                            f->r->connection->bucket_alloc);
                            apr_brigade_cleanup(ctx->linebb);
                        }
                        rv = do_pattmatch(f, b, ctx->pattbb, ctx->tpool);
                        if (rv != APR_SUCCESS)
                            goto err;
                        /*
                         * Count the buckets in ctx->pattbb and add them to
                         * the running total 'num'.  Since num is not reset
                         * on each iteration, it effectively tracks how many
                         * buckets have accumulated in ctx->passbb.
                         */
                        for (b = APR_BRIGADE_FIRST(ctx->pattbb);
                             b != APR_BRIGADE_SENTINEL(ctx->pattbb);
                             b = APR_BUCKET_NEXT(b)) {
                            num++;
                        }
                        APR_BRIGADE_CONCAT(ctx->passbb, ctx->pattbb);
                        /*
                         * If the number of buckets in ctx->passbb grows too
                         * large, the buckets themselves consume a lot of
                         * memory.  So flush them down the chain and clear
                         * ctx->passbb, freeing the buckets' memory for
                         * further processing.  Usually this condition should
                         * not become true, but it is a safety measure for
                         * edge cases.
                         */
                        if (num > AP_MAX_BUCKETS) {
                            b = apr_bucket_flush_create(
                                            f->r->connection->bucket_alloc);
                            APR_BRIGADE_INSERT_TAIL(ctx->passbb, b);
                            rv = ap_pass_brigade(f->next, ctx->passbb);
                            apr_brigade_cleanup(ctx->passbb);
                            num = 0;
                            apr_pool_clear(ctx->tpool);
                            if (rv != APR_SUCCESS)
                                goto err;
                        }
                        b = tmp_b;
                    }
                    else {
                        /*
                         * no newline in whatever is left of this buffer so
                         * tuck data away and get next bucket
                         */
                        APR_BUCKET_REMOVE(b);
                        APR_BRIGADE_INSERT_TAIL(ctx->linebb, b);
                        bytes = 0;
                    }
                }
            }
        }
        if (!APR_BRIGADE_EMPTY(ctx->passbb)) {
            rv = ap_pass_brigade(f->next, ctx->passbb);
            apr_brigade_cleanup(ctx->passbb);
            if (rv != APR_SUCCESS)
                goto err;
        }
        apr_pool_clear(ctx->tpool);
    }

    /* Anything left we want to save/setaside for the next go-around */
    if (!APR_BRIGADE_EMPTY(ctx->linebb)) {
        /*
         * Provide ap_save_brigade with an existing empty brigade
         * (ctx->linesbb) to avoid creating a new one.
         */
        ap_save_brigade(f, &(ctx->linesbb), &(ctx->linebb), f->r->pool);
        tmp_bb = ctx->linebb;
        ctx->linebb = ctx->linesbb;
        ctx->linesbb = tmp_bb;
    }

    return APR_SUCCESS;

err:
    if (rv == APR_ENOMEM)
        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, f->r, APLOGNO(01328)
                      "Line too long, URI %s", f->r->uri);
    apr_pool_clear(ctx->tpool);
    return rv;
}
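Both substitute_filter and line_edit_filter rely on the same line-assembly idiom: append the bucket that completes a line to the brigade holding the saved partial line, flatten the lot into one buffer, and wrap the result in a single bucket. A hedged sketch of just that step (complete_line is our name; pool lifetime management is up to the caller):

#include "apr_buckets.h"

/* Append the bucket that completes a line to the saved brigade linebb,
 * flatten the whole line into pool memory, and hand back one bucket
 * covering it.  Hypothetical helper mirroring the idiom above. */
static apr_status_t complete_line(apr_bucket_brigade *linebb,
                                  apr_bucket *tail, apr_pool_t *pool,
                                  apr_bucket_alloc_t *ba, apr_bucket **out)
{
    char *flat;
    apr_size_t n;
    apr_status_t rv;

    APR_BRIGADE_INSERT_TAIL(linebb, tail);
    rv = apr_brigade_pflatten(linebb, &flat, &n, pool);
    if (rv != APR_SUCCESS) {
        return rv;
    }
    *out = apr_bucket_pool_create(flat, n, pool, ba);
    apr_brigade_cleanup(linebb);  /* the saved data is now consumed */
    return APR_SUCCESS;
}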
static apr_status_t h2_conn_io_bucket_read(h2_conn_io *io,
                                           apr_read_type_e block,
                                           h2_conn_io_on_read_cb on_read_cb,
                                           void *puser, int *pdone)
{
    apr_status_t status = APR_SUCCESS;
    apr_size_t readlen = 0;
    *pdone = 0;

    while (status == APR_SUCCESS && !*pdone && !APR_BRIGADE_EMPTY(io->input)) {

        apr_bucket* bucket = APR_BRIGADE_FIRST(io->input);
        if (APR_BUCKET_IS_METADATA(bucket)) {
            /* we do nothing regarding any meta here */
        }
        else {
            const char *bucket_data = NULL;
            apr_size_t bucket_length = 0;
            status = apr_bucket_read(bucket, &bucket_data,
                                     &bucket_length, block);

            if (status == APR_SUCCESS && bucket_length > 0) {
                if (APLOGctrace2(io->connection)) {
                    char buffer[32];
                    h2_util_hex_dump(buffer, sizeof(buffer)/sizeof(buffer[0]),
                                     bucket_data, bucket_length);
                    ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, io->connection,
                                  "h2_conn_io(%ld): read %ld bytes: %s",
                                  io->connection->id, (long)bucket_length,
                                  buffer);
                }

                if (io->preface_bytes_left > 0) {
                    /* still requiring bytes from the http/2 preface */
                    size_t pre_offset = HTTP2_PREFACE_LEN - io->preface_bytes_left;
                    apr_size_t check_len = io->preface_bytes_left;
                    if (check_len > bucket_length) {
                        check_len = bucket_length;
                    }

                    if (strncmp(HTTP2_PREFACE + pre_offset, bucket_data,
                                check_len)) {
                        /* preface mismatch */
                        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, APR_EMISMATCH,
                                      io->connection,
                                      "h2_conn_io(%ld): preface check",
                                      io->connection->id);
                        return APR_EMISMATCH;
                    }
                    io->preface_bytes_left -= check_len;
                    bucket_data += check_len;
                    bucket_length -= check_len;

                    /* cast the size arguments explicitly so they match the
                     * %d conversions on all platforms */
                    ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, io->connection,
                                  "h2_conn_io(%ld): preface check: %d bytes "
                                  "matched, remaining %d",
                                  io->connection->id, (int)check_len,
                                  (int)io->preface_bytes_left);
                }

                if (bucket_length > 0) {
                    apr_size_t consumed = 0;
                    status = on_read_cb(bucket_data, bucket_length,
                                        &consumed, pdone, puser);
                    if (status == APR_SUCCESS && bucket_length > consumed) {
                        /* We have data left in the bucket.  Split it, so
                         * that only the consumed part is deleted below. */
                        status = apr_bucket_split(bucket, consumed);
                    }
                    readlen += consumed;
                }
            }
        }
        /* Only delete the bucket if its contents were successfully
         * consumed; on a failed or would-block read the data must stay
         * in the brigade for the next call. */
        if (status == APR_SUCCESS) {
            apr_bucket_delete(bucket);
        }
    }
    if (readlen == 0 && status == APR_SUCCESS && block == APR_NONBLOCK_READ) {
        return APR_EAGAIN;
    }
    return status;
}
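The consume-then-split step at the heart of the h2 reader generalizes to any partial consumer: when a callback accepts only part of a bucket, split the bucket at the consumed/unconsumed boundary and delete just the consumed half, leaving the remainder at the front of the brigade. A hedged sketch (consume_front is our name; it assumes the first bucket has already been read, so its length is known rather than -1):

#include "apr_buckets.h"

/* Drop the first 'consumed' bytes of the first bucket in bb.
 * Hypothetical helper, for illustration only. */
static apr_status_t consume_front(apr_bucket_brigade *bb, apr_size_t consumed)
{
    apr_bucket *b = APR_BRIGADE_FIRST(bb);

    if (b == APR_BRIGADE_SENTINEL(bb)) {
        return APR_EOF;  /* nothing left to consume */
    }
    if (consumed < b->length) {
        /* split at the boundary; b keeps the consumed prefix */
        apr_status_t rv = apr_bucket_split(b, consumed);
        if (rv != APR_SUCCESS) {
            return rv;
        }
    }
    apr_bucket_delete(b);  /* the remainder, if any, is now a new bucket */
    return APR_SUCCESS;
}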