/* mod_filter harness: dispatches to the provider filter selected for this
 * request, keeping the harness's own context separate from the provider's.
 * Removes itself (and passes the brigade through) whenever the provider
 * should not run. */
static apr_status_t filter_harness(ap_filter_t *f, apr_bucket_brigade *bb)
{
    apr_status_t ret;
#ifndef NO_PROTOCOL
    const char *cachecontrol;
    char *str;
#endif
    harness_ctx *ctx = f->ctx;
    ap_filter_rec_t *filter = f->frec;

    /* Only filter 200 responses, unless filtering of error documents was
     * explicitly requested via the filter-errordocs env var. */
    if (f->r->status != 200
        && !apr_table_get(f->r->subprocess_env, "filter-errordocs")) {
        ap_remove_output_filter(f);
        return ap_pass_brigade(f->next, bb);
    }

    filter_trace(f->c, filter->debug, f->frec->name, bb);

    /* look up a handler function if we haven't already set it */
    if (!ctx->func) {
#ifndef NO_PROTOCOL
        if (f->r->proxyreq) {
            /* Provider declared itself unsafe for proxied responses. */
            if (filter->proto_flags & AP_FILTER_PROTO_NO_PROXY) {
                ap_remove_output_filter(f);
                return ap_pass_brigade(f->next, bb);
            }

            /* Provider transforms content: honour an origin-supplied
             * Cache-Control: no-transform (case-insensitive match). */
            if (filter->proto_flags & AP_FILTER_PROTO_TRANSFORM) {
                cachecontrol = apr_table_get(f->r->headers_out,
                                             "Cache-Control");
                if (cachecontrol) {
                    str = apr_pstrdup(f->r->pool, cachecontrol);
                    ap_str_tolower(str);
                    if (strstr(str, "no-transform")) {
                        ap_remove_output_filter(f);
                        return ap_pass_brigade(f->next, bb);
                    }
                }
            }
        }
#endif
        /* No provider matched this request: step aside. */
        if (!filter_lookup(f, filter)) {
            ap_remove_output_filter(f);
            return ap_pass_brigade(f->next, bb);
        }
    }

    /* call the content filter with its own context, then restore our
     * context */
    f->ctx = ctx->fctx;
    ret = ctx->func(f, bb);
    ctx->fctx = f->ctx;
    f->ctx = ctx;
    return ret;
}
/* mod_ext_filter output filter (unified-filter variant).
 * Lazily initialises its per-request context on first invocation; on
 * initialisation failure it either removes itself (onfail == 1, "removing")
 * or aborts the response with a 500 error bucket.  Otherwise it runs the
 * external filter over the brigade and passes the result downstream. */
static apr_status_t ef_output_filter(ap_filter_t *f, apr_bucket_brigade *bb)
{
    request_rec *r = f->r;
    ef_ctx_t *ctx = f->ctx;
    apr_status_t rv;

    if (!ctx) {
        if ((rv = init_filter_instance(f)) != APR_SUCCESS) {
            /* NOTE(review): this assumes init_filter_instance() always sets
             * f->ctx before it can fail — ctx->dc is dereferenced below.
             * Verify against init_filter_instance. */
            ctx = f->ctx;
            ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
                          "can't initialise output filter %s: %s",
                          f->frec->name,
                          (ctx->dc->onfail == 1) ? "removing" : "aborting");
            ap_remove_output_filter(f);
            if (ctx->dc->onfail == 1) {
                /* Best-effort mode: deliver the content unfiltered. */
                return ap_pass_brigade(f->next, bb);
            }
            else {
                /* Abort mode: replace the brigade with an error bucket +
                 * EOS so the error document machinery takes over. */
                apr_bucket *e;
                f->r->status_line = "500 Internal Server Error";
                apr_brigade_cleanup(bb);
                e = ap_bucket_error_create(HTTP_INTERNAL_SERVER_ERROR,
                                           NULL, r->pool,
                                           f->c->bucket_alloc);
                APR_BRIGADE_INSERT_TAIL(bb, e);
                e = apr_bucket_eos_create(f->c->bucket_alloc);
                APR_BRIGADE_INSERT_TAIL(bb, e);
                ap_pass_brigade(f->next, bb);
                return AP_FILTER_ERROR;
            }
        }
        ctx = f->ctx;
    }

    /* Configured as a no-op for this request (e.g. content type mismatch). */
    if (ctx->noop) {
        ap_remove_output_filter(f);
        return ap_pass_brigade(f->next, bb);
    }

    /* Run the external program over the brigade in place; on failure we log
     * but still pass whatever is in bb downstream (deliberate best-effort). */
    rv = ef_unified_filter(f, bb);
    if (rv != APR_SUCCESS) {
        ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
                      "ef_unified_filter() failed");
    }
    if ((rv = ap_pass_brigade(f->next, bb)) != APR_SUCCESS) {
        ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
                      "ap_pass_brigade() failed");
    }
    return rv;
}
/* Rewrite a Location response header that points at the master (mirrored)
 * server so that it points back at this slave's root directory instead.
 *
 * BUG FIX: the original called svn_path_uri_encode(master_uri, ...) before
 * checking master_uri for NULL, dereferencing NULL whenever no master URI
 * is configured.  The encoding now happens only after the guard. */
apr_status_t dav_svn__location_header_filter(ap_filter_t *f,
                                             apr_bucket_brigade *bb)
{
  request_rec *r = f->r;
  const char *master_uri;
  const char *location, *start_foo = NULL;

  /* Don't filter if we're in a subrequest or we aren't setup to
     proxy anything. */
  master_uri = dav_svn__get_master_uri(r);
  if (r->main || !master_uri)
    {
      ap_remove_output_filter(f);
      return ap_pass_brigade(f->next, bb);
    }

  /* Encode only once we know master_uri is non-NULL. */
  master_uri = svn_path_uri_encode(master_uri, r->pool);

  location = apr_table_get(r->headers_out, "Location");
  if (location)
    {
      start_foo = ap_strstr_c(location, master_uri);
    }

  if (start_foo)
    {
      const char *new_uri;

      /* Skip the master prefix and re-root the remainder under our own
         configured root directory. */
      start_foo += strlen(master_uri);
      new_uri = ap_construct_url(r->pool,
                                 apr_pstrcat(r->pool,
                                             dav_svn__get_root_dir(r), "/",
                                             start_foo,
                                             SVN_VA_NULL),
                                 r);
      apr_table_set(r->headers_out, "Location", new_uri);
    }
  return ap_pass_brigade(f->next, bb);
}
// Dump authentication result. static apr_status_t auth_result_dump_filter(ap_filter_t* flt, apr_bucket_brigade* bb) { request_rec* rec = (request_rec*)flt->r; AP_LOG_INFO(rec, "Auth result: user=%s, type=%s", rec->user, rec->ap_auth_type); ap_remove_output_filter(flt); return ap_pass_brigade(flt->next, bb); }
/* Debug filter: traces entry on stdout, reports any X-Frame-Options header
 * already present, tags the response with a "con" header, then removes
 * itself so it runs at most once per request. */
apr_status_t con_filter(ap_filter_t *f, apr_bucket_brigade *bb)
{
    const char *xfo;

    printf("enter %s %d\n", __FUNCTION__, __LINE__);

    xfo = apr_table_get(f->r->headers_out, "X-Frame-Options");
    if (xfo != NULL) {
        printf("x-frame-options is %s\n", xfo);
    }

    /* setn: key/value are static, no pool copy needed. */
    apr_table_setn(f->r->headers_out, "con", "i am con");

    ap_remove_output_filter(f);
    return ap_pass_brigade(f->next, bb);
}
/* Record the wall-clock time at which output filtering began for this
 * request (stored in the file-scope global output_start_time, read later
 * by the logging code), then remove this one-shot filter.
 *
 * FIX: dropped the unused local `r` (it was only referenced from
 * commented-out code and triggered an unused-variable warning). */
static int log_output_start(ap_filter_t *f, apr_bucket_brigade *in){
    output_start_time = cur_time();
    /* ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, f->r,
     *               "OUTPUT_TIME_S= %f", output_start_time); */
    ap_remove_output_filter(f);
    return ap_pass_brigade(f->next, in);
}
/* Abort range processing and emit a 416 Range Not Satisfiable response:
 * unhook this filter, reset the request status, and push an error bucket
 * followed by EOS down the chain. */
static apr_status_t send_416(ap_filter_t *f, apr_bucket_brigade *tmpbb)
{
    conn_rec *conn = f->r->connection;
    apr_bucket *bkt;

    ap_remove_output_filter(f);
    f->r->status = HTTP_OK;

    bkt = ap_bucket_error_create(HTTP_RANGE_NOT_SATISFIABLE, NULL,
                                 f->r->pool, conn->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(tmpbb, bkt);

    bkt = apr_bucket_eos_create(conn->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(tmpbb, bkt);

    return ap_pass_brigade(f->next, tmpbb);
}
/* Debug filter: traces entry/exit on stdout, reports any existing
 * X-Frame-Options header, tags the response with a "pro" header and
 * force-overrides X-Frame-Options, then removes itself. */
apr_status_t pro_filter(ap_filter_t *f, apr_bucket_brigade *bb)
{
    const char *xfo;

    printf("enter %s %d\n", __FUNCTION__, __LINE__);

    xfo = apr_table_get(f->r->headers_out, "X-Frame-Options");
    if (xfo != NULL) {
        printf("x-frame-options is %s\n", xfo);
    }

    apr_table_setn(f->r->headers_out, "pro", "i am pro");
    apr_table_setn(f->r->headers_out, "X-Frame-Options", "ALL ALLOW");
    printf("url %s header %s\n", f->r->uri, f->r->handler);

    /* (A disabled experiment that scanned the brigade's raw buckets and
     * spliced out the X-Frame-Options line used to sit here, fully
     * commented out; it was never active and has been removed.) */

    ap_remove_output_filter(f);
    printf("leave function %s\n", __FUNCTION__);
    return ap_pass_brigade(f->next, bb);
}
/* Time-to-first-byte tracker: on the first pass through (when no request
 * note exists yet) record now - request_time into a per-request config,
 * provided TTFB tracking is enabled for this directory.  Always removes
 * itself afterwards so the measurement happens exactly once. */
static apr_status_t logio_ttfb_filter(ap_filter_t *f, apr_bucket_brigade *b)
{
    request_rec *req = f->r;
    logio_dirconf_t *dconf =
        ap_get_module_config(req->per_dir_config, &logio_module);

    if (dconf != NULL && dconf->track_ttfb) {
        logio_req_t *note =
            ap_get_module_config(req->request_config, &logio_module);

        if (!note) {
            note = apr_pcalloc(req->pool, sizeof(*note));
            note->ttfb = apr_time_now() - req->request_time;
            ap_set_module_config(req->request_config, &logio_module, note);
        }
    }

    ap_remove_output_filter(f);
    return ap_pass_brigade(f->next, b);
}
/* Feed response buckets through the HTML parser (initialised lazily on the
 * first data bucket) and pass the rewritten output downstream.
 *
 * BUG FIX: in the EOS/FLUSH branch the return value of ap_pass_brigade()
 * was discarded and APR_SUCCESS returned unconditionally, hiding downstream
 * failures (e.g. an aborted connection).  The status is now propagated. */
static int cdn_html_filter(ap_filter_t * f, apr_bucket_brigade * bb)
{
    apr_bucket *b;
    const char *buf = 0;
    apr_size_t bytes = 0;

    /* now do HTML filtering if necessary, and pass the brigade onward */
    saxctxt *ctxt = check_html_filter_init(f);
    if (!ctxt)
        return ap_pass_brigade(f->next, bb);

    for (b = APR_BRIGADE_FIRST(bb); b != APR_BRIGADE_SENTINEL(bb);
         b = APR_BUCKET_NEXT(b)) {
        if (APR_BUCKET_IS_EOS(b) || APR_BUCKET_IS_FLUSH(b)) {
            /* Flush any parser-held data, move the metadata bucket onto our
             * own brigade, and send it all on — reporting any failure. */
            consume_buffer(ctxt, buf, 0, 1);
            APR_BUCKET_REMOVE(b);
            APR_BRIGADE_INSERT_TAIL(ctxt->bb, b);
            return ap_pass_brigade(ctxt->f->next, ctxt->bb);
        }
        if (apr_bucket_read(b, &buf, &bytes, APR_BLOCK_READ) == APR_SUCCESS
            && buf) {
            if (ctxt->parser == NULL) {
                /*
                 * for now, always output utf-8; we could incorporate
                 * mod_proxy_html's output transcoding with little problem if
                 * necessary
                 */
                ap_set_content_type(f->r, "text/html;charset=utf-8");
                if (!initialize_parser(f, ctxt, &buf, bytes)) {
                    /* Parser could not start: hand the untouched brigade on
                     * and drop out of the chain. */
                    apr_status_t rv = ap_pass_brigade(ctxt->f->next, bb);
                    ap_remove_output_filter(f);
                    return rv;
                }
                else
                    ap_fputs(f->next, ctxt->bb, ctxt->cfg->doctype);
            }
            consume_buffer(ctxt, buf, bytes, 0);
        }
    }

    /*ap_fflush(ctxt->f->next, ctxt->bb) ; */ /* uncomment for debug */
    apr_brigade_cleanup(bb);
    return APR_SUCCESS;
}
/* Lazily create the per-request SAX context for the cdn HTML filter.
 * Returns the existing context if already initialised; returns NULL (after
 * removing the filter) when the response has no content type or its type
 * is not in the configured list. */
static saxctxt *check_html_filter_init(ap_filter_t * f)
{
    cdn_conf *cfg;
    const char *why = NULL;
    saxctxt *fctx;

    /* Already set up on an earlier invocation. */
    if (f->ctx)
        return f->ctx;

    cfg = ap_get_module_config(f->r->per_dir_config, &cdn_module);

    if (!f->r->content_type) {
        why = "check_html_filter_init: no content-type; bailing out of cdn filter";
    }
    else if (cfg->content_types) {
        /* Prefix-match the response type against each configured type. */
        tattr *types = (tattr *)cfg->content_types->elts;
        int idx, matched = 0;

        for (idx = 0; idx < cfg->content_types->nelts; ++idx) {
            if (!strncasecmp(types[idx].val, f->r->content_type,
                             strlen(types[idx].val))) {
                matched = 1;
                break;
            }
        }
        if (!matched)
            why = "check_html_filter_init: Content-type doesn't match any defined types; "
                "not inserting cdn filter";
    }

    if (why) {
        ap_log_error(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, f->r->server,
                     "%s", why);
        ap_remove_output_filter(f);
        return NULL;
    }

    fctx = f->ctx = apr_pcalloc(f->r->pool, sizeof(saxctxt));
    fctx->f = f;
    fctx->bb = apr_brigade_create(f->r->pool,
                                  f->r->connection->bucket_alloc);
    fctx->cfg = cfg;

    /* Rewriting changes the body size; the stale length must not be sent. */
    apr_table_unset(f->r->headers_out, "Content-Length");

    return f->ctx;
}
// Filter outgoing content of requests and replace `Set-Cookie` headers by a // single token (defined in input handler). // The upstream cookies will be stored through the configured driver. apr_status_t psm_output_filter(ap_filter_t* f, apr_bucket_brigade* bb) { psm_request_vars *vars; psm_filter_data *data; psm_server_conf *conf; // Fetch configuration of the server conf = (psm_server_conf*) ap_get_module_config(f->r->server->module_config, &psm_module); // Execute filter only on initial request if (! ap_is_initial_req(f->r)) return ap_pass_brigade(f->next, bb); // Fetch configuration created by `psm_input_handler` vars = (psm_request_vars *)ap_get_module_config(f->r->request_config, &psm_module); if (vars == NULL) { // TODO GENERATE ERROR: 500 } // Parse and list every 'Set-Cookie' headers data = (psm_filter_data *)apr_palloc(f->r->pool, sizeof(psm_filter_data)); data->request = f->r; data->cookies = apr_array_make(f->r->pool, PSM_ARRAY_INIT_SZ, sizeof(psm_cookie *)); apr_table_do(psm_parse_set_cookie, data, f->r->headers_out, HEADER_SET_COOKIE, NULL); // Let the driver save cookies conf->driver->save_cookies(f->r->pool, *conf->driver->data, data->cookies, vars->token); // Replace outgoing "Set-Cookie" header by session token psm_write_set_cookie(f->r->headers_out, &(psm_cookie) { PSM_TOKEN_NAME, vars->token, }); // Remove this filter and go to next one ap_remove_output_filter(f); return ap_pass_brigade(f->next, bb); }
/* mod_ext_filter output filter (child-pipe variant).
 * Streams each data bucket into the external program's stdin, then drains
 * the program's stdout into transient buckets that are passed downstream.
 * On EOS, the child's stdin is closed to flush it and a fresh EOS brigade
 * is sent once the child's output is exhausted. */
static apr_status_t ef_output_filter(ap_filter_t *f, apr_bucket_brigade *bb)
{
    request_rec *r = f->r;
    conn_rec *c = r->connection;
    ef_ctx_t *ctx = f->ctx;
    apr_bucket *b;
    ef_dir_t *dc;
    apr_size_t len;
    const char *data;
    apr_status_t rv;
    char buf[4096];
    apr_bucket *eos = NULL;

    /* Lazy per-request initialisation (spawns the child process). */
    if (!ctx) {
        if ((rv = init_filter_instance(f)) != APR_SUCCESS) {
            return rv;
        }
        ctx = f->ctx;
    }

    /* Configured as a no-op for this request: step aside. */
    if (ctx->noop) {
        ap_remove_output_filter(f);
        return ap_pass_brigade(f->next, bb);
    }

    dc = ctx->dc;

    /* Feed every data bucket to the child; stop at EOS.  (APR_BRIGADE_FOREACH
     * is a deprecated APR macro kept here for fidelity with the original.) */
    APR_BRIGADE_FOREACH(b, bb) {
        if (APR_BUCKET_IS_EOS(b)) {
            eos = b;
            break;
        }

        rv = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
        if (rv != APR_SUCCESS) {
            ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, "apr_bucket_read()");
            return rv;
        }

        /* Good cast, we just tested len isn't negative */
        if (len > 0 &&
            (rv = pass_data_to_filter(f, data, (apr_size_t)len))
                != APR_SUCCESS) {
            return rv;
        }
    }

    apr_brigade_destroy(bb);

    /* XXX What we *really* need to do once we've hit eos is create a pipe bucket
     * from the child output pipe and pass down the pipe bucket + eos.
     */
    if (eos) {
        /* close the child's stdin to signal that no more data is coming;
         * that will cause the child to finish generating output
         */
        if ((rv = apr_file_close(ctx->proc->in)) != APR_SUCCESS) {
            ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
                          "apr_file_close(child input)");
            return rv;
        }
        /* since we've seen eos and closed the child's stdin, set the proper pipe
         * timeout; we don't care if we don't return from apr_file_read() for a while...
         */
        rv = apr_file_pipe_timeout_set(ctx->proc->out,
                                       r->server->timeout);
        if (rv) {
            ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
                          "apr_file_pipe_timeout_set(child output)");
            return rv;
        }
    }

    /* Drain whatever the child has produced so far, passing each chunk
     * downstream as a transient bucket. */
    do {
        len = sizeof(buf);
        rv = apr_file_read(ctx->proc->out, buf, &len);
        if ((rv && !APR_STATUS_IS_EOF(rv) && !APR_STATUS_IS_EAGAIN(rv)) ||
            dc->debug >= DBGLVL_GORY) {
            /* NOTE(review): `-1` here is passed where APR_SIZE_T_FMT expects
             * an apr_size_t — pre-existing format mismatch; confirm before
             * touching the log call. */
            ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r,
                          "apr_file_read(child output), len %" APR_SIZE_T_FMT,
                          !rv ? len : -1);
        }
        if (APR_STATUS_IS_EAGAIN(rv)) {
            if (eos) {
                /* should not occur, because we have an APR timeout in place */
                AP_DEBUG_ASSERT(1 != 1);
            }
            /* Non-blocking read found nothing yet; try again on next call. */
            return APR_SUCCESS;
        }

        if (rv == APR_SUCCESS) {
            bb = apr_brigade_create(r->pool, c->bucket_alloc);
            b = apr_bucket_transient_create(buf, len, c->bucket_alloc);
            APR_BRIGADE_INSERT_TAIL(bb, b);
            if ((rv = ap_pass_brigade(f->next, bb)) != APR_SUCCESS) {
                ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
                              "ap_pass_brigade(filtered buffer) failed");
                return rv;
            }
        }
    } while (rv == APR_SUCCESS);

    if (!APR_STATUS_IS_EOF(rv)) {
        return rv;
    }

    if (eos) {
        /* pass down eos */
        bb = apr_brigade_create(r->pool, c->bucket_alloc);
        b = apr_bucket_eos_create(c->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(bb, b);
        if ((rv = ap_pass_brigade(f->next, bb)) != APR_SUCCESS) {
            ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
                          "ap_pass_brigade(eos) failed");
            return rv;
        }
    }
    return APR_SUCCESS;
}
/* mod_xsendfile output filter: if the handler set an X-Sendfile header,
 * discard the generated body and serve the named file directly (with
 * sendfile/mmap when available), fixing up caching headers along the way.
 *
 * BUG FIX: the Last-Modified guard tested r->headers_out twice (copy-paste);
 * it now mirrors the ETag guard and tests headers_out AND err_headers_out.
 * Also removed the unused local `sr`. */
static apr_status_t ap_xsendfile_output_filter(ap_filter_t *f, apr_bucket_brigade *in)
{
    request_rec *r = f->r;
    xsendfile_conf_t
        *dconf = (xsendfile_conf_t *)ap_get_module_config(r->per_dir_config, &xsendfile_module),
        *sconf = (xsendfile_conf_t *)ap_get_module_config(r->server->module_config, &xsendfile_module),
        *conf = xsendfile_config_merge(r->pool, sconf, dconf);
    core_dir_config *coreconf = (core_dir_config *)ap_get_module_config(r->per_dir_config, &core_module);

    apr_status_t rv;
    apr_bucket *e;

    apr_file_t *fd = NULL;
    apr_finfo_t finfo;

    const char *file = NULL;
    char *translated = NULL;

    int errcode;

#ifdef _DEBUG
    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, "xsendfile: output_filter for %s", r->the_request);
#endif
    /* should we proceed with this request?
     * sub-requests suck
     * furthermore default-handled requests suck, as they actually shouldn't be able to set headers
     */
    if (
        r->status != HTTP_OK
        || r->main
        || (r->handler && strcmp(r->handler, "default-handler") == 0) /* those table-keys are lower-case, right? */
    ) {
#ifdef _DEBUG
        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, "xsendfile: not met [%d]", r->status);
#endif
        ap_remove_output_filter(f);
        return ap_pass_brigade(f->next, in);
    }

    /* alright, look for x-sendfile */
    file = apr_table_get(r->headers_out, AP_XSENDFILE_HEADER);
    apr_table_unset(r->headers_out, AP_XSENDFILE_HEADER);

    /* cgi/fastcgi will put the stuff into err_headers_out */
    if (!file || !*file) {
        file = apr_table_get(r->err_headers_out, AP_XSENDFILE_HEADER);
        apr_table_unset(r->err_headers_out, AP_XSENDFILE_HEADER);
    }

    /* nothing there :p */
    if (!file || !*file) {
#ifdef _DEBUG
        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, "xsendfile: nothing found");
#endif
        ap_remove_output_filter(f);
        return ap_pass_brigade(f->next, in);
    }

    /* drop *everything*
     * might be pretty expensive to generate content first that goes straight
     * to the bitbucket, but actually the scripts that might set this flag
     * won't output too much anyway
     */
    while (!APR_BRIGADE_EMPTY(in)) {
        e = APR_BRIGADE_FIRST(in);
        apr_bucket_delete(e);
    }
    r->eos_sent = 0;

    rv = ap_xsendfile_get_filepath(r, conf, file, &translated);
    if (rv != OK) {
        ap_log_rerror(
            APLOG_MARK, APLOG_ERR, rv, r,
            "xsendfile: unable to find file: %s", file
        );
        ap_remove_output_filter(f);
        ap_die(HTTP_NOT_FOUND, r);
        return HTTP_NOT_FOUND;
    }

#ifdef _DEBUG
    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, "xsendfile: found %s", translated);
#endif

    /* try to open the file */
    if ((rv = apr_file_open(
        &fd,
        translated,
        APR_READ | APR_BINARY
#if APR_HAS_SENDFILE
        | (coreconf->enable_sendfile == ENABLE_SENDFILE_ON ? APR_SENDFILE_ENABLED : 0)
#endif
        ,
        0,
        r->pool
    )) != APR_SUCCESS) {
        ap_log_rerror(
            APLOG_MARK, APLOG_ERR, rv, r,
            "xsendfile: cannot open file: %s", translated
        );
        ap_remove_output_filter(f);
        ap_die(HTTP_NOT_FOUND, r);
        return HTTP_NOT_FOUND;
    }

#if APR_HAS_SENDFILE && defined(_DEBUG)
    if (coreconf->enable_sendfile != ENABLE_SENDFILE_ON) {
        ap_log_error(
            APLOG_MARK, APLOG_WARNING, 0, r->server,
            "xsendfile: sendfile configured, but not active %d", coreconf->enable_sendfile
        );
    }
#endif

    /* stat (for etag/cache/content-length stuff) */
    if ((rv = apr_file_info_get(&finfo, APR_FINFO_NORM, fd)) != APR_SUCCESS) {
        ap_log_rerror(
            APLOG_MARK, APLOG_ERR, rv, r,
            "xsendfile: unable to stat file: %s", translated
        );
        apr_file_close(fd);
        ap_remove_output_filter(f);
        ap_die(HTTP_FORBIDDEN, r);
        return HTTP_FORBIDDEN;
    }

    /* no inclusion of directories! we're serving files! */
    if (finfo.filetype != APR_REG) {
        ap_log_rerror(
            APLOG_MARK, APLOG_ERR, APR_EBADPATH, r,
            "xsendfile: not a file %s", translated
        );
        apr_file_close(fd);
        ap_remove_output_filter(f);
        ap_die(HTTP_NOT_FOUND, r);
        return HTTP_NOT_FOUND;
    }

    /* need to cheat here a bit
     * as etag generator will use those ;)
     * and we want local_copy and cache
     */
    r->finfo.inode = finfo.inode;
    r->finfo.size = finfo.size;

    /* caching? why not :p */
    r->no_cache = r->no_local_copy = 0;

    /* some script (f?cgi) place stuff in err_headers_out
     * (FIXED: second test was headers_out twice; now mirrors the etag test)
     */
    if (
        conf->ignoreLM == XSENDFILE_ENABLED
        || (
            !apr_table_get(r->headers_out, "last-modified")
            && !apr_table_get(r->err_headers_out, "last-modified")
        )
    ) {
        apr_table_unset(r->err_headers_out, "last-modified");
        ap_update_mtime(r, finfo.mtime);
        ap_set_last_modified(r);
    }
    if (
        conf->ignoreETag == XSENDFILE_ENABLED
        || (
            !apr_table_get(r->headers_out, "etag")
            && !apr_table_get(r->err_headers_out, "etag")
        )
    ) {
        apr_table_unset(r->err_headers_out, "etag");
        ap_set_etag(r);
    }

    apr_table_unset(r->err_headers_out, "content-length");
    ap_set_content_length(r, finfo.size);

    /* as we dropped all the content this field is not valid anymore! */
    apr_table_unset(r->headers_out, "Content-Encoding");
    apr_table_unset(r->err_headers_out, "Content-Encoding");

    /* cache or something? */
    if ((errcode = ap_meets_conditions(r)) != OK) {
#ifdef _DEBUG
        ap_log_error(
            APLOG_MARK, APLOG_DEBUG, 0, r->server,
            "xsendfile: met condition %d for %s", errcode, file
        );
#endif
        apr_file_close(fd);
        r->status = errcode;
    }
    else {
        /* For platforms where the size of the file may be larger than
         * that which can be stored in a single bucket (where the
         * length field is an apr_size_t), split it into several
         * buckets: */
        if (sizeof(apr_off_t) > sizeof(apr_size_t)
            && finfo.size > AP_MAX_SENDFILE) {
            apr_off_t fsize = finfo.size;
            e = apr_bucket_file_create(fd, 0, AP_MAX_SENDFILE, r->pool,
                                       in->bucket_alloc);
            while (fsize > AP_MAX_SENDFILE) {
                apr_bucket *ce;
                apr_bucket_copy(e, &ce);
                APR_BRIGADE_INSERT_TAIL(in, ce);
                e->start += AP_MAX_SENDFILE;
                fsize -= AP_MAX_SENDFILE;
            }
            e->length = (apr_size_t)fsize; /* Resize just the last bucket */
        }
        else {
            e = apr_bucket_file_create(fd, 0, (apr_size_t)finfo.size,
                                       r->pool, in->bucket_alloc);
        }

#if APR_HAS_MMAP
        if (coreconf->enable_mmap == ENABLE_MMAP_ON) {
            apr_bucket_file_enable_mmap(e, 0);
        }
#if defined(_DEBUG)
        else {
            ap_log_error(
                APLOG_MARK, APLOG_WARNING, 0, r->server,
                "xsendfile: mmap configured, but not active %d", coreconf->enable_mmap
            );
        }
#endif /* _DEBUG */
#endif /* APR_HAS_MMAP */
        APR_BRIGADE_INSERT_TAIL(in, e);
    }

    e = apr_bucket_eos_create(in->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(in, e);

    /* remove ourselves from the filter chain */
    ap_remove_output_filter(f);

#ifdef _DEBUG
    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, "xsendfile: sending %d bytes", (int)finfo.size);
#endif

    /* send the data up the stack */
    return ap_pass_brigade(f->next, in);
}
/* mod_ratelimit output filter: throttles the response body to the
 * byte-rate given by the `rate-limit` env var (KiB/s), optionally letting
 * an initial `rate-initial-burst` amount through unthrottled.  Special
 * AP_RL start/end buckets switch between RATE_LIMIT and RATE_FULLSPEED
 * states; unconsumed data is parked in ctx->holdingbb between calls. */
static apr_status_t rate_limit_filter(ap_filter_t *f, apr_bucket_brigade *bb)
{
    apr_status_t rv = APR_SUCCESS;
    rl_ctx_t *ctx = f->ctx;
    apr_bucket_alloc_t *ba = f->r->connection->bucket_alloc;

    /* Set up our rl_ctx_t on first use */
    if (ctx == NULL) {
        const char *rl = NULL;
        int ratelimit;
        int burst = 0;

        /* no subrequests. */
        if (f->r->main != NULL) {
            ap_remove_output_filter(f);
            return ap_pass_brigade(f->next, bb);
        }

        /* Configuration: rate limit (absent -> filter disabled) */
        rl = apr_table_get(f->r->subprocess_env, "rate-limit");

        if (rl == NULL) {
            ap_remove_output_filter(f);
            return ap_pass_brigade(f->next, bb);
        }

        /* rl is in kilo bytes / second */
        ratelimit = atoi(rl) * 1024;
        if (ratelimit <= 0) {
            /* remove ourselves */
            ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r,
                          APLOGNO(03488) "rl: disabling: rate-limit = %s (too high?)", rl);
            ap_remove_output_filter(f);
            return ap_pass_brigade(f->next, bb);
        }

        /* Configuration: optional initial burst */
        rl = apr_table_get(f->r->subprocess_env, "rate-initial-burst");
        if (rl != NULL) {
            burst = atoi(rl) * 1024;
            if (burst <= 0) {
                ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r,
                              APLOGNO(03489) "rl: disabling burst: rate-initial-burst = %s (too high?)", rl);
                burst = 0;
            }
        }

        /* Set up our context */
        ctx = apr_palloc(f->r->pool, sizeof(rl_ctx_t));
        f->ctx = ctx;
        ctx->state = RATE_LIMIT;
        ctx->speed = ratelimit;
        ctx->burst = burst;
        ctx->do_sleep = 0;

        /* calculate how many bytes / interval we want to send */
        /* speed is bytes / second, so, how many (speed / 1000 % interval) */
        ctx->chunk_size = (ctx->speed / (1000 / RATE_INTERVAL_MS));
        ctx->tmpbb = apr_brigade_create(f->r->pool, ba);
        ctx->holdingbb = apr_brigade_create(f->r->pool, ba);
    }
    else {
        /* Data held over from the previous invocation goes first. */
        APR_BRIGADE_PREPEND(bb, ctx->holdingbb);
    }

    while (!APR_BRIGADE_EMPTY(bb)) {
        apr_bucket *e;

        if (ctx->state == RATE_FULLSPEED) {
            /* Find where we 'stop' going full speed. */
            for (e = APR_BRIGADE_FIRST(bb);
                 e != APR_BRIGADE_SENTINEL(bb); e = APR_BUCKET_NEXT(e)) {
                if (AP_RL_BUCKET_IS_END(e)) {
                    apr_brigade_split_ex(bb, e, ctx->holdingbb);
                    ctx->state = RATE_LIMIT;
                    break;
                }
            }

            /* Flush the full-speed portion downstream immediately. */
            e = apr_bucket_flush_create(ba);
            APR_BRIGADE_INSERT_TAIL(bb, e);
            rv = ap_pass_brigade(f->next, bb);
            apr_brigade_cleanup(bb);

            if (rv != APR_SUCCESS) {
                ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, f->r, APLOGNO(01455)
                              "rl: full speed brigade pass failed.");
                return rv;
            }
        }
        else {
            /* Split off any portion marked to run at full speed. */
            for (e = APR_BRIGADE_FIRST(bb);
                 e != APR_BRIGADE_SENTINEL(bb); e = APR_BUCKET_NEXT(e)) {
                if (AP_RL_BUCKET_IS_START(e)) {
                    apr_brigade_split_ex(bb, e, ctx->holdingbb);
                    ctx->state = RATE_FULLSPEED;
                    break;
                }
            }

            while (!APR_BRIGADE_EMPTY(bb)) {
                apr_off_t len = ctx->chunk_size + ctx->burst;

                APR_BRIGADE_CONCAT(ctx->tmpbb, bb);

                /*
                 * Pull next chunk of data; the initial amount is our
                 * burst allotment (if any) plus a chunk.  All subsequent
                 * iterations are just chunks with whatever remaining
                 * burst amounts we have left (in case not done in the
                 * first bucket).
                 */
                rv = apr_brigade_partition(ctx->tmpbb, len, &e);
                if (rv != APR_SUCCESS && rv != APR_INCOMPLETE) {
                    ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, f->r, APLOGNO(01456)
                                  "rl: partition failed.");
                    return rv;
                }

                /* Send next metadata now if any */
                while (e != APR_BRIGADE_SENTINEL(ctx->tmpbb)
                       && APR_BUCKET_IS_METADATA(e)) {
                    e = APR_BUCKET_NEXT(e);
                }
                if (e != APR_BRIGADE_SENTINEL(ctx->tmpbb)) {
                    apr_brigade_split_ex(ctx->tmpbb, e, bb);
                }
                else {
                    apr_brigade_length(ctx->tmpbb, 1, &len);
                }

                /*
                 * Adjust the burst amount depending on how much
                 * we've done up to now.
                 */
                if (ctx->burst) {
                    ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, f->r, APLOGNO(03485)
                                  "rl: burst %d; len %"APR_OFF_T_FMT, ctx->burst, len);
                    if (len < ctx->burst) {
                        ctx->burst -= len;
                    }
                    else {
                        ctx->burst = 0;
                    }
                }

                e = APR_BRIGADE_LAST(ctx->tmpbb);
                if (APR_BUCKET_IS_EOS(e)) {
                    /* Response complete: this filter's work is done. */
                    ap_remove_output_filter(f);
                }
                else if (!APR_BUCKET_IS_FLUSH(e)) {
                    if (APR_BRIGADE_EMPTY(bb)) {
                        /* Wait for more (or next call) */
                        break;
                    }
                    e = apr_bucket_flush_create(ba);
                    APR_BRIGADE_INSERT_TAIL(ctx->tmpbb, e);
                }

#if defined(RLFDEBUG)
                brigade_dump(f->r, ctx->tmpbb);
                brigade_dump(f->r, bb);
#endif /* RLFDEBUG */

                /* First chunk goes out immediately; later ones pace the
                 * stream by sleeping one interval per chunk. */
                if (ctx->do_sleep) {
                    apr_sleep(RATE_INTERVAL_MS * 1000);
                }
                else {
                    ctx->do_sleep = 1;
                }

                rv = ap_pass_brigade(f->next, ctx->tmpbb);
                apr_brigade_cleanup(ctx->tmpbb);

                if (rv != APR_SUCCESS) {
                    /* Most often, user disconnects from stream */
                    ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, f->r, APLOGNO(01457)
                                  "rl: brigade pass failed.");
                    return rv;
                }
            }
        }

        if (!APR_BRIGADE_EMPTY(ctx->holdingbb)) {
            /* Any rate-limited data in tmpbb is sent unlimited along
             * with the rest.
             */
            APR_BRIGADE_CONCAT(bb, ctx->tmpbb);
            APR_BRIGADE_CONCAT(bb, ctx->holdingbb);
        }
    }

#if defined(RLFDEBUG)
    brigade_dump(f->r, ctx->tmpbb);
#endif /* RLFDEBUG */

    /* Save remaining tmpbb with the correct lifetime for the next call */
    return ap_save_brigade(f, &ctx->holdingbb, &ctx->tmpbb, f->r->pool);
}
/* Google Analytics injection filter: re-arranges the brigade into
 * line-sized buckets (carrying partial lines over in ctx->bbsave), then
 * inserts the configured snippet immediately before </body> unless a
 * tracking tag is already present.
 *
 * BUG FIXES:
 *  - pointer offsets were computed as ((unsigned int)p - (unsigned int)q),
 *    which truncates pointers on 64-bit platforms; plain pointer
 *    subtraction is used now.
 *  - the \r\n detection compared against `le_r + 1` and `le_r < le_n`
 *    before checking le_r for NULL; the NULL test now comes first.
 *  - removed the unused local `found`. */
static apr_status_t google_analytics_out_filter(ap_filter_t *f, apr_bucket_brigade *bb)
{
    request_rec *r = f->r;
    google_analytics_filter_ctx *ctx = f->ctx;
    google_analytics_filter_config *c;

    apr_bucket *b = APR_BRIGADE_FIRST(bb);

    apr_size_t bytes;
    apr_size_t fbytes;
    apr_size_t offs;
    const char *buf;
    const char *le = NULL;
    const char *le_n;
    const char *le_r;
    const char *bufp;
    const char *subs;
    apr_size_t match;
    apr_bucket *b1;
    char *fbuf;
    apr_status_t rv;
    apr_bucket_brigade *bbline;

    /* Do nothing for subrequests. */
    if (r->main) {
        ap_remove_output_filter(f);
        return ap_pass_brigade(f->next, bb);
    }

    c = ap_get_module_config(r->per_dir_config, &google_analytics_module);

    if (ctx == NULL) {
        ctx = f->ctx = apr_pcalloc(r->pool, sizeof(google_analytics_filter_ctx));
        ctx->bbsave = apr_brigade_create(r->pool, f->c->bucket_alloc);
    }

    /* The body length changes, so drop length/validator headers. */
    apr_table_unset(r->headers_out, "Content-Length");
    apr_table_unset(r->headers_out, "Content-MD5");
    apr_table_unset(r->headers_out, "Accept-Ranges");
    apr_table_unset(r->headers_out, "ETag");

    bbline = apr_brigade_create(r->pool, f->c->bucket_alloc);

    /* Re-chunk the brigade so that each bucket ends at a line ending. */
    while (b != APR_BRIGADE_SENTINEL(bb)) {
        if (!APR_BUCKET_IS_METADATA(b)) {
            if (apr_bucket_read(b, &buf, &bytes, APR_BLOCK_READ) == APR_SUCCESS) {
                if (bytes == 0) {
                    APR_BUCKET_REMOVE(b);
                }
                else {
                    while (bytes > 0) {
                        le_n = memchr(buf, '\n', bytes);
                        le_r = memchr(buf, '\r', bytes);
                        if (le_n != NULL) {
                            if (le_r != NULL && le_n == le_r + 1) {
                                le = le_n;          /* "\r\n": break after \n */
                            }
                            else if (le_r != NULL && le_r < le_n) {
                                le = le_r;          /* lone \r first */
                            }
                            else {
                                le = le_n;
                            }
                        }
                        else {
                            le = le_r;              /* may be NULL: no EOL */
                        }

                        if (le) {
                            /* Split after the line ending. */
                            offs = (apr_size_t)(le - buf) + 1;
                            apr_bucket_split(b, offs);
                            bytes -= offs;
                            buf += offs;
                            b1 = APR_BUCKET_NEXT(b);
                            APR_BUCKET_REMOVE(b);

                            /* Join with any saved partial line. */
                            if (!APR_BRIGADE_EMPTY(ctx->bbsave)) {
                                APR_BRIGADE_INSERT_TAIL(ctx->bbsave, b);
                                rv = apr_brigade_pflatten(ctx->bbsave, &fbuf,
                                                          &fbytes, r->pool);
                                b = apr_bucket_pool_create(fbuf, fbytes, r->pool,
                                                           r->connection->bucket_alloc);
                                apr_brigade_cleanup(ctx->bbsave);
                            }
                            APR_BRIGADE_INSERT_TAIL(bbline, b);
                            b = b1;
                        }
                        else {
                            /* Partial line: hold it for the next pass. */
                            APR_BUCKET_REMOVE(b);
                            APR_BRIGADE_INSERT_TAIL(ctx->bbsave, b);
                            bytes = 0;
                        }
                    } /* while bytes > 0 */
                }
            }
            else {
                APR_BUCKET_REMOVE(b);
            }
        }
        else if (APR_BUCKET_IS_EOS(b)) {
            /* Flush any held partial line ahead of EOS. */
            if (!APR_BRIGADE_EMPTY(ctx->bbsave)) {
                rv = apr_brigade_pflatten(ctx->bbsave, &fbuf, &fbytes, r->pool);
                b1 = apr_bucket_pool_create(fbuf, fbytes, r->pool,
                                            r->connection->bucket_alloc);
                APR_BRIGADE_INSERT_TAIL(bbline, b1);
            }
            apr_brigade_cleanup(ctx->bbsave);
            f->ctx = NULL; /* fresh context if the filter runs again */
            APR_BUCKET_REMOVE(b);
            APR_BRIGADE_INSERT_TAIL(bbline, b);
        }
        else {
            apr_bucket_delete(b);
        }
        b = APR_BRIGADE_FIRST(bb);
    }

    /* Walk the line-shaped buckets and inject before </body>. */
    for (b = APR_BRIGADE_FIRST(bbline);
         b != APR_BRIGADE_SENTINEL(bbline);
         b = APR_BUCKET_NEXT(b)) {
        if (!APR_BUCKET_IS_METADATA(b)
            && (apr_bucket_read(b, &buf, &bytes, APR_BLOCK_READ) == APR_SUCCESS)) {
            bufp = buf;
            /* Tracking code already present: nothing to inject. */
            if (ap_regexec(regex_tag_exists, bufp, 0, NULL, 0) == 0) {
                break;
            }
            subs = apr_strmatch(pattern_body_end_tag, bufp, bytes);
            if (subs != NULL) {
                match = (apr_size_t)(subs - bufp);
                bytes -= match;
                bufp += match;
                apr_bucket_split(b, match);
                b1 = APR_BUCKET_NEXT(b);
                apr_bucket_split(b1, body_end_tag_length);
                b = APR_BUCKET_NEXT(b1);
                apr_bucket_delete(b1);
                bytes -= body_end_tag_length;
                bufp += body_end_tag_length;
                b1 = apr_bucket_immortal_create(c->replace, strlen(c->replace),
                                                r->connection->bucket_alloc);
                APR_BUCKET_INSERT_BEFORE(b, b1);
            }
        }
    }

    rv = ap_pass_brigade(f->next, bbline);

    /* Keep held-over buckets alive until the next invocation. */
    for (b = APR_BRIGADE_FIRST(ctx->bbsave);
         b != APR_BRIGADE_SENTINEL(ctx->bbsave);
         b = APR_BUCKET_NEXT(b)) {
        apr_bucket_setaside(b, r->pool);
    }
    return rv;
}
/* Rewrite response bodies proxied from the master server: every occurrence
 * of the master's (URI-encoded) repository path is replaced with this
 * slave's root directory, using a precompiled strmatch pattern and
 * bucket split/replace so no data is copied unnecessarily. */
apr_status_t dav_svn__location_body_filter(ap_filter_t *f,
                                           apr_bucket_brigade *bb)
{
  request_rec *r = f->r;
  locate_ctx_t *ctx = f->ctx;
  apr_bucket *bkt;
  const char *master_uri, *root_dir, *canonicalized_uri;
  apr_uri_t uri;

  /* Don't filter if we're in a subrequest or we aren't setup to
     proxy anything. */
  master_uri = dav_svn__get_master_uri(r);
  if (r->main || !master_uri)
    {
      ap_remove_output_filter(f);
      return ap_pass_brigade(f->next, bb);
    }

  /* And don't filter if our search-n-replace would be a noop anyway
     (that is, if our root path matches that of the master server). */
  apr_uri_parse(r->pool, master_uri, &uri);
  root_dir = dav_svn__get_root_dir(r);
  canonicalized_uri = svn_urlpath__canonicalize(uri.path, r->pool);
  if (strcmp(canonicalized_uri, root_dir) == 0)
    {
      ap_remove_output_filter(f);
      return ap_pass_brigade(f->next, bb);
    }

  /* ### FIXME: GET and PROPFIND requests that make it here must be
     ### referring to data inside commit transactions-in-progress.
     ### We've got to be careful not to munge the versioned data
     ### they return in the process of trying to do URI fix-ups.
     ### See issue #3445 for details. */

  /* We are url encoding the current url and the master url
     as incoming(from master) request body has it encoded already. */
  canonicalized_uri = svn_path_uri_encode(canonicalized_uri, r->pool);
  root_dir = svn_path_uri_encode(root_dir, r->pool);

  /* One-time setup: precompile the search pattern for the master path. */
  if (!f->ctx)
    {
      ctx = f->ctx = apr_pcalloc(r->pool, sizeof(*ctx));
      ctx->remotepath = canonicalized_uri;
      ctx->remotepath_len = strlen(ctx->remotepath);
      ctx->localpath = root_dir;
      ctx->localpath_len = strlen(ctx->localpath);
      ctx->pattern = apr_strmatch_precompile(r->pool, ctx->remotepath, 1);
      ctx->pattern_len = ctx->remotepath_len;
    }

  /* Walk the brigade; on each match, split around the matched span,
     delete it, and insert the local path in its place.  The loop resumes
     after the inserted bucket, so repeated matches are all handled. */
  bkt = APR_BRIGADE_FIRST(bb);
  while (bkt != APR_BRIGADE_SENTINEL(bb))
    {
      const char *data, *match;
      apr_size_t len;

      /* read */
      apr_bucket_read(bkt, &data, &len, APR_BLOCK_READ);
      match = apr_strmatch(ctx->pattern, data, len);
      if (match)
        {
          apr_bucket *next_bucket;
          apr_bucket_split(bkt, match - data);
          next_bucket = APR_BUCKET_NEXT(bkt);
          apr_bucket_split(next_bucket, ctx->pattern_len);
          bkt = APR_BUCKET_NEXT(next_bucket);
          apr_bucket_delete(next_bucket);
          next_bucket = apr_bucket_pool_create(ctx->localpath,
                                               ctx->localpath_len,
                                               r->pool, bb->bucket_alloc);
          APR_BUCKET_INSERT_BEFORE(bkt, next_bucket);
        }
      else
        {
          bkt = APR_BUCKET_NEXT(bkt);
        }
    }
  return ap_pass_brigade(f->next, bb);
}
/* This function parses the http-response headers from a backend system.
   We want to find out if the response-header has a
   a) Set-Cookie header which should be stored to the session store
   b) Set-Cookie header which is configured as "free" cookie
   c) Set-Cookie header which has a special meaning to us (Auth=ok)

   BUG FIX: the loop that re-emits the "free" cookies used apr_table_set(),
   which REPLACES any existing Set-Cookie value, so only the last of N
   cookies ever reached the client.  It now uses apr_table_add(), which
   appends one header line per cookie. */
static apr_status_t mod_but_output_filter(ap_filter_t *f, apr_bucket_brigade *bb_in)
{
    apr_int64_t i;
    int shmoffsetnew;
    char *pshm_offset_number;
    apr_port_t port = 0;
    char *host = NULL;
    char *all_shm_space_used_url = NULL;
    const char *protocol, *ssl_session_id, *ssl_cipher;
    request_rec *r = f->r;
    mod_but_server_t *config = ap_get_module_config(r->server->module_config, &but_module);
    cookie_res *cr = apr_palloc(r->pool, sizeof(cookie_res));
    apr_int64_t num_set_cookie;

    cr->r = r;
    cr->cookie = NULL;

    /* Build the host[:port] string used when we must emit an absolute
       redirect below (non-default ports are made explicit). */
    port = ap_get_server_port (r);
    if ((port != 80) && (port != 443)) {
        /* because of multiple passes through don't use r->hostname() */
        host = (char *)apr_psprintf(r->pool, "%s:%d", ap_get_server_name (r), port);
    } else {
        host = (char *)apr_psprintf(r->pool, "%s", ap_get_server_name (r));
    }

    protocol = (char *)ssl_var_lookup(r->pool, r->server, r->connection, r, apr_pstrdup(r->pool, "SSL_PROTOCOL") );
    ap_log_rerror(PC_LOG_INFO, r, "mod_but: CORE: SSL_PROTOCOL [%s]", protocol);

    ssl_session_id = (char *)ssl_var_lookup(r->pool, r->server, r->connection, r, apr_pstrdup(r->pool, "SSL_SESSION_ID") );
    ap_log_rerror(PC_LOG_INFO, r, "mod_but: CORE: SSL_SESSION_ID [%s]", ssl_session_id);

    ssl_cipher = (char *)ssl_var_lookup(r->pool, r->server, r->connection, r, apr_pstrdup(r->pool, "SSL_CIPHER") );
    ap_log_rerror(PC_LOG_INFO, r, "mod_but: CORE: SSL_CIPHER [%s]", ssl_cipher);

    /* This checks if the response has a Set-Cookie set.  There could be
       a) NO Set-Cookie in response-header
       b) LOGON Set-Cookie in response-header
       c) FREE COOKIE in response-header
       d) Other's Cookies in response-header (belonging into session store) */
    ap_log_rerror(PC_LOG_INFO, r, "mod_but: Calling apr_table_do(mod_but_analyze_response_headers)");

    /* Do Header Parsing for all Response Headers. We are looking for
       a) MOD_BUT SESSION
       b) FREE COOKIES
       c) SERVICE_LIST COOKIES
       d) OTHER COOKIES */
    apr_table_set(r->notes, "NUM_SET_COOKIE", "0");
    apr_table_do(mod_but_analyze_response_headers, cr, r->headers_out, NULL);
    ap_log_rerror(PC_LOG_CRIT, r, "mod_but: FINISHED MOD_BUT_ANALYZE_RESPONSE_HEADER");

    /* Unsetting all Set-Cookie Headers from Response (All but MOD_BUT_SESSION) */
    apr_table_unset(r->headers_out, "Set-Cookie");
    apr_table_unset(r->err_headers_out, "Set-Cookie");
    ap_log_rerror(PC_LOG_CRIT, r, "mod_but: P1: UNSETTING ALL RESPONSE HEADERS");

    /* Setting FREE Cookies into the Response Header manually.
       apr_table_add (not set!) so each cookie becomes its own header line. */
    num_set_cookie = apr_atoi64(apr_table_get(r->notes, "NUM_SET_COOKIE"));
    for (i = 1; i <= num_set_cookie; i++) {
        ap_log_rerror(PC_LOG_INFO, r, "mod_but: FOR LOOP -- NUM_SET_COOKIE IS [%s]", apr_table_get(r->notes, "NUM_SET_COOKIE"));
        ap_log_rerror(PC_LOG_INFO, r, "mod_but: VALUE IS [%s]", apr_table_get(r->notes, apr_itoa(r->pool, i)));
        apr_table_add(r->headers_out, "Set-Cookie", apr_table_get(r->notes, apr_itoa(r->pool, i)));
        ap_log_rerror(PC_LOG_CRIT, r, "mod_but: P2: SETTING FREE COOKIES INTO THE RESPONSE");
    }

    /* If apr_table_do detected a LOGON=ok Set-Cookie Header, there will be
       a r->notes about it.  Otherwise r->notes is empty. */
    if (apr_table_get(r->notes, "LOGON_STATUS") != NULL) {
        ap_log_rerror(PC_LOG_INFO, r, "mod_but: LOGON STATUS = [%s]", apr_table_get(r->notes, "LOGON_STATUS"));
        i = apr_atoi64(apr_table_get(r->notes, "SHMOFFSET"));

        ap_log_rerror(PC_LOG_INFO, r, "mod_but: VOR renew_mod_but_session in mod_but.c");
        shmoffsetnew = renew_mod_but_session(i, r);

        if (shmoffsetnew == -1) {
            /* Session store full: strip cookies and redirect the client to
               the configured "all SHM space used" page. */
            ap_log_rerror(PC_LOG_INFO, r, "mod_but: Problems with SHM Creation, DECLINED");
            apr_table_unset(r->headers_out, "Set-Cookie");
            apr_table_unset(r->err_headers_out, "Set-Cookie");

            /* NOTE(review): assumes ssl_var_lookup never returned NULL for
               SSL_SESSION_ID here — confirm; apr_strnatcmp(NULL, "") would
               crash. */
            if (apr_strnatcmp(ssl_session_id,"")){
                all_shm_space_used_url = (char *)apr_psprintf(r->pool, "https://%s%s", host, config->all_shm_space_used_url );
            } else {
                all_shm_space_used_url = (char *)apr_psprintf(r->pool, "http://%s%s", host, config->all_shm_space_used_url );
            }
            apr_table_setn(r->err_headers_out, "Location", all_shm_space_used_url);
            ap_log_rerror(PC_LOG_CRIT, r, "mod_but: FINISHED ALL_SHM_SPACE_USED");
            r->content_type = NULL;
            return OK;
        }

        if (shmoffsetnew == -2) {
            ap_log_rerror(PC_LOG_INFO, r, "mod_but: Problems with SID Creation, DECLINED");
            ap_log_rerror(PC_LOG_INFO, r, "mod_but: END OF OUTPUT FILTER");
            //return 2400;
            return OK;
        }

        ap_log_rerror(PC_LOG_INFO, r, "mod_but: OUTPUT FILTER: SHMOFFSET BEFORE [%s]", apr_table_get(r->notes, "SHMOFFSET"));

        /* This is the runtime fix, so that the other stuff will have the
           correct SHMOFFSET.  renew_mod_but_session returned the new
           SHMOFFSET we have to put into r->notes. */
        pshm_offset_number = apr_itoa(r->pool, shmoffsetnew);
        apr_table_set(r->notes, "SHMOFFSET", pshm_offset_number);
        ap_log_rerror(PC_LOG_INFO, r, "mod_but: END OF OUTPUT FILTER");
    }

    if (apr_table_get(r->notes, "CS_SHM") != NULL) {
        ap_log_rerror(PC_LOG_INFO, r, "mod_but: Problems with SHM Cookie Store - ALERT - No space left in SHM Cookiestore to include a processing header");
    }

    ap_log_rerror(PC_LOG_CRIT, r, "mod_but: P3: BEFORE REMOVE OUTPUT FILTER");
    ap_remove_output_filter(f);
    ap_log_rerror(PC_LOG_CRIT, r, "mod_but: P4: AFTER REMOVE OUTPUT FILTER & BEFORE AP_PASS_BRIGADE");
    return ap_pass_brigade(f->next, bb_in);
}
/* TODO: cleanup ctx */
/*
 * Compress the response body with zlib using a preset dictionary, emitting
 * "Content-Encoding: zlibdict".  State lives in f->ctx across invocations.
 *
 * Fixes vs. the original:
 *  - the Accept-Encoding test was inverted (it removed the filter exactly
 *    when the client DID advertise zlibdict, contradicting its own log
 *    message) -- assumes zlibdict__header_contains() returns nonzero when
 *    the header lists "zlibdict";
 *  - intermediate chunks now use Z_SYNC_FLUSH instead of Z_FINISH, which
 *    would have terminated the zlib stream after the first bucket;
 *  - the output bucket was created with the input length `len` instead of
 *    the produced length `write_len`;
 *  - the subpool holding write_buf was destroyed inside the loop while
 *    write_buf was still in use by later iterations;
 *  - at EOS, pending compressed data is drained with Z_FINISH before
 *    deflateEnd() so the tail of the stream is not lost.
 */
static apr_status_t zlibdict_output_filter(ap_filter_t *f, apr_bucket_brigade *bb)
{
    apr_bucket *b;
    zlibdict_ctx_t *ctx = f->ctx;
    request_rec *r = f->r;
    const char *client_accepts;
    apr_status_t status = APR_SUCCESS;
    apr_pool_t *subpool;
    int zerr;

    ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, f->r,
                  "triggered zlibdict_output_filter");

    /* Do nothing if asked to filter nothing. */
    if (APR_BRIGADE_EMPTY(bb)) {
        return APR_SUCCESS;
    }

    /* First time we are called for this response? */
    if (!ctx) {
        client_accepts = apr_table_get(r->headers_in, "Accept-Encoding");
        /* BUGFIX: only compress when the client explicitly accepts
         * "zlibdict"; the original test was inverted. */
        if (client_accepts == NULL ||
            !zlibdict__header_contains(r->pool, client_accepts)) {
            ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
                          "Not compressing (no Accept-Encoding: zlibdict)");
            ap_remove_output_filter(f);
            return ap_pass_brigade(f->next, bb);
        }

        ctx = f->ctx = apr_pcalloc(r->pool, sizeof(*ctx));
        ctx->bb = apr_brigade_create(r->pool, f->c->bucket_alloc);
        ctx->buf = apr_palloc(r->pool, DEFAULT_BUFFERSIZE);

        /* zstream must be NULL'd out. */
        memset(&ctx->zstr, 0, sizeof(z_stream));
        zerr = deflateInit2(&ctx->zstr, DEFAULT_COMPRESSION, Z_DEFLATED,
                            DEFAULT_WINDOWSIZE, DEFAULT_MEMLEVEL,
                            Z_DEFAULT_STRATEGY);
        if (zerr != Z_OK) {
            return APR_EGENERAL;
        }
        deflateSetDictionary(&ctx->zstr, (Bytef *)propfind_dictionary,
                             strlen(propfind_dictionary));

        /* Set Content-Encoding header so our client knows how to handle
           this data. */
        apr_table_mergen(r->headers_out, "Content-Encoding", "zlibdict");
    }

    /* Read the data from the handler and compress it with a dictionary. */
    for (b = APR_BRIGADE_FIRST(bb);
         b != APR_BRIGADE_SENTINEL(bb);
         b = APR_BUCKET_NEXT(b)) {
        const char *data;
        void *write_buf;
        apr_size_t len;
        apr_size_t buf_size, write_len;

        if (APR_BUCKET_IS_EOS(b)) {
            /* Drain whatever zlib still holds, then tear the stream down.
             * Heap buckets copy the data when the free function is NULL,
             * so ctx->buf can be reused across iterations. */
            ctx->zstr.avail_in = 0;
            do {
                ctx->zstr.next_out = (Bytef *)ctx->buf;
                ctx->zstr.avail_out = (uInt)DEFAULT_BUFFERSIZE;
                zerr = deflate(&ctx->zstr, Z_FINISH);
                if (zerr < 0) {
                    deflateEnd(&ctx->zstr);
                    return APR_EGENERAL;
                }
                write_len = DEFAULT_BUFFERSIZE - ctx->zstr.avail_out;
                if (write_len > 0) {
                    apr_bucket *b_out =
                        apr_bucket_heap_create(ctx->buf, write_len, NULL,
                                               f->c->bucket_alloc);
                    APR_BRIGADE_INSERT_TAIL(ctx->bb, b_out);
                }
            } while (zerr != Z_STREAM_END);
            deflateEnd(&ctx->zstr);

            /* Remove EOS from the old list, and insert into the new. */
            APR_BUCKET_REMOVE(b);
            APR_BRIGADE_INSERT_TAIL(ctx->bb, b);
            return ap_pass_brigade(f->next, ctx->bb);
        }

        if (APR_BUCKET_IS_METADATA(b))
            continue;

        status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
        if (status != APR_SUCCESS)
            break;

        /* The largest buffer we should need is 0.1% larger than the
           compressed data, + 12 bytes.  This info comes from zlib.h. */
        buf_size = len + (len / 1000) + 13;
        apr_pool_create(&subpool, r->pool);
        write_buf = apr_palloc(subpool, buf_size);

        ctx->zstr.next_in = (Bytef *)data;  /* Casting away const! */
        ctx->zstr.avail_in = (uInt)len;

        do {
            ctx->zstr.next_out = write_buf;
            ctx->zstr.avail_out = (uInt)buf_size;

            /* Z_SYNC_FLUSH keeps the stream open for subsequent buckets. */
            zerr = deflate(&ctx->zstr, Z_SYNC_FLUSH);
            if (zerr < 0) {
                apr_pool_destroy(subpool);
                return APR_EGENERAL;
            }

            write_len = buf_size - ctx->zstr.avail_out;
            if (write_len > 0) {
                apr_bucket *b_out;

                /* BUGFIX: the bucket length is the amount produced
                 * (write_len), not the input length.  NULL free func =>
                 * the heap bucket copies the data, so subpool can go away. */
                b_out = apr_bucket_heap_create(write_buf, write_len, NULL,
                                               f->c->bucket_alloc);
                APR_BRIGADE_INSERT_TAIL(ctx->bb, b_out);

                /* Send what we have right now to the next filter. */
                status = ap_pass_brigade(f->next, ctx->bb);
                if (status != APR_SUCCESS) {
                    apr_pool_destroy(subpool);
                    return status;
                }
            }
        } while (ctx->zstr.avail_in > 0 || ctx->zstr.avail_out == 0);

        /* BUGFIX: destroy the subpool only after we are done with
         * write_buf for this input bucket. */
        apr_pool_destroy(subpool);
    }

    return status;
}
/*
 * mod_line_edit output filter: splits the incoming brigade into complete
 * lines (per the configured line-end convention), applies the configured
 * rewrite rules (literal or regex) to each line, and passes the edited
 * lines downstream.  Unterminated trailing content is held over in
 * ctx->bbsave until the next invocation or EOS.
 *
 * Fixes vs. the original:
 *  - pointer offsets were computed by casting pointers to unsigned int,
 *    which truncates addresses on LP64 platforms; use pointer differences;
 *  - ap_pregsub() may return NULL; skip the substitution instead of
 *    crashing in strlen(NULL).
 */
static apr_status_t line_edit_filter(ap_filter_t* f, apr_bucket_brigade* bb) {
  int i, j;
  apr_size_t match ;
  unsigned int nmatch = 10 ;
  ap_regmatch_t pmatch[10] ;
  const char* bufp;
  const char* subs ;
  apr_size_t bytes ;
  apr_size_t fbytes ;
  apr_size_t offs ;
  const char* buf ;
  const char* le = NULL ;
  const char* le_n ;
  const char* le_r ;
  char* fbuf ;
  apr_bucket* b = APR_BRIGADE_FIRST(bb) ;
  apr_bucket* b1 ;
  int found = 0 ;
  apr_status_t rv ;
  apr_bucket_brigade* bbline ;
  line_edit_cfg* cfg
        = ap_get_module_config(f->r->per_dir_config, &line_edit_module) ;
  rewriterule* rules = (rewriterule*) cfg->rewriterules->elts ;
  rewriterule* newrule;

  line_edit_ctx* ctx = f->ctx ;
  if (ctx == NULL) {
    /* check env to see if we're wanted, to give basic control with 2.0 */
    buf = apr_table_get(f->r->subprocess_env, "LineEdit");
    if (buf && f->r->content_type) {
      char* lcbuf = apr_pstrdup(f->r->pool, buf) ;
      char* lctype = apr_pstrdup(f->r->pool, f->r->content_type) ;
      char* c ;

      /* lowercase both, and cut the content-type at any ";charset=..." */
      for (c = lcbuf; *c; ++c)
        if (isupper(*c))
          *c = tolower(*c) ;

      for (c = lctype; *c; ++c)
        if (isupper(*c))
          *c = tolower(*c) ;
        else if (*c == ';') {
          *c = 0 ;
          break ;
        }

      if (!strstr(lcbuf, lctype)) {
        /* don't filter this content type */
        ap_filter_t* fnext = f->next ;
        ap_remove_output_filter(f) ;
        return ap_pass_brigade(fnext, bb) ;
      }
    }

    ctx = f->ctx = apr_palloc(f->r->pool, sizeof(line_edit_ctx)) ;
    ctx->bbsave = apr_brigade_create(f->r->pool, f->c->bucket_alloc) ;

    /* If we have any regex matches, we'll need to copy everything, so we
     * have null-terminated strings to parse.  That's a lot of memory if
     * we're streaming anything big.  So we'll use (and reuse) a local
     * subpool.  Fall back to the request pool if anything bad happens.
     */
    ctx->lpool = f->r->pool ;
    for (i = 0; i < cfg->rewriterules->nelts; ++i) {
      if ( rules[i].flags & M_REGEX ) {
        if (apr_pool_create(&ctx->lpool, f->r->pool) != APR_SUCCESS) {
          ctx->lpool = f->r->pool ;
        }
        break ;
      }
    }

    /* If we have env interpolation, we'll need a private copy of
     * our rewrite rules with this request's env.  Otherwise we can
     * save processing time by using the original.
     *
     * If one ENV is found, we also have to copy all previous and
     * subsequent rules, even those with no interpolation.
     */
    ctx->rewriterules = cfg->rewriterules;
    for (i = 0; i < cfg->rewriterules->nelts; ++i) {
      found |= (rules[i].flags & M_ENV) ;
      if ( found ) {
        if (ctx->rewriterules == cfg->rewriterules) {
          ctx->rewriterules = apr_array_make(f->r->pool,
                cfg->rewriterules->nelts, sizeof(rewriterule));
          for (j = 0; j < i; ++j) {
            newrule = apr_array_push (((line_edit_ctx*)ctx)->rewriterules) ;
            newrule->from = rules[j].from;
            newrule->to = rules[j].to;
            newrule->flags = rules[j].flags;
            newrule->length = rules[j].length;
          }
        }
        /* this rule needs to be interpolated */
        newrule = apr_array_push (((line_edit_ctx*)ctx)->rewriterules) ;
        newrule->from = rules[i].from;
        if (rules[i].flags & M_ENV) {
          newrule->to = interpolate_env(f->r, rules[i].to);
        } else {
          newrule->to = rules[i].to ;
        }
        newrule->flags = rules[i].flags;
        newrule->length = rules[i].length;
      }
    }

    /* for back-compatibility with Apache 2.0, set some protocol stuff */
    apr_table_unset(f->r->headers_out, "Content-Length") ;
    apr_table_unset(f->r->headers_out, "Content-MD5") ;
    apr_table_unset(f->r->headers_out, "Accept-Ranges") ;
  }

  /* by now our rules are in ctx->rewriterules */
  rules = (rewriterule*) ctx->rewriterules->elts ;

  /* bbline is what goes to the next filter,
   * so we (can) have a new one each time.
   */
  bbline = apr_brigade_create(f->r->pool, f->c->bucket_alloc) ;

  /* first ensure we have no mid-line breaks that might be in the
   * middle of a search string causing us to miss it!  At the same
   * time we split into lines to avoid pattern-matching over big
   * chunks of memory.
   */
  while ( b != APR_BRIGADE_SENTINEL(bb) ) {
    if ( !APR_BUCKET_IS_METADATA(b) ) {
      if ( apr_bucket_read(b, &buf, &bytes, APR_BLOCK_READ) == APR_SUCCESS ) {
        if ( bytes == 0 ) {
          APR_BUCKET_REMOVE(b) ;
        } else while ( bytes > 0 ) {
          switch (cfg->lineend) {

          case LINEEND_UNIX:
            le = memchr(buf, '\n', bytes) ;
            break ;
          case LINEEND_MAC:
            le = memchr(buf, '\r', bytes) ;
            break ;
          case LINEEND_DOS:
            /* Edge-case issue: if a \r\n spans buckets it'll get missed.
             * Not a problem for present purposes, but would be an issue
             * if we claimed to support pattern matching on the lineends.
             */
            found = 0 ;
            le = memchr(buf+1, '\n', bytes-1) ;
            while ( le && !found ) {
              if ( le[-1] == '\r' ) {
                found = 1 ;
              } else {
                le = memchr(le+1, '\n', bytes-1 - (le+1 - buf)) ;
              }
            }
            if ( !found )
              le = 0 ;
            break;
          case LINEEND_ANY:
          case LINEEND_UNSET:
            /* Edge-case notabug: if a \r\n spans buckets it'll get seen as
             * two line-ends.  It'll insert the \n as a one-byte bucket.
             * (Braces added for clarity; semantics unchanged -- the else
             * always bound to the inner if.)
             */
            le_n = memchr(buf, '\n', bytes) ;
            le_r = memchr(buf, '\r', bytes) ;
            if ( le_n != NULL ) {
              if ( le_n == le_r + sizeof(char) )
                le = le_n ;
              else if ( (le_r < le_n) && (le_r != NULL) )
                le = le_r ;
              else
                le = le_n ;
            } else {
              le = le_r ;
            }
            break;
          case LINEEND_NONE:
            le = 0 ;
            break;
          case LINEEND_CUSTOM:
            le = memchr(buf, cfg->lechar, bytes) ;
            break;
          }
          if ( le ) {
            /* found a lineend in this bucket.
             * BUGFIX: compute the offset with pointer arithmetic instead
             * of casting pointers through unsigned int, which truncates
             * addresses on 64-bit platforms. */
            offs = 1 + (apr_size_t)(le - buf) ;
            apr_bucket_split(b, offs) ;
            bytes -= offs ;
            buf += offs ;
            b1 = APR_BUCKET_NEXT(b) ;
            APR_BUCKET_REMOVE(b);

            /* Is there any previous unterminated content ? */
            if ( !APR_BRIGADE_EMPTY(ctx->bbsave) ) {
              /* append this to any content waiting for a lineend */
              APR_BRIGADE_INSERT_TAIL(ctx->bbsave, b) ;
              rv = apr_brigade_pflatten(ctx->bbsave, &fbuf, &fbytes, f->r->pool) ;
              /* make b a new bucket of the flattened stuff */
              b = apr_bucket_pool_create(fbuf, fbytes, f->r->pool,
                    f->r->connection->bucket_alloc) ;
              /* bbsave has been consumed, so clear it */
              apr_brigade_cleanup(ctx->bbsave) ;
            }
            /* b now contains exactly one line */
            APR_BRIGADE_INSERT_TAIL(bbline, b);
            b = b1 ;
          } else {
            /* no lineend found.  Remember the dangling content */
            APR_BUCKET_REMOVE(b);
            APR_BRIGADE_INSERT_TAIL(ctx->bbsave, b);
            bytes = 0 ;
          }
        }       /* while bytes > 0 */
      } else {
        /* bucket read failed - oops !  Let's remove it. */
        APR_BUCKET_REMOVE(b);
      }
    } else if ( APR_BUCKET_IS_EOS(b) ) {
      /* If there's data to pass, send it in one bucket */
      if ( !APR_BRIGADE_EMPTY(ctx->bbsave) ) {
        rv = apr_brigade_pflatten(ctx->bbsave, &fbuf, &fbytes, f->r->pool) ;
        b1 = apr_bucket_pool_create(fbuf, fbytes, f->r->pool,
              f->r->connection->bucket_alloc) ;
        APR_BRIGADE_INSERT_TAIL(bbline, b1);
      }
      apr_brigade_cleanup(ctx->bbsave) ;
      /* start again rather than segfault if a seriously buggy
       * filter in front of us sent a bogus EOS
       */
      f->ctx = NULL ;

      /* move the EOS to the new brigade */
      APR_BUCKET_REMOVE(b);
      APR_BRIGADE_INSERT_TAIL(bbline, b);
    } else {
      /* chop flush or unknown metadata bucket types */
      apr_bucket_delete(b);
    }
    /* OK, reset pointer to what's left (since we're not in a for-loop) */
    b = APR_BRIGADE_FIRST(bb) ;
  }

  /* OK, now we have a bunch of complete lines in bbline,
   * so we can apply our edit rules.
   *
   * When we get a match, we split the line into before+match+after.
   * To flatten that back into one buf every time would be inefficient.
   * So we treat it as three separate bufs to apply future rules.
   *
   * We can only reasonably do that by looping over buckets *inside*
   * the loop over rules.
   *
   * That means concepts like one-match-per-line or start-of-line-only
   * won't work, except for the first rule.  So we won't pretend.
   */
  for (i = 0; i < ctx->rewriterules->nelts; ++i) {
    for ( b = APR_BRIGADE_FIRST(bbline) ;
          b != APR_BRIGADE_SENTINEL(bbline) ;
          b = APR_BUCKET_NEXT(b) ) {
      if ( !APR_BUCKET_IS_METADATA(b)
           && (apr_bucket_read(b, &buf, &bytes, APR_BLOCK_READ)
                == APR_SUCCESS)) {
        if ( rules[i].flags & M_REGEX ) {
          bufp = apr_pstrmemdup(ctx->lpool, buf, bytes) ;
          while ( ! ap_regexec(rules[i].from.r, bufp, nmatch, pmatch, 0) ) {
            match = pmatch[0].rm_so ;
            subs = ap_pregsub(f->r->pool, rules[i].to, bufp, nmatch, pmatch) ;
            /* ROBUSTNESS: ap_pregsub can return NULL (e.g. on a bad
             * backreference); skip further substitution on this line
             * rather than crash in strlen(NULL). */
            if (subs == NULL)
              break ;
            apr_bucket_split(b, match) ;
            b1 = APR_BUCKET_NEXT(b) ;
            apr_bucket_split(b1, pmatch[0].rm_eo - match) ;
            b = APR_BUCKET_NEXT(b1) ;
            apr_bucket_delete(b1) ;
            b1 = apr_bucket_pool_create(subs, strlen(subs), f->r->pool,
                  f->r->connection->bucket_alloc) ;
            APR_BUCKET_INSERT_BEFORE(b, b1) ;
            bufp += pmatch[0].rm_eo ;
          }
        } else {
          bufp = buf ;
          while (subs = apr_strmatch(rules[i].from.s, bufp, bytes),
                 subs != NULL) {
            /* BUGFIX: pointer difference instead of 32-bit pointer casts */
            match = (apr_size_t)(subs - bufp) ;
            bytes -= match ;
            bufp += match ;
            apr_bucket_split(b, match) ;
            b1 = APR_BUCKET_NEXT(b) ;
            apr_bucket_split(b1, rules[i].length) ;
            b = APR_BUCKET_NEXT(b1) ;
            apr_bucket_delete(b1) ;
            bytes -= rules[i].length ;
            bufp += rules[i].length ;
            b1 = apr_bucket_immortal_create(rules[i].to, strlen(rules[i].to),
                  f->r->connection->bucket_alloc) ;
            APR_BUCKET_INSERT_BEFORE(b, b1) ;
          }
        }
      }
    }
    /* If we used a local pool, clear it now */
    if ( (ctx->lpool != f->r->pool) && (rules[i].flags & M_REGEX) ) {
      apr_pool_clear(ctx->lpool) ;
    }
  }

  /* now pass it down the chain */
  rv = ap_pass_brigade(f->next, bbline) ;

  /* if we have leftover data, don't risk it going out of scope */
  for ( b = APR_BRIGADE_FIRST(ctx->bbsave) ;
        b != APR_BRIGADE_SENTINEL(ctx->bbsave) ;
        b = APR_BUCKET_NEXT(b)) {
    apr_bucket_setaside(b, f->r->pool) ;
  }

  return rv ;
}
//------- static apr_status_t out_filter(ap_filter_t *f,apr_bucket_brigade *bb_in){ apr_bucket *bucket_in; apr_bucket_brigade *bb_out; request_rec *r = f->r; conn_rec *c = r->connection; apr_pool_t *p = r->pool; const char *data_in; apr_size_t len_in; int decision = 0; //--- example if we just do nothing //--- do something to set 'decision' to true(1) or false(0) if(decision){ ap_remove_output_filter(f); return ap_pass_brigade(f->next, bb_in); } bb_out = apr_brigade_create(p, c->bucket_alloc); //--- maybe we want write HTML header write_brigade(bb_out,r,"<XMP>"); write_brigade(bb_out,r,"<%START%>\n\n"); while(!APR_BRIGADE_EMPTY(bb_in)){ bucket_in = APR_BRIGADE_FIRST(bb_in); //--- if FLUSH requested ? I don't really know when, just follow other sample. //--- maybe you need it. /* -- if (APR_BUCKET_IS_FLUSH(bucket_in)){ APR_BRIGADE_INSERT_TAIL(bb_out, apr_bucket_flush_create(c->bucket_alloc)); if (ap_pass_brigade(f->next, bb_out) != APR_SUCCESS){return APR_SUCCESS;} bb_out = apr_brigade_create(p, c->bucket_alloc); continue; } -- */ if (APR_BUCKET_IS_EOS(bucket_in)){ //--- do we want do something when we know there is no data anymore ? break; } //---- fetch the data apr_bucket_read(bucket_in, &data_in, &len_in, APR_BLOCK_READ); //--- do something with data_in write_brigade(bb_out,r,"<%DATA%>\n"); write_brigade(bb_out,r,apr_pstrdup(p,data_in)); write_brigade(bb_out,r,"\n<%/DATA%>\n "); //--- end fetch apr_bucket_delete(bucket_in); } //-- we don't need bb_in anymore apr_brigade_destroy(bb_in); //--- maybe we want write HTML footer write_brigade(bb_out,r,"\n<%/END%>"); write_brigade(bb_out,r,"</XMP>"); //--- send EOS to bb_out to finish inserting data to bb_out APR_BRIGADE_INSERT_TAIL(bb_out,apr_bucket_eos_create(c->bucket_alloc)); //--- pass bb_out to next filter ap_remove_output_filter(f); return ap_pass_brigade(f->next, bb_out); }
// // Output filter. // static apr_status_t resize_output_filter(ap_filter_t* f, apr_bucket_brigade* in_bb) { request_rec* rec =f->r; resize_conf* conf = (resize_conf*)ap_get_module_config(rec->per_dir_config, &resizeimage_module); const char* content_type, *target_type = "JPEG"; const char* image_url, *resize_param, *image_hash=NULL; Magick::Blob blob; char* vlob = NULL; size_t vlob_length = 0; int cache_hit = FALSE; AP_LOG_VERBOSE(rec, "Incoming %s.", __FUNCTION__); // Pass thru by request types. if(rec->status!=HTTP_OK || rec->main!=NULL || rec->header_only || (rec->handler!= NULL && strcmp(rec->handler, "default-handler") == 0)) goto PASS_THRU; AP_LOG_VERBOSE(rec, "-- Checking responce headers."); // Obtain and erase x-resize-image header or pass through. image_url = get_and_unset_header(rec->headers_out, X_RESIZE); if(image_url== NULL || image_url[0]=='\0') { image_url = get_and_unset_header(rec->err_headers_out, X_RESIZE); } if(image_url==NULL || image_url[0]=='\0') goto PASS_THRU; // Check content-type content_type = rec->content_type; if(content_type) { if(strcasecmp(content_type, "image/jpeg")==0) { target_type = "JPEG"; } else if(strcasecmp(content_type, "image/png")==0) { target_type = "PNG"; } else if(strcasecmp(content_type, "image/gif")==0) { target_type = "GIF"; } else goto PASS_THRU; } // Resize parameter resize_param = get_and_unset_header(rec->headers_out, X_RESIZE_PARAM); if(resize_param==NULL || resize_param[0]=='\0') { resize_param = get_and_unset_header(rec->err_headers_out, X_RESIZE_PARAM); } if(resize_param[0]=='\0') resize_param = NULL; // Image hash image_hash = get_and_unset_header(rec->headers_out, X_RESIZE_HASH); if(image_hash==NULL || image_hash[0]=='\0') { image_hash = get_and_unset_header(rec->err_headers_out, X_RESIZE_HASH); } // Open image and resize. AP_LOG_INFO(rec, "URL: %s, %s => %s (%s)", image_url, content_type, resize_param, image_hash); if(image_hash) { // Try memcached... 
image_hash = apr_psprintf(rec->pool, "%s:%s:%s", image_hash, target_type, resize_param); memcached_return r; uint32_t flags; vlob = memcached_get(conf->memc, image_hash, strlen(image_hash), &vlob_length, &flags, &r); if(r==MEMCACHED_SUCCESS) { AP_LOG_DEBUG(rec, "Restored from memcached: %s, len=%d", image_hash, vlob_length); cache_hit = TRUE; goto WRITE_DATA; } else { AP_LOG_DEBUG(rec, "Can't restore from memcached: %s - %s(%d)", image_hash, memcached_strerror(conf->memc, r), r); } } // Reszize try { Magick::Image image; image.read(image_url); if(resize_param) image.zoom(resize_param); image.magick(target_type); image.quality(conf->jpeg_quality); image.write(&blob); vlob = (char*)blob.data(); vlob_length = blob.length(); } catch(Magick::Exception& err) { AP_LOG_ERR(rec, __FILE__ ": Magick failed: %s", err.what()); goto PASS_THRU; } if(image_hash) { // Store to memcached... memcached_return r = memcached_set(conf->memc, image_hash, strlen(image_hash), vlob, vlob_length, conf->expire, 0); if(r==MEMCACHED_SUCCESS) { AP_LOG_DEBUG(rec, "Stored to memcached: %s(len=%d)", image_hash, vlob_length); } else { AP_LOG_DEBUG(rec, "Can't store from memcached: %s(len=%d) - %s(%d)", image_hash, vlob_length,memcached_strerror(conf->memc, r), r); } } WRITE_DATA: AP_LOG_VERBOSE(rec, "-- Creating resize buckets."); // Drop all content and headers related. while(!APR_BRIGADE_EMPTY(in_bb)) { apr_bucket* b = APR_BRIGADE_FIRST(in_bb); apr_bucket_delete(b); } rec->eos_sent = 0; rec->clength = 0; unset_header(rec, "Content-Length"); unset_header(rec, "Content-Encoding"); unset_header(rec, "Last-Modified"); unset_header(rec, "ETag"); // Start resize bucket. { apr_off_t remain = vlob_length, offset = 0; while(remain>0) { apr_off_t bs = (remain<AP_MAX_SENDFILE)? 
remain: AP_MAX_SENDFILE; char* heap = (char*)malloc(bs); memcpy(heap, vlob+offset, bs); apr_bucket* b = apr_bucket_heap_create(heap, bs, free, in_bb-> bucket_alloc); APR_BRIGADE_INSERT_TAIL(in_bb, b); remain -= bs; offset += bs; } APR_BRIGADE_INSERT_TAIL(in_bb, apr_bucket_eos_create(in_bb->bucket_alloc)); ap_set_content_length(rec, vlob_length); if(cache_hit) free(vlob); } AP_LOG_VERBOSE(rec, "-- Create done."); PASS_THRU: AP_LOG_VERBOSE(rec, "-- Filter done."); ap_remove_output_filter(f); return ap_pass_brigade(f->next, in_bb); }
/*
 * Byterange output filter (older variant): given a fully-buffered response
 * brigade with known total length, carves out the byte range(s) requested
 * via the Range header.  Single ranges get a Content-Range header; multiple
 * ranges are wrapped in a multipart/byteranges body.
 */
AP_CORE_DECLARE_NONSTD(apr_status_t) ap_byterange_filter(ap_filter_t *f,
                                                         apr_bucket_brigade *bb)
{
#define MIN_LENGTH(len1, len2) ((len1 > len2) ? len2 : len1)
    request_rec *r = f->r;
    conn_rec *c = r->connection;
    byterange_ctx *ctx;
    apr_bucket *e;
    apr_bucket_brigade *bsend;
    apr_off_t range_start;
    apr_off_t range_end;
    char *current;
    apr_off_t clength = 0;
    apr_status_t rv;
    int found = 0;
    int num_ranges;

    /* Iterate through the brigade until reaching EOS or a bucket with
     * unknown length, summing the total content length. */
    for (e = APR_BRIGADE_FIRST(bb);
         (e != APR_BRIGADE_SENTINEL(bb) && !APR_BUCKET_IS_EOS(e)
          && e->length != (apr_size_t)-1);
         e = APR_BUCKET_NEXT(e)) {
        clength += e->length;
    }

    /* Don't attempt to do byte range work if this brigade doesn't
     * contain an EOS, or if any of the buckets has an unknown length;
     * this avoids the cases where it is expensive to perform
     * byteranging (i.e. may require arbitrary amounts of memory).
     * NOTE(review): if bb were empty, e would be the brigade sentinel
     * here and APR_BUCKET_IS_EOS(e) would inspect it as if it were a
     * real bucket -- presumably callers never pass an empty brigade;
     * verify. */
    if (!APR_BUCKET_IS_EOS(e) || clength <= 0) {
        ap_remove_output_filter(f);
        return ap_pass_brigade(f->next, bb);
    }

    num_ranges = ap_set_byterange(r);

    /* We have nothing to do, get out of the way. */
    if (num_ranges == 0) {
        ap_remove_output_filter(f);
        return ap_pass_brigade(f->next, bb);
    }

    /* NOTE(review): unlike the newer variant of this filter, there is no
     * upper bound on the number of ranges processed here, so a request
     * with very many ranges can be expensive (cf. the max_ranges guard
     * added upstream for CVE-2011-3192) -- confirm whether that matters
     * in this deployment. */
    ctx = apr_pcalloc(r->pool, sizeof(*ctx));
    ctx->num_ranges = num_ranges;
    /* create a brigade in case we never call ap_save_brigade() */
    ctx->bb = apr_brigade_create(r->pool, c->bucket_alloc);

    if (ctx->num_ranges > 1) {
        /* Is ap_make_content_type required here? */
        const char *orig_ct = ap_make_content_type(r, r->content_type);
        /* Multipart boundary built from request time + pid. */
        ctx->boundary = apr_psprintf(r->pool, "%" APR_UINT64_T_HEX_FMT "%lx",
                                     (apr_uint64_t)r->request_time, (long) getpid());

        ap_set_content_type(r, apr_pstrcat(r->pool, "multipart",
                                           use_range_x(r) ? "/x-" : "/",
                                           "byteranges; boundary=",
                                           ctx->boundary, NULL));

        if (strcasecmp(orig_ct, NO_CONTENT_TYPE)) {
            ctx->bound_head = apr_pstrcat(r->pool,
                                          CRLF "--", ctx->boundary,
                                          CRLF "Content-type: ",
                                          orig_ct,
                                          CRLF "Content-range: bytes ",
                                          NULL);
        }
        else {
            /* if we have no type for the content, do our best */
            ctx->bound_head = apr_pstrcat(r->pool,
                                          CRLF "--", ctx->boundary,
                                          CRLF "Content-range: bytes ",
                                          NULL);
        }
        ap_xlate_proto_to_ascii(ctx->bound_head, strlen(ctx->bound_head));
    }

    /* this brigade holds what we will be sending */
    bsend = apr_brigade_create(r->pool, c->bucket_alloc);

    /* Walk the comma-separated Range spec; parse_byterange() resolves
     * each item against clength.
     * NOTE(review): rv is apr_status_t but is compared against -1 below;
     * parse_byterange apparently uses -1 as a "syntactically invalid,
     * skip this item" sentinel -- confirm against its definition. */
    while ((current = ap_getword(r->pool, &r->range, ','))
           && (rv = parse_byterange(current, clength, &range_start,
                                    &range_end))) {
        apr_bucket *e2;
        apr_bucket *ec;

        if (rv == -1) {
            continue;
        }

        /* These calls to apr_brigage_partition should only fail in
         * pathological cases, e.g. a file being truncated whilst
         * being served. */
        if ((rv = apr_brigade_partition(bb, range_start, &ec)) != APR_SUCCESS) {
            ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, PARTITION_ERR_FMT,
                          range_start, clength);
            continue;
        }
        if ((rv = apr_brigade_partition(bb, range_end+1, &e2)) != APR_SUCCESS) {
            ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, PARTITION_ERR_FMT,
                          range_end+1, clength);
            continue;
        }

        found = 1;

        /* For single range requests, we must produce Content-Range header.
         * Otherwise, we need to produce the multipart boundaries. */
        if (ctx->num_ranges == 1) {
            apr_table_setn(r->headers_out, "Content-Range",
                           apr_psprintf(r->pool, "bytes " BYTERANGE_FMT,
                                        range_start, range_end, clength));
        }
        else {
            char *ts;

            e = apr_bucket_pool_create(ctx->bound_head, strlen(ctx->bound_head),
                                       r->pool, c->bucket_alloc);
            APR_BRIGADE_INSERT_TAIL(bsend, e);

            ts = apr_psprintf(r->pool, BYTERANGE_FMT CRLF CRLF,
                              range_start, range_end, clength);
            ap_xlate_proto_to_ascii(ts, strlen(ts));
            e = apr_bucket_pool_create(ts, strlen(ts), r->pool,
                                       c->bucket_alloc);
            APR_BRIGADE_INSERT_TAIL(bsend, e);
        }

        /* Copy the bucket run [ec, e2) for this range into bsend; the
         * originals stay in bb so later (overlapping) ranges still work. */
        do {
            apr_bucket *foo;
            const char *str;
            apr_size_t len;

            if (apr_bucket_copy(ec, &foo) != APR_SUCCESS) {
                /* As above; this should not fail since the bucket has
                 * a known length, but just to be sure, this takes
                 * care of uncopyable buckets that do somehow manage
                 * to slip through. */
                /* XXX: check for failure? */
                apr_bucket_read(ec, &str, &len, APR_BLOCK_READ);
                apr_bucket_copy(ec, &foo);
            }
            APR_BRIGADE_INSERT_TAIL(bsend, foo);
            ec = APR_BUCKET_NEXT(ec);
        } while (ec != e2);
    }

    if (found == 0) {
        /* No valid range matched: answer 416 via an error bucket. */
        ap_remove_output_filter(f);
        r->status = HTTP_OK;
        /* bsend is assumed to be empty if we get here. */
        e = ap_bucket_error_create(HTTP_RANGE_NOT_SATISFIABLE, NULL,
                                   r->pool, c->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(bsend, e);
        e = apr_bucket_eos_create(c->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(bsend, e);
        return ap_pass_brigade(f->next, bsend);
    }

    if (ctx->num_ranges > 1) {
        char *end;

        /* add the final boundary */
        end = apr_pstrcat(r->pool, CRLF "--", ctx->boundary, "--" CRLF, NULL);
        ap_xlate_proto_to_ascii(end, strlen(end));
        e = apr_bucket_pool_create(end, strlen(end), r->pool, c->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(bsend, e);
    }

    e = apr_bucket_eos_create(c->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(bsend, e);

    /* we're done with the original content - all of our data is in bsend. */
    apr_brigade_cleanup(bb);

    /* send our multipart output */
    return ap_pass_brigade(f->next, bsend);
}
/*
 * mod_bucketeer output filter: carves text/* response data into separate
 * buckets at the configured delimiter characters.  The bucket delimiter
 * just splits; the flush delimiter additionally inserts a FLUSH bucket;
 * the pass delimiter pushes everything accumulated so far down the filter
 * chain.  Accumulated buckets live in ctx->bb until EOS or a pass.
 */
static apr_status_t bucketeer_out_filter(ap_filter_t *f, apr_bucket_brigade *bb)
{
    request_rec *r = f->r;
    bucketeer_ctx_t *ctx = f->ctx;
    bucketeer_filter_config_t *conf =
        ap_get_module_config(r->server->module_config, &bucketeer_module);
    apr_bucket *bkt;

    /* First invocation for this response: decide whether we apply at all,
     * then set up the accumulation brigade. */
    if (ctx == NULL) {
        /* Only text/* responses are bucketeered. */
        if (!r->content_type || strncmp(r->content_type, "text/", 5)) {
            ap_remove_output_filter(f);
            return ap_pass_brigade(f->next, bb);
        }

        ctx = f->ctx = apr_pcalloc(f->r->pool, sizeof(*ctx));
        ctx->bb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);

        /* Re-bucketing invalidates any advertised length. */
        apr_table_unset(f->r->headers_out, "Content-Length");
    }

    for (bkt = APR_BRIGADE_FIRST(bb);
         bkt != APR_BRIGADE_SENTINEL(bb);
         bkt = APR_BUCKET_NEXT(bkt)) {
        const char *raw;
        apr_size_t n, pos, seg_start;

        if (APR_BUCKET_IS_EOS(bkt)) {
            /* Move the EOS into our brigade and send everything we have. */
            APR_BUCKET_REMOVE(bkt);
            APR_BRIGADE_INSERT_TAIL(ctx->bb, bkt);
            return ap_pass_brigade(f->next, ctx->bb);
        }

        if (APR_BUCKET_IS_FLUSH(bkt)) {
            /* Ignore upstream flush buckets; we decide what to stream. */
            continue;
        }

        if (APR_BUCKET_IS_METADATA(bkt)) {
            /* Other metadata is duplicated into the accumulator. */
            apr_bucket *dup;
            apr_bucket_copy(bkt, &dup);
            APR_BRIGADE_INSERT_TAIL(ctx->bb, dup);
            continue;
        }

        /* Data bucket: scan for delimiter characters. */
        apr_bucket_read(bkt, &raw, &n, APR_BLOCK_READ);
        if (n == 0) {
            continue;
        }

        seg_start = 0;
        for (pos = 0; pos < n; pos++) {
            char ch = raw[pos];

            if (ch != conf->flushdelimiter
                && ch != conf->bucketdelimiter
                && ch != conf->passdelimiter) {
                continue;
            }

            /* Emit the segment preceding the delimiter, if non-empty. */
            if (pos > seg_start) {
                apr_bucket *seg = apr_bucket_pool_create(
                    apr_pmemdup(f->r->pool, &raw[seg_start], pos - seg_start),
                    pos - seg_start, f->r->pool, f->c->bucket_alloc);
                APR_BRIGADE_INSERT_TAIL(ctx->bb, seg);
            }
            seg_start = pos + 1;

            if (ch == conf->flushdelimiter) {
                APR_BRIGADE_INSERT_TAIL(ctx->bb,
                    apr_bucket_flush_create(f->c->bucket_alloc));
            }
            if (ch == conf->passdelimiter) {
                apr_status_t rv = ap_pass_brigade(f->next, ctx->bb);
                if (rv) {
                    return rv;
                }
            }
        }

        /* XXX: really should append this to the next 'real' bucket */
        if (seg_start < n) {
            apr_bucket *seg = apr_bucket_pool_create(
                apr_pmemdup(f->r->pool, &raw[seg_start], n - seg_start),
                n - seg_start, f->r->pool, f->c->bucket_alloc);
            APR_BRIGADE_INSERT_TAIL(ctx->bb, seg);
        }
    }

    return APR_SUCCESS;
}
/*
 * Byterange output filter (newer variant): resolves the Range header into
 * an index array via ap_set_byterange(), copies each range out of the
 * buffered response with copy_brigade_range(), and emits either a single
 * Content-Range response or a multipart/byteranges body.  A configurable
 * max_ranges cap makes the filter step aside for abusive requests.
 */
AP_CORE_DECLARE_NONSTD(apr_status_t) ap_byterange_filter(ap_filter_t *f,
                                                         apr_bucket_brigade *bb)
{
    request_rec *r = f->r;
    conn_rec *c = r->connection;
    apr_bucket *e;
    apr_bucket_brigade *bsend;
    apr_bucket_brigade *tmpbb;
    apr_off_t range_start;
    apr_off_t range_end;
    apr_off_t clength = 0;
    apr_status_t rv;
    int found = 0;
    int num_ranges;
    char *boundary = NULL;
    char *bound_head = NULL;
    apr_array_header_t *indexes;
    indexes_t *idx;
    int i;
    int original_status;
    int max_ranges = get_max_ranges(r);

    /*
     * Iterate through the brigade until reaching EOS or a bucket with
     * unknown length, summing the total content length.
     */
    for (e = APR_BRIGADE_FIRST(bb);
         (e != APR_BRIGADE_SENTINEL(bb) && !APR_BUCKET_IS_EOS(e)
          && e->length != (apr_size_t)-1);
         e = APR_BUCKET_NEXT(e)) {
        clength += e->length;
    }

    /*
     * Don't attempt to do byte range work if this brigade doesn't
     * contain an EOS, or if any of the buckets has an unknown length;
     * this avoids the cases where it is expensive to perform
     * byteranging (i.e. may require arbitrary amounts of memory).
     */
    if (!APR_BUCKET_IS_EOS(e) || clength <= 0) {
        ap_remove_output_filter(f);
        return ap_pass_brigade(f->next, bb);
    }

    original_status = r->status;
    num_ranges = ap_set_byterange(r, clength, &indexes);

    /* We have nothing to do, get out of the way.  The max_ranges cap also
     * bails out here, restoring the pre-ap_set_byterange status. */
    if (num_ranges == 0 || (max_ranges >= 0 && num_ranges > max_ranges)) {
        r->status = original_status;
        ap_remove_output_filter(f);
        return ap_pass_brigade(f->next, bb);
    }

    /* this brigade holds what we will be sending */
    bsend = apr_brigade_create(r->pool, c->bucket_alloc);

    /* Negative num_ranges: the request is unsatisfiable (per the send_416
     * helper's name -- 416 Range Not Satisfiable). */
    if (num_ranges < 0)
        return send_416(f, bsend);

    if (num_ranges > 1) {
        /* Is ap_make_content_type required here? */
        const char *orig_ct = ap_make_content_type(r, r->content_type);
        /* Multipart boundary built from request time + connection id. */
        boundary = apr_psprintf(r->pool, "%" APR_UINT64_T_HEX_FMT "%lx",
                                (apr_uint64_t)r->request_time, c->id);

        ap_set_content_type(r, apr_pstrcat(r->pool, "multipart",
                                           use_range_x(r) ? "/x-" : "/",
                                           "byteranges; boundary=",
                                           boundary, NULL));

        if (strcasecmp(orig_ct, NO_CONTENT_TYPE)) {
            bound_head = apr_pstrcat(r->pool,
                                     CRLF "--", boundary,
                                     CRLF "Content-type: ",
                                     orig_ct,
                                     CRLF "Content-range: bytes ",
                                     NULL);
        }
        else {
            /* if we have no type for the content, do our best */
            bound_head = apr_pstrcat(r->pool,
                                     CRLF "--", boundary,
                                     CRLF "Content-range: bytes ",
                                     NULL);
        }
        ap_xlate_proto_to_ascii(bound_head, strlen(bound_head));
    }

    tmpbb = apr_brigade_create(r->pool, c->bucket_alloc);

    idx = (indexes_t *)indexes->elts;
    for (i = 0; i < indexes->nelts; i++, idx++) {
        range_start = idx->start;
        range_end = idx->end;

        rv = copy_brigade_range(bb, tmpbb, range_start, range_end);
        if (rv != APR_SUCCESS ) {
            ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
                          "copy_brigade_range() failed [%" APR_OFF_T_FMT
                          "-%" APR_OFF_T_FMT ",%" APR_OFF_T_FMT "]",
                          range_start, range_end, clength);
            continue;
        }
        found = 1;

        /*
         * For single range requests, we must produce Content-Range header.
         * Otherwise, we need to produce the multipart boundaries.
         */
        if (num_ranges == 1) {
            apr_table_setn(r->headers_out, "Content-Range",
                           apr_psprintf(r->pool, "bytes " BYTERANGE_FMT,
                                        range_start, range_end, clength));
        }
        else {
            char *ts;

            e = apr_bucket_pool_create(bound_head, strlen(bound_head),
                                       r->pool, c->bucket_alloc);
            APR_BRIGADE_INSERT_TAIL(bsend, e);

            ts = apr_psprintf(r->pool, BYTERANGE_FMT CRLF CRLF,
                              range_start, range_end, clength);
            ap_xlate_proto_to_ascii(ts, strlen(ts));
            e = apr_bucket_pool_create(ts, strlen(ts), r->pool,
                                       c->bucket_alloc);
            APR_BRIGADE_INSERT_TAIL(bsend, e);
        }

        APR_BRIGADE_CONCAT(bsend, tmpbb);
        if (i && !(i & 0x1F)) {
            /*
             * Every now and then (every 32 ranges), pass what we have down
             * the filter chain.  In this case, the content-length filter
             * cannot calculate and set the content length and we must
             * remove any Content-Length header already present.
             */
            apr_table_unset(r->headers_out, "Content-Length");
            if ((rv = ap_pass_brigade(f->next, bsend)) != APR_SUCCESS)
                return rv;
            apr_brigade_cleanup(bsend);
        }
    }

    if (found == 0) {
        /* bsend is assumed to be empty if we get here. */
        return send_416(f, bsend);
    }

    if (num_ranges > 1) {
        char *end;

        /* add the final boundary */
        end = apr_pstrcat(r->pool, CRLF "--", boundary, "--" CRLF, NULL);
        ap_xlate_proto_to_ascii(end, strlen(end));
        e = apr_bucket_pool_create(end, strlen(end), r->pool, c->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(bsend, e);
    }

    e = apr_bucket_eos_create(c->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(bsend, e);

    /* we're done with the original content - all of our data is in bsend. */
    apr_brigade_cleanup(bb);
    apr_brigade_destroy(tmpbb);

    /* send our multipart output */
    return ap_pass_brigade(f->next, bsend);
}
// // Output filter. // static apr_status_t reproxy_output_filter(ap_filter_t* f, apr_bucket_brigade* in_bb) { request_rec* rec =f->r; const char* reproxy_url; AP_LOG_VERBOSE(rec, "Incoming %s.", __FUNCTION__); // Pass thru by request types. if(rec->status!=HTTP_OK || rec->main!=NULL || rec->header_only || (rec->handler!= NULL && strcmp(rec->handler, "default-handler") == 0)) goto PASS_THRU; AP_LOG_VERBOSE(rec, "-- Checking responce headers."); // Obtain and erase x-reproxy-url header or pass through. reproxy_url = get_and_unset_header(rec->headers_out, X_REPROXY); if(reproxy_url== NULL || reproxy_url[0]=='\0') { reproxy_url = get_and_unset_header(rec->err_headers_out, X_REPROXY); } if(reproxy_url==NULL || reproxy_url[0]=='\0') goto PASS_THRU; AP_LOG_VERBOSE(rec, "-- Creating reproxy buckets."); // Drop all content and headers related. while(!APR_BRIGADE_EMPTY(in_bb)) { apr_bucket* b = APR_BRIGADE_FIRST(in_bb); apr_bucket_delete(b); } rec->eos_sent = 0; rec->clength = 0; unset_header(rec, "Content-Length"); //unset_header(rec, "Content-Type"); unset_header(rec, "Content-Encoding"); unset_header(rec, "Last-Modified"); unset_header(rec, "ETag"); // Start reproxy bucket. { apr_off_t content_length = 0; apr_bucket* b = curl_bucket_create(reproxy_url, content_length, in_bb->bucket_alloc, rec); if(b) { APR_BRIGADE_INSERT_TAIL(in_bb, b); while(content_length>0) { AP_LOG_VERBOSE(rec, " curl_next_bucket_create(b, %llu)", content_length); APR_BRIGADE_INSERT_TAIL(in_bb, curl_next_bucket_create(b, content_length)); } APR_BRIGADE_INSERT_TAIL(in_bb, curl_end_bucket_create(b)); APR_BRIGADE_INSERT_TAIL(in_bb, apr_bucket_eos_create(in_bb->bucket_alloc)); } else { AP_LOG_ERR(rec, "curl_bucket_create() failed - %d", rec->status); ap_send_error_response(rec, rec->status); } } AP_LOG_VERBOSE(rec, "-- Create done."); PASS_THRU: AP_LOG_VERBOSE(rec, "-- Filter done."); ap_remove_output_filter(f); return ap_pass_brigade(f->next, in_bb); }
/*
 * Output filter that throttles response bandwidth.
 *
 * The limit (in KiB/s) is read from the "rate-limit" subprocess_env note on
 * first invocation.  The filter is a two-state machine driven by special
 * marker buckets (AP_RL_BUCKET_IS_START / AP_RL_BUCKET_IS_END):
 *
 *   RATE_LIMIT     - send ctx->chunk_size bytes, then sleep one
 *                    RATE_INTERVAL_MS interval, repeat.
 *   RATE_FULLSPEED - pass everything through unthrottled until an END
 *                    marker is seen.
 *
 * When a marker is found mid-brigade, the remainder of the brigade (marker
 * through last bucket) is unspliced into ctx->holdingbb, processed on the
 * next outer-loop iteration under the new state.  RATE_ERROR is entered on
 * any downstream failure or client abort and stops all processing.
 */
static apr_status_t
rate_limit_filter(ap_filter_t *f, apr_bucket_brigade *input_bb)
{
    apr_status_t rv = APR_SUCCESS;
    rl_ctx_t *ctx = f->ctx;
    apr_bucket *fb;
    int do_sleep = 0; /* skip the sleep before the very first chunk */
    apr_bucket_alloc_t *ba = f->r->connection->bucket_alloc;
    apr_bucket_brigade *bb = input_bb;

    /* Client already gone: drop the data and report the aborted connection. */
    if (f->c->aborted) {
        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, f->r, APLOGNO(01454)
                      "rl: conn aborted");
        apr_brigade_cleanup(bb);
        return APR_ECONNABORTED;
    }

    if (ctx == NULL) {
        const char *rl = NULL;
        int ratelimit;

        /* no subrequests. */
        if (f->r->main != NULL) {
            ap_remove_output_filter(f);
            return ap_pass_brigade(f->next, bb);
        }

        rl = apr_table_get(f->r->subprocess_env, "rate-limit");

        if (rl == NULL) {
            ap_remove_output_filter(f);
            return ap_pass_brigade(f->next, bb);
        }

        /* rl is in kilo bytes / second */
        ratelimit = atoi(rl) * 1024;
        if (ratelimit <= 0) {
            /* remove ourselves */
            ap_remove_output_filter(f);
            return ap_pass_brigade(f->next, bb);
        }

        /* first run, init stuff */
        ctx = apr_palloc(f->r->pool, sizeof(rl_ctx_t));
        f->ctx = ctx;
        ctx->state = RATE_LIMIT;
        ctx->speed = ratelimit;

        /* calculate how many bytes / interval we want to send */
        /* speed is bytes / second, so, how many (speed / 1000 % interval) */
        ctx->chunk_size = (ctx->speed / (1000 / RATE_INTERVAL_MS));
        ctx->tmpbb = apr_brigade_create(f->r->pool, ba);
        ctx->holdingbb = apr_brigade_create(f->r->pool, ba);
    }

    /* Keep going while there is data either in the incoming brigade or left
     * over in holdingbb from a previous state switch. */
    while (ctx->state != RATE_ERROR &&
           (!APR_BRIGADE_EMPTY(bb) || !APR_BRIGADE_EMPTY(ctx->holdingbb))) {
        apr_bucket *e;

        if (!APR_BRIGADE_EMPTY(ctx->holdingbb)) {
            APR_BRIGADE_CONCAT(bb, ctx->holdingbb);
        }

        while (ctx->state == RATE_FULLSPEED && !APR_BRIGADE_EMPTY(bb)) {
            /* Find where we 'stop' going full speed.
             * Everything from the END marker to the brigade's tail is moved
             * to holdingbb for later rate-limited processing; note the inner
             * 'f' intentionally shadows the filter parameter here. */
            for (e = APR_BRIGADE_FIRST(bb);
                 e != APR_BRIGADE_SENTINEL(bb);
                 e = APR_BUCKET_NEXT(e)) {
                if (AP_RL_BUCKET_IS_END(e)) {
                    apr_bucket *f;
                    f = APR_RING_LAST(&bb->list);
                    APR_RING_UNSPLICE(e, f, link);
                    APR_RING_SPLICE_TAIL(&ctx->holdingbb->list, e, f,
                                         apr_bucket, link);
                    ctx->state = RATE_LIMIT;
                    break;
                }
            }

            if (f->c->aborted) {
                apr_brigade_cleanup(bb);
                ctx->state = RATE_ERROR;
                break;
            }

            /* Flush so the data actually leaves the core buffers now. */
            fb = apr_bucket_flush_create(ba);
            APR_BRIGADE_INSERT_TAIL(bb, fb);
            rv = ap_pass_brigade(f->next, bb);

            if (rv != APR_SUCCESS) {
                ctx->state = RATE_ERROR;
                ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, f->r,
                              APLOGNO(01455)
                              "rl: full speed brigade pass failed.");
            }
        }

        while (ctx->state == RATE_LIMIT && !APR_BRIGADE_EMPTY(bb)) {
            /* Mirror image of the loop above: a START marker switches us
             * back to full speed; the tail after it is parked in holdingbb. */
            for (e = APR_BRIGADE_FIRST(bb);
                 e != APR_BRIGADE_SENTINEL(bb);
                 e = APR_BUCKET_NEXT(e)) {
                if (AP_RL_BUCKET_IS_START(e)) {
                    apr_bucket *f;
                    f = APR_RING_LAST(&bb->list);
                    APR_RING_UNSPLICE(e, f, link);
                    APR_RING_SPLICE_TAIL(&ctx->holdingbb->list, e, f,
                                         apr_bucket, link);
                    ctx->state = RATE_FULLSPEED;
                    break;
                }
            }

            /* Send the remaining (pre-marker) data one chunk per interval. */
            while (!APR_BRIGADE_EMPTY(bb)) {
                apr_bucket *stop_point;
                apr_off_t len = 0;

                if (f->c->aborted) {
                    apr_brigade_cleanup(bb);
                    ctx->state = RATE_ERROR;
                    break;
                }

                /* Sleep BETWEEN chunks, not before the first one. */
                if (do_sleep) {
                    apr_sleep(RATE_INTERVAL_MS * 1000); /* ms -> microseconds */
                }
                else {
                    do_sleep = 1;
                }

                /* NOTE(review): len is computed but never used afterwards. */
                apr_brigade_length(bb, 1, &len);

                /* Split off exactly chunk_size bytes; APR_INCOMPLETE just
                 * means the brigade holds less than one full chunk. */
                rv = apr_brigade_partition(bb, ctx->chunk_size, &stop_point);
                if (rv != APR_SUCCESS && rv != APR_INCOMPLETE) {
                    ctx->state = RATE_ERROR;
                    ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, f->r,
                                  APLOGNO(01456) "rl: partition failed.");
                    break;
                }

                /* Move [first bucket, stop_point) into tmpbb for sending. */
                if (stop_point != APR_BRIGADE_SENTINEL(bb)) {
                    apr_bucket *f;
                    apr_bucket *e = APR_BUCKET_PREV(stop_point);
                    f = APR_RING_FIRST(&bb->list);
                    APR_RING_UNSPLICE(f, e, link);
                    APR_RING_SPLICE_HEAD(&ctx->tmpbb->list, f, e,
                                         apr_bucket, link);
                }
                else {
                    APR_BRIGADE_CONCAT(ctx->tmpbb, bb);
                }

                /* Flush each chunk so pacing is enforced on the wire. */
                fb = apr_bucket_flush_create(ba);
                APR_BRIGADE_INSERT_TAIL(ctx->tmpbb, fb);

#if 0
                brigade_dump(f->r, ctx->tmpbb);
                brigade_dump(f->r, bb);
#endif

                rv = ap_pass_brigade(f->next, ctx->tmpbb);
                apr_brigade_cleanup(ctx->tmpbb);

                if (rv != APR_SUCCESS) {
                    ctx->state = RATE_ERROR;
                    ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, f->r,
                                  APLOGNO(01457)
                                  "rl: brigade pass failed.");
                    break;
                }
            }
        }
    }

    return rv;
}
/*
 * Apache output filter. Return values:
 *   HTTP_*               HTTP status code for errors
 *   ap_pass_brigade()    to pass request down the filter chain
 *
 * This function parses the http-response headers from a backend system.
 * We want to find out if the response-header has a
 *  a) Set-Cookie header which should be stored to the session store
 *  b) Set-Cookie header which is configured as "free" cookie
 *  c) Set-Cookie header which has a special meaning to us (Auth=ok)
 *
 * All session-store access happens under but_mutex.  Error paths funnel
 * through the single out_unlock label so the mutex cannot be leaked by a
 * forgotten unlock on some branch (the original duplicated the unlock call
 * before every return, with inconsistent unlock-vs-log ordering).
 */
static apr_status_t but_output_filter(ap_filter_t *f, apr_bucket_brigade *bb_in)
{
	request_rec *r = f->r;
	mod_but_server_t *config;
	session_t session;
	cookie_res *cr;
	apr_status_t status;

	config = ap_get_module_config(r->server->module_config, &but_module);
	if (config == NULL) {
		ERRLOG_CRIT("Could not get configuration from apache");
		return HTTP_INTERNAL_SERVER_ERROR;
	}

	/* Module disabled: nothing to filter, hand the brigade on untouched. */
	if (!config->enabled) {
		return ap_pass_brigade(f->next, bb_in);
	}

	/* The session store is shared across processes. */
	if (apr_global_mutex_lock(but_mutex) != APR_SUCCESS) {
		ERRLOG_CRIT("Could not acquire mutex.");
		return HTTP_INTERNAL_SERVER_ERROR;
	}

	but_session_init(&session, r, config);

	/* get session from request notes (set by the input side of the module) */
	{
		const char *indexstr = apr_table_get(r->notes, "BUTSESS");
		if (indexstr) {
			/*OPEN*/
			if (but_session_open(&session, atoi(indexstr)) != STATUS_OK) {
				ERRLOG_CRIT("Session not found!");
				/* XXX this may happen in some race conditions. Handle gracefully. */
				status = HTTP_INTERNAL_SERVER_ERROR;
				goto out_unlock;
			}
		}
	}

	/*
	 * If no session was found for this response, then this is a free URL and
	 * we have no way to store cookies. Skip cookie filtering.
	 */
	if (!but_session_isnull(&session)) {
		/*
		 * Do Header Parsing for all Set-Cookie Response Headers. We are looking for
		 *  a) Session cookie
		 *  b) Free cookies
		 *  c) Service list cookies
		 *  d) Other cookies
		 */
		cr = apr_pcalloc(r->pool, sizeof(cookie_res));
		if (!cr) {
			ERRLOG_CRIT("Out of memory!");
			status = HTTP_INTERNAL_SERVER_ERROR;
			goto out_unlock;
		}
		cr->r = r;
		cr->session = &session;
		cr->status = STATUS_OK;
		cr->headers = apr_table_make(r->pool, 0);

		/*SET*/
		apr_table_do(mod_but_filter_response_cookies_cb, cr, r->headers_out,
		             "Set-Cookie", NULL);
		if (cr->status != STATUS_OK) {
			if (cr->status == STATUS_ESHMFULL) {
				/* Session store full: redirect the client to the error page. */
				status = mod_but_redirect_to_shm_error(r, config);
				goto out_unlock;
			}
			ERRLOG_CRIT("Error filtering the response cookies!");
			status = HTTP_INTERNAL_SERVER_ERROR;
			goto out_unlock;
		}

		/* Remove all Set-Cookie headers from response. */
		apr_table_unset(r->headers_out, "Set-Cookie");
		apr_table_unset(r->err_headers_out, "Set-Cookie");

		/* Add selected Set-Cookie headers back into r->headers_out. */
		apr_table_do(but_add_to_headers_out_cb, r, cr->headers, NULL);

		/*
		 * If iteration detected a valid LOGON=ok Set-Cookie header, cr->must_renew is set.
		 */
		if (cr->must_renew) {
			const char *session_handle_str;

			ERRLOG_INFO("=============================== START RENEW SESSION ====================================");
			ERRLOG_INFO("Renewing session after login.");
			/*RENEW*/
			status = but_session_renew(&session);
			if (status != STATUS_OK) {
				if (status == STATUS_ESHMFULL) {
					status = mod_but_redirect_to_shm_error(r, config);
					goto out_unlock;
				}
				ERRLOG_INFO("Error renewing session");
				status = HTTP_INTERNAL_SERVER_ERROR;
				goto out_unlock;
			}

			if (but_add_session_cookie_to_headers(r, config, r->headers_out, &session) != STATUS_OK) {
				status = HTTP_INTERNAL_SERVER_ERROR;
				goto out_unlock;
			}

			/*
			 * renew_mod_but_session returned the new session index we have to update in r->notes.
			 */
			session_handle_str = apr_itoa(r->pool, session.handle);
			if (!session_handle_str) {
				ERRLOG_CRIT("Out of memory!");
				status = HTTP_INTERNAL_SERVER_ERROR;
				goto out_unlock;
			}
			apr_table_set(r->notes, "BUTSESS", session_handle_str);

			// REDIRECT TO MOD_BUT_REDIRECT IF ORIG_URL HANDLING IS DISABLED
			if (!config->but_config_enabled_return_to_orig_url) {
				ERRLOG_INFO("REDIRECT TO ORIG URL IS DISABLED: REDIRECT TO MOD_BUT_REDIRECT [%s]", session.data->url);
				ERRLOG_INFO("Redirect to MOD_BUT_REDIRECT if LOGON=ok");
				r->status = mod_but_redirect_to_relurl(r, session.data->redirect_url_after_login);
			}
		} /* must renew */
	} /* have session */

	/* Success: release the store, run exactly once, pass the data on. */
	apr_global_mutex_unlock(but_mutex);
	ap_remove_output_filter(f);
	return ap_pass_brigade(f->next, bb_in);

out_unlock:
	apr_global_mutex_unlock(but_mutex);
	return status;
}