/* Finalize the header set of an HTTP/2 request: enforce the mandatory
 * :authority -> "Host" mapping (RFC 7540, ch. 8.1.2.3), derive the body
 * length / chunked transfer mode, and determine the push policy.
 * Returns APR_EINVAL when headers were already ended or a given
 * Content-Length cannot be parsed, APR_BADARG when :authority is missing.
 */
apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool, int eos, int push)
{
    const char *clen;
    const char *trailer;

    if (req->eoh) {
        return APR_EINVAL;
    }

    /* The "Host" header is always taken from :authority,
     * see RFC 7540, ch. 8.1.2.3 */
    if (!req->authority) {
        return APR_BADARG;
    }
    apr_table_setn(req->headers, "Host", req->authority);

    clen = apr_table_get(req->headers, "Content-Length");
    if (clen) {
        if (inspect_clen(req, clen) != APR_SUCCESS) {
            ap_log_perror(APLOG_MARK, APLOG_WARNING, APR_EINVAL, pool,
                          APLOGNO(02959)
                          "h2_request(%d): content-length value not parsed: %s",
                          req->id, clen);
            return APR_EINVAL;
        }
    }
    else {
        /* no content-length given */
        req->content_length = -1;
        if (!eos) {
            /* Length unknown and more data to come: simulate chunked
             * encoding for our HTTP/1.1 infrastructure, in case we have
             * "H2SerializeHeaders on" here. */
            req->chunked = 1;
            apr_table_mergen(req->headers, "Transfer-Encoding", "chunked");
        }
        else if (apr_table_get(req->headers, "Content-Type")) {
            /* A content-type is present but eos was already seen: no
             * more data will arrive, announce a zero length explicitly. */
            apr_table_setn(req->headers, "Content-Length", "0");
        }
    }

    req->eoh = 1;
    h2_push_policy_determine(req, pool, push);

    /* In the presence of trailers, force behaviour of chunked encoding */
    trailer = apr_table_get(req->headers, "Trailer");
    if (trailer && trailer[0]) {
        req->trailers = apr_table_make(pool, 5);
        if (!req->chunked) {
            req->chunked = 1;
            apr_table_mergen(req->headers, "Transfer-Encoding", "chunked");
        }
    }

    return APR_SUCCESS;
}
/* Finish the header phase of the HTTP/1 conversion for a stream.
 * Synthesizes a "Host" header from :authority when none was seen,
 * resolves chunked vs. zero Content-Length when eos arrives early,
 * hands the request to the task and, on eos, closes the conversion.
 */
apr_status_t h2_to_h1_end_headers(h2_to_h1 *to_h1, h2_task *task, int eos)
{
    conn_rec *c = h2_mplx_get_conn(to_h1->m);

    ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
                  "h2_to_h1(%ld-%d): end headers",
                  h2_mplx_get_id(to_h1->m), to_h1->stream_id);

    if (to_h1->eoh) {
        return APR_EINVAL;
    }

    if (!to_h1->seen_host) {
        /* A "Host" header is needed to make virtual hosts work
         * correctly; derive it from :authority. */
        if (!to_h1->authority) {
            return APR_BADARG;
        }
        apr_table_set(to_h1->headers, "Host", to_h1->authority);
    }

    if (to_h1->chunked) {
        if (eos) {
            /* Chunking was assumed, but the EOS is already there:
             * unmark chunking and set a definitive content-length. */
            to_h1->chunked = 0;
            apr_table_setn(to_h1->headers, "Content-Length", "0");
        }
        else {
            /* No content-length seen; request content must be passed
             * on in chunked form. */
            apr_table_mergen(to_h1->headers, "Transfer-Encoding", "chunked");
        }
    }

    h2_task_set_request(task, to_h1->method, to_h1->path,
                        to_h1->authority, to_h1->headers, eos);
    to_h1->eoh = 1;

    if (eos) {
        apr_status_t rv = h2_to_h1_close(to_h1);
        if (rv != APR_SUCCESS) {
            ap_log_cerror(APLOG_MARK, APLOG_WARNING, rv, c,
                          "h2_to_h1(%ld-%d): end headers, eos=%d",
                          h2_mplx_get_id(to_h1->m), to_h1->stream_id, eos);
        }
        return rv;
    }
    return APR_SUCCESS;
}
/* Append one HTTP/2 trailer field to req->trailers, unless it is one
 * of the trailers that must be ignored. The name is camel-cased for
 * the HTTP/1 side; values for repeated names are merged by APR.
 */
static apr_status_t add_h1_trailer(h2_request *req, apr_pool_t *pool,
                                   const char *name, size_t nlen,
                                   const char *value, size_t vlen)
{
    if (!h2_req_ignore_trailer(name, nlen)) {
        char *key = apr_pstrndup(pool, name, nlen);
        char *val = apr_pstrndup(pool, value, vlen);

        h2_util_camel_case_header(key, nlen);
        apr_table_mergen(req->trailers, key, val);
    }
    return APR_SUCCESS;
}
/* Translate one HTTP/2 header into req->headers for the HTTP/1 side:
 * ignorable headers are dropped, Cookie values are merged with "; "
 * (HTTP/2 sends them one by one), duplicate "Host" headers are
 * discarded, and everything else is camel-cased and merged in.
 */
static apr_status_t add_h1_header(h2_request *req, apr_pool_t *pool,
                                  const char *name, size_t nlen,
                                  const char *value, size_t vlen)
{
    char *key, *val;

    if (h2_req_ignore_header(name, nlen)) {
        return APR_SUCCESS;
    }
    if (H2_HD_MATCH_LIT("cookie", name, nlen)) {
        const char *prev = apr_table_get(req->headers, "cookie");
        if (prev) {
            /* Cookie header come separately in HTTP/2, but need
             * to be merged by "; " (instead of default ", ") */
            val = apr_pstrndup(pool, value, vlen);
            apr_table_setn(req->headers, "Cookie",
                           apr_psprintf(pool, "%s; %s", prev, val));
            return APR_SUCCESS;
        }
    }
    else if (H2_HD_MATCH_LIT("host", name, nlen)
             && apr_table_get(req->headers, "Host")) {
        return APR_SUCCESS; /* ignore duplicate */
    }

    key = apr_pstrndup(pool, name, nlen);
    val = apr_pstrndup(pool, value, vlen);
    h2_util_camel_case_header(key, nlen);
    apr_table_mergen(req->headers, key, val);

    return APR_SUCCESS;
}
/* mod_speling's fixup: when a requested file does not exist, scan the
 * containing directory for names that differ only by capitalization,
 * a single typo (per spdist()), or the extension (basename match).
 * Returns DECLINED when not applicable, HTTP_MOVED_PERMANENTLY for a
 * single unambiguous match, HTTP_MULTIPLE_CHOICES (via a
 * "variant-list" note) for several candidates, or OK.
 */
static int check_speling(request_rec *r)
{
    spconfig *cfg;
    char *good, *bad, *postgood, *url;
    apr_finfo_t dirent;
    int filoc, dotloc, urlen, pglen;
    apr_array_header_t *candidates = NULL;
    apr_dir_t *dir;

    cfg = ap_get_module_config(r->per_dir_config, &speling_module);
    if (!cfg->enabled) {
        return DECLINED;
    }

    /* We only want to worry about GETs */
    if (r->method_number != M_GET) {
        return DECLINED;
    }

    /* We've already got a file of some kind or another */
    if (r->finfo.filetype != APR_NOFILE) {
        return DECLINED;
    }

    /* Not a file request */
    if (r->proxyreq || !r->filename) {
        return DECLINED;
    }

    /* This is a sub request - don't mess with it */
    if (r->main) {
        return DECLINED;
    }

    /*
     * The request should end up looking like this:
     * r->uri: /correct-url/mispelling/more
     * r->filename: /correct-file/mispelling r->path_info: /more
     *
     * So we do this in steps. First break r->filename into two pieces
     */
    filoc = ap_rind(r->filename, '/');
    /*
     * Don't do anything if the request doesn't contain a slash, or
     * requests "/"
     */
    if (filoc == -1 || strcmp(r->uri, "/") == 0) {
        return DECLINED;
    }

    /* good = /correct-file */
    good = apr_pstrndup(r->pool, r->filename, filoc);
    /* bad = mispelling */
    bad = apr_pstrdup(r->pool, r->filename + filoc + 1);
    /* postgood = mispelling/more */
    postgood = apr_pstrcat(r->pool, bad, r->path_info, NULL);

    urlen = strlen(r->uri);
    pglen = strlen(postgood);

    /* Check to see if the URL pieces add up.
     * NOTE(review): this assumes pglen <= urlen; presumably the core
     * guarantees r->uri ends with filename+path_info here — confirm. */
    if (strcmp(postgood, r->uri + (urlen - pglen))) {
        return DECLINED;
    }

    /* url = /correct-url */
    url = apr_pstrndup(r->pool, r->uri, (urlen - pglen));

    /* Now open the directory and do ourselves a check... */
    if (apr_dir_open(&dir, good, r->pool) != APR_SUCCESS) {
        /* Oops, not a directory... */
        return DECLINED;
    }

    candidates = apr_array_make(r->pool, 2, sizeof(misspelled_file));

    /* Position of the first dot in the misspelled name (or its length
     * when there is none); used for the basename-match heuristic. */
    dotloc = ap_ind(bad, '.');
    if (dotloc == -1) {
        dotloc = strlen(bad);
    }

    while (apr_dir_read(&dirent, APR_FINFO_DIRENT, dir) == APR_SUCCESS) {
        sp_reason q;

        /*
         * If we end up with a "fixed" URL which is identical to the
         * requested one, we must have found a broken symlink or some such.
         * Do _not_ try to redirect this, it causes a loop!
         */
        if (strcmp(bad, dirent.name) == 0) {
            apr_dir_close(dir);
            return OK;
        }

        /*
         * miscapitalization errors are checked first (like, e.g., lower case
         * file, upper case request)
         */
        else if (strcasecmp(bad, dirent.name) == 0) {
            misspelled_file *sp_new;

            sp_new = (misspelled_file *) apr_array_push(candidates);
            sp_new->name = apr_pstrdup(r->pool, dirent.name);
            sp_new->quality = SP_MISCAPITALIZED;
        }

        /*
         * simple typing errors are checked next (like, e.g.,
         * missing/extra/transposed char)
         */
        else if ((cfg->check_case_only == 0)
                 && ((q = spdist(bad, dirent.name)) != SP_VERYDIFFERENT)) {
            misspelled_file *sp_new;

            sp_new = (misspelled_file *) apr_array_push(candidates);
            sp_new->name = apr_pstrdup(r->pool, dirent.name);
            sp_new->quality = q;
        }

        /*
         * The spdist() should have found the majority of the misspelled
         * requests. It is of questionable use to continue looking for
         * files with the same base name, but potentially of totally wrong
         * type (index.html <-> index.db).
         *
         * If you're using MultiViews, and have a file named foobar.html,
         * which you refer to as "foobar", and someone tried to access
         * "Foobar", without CheckBasenameMatch, mod_speling won't find it,
         * because it won't find anything matching that spelling.
         * With the extension-munging, it would locate "foobar.html".
         */
        else if ((cfg->check_case_only == 0)
                 && (cfg->check_basename_match == 1)) {
            /*
             * Okay... we didn't find anything. Now we take out the hard-core
             * power tools. There are several cases here. Someone might have
             * entered a wrong extension (.htm instead of .html or vice
             * versa) or the document could be negotiated. At any rate, now
             * we just compare stuff before the first dot. If it matches, we
             * figure we got us a match. This can result in wrong things if
             * there are files of different content types but the same prefix
             * (e.g. foo.gif and foo.html) This code will pick the first one
             * it finds. Better than a Not Found, though.
             */
            int entloc = ap_ind(dirent.name, '.');
            if (entloc == -1) {
                entloc = strlen(dirent.name);
            }

            if ((dotloc == entloc)
                && !strncasecmp(bad, dirent.name, dotloc)) {
                misspelled_file *sp_new;

                sp_new = (misspelled_file *) apr_array_push(candidates);
                sp_new->name = apr_pstrdup(r->pool, dirent.name);
                sp_new->quality = SP_VERYDIFFERENT;
            }
        }
    }
    apr_dir_close(dir);

    if (candidates->nelts != 0) {
        /* Wow... we found us a mispelling. Construct a fixed url */
        char *nuri;
        const char *ref;
        misspelled_file *variant = (misspelled_file *) candidates->elts;
        int i;

        ref = apr_table_get(r->headers_in, "Referer");

        /* Best candidates first (sorted by quality). */
        qsort((void *) candidates->elts, candidates->nelts,
              sizeof(misspelled_file), sort_by_quality);

        /*
         * Conditions for immediate redirection:
         * a) the first candidate was not found by stripping the suffix
         * AND b) there exists only one candidate OR the best match is not
         * ambiguous
         * then return a redirection right away.
         */
        if (variant[0].quality != SP_VERYDIFFERENT
            && (candidates->nelts == 1
                || variant[0].quality != variant[1].quality)) {

            nuri = ap_escape_uri(r->pool, apr_pstrcat(r->pool, url,
                                                      variant[0].name,
                                                      r->path_info, NULL));
            /* Preserve the original query string, if any. */
            if (r->parsed_uri.query)
                nuri = apr_pstrcat(r->pool, nuri, "?",
                                   r->parsed_uri.query, NULL);

            apr_table_setn(r->headers_out, "Location",
                           ap_construct_url(r->pool, nuri, r));

            ap_log_rerror(APLOG_MARK, APLOG_INFO, APR_SUCCESS, r,
                          ref ? "Fixed spelling: %s to %s from %s"
                              : "Fixed spelling: %s to %s%s",
                          r->uri, nuri, (ref ? ref : ""));

            return HTTP_MOVED_PERMANENTLY;
        }
        /*
         * Otherwise, a "[300] Multiple Choices" list with the variants is
         * returned.
         */
        else {
            apr_pool_t *p;
            apr_table_t *notes;
            apr_pool_t *sub_pool;
            apr_array_header_t *t;
            apr_array_header_t *v;

            /* Attach the variant list to the main request's notes. */
            if (r->main == NULL) {
                p = r->pool;
                notes = r->notes;
            }
            else {
                p = r->main->pool;
                notes = r->main->notes;
            }

            if (apr_pool_create(&sub_pool, p) != APR_SUCCESS)
                return DECLINED;

            /* t: HTML fragments; v: "url";"reason" pairs for VARIANTS. */
            t = apr_array_make(sub_pool, candidates->nelts * 8 + 8,
                               sizeof(char *));
            v = apr_array_make(sub_pool, candidates->nelts * 5,
                               sizeof(char *));

            /* Generate the response text. */
            *(const char **)apr_array_push(t) =
                "The document name you requested (<code>";
            *(const char **)apr_array_push(t) =
                ap_escape_html(sub_pool, r->uri);
            *(const char **)apr_array_push(t) =
                "</code>) could not be found on this server.\n"
                "However, we found documents with names similar "
                "to the one you requested.<p>"
                "Available documents:\n<ul>\n";

            for (i = 0; i < candidates->nelts; ++i) {
                char *vuri;
                const char *reason;

                reason = sp_reason_str[(int) (variant[i].quality)];
                /* The format isn't very neat... */
                vuri = apr_pstrcat(sub_pool, url, variant[i].name,
                                   r->path_info,
                                   (r->parsed_uri.query != NULL) ? "?" : "",
                                   (r->parsed_uri.query != NULL)
                                       ? r->parsed_uri.query : "",
                                   NULL);
                *(const char **)apr_array_push(v) = "\"";
                *(const char **)apr_array_push(v) =
                    ap_escape_uri(sub_pool, vuri);
                *(const char **)apr_array_push(v) = "\";\"";
                *(const char **)apr_array_push(v) = reason;
                *(const char **)apr_array_push(v) = "\"";

                *(const char **)apr_array_push(t) = "<li><a href=\"";
                *(const char **)apr_array_push(t) =
                    ap_escape_uri(sub_pool, vuri);
                *(const char **)apr_array_push(t) = "\">";
                *(const char **)apr_array_push(t) =
                    ap_escape_html(sub_pool, vuri);
                *(const char **)apr_array_push(t) = "</a> (";
                *(const char **)apr_array_push(t) = reason;
                *(const char **)apr_array_push(t) = ")\n";

                /*
                 * when we have printed the "close matches" and there are
                 * more "distant matches" (matched by stripping the suffix),
                 * then we insert an additional separator text to suggest
                 * that the user LOOK CLOSELY whether these are really the
                 * files she wanted.
                 */
                if (i > 0 && i < candidates->nelts - 1
                    && variant[i].quality != SP_VERYDIFFERENT
                    && variant[i + 1].quality == SP_VERYDIFFERENT) {
                    *(const char **)apr_array_push(t) =
                        "</ul>\nFurthermore, the following related "
                        "documents were found:\n<ul>\n";
                }
            }
            *(const char **)apr_array_push(t) = "</ul>\n";

            /* If we know there was a referring page, add a note: */
            if (ref != NULL) {
                *(const char **)apr_array_push(t) =
                    "Please consider informing the owner of the "
                    "<a href=\"";
                *(const char **)apr_array_push(t) =
                    ap_escape_uri(sub_pool, ref);
                *(const char **)apr_array_push(t) =
                    "\">referring page</a> "
                    "about the broken link.\n";
            }

            /* Pass our apr_table_t to http_protocol.c (see mod_negotiation): */
            apr_table_setn(notes, "variant-list",
                           apr_array_pstrcat(p, t, 0));

            apr_table_mergen(r->subprocess_env, "VARIANTS",
                             apr_array_pstrcat(p, v, ','));

            apr_pool_destroy(sub_pool);

            ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r,
                          ref ? "Spelling fix: %s: %d candidates from %s"
                              : "Spelling fix: %s: %d candidates%s",
                          r->uri, candidates->nelts, (ref ? ref : ""));

            return HTTP_MULTIPLE_CHOICES;
        }
    }

    return OK;
}
/* Translate one HTTP/2 header into the HTTP/1 header set of the
 * conversion: reject chunked transfer-encoding, record the content
 * length, drop hop-by-hop headers, merge Cookie values with "; ",
 * remember whether a Host header was seen, and camel-case the rest.
 */
apr_status_t h2_to_h1_add_header(h2_to_h1 *to_h1,
                                 const char *name, size_t nlen,
                                 const char *value, size_t vlen)
{
    char *key, *val;

    if (H2_HD_MATCH_LIT("transfer-encoding", name, nlen)) {
        if (!apr_strnatcasecmp("chunked", value)) {
            /* This should never arrive here in a HTTP/2 request */
            ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_BADARG,
                          h2_mplx_get_conn(to_h1->m),
                          "h2_to_h1: 'transfer-encoding: chunked' received");
            return APR_BADARG;
        }
    }
    else if (H2_HD_MATCH_LIT("content-length", name, nlen)) {
        char *end;

        to_h1->content_len = apr_strtoi64(value, &end, 10);
        if (value == end) {
            ap_log_cerror(APLOG_MARK, APLOG_WARNING, APR_EINVAL,
                          h2_mplx_get_conn(to_h1->m),
                          "h2_request(%d): content-length value not parsed: %s",
                          to_h1->stream_id, value);
            return APR_EINVAL;
        }
        to_h1->remain_len = to_h1->content_len;
        to_h1->chunked = 0;
    }
    else if (H2_HD_MATCH_LIT("content-type", name, nlen)) {
        /* A content-type without a (so far) known length means we
         * will need to chunk the body. */
        to_h1->chunked = (to_h1->content_len == -1);
    }
    else if ((to_h1->seen_host && H2_HD_MATCH_LIT("host", name, nlen))
             || H2_HD_MATCH_LIT("expect", name, nlen)
             || H2_HD_MATCH_LIT("upgrade", name, nlen)
             || H2_HD_MATCH_LIT("connection", name, nlen)
             || H2_HD_MATCH_LIT("proxy-connection", name, nlen)
             || H2_HD_MATCH_LIT("keep-alive", name, nlen)
             || H2_HD_MATCH_LIT("http2-settings", name, nlen)) {
        /* Connection-specific / hop-by-hop headers are dropped. */
        return APR_SUCCESS;
    }
    else if (H2_HD_MATCH_LIT("cookie", name, nlen)) {
        const char *prev = apr_table_get(to_h1->headers, "cookie");
        if (prev) {
            /* Cookie headers come separately in HTTP/2, but need
             * to be merged by "; " (instead of default ", ") */
            char *cval = apr_pstrndup(to_h1->pool, value, vlen);
            apr_table_setn(to_h1->headers, "Cookie",
                           apr_psprintf(to_h1->pool, "%s; %s", prev, cval));
            return APR_SUCCESS;
        }
    }
    else if (H2_HD_MATCH_LIT("host", name, nlen)) {
        /* First Host header: remember it, then add it normally below. */
        to_h1->seen_host = 1;
    }

    key = apr_pstrndup(to_h1->pool, name, nlen);
    val = apr_pstrndup(to_h1->pool, value, vlen);
    h2_util_camel_case_header(key, nlen);
    apr_table_mergen(to_h1->headers, key, val);
    return APR_SUCCESS;
}
/* TODO: cleanup ctx */
/* Output filter that deflate-compresses response data with a preset
 * dictionary and announces it via "Content-Encoding: zlibdict".
 * Passes the brigade through untouched (and removes itself) when the
 * client did not send "Accept-Encoding: zlibdict".
 */
static apr_status_t zlibdict_output_filter(ap_filter_t *f, apr_bucket_brigade *bb)
{
    apr_bucket *b;
    zlibdict_ctx_t *ctx = f->ctx;
    request_rec *r = f->r;
    const char *client_accepts;
    apr_status_t status = APR_SUCCESS;
    apr_pool_t *subpool;
    int zerr;

    ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, f->r,
                  "triggered zlibdict_output_filter");

    /* Do nothing if asked to filter nothing. */
    if (APR_BRIGADE_EMPTY(bb)) {
        return APR_SUCCESS;
    }

    /* First time we are called for this response? */
    if (!ctx) {
        client_accepts = apr_table_get(r->headers_in, "Accept-Encoding");
        /* BUGFIX: the test was inverted — we must bail out when the
         * client does NOT accept "zlibdict", not when it does. */
        if (client_accepts == NULL
            || !zlibdict__header_contains(r->pool, client_accepts)) {
            ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
                          "Not compressing (no Accept-Encoding: zlibdict)");
            ap_remove_output_filter(f);
            return ap_pass_brigade(f->next, bb);
        }

        ctx = f->ctx = apr_pcalloc(r->pool, sizeof(*ctx));
        ctx->bb = apr_brigade_create(r->pool, f->c->bucket_alloc);
        ctx->buf = apr_palloc(r->pool, DEFAULT_BUFFERSIZE);

        /* zstream must be NULL'd out. */
        memset(&ctx->zstr, 0, sizeof(z_stream));
        zerr = deflateInit2(&ctx->zstr, DEFAULT_COMPRESSION, Z_DEFLATED,
                            DEFAULT_WINDOWSIZE, DEFAULT_MEMLEVEL,
                            Z_DEFAULT_STRATEGY);
        if (zerr != Z_OK) {
            /* BUGFIX: the deflateInit2 result was silently ignored;
             * fall back to passing the data through uncompressed. */
            ap_remove_output_filter(f);
            return ap_pass_brigade(f->next, bb);
        }
        deflateSetDictionary(&ctx->zstr, (Bytef *)propfind_dictionary,
                             strlen(propfind_dictionary));

        /* Set Content-Encoding header so our client knows how to handle
           this data. */
        apr_table_mergen(r->headers_out, "Content-Encoding", "zlibdict");
    }

    /* Read the data from the handler and compress it with a dictionary. */
    for (b = APR_BRIGADE_FIRST(bb);
         b != APR_BRIGADE_SENTINEL(bb);
         b = APR_BUCKET_NEXT(b)) {
        const char *data;
        void *write_buf;
        size_t len;
        size_t buf_size, write_len;

        if (APR_BUCKET_IS_EOS(b)) {
            deflateEnd(&ctx->zstr);

            /* Remove EOS from the old list, and insert into the new. */
            APR_BUCKET_REMOVE(b);
            APR_BRIGADE_INSERT_TAIL(ctx->bb, b);
            return ap_pass_brigade(f->next, ctx->bb);
        }

        if (APR_BUCKET_IS_METADATA(b))
            continue;

        status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
        if (status != APR_SUCCESS)
            break;

        /* The largest buffer we should need is 0.1% larger than the
           compressed data, + 12 bytes. This info comes from zlib.h.  */
        buf_size = len + (len / 1000) + 13;
        apr_pool_create(&subpool, r->pool);
        write_buf = apr_palloc(subpool, buf_size);

        ctx->zstr.next_in = (Bytef *)data;  /* Casting away const! */
        ctx->zstr.avail_in = (uInt) len;

        /* NOTE(review): deflate(..., Z_FINISH) per input bucket ends the
         * zlib stream after the first bucket; a streaming filter should
         * use Z_NO_FLUSH here and finish only at EOS. Left unchanged
         * pending a larger restructuring. */
        zerr = Z_OK;
        while (ctx->zstr.avail_in > 0 && zerr != Z_STREAM_END) {
            ctx->zstr.next_out = write_buf;
            ctx->zstr.avail_out = (uInt) buf_size;

            zerr = deflate(&ctx->zstr, Z_FINISH);
            if (zerr < 0) {
                /* BUGFIX: previously "return -1", which is not a valid
                 * apr_status_t and leaked the subpool. */
                apr_pool_destroy(subpool);
                return APR_EGENERAL;
            }

            write_len = buf_size - ctx->zstr.avail_out;
            if (write_len > 0) {
                apr_bucket *b_out;

                /* BUGFIX: the bucket must carry the compressed length
                 * (write_len), not the input length (len). The NULL
                 * free function makes the heap bucket copy the data,
                 * so write_buf need not outlive this call. */
                b_out = apr_bucket_heap_create(write_buf, write_len,
                                               NULL, f->c->bucket_alloc);
                APR_BRIGADE_INSERT_TAIL(ctx->bb, b_out);
                /* Send what we have right now to the next filter. */
                status = ap_pass_brigade(f->next, ctx->bb);
                if (status != APR_SUCCESS) {
                    apr_pool_destroy(subpool);
                    return status;
                }
            }
        }
        /* BUGFIX: destroy the subpool only after the deflate loop is
         * finished with write_buf; destroying it inside the loop freed
         * the output buffer while it was still being written to. */
        apr_pool_destroy(subpool);
    }

    return status;
}
/* Decide whether the cached entity for this request is still fresh.
 * Returns 1 when the cached object may be served as-is, 0 when it is
 * stale and must be revalidated against the origin server. Also sets
 * the Age header and Warning 110/113 headers on the cached response,
 * and uses a file-based "thundering herd" lock so only one stale
 * request at a time revalidates.
 */
int cache_check_freshness(cache_handle_t *h, cache_request_rec *cache,
                          request_rec *r)
{
    apr_status_t status;
    apr_int64_t age, maxage_req, maxage_cresp, maxage, smaxage, maxstale;
    apr_int64_t minfresh;
    const char *cc_req;
    const char *pragma;
    const char *agestr = NULL;
    apr_time_t age_c = 0;
    cache_info *info = &(h->cache_obj->info);
    const char *warn_head;
    cache_server_conf *conf =
        (cache_server_conf *)ap_get_module_config(r->server->module_config,
                                                  &cache_module);

    /*
     * We now want to check if our cached data is still fresh. This depends
     * on a few things, in this order:
     *
     * - RFC2616 14.9.4 End to end reload, Cache-Control: no-cache. no-cache
     * in either the request or the cached response means that we must
     * perform the request unconditionally, and ignore cached content. We
     * should never reach here, but if we do, mark the content as stale,
     * as this is the best we can do.
     *
     * - RFC2616 14.32 Pragma: no-cache This is treated the same as
     * Cache-Control: no-cache.
     *
     * - RFC2616 14.9.3 Cache-Control: max-stale, must-revalidate,
     * proxy-revalidate if the max-stale request header exists, modify the
     * stale calculations below so that an object can be at most <max-stale>
     * seconds stale before we request a revalidation, _UNLESS_ a
     * must-revalidate or proxy-revalidate cached response header exists to
     * stop us doing this.
     *
     * - RFC2616 14.9.3 Cache-Control: s-maxage the origin server specifies the
     * maximum age an object can be before it is considered stale. This
     * directive has the effect of proxy|must revalidate, which in turn means
     * simple ignore any max-stale setting.
     *
     * - RFC2616 14.9.4 Cache-Control: max-age this header can appear in both
     * requests and responses. If both are specified, the smaller of the two
     * takes priority.
     *
     * - RFC2616 14.21 Expires: if this request header exists in the cached
     * entity, and it's value is in the past, it has expired.
     *
     */

    /* This value comes from the client's initial request. */
    cc_req = apr_table_get(r->headers_in, "Cache-Control");
    pragma = apr_table_get(r->headers_in, "Pragma");

    ap_cache_control(r, &cache->control_in, cc_req, pragma, r->headers_in);

    if (cache->control_in.no_cache) {

        if (!conf->ignorecachecontrol) {
            /* Treat as stale, causing revalidation */
            return 0;
        }

        ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(00781)
                "Incoming request is asking for a uncached version of "
                "%s, but we have been configured to ignore it and "
                "serve a cached response anyway",
                r->unparsed_uri);
    }

    /* These come from the cached entity. */
    if (h->cache_obj->info.control.no_cache
            || h->cache_obj->info.control.invalidated) {
        /*
         * The cached entity contained Cache-Control: no-cache, or a
         * no-cache with a header present, or a private with a header
         * present, or the cached entity has been invalidated in the
         * past, so treat as stale causing revalidation.
         */
        return 0;
    }

    /* Parse the Age response header strictly; ignore it on conversion
     * error or trailing garbage. */
    if ((agestr = apr_table_get(h->resp_hdrs, "Age"))) {
        char *endp;
        apr_off_t offt;
        if (!apr_strtoff(&offt, agestr, &endp, 10)
                && endp > agestr && !*endp) {
            age_c = offt;
        }
    }

    /* calculate age of object */
    age = ap_cache_current_age(info, age_c, r->request_time);

    /* extract s-maxage */
    smaxage = h->cache_obj->info.control.s_maxage_value;

    /* extract max-age from request */
    maxage_req = -1;
    if (!conf->ignorecachecontrol) {
        maxage_req = cache->control_in.max_age_value;
    }

    /*
     * extract max-age from response, if both s-maxage and max-age, s-maxage
     * takes priority
     */
    if (smaxage != -1) {
        maxage_cresp = smaxage;
    }
    else {
        maxage_cresp = h->cache_obj->info.control.max_age_value;
    }

    /*
     * if both maxage request and response, the smaller one takes priority
     */
    if (maxage_req == -1) {
        maxage = maxage_cresp;
    }
    else if (maxage_cresp == -1) {
        maxage = maxage_req;
    }
    else {
        maxage = MIN(maxage_req, maxage_cresp);
    }

    /* extract max-stale */
    if (cache->control_in.max_stale) {
        if (cache->control_in.max_stale_value != -1) {
            maxstale = cache->control_in.max_stale_value;
        }
        else {
            /*
             * If no value is assigned to max-stale, then the client is willing
             * to accept a stale response of any age (RFC2616 14.9.3). We will
             * set it to one year in this case as this situation is somewhat
             * similar to a "never expires" Expires header (RFC2616 14.21)
             * which is set to a date one year from the time the response is
             * sent in this case.
             */
            maxstale = APR_INT64_C(86400*365);
        }
    }
    else {
        maxstale = 0;
    }

    /* extract min-fresh */
    if (!conf->ignorecachecontrol && cache->control_in.min_fresh) {
        minfresh = cache->control_in.min_fresh_value;
    }
    else {
        minfresh = 0;
    }

    /* override maxstale if must-revalidate, proxy-revalidate or s-maxage */
    if (maxstale && (h->cache_obj->info.control.must_revalidate
            || h->cache_obj->info.control.proxy_revalidate
            || smaxage != -1)) {
        maxstale = 0;
    }

    /* handle expiration: fresh when within max-age (+max-stale -min-fresh),
     * or — with no max-age/s-maxage — within the Expires horizon. */
    if (((maxage != -1) && (age < (maxage + maxstale - minfresh)))
            || ((smaxage == -1) && (maxage == -1)
                && (info->expire != APR_DATE_BAD)
                && (age < (apr_time_sec(info->expire - info->date)
                           + maxstale - minfresh)))) {

        warn_head = apr_table_get(h->resp_hdrs, "Warning");

        /* it's fresh darlings... */
        /* set age header on response */
        apr_table_set(h->resp_hdrs, "Age",
                      apr_psprintf(r->pool, "%lu", (unsigned long)age));

        /* add warning if maxstale overrode freshness calculation */
        if (!(((maxage != -1) && age < maxage)
                || (info->expire != APR_DATE_BAD
                    && (apr_time_sec(info->expire - info->date)) > age))) {
            /* make sure we don't stomp on a previous warning */
            if ((warn_head == NULL)
                    || ((warn_head != NULL)
                        && (ap_strstr_c(warn_head, "110") == NULL))) {
                apr_table_mergen(h->resp_hdrs, "Warning",
                                 "110 Response is stale");
            }
        }

        /*
         * If none of Expires, Cache-Control: max-age, or Cache-Control:
         * s-maxage appears in the response, and the response header age
         * calculated is more than 24 hours add the warning 113
         */
        if ((maxage_cresp == -1) && (smaxage == -1)
                && (apr_table_get(h->resp_hdrs, "Expires") == NULL)
                && (age > 86400)) {

            /* Make sure we don't stomp on a previous warning, and don't dup
             * a 113 warning that is already present. Also, make sure to add
             * the new warning to the correct *headers_out location.
             */
            if ((warn_head == NULL)
                    || ((warn_head != NULL)
                        && (ap_strstr_c(warn_head, "113") == NULL))) {
                apr_table_mergen(h->resp_hdrs, "Warning",
                                 "113 Heuristic expiration");
            }
        }
        return 1;    /* Cache object is fresh (enough) */
    }

    /*
     * At this point we are stale, but: if we are under load, we may let
     * a significant number of stale requests through before the first
     * stale request successfully revalidates itself, causing a sudden
     * unexpected thundering herd which in turn brings angst and drama.
     *
     * So.
     *
     * We want the first stale request to go through as normal. But the
     * second and subsequent request, we must pretend to be fresh until
     * the first request comes back with either new content or confirmation
     * that the stale content is still fresh.
     *
     * To achieve this, we create a very simple file based lock based on
     * the key of the cached object. We attempt to open the lock file with
     * exclusive write access. If we succeed, woohoo! we're first, and we
     * follow the stale path to the backend server. If we fail, oh well,
     * we follow the fresh path, and avoid being a thundering herd.
     *
     * The lock lives only as long as the stale request that went on ahead.
     * If the request succeeds, the lock is deleted. If the request fails,
     * the lock is deleted, and another request gets to make a new lock
     * and try again.
     *
     * At any time, a request marked "no-cache" will force a refresh,
     * ignoring the lock, ensuring an extended lockout is impossible.
     *
     * A lock that exceeds a maximum age will be deleted, and another
     * request gets to make a new lock and try again.
     */
    status = cache_try_lock(conf, cache, r);
    if (APR_SUCCESS == status) {
        /* we obtained a lock, follow the stale path */
        ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00782)
                "Cache lock obtained for stale cached URL, "
                "revalidating entry: %s",
                r->unparsed_uri);
        return 0;
    }
    else if (APR_STATUS_IS_EEXIST(status)) {
        /* lock already exists, return stale data anyway, with a warning */
        ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, r, APLOGNO(00783)
                "Cache already locked for stale cached URL, "
                "pretend it is fresh: %s",
                r->unparsed_uri);

        /* make sure we don't stomp on a previous warning */
        warn_head = apr_table_get(h->resp_hdrs, "Warning");
        if ((warn_head == NULL)
                || ((warn_head != NULL)
                    && (ap_strstr_c(warn_head, "110") == NULL))) {
            apr_table_mergen(h->resp_hdrs, "Warning",
                             "110 Response is stale");
        }
        return 1;
    }
    else {
        /* some other error occurred, just treat the object as stale */
        ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, r, APLOGNO(00784)
                "Attempt to obtain a cache lock for stale "
                "cached URL failed, revalidating entry anyway: %s",
                r->unparsed_uri);
        return 0;
    }

}
/* Build the outgoing request-header brigade for a proxied request:
 * request line (HTTP/1.0 or 1.1), Host header (literal or preserved),
 * Via header, optional Expect: 100-Continue ping, X-Forwarded-*
 * headers for reverse proxies, and finally all remaining client
 * headers minus hop-by-hop ones. Transfer-Encoding / Content-Length
 * are not sent here but handed back via *old_te_val / *old_cl_val.
 * Returns OK or HTTP_EXPECTATION_FAILED.
 */
PROXY_DECLARE(int) ap_proxy_create_hdrbrgd(apr_pool_t *p,
                                           apr_bucket_brigade *header_brigade,
                                           request_rec *r,
                                           proxy_conn_rec *p_conn,
                                           proxy_worker *worker,
                                           proxy_server_conf *conf,
                                           apr_uri_t *uri,
                                           char *url, char *server_portstr,
                                           char **old_cl_val,
                                           char **old_te_val)
{
    conn_rec *c = r->connection;
    int counter;
    char *buf;
    const apr_array_header_t *headers_in_array;
    const apr_table_entry_t *headers_in;
    apr_table_t *headers_in_copy;
    apr_bucket *e;
    int do_100_continue;
    conn_rec *origin = p_conn->connection;
    /* NOTE(review): dconf is not referenced below in this excerpt. */
    proxy_dir_conf *dconf = ap_get_module_config(r->per_dir_config,
                                                 &proxy_module);

    /*
     * To be compliant, we only use 100-Continue for requests with bodies.
     * We also make sure we won't be talking HTTP/1.0 as well.
     */
    do_100_continue = (worker->ping_timeout_set
                       && !r->header_only
                       && (apr_table_get(r->headers_in, "Content-Length")
                           || apr_table_get(r->headers_in,
                                            "Transfer-Encoding"))
                       && (PROXYREQ_REVERSE == r->proxyreq)
                       && !(apr_table_get(r->subprocess_env,
                                          "force-proxy-request-1.0")));

    if (apr_table_get(r->subprocess_env, "force-proxy-request-1.0")) {
        /*
         * According to RFC 2616 8.2.3 we are not allowed to forward an
         * Expect: 100-continue to an HTTP/1.0 server. Instead we MUST return
         * a HTTP_EXPECTATION_FAILED
         */
        if (r->expecting_100) {
            return HTTP_EXPECTATION_FAILED;
        }
        buf = apr_pstrcat(p, r->method, " ", url, " HTTP/1.0" CRLF, NULL);
        p_conn->close = 1;
    }
    else {
        buf = apr_pstrcat(p, r->method, " ", url, " HTTP/1.1" CRLF, NULL);
    }
    if (apr_table_get(r->subprocess_env, "proxy-nokeepalive")) {
        origin->keepalive = AP_CONN_CLOSE;
        p_conn->close = 1;
    }
    /* Request line goes out first, translated to ASCII for EBCDIC hosts. */
    ap_xlate_proto_to_ascii(buf, strlen(buf));
    e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(header_brigade, e);
    if (conf->preserve_host == 0) {
        if (ap_strchr_c(uri->hostname, ':')) { /* if literal IPv6 address */
            if (uri->port_str && uri->port != DEFAULT_HTTP_PORT) {
                buf = apr_pstrcat(p, "Host: [", uri->hostname, "]:",
                                  uri->port_str, CRLF, NULL);
            } else {
                buf = apr_pstrcat(p, "Host: [", uri->hostname, "]", CRLF, NULL);
            }
        } else {
            if (uri->port_str && uri->port != DEFAULT_HTTP_PORT) {
                buf = apr_pstrcat(p, "Host: ", uri->hostname, ":",
                                  uri->port_str, CRLF, NULL);
            } else {
                buf = apr_pstrcat(p, "Host: ", uri->hostname, CRLF, NULL);
            }
        }
    }
    else {
        /* don't want to use r->hostname, as the incoming header might have a
         * port attached
         */
        const char* hostname = apr_table_get(r->headers_in,"Host");
        if (!hostname) {
            hostname =  r->server->server_hostname;
            ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, "AH01092: "
                          "no HTTP 0.9 request (with no host line) "
                          "on incoming request and preserve host set "
                          "forcing hostname to be %s for uri %s",
                          hostname, r->uri);
        }
        buf = apr_pstrcat(p, "Host: ", hostname, CRLF, NULL);
    }
    ap_xlate_proto_to_ascii(buf, strlen(buf));
    e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(header_brigade, e);

    /* handle Via */
    if (conf->viaopt == via_block) {
        /* Block all outgoing Via: headers */
        apr_table_unset(r->headers_in, "Via");
    } else if (conf->viaopt != via_off) {
        const char *server_name = ap_get_server_name(r);
        /* If USE_CANONICAL_NAME_OFF was configured for the proxy virtual host,
         * then the server name returned by ap_get_server_name() is the
         * origin server name (which does make too much sense with Via: headers)
         * so we use the proxy vhost's name instead.
         */
        if (server_name == r->hostname)
            server_name = r->server->server_hostname;
        /* Create a "Via:" request header entry and merge it */
        /* Generate outgoing Via: header with/without server comment: */
        apr_table_mergen(r->headers_in, "Via",
                         (conf->viaopt == via_full)
                         ? apr_psprintf(p, "%d.%d %s%s (%s)",
                                        HTTP_VERSION_MAJOR(r->proto_num),
                                        HTTP_VERSION_MINOR(r->proto_num),
                                        server_name, server_portstr,
                                        AP_SERVER_BASEVERSION)
                         : apr_psprintf(p, "%d.%d %s%s",
                                        HTTP_VERSION_MAJOR(r->proto_num),
                                        HTTP_VERSION_MINOR(r->proto_num),
                                        server_name, server_portstr)
                         );
    }

    /* Use HTTP/1.1 100-Continue as quick "HTTP ping" test
     * to backend
     */
    if (do_100_continue) {
        apr_table_mergen(r->headers_in, "Expect", "100-Continue");
        r->expecting_100 = 1;
    }

    /* X-Forwarded-*: handling
     *
     * XXX Privacy Note:
     * -----------------
     *
     * These request headers are only really useful when the mod_proxy
     * is used in a reverse proxy configuration, so that useful info
     * about the client can be passed through the reverse proxy and on
     * to the backend server, which may require the information to
     * function properly.
     *
     * In a forward proxy situation, these options are a potential
     * privacy violation, as information about clients behind the proxy
     * are revealed to arbitrary servers out there on the internet.
     *
     * The HTTP/1.1 Via: header is designed for passing client
     * information through proxies to a server, and should be used in
     * a forward proxy configuation instead of X-Forwarded-*. See the
     * ProxyVia option for details.
     */
    if (PROXYREQ_REVERSE == r->proxyreq) {
        /* NOTE(review): this inner "buf" shadows the outer char *buf. */
        const char *buf;

        /* Add X-Forwarded-For: so that the upstream has a chance to
         * determine, where the original request came from.
         */
        apr_table_mergen(r->headers_in, "X-Forwarded-For",
                         c->remote_ip);

        /* Add X-Forwarded-Host: so that upstream knows what the
         * original request hostname was.
         */
        if ((buf = apr_table_get(r->headers_in, "Host"))) {
            apr_table_mergen(r->headers_in, "X-Forwarded-Host", buf);
        }

        /* Add X-Forwarded-Server: so that upstream knows what the
         * name of this proxy server is (if there are more than one)
         * XXX: This duplicates Via: - do we strictly need it?
         */
        apr_table_mergen(r->headers_in, "X-Forwarded-Server",
                         r->server->server_hostname);
    }

    proxy_run_fixups(r);
    /*
     * Make a copy of the headers_in table before clearing the connection
     * headers as we need the connection headers later in the http output
     * filter to prepare the correct response headers.
     *
     * Note: We need to take r->pool for apr_table_copy as the key / value
     * pairs in r->headers_in have been created out of r->pool and
     * p might be (and actually is) a longer living pool.
     * This would trigger the bad pool ancestry abort in apr_table_copy if
     * apr is compiled with APR_POOL_DEBUG.
     */
    headers_in_copy = apr_table_copy(r->pool, r->headers_in);
    proxy_clear_connection(p, headers_in_copy);
    /* send request headers */
    headers_in_array = apr_table_elts(headers_in_copy);
    headers_in = (const apr_table_entry_t *) headers_in_array->elts;
    for (counter = 0; counter < headers_in_array->nelts; counter++) {
        if (headers_in[counter].key == NULL
            || headers_in[counter].val == NULL

            /* Already sent */
            || !strcasecmp(headers_in[counter].key, "Host")

            /* Clear out hop-by-hop request headers not to send
             * RFC2616 13.5.1 says we should strip these headers
             */
            || !strcasecmp(headers_in[counter].key, "Keep-Alive")
            || !strcasecmp(headers_in[counter].key, "TE")
            || !strcasecmp(headers_in[counter].key, "Trailer")
            || !strcasecmp(headers_in[counter].key, "Upgrade")

            ) {
            continue;
        }
        /* Do we want to strip Proxy-Authorization ?
         * If we haven't used it, then NO
         * If we have used it then MAYBE: RFC2616 says we MAY propagate it.
         * So let's make it configurable by env.
         */
        if (!strcasecmp(headers_in[counter].key,"Proxy-Authorization")) {
            if (r->user != NULL) { /* we've authenticated */
                if (!apr_table_get(r->subprocess_env, "Proxy-Chain-Auth")) {
                    continue;
                }
            }
        }

        /* Skip Transfer-Encoding and Content-Length for now.
         * They are handed back to the caller instead of being sent. */
        if (!strcasecmp(headers_in[counter].key, "Transfer-Encoding")) {
            *old_te_val = headers_in[counter].val;
            continue;
        }
        if (!strcasecmp(headers_in[counter].key, "Content-Length")) {
            *old_cl_val = headers_in[counter].val;
            continue;
        }

        /* for sub-requests, ignore freshness/expiry headers */
        if (r->main) {
            if (    !strcasecmp(headers_in[counter].key, "If-Match")
                 || !strcasecmp(headers_in[counter].key, "If-Modified-Since")
                 || !strcasecmp(headers_in[counter].key, "If-Range")
                 || !strcasecmp(headers_in[counter].key, "If-Unmodified-Since")
                 || !strcasecmp(headers_in[counter].key, "If-None-Match")) {
                continue;
            }
        }

        buf = apr_pstrcat(p, headers_in[counter].key, ": ",
                          headers_in[counter].val, CRLF,
                          NULL);
        ap_xlate_proto_to_ascii(buf, strlen(buf));
        e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(header_brigade, e);
    }
    return OK;
}