static int write_http_response(mapcache_context_apache_request *ctx, mapcache_http_response *response) { request_rec *r = ctx->request; int rc; if(response->mtime) { ap_update_mtime(r, response->mtime); if((rc = ap_meets_conditions(r)) != OK) { return rc; } char *timestr = apr_palloc(r->pool, APR_RFC822_DATE_LEN); apr_rfc822_date(timestr, response->mtime); apr_table_setn(r->headers_out, "Last-Modified", timestr); } if(response->headers && !apr_is_empty_table(response->headers)) { const apr_array_header_t *elts = apr_table_elts(response->headers); int i; for(i=0;i<elts->nelts;i++) { apr_table_entry_t entry = APR_ARRAY_IDX(elts,i,apr_table_entry_t); if(!strcasecmp(entry.key,"Content-Type")) { ap_set_content_type(r,entry.val); } else { apr_table_set(r->headers_out, entry.key, entry.val); } } } if(response->data) { ap_set_content_length(r,response->data->size); ap_rwrite((void*)response->data->buf, response->data->size, r); } r->status = response->code; return OK; }
char* mapcache_tileset_metatile_resource_key(mapcache_context *ctx, mapcache_metatile *mt) { char *lockname = apr_psprintf(ctx->pool, "%d-%d-%d-%s", mt->z,mt->y,mt->x, mt->map.tileset->name); /* if the tileset has multiple grids, add the name of the current grid to the lock key*/ if(mt->map.tileset->grid_links->nelts > 1) { lockname = apr_pstrcat(ctx->pool,lockname,mt->map.grid_link->grid->name,NULL); } if(mt->map.dimensions && !apr_is_empty_table(mt->map.dimensions)) { const apr_array_header_t *elts = apr_table_elts(mt->map.dimensions); int i; for(i=0; i<elts->nelts; i++) { apr_table_entry_t entry = APR_ARRAY_IDX(elts,i,apr_table_entry_t); char *dimvalue = apr_pstrdup(ctx->pool,entry.val); char *iter = dimvalue; while(*iter) { if(*iter == '/') *iter='_'; iter++; } lockname = apr_pstrcat(ctx->pool,lockname,dimvalue,NULL); } } return lockname; }
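/*
 * Worked example (derived from the format string above, values are hypothetical): a metatile at
 * z=3, y=5, x=7 belonging to a tileset named "osm" yields the resource key "3-5-7-osm". The grid
 * name is appended when the tileset uses more than one grid, and any dimension values follow with
 * '/' characters replaced by '_'.
 */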
/** * Fixup routine for request header processing. * * @param r The request record */ static apr_status_t replace_input_fixup(request_rec *r) { replace_server_t *conf = ap_get_module_config(r->server->module_config, &replace_module); replace_filter_t *filter = (replace_filter_t*)apr_hash_get(conf->h, REQUEST_REPLACE_FILTER, APR_HASH_KEY_STRING); header_replace_pattern_t *pattern; /* Exit if there is no filter definition. */ if (filter == NULL) { return DECLINED; } /* Loop over the configured patterns */ for (pattern = filter->header_pattern; pattern != NULL; pattern = pattern->next) { apr_table_t *header_table = apr_table_make(r->pool, 2); header_replace_cb_t *hrcb = apr_palloc(r->pool, sizeof(header_replace_cb_t)); hrcb->header_table = header_table; hrcb->pattern = pattern->pattern; hrcb->extra = pattern->extra; hrcb->replacement = pattern->replacement; hrcb->r = r; apr_table_do(replace_header_cb, hrcb, r->headers_in, pattern->header, NULL); if (!apr_is_empty_table(header_table)) { apr_table_unset(r->headers_in, pattern->header); r->headers_in = apr_table_overlay(r->pool, r->headers_in, header_table); } } return OK; }
char* mapcache_http_build_url(mapcache_context *ctx, char *base, apr_table_t *params) { if(!apr_is_empty_table(params)) { int baseLength; header_cb_struct hcs; baseLength = strlen(base); hcs.pool = ctx->pool; hcs.str = base; if(strchr(base,'?')) { /* base already contains a '?' , shall we be adding a '&' to the end */ if(base[baseLength-1] != '?' && base[baseLength-1] != '&') { hcs.str = apr_pstrcat(ctx->pool, hcs.str, "&", NULL); } } else { /* base does not contain a '?', we will be adding it */ hcs.str = apr_pstrcat(ctx->pool, hcs.str, "?", NULL); } apr_table_do(_mapcache_key_value_append_callback, (void*)&hcs, params, NULL); baseLength = strlen(hcs.str); hcs.str[baseLength-1] = '\0'; return hcs.str; } else { return base; } }
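/*
 * Illustrative usage sketch (not part of the original sources): building a request URL with
 * mapcache_http_build_url() from a base URL and a parameter table. The endpoint and parameter
 * names below are hypothetical; only standard APR table calls and the helper above are assumed.
 */
static char* example_build_wms_url(mapcache_context *ctx)
{
  apr_table_t *params = apr_table_make(ctx->pool, 4);
  apr_table_setn(params, "SERVICE", "WMS");
  apr_table_setn(params, "REQUEST", "GetCapabilities");
  /* the helper appends '?' or '&' as needed before the key/value pairs */
  return mapcache_http_build_url(ctx, apr_pstrdup(ctx->pool, "http://example.com/wms"), params);
}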
void _mapcache_source_wms_query(mapcache_context *ctx, mapcache_feature_info *fi) { mapcache_map *map = (mapcache_map*)fi; mapcache_source_wms *wms = (mapcache_source_wms*)map->tileset->source; apr_table_t *params = apr_table_clone(ctx->pool,wms->wms_default_params); apr_table_overlap(params,wms->getmap_params,0); apr_table_setn(params,"BBOX",apr_psprintf(ctx->pool,"%f,%f,%f,%f", map->extent.minx,map->extent.miny,map->extent.maxx,map->extent.maxy)); apr_table_setn(params,"REQUEST","GetFeatureInfo"); apr_table_setn(params,"WIDTH",apr_psprintf(ctx->pool,"%d",map->width)); apr_table_setn(params,"HEIGHT",apr_psprintf(ctx->pool,"%d",map->height)); apr_table_setn(params,"SRS",map->grid_link->grid->srs); apr_table_setn(params,"X",apr_psprintf(ctx->pool,"%d",fi->i)); apr_table_setn(params,"Y",apr_psprintf(ctx->pool,"%d",fi->j)); apr_table_setn(params,"INFO_FORMAT",fi->format); apr_table_overlap(params,wms->getfeatureinfo_params,0); if(map->dimensions && !apr_is_empty_table(map->dimensions)) { const apr_array_header_t *elts = apr_table_elts(map->dimensions); int i; for(i=0; i<elts->nelts; i++) { apr_table_entry_t entry = APR_ARRAY_IDX(elts,i,apr_table_entry_t); apr_table_setn(params,entry.key,entry.val); } } fi->data = mapcache_buffer_create(30000,ctx->pool); mapcache_http_do_request_with_params(ctx,wms->http,params,fi->data,NULL,NULL); GC_CHECK_ERROR(ctx); }
static authz_status group_check_authorization(request_rec *r, const char *require_args, const void *parsed_require_args) { authz_groupfile_config_rec *conf = ap_get_module_config(r->per_dir_config, &authz_groupfile_module); char *user = r->user; const char *t, *w; apr_table_t *grpstatus = NULL; apr_status_t status; if (!user) { return AUTHZ_DENIED_NO_USER; } /* If there is no group file - then we are not * configured. So decline. */ if (!(conf->groupfile)) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01664) "No group file was specified in the configuration"); return AUTHZ_DENIED; } status = groups_for_user(r->pool, user, conf->groupfile, &grpstatus); if (status != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01665) "Could not open group file: %s", conf->groupfile); return AUTHZ_DENIED; } if (apr_is_empty_table(grpstatus)) { /* no groups available, so exit immediately */ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01666) "Authorization of user %s to access %s failed, reason: " "user doesn't appear in group file (%s).", r->user, r->uri, conf->groupfile); return AUTHZ_DENIED; } t = require_args; while ((w = ap_getword_conf(r->pool, &t)) && w[0]) { if (apr_table_get(grpstatus, w)) { return AUTHZ_GRANTED; } } ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01667) "Authorization of user %s to access %s failed, reason: " "user is not part of the 'require'ed group(s).", r->user, r->uri); return AUTHZ_DENIED; }
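/*
 * For reference (standard mod_authz_groupfile usage, not taken from this file): the group file
 * read by groups_for_user() contains lines of the form
 *
 *   web-admins: alice bob
 *
 * and access is then granted with a directive such as "Require group web-admins";
 * "web-admins", "alice" and "bob" are made-up names.
 */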
apr_status_t h2_request_add_header(h2_request *req, apr_pool_t *pool, const char *name, size_t nlen, const char *value, size_t vlen) { apr_status_t status = APR_SUCCESS; if (nlen <= 0) { return status; } if (name[0] == ':') { /* pseudo header, see ch. 8.1.2.3, always should come first */ if (!apr_is_empty_table(req->headers)) { ap_log_perror(APLOG_MARK, APLOG_ERR, 0, pool, APLOGNO(02917) "h2_request(%d): pseudo header after request start", req->id); return APR_EGENERAL; } if (H2_HEADER_METHOD_LEN == nlen && !strncmp(H2_HEADER_METHOD, name, nlen)) { req->method = apr_pstrndup(pool, value, vlen); } else if (H2_HEADER_SCHEME_LEN == nlen && !strncmp(H2_HEADER_SCHEME, name, nlen)) { req->scheme = apr_pstrndup(pool, value, vlen); } else if (H2_HEADER_PATH_LEN == nlen && !strncmp(H2_HEADER_PATH, name, nlen)) { req->path = apr_pstrndup(pool, value, vlen); } else if (H2_HEADER_AUTH_LEN == nlen && !strncmp(H2_HEADER_AUTH, name, nlen)) { req->authority = apr_pstrndup(pool, value, vlen); } else { char buffer[32]; memset(buffer, 0, 32); strncpy(buffer, name, (nlen > 31)? 31 : nlen); ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, pool, APLOGNO(02954) "h2_request(%d): ignoring unknown pseudo header %s", req->id, buffer); } } else { /* non-pseudo header, append to work bucket of stream */ status = add_h1_header(req, pool, name, nlen, value, vlen); } return status; }
static int fixup_env_module(request_rec *r) { env_dir_config_rec *sconf = ap_get_module_config(r->per_dir_config, &env_module); if (apr_is_empty_table(sconf->vars)) { return DECLINED; } r->subprocess_env = apr_table_overlay(r->pool, r->subprocess_env, sconf->vars); return OK; }
/** * \private \memberof mapcache_source_wms * \sa mapcache_source::render_map() */ void _mapcache_source_wms_render_map(mapcache_context *ctx, mapcache_map *map) { mapcache_source_wms *wms = (mapcache_source_wms*)map->tileset->source; mapcache_http *http; apr_table_t *params = apr_table_clone(ctx->pool,wms->wms_default_params); apr_table_setn(params,"BBOX",apr_psprintf(ctx->pool,"%f,%f,%f,%f", map->extent.minx,map->extent.miny,map->extent.maxx,map->extent.maxy)); apr_table_setn(params,"WIDTH",apr_psprintf(ctx->pool,"%d",map->width)); apr_table_setn(params,"HEIGHT",apr_psprintf(ctx->pool,"%d",map->height)); apr_table_setn(params,"FORMAT","image/png"); apr_table_setn(params,"SRS",map->grid_link->grid->srs); apr_table_overlap(params,wms->getmap_params,APR_OVERLAP_TABLES_SET); if(map->dimensions && !apr_is_empty_table(map->dimensions)) { const apr_array_header_t *elts = apr_table_elts(map->dimensions); int i; for(i=0; i<elts->nelts; i++) { apr_table_entry_t entry = APR_ARRAY_IDX(elts,i,apr_table_entry_t); /* set both DIM_key=val and key=val KVP params */ apr_table_setn(params,entry.key,entry.val); if(strcasecmp(entry.key,"TIME") && strcasecmp(entry.key,"ELEVATION")) { char *dim_name = apr_pstrcat(ctx->pool,"DIM_",entry.key,NULL); apr_table_setn(params,dim_name,entry.val); } } } /* if the source has no LAYERS parameter defined, then use the tileset name * as the LAYERS to request. When using mirror-mode, the source has no layers * defined, it is added based on the incoming request */ if(!apr_table_get(params,"layers")) { apr_table_set(params,"LAYERS",map->tileset->name); } map->encoded_data = mapcache_buffer_create(30000,ctx->pool); http = mapcache_http_clone(ctx, wms->http); http->url = mapcache_http_build_url(ctx,http->url,params); mapcache_http_do_request(ctx,http,map->encoded_data,NULL,NULL); GC_CHECK_ERROR(ctx); if(!mapcache_imageio_is_valid_format(ctx,map->encoded_data)) { char *returned_data = apr_pstrndup(ctx->pool,(char*)map->encoded_data->buf,map->encoded_data->size); ctx->set_error(ctx, 502, "wms request for tileset %s returned an unsupported format:\n%s", map->tileset->name, returned_data); } }
static void fcgi_write_response(mapcache_context_fcgi *ctx, mapcache_http_response *response) { if(response->code != 200) { printf("Status: %ld %s\r\n",response->code, err_msg(response->code)); } if(response->headers && !apr_is_empty_table(response->headers)) { const apr_array_header_t *elts = apr_table_elts(response->headers); int i; for(i=0; i<elts->nelts; i++) { apr_table_entry_t entry = APR_ARRAY_IDX(elts,i,apr_table_entry_t); printf("%s: %s\r\n", entry.key, entry.val); } } if(response->mtime) { char *datestr; char *if_modified_since = getenv("HTTP_IF_MODIFIED_SINCE"); datestr = apr_palloc(ctx->ctx.pool, APR_RFC822_DATE_LEN); apr_rfc822_date(datestr, response->mtime); printf("Last-Modified: %s\r\n", datestr); if(if_modified_since) { apr_time_t ims_time; apr_int64_t ims,mtime; mtime = apr_time_sec(response->mtime); ims_time = apr_date_parse_http(if_modified_since); ims = apr_time_sec(ims_time); if(ims >= mtime) { printf("Status: 304 Not Modified\r\n"); /* * "The 304 response MUST NOT contain a message-body" * https://tools.ietf.org/html/rfc2616#section-10.3.5 */ printf("\r\n"); return; } } } if(response->data) { printf("Content-Length: %ld\r\n\r\n", response->data->size); fwrite((char*)response->data->buf, response->data->size,1,stdout); } }
static void accept_headers(cache_handle_t *h, request_rec *r) { apr_table_t *cookie_table; const char *v; v = apr_table_get(h->resp_hdrs, "Content-Type"); if (v) { ap_set_content_type(r, v); apr_table_unset(h->resp_hdrs, "Content-Type"); } /* If the cache gave us a Last-Modified header, we can't just * pass it on blindly because of restrictions on future values. */ v = apr_table_get(h->resp_hdrs, "Last-Modified"); if (v) { ap_update_mtime(r, apr_date_parse_http(v)); ap_set_last_modified(r); apr_table_unset(h->resp_hdrs, "Last-Modified"); } /* The HTTP specification says that it is legal to merge duplicate * headers into one. Some browsers that support Cookies don't like * merged headers and prefer that each Set-Cookie header is sent * separately. Lets humour those browsers by not merging. * Oh what a pain it is. */ cookie_table = apr_table_make(r->pool, 2); apr_table_do(set_cookie_doo_doo, cookie_table, r->err_headers_out, "Set-Cookie", NULL); apr_table_do(set_cookie_doo_doo, cookie_table, h->resp_hdrs, "Set-Cookie", NULL); apr_table_unset(r->err_headers_out, "Set-Cookie"); apr_table_unset(h->resp_hdrs, "Set-Cookie"); apr_table_overlap(r->headers_out, h->resp_hdrs, APR_OVERLAP_TABLES_SET); apr_table_overlap(r->err_headers_out, h->resp_err_hdrs, APR_OVERLAP_TABLES_SET); if (!apr_is_empty_table(cookie_table)) { r->err_headers_out = apr_table_overlay(r->pool, r->err_headers_out, cookie_table); } }
ZEKE_API(apr_table_t*) apr_table_clone(apr_pool_t *pool, const apr_table_t *table) { apr_table_t *new_table; const apr_array_header_t *ta = NULL; int nelts = 0; if(!apr_is_empty_table(table)) { ta = apr_table_elts(table); nelts= ta->nelts; } new_table = apr_table_make(pool,nelts); assert(new_table != NULL); if(!apr_is_empty_array(ta)) { int i; apr_table_entry_t *te = (apr_table_entry_t*)ta->elts; for(i = 0; i < ta->nelts; i++) apr_table_set(new_table,te[i].key,te[i].val); } return new_table; }
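/*
 * Minimal usage sketch (not part of the original source): apr_table_clone() returns an
 * independent copy, so later edits to the clone do not affect the original table. Pool handling
 * and key names here are hypothetical.
 */
static void example_clone_usage(apr_pool_t *pool)
{
  apr_table_t *defaults = apr_table_make(pool, 2);
  apr_table_t *copy;
  apr_table_set(defaults, "SERVICE", "WMS");
  copy = apr_table_clone(pool, defaults);
  apr_table_set(copy, "SERVICE", "WMTS");
  /* defaults still maps SERVICE to "WMS"; copy now maps it to "WMTS" */
}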
apr_status_t h2_stream_close_input(h2_stream *stream) { apr_status_t status = APR_SUCCESS; AP_DEBUG_ASSERT(stream); ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, stream->session->c, "h2_stream(%ld-%d): closing input", stream->session->id, stream->id); if (stream->rst_error) { return APR_ECONNRESET; } H2_STREAM_IN(APLOG_TRACE2, stream, "close_pre"); if (close_input(stream) && stream->bbin) { if (stream->request->chunked) { apr_table_t *trailers = stream->request->trailers; if (trailers && !apr_is_empty_table(trailers)) { status = input_add_data(stream, "0\r\n", 3, 0); apr_table_do(input_add_header, stream, trailers, NULL); status = input_add_data(stream, "\r\n", 2, 0); } else { status = input_add_data(stream, "0\r\n\r\n", 5, 0); } } if (status == APR_SUCCESS) { status = h2_stream_input_flush(stream); } if (status == APR_SUCCESS) { status = h2_mplx_in_close(stream->session->mplx, stream->id); } } H2_STREAM_IN(APLOG_TRACE2, stream, "close_post"); return status; }
static int _sqlite_set_pragmas(apr_pool_t *pool, mapcache_cache_sqlite* cache, struct sqlite_conn *conn) { if (cache->pragmas && !apr_is_empty_table(cache->pragmas)) { const apr_array_header_t *elts = apr_table_elts(cache->pragmas); /* FIXME dynamically allocate this string */ int i,ret; char *pragma_stmt; for (i = 0; i < elts->nelts; i++) { apr_table_entry_t entry = APR_ARRAY_IDX(elts, i, apr_table_entry_t); pragma_stmt = apr_psprintf(pool,"PRAGMA %s=%s",entry.key,entry.val); do { ret = sqlite3_exec(conn->handle, pragma_stmt, 0, 0, NULL); if (ret != SQLITE_OK && ret != SQLITE_BUSY && ret != SQLITE_LOCKED) { break; } } while (ret == SQLITE_BUSY || ret == SQLITE_LOCKED); if (ret != SQLITE_OK) { conn->errmsg = apr_psprintf(pool,"failed to execute pragma statement %s",pragma_stmt); return MAPCACHE_FAILURE; } } } return MAPCACHE_SUCCESS; }
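/*
 * For illustration (derived from the apr_psprintf() format above, values are hypothetical): a
 * configured pragma entry with key "journal_mode" and value "WAL" is executed as
 * "PRAGMA journal_mode=WAL" on the connection being set up; the statement is retried while
 * SQLite reports SQLITE_BUSY or SQLITE_LOCKED.
 */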
/** * The output filter routine. This one gets called whenever a response is * generated that passes this filter. Returns APR_SUCCESS if everything works * out. * * @param f The filter definition. * @param bb The bucket brigade containing the data. */ static apr_status_t replace_output_filter(ap_filter_t *f, apr_bucket_brigade *bb) { request_rec *r = f->r; conn_rec *c = r->connection; replace_ctx_t *ctx = f->ctx; apr_bucket *b; apr_size_t len; const char *data; const char *header; apr_status_t rv; int re_vector[RE_VECTOR_SIZE]; // 3 elements per matched pattern replace_pattern_t *next; header_replace_pattern_t *next_header; int modified = 0; // flag to determine if a replacement has // occurred. if (!ctx) { /* Initialize context */ ctx = apr_pcalloc(f->r->pool, sizeof(replace_ctx_t)); f->ctx = ctx; ctx->bb = apr_brigade_create(r->pool, c->bucket_alloc); } /* parse config settings */ /* look for the user-defined filter */ ctx->filter = find_filter_def(f->r->server, f->frec->name); if (!ctx->filter) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, f->r, "couldn't find definition of filter '%s'", f->frec->name); return APR_EINVAL; } ctx->p = f->r->pool; if (ctx->filter->intype && ctx->filter->intype != INTYPE_ALL) { if (!f->r->content_type) { ctx->noop = 1; } else { const char *ctypes = f->r->content_type; const char *ctype = ap_getword(f->r->pool, &ctypes, ';'); if (strcasecmp(ctx->filter->intype, ctype)) { /* wrong IMT for us; don't mess with the output */ ctx->noop = 1; } } } /* exit immediately if there are indications that the filter shouldn't be * executed. */ if (ctx->noop == 1) { ap_pass_brigade(f->next, bb); return APR_SUCCESS; } /** * Loop through the configured header patterns. */ for (next_header = ctx->filter->header_pattern; next_header != NULL; next_header = next_header->next) { // create a separate table with the requested HTTP header entries and // unset those headers in the original request. apr_table_t *header_table; header_table = apr_table_make(r->pool, 2); // create a data structure for the callback function header_replace_cb_t *hrcb; hrcb = apr_palloc(r->pool, sizeof(header_replace_cb_t)); hrcb->header_table = header_table; hrcb->pattern = next_header->pattern; hrcb->extra = next_header->extra; hrcb->replacement = next_header->replacement; hrcb->r = r; // pass any header that is defined to be processed to the callback // function and unset those headers in the original outgoing record. apr_table_do(replace_header_cb, hrcb, r->headers_out, next_header->header, NULL); // only touch the header if the changed header table is not empty. if (!apr_is_empty_table(header_table)) { apr_table_unset(r->headers_out, next_header->header); // overlay the original header table with the new one to reintegrate // the changed headers. r->headers_out = apr_table_overlay(r->pool, r->headers_out, header_table); } } /* Not nice but necessary: Unset the ETag, because we cannot adjust the * value correctly, because we do not know how. */ apr_table_unset(f->r->headers_out, "ETag"); int eos = 0; // flag to check if an EOS bucket is in the brigade. apr_bucket *eos_bucket; // Backup for the EOS bucket. /* Iterate through the available data. Stop if there is an EOS */ for (b = APR_BRIGADE_FIRST(bb); b != APR_BRIGADE_SENTINEL(bb); b = APR_BUCKET_NEXT(b)) { if (APR_BUCKET_IS_EOS(b)) { eos = 1; ap_save_brigade(f, &ctx->bb, &bb, ctx->p); APR_BUCKET_REMOVE(b); eos_bucket = b; break; } } /* If the iteration over the brigade hasn't found an EOS bucket, just save * the brigade and return. */ if (eos != 1) { ap_save_brigade(f, &ctx->bb, &bb, ctx->p); return APR_SUCCESS; } if ((rv = apr_brigade_pflatten(ctx->bb, (char **)&data, &len, ctx->p)) != APR_SUCCESS) { /* Return if the flattening didn't work. */ return rv; } else { /* Remove the original data from the bucket brigade. Otherwise it would * be passed twice (original data and the processed, flattened copy) to * the next filter. */ apr_brigade_cleanup(ctx->bb); } /* Good cast, we just tested len isn't negative or zero */ if (len > 0) { /* start checking for the regexes. */ for (next = ctx->filter->pattern; next != NULL; next = next->next) { int rc = 0; int offset = 0; /* loop through the configured patterns */ do { rc = pcre_exec(next->pattern, next->extra, data, len, offset, 0, re_vector, RE_VECTOR_SIZE); if (rc < 0 && rc != PCRE_ERROR_NOMATCH) { ap_log_rerror(APLOG_MARK, APLOG_ERR, rc, r, "Matching Error %d", rc); return rc; } /* This shouldn't happen */ if (rc == 0) { ap_log_rerror(APLOG_MARK, APLOG_ERR, rc, r, "PCRE output vector too small (%d)", RE_VECTOR_SIZE/3-1); } /* If the result count is greater than 0 then there are * matches in the data string. Thus we try to replace those * strings with the user provided string. */ if (rc > 0) { char *prefix; // the string before the matching part. char *postfix; // the string after the matching part. char *newdata; // the concatenated string of prefix, // the replaced string and postfix. char *replacement; // the string with the data to replace // (after the subpattern processing has // been done). char *to_replace[10]; // the string array containing the // strings that are to be replaced. int match_diff; // the difference between the matching // string and its replacement. int x; // a simple counter. char *pos; // the starting position within the // replacement string, where there is a // subpattern to replace. /* start with building the replacement string */ replacement = apr_pstrcat(ctx->p, next->replacement, NULL); /* look for the subpatterns \0 to \9 */ for (x = 0; x < rc && x < 10; x++) { /* extract the x'th subpattern */ to_replace[x] = substr(data, re_vector[x*2], re_vector[x*2+1] - re_vector[x*2], r); ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, "Found match: %s", to_replace[x]); /* the token ( \0 to \9) we are looking for */ char *token = apr_pstrcat(ctx->p, "\\", apr_itoa(ctx->p, x), NULL); /* allocate memory for the replacement operation */ char *tmp; if (!to_replace[x] || strlen(to_replace[x]) < 2) { tmp = malloc(strlen(replacement) + 1); } else { tmp = malloc(strlen(replacement) - 1 + strlen(to_replace[x])); } /* copy the replacement string to the new * location. */ memcpy(tmp, replacement, strlen(replacement) + 1); replacement = tmp; /* try to replace each occurrence of the token with * its matched subpattern. */ pos = ap_strstr(replacement, token); while (pos) { if (!to_replace[x]) { break; } substr_replace(pos, to_replace[x], strlen(pos), strlen(to_replace[x])); if (strlen(to_replace[x]) < 2) { tmp = malloc(strlen(replacement) + 1); } else { tmp = malloc(strlen(replacement) - 1 + strlen(to_replace[x])); } memcpy(tmp, replacement, strlen(replacement) + 1); /* clean up. */ free(replacement); replacement = tmp; pos = ap_strstr(replacement, token); } } match_diff = strlen(replacement) - (re_vector[1] - re_vector[0]); /* Allocate memory for a buffer to copy the first part * of the data string up to (but not including) the * matching pattern. */ prefix = apr_pcalloc(ctx->p, re_vector[0] + 1); if (prefix == NULL) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "Unable to allocate memory for prefix", NULL); return -1; } /* Copy the string from the offset (beginning of * pattern matching) to the first occurrence of the * pattern and add a trailing \0. */ memcpy(prefix, data, (size_t)re_vector[0]); /* Copy the string from the end of the pattern to the * end of the data string itself. */ postfix = apr_pcalloc(ctx->p, len); if (postfix == NULL) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "Unable to allocate memory for postfix", NULL); return -1; } memcpy(postfix, (data + re_vector[1]), len - re_vector[1]); /* Create the new data string, replace the old one * and clean up. */ newdata = apr_pstrcat(ctx->p, prefix, replacement, postfix, NULL); /* update the pointer to the data and free the allocated * memory for the replacement string. */ data = newdata; free(replacement); /* Calculate the new offset in the data string, where * the new matching round is to begin. */ offset = re_vector[1] + match_diff; len += match_diff; modified = 1; } } while (rc > 0); } /* Adjust the real length of the processed data. */ if (apr_table_get(f->r->headers_out, "Content-Length") != NULL) { apr_table_set(f->r->headers_out, "Content-Length", apr_itoa(ctx->p, len)); } /* If an Entity Tag is set, change the mtime and generate a new ETag. */ if (apr_table_get(f->r->headers_out, "ETag") != NULL) { r->mtime = time(NULL); ap_set_etag(r); } } /* Create a new bucket with the processed data, insert that one into our * brigade, then insert the saved EOS bucket at the end of the brigade * and pass the brigade to the next filter. */ APR_BRIGADE_INSERT_TAIL(ctx->bb, apr_bucket_transient_create(data, len, apr_bucket_alloc_create(ctx->p))); APR_BRIGADE_INSERT_TAIL(ctx->bb, eos_bucket); ap_pass_brigade(f->next, ctx->bb); return APR_SUCCESS; }
AP_DECLARE(int) ap_scan_script_header_err_core(request_rec *r, char *buffer, int (*getsfunc) (char *, int, void *), void *getsfunc_data) { char x[MAX_STRING_LEN]; char *w, *l; int p; int cgi_status = HTTP_UNSET; apr_table_t *merge; apr_table_t *cookie_table; if (buffer) { *buffer = '\0'; } w = buffer ? buffer : x; /* temporary place to hold headers to merge in later */ merge = apr_table_make(r->pool, 10); /* The HTTP specification says that it is legal to merge duplicate * headers into one. Some browsers that support Cookies don't like * merged headers and prefer that each Set-Cookie header is sent * separately. Lets humour those browsers by not merging. * Oh what a pain it is. */ cookie_table = apr_table_make(r->pool, 2); apr_table_do(set_cookie_doo_doo, cookie_table, r->err_headers_out, "Set-Cookie", NULL); while (1) { int rv = (*getsfunc) (w, MAX_STRING_LEN - 1, getsfunc_data); if (rv == 0) { ap_log_rerror(APLOG_MARK, APLOG_ERR|APLOG_TOCLIENT, 0, r, "Premature end of script headers: %s", apr_filepath_name_get(r->filename)); return HTTP_INTERNAL_SERVER_ERROR; } else if (rv == -1) { ap_log_rerror(APLOG_MARK, APLOG_ERR|APLOG_TOCLIENT, 0, r, "Script timed out before returning headers: %s", apr_filepath_name_get(r->filename)); return HTTP_GATEWAY_TIME_OUT; } /* Delete terminal (CR?)LF */ p = strlen(w); /* Indeed, the host's '\n': '\012' for UNIX; '\015' for MacOS; '\025' for OS/390 -- whatever the script generates. */ if (p > 0 && w[p - 1] == '\n') { if (p > 1 && w[p - 2] == CR) { w[p - 2] = '\0'; } else { w[p - 1] = '\0'; } } /* * If we've finished reading the headers, check to make sure any * HTTP/1.1 conditions are met. If so, we're done; normal processing * will handle the script's output. If not, just return the error. * The appropriate thing to do would be to send the script process a * SIGPIPE to let it know we're ignoring it, close the channel to the * script process, and *then* return the failed-to-meet-condition * error. Otherwise we'd be waiting for the script to finish * blithering before telling the client the output was no good. * However, we don't have the information to do that, so we have to * leave it to an upper layer. */ if (w[0] == '\0') { int cond_status = OK; /* PR#38070: This fails because it gets confused when a * CGI Status header overrides ap_meets_conditions. * * We can fix that by dropping ap_meets_conditions when * Status has been set. Since this is the only place * cgi_status gets used, let's test it explicitly. * * The alternative would be to ignore CGI Status when * ap_meets_conditions returns anything interesting. * That would be safer wrt HTTP, but would break CGI. */ if ((cgi_status == HTTP_UNSET) && (r->method_number == M_GET)) { cond_status = ap_meets_conditions(r); } apr_table_overlap(r->err_headers_out, merge, APR_OVERLAP_TABLES_MERGE); if (!apr_is_empty_table(cookie_table)) { /* the cookies have already been copied to the cookie_table */ apr_table_unset(r->err_headers_out, "Set-Cookie"); r->err_headers_out = apr_table_overlay(r->pool, r->err_headers_out, cookie_table); } return cond_status; } /* if we see a bogus header don't ignore it. Shout and scream */ #if APR_CHARSET_EBCDIC /* Chances are that we received an ASCII header text instead of * the expected EBCDIC header lines. 
Try to auto-detect: */ if (!(l = strchr(w, ':'))) { int maybeASCII = 0, maybeEBCDIC = 0; unsigned char *cp, native; apr_size_t inbytes_left, outbytes_left; for (cp = w; *cp != '\0'; ++cp) { native = apr_xlate_conv_byte(ap_hdrs_from_ascii, *cp); if (apr_isprint(*cp) && !apr_isprint(native)) ++maybeEBCDIC; if (!apr_isprint(*cp) && apr_isprint(native)) ++maybeASCII; } if (maybeASCII > maybeEBCDIC) { ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server, "CGI Interface Error: Script headers apparently ASCII: (CGI = %s)", r->filename); inbytes_left = outbytes_left = cp - w; apr_xlate_conv_buffer(ap_hdrs_from_ascii, w, &inbytes_left, w, &outbytes_left); } } #endif /*APR_CHARSET_EBCDIC*/ if (!(l = strchr(w, ':'))) { char malformed[(sizeof MALFORMED_MESSAGE) + 1 + MALFORMED_HEADER_LENGTH_TO_SHOW]; strcpy(malformed, MALFORMED_MESSAGE); strncat(malformed, w, MALFORMED_HEADER_LENGTH_TO_SHOW); if (!buffer) { /* Soak up all the script output - may save an outright kill */ while ((*getsfunc) (w, MAX_STRING_LEN - 1, getsfunc_data)) { continue; } } ap_log_rerror(APLOG_MARK, APLOG_ERR|APLOG_TOCLIENT, 0, r, "%s: %s", malformed, apr_filepath_name_get(r->filename)); return HTTP_INTERNAL_SERVER_ERROR; } *l++ = '\0'; while (*l && apr_isspace(*l)) { ++l; } if (!strcasecmp(w, "Content-type")) { char *tmp; /* Nuke trailing whitespace */ char *endp = l + strlen(l) - 1; while (endp > l && apr_isspace(*endp)) { *endp-- = '\0'; } tmp = apr_pstrdup(r->pool, l); ap_content_type_tolower(tmp); ap_set_content_type(r, tmp); } /* * If the script returned a specific status, that's what * we'll use - otherwise we assume 200 OK. */ else if (!strcasecmp(w, "Status")) { r->status = cgi_status = atoi(l); r->status_line = apr_pstrdup(r->pool, l); } else if (!strcasecmp(w, "Location")) { apr_table_set(r->headers_out, w, l); } else if (!strcasecmp(w, "Content-Length")) { apr_table_set(r->headers_out, w, l); } else if (!strcasecmp(w, "Content-Range")) { apr_table_set(r->headers_out, w, l); } else if (!strcasecmp(w, "Transfer-Encoding")) { apr_table_set(r->headers_out, w, l); } /* * If the script gave us a Last-Modified header, we can't just * pass it on blindly because of restrictions on future values. */ else if (!strcasecmp(w, "Last-Modified")) { ap_update_mtime(r, apr_date_parse_http(l)); ap_set_last_modified(r); } else if (!strcasecmp(w, "Set-Cookie")) { apr_table_add(cookie_table, w, l); } else { apr_table_add(merge, w, l); } } return OK; }
static void ngx_http_mapcache_write_response(mapcache_context *ctx, ngx_http_request_t *r, mapcache_http_response *response) { if(response->mtime) { time_t if_modified_since; if(r->headers_in.if_modified_since) { if_modified_since = ngx_http_parse_time(r->headers_in.if_modified_since->value.data, r->headers_in.if_modified_since->value.len); if (if_modified_since != NGX_ERROR) { apr_time_t apr_if_m_s; apr_time_ansi_put ( &apr_if_m_s, if_modified_since); if(apr_if_m_s<response->mtime) { r->headers_out.status = NGX_HTTP_NOT_MODIFIED; ngx_http_send_header(r); return; } } } char *datestr; datestr = apr_palloc(ctx->pool, APR_RFC822_DATE_LEN); apr_rfc822_date(datestr, response->mtime); apr_table_setn(response->headers,"Last-Modified",datestr); } if(response->headers && !apr_is_empty_table(response->headers)) { const apr_array_header_t *elts = apr_table_elts(response->headers); int i; for(i=0;i<elts->nelts;i++) { apr_table_entry_t entry = APR_ARRAY_IDX(elts,i,apr_table_entry_t); if(!strcasecmp(entry.key,"Content-Type")) { r->headers_out.content_type.len = strlen(entry.val); r->headers_out.content_type.data = (u_char*)entry.val; } else { ngx_table_elt_t *h; h = ngx_list_push(&r->headers_out.headers); if (h == NULL) { return; } h->key.len = strlen(entry.key) ; h->key.data = (u_char*)entry.key ; h->value.len = strlen(entry.val) ; h->value.data = (u_char*)entry.val ; h->hash = 1; } } } if(response->data) { r->headers_out.content_length_n = response->data->size; } int rc; r->headers_out.status = response->code; rc = ngx_http_send_header(r); if (rc == NGX_ERROR || rc > NGX_OK || r->header_only) { return; } if(response->data) { ngx_buf_t *b; ngx_chain_t out; b = ngx_pcalloc(r->pool, sizeof(ngx_buf_t)); if (b == NULL) { ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, "Failed to allocate response buffer."); return; } b->pos = ngx_pcalloc(r->pool,response->data->size); memcpy(b->pos,response->data->buf,response->data->size); b->last = b->pos + response->data->size; b->memory = 1; b->last_buf = 1; b->flush = 1; out.buf = b; out.next = NULL; ngx_http_output_filter(r, &out); } }
AP_DECLARE(void) ap_add_common_vars(request_rec *r) { apr_table_t *e; server_rec *s = r->server; conn_rec *c = r->connection; const char *rem_logname; char *env_path; #if defined(WIN32) || defined(OS2) || defined(BEOS) char *env_temp; #endif const char *host; const apr_array_header_t *hdrs_arr = apr_table_elts(r->headers_in); const apr_table_entry_t *hdrs = (const apr_table_entry_t *) hdrs_arr->elts; int i; apr_port_t rport; /* use a temporary apr_table_t which we'll overlap onto * r->subprocess_env later * (exception: if r->subprocess_env is empty at the start, * write directly into it) */ if (apr_is_empty_table(r->subprocess_env)) { e = r->subprocess_env; } else { e = apr_table_make(r->pool, 25 + hdrs_arr->nelts); } /* First, add environment vars from headers... this is as per * CGI specs, though other sorts of scripting interfaces see * the same vars... */ for (i = 0; i < hdrs_arr->nelts; ++i) { if (!hdrs[i].key) { continue; } /* A few headers are special cased --- Authorization to prevent * rogue scripts from capturing passwords; content-type and -length * for no particular reason. */ if (!strcasecmp(hdrs[i].key, "Content-type")) { apr_table_addn(e, "CONTENT_TYPE", hdrs[i].val); } else if (!strcasecmp(hdrs[i].key, "Content-length")) { apr_table_addn(e, "CONTENT_LENGTH", hdrs[i].val); } /* * You really don't want to disable this check, since it leaves you * wide open to CGIs stealing passwords and people viewing them * in the environment with "ps -e". But, if you must... */ #ifndef SECURITY_HOLE_PASS_AUTHORIZATION else if (!strcasecmp(hdrs[i].key, "Authorization") || !strcasecmp(hdrs[i].key, "Proxy-Authorization")) { continue; } #endif else { apr_table_addn(e, http2env(r->pool, hdrs[i].key), hdrs[i].val); } } if (!(env_path = getenv("PATH"))) { env_path = DEFAULT_PATH; } apr_table_addn(e, "PATH", apr_pstrdup(r->pool, env_path)); #ifdef WIN32 if ((env_temp = getenv("SystemRoot")) != NULL) { apr_table_addn(e, "SystemRoot", env_temp); } if ((env_temp = getenv("COMSPEC")) != NULL) { apr_table_addn(e, "COMSPEC", env_temp); } if ((env_temp = getenv("PATHEXT")) != NULL) { apr_table_addn(e, "PATHEXT", env_temp); } if ((env_temp = getenv("WINDIR")) != NULL) { apr_table_addn(e, "WINDIR", env_temp); } #endif #ifdef OS2 if ((env_temp = getenv("COMSPEC")) != NULL) { apr_table_addn(e, "COMSPEC", env_temp); } if ((env_temp = getenv("ETC")) != NULL) { apr_table_addn(e, "ETC", env_temp); } if ((env_temp = getenv("DPATH")) != NULL) { apr_table_addn(e, "DPATH", env_temp); } if ((env_temp = getenv("PERLLIB_PREFIX")) != NULL) { apr_table_addn(e, "PERLLIB_PREFIX", env_temp); } #endif #ifdef BEOS if ((env_temp = getenv("LIBRARY_PATH")) != NULL) { apr_table_addn(e, "LIBRARY_PATH", env_temp); } #endif apr_table_addn(e, "SERVER_SIGNATURE", ap_psignature("", r)); apr_table_addn(e, "SERVER_SOFTWARE", ap_get_server_banner()); apr_table_addn(e, "SERVER_NAME", ap_escape_html(r->pool, ap_get_server_name(r))); apr_table_addn(e, "SERVER_ADDR", r->connection->local_ip); /* Apache */ apr_table_addn(e, "SERVER_PORT", apr_psprintf(r->pool, "%u", ap_get_server_port(r))); host = ap_get_remote_host(c, r->per_dir_config, REMOTE_HOST, NULL); if (host) { apr_table_addn(e, "REMOTE_HOST", host); } apr_table_addn(e, "REMOTE_ADDR", c->remote_ip); apr_table_addn(e, "DOCUMENT_ROOT", ap_document_root(r)); /* Apache */ apr_table_addn(e, "SERVER_ADMIN", s->server_admin); /* Apache */ apr_table_addn(e, "SCRIPT_FILENAME", r->filename); /* Apache */ rport = c->remote_addr->port; apr_table_addn(e, "REMOTE_PORT", apr_itoa(r->pool, rport)); if 
(r->user) { apr_table_addn(e, "REMOTE_USER", r->user); } else if (r->prev) { request_rec *back = r->prev; while (back) { if (back->user) { apr_table_addn(e, "REDIRECT_REMOTE_USER", back->user); break; } back = back->prev; } } if (r->ap_auth_type) { apr_table_addn(e, "AUTH_TYPE", r->ap_auth_type); } rem_logname = ap_get_remote_logname(r); if (rem_logname) { apr_table_addn(e, "REMOTE_IDENT", apr_pstrdup(r->pool, rem_logname)); } /* Apache custom error responses. If we have redirected set two new vars */ if (r->prev) { if (r->prev->args) { apr_table_addn(e, "REDIRECT_QUERY_STRING", r->prev->args); } if (r->prev->uri) { apr_table_addn(e, "REDIRECT_URL", r->prev->uri); } } if (e != r->subprocess_env) { apr_table_overlap(r->subprocess_env, e, APR_OVERLAP_TABLES_SET); } }
/* Process authentication request from Apache*/ static int pg_authenticate_basic_user(request_rec * r) { pg_auth_config_rec *sec = (pg_auth_config_rec *) ap_get_module_config(r->per_dir_config, &auth_pgsql_module); char *val = NULL; char *sent_pw, *real_pw; int res; char *user; if ((res = ap_get_basic_auth_pw(r, (const char **) &sent_pw))) return res; user = r->user; #ifdef DEBUG_AUTH_PGSQL ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, "[mod_auth_pgsql.c] - pg_authenticate_basic_user - going to auth user \"%s\" pass \"%s\" uri \"%s\"", user, sent_pw, r->unparsed_uri); #endif /* DEBUG_AUTH_PGSQL */ /* if *password* checking is configured in any way, i.e. then * handle it, if not decline and leave it to the next in line.. * We do not check on dbase, group, userid or host name, as it is * perfectly possible to only do group control and leave * user control to the next guy in line. */ if ((!sec->auth_pg_pwd_table) && (!sec->auth_pg_pwd_field)) { ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, "[mod_auth_pgsql.c] - missing configuration parameters"); return DECLINED; } pg_errstr[0] = '\0'; if (sec->auth_pg_cache_passwords && (!apr_is_empty_table(sec->cache_pass_table))) { val = (char *) apr_table_get(sec->cache_pass_table, user); if (val) real_pw = val; else real_pw = get_pg_pw(r, user, sec); } else real_pw = get_pg_pw(r, user, sec); if (!real_pw) { if (pg_errstr[0]) { res = HTTP_INTERNAL_SERVER_ERROR; } else { if (sec->auth_pg_authoritative) { /* force error and access denied */ apr_snprintf(pg_errstr, MAX_STRING_LEN, "mod_auth_pgsql: Password for user %s not found (PG-Authoritative)", user); ap_note_basic_auth_failure(r); res = HTTP_UNAUTHORIZED; } else { /* allow fall through to another module */ return DECLINED; } } ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "[mod_auth_pgsql.c] - ERROR - %s", pg_errstr); return res; } /* allow no password, if the flag is set and the password * is empty. But be sure to log this. */ if ((sec->auth_pg_nopasswd) && (!strlen(real_pw))) { apr_snprintf(pg_errstr, MAX_STRING_LEN, "[mod_auth_pgsql.c] - Empty password accepted for user \"%s\"", user); ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, "[mod_auth_pgsql.c] - ERROR - %s", pg_errstr); pg_log_auth_user(r, sec, user, sent_pw); return OK; }; /* if the flag is off however, keep that kind of stuff at * an arms length. */ if ((!strlen(real_pw)) || (!strlen(sent_pw))) { apr_snprintf(pg_errstr, MAX_STRING_LEN, "[mod_auth_pgsql.c] - Empty password rejected for user \"%s\"", user); ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "[mod_auth_pgsql.c] - ERROR - %s", pg_errstr); ap_note_basic_auth_failure(r); return HTTP_UNAUTHORIZED; }; if (sec->auth_pg_encrypted) switch (sec->auth_pg_hash_type) { case AUTH_PG_HASH_TYPE_MD5: sent_pw = auth_pg_md5(sent_pw); break; case AUTH_PG_HASH_TYPE_CRYPT: sent_pw = (char *) crypt(sent_pw, real_pw); break; case AUTH_PG_HASH_TYPE_BASE64: sent_pw = auth_pg_base64(sent_pw); break; } if (sec->auth_pg_hash_type == AUTH_PG_HASH_TYPE_NETEPI) { char *netepi_pw; if (netepi_pw = netepi_pwd_check(sec, r, real_pw, sent_pw)) goto netepi_ok; apr_snprintf(pg_errstr, MAX_STRING_LEN, "PG user %s: password mismatch", user); ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "[mod_auth_pgsql.c] - ERROR - %s", pg_errstr); ap_note_basic_auth_failure(r); return HTTP_UNAUTHORIZED; netepi_ok: sent_pw = netepi_pw; } else if ((sec->auth_pg_hash_type == AUTH_PG_HASH_TYPE_MD5 || sec->auth_pg_hash_type == AUTH_PG_HASH_TYPE_BASE64 || sec->auth_pg_pwdignorecase) ? 
strcasecmp(real_pw, sent_pw) : strcmp(real_pw, sent_pw)) { apr_snprintf(pg_errstr, MAX_STRING_LEN, "PG user %s: password mismatch", user); ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "[mod_auth_pgsql.c] - ERROR - %s", pg_errstr); ap_note_basic_auth_failure(r); return HTTP_UNAUTHORIZED; } /* store password in the cache */ if (sec->auth_pg_cache_passwords && !val && sec->cache_pass_table) { if ((apr_table_elts(sec->cache_pass_table))->nelts >= MAX_TABLE_LEN) { apr_table_clear(sec->cache_pass_table); } apr_table_set(sec->cache_pass_table, user, real_pw); } pg_log_auth_user(r, sec, user, sent_pw); return OK; }
/* Drive EDITOR to affect the change represented by OPERATION. HEAD is the last-known youngest revision in the repository. */ static svn_error_t * drive(struct operation *operation, svn_revnum_t head, const svn_delta_editor_t *editor, apr_pool_t *pool) { apr_pool_t *subpool = svn_pool_create(pool); apr_hash_index_t *hi; struct driver_state state; for (hi = apr_hash_first(pool, operation->children); hi; hi = apr_hash_next(hi)) { const void *key; void *val; struct operation *child; void *file_baton = NULL; svn_pool_clear(subpool); apr_hash_this(hi, &key, NULL, &val); child = val; /* Deletes and replacements are simple -- delete something. */ if (child->operation == OP_DELETE || child->operation == OP_REPLACE) { SVN_ERR(editor->delete_entry(key, head, operation->baton, subpool)); } /* Opens could be for directories or files. */ if (child->operation == OP_OPEN) { if (child->kind == svn_node_dir) { SVN_ERR(editor->open_directory(key, operation->baton, head, subpool, &child->baton)); } else { SVN_ERR(editor->open_file(key, operation->baton, head, subpool, &file_baton)); } } /* Adds and replacements could also be for directories or files. */ if (child->operation == OP_ADD || child->operation == OP_REPLACE || child->operation == OP_PROPSET) { if (child->kind == svn_node_dir) { SVN_ERR(editor->add_directory(key, operation->baton, child->url, child->rev, subpool, &child->baton)); } else { SVN_ERR(editor->add_file(key, operation->baton, child->url, child->rev, subpool, &file_baton)); } } /* If there's a source file and an open file baton, we get to change textual contents. */ if ((child->src_file) && (file_baton)) { svn_txdelta_window_handler_t handler; void *handler_baton; svn_stream_t *contents; apr_file_t *f = NULL; SVN_ERR(editor->apply_textdelta(file_baton, NULL, subpool, &handler, &handler_baton)); if (strcmp(child->src_file, "-")) { SVN_ERR(svn_io_file_open(&f, child->src_file, APR_READ, APR_OS_DEFAULT, pool)); } else { apr_status_t apr_err = apr_file_open_stdin(&f, pool); if (apr_err) return svn_error_wrap_apr(apr_err, "Can't open stdin"); } contents = svn_stream_from_aprfile(f, pool); SVN_ERR(svn_txdelta_send_stream(contents, handler, handler_baton, NULL, pool)); SVN_ERR(svn_io_file_close(f, pool)); } /* If we opened a file, we need to apply outstanding propmods, then close it. */ if (file_baton) { if ((child->kind == svn_node_file) && (! apr_is_empty_table(child->props))) { state.baton = file_baton; state.pool = subpool; state.editor = editor; state.kind = child->kind; if (! apr_table_do(set_props, &state, child->props, NULL)) SVN_ERR(state.err); } SVN_ERR(editor->close_file(file_baton, NULL, subpool)); } /* If we opened, added, or replaced a directory, we need to recurse, apply outstanding propmods, and then close it. */ if ((child->kind == svn_node_dir) && (child->operation == OP_OPEN || child->operation == OP_ADD || child->operation == OP_REPLACE)) { SVN_ERR(drive(child, head, editor, subpool)); if ((child->kind == svn_node_dir) && (! apr_is_empty_table(child->props))) { state.baton = child->baton; state.pool = subpool; state.editor = editor; state.kind = child->kind; if (! apr_table_do(set_props, &state, child->props, NULL)) SVN_ERR(state.err); } SVN_ERR(editor->close_directory(child->baton, subpool)); } } svn_pool_destroy(subpool); return SVN_NO_ERROR; }
AP_DECLARE(void) ap_add_common_vars(request_rec *r) { apr_table_t *e; server_rec *s = r->server; conn_rec *c = r->connection; core_dir_config *conf = (core_dir_config *)ap_get_core_module_config(r->per_dir_config); const char *env_temp; const apr_array_header_t *hdrs_arr = apr_table_elts(r->headers_in); const apr_table_entry_t *hdrs = (const apr_table_entry_t *) hdrs_arr->elts; int i; apr_port_t rport; char *q; /* use a temporary apr_table_t which we'll overlap onto * r->subprocess_env later * (exception: if r->subprocess_env is empty at the start, * write directly into it) */ if (apr_is_empty_table(r->subprocess_env)) { e = r->subprocess_env; } else { e = apr_table_make(r->pool, 25 + hdrs_arr->nelts); } /* First, add environment vars from headers... this is as per * CGI specs, though other sorts of scripting interfaces see * the same vars... */ for (i = 0; i < hdrs_arr->nelts; ++i) { if (!hdrs[i].key) { continue; } /* A few headers are special cased --- Authorization to prevent * rogue scripts from capturing passwords; content-type and -length * for no particular reason. */ if (!strcasecmp(hdrs[i].key, "Content-type")) { apr_table_addn(e, "CONTENT_TYPE", hdrs[i].val); } else if (!strcasecmp(hdrs[i].key, "Content-length")) { apr_table_addn(e, "CONTENT_LENGTH", hdrs[i].val); } /* * You really don't want to disable this check, since it leaves you * wide open to CGIs stealing passwords and people viewing them * in the environment with "ps -e". But, if you must... */ #ifndef SECURITY_HOLE_PASS_AUTHORIZATION else if (!strcasecmp(hdrs[i].key, "Authorization") || !strcasecmp(hdrs[i].key, "Proxy-Authorization")) { if (conf->cgi_pass_auth == AP_CGI_PASS_AUTH_ON) { add_unless_null(e, http2env(r, hdrs[i].key), hdrs[i].val); } } #endif else add_unless_null(e, http2env(r, hdrs[i].key), hdrs[i].val); } env_temp = apr_table_get(r->subprocess_env, "PATH"); if (env_temp == NULL) { env_temp = getenv("PATH"); } if (env_temp == NULL) { env_temp = DEFAULT_PATH; } apr_table_addn(e, "PATH", apr_pstrdup(r->pool, env_temp)); #if defined(WIN32) env2env(e, "SystemRoot"); env2env(e, "COMSPEC"); env2env(e, "PATHEXT"); env2env(e, "WINDIR"); #elif defined(OS2) env2env(e, "COMSPEC"); env2env(e, "ETC"); env2env(e, "DPATH"); env2env(e, "PERLLIB_PREFIX"); #elif defined(BEOS) env2env(e, "LIBRARY_PATH"); #elif defined(DARWIN) env2env(e, "DYLD_LIBRARY_PATH"); #elif defined(_AIX) env2env(e, "LIBPATH"); #elif defined(__HPUX__) /* HPUX PARISC 2.0W knows both, otherwise redundancy is harmless */ env2env(e, "SHLIB_PATH"); env2env(e, "LD_LIBRARY_PATH"); #else /* Some Unix */ env2env(e, "LD_LIBRARY_PATH"); #endif apr_table_addn(e, "SERVER_SIGNATURE", ap_psignature("", r)); apr_table_addn(e, "SERVER_SOFTWARE", ap_get_server_banner()); apr_table_addn(e, "SERVER_NAME", ap_escape_html(r->pool, ap_get_server_name_for_url(r))); apr_table_addn(e, "SERVER_ADDR", r->connection->local_ip); /* Apache */ apr_table_addn(e, "SERVER_PORT", apr_psprintf(r->pool, "%u", ap_get_server_port(r))); add_unless_null(e, "REMOTE_HOST", ap_get_remote_host(c, r->per_dir_config, REMOTE_HOST, NULL)); apr_table_addn(e, "REMOTE_ADDR", r->useragent_ip); apr_table_addn(e, "DOCUMENT_ROOT", ap_document_root(r)); /* Apache */ apr_table_setn(e, "REQUEST_SCHEME", ap_http_scheme(r)); apr_table_addn(e, "CONTEXT_PREFIX", ap_context_prefix(r)); apr_table_addn(e, "CONTEXT_DOCUMENT_ROOT", ap_context_document_root(r)); apr_table_addn(e, "SERVER_ADMIN", s->server_admin); /* Apache */ if (apr_table_get(r->notes, "proxy-noquery") && (q = ap_strchr(r->filename, '?'))) { *q = '\0'; 
apr_table_addn(e, "SCRIPT_FILENAME", apr_pstrdup(r->pool, r->filename)); *q = '?'; } else { apr_table_addn(e, "SCRIPT_FILENAME", r->filename); /* Apache */ } rport = c->client_addr->port; apr_table_addn(e, "REMOTE_PORT", apr_itoa(r->pool, rport)); if (r->user) { apr_table_addn(e, "REMOTE_USER", r->user); } else if (r->prev) { request_rec *back = r->prev; while (back) { if (back->user) { apr_table_addn(e, "REDIRECT_REMOTE_USER", back->user); break; } back = back->prev; } } add_unless_null(e, "AUTH_TYPE", r->ap_auth_type); env_temp = ap_get_remote_logname(r); if (env_temp) { apr_table_addn(e, "REMOTE_IDENT", apr_pstrdup(r->pool, env_temp)); } /* Apache custom error responses. If we have redirected set two new vars */ if (r->prev) { /* PR#57785: reconstruct full URL here */ apr_uri_t *uri = &r->prev->parsed_uri; if (!uri->scheme) { uri->scheme = (char*)ap_http_scheme(r->prev); } if (!uri->port) { uri->port = ap_get_server_port(r->prev); uri->port_str = apr_psprintf(r->pool, "%u", uri->port); } if (!uri->hostname) { uri->hostname = (char*)ap_get_server_name_for_url(r->prev); } add_unless_null(e, "REDIRECT_QUERY_STRING", r->prev->args); add_unless_null(e, "REDIRECT_URL", apr_uri_unparse(r->pool, uri, 0)); } if (e != r->subprocess_env) { apr_table_overlap(r->subprocess_env, e, APR_OVERLAP_TABLES_SET); } }
static authz_status filegroup_check_authorization(request_rec *r, const char *require_args, const void *parsed_require_args) { authz_groupfile_config_rec *conf = ap_get_module_config(r->per_dir_config, &authz_groupfile_module); char *user = r->user; apr_table_t *grpstatus = NULL; apr_status_t status; const char *filegroup = NULL; if (!user) { return AUTHZ_DENIED_NO_USER; } /* If there is no group file - then we are not * configured. So decline. */ if (!(conf->groupfile)) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01668) "No group file was specified in the configuration"); return AUTHZ_DENIED; } status = groups_for_user(r->pool, user, conf->groupfile, &grpstatus); if (status != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(01669) "Could not open group file: %s", conf->groupfile); return AUTHZ_DENIED; } if (apr_is_empty_table(grpstatus)) { /* no groups available, so exit immediately */ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01670) "Authorization of user %s to access %s failed, reason: " "user doesn't appear in group file (%s).", r->user, r->uri, conf->groupfile); return AUTHZ_DENIED; } filegroup = authz_owner_get_file_group(r); if (filegroup) { if (apr_table_get(grpstatus, filegroup)) { return AUTHZ_GRANTED; } } else { /* No need to emit an error log entry because the call to authz_owner_get_file_group already did it for us. */ return AUTHZ_DENIED; } ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01671) "Authorization of user %s to access %s failed, reason: " "user is not part of the 'require'ed file group.", r->user, r->uri); return AUTHZ_DENIED; }