/** * article_generic_submit_serve: Submit article or reply. * @vr: The #VirguleReq context. * @title: Title, as raw text. * @lead: Lead, as raw text. * @body: Body, as raw text. * @olddate: Null for new posts. Original post date for edit of existing post * @oldkey: Null for new posts. Original article/reply key for edits * @submit_type: "article" or "reply". * @key_base: Base pathname of db key. * @key_suffix: Suffix of db key, after article number. * @art_num_str: The article number being replied to, or NULL if article. * * Submits article or reply. * * Return value: Response code. * * ToDo: There are a lot potential conflicts between char and xmlChar * pointers that should be resolved to make the code more consistent. **/ static int article_generic_submit_serve (VirguleReq *vr, const char *topic, const char *title, const char *lead, const char *body, const char *olddate, const char *oldkey, const char *submit_type, const char *key_base, const char *key_suffix, const char *art_num_str) { apr_pool_t *p = vr->r->pool; apr_table_t *args; Buffer *b = NULL; const Topic **t; const char *date; char *key; xmlDoc *doc; xmlNode *root; xmlNode *tree; int status; char *str = NULL; char *lead_error, *body_error; char *nice_title; char *nice_lead; char *nice_body; virgule_auth_user (vr); if (vr->u == NULL) return virgule_send_error_page (vr, vERROR, "forbidden", "You can't post <x>an article</x> because you're not logged in."); if (!virgule_req_ok_to_reply (vr)) return virgule_send_error_page (vr, vERROR, "forbidden", "You can't post because you're not certified. Please see the <a href=\"%s/certs.html\">certification overview</a> for more details.", vr->prefix); date = virgule_iso_now (p); if (title == NULL || title[0] == 0) return virgule_send_error_page (vr, vERROR, "Need title", "Your <x>%s</x> needs a title. Go back and try again.", submit_type); if (!strcmp (submit_type, "article") && (lead == NULL || lead[0] == 0)) return virgule_send_error_page (vr, vERROR, "Need lead", "Your <x>article</x> needs a lead. Go back and try again."); if (!strcmp (submit_type, "reply") && (body == NULL || body[0] == 0)) return virgule_send_error_page (vr, vERROR, "Need body", "Your reply needs a body. Go back and try again."); nice_title = virgule_nice_text (p, title); nice_lead = lead == NULL ? "" : virgule_nice_htext (vr, lead, &lead_error); nice_body = body == NULL ? 
"" : virgule_nice_htext (vr, body, &body_error); args = virgule_get_args_table (vr); if(olddate != NULL) apr_table_set (args, "preview", "Preview"); else olddate = apr_table_get (args, "olddate"); if(oldkey == NULL) oldkey = apr_table_get (args, "oldkey"); if (apr_table_get (args, "preview")) { /* render a preview */ if (virgule_set_temp_buffer (vr) != 0) return HTTP_INTERNAL_SERVER_ERROR; b = vr->b; if (!strcmp (submit_type, "reply")) { str = apr_pstrdup (p, "Reply preview"); virgule_render_cert_level_begin (vr, vr->u, CERT_STYLE_MEDIUM); virgule_buffer_printf (b, "<font size=+2><b>%s</b></font><br>\n", nice_title); virgule_render_cert_level_end (vr, CERT_STYLE_MEDIUM); virgule_buffer_printf (b, "<p>%s</p>\n", nice_body); virgule_buffer_puts (b, "<hr>\n"); virgule_buffer_printf (b, "<p>Edit your reply:</p>\n" "<form method=\"POST\" action=\"replysubmit.html\" accept-charset=\"UTF-8\">\n" "<p><x>Article</x> title: <br>\n" "<input type=\"text\" name=\"title\" value=\"%s\" size=\"40\" maxlength=\"60\"></p>\n" "<p>Body of <x>article</x>: <br>\n" "<textarea name=\"body\" cols=\"72\" rows=\"16\" wrap=\"hard\">%s" "</textarea></p>\n" "<input type=\"hidden\" name=\"art_num\" value=\"%s\">\n" "<p><input type=\"submit\" name=\"post\" value=\"Post\">\n" "<input type=\"submit\" name=\"preview\" value=\"Preview\">\n" "</form>\n", virgule_str_subst (p, title, "\"", """), ap_escape_html (p, body), art_num_str); virgule_render_acceptable_html (vr); } else if (!strcmp (submit_type, "article")) { str = apr_pstrdup (p, "<x>Article</x> preview"); if(vr->priv->use_article_topics) { virgule_buffer_puts (b, "<table><tr><td>"); article_render_topic (vr, (char *)topic); virgule_buffer_puts (b, "</td><td>"); } virgule_render_cert_level_begin (vr, vr->u, CERT_STYLE_LARGE); virgule_buffer_printf (b, "<span class=\"article-title\">%s</span>",nice_title); virgule_render_cert_level_end (vr, CERT_STYLE_LARGE); if(vr->priv->use_article_topics) virgule_buffer_puts (b, "</td></tr></table>\n"); virgule_buffer_printf (b, "<p>%s</p>\n", nice_lead); virgule_buffer_printf (b, "<p>%s</p>\n", nice_body); virgule_buffer_puts (b, "<hr>\n"); virgule_buffer_puts (b, "<p>Edit your <x>article</x>:</p>\n" "<form method=\"POST\" action=\"postsubmit.html\" accept-charset=\"UTF-8\">\n"); if(olddate && oldkey) { virgule_buffer_printf (b, "<input type=\"hidden\" name=\"olddate\" value=\"%s\" />\n", olddate); virgule_buffer_printf (b, "<input type=\"hidden\" name=\"oldkey\" value=\"%s\" />\n", oldkey); } if(vr->priv->use_article_topics) { virgule_buffer_puts (b, "<p><b><x>Article</x> topic</b>:<br>\n <select name=\"topic\">\n"); for (t = vr->priv->topics; *t; t++) virgule_buffer_printf (b, "<option%s>%s</option>\n", strcmp((*t)->desc,topic) ? "" : " selected",(*t)->desc); virgule_buffer_puts (b, " </select></p>\n"); } virgule_buffer_printf (b, "<p><b><x>Article</x> title</b>:<br>\n" "<input type=\"text\" name=\"title\" value=\"%s\" size=\"40\" maxlength=\"%i\"></p>\n" "<p><b><x>Article</x> lead</b>. This should be a one paragraph summary " "of the story complete with links to the original " "sources when appropriate.<br>" "<textarea name=\"lead\" cols=72 rows=6 wrap=hard>%s" "</textarea> </p>\n", virgule_str_subst (p, title, "\"", """), vr->priv->article_title_maxsize, ap_escape_html (p, lead)); if (lead_error != NULL) virgule_buffer_printf (b, "<p><b>Warning:</b> %s</p>\n", lead_error); virgule_buffer_printf (b,"<p><b><x>Article</x> Body</b>. This should " "contain the body of your article and may be as long as " "needed. 
If your entire article is only one paragraph, " "put it in the lead field above and leave this one empty<br>" "<textarea name=\"body\" cols=72 rows=16 wrap=hard>%s" "</textarea></p>\n" "<p><b>Warning:</b> Please proof read your article " "and verify spelling and any html markup before posting. " "Click the <b>Preview</b> button to see changes. Once " "you click the <b>Post</b> button your article will be " "posted and changes are no longer possible." "<p><input type=\"submit\" name=post value=\"Post\">\n" "<input type=\"submit\" name=preview value=\"Preview\">\n" "</form>\n", ap_escape_html (p, (body ? body : ""))); if (body_error != NULL) virgule_buffer_printf (b, "<p><b>Warning:</b> %s </p>\n", body_error); virgule_render_acceptable_html (vr); } virgule_set_main_buffer (vr); return virgule_render_in_template (vr, "/templates/default.xml", "content", str); } key = apr_psprintf (p, "%s/_%d%s", key_base, oldkey ? atoi (oldkey) : virgule_db_dir_max (vr->db, key_base) + 1, key_suffix); doc = virgule_db_xml_doc_new (p); root = xmlNewDocNode (doc, NULL, (xmlChar *)"article", NULL); doc->xmlRootNode = root; if(olddate != NULL) { xmlNewChild (root, NULL, (xmlChar *)"date", (xmlChar *)olddate); xmlNewChild (root, NULL, (xmlChar *)"update", (xmlChar *)date); } else { tree = xmlNewChild (root, NULL, (xmlChar *)"date", (xmlChar *)date); } tree = xmlNewChild (root, NULL, (xmlChar *)"author", (xmlChar *)vr->u); tree = xmlNewChild (root, NULL, (xmlChar *)"title", NULL); xmlAddChild (tree, xmlNewDocText (doc, (xmlChar *)nice_title)); if(vr->priv->use_article_topics) { tree = xmlNewChild (root, NULL, (xmlChar *)"topic", NULL); xmlAddChild (tree, xmlNewDocText (doc, (xmlChar *)topic)); } if (lead && lead[0]) { tree = xmlNewChild (root, NULL, (xmlChar *)"lead", NULL); xmlAddChild (tree, xmlNewDocText (doc, (xmlChar *)nice_lead)); } if (body != NULL && body[0]) { tree = xmlNewChild (root, NULL, (xmlChar *)"body", NULL); xmlAddChild (tree, xmlNewDocText (doc, (xmlChar *)nice_body)); } /* sanity-check edit qualifications before saving */ if (olddate || oldkey) { char *a, *d; time_t t; xmlNodePtr r; int art_num = atoi (oldkey); char *k = apr_psprintf (vr->r->pool, "articles/_%d/article.xml", art_num); xmlDocPtr old = virgule_db_xml_get (vr->r->pool, vr->db, k); if (old == NULL) return virgule_send_error_page (vr, vERROR, "not found", "The specified <x>article</x> does not exist."); r = xmlDocGetRootElement (old); /* verify the article is not too old to edit */ d = virgule_xml_find_child_string (r, "date", NULL); t = virgule_virgule_to_time_t (vr, d); if (t + (vr->priv->article_days_to_edit * 86400) < time (NULL)) return virgule_send_error_page (vr, vERROR, "forbidden", "This <x>article</x> is too old to be edited."); /* verify this user can edit this article */ a = virgule_xml_find_child_string (r, "author", NULL); if (strcmp (vr->u, a)) return virgule_send_error_page (vr, vERROR, "forbidden", "Only <x>articles</x> posted by you may be edited."); } status = virgule_db_xml_put (p, vr->db, key, doc); if (status) return virgule_send_error_page (vr, vERROR, "database", "There was an error storing the <x>%s</x>. This means there's something wrong with the site.", submit_type); if (!strcmp (submit_type, "reply")) apr_table_add (vr->r->headers_out, "refresh", apr_psprintf(p, "0;URL=/article/%s.html#lastread", art_num_str)); else apr_table_add (vr->r->headers_out, "refresh", apr_psprintf(p, "0;URL=/article/%d.html", oldkey ? 
atoi (oldkey) : virgule_db_dir_max (vr->db, key_base))); str = apr_psprintf (p, "Ok, your <x>%s</x> was posted. Thanks!", submit_type); return virgule_send_error_page (vr, vINFO, "Posted", str); }
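/*
 * A minimal sketch (not taken from this file) of how a postsubmit.html
 * handler might wire the parsed POST arguments into
 * article_generic_submit_serve(). The field names match the form rendered
 * above; the "articles" key base, the "/article.xml" suffix and the wrapper
 * name are assumptions, not quotes from this source.
 */
static int article_submit_serve_sketch (VirguleReq *vr)
{
  apr_table_t *args = virgule_get_args_table (vr);

  if (args == NULL)
    return virgule_send_error_page (vr, vERROR, "form",
                                    "Invalid form submission.");

  return article_generic_submit_serve (vr,
      apr_table_get (args, "topic"),    /* topic dropdown             */
      apr_table_get (args, "title"),    /* raw title text             */
      apr_table_get (args, "lead"),     /* raw lead text              */
      apr_table_get (args, "body"),     /* raw body text              */
      apr_table_get (args, "olddate"),  /* NULL unless editing        */
      apr_table_get (args, "oldkey"),   /* NULL unless editing        */
      "article",                        /* submit_type                */
      "articles",                       /* key_base (assumed)         */
      "/article.xml",                   /* key_suffix (assumed)       */
      NULL);                            /* art_num_str: not a reply   */
}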
static apr_status_t rpaf_cleanup(void *data) {
    rpaf_cleanup_rec *rcr = (rpaf_cleanup_rec *)data;
    rcr->r->DEF_IP = apr_pstrdup(rcr->r->connection->pool, rcr->old_ip);
    memcpy(rcr->r->DEF_ADDR, &rcr->old_addr, sizeof(apr_sockaddr_t));
    return APR_SUCCESS;
}
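/*
 * A sketch of how this cleanup is typically armed before mod_rpaf overwrites
 * the client address: the original values are stashed in a rpaf_cleanup_rec
 * and apr_pool_cleanup_register() restores them when the request pool is
 * destroyed. Field names beyond those referenced in rpaf_cleanup() above are
 * assumptions; DEF_IP/DEF_ADDR are the module's per-Apache-version macros.
 */
static void rpaf_save_and_register_sketch(request_rec *r)
{
    rpaf_cleanup_rec *rcr = apr_pcalloc(r->pool, sizeof(rpaf_cleanup_rec));

    rcr->r      = r;
    rcr->old_ip = apr_pstrdup(r->connection->pool, r->DEF_IP);
    memcpy(&rcr->old_addr, r->DEF_ADDR, sizeof(apr_sockaddr_t));

    /* plain cleanup runs on pool destroy; no special child cleanup needed */
    apr_pool_cleanup_register(r->pool, (void *)rcr, rpaf_cleanup,
                              apr_pool_cleanup_null);
}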
static mrb_value ap_mrb_get_request_document_root(mrb_state *mrb, mrb_value str)
{
  request_rec *r = ap_mrb_get_request();
  char *val = apr_pstrdup(r->pool, ap_document_root(r));
  return mrb_str_new(mrb, val, strlen(val));
}
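/*
 * A sketch of how a getter like this is usually exposed to mruby scripts.
 * The class name "Request" and the Ruby-side method name are illustrative
 * assumptions; the real mod_mruby binding code may differ.
 */
static void ap_mrb_bind_request_sketch(mrb_state *mrb)
{
  struct RClass *class_request;

  /* define (or reopen) a Request class under Object */
  class_request = mrb_define_class(mrb, "Request", mrb->object_class);

  /* Request#document_root -> String, takes no arguments */
  mrb_define_method(mrb, class_request, "document_root",
                    ap_mrb_get_request_document_root, MRB_ARGS_NONE());
}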
static apr_status_t line_edit_filter(ap_filter_t* f, apr_bucket_brigade* bb) { int i, j; unsigned int match ; unsigned int nmatch = 10 ; ap_regmatch_t pmatch[10] ; const char* bufp; const char* subs ; apr_size_t bytes ; apr_size_t fbytes ; apr_size_t offs ; const char* buf ; const char* le = NULL ; const char* le_n ; const char* le_r ; char* fbuf ; apr_bucket* b = APR_BRIGADE_FIRST(bb) ; apr_bucket* b1 ; int found = 0 ; apr_status_t rv ; apr_bucket_brigade* bbline ; line_edit_cfg* cfg = ap_get_module_config(f->r->per_dir_config, &line_edit_module) ; rewriterule* rules = (rewriterule*) cfg->rewriterules->elts ; rewriterule* newrule; line_edit_ctx* ctx = f->ctx ; if (ctx == NULL) { /* check env to see if we're wanted, to give basic control with 2.0 */ buf = apr_table_get(f->r->subprocess_env, "LineEdit"); if (buf && f->r->content_type) { char* lcbuf = apr_pstrdup(f->r->pool, buf) ; char* lctype = apr_pstrdup(f->r->pool, f->r->content_type) ; char* c ; for (c = lcbuf; *c; ++c) if (isupper(*c)) *c = tolower(*c) ; for (c = lctype; *c; ++c) if (isupper(*c)) *c = tolower(*c) ; else if (*c == ';') { *c = 0 ; break ; } if (!strstr(lcbuf, lctype)) { /* don't filter this content type */ ap_filter_t* fnext = f->next ; ap_remove_output_filter(f) ; return ap_pass_brigade(fnext, bb) ; } } ctx = f->ctx = apr_palloc(f->r->pool, sizeof(line_edit_ctx)) ; ctx->bbsave = apr_brigade_create(f->r->pool, f->c->bucket_alloc) ; /* If we have any regex matches, we'll need to copy everything, so we * have null-terminated strings to parse. That's a lot of memory if * we're streaming anything big. So we'll use (and reuse) a local * subpool. Fall back to the request pool if anything bad happens. */ ctx->lpool = f->r->pool ; for (i = 0; i < cfg->rewriterules->nelts; ++i) { if ( rules[i].flags & M_REGEX ) { if (apr_pool_create(&ctx->lpool, f->r->pool) != APR_SUCCESS) { ctx->lpool = f->r->pool ; } break ; } } /* If we have env interpolation, we'll need a private copy of * our rewrite rules with this requests env. Otherwise we can * save processing time by using the original. * * If one ENV is found, we also have to copy all previous and * subsequent rules, even those with no interpolation. */ ctx->rewriterules = cfg->rewriterules; for (i = 0; i < cfg->rewriterules->nelts; ++i) { found |= (rules[i].flags & M_ENV) ; if ( found ) { if (ctx->rewriterules == cfg->rewriterules) { ctx->rewriterules = apr_array_make(f->r->pool, cfg->rewriterules->nelts, sizeof(rewriterule)); for (j = 0; j < i; ++j) { newrule = apr_array_push (((line_edit_ctx*)ctx)->rewriterules) ; newrule->from = rules[j].from; newrule->to = rules[j].to; newrule->flags = rules[j].flags; newrule->length = rules[j].length; } } /* this rule needs to be interpolated */ newrule = apr_array_push (((line_edit_ctx*)ctx)->rewriterules) ; newrule->from = rules[i].from; if (rules[i].flags & M_ENV) { newrule->to = interpolate_env(f->r, rules[i].to); } else { newrule->to = rules[i].to ; } newrule->flags = rules[i].flags; newrule->length = rules[i].length; } } /* for back-compatibility with Apache 2.0, set some protocol stuff */ apr_table_unset(f->r->headers_out, "Content-Length") ; apr_table_unset(f->r->headers_out, "Content-MD5") ; apr_table_unset(f->r->headers_out, "Accept-Ranges") ; } /* by now our rules are in ctx->rewriterules */ rules = (rewriterule*) ctx->rewriterules->elts ; /* bbline is what goes to the next filter, * so we (can) have a new one each time. 
*/ bbline = apr_brigade_create(f->r->pool, f->c->bucket_alloc) ; /* first ensure we have no mid-line breaks that might be in the * middle of a search string causing us to miss it! At the same * time we split into lines to avoid pattern-matching over big * chunks of memory. */ while ( b != APR_BRIGADE_SENTINEL(bb) ) { if ( !APR_BUCKET_IS_METADATA(b) ) { if ( apr_bucket_read(b, &buf, &bytes, APR_BLOCK_READ) == APR_SUCCESS ) { if ( bytes == 0 ) { APR_BUCKET_REMOVE(b) ; } else while ( bytes > 0 ) { switch (cfg->lineend) { case LINEEND_UNIX: le = memchr(buf, '\n', bytes) ; break ; case LINEEND_MAC: le = memchr(buf, '\r', bytes) ; break ; case LINEEND_DOS: /* Edge-case issue: if a \r\n spans buckets it'll get missed. * Not a problem for present purposes, but would be an issue * if we claimed to support pattern matching on the lineends. */ found = 0 ; le = memchr(buf+1, '\n', bytes-1) ; while ( le && !found ) { if ( le[-1] == '\r' ) { found = 1 ; } else { le = memchr(le+1, '\n', bytes-1 - (le+1 - buf)) ; } } if ( !found ) le = 0 ; break; case LINEEND_ANY: case LINEEND_UNSET: /* Edge-case notabug: if a \r\n spans buckets it'll get seen as * two line-ends. It'll insert the \n as a one-byte bucket. */ le_n = memchr(buf, '\n', bytes) ; le_r = memchr(buf, '\r', bytes) ; if ( le_n != NULL ) if ( le_n == le_r + sizeof(char)) le = le_n ; else if ( (le_r < le_n) && (le_r != NULL) ) le = le_r ; else le = le_n ; else le = le_r ; break; case LINEEND_NONE: le = 0 ; break; case LINEEND_CUSTOM: le = memchr(buf, cfg->lechar, bytes) ; break; } if ( le ) { /* found a lineend in this bucket. */ offs = 1 + ((unsigned int)le-(unsigned int)buf) / sizeof(char) ; apr_bucket_split(b, offs) ; bytes -= offs ; buf += offs ; b1 = APR_BUCKET_NEXT(b) ; APR_BUCKET_REMOVE(b); /* Is there any previous unterminated content ? */ if ( !APR_BRIGADE_EMPTY(ctx->bbsave) ) { /* append this to any content waiting for a lineend */ APR_BRIGADE_INSERT_TAIL(ctx->bbsave, b) ; rv = apr_brigade_pflatten(ctx->bbsave, &fbuf, &fbytes, f->r->pool) ; /* make b a new bucket of the flattened stuff */ b = apr_bucket_pool_create(fbuf, fbytes, f->r->pool, f->r->connection->bucket_alloc) ; /* bbsave has been consumed, so clear it */ apr_brigade_cleanup(ctx->bbsave) ; } /* b now contains exactly one line */ APR_BRIGADE_INSERT_TAIL(bbline, b); b = b1 ; } else { /* no lineend found. Remember the dangling content */ APR_BUCKET_REMOVE(b); APR_BRIGADE_INSERT_TAIL(ctx->bbsave, b); bytes = 0 ; } } /* while bytes > 0 */ } else { /* bucket read failed - oops ! Let's remove it. */ APR_BUCKET_REMOVE(b); } } else if ( APR_BUCKET_IS_EOS(b) ) { /* If there's data to pass, send it in one bucket */ if ( !APR_BRIGADE_EMPTY(ctx->bbsave) ) { rv = apr_brigade_pflatten(ctx->bbsave, &fbuf, &fbytes, f->r->pool) ; b1 = apr_bucket_pool_create(fbuf, fbytes, f->r->pool, f->r->connection->bucket_alloc) ; APR_BRIGADE_INSERT_TAIL(bbline, b1); } apr_brigade_cleanup(ctx->bbsave) ; /* start again rather than segfault if a seriously buggy * filter in front of us sent a bogus EOS */ f->ctx = NULL ; /* move the EOS to the new brigade */ APR_BUCKET_REMOVE(b); APR_BRIGADE_INSERT_TAIL(bbline, b); } else { /* chop flush or unknown metadata bucket types */ apr_bucket_delete(b); } /* OK, reset pointer to what's left (since we're not in a for-loop) */ b = APR_BRIGADE_FIRST(bb) ; } /* OK, now we have a bunch of complete lines in bbline, * so we can apply our edit rules */ /* When we get a match, we split the line into before+match+after. 
* To flatten that back into one buf every time would be inefficient. * So we treat it as three separate bufs to apply future rules. * * We can only reasonably do that by looping over buckets *inside* * the loop over rules. * * That means concepts like one-match-per-line or start-of-line-only * won't work, except for the first rule. So we won't pretend. */ for (i = 0; i < ctx->rewriterules->nelts; ++i) { for ( b = APR_BRIGADE_FIRST(bbline) ; b != APR_BRIGADE_SENTINEL(bbline) ; b = APR_BUCKET_NEXT(b) ) { if ( !APR_BUCKET_IS_METADATA(b) && (apr_bucket_read(b, &buf, &bytes, APR_BLOCK_READ) == APR_SUCCESS)) { if ( rules[i].flags & M_REGEX ) { bufp = apr_pstrmemdup(ctx->lpool, buf, bytes) ; while ( ! ap_regexec(rules[i].from.r, bufp, nmatch, pmatch, 0) ) { match = pmatch[0].rm_so ; subs = ap_pregsub(f->r->pool, rules[i].to, bufp, nmatch, pmatch) ; apr_bucket_split(b, match) ; b1 = APR_BUCKET_NEXT(b) ; apr_bucket_split(b1, pmatch[0].rm_eo - match) ; b = APR_BUCKET_NEXT(b1) ; apr_bucket_delete(b1) ; b1 = apr_bucket_pool_create(subs, strlen(subs), f->r->pool, f->r->connection->bucket_alloc) ; APR_BUCKET_INSERT_BEFORE(b, b1) ; bufp += pmatch[0].rm_eo ; } } else { bufp = buf ; while (subs = apr_strmatch(rules[i].from.s, bufp, bytes), subs != NULL) { match = ((unsigned int)subs - (unsigned int)bufp) / sizeof(char) ; bytes -= match ; bufp += match ; apr_bucket_split(b, match) ; b1 = APR_BUCKET_NEXT(b) ; apr_bucket_split(b1, rules[i].length) ; b = APR_BUCKET_NEXT(b1) ; apr_bucket_delete(b1) ; bytes -= rules[i].length ; bufp += rules[i].length ; b1 = apr_bucket_immortal_create(rules[i].to, strlen(rules[i].to), f->r->connection->bucket_alloc) ; APR_BUCKET_INSERT_BEFORE(b, b1) ; } } } } /* If we used a local pool, clear it now */ if ( (ctx->lpool != f->r->pool) && (rules[i].flags & M_REGEX) ) { apr_pool_clear(ctx->lpool) ; } } /* now pass it down the chain */ rv = ap_pass_brigade(f->next, bbline) ; /* if we have leftover data, don't risk it going out of scope */ for ( b = APR_BRIGADE_FIRST(ctx->bbsave) ; b != APR_BRIGADE_SENTINEL(ctx->bbsave) ; b = APR_BUCKET_NEXT(b)) { apr_bucket_setaside(b, f->r->pool) ; } return rv ; }
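/*
 * A sketch of the hook wiring that makes line_edit_filter available. The
 * filter name "line-editor" matches mod_line_edit's documentation, but treat
 * it here as an assumption rather than a quote from this source.
 */
static void line_edit_register_hooks_sketch(apr_pool_t *pool)
{
    /* Register as an ordinary resource-level output filter; it is then
     * activated per request with SetOutputFilter / AddOutputFilter, or via
     * the "LineEdit" subprocess_env check seen at the top of the filter. */
    ap_register_output_filter("line-editor", line_edit_filter, NULL,
                              AP_FTYPE_RESOURCE);
}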
/** * Loads the (sorted) CSV file into the memory. * * @param pool * @param db Path to the db file * @param size Returns the size f the db (elements in the array) * @param msg Error message if something got wrong * @return Array with all entries from the CSV file (or NULL on error) */ static qos_geo_t *qos_loadgeo(apr_pool_t *pool, const char *db, int *size, char **msg) { regmatch_t ma[MAX_REG_MATCH]; regex_t preg; regex_t pregd; qos_geo_t *geo = NULL; qos_geo_t *g = NULL; qos_geo_t *last = NULL; int lines = 0; char line[HUGE_STRING_LEN]; char buf[HUGE_STRING_LEN]; FILE *file; const qos_inj_t *inj = m_inj; *size = 0; if(regcomp(&preg, QS_GEO_PATTERN, REG_EXTENDED)) { // internal error *msg = apr_pstrdup(pool, "failed to compile regular expression "QS_GEO_PATTERN); return NULL; } if(regcomp(&pregd, QS_GEO_PATTERN_D, REG_EXTENDED)) { // internal error *msg = apr_pstrdup(pool, "failed to compile regular expression "QS_GEO_PATTERN_D); return NULL; } file = fopen(db, "r"); if(!file) { return NULL; } while(fgets(line, sizeof(line), file) != NULL) { if(strlen(line) > 0) { if(regexec(&preg, line, 0, NULL, 0) == 0) { lines++; } else { *msg = apr_psprintf(pool, "invalid entry in database: '%s'", line); } } } *size = lines; geo = apr_pcalloc(pool, sizeof(qos_geo_t) * lines); g = geo; fseek(file, 0, SEEK_SET); lines = 0; while(fgets(line, sizeof(line), file) != NULL) { lines++; if(strlen(line) > 0) { int plus = 0; if(m_inject) { strcpy(buf, line); } if(regexec(&pregd, line, MAX_REG_MATCH, ma, 0) == 0) { plus = 1; } if(plus || regexec(&preg, line, MAX_REG_MATCH, ma, 0) == 0) { line[ma[1].rm_eo] = '\0'; line[ma[2].rm_eo] = '\0'; line[ma[3].rm_eo] = '\0'; g->start = atoll(&line[ma[1].rm_so]); g->end = atoll(&line[ma[2].rm_so]); g->c[0] = '\0'; if(m_inject) { if(inj->start && (g->start > inj->start)) { printf("%s\n", inj->c); inj++; } else { printf("%s", buf); } } strncpy(g->country, &line[ma[3].rm_so], 2); if(last) { if(g->start < last->start) { *msg = apr_psprintf(pool, "wrong order/lines not sorted (line %d)", lines); } } if(plus) { line[ma[4].rm_eo] = '\0'; strncpy(g->c, &line[ma[4].rm_so], 500); } last = g; g++; } } } fclose(file); return geo; }
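/*
 * A minimal sketch of how the sorted array returned by qos_loadgeo() can be
 * searched for the entry covering a numeric IP address. mod_qos has its own
 * lookup helper; this binary search is only illustrative and assumes that
 * start/end are plain integer fields, as the atoll() calls above suggest.
 */
static const qos_geo_t *qos_geo_find_sketch(const qos_geo_t *geo, int size,
                                            unsigned long long ip) {
  int lo = 0;
  int hi = size - 1;
  while(lo <= hi) {
    int mid = lo + (hi - lo) / 2;
    if(ip < geo[mid].start) {
      hi = mid - 1;            /* range starts above the address */
    } else if(ip > geo[mid].end) {
      lo = mid + 1;            /* range ends below the address */
    } else {
      return &geo[mid];        /* start <= ip <= end: match */
    }
  }
  return NULL;                 /* no covering range */
}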
/* * resolve and validate an access_token against the configured Authorization Server */ static apr_byte_t oidc_oauth_resolve_access_token(request_rec *r, oidc_cfg *c, const char *access_token, json_t **token, char **response) { json_t *result = NULL; const char *json = NULL; /* see if we've got the claims for this access_token cached already */ c->cache->get(r, OIDC_CACHE_SECTION_ACCESS_TOKEN, access_token, &json); if (json == NULL) { /* not cached, go out and validate the access_token against the Authorization server and get the JSON claims back */ if (oidc_oauth_validate_access_token(r, c, access_token, &json) == FALSE) { oidc_error(r, "could not get a validation response from the Authorization server"); return FALSE; } /* decode and see if it is not an error response somehow */ if (oidc_util_decode_json_and_check_error(r, json, &result) == FALSE) return FALSE; json_t *active = json_object_get(result, "active"); if (active != NULL) { if ((!json_is_boolean(active)) || (!json_is_true(active))) { oidc_debug(r, "no \"active\" boolean object with value \"true\" found in response JSON object"); json_decref(result); return FALSE; } json_t *exp = json_object_get(result, "exp"); if ((exp != NULL) && (json_is_number(exp))) { /* set it in the cache so subsequent request don't need to validate the access_token and get the claims anymore */ c->cache->set(r, OIDC_CACHE_SECTION_ACCESS_TOKEN, access_token, json, apr_time_from_sec(json_integer_value(exp))); } else if (json_integer_value(exp) <= 0) { oidc_debug(r, "response JSON object did not contain an \"exp\" integer number; introspection result will not be cached"); } } else { /* assume PingFederate validation: get and check the expiry timestamp */ json_t *expires_in = json_object_get(result, "expires_in"); if ((expires_in == NULL) || (!json_is_number(expires_in))) { oidc_error(r, "response JSON object did not contain an \"expires_in\" number"); json_decref(result); return FALSE; } if (json_integer_value(expires_in) <= 0) { oidc_warn(r, "\"expires_in\" number <= 0 (%" JSON_INTEGER_FORMAT "); token already expired...", json_integer_value(expires_in)); json_decref(result); return FALSE; } /* set it in the cache so subsequent request don't need to validate the access_token and get the claims anymore */ c->cache->set(r, OIDC_CACHE_SECTION_ACCESS_TOKEN, access_token, json, apr_time_now() + apr_time_from_sec(json_integer_value(expires_in))); } } else { /* we got the claims for this access_token in our cache, decode it in to a JSON structure */ json_error_t json_error; result = json_loads(json, 0, &json_error); if (result == NULL) { oidc_error(r, "cached JSON was corrupted: %s", json_error.text); return FALSE; } } /* return the access_token JSON object */ json_t *tkn = json_object_get(result, "access_token"); if ((tkn != NULL) && (json_is_object(tkn))) { /* * assume PingFederate validation: copy over those claims from the access_token * that are relevant for authorization purposes */ json_object_set(tkn, "client_id", json_object_get(result, "client_id")); json_object_set(tkn, "scope", json_object_get(result, "scope")); //oidc_oauth_spaced_string_to_array(r, result, "scope", tkn, "scopes"); /* return only the pimped access_token results */ *token = json_deep_copy(tkn); char *s_token = json_dumps(*token, 0); *response = apr_pstrdup(r->pool, s_token); free(s_token); json_decref(result); } else { //oidc_oauth_spaced_string_to_array(r, result, "scope", result, "scopes"); /* assume spec compliant introspection */ *token = result; *response = apr_pstrdup(r->pool, 
json); } return TRUE; }
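/*
 * A sketch of how a caller might consume the introspection result returned
 * above. The claim names ("scope") are examples only and error handling is
 * reduced to the minimum; only the documented jansson calls
 * json_object_get()/json_is_string()/json_string_value()/json_decref() are
 * used, and the caller owns the returned token object.
 */
static void oidc_oauth_use_token_sketch(request_rec *r, oidc_cfg *c,
		const char *access_token) {
	json_t *token = NULL;
	char *response = NULL;

	if (oidc_oauth_resolve_access_token(r, c, access_token, &token,
			&response) == FALSE)
		return; /* validation failed */

	json_t *scope = json_object_get(token, "scope");
	if (scope && json_is_string(scope))
		oidc_debug(r, "token scope: %s", json_string_value(scope));

	json_decref(token); /* release the caller's copy */
}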
static apr_status_t cache_canonicalise_key(request_rec *r, apr_pool_t* p, const char *uri, apr_uri_t *parsed_uri, const char **key) { cache_server_conf *conf; char *port_str, *hn, *lcs; const char *hostname, *scheme; int i; const char *path; char *querystring; if (*key) { /* * We have been here before during the processing of this request. */ return APR_SUCCESS; } /* * Get the module configuration. We need this for the CacheIgnoreQueryString * option below. */ conf = (cache_server_conf *) ap_get_module_config(r->server->module_config, &cache_module); /* * Use the canonical name to improve cache hit rate, but only if this is * not a proxy request or if this is a reverse proxy request. * We need to handle both cases in the same manner as for the reverse proxy * case we have the following situation: * * If a cached entry is looked up by mod_cache's quick handler r->proxyreq * is still unset in the reverse proxy case as it only gets set in the * translate name hook (either by ProxyPass or mod_rewrite) which is run * after the quick handler hook. This is different to the forward proxy * case where it gets set before the quick handler is run (in the * post_read_request hook). * If a cache entry is created by the CACHE_SAVE filter we always have * r->proxyreq set correctly. * So we must ensure that in the reverse proxy case we use the same code * path and using the canonical name seems to be the right thing to do * in the reverse proxy case. */ if (!r->proxyreq || (r->proxyreq == PROXYREQ_REVERSE)) { if (conf->base_uri && conf->base_uri->hostname) { hostname = conf->base_uri->hostname; } else { /* Use _default_ as the hostname if none present, as in mod_vhost */ hostname = ap_get_server_name(r); if (!hostname) { hostname = "_default_"; } } } else if (parsed_uri->hostname) { /* Copy the parsed uri hostname */ hn = apr_pstrdup(p, parsed_uri->hostname); ap_str_tolower(hn); /* const work-around */ hostname = hn; } else { /* We are a proxied request, with no hostname. Unlikely * to get very far - but just in case */ hostname = "_default_"; } /* * Copy the scheme, ensuring that it is lower case. If the parsed uri * contains no string or if this is not a proxy request get the http * scheme for this request. As r->parsed_uri.scheme is not set if this * is a reverse proxy request, it is ensured that the cases * "no proxy request" and "reverse proxy request" are handled in the same * manner (see above why this is needed). */ if (r->proxyreq && parsed_uri->scheme) { /* Copy the scheme and lower-case it */ lcs = apr_pstrdup(p, parsed_uri->scheme); ap_str_tolower(lcs); /* const work-around */ scheme = lcs; } else { if (conf->base_uri && conf->base_uri->scheme) { scheme = conf->base_uri->scheme; } else { scheme = ap_http_scheme(r); } } /* * If this is a proxy request, but not a reverse proxy request (see comment * above why these cases must be handled in the same manner), copy the * URI's port-string (which may be a service name). If the URI contains * no port-string, use apr-util's notion of the default port for that * scheme - if available. Otherwise use the port-number of the current * server. 
*/ if (r->proxyreq && (r->proxyreq != PROXYREQ_REVERSE)) { if (parsed_uri->port_str) { port_str = apr_pcalloc(p, strlen(parsed_uri->port_str) + 2); port_str[0] = ':'; for (i = 0; parsed_uri->port_str[i]; i++) { port_str[i + 1] = apr_tolower(parsed_uri->port_str[i]); } } else if (apr_uri_port_of_scheme(scheme)) { port_str = apr_psprintf(p, ":%u", apr_uri_port_of_scheme(scheme)); } else { /* No port string given in the AbsoluteUri, and we have no * idea what the default port for the scheme is. Leave it * blank and live with the inefficiency of some extra cached * entities. */ port_str = ""; } } else { if (conf->base_uri && conf->base_uri->port_str) { port_str = conf->base_uri->port_str; } else if (conf->base_uri && conf->base_uri->hostname) { port_str = ""; } else { /* Use the server port */ port_str = apr_psprintf(p, ":%u", ap_get_server_port(r)); } } /* * Check if we need to ignore session identifiers in the URL and do so * if needed. */ path = uri; querystring = parsed_uri->query; if (conf->ignore_session_id->nelts) { int i; char **identifier; identifier = (char **) conf->ignore_session_id->elts; for (i = 0; i < conf->ignore_session_id->nelts; i++, identifier++) { int len; const char *param; len = strlen(*identifier); /* * Check that we have a parameter separator in the last segment * of the path and that the parameter matches our identifier */ if ((param = ap_strrchr_c(path, ';')) && !strncmp(param + 1, *identifier, len) && (*(param + len + 1) == '=') && !ap_strchr_c(param + len + 2, '/')) { path = apr_pstrndup(p, path, param - path); continue; } /* * Check if the identifier is in the querystring and cut it out. */ if (querystring) { /* * First check if the identifier is at the beginning of the * querystring and followed by a '=' */ if (!strncmp(querystring, *identifier, len) && (*(querystring + len) == '=')) { param = querystring; } else { char *complete; /* * In order to avoid subkey matching (PR 48401) prepend * identifier with a '&' and append a '=' */ complete = apr_pstrcat(p, "&", *identifier, "=", NULL); param = strstr(querystring, complete); /* If we found something we are sitting on the '&' */ if (param) { param++; } } if (param) { const char *amp; if (querystring != param) { querystring = apr_pstrndup(p, querystring, param - querystring); } else { querystring = ""; } if ((amp = ap_strchr_c(param + len + 1, '&'))) { querystring = apr_pstrcat(p, querystring, amp + 1, NULL); } else { /* * If querystring is not "", then we have the case * that the identifier parameter we removed was the * last one in the original querystring. Hence we have * a trailing '&' which needs to be removed. */ if (*querystring) { querystring[strlen(querystring) - 1] = '\0'; } } } } } } /* Key format is a URI, optionally without the query-string */ if (conf->ignorequerystring) { *key = apr_pstrcat(p, scheme, "://", hostname, port_str, path, "?", NULL); } else { *key = apr_pstrcat(p, scheme, "://", hostname, port_str, path, "?", querystring, NULL); } /* * Store the key in the request_config for the cache as r->parsed_uri * might have changed in the time from our first visit here triggered by the * quick handler and our possible second visit triggered by the CACHE_SAVE * filter (e.g. r->parsed_uri got unescaped). In this case we would save the * resource in the cache under a key where it is never found by the quick * handler during following requests. 
*/ ap_log_rerror( APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r, APLOGNO(00698) "cache: Key for entity %s?%s is %s", uri, parsed_uri->query, *key); return APR_SUCCESS; }
/* Implements serf__auth_handler_func_t callback. */ static apr_status_t serf__handle_digest_auth(int code, serf_request_t *request, serf_bucket_t *response, const char *auth_hdr, const char *auth_attr, apr_pool_t *pool) { char *attrs; char *nextkv; const char *realm, *realm_name = NULL; const char *nonce = NULL; const char *algorithm = NULL; const char *qop = NULL; const char *opaque = NULL; const char *key; serf_connection_t *conn = request->conn; serf_context_t *ctx = conn->ctx; serf__authn_info_t *authn_info; digest_authn_info_t *digest_info; apr_status_t status; apr_pool_t *cred_pool; char *username, *password; /* Can't do Digest authentication if there's no callback to get username & password. */ if (!ctx->cred_cb) { return SERF_ERROR_AUTHN_FAILED; } if (code == 401) { authn_info = serf__get_authn_info_for_server(conn); } else { authn_info = &ctx->proxy_authn_info; } digest_info = authn_info->baton; /* Need a copy cuz we're going to write NUL characters into the string. */ attrs = apr_pstrdup(pool, auth_attr); /* We're expecting a list of key=value pairs, separated by a comma. Ex. realm="SVN Digest", nonce="f+zTl/leBAA=e371bd3070adfb47b21f5fc64ad8cc21adc371a5", algorithm=MD5, qop="auth" */ for ( ; (key = apr_strtok(attrs, ",", &nextkv)) != NULL; attrs = NULL) { char *val; val = strchr(key, '='); if (val == NULL) continue; *val++ = '\0'; /* skip leading spaces */ while (*key && *key == ' ') key++; /* If the value is quoted, then remove the quotes. */ if (*val == '"') { apr_size_t last = strlen(val) - 1; if (val[last] == '"') { val[last] = '\0'; val++; } } if (strcmp(key, "realm") == 0) realm_name = val; else if (strcmp(key, "nonce") == 0) nonce = val; else if (strcmp(key, "algorithm") == 0) algorithm = val; else if (strcmp(key, "qop") == 0) qop = val; else if (strcmp(key, "opaque") == 0) opaque = val; /* Ignore all unsupported attributes. */ } if (!realm_name) { return SERF_ERROR_AUTHN_MISSING_ATTRIBUTE; } realm = serf__construct_realm(code == 401 ? HOST : PROXY, conn, realm_name, pool); /* Ask the application for credentials */ apr_pool_create(&cred_pool, pool); status = serf__provide_credentials(ctx, &username, &password, request, code, authn_info->scheme->name, realm, cred_pool); if (status) { apr_pool_destroy(cred_pool); return status; } digest_info->header = (code == 401) ? "Authorization" : "Proxy-Authorization"; /* Store the digest authentication parameters in the context cached for this server in the serf context, so we can use it to create the Authorization header when setting up requests on the same or different connections (e.g. in case of KeepAlive off on the server). TODO: we currently don't cache this info per realm, so each time a request 'switches realms', we have to ask the application for new credentials. */ digest_info->pool = conn->pool; digest_info->qop = apr_pstrdup(digest_info->pool, qop); digest_info->nonce = apr_pstrdup(digest_info->pool, nonce); digest_info->cnonce = NULL; digest_info->opaque = apr_pstrdup(digest_info->pool, opaque); digest_info->algorithm = apr_pstrdup(digest_info->pool, algorithm); digest_info->realm = apr_pstrdup(digest_info->pool, realm_name); digest_info->username = apr_pstrdup(digest_info->pool, username); digest_info->digest_nc++; status = build_digest_ha1(&digest_info->ha1, username, password, digest_info->realm, digest_info->pool); apr_pool_destroy(cred_pool); /* If the handshake is finished tell serf it can send as much requests as it likes. */ serf__connection_set_pipelining(conn, 1); return status; }
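/*
 * serf__handle_digest_auth() bails out early when ctx->cred_cb is NULL, so an
 * application has to install a credentials callback on the serf context
 * first. A minimal sketch, with hard-coded credentials purely for
 * illustration; the callback signature is assumed to match serf.h's
 * serf_credentials_callback_t.
 */
static apr_status_t my_credentials_cb_sketch(char **username, char **password,
                                             serf_request_t *request,
                                             void *baton, int code,
                                             const char *authn_type,
                                             const char *realm,
                                             apr_pool_t *pool)
{
    /* In a real client these would come from a keyring or a user prompt. */
    *username = apr_pstrdup(pool, "demo-user");
    *password = apr_pstrdup(pool, "demo-password");
    return APR_SUCCESS;
}

/* During setup, before issuing requests:
 *     serf_config_credentials_callback(ctx, my_credentials_cb_sketch);
 */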
/* Implements serf__validate_response_func_t callback. */ static apr_status_t serf__validate_response_digest_auth(const serf__authn_scheme_t *scheme, peer_t peer, int code, serf_connection_t *conn, serf_request_t *request, serf_bucket_t *response, apr_pool_t *pool) { const char *key; char *auth_attr; char *nextkv; const char *rspauth = NULL; const char *qop = NULL; const char *nc_str = NULL; serf_bucket_t *hdrs; serf_context_t *ctx = conn->ctx; apr_status_t status; hdrs = serf_bucket_response_get_headers(response); /* Need a copy cuz we're going to write NUL characters into the string. */ if (peer == HOST) auth_attr = apr_pstrdup(pool, serf_bucket_headers_get(hdrs, "Authentication-Info")); else auth_attr = apr_pstrdup(pool, serf_bucket_headers_get(hdrs, "Proxy-Authentication-Info")); /* If there's no Authentication-Info header there's nothing to validate. */ if (! auth_attr) return APR_SUCCESS; /* We're expecting a list of key=value pairs, separated by a comma. Ex. rspauth="8a4b8451084b082be6b105e2b7975087", cnonce="346531653132652d303033392d3435", nc=00000007, qop=auth */ for ( ; (key = apr_strtok(auth_attr, ",", &nextkv)) != NULL; auth_attr = NULL) { char *val; val = strchr(key, '='); if (val == NULL) continue; *val++ = '\0'; /* skip leading spaces */ while (*key && *key == ' ') key++; /* If the value is quoted, then remove the quotes. */ if (*val == '"') { apr_size_t last = strlen(val) - 1; if (val[last] == '"') { val[last] = '\0'; val++; } } if (strcmp(key, "rspauth") == 0) rspauth = val; else if (strcmp(key, "qop") == 0) qop = val; else if (strcmp(key, "nc") == 0) nc_str = val; } if (rspauth) { const char *ha2, *tmp, *resp_hdr_hex; unsigned char resp_hdr[APR_MD5_DIGESTSIZE]; const char *req_uri = request->auth_baton; serf__authn_info_t *authn_info; digest_authn_info_t *digest_info; if (peer == HOST) { authn_info = serf__get_authn_info_for_server(conn); } else { authn_info = &ctx->proxy_authn_info; } digest_info = authn_info->baton; status = build_digest_ha2(&ha2, req_uri, "", qop, pool); if (status) return status; tmp = apr_psprintf(pool, "%s:%s:%s:%s:%s:%s", digest_info->ha1, digest_info->nonce, nc_str, digest_info->cnonce, digest_info->qop, ha2); apr_md5(resp_hdr, tmp, strlen(tmp)); resp_hdr_hex = hex_encode(resp_hdr, pool); /* Incorrect response-digest in Authentication-Info header. */ if (strcmp(rspauth, resp_hdr_hex) != 0) { return SERF_ERROR_AUTHN_FAILED; } } return APR_SUCCESS; }
/* This implements serf_bucket_headers_do_callback_fn_t. */ static int capabilities_headers_iterator_callback(void *baton, const char *key, const char *val) { options_context_t *opt_ctx = baton; svn_ra_serf__session_t *session = opt_ctx->session; if (svn_cstring_casecmp(key, "dav") == 0) { /* Each header may contain multiple values, separated by commas, e.g.: DAV: version-control,checkout,working-resource DAV: merge,baseline,activity,version-controlled-collection DAV: http://subversion.tigris.org/xmlns/dav/svn/depth */ apr_array_header_t *vals = svn_cstring_split(val, ",", TRUE, opt_ctx->pool); /* Right now we only have a few capabilities to detect, so just seek for them directly. This could be written slightly more efficiently, but that wouldn't be worth it until we have many more capabilities. */ if (svn_cstring_match_list(SVN_DAV_NS_DAV_SVN_DEPTH, vals)) { apr_hash_set(session->capabilities, SVN_RA_CAPABILITY_DEPTH, APR_HASH_KEY_STRING, capability_yes); } if (svn_cstring_match_list(SVN_DAV_NS_DAV_SVN_MERGEINFO, vals)) { /* The server doesn't know what repository we're referring to, so it can't just say capability_yes. */ apr_hash_set(session->capabilities, SVN_RA_CAPABILITY_MERGEINFO, APR_HASH_KEY_STRING, capability_server_yes); } if (svn_cstring_match_list(SVN_DAV_NS_DAV_SVN_LOG_REVPROPS, vals)) { apr_hash_set(session->capabilities, SVN_RA_CAPABILITY_LOG_REVPROPS, APR_HASH_KEY_STRING, capability_yes); } if (svn_cstring_match_list(SVN_DAV_NS_DAV_SVN_ATOMIC_REVPROPS, vals)) { apr_hash_set(session->capabilities, SVN_RA_CAPABILITY_ATOMIC_REVPROPS, APR_HASH_KEY_STRING, capability_yes); } if (svn_cstring_match_list(SVN_DAV_NS_DAV_SVN_PARTIAL_REPLAY, vals)) { apr_hash_set(session->capabilities, SVN_RA_CAPABILITY_PARTIAL_REPLAY, APR_HASH_KEY_STRING, capability_yes); } } /* SVN-specific headers -- if present, server supports HTTP protocol v2 */ else if (strncmp(key, "SVN", 3) == 0) { /* If we've not yet seen any information about supported POST requests, we'll initialize the list/hash with "create-txn" (which we know is supported by virtue of the server speaking HTTPv2 at all. */ if (! 
session->supported_posts) { session->supported_posts = apr_hash_make(session->pool); apr_hash_set(session->supported_posts, "create-txn", 10, (void *)1); } if (svn_cstring_casecmp(key, SVN_DAV_ROOT_URI_HEADER) == 0) { session->repos_root = session->session_url; session->repos_root.path = (char *)svn_fspath__canonicalize(val, session->pool); session->repos_root_str = svn_urlpath__canonicalize( apr_uri_unparse(session->pool, &session->repos_root, 0), session->pool); } else if (svn_cstring_casecmp(key, SVN_DAV_ME_RESOURCE_HEADER) == 0) { #ifdef SVN_DEBUG char *ignore_v2_env_var = getenv(SVN_IGNORE_V2_ENV_VAR); if (!(ignore_v2_env_var && apr_strnatcasecmp(ignore_v2_env_var, "yes") == 0)) session->me_resource = apr_pstrdup(session->pool, val); #else session->me_resource = apr_pstrdup(session->pool, val); #endif } else if (svn_cstring_casecmp(key, SVN_DAV_REV_STUB_HEADER) == 0) { session->rev_stub = apr_pstrdup(session->pool, val); } else if (svn_cstring_casecmp(key, SVN_DAV_REV_ROOT_STUB_HEADER) == 0) { session->rev_root_stub = apr_pstrdup(session->pool, val); } else if (svn_cstring_casecmp(key, SVN_DAV_TXN_STUB_HEADER) == 0) { session->txn_stub = apr_pstrdup(session->pool, val); } else if (svn_cstring_casecmp(key, SVN_DAV_TXN_ROOT_STUB_HEADER) == 0) { session->txn_root_stub = apr_pstrdup(session->pool, val); } else if (svn_cstring_casecmp(key, SVN_DAV_VTXN_STUB_HEADER) == 0) { session->vtxn_stub = apr_pstrdup(session->pool, val); } else if (svn_cstring_casecmp(key, SVN_DAV_VTXN_ROOT_STUB_HEADER) == 0) { session->vtxn_root_stub = apr_pstrdup(session->pool, val); } else if (svn_cstring_casecmp(key, SVN_DAV_REPOS_UUID_HEADER) == 0) { session->uuid = apr_pstrdup(session->pool, val); } else if (svn_cstring_casecmp(key, SVN_DAV_YOUNGEST_REV_HEADER) == 0) { opt_ctx->youngest_rev = SVN_STR_TO_REV(val); } else if (svn_cstring_casecmp(key, SVN_DAV_SUPPORTED_POSTS_HEADER) == 0) { /* May contain multiple values, separated by commas. */ int i; apr_array_header_t *vals = svn_cstring_split(val, ",", TRUE, opt_ctx->pool); for (i = 0; i < vals->nelts; i++) { const char *post_val = APR_ARRAY_IDX(vals, i, const char *); apr_hash_set(session->supported_posts, post_val, APR_HASH_KEY_STRING, (void *)1); } } }
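/*
 * A sketch of how the callback above is normally driven once an OPTIONS
 * response is available: serf_bucket_headers_do() walks every response
 * header and hands each key/value pair to the callback, with the
 * options_context_t as baton. The surrounding response handling is omitted;
 * a callback stops the iteration early by returning non-zero.
 */
static void capabilities_scan_headers_sketch(options_context_t *opt_ctx,
                                             serf_bucket_t *response)
{
  serf_bucket_t *hdrs = serf_bucket_response_get_headers(response);

  serf_bucket_headers_do(hdrs, capabilities_headers_iterator_callback,
                         opt_ctx);
}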
static int dbd_sqlite3_select_internal(apr_pool_t *pool, apr_dbd_t *sql, apr_dbd_results_t **results, sqlite3_stmt *stmt, int seek) { int ret, retry_count = 0, column_count; size_t i, num_tuples = 0; int increment = 0; apr_dbd_row_t *row = NULL; apr_dbd_row_t *lastrow = NULL; apr_dbd_column_t *column; char *hold = NULL; column_count = sqlite3_column_count(stmt); if (!*results) { *results = apr_pcalloc(pool, sizeof(apr_dbd_results_t)); } (*results)->stmt = stmt; (*results)->sz = column_count; (*results)->random = seek; (*results)->next_row = 0; (*results)->tuples = 0; (*results)->col_names = apr_pcalloc(pool, column_count * sizeof(char *)); (*results)->pool = pool; do { ret = sqlite3_step(stmt); if (ret == SQLITE_BUSY) { if (retry_count++ > MAX_RETRY_COUNT) { ret = SQLITE_ERROR; } else { apr_dbd_mutex_unlock(); apr_sleep(MAX_RETRY_SLEEP); apr_dbd_mutex_lock(); } } else if (ret == SQLITE_ROW) { int length; apr_dbd_column_t *col; row = apr_palloc(pool, sizeof(apr_dbd_row_t)); row->res = *results; increment = sizeof(apr_dbd_column_t *); length = increment * (*results)->sz; row->columns = apr_palloc(pool, length); row->columnCount = column_count; for (i = 0; i < (*results)->sz; i++) { column = apr_palloc(pool, sizeof(apr_dbd_column_t)); row->columns[i] = column; /* copy column name once only */ if ((*results)->col_names[i] == NULL) { (*results)->col_names[i] = apr_pstrdup(pool, sqlite3_column_name(stmt, i)); } column->name = (*results)->col_names[i]; column->size = sqlite3_column_bytes(stmt, i); column->type = sqlite3_column_type(stmt, i); column->value = NULL; switch (column->type) { case SQLITE_FLOAT: case SQLITE_INTEGER: case SQLITE_TEXT: hold = (char *) sqlite3_column_text(stmt, i); if (hold) { column->value = apr_pstrmemdup(pool, hold, column->size); } break; case SQLITE_BLOB: hold = (char *) sqlite3_column_blob(stmt, i); if (hold) { column->value = apr_pstrmemdup(pool, hold, column->size); } break; case SQLITE_NULL: break; } col = row->columns[i]; } row->rownum = num_tuples++; row->next_row = 0; (*results)->tuples = num_tuples; if ((*results)->next_row == 0) { (*results)->next_row = row; } if (lastrow != 0) { lastrow->next_row = row; } lastrow = row; } } while (ret == SQLITE_ROW || ret == SQLITE_BUSY); if (dbd_sqlite3_is_success(ret)) { ret = 0; } return ret; }
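/*
 * Driver internals like the function above are normally reached through the
 * generic apr_dbd front end. A minimal sketch of a caller, assuming an
 * already-opened driver/handle pair; the query text is illustrative only.
 */
static void dbd_select_example_sketch(apr_pool_t *pool,
                                      const apr_dbd_driver_t *driver,
                                      apr_dbd_t *handle)
{
    apr_dbd_results_t *res = NULL;
    apr_dbd_row_t *row = NULL;

    /* last argument 0 = sequential access (random seeking disabled) */
    if (apr_dbd_select(driver, pool, handle, &res,
                       "SELECT id, name FROM demo", 0) != 0) {
        return;
    }

    /* rownum -1 means "next row"; non-zero return means no more rows */
    while (apr_dbd_get_row(driver, pool, res, &row, -1) == 0) {
        const char *id   = apr_dbd_get_entry(driver, row, 0);
        const char *name = apr_dbd_get_entry(driver, row, 1);
        /* NULL columns come back as NULL pointers */
        printf("%s: %s\n", id ? id : "(null)", name ? name : "(null)");
    }
}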
AP_DECLARE(int) ap_scan_script_header_err_core_ex(request_rec *r, char *buffer, int (*getsfunc) (char *, int, void *), void *getsfunc_data, int module_index) { char x[MAX_STRING_LEN]; char *w, *l; int p; int cgi_status = HTTP_UNSET; apr_table_t *merge; apr_table_t *cookie_table; int trace_log = APLOG_R_MODULE_IS_LEVEL(r, module_index, APLOG_TRACE1); int first_header = 1; if (buffer) { *buffer = '\0'; } w = buffer ? buffer : x; /* temporary place to hold headers to merge in later */ merge = apr_table_make(r->pool, 10); /* The HTTP specification says that it is legal to merge duplicate * headers into one. Some browsers that support Cookies don't like * merged headers and prefer that each Set-Cookie header is sent * separately. Lets humour those browsers by not merging. * Oh what a pain it is. */ cookie_table = apr_table_make(r->pool, 2); apr_table_do(set_cookie_doo_doo, cookie_table, r->err_headers_out, "Set-Cookie", NULL); while (1) { int rv = (*getsfunc) (w, MAX_STRING_LEN - 1, getsfunc_data); if (rv == 0) { const char *msg = "Premature end of script headers"; if (first_header) msg = "End of script output before headers"; ap_log_rerror(SCRIPT_LOG_MARK, APLOG_ERR|APLOG_TOCLIENT, 0, r, "%s: %s", msg, apr_filepath_name_get(r->filename)); return HTTP_INTERNAL_SERVER_ERROR; } else if (rv == -1) { ap_log_rerror(SCRIPT_LOG_MARK, APLOG_ERR|APLOG_TOCLIENT, 0, r, "Script timed out before returning headers: %s", apr_filepath_name_get(r->filename)); return HTTP_GATEWAY_TIME_OUT; } /* Delete terminal (CR?)LF */ p = strlen(w); /* Indeed, the host's '\n': '\012' for UNIX; '\015' for MacOS; '\025' for OS/390 -- whatever the script generates. */ if (p > 0 && w[p - 1] == '\n') { if (p > 1 && w[p - 2] == CR) { w[p - 2] = '\0'; } else { w[p - 1] = '\0'; } } /* * If we've finished reading the headers, check to make sure any * HTTP/1.1 conditions are met. If so, we're done; normal processing * will handle the script's output. If not, just return the error. * The appropriate thing to do would be to send the script process a * SIGPIPE to let it know we're ignoring it, close the channel to the * script process, and *then* return the failed-to-meet-condition * error. Otherwise we'd be waiting for the script to finish * blithering before telling the client the output was no good. * However, we don't have the information to do that, so we have to * leave it to an upper layer. */ if (w[0] == '\0') { int cond_status = OK; /* PR#38070: This fails because it gets confused when a * CGI Status header overrides ap_meets_conditions. * * We can fix that by dropping ap_meets_conditions when * Status has been set. Since this is the only place * cgi_status gets used, let's test it explicitly. * * The alternative would be to ignore CGI Status when * ap_meets_conditions returns anything interesting. * That would be safer wrt HTTP, but would break CGI. 
*/ if ((cgi_status == HTTP_UNSET) && (r->method_number == M_GET)) { cond_status = ap_meets_conditions(r); } apr_table_overlap(r->err_headers_out, merge, APR_OVERLAP_TABLES_MERGE); if (!apr_is_empty_table(cookie_table)) { /* the cookies have already been copied to the cookie_table */ apr_table_unset(r->err_headers_out, "Set-Cookie"); r->err_headers_out = apr_table_overlay(r->pool, r->err_headers_out, cookie_table); } return cond_status; } if (trace_log) { if (first_header) ap_log_rerror(SCRIPT_LOG_MARK, APLOG_TRACE4, 0, r, "Headers from script '%s':", apr_filepath_name_get(r->filename)); ap_log_rerror(SCRIPT_LOG_MARK, APLOG_TRACE4, 0, r, " %s", w); } /* if we see a bogus header don't ignore it. Shout and scream */ #if APR_CHARSET_EBCDIC /* Chances are that we received an ASCII header text instead of * the expected EBCDIC header lines. Try to auto-detect: */ if (!(l = strchr(w, ':'))) { int maybeASCII = 0, maybeEBCDIC = 0; unsigned char *cp, native; apr_size_t inbytes_left, outbytes_left; for (cp = w; *cp != '\0'; ++cp) { native = apr_xlate_conv_byte(ap_hdrs_from_ascii, *cp); if (apr_isprint(*cp) && !apr_isprint(native)) ++maybeEBCDIC; if (!apr_isprint(*cp) && apr_isprint(native)) ++maybeASCII; } if (maybeASCII > maybeEBCDIC) { ap_log_error(SCRIPT_LOG_MARK, APLOG_ERR, 0, r->server, APLOGNO(02660) "CGI Interface Error: " "Script headers apparently ASCII: (CGI = %s)", r->filename); inbytes_left = outbytes_left = cp - w; apr_xlate_conv_buffer(ap_hdrs_from_ascii, w, &inbytes_left, w, &outbytes_left); } } #endif /*APR_CHARSET_EBCDIC*/ if (!(l = strchr(w, ':'))) { if (!buffer) { /* Soak up all the script output - may save an outright kill */ while ((*getsfunc)(w, MAX_STRING_LEN - 1, getsfunc_data) > 0) { continue; } } ap_log_rerror(SCRIPT_LOG_MARK, APLOG_ERR|APLOG_TOCLIENT, 0, r, "malformed header from script '%s': Bad header: %.30s", apr_filepath_name_get(r->filename), w); return HTTP_INTERNAL_SERVER_ERROR; } *l++ = '\0'; while (apr_isspace(*l)) { ++l; } if (!strcasecmp(w, "Content-type")) { char *tmp; /* Nuke trailing whitespace */ char *endp = l + strlen(l) - 1; while (endp > l && apr_isspace(*endp)) { *endp-- = '\0'; } tmp = apr_pstrdup(r->pool, l); ap_content_type_tolower(tmp); ap_set_content_type(r, tmp); } /* * If the script returned a specific status, that's what * we'll use - otherwise we assume 200 OK. */ else if (!strcasecmp(w, "Status")) { r->status = cgi_status = atoi(l); if (!ap_is_HTTP_VALID_RESPONSE(cgi_status)) ap_log_rerror(SCRIPT_LOG_MARK, APLOG_ERR|APLOG_TOCLIENT, 0, r, "Invalid status line from script '%s': %.30s", apr_filepath_name_get(r->filename), l); else if (APLOGrtrace1(r)) ap_log_rerror(SCRIPT_LOG_MARK, APLOG_TRACE1, 0, r, "Status line from script '%s': %.30s", apr_filepath_name_get(r->filename), l); r->status_line = apr_pstrdup(r->pool, l); } else if (!strcasecmp(w, "Location")) { apr_table_set(r->headers_out, w, l); } else if (!strcasecmp(w, "Content-Length")) { apr_table_set(r->headers_out, w, l); } else if (!strcasecmp(w, "Content-Range")) { apr_table_set(r->headers_out, w, l); } else if (!strcasecmp(w, "Transfer-Encoding")) { apr_table_set(r->headers_out, w, l); } else if (!strcasecmp(w, "ETag")) { apr_table_set(r->headers_out, w, l); } /* * If the script gave us a Last-Modified header, we can't just * pass it on blindly because of restrictions on future values. 
*/ else if (!strcasecmp(w, "Last-Modified")) { ap_update_mtime(r, apr_date_parse_http(l)); ap_set_last_modified(r); } else if (!strcasecmp(w, "Set-Cookie")) { apr_table_add(cookie_table, w, l); } else { apr_table_add(merge, w, l); } first_header = 0; } /* never reached - we leave this function within the while loop above */ return OK; }
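/*
 * A sketch of the other half of the contract: a getsfunc that feeds header
 * lines from an in-memory, NUL-terminated block of script output. Returning
 * 0 signals "no more data" and -1 signals a timeout, matching how the
 * scanner above treats those return codes. The context struct is
 * illustrative, not part of the real API.
 */
struct getsfunc_str_ctx {
    const char *data;   /* remaining script output */
};

static int getsfunc_from_string_sketch(char *buf, int size, void *baton)
{
    struct getsfunc_str_ctx *ctx = baton;
    int i = 0;

    if (*ctx->data == '\0') {
        return 0;                     /* premature end of headers */
    }

    /* copy one line, keeping the '\n' the scanner strips itself */
    while (*ctx->data && i < size - 1) {
        buf[i++] = *ctx->data;
        if (*ctx->data++ == '\n') {
            break;
        }
    }
    buf[i] = '\0';
    return 1;                         /* got a line */
}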
AP_DECLARE(void) ap_add_common_vars(request_rec *r) { apr_table_t *e; server_rec *s = r->server; conn_rec *c = r->connection; core_dir_config *conf = (core_dir_config *)ap_get_core_module_config(r->per_dir_config); const char *env_temp; const apr_array_header_t *hdrs_arr = apr_table_elts(r->headers_in); const apr_table_entry_t *hdrs = (const apr_table_entry_t *) hdrs_arr->elts; int i; apr_port_t rport; char *q; /* use a temporary apr_table_t which we'll overlap onto * r->subprocess_env later * (exception: if r->subprocess_env is empty at the start, * write directly into it) */ if (apr_is_empty_table(r->subprocess_env)) { e = r->subprocess_env; } else { e = apr_table_make(r->pool, 25 + hdrs_arr->nelts); } /* First, add environment vars from headers... this is as per * CGI specs, though other sorts of scripting interfaces see * the same vars... */ for (i = 0; i < hdrs_arr->nelts; ++i) { if (!hdrs[i].key) { continue; } /* A few headers are special cased --- Authorization to prevent * rogue scripts from capturing passwords; content-type and -length * for no particular reason. */ if (!strcasecmp(hdrs[i].key, "Content-type")) { apr_table_addn(e, "CONTENT_TYPE", hdrs[i].val); } else if (!strcasecmp(hdrs[i].key, "Content-length")) { apr_table_addn(e, "CONTENT_LENGTH", hdrs[i].val); } /* * You really don't want to disable this check, since it leaves you * wide open to CGIs stealing passwords and people viewing them * in the environment with "ps -e". But, if you must... */ #ifndef SECURITY_HOLE_PASS_AUTHORIZATION else if (!strcasecmp(hdrs[i].key, "Authorization") || !strcasecmp(hdrs[i].key, "Proxy-Authorization")) { if (conf->cgi_pass_auth == AP_CGI_PASS_AUTH_ON) { add_unless_null(e, http2env(r, hdrs[i].key), hdrs[i].val); } } #endif else add_unless_null(e, http2env(r, hdrs[i].key), hdrs[i].val); } env_temp = apr_table_get(r->subprocess_env, "PATH"); if (env_temp == NULL) { env_temp = getenv("PATH"); } if (env_temp == NULL) { env_temp = DEFAULT_PATH; } apr_table_addn(e, "PATH", apr_pstrdup(r->pool, env_temp)); #if defined(WIN32) env2env(e, "SystemRoot"); env2env(e, "COMSPEC"); env2env(e, "PATHEXT"); env2env(e, "WINDIR"); #elif defined(OS2) env2env(e, "COMSPEC"); env2env(e, "ETC"); env2env(e, "DPATH"); env2env(e, "PERLLIB_PREFIX"); #elif defined(BEOS) env2env(e, "LIBRARY_PATH"); #elif defined(DARWIN) env2env(e, "DYLD_LIBRARY_PATH"); #elif defined(_AIX) env2env(e, "LIBPATH"); #elif defined(__HPUX__) /* HPUX PARISC 2.0W knows both, otherwise redundancy is harmless */ env2env(e, "SHLIB_PATH"); env2env(e, "LD_LIBRARY_PATH"); #else /* Some Unix */ env2env(e, "LD_LIBRARY_PATH"); #endif apr_table_addn(e, "SERVER_SIGNATURE", ap_psignature("", r)); apr_table_addn(e, "SERVER_SOFTWARE", ap_get_server_banner()); apr_table_addn(e, "SERVER_NAME", ap_escape_html(r->pool, ap_get_server_name_for_url(r))); apr_table_addn(e, "SERVER_ADDR", r->connection->local_ip); /* Apache */ apr_table_addn(e, "SERVER_PORT", apr_psprintf(r->pool, "%u", ap_get_server_port(r))); add_unless_null(e, "REMOTE_HOST", ap_get_remote_host(c, r->per_dir_config, REMOTE_HOST, NULL)); apr_table_addn(e, "REMOTE_ADDR", r->useragent_ip); apr_table_addn(e, "DOCUMENT_ROOT", ap_document_root(r)); /* Apache */ apr_table_setn(e, "REQUEST_SCHEME", ap_http_scheme(r)); apr_table_addn(e, "CONTEXT_PREFIX", ap_context_prefix(r)); apr_table_addn(e, "CONTEXT_DOCUMENT_ROOT", ap_context_document_root(r)); apr_table_addn(e, "SERVER_ADMIN", s->server_admin); /* Apache */ if (apr_table_get(r->notes, "proxy-noquery") && (q = ap_strchr(r->filename, '?'))) { *q = '\0'; 
apr_table_addn(e, "SCRIPT_FILENAME", apr_pstrdup(r->pool, r->filename)); *q = '?'; } else { apr_table_addn(e, "SCRIPT_FILENAME", r->filename); /* Apache */ } rport = c->client_addr->port; apr_table_addn(e, "REMOTE_PORT", apr_itoa(r->pool, rport)); if (r->user) { apr_table_addn(e, "REMOTE_USER", r->user); } else if (r->prev) { request_rec *back = r->prev; while (back) { if (back->user) { apr_table_addn(e, "REDIRECT_REMOTE_USER", back->user); break; } back = back->prev; } } add_unless_null(e, "AUTH_TYPE", r->ap_auth_type); env_temp = ap_get_remote_logname(r); if (env_temp) { apr_table_addn(e, "REMOTE_IDENT", apr_pstrdup(r->pool, env_temp)); } /* Apache custom error responses. If we have redirected set two new vars */ if (r->prev) { /* PR#57785: reconstruct full URL here */ apr_uri_t *uri = &r->prev->parsed_uri; if (!uri->scheme) { uri->scheme = (char*)ap_http_scheme(r->prev); } if (!uri->port) { uri->port = ap_get_server_port(r->prev); uri->port_str = apr_psprintf(r->pool, "%u", uri->port); } if (!uri->hostname) { uri->hostname = (char*)ap_get_server_name_for_url(r->prev); } add_unless_null(e, "REDIRECT_QUERY_STRING", r->prev->args); add_unless_null(e, "REDIRECT_URL", apr_uri_unparse(r->pool, uri, 0)); } if (e != r->subprocess_env) { apr_table_overlap(r->subprocess_env, e, APR_OVERLAP_TABLES_SET); } }
static apr_status_t ap_unix_create_privileged_process(
                              apr_proc_t *newproc, const char *progname,
                              const char * const *args,
                              const char * const *env,
                              apr_procattr_t *attr, ap_unix_identity_t *ugid,
                              apr_pool_t *p)
{
    int i = 0;
    const char **newargs;
    char *newprogname;
    char *execuser, *execgroup;
    const char *argv0;

    if (!unixd_config.suexec_enabled) {
        return apr_proc_create(newproc, progname, args, env, attr, p);
    }

    argv0 = ap_strrchr_c(progname, '/');
    /* Allow suexec's "/" check to succeed */
    if (argv0 != NULL) {
        argv0++;
    }
    else {
        argv0 = progname;
    }

    if (ugid->userdir) {
        execuser = apr_psprintf(p, "~%ld", (long) ugid->uid);
    }
    else {
        execuser = apr_psprintf(p, "%ld", (long) ugid->uid);
    }
    execgroup = apr_psprintf(p, "%ld", (long) ugid->gid);

    if (!execuser || !execgroup) {
        return APR_ENOMEM;
    }

    i = 0;
    if (args) {
        while (args[i]) {
            i++;
        }
    }
    /* allocate space for 4 new args, the input args, and a null terminator */
    newargs = apr_palloc(p, sizeof(char *) * (i + 4));
    newprogname = SUEXEC_BIN;
    newargs[0] = SUEXEC_BIN;
    newargs[1] = execuser;
    newargs[2] = execgroup;
    newargs[3] = apr_pstrdup(p, argv0);
    /*
    ** using a shell to execute suexec makes no sense thus
    ** we force everything to be APR_PROGRAM, and never
    ** APR_SHELLCMD
    */
    if (apr_procattr_cmdtype_set(attr, APR_PROGRAM) != APR_SUCCESS) {
        return APR_EGENERAL;
    }

    i = 1;
    do {
        newargs[i + 3] = args[i];
    } while (args[i++]);

    return apr_proc_create(newproc, newprogname, newargs, env, attr, p);
}
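/*
 * A sketch of how the identity argument is normally obtained: mod_suexec (or
 * mod_userdir) answers the get_suexec_identity hook, and the caller falls
 * back to a plain apr_proc_create() when no identity is returned. This
 * mirrors httpd's public ap_os_create_privileged_process(); treat it as an
 * outline under those assumptions, not the exact upstream code.
 */
static apr_status_t create_process_for_request_sketch(
    const request_rec *r, apr_proc_t *newproc, const char *progname,
    const char * const *args, const char * const *env,
    apr_procattr_t *attr, apr_pool_t *p)
{
    ap_unix_identity_t *ugid = ap_run_get_suexec_identity(r);

    if (ugid == NULL) {
        /* no suexec identity configured: run as the server user */
        return apr_proc_create(newproc, progname, args, env, attr, p);
    }
    return ap_unix_create_privileged_process(newproc, progname, args, env,
                                             attr, ugid, p);
}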
/** * This function gets called whenever there is a 'RequestHeaderPattern' in the * config file and its syntax is correct (that is, it takes three arguments). * Returns NULL if everything went alright, otherwise an error message. * * @param cmd The command record filled with general information * about the environment. * @param dummy To be ignored. * @param header The header field name. * @param pattern_str The regular expression for the pattern matching. * @param replace The replacement string. */ static const char *request_header_pattern(cmd_parms *cmd, void *dummy, const char *header, const char *pattern_str, const char *replace) { replace_server_t *conf; // the server configuration (hashtable) replace_filter_t *filter; // the filter configuration header_replace_pattern_t *pattern; // the pattern to add header_replace_pattern_t *previous; // the previous pattern, if any header_replace_pattern_t backup; pcre *re; // the regular expression pcre_extra *pe; // data from studying the pattern const char *error; // error text for the failed regex compilation int error_offset; // offset of the regex compilation error, if any int rc; // return count of the regex matching int i; // counter int rv; // return value for generic function calls int flags = 0; // the flags for the regex matching conf = ap_get_module_config(cmd->server->module_config, &replace_module); if (conf == NULL) { return apr_pstrcat(cmd->temp_pool, "Illegal server record", NULL); } /** Look for an existing filter */ filter = (replace_filter_t*)apr_hash_get(conf->h, REQUEST_REPLACE_FILTER, APR_HASH_KEY_STRING); /** If no filter exists, create one */ if (filter == NULL) { filter = (replace_filter_t *)apr_pcalloc(conf->p, sizeof(replace_filter_t)); filter->name = REQUEST_REPLACE_FILTER; filter->mode = INPUT_FILTER; filter->ftype = AP_FTYPE_RESOURCE; filter->pattern = NULL; filter->case_ignore = 1; apr_hash_set(conf->h, REQUEST_REPLACE_FILTER, APR_HASH_KEY_STRING, filter); } /* Check if we have to set the flag for case insensitive matching. */ if (filter->case_ignore == 1) { flags |= PCRE_CASELESS; } /* Compile the pattern. */ re = pcre_compile(pattern_str, flags, &error, &error_offset, NULL); /* Return ungracefully if the compilation of the regex failed. */ if (re == NULL) { return apr_pstrcat(cmd->temp_pool, "Error compiling regular expression: ", error, NULL); } /* Study the pattern. This is done for performance improvement, but most of * the time it doesn't speed up things, since the return value is simply * NULL. */ pe = pcre_study(re, 0, &error); if (error != NULL) { return apr_pstrcat(cmd->temp_pool, "Error studying compiled pattern: ", error, NULL); } /* Check for an already existing pattern. */ pattern = filter->header_pattern; previous = NULL; /* Find the last pattern in the list. */ while (pattern && pattern->next != NULL) { previous = pattern; pattern = pattern->next; } /* If there has been no pattern at all, create one. Otherwise save the last * pattern and create a new one. */ if (!pattern) { pattern = (header_replace_pattern_t *)apr_pcalloc(conf->p, sizeof(header_replace_pattern_t)); filter->header_pattern = pattern; } else { previous = pattern; pattern = (header_replace_pattern_t *)apr_pcalloc(conf->p, sizeof(header_replace_pattern_t)); } /* Assign the values to the structure and add the pattern to the list. 
*/ pattern->pattern = re; pattern->extra = pe; pattern->replacement = apr_pstrdup(conf->p, replace); pattern->header = apr_pstrdup(conf->p, header); pattern->next = NULL; if (previous) { previous->next = pattern; } ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, cmd->server, "Header/In: Added pattern \"%s\"", pattern_str); return NULL; }
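The directive handler only compiles and stores the pattern; the actual substitution runs later in the filter. Assuming the same classic PCRE1 API, applying a compiled and studied pattern to a header value typically looks like the sketch below (pattern and subject are invented for illustration):

#include <stdio.h>
#include <string.h>
#include <pcre.h>

int main(void)
{
    const char *error;
    int error_offset;
    int ovector[30];                              /* must be a multiple of 3 */
    const char *subject = "gzip, deflate, br";    /* hypothetical header value */
    pcre *re;
    pcre_extra *pe;
    int rc;

    /* compile and study, exactly as the directive handler does */
    re = pcre_compile("deflate", PCRE_CASELESS, &error, &error_offset, NULL);
    if (re == NULL) {
        fprintf(stderr, "compile failed at offset %d: %s\n", error_offset, error);
        return 1;
    }
    pe = pcre_study(re, 0, &error);

    rc = pcre_exec(re, pe, subject, (int)strlen(subject), 0, 0,
                   ovector, sizeof(ovector) / sizeof(ovector[0]));
    if (rc >= 0)
        printf("matched at offset %d\n", ovector[0]);
    else
        printf("no match (%d)\n", rc);

    pcre_free(re);
    return 0;
}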
/* get session with szCookieValue key from memcached server */ static apr_table_t *Auth_memCookie_get_session(request_rec *r, strAuth_memCookie_config_rec *conf, char *szCookieValue) { char *szMemcached_addr = conf->szAuth_memCookie_memCached_addr; memcached_st *mc_session = NULL; memcached_server_st *servers = NULL; memcached_return mc_err = 0; apr_table_t *pMySession = NULL; size_t nGetKeyLen = strlen(szCookieValue); uint32_t nGetFlags = 0; size_t nGetLen = 0; char *szTokenPos; char *szFieldTokenPos; char *szField; char *szValue; char *szFieldName; char *szFieldValue; char *szMyValue; const char *UserName; int nbInfo = 0; if ((pMySession = apr_table_make(r->pool, conf->nAuth_memCookie_SessionTableSize)) == 0) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, ERRTAG "apr_tablemake failed"); return NULL; } /* init memcache lib */ if ((mc_session = memcached_create(NULL)) == 0) { ap_log_rerror(APLOG_MARK, APLOG_ERR|APLOG_NOERRNO, 0, r, ERRTAG "memcache lib init failed"); return NULL; } servers = memcached_servers_parse(szMemcached_addr); memcached_server_push(mc_session, servers); if ((szValue = memcached_get(mc_session, szCookieValue, nGetKeyLen, &nGetLen, &nGetFlags, &mc_err)) == 0) { ap_log_rerror(APLOG_MARK,APLOG_DEBUG|APLOG_NOERRNO, 0,r,ERRTAG "memcached_get failed to find key '%s'",szCookieValue); memcached_free(mc_session); return NULL; } /* dup szValue in pool */ szMyValue = apr_pstrdup(r->pool, szValue); /* split szValue into struct strAuthSession */ /* szValue is formated multi line (\r\n) with name=value on each line */ /* must containe UserName,Groups,RemoteIP fieldname */ szTokenPos = NULL; for (szField = strtok_r(szMyValue, "\r\n", &szTokenPos); szField; szField=strtok_r(NULL, "\r\n", &szTokenPos)) { szFieldTokenPos = NULL; ap_log_rerror(APLOG_MARK,APLOG_DEBUG|APLOG_NOERRNO, 0,r,ERRTAG "session field:%s",szField); szFieldName = strtok_r(szField, "=", &szFieldTokenPos); szFieldValue = strtok_r(NULL, "=", &szFieldTokenPos); if (szFieldName != NULL && szFieldValue != NULL) { /* add key and value in pMySession table */ apr_table_set(pMySession, szFieldName, szFieldValue); ap_log_rerror(APLOG_MARK,APLOG_DEBUG|APLOG_NOERRNO, 0,r,ERRTAG "session information %s=%s",szFieldName,szFieldValue); /* count the number of element added to table to check table size not reached */ nbInfo++; if (nbInfo > conf->nAuth_memCookie_SessionTableSize) { ap_log_rerror(APLOG_MARK,APLOG_ERR|APLOG_NOERRNO, 0,r,ERRTAG "maximum session information reached!"); if (szValue) free(szValue); memcached_free(mc_session); return NULL; } } } if (!apr_table_get(pMySession, "UserName")) { ap_log_rerror(APLOG_MARK,APLOG_ERR|APLOG_NOERRNO, 0,r,ERRTAG "Username not found in Session value(key:%s) found = %s",szCookieValue,szValue); pMySession = NULL; } else if (conf->nAuth_memCookie_MatchIP_Mode != 0 && !apr_table_get(pMySession, "RemoteIP")) { ap_log_rerror(APLOG_MARK,APLOG_ERR|APLOG_NOERRNO, 0,r,ERRTAG "MatchIP_Mode activated and RemoteIP not found in Session value(key:%s) found = %s",szCookieValue,szValue); pMySession = NULL; } else { ap_log_rerror(APLOG_MARK,APLOG_DEBUG|APLOG_NOERRNO, 0,r,ERRTAG "Value for Session (key:%s) found => Username=%s Groups=%s RemoteIp=%s", szCookieValue, apr_table_get(pMySession,"UserName"), apr_table_get(pMySession,"Groups"), apr_table_get(pMySession,"RemoteIP")); } /* free returned value */ if (szValue) free(szValue); /* free the mc session */ memcached_free(mc_session); /* set the good username found in request structure */ UserName = 0; if (pMySession != NULL) UserName = 
apr_table_get(pMySession, "UserName"); if (UserName) r->user = (char *)UserName; return pMySession; }
/* Query the ProfileImagePath from the version-specific branch, where the * regkey uses the user's name on 9x, and user's sid string on NT. */ APR_DECLARE(apr_status_t) apr_uid_homepath_get(char **dirname, const char *username, apr_pool_t *p) { #ifdef _WIN32_WCE *dirname = apr_pstrdup(p, "/My Documents"); return APR_SUCCESS; #else apr_status_t rv; char regkey[MAX_PATH * 2]; char *fixch; DWORD keylen; DWORD type; HKEY key; if (apr_os_level >= APR_WIN_NT) { apr_uid_t uid; apr_gid_t gid; if ((rv = apr_uid_get(&uid, &gid, username, p)) != APR_SUCCESS) return rv; strcpy(regkey, "SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\" "ProfileList\\"); keylen = (DWORD)strlen(regkey); get_sid_string(regkey + keylen, sizeof(regkey) - keylen, uid); } else { strcpy(regkey, "SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\" "ProfileList\\"); keylen = (DWORD)strlen(regkey); apr_cpystrn(regkey + keylen, username, sizeof(regkey) - keylen); } if ((rv = RegOpenKeyEx(HKEY_LOCAL_MACHINE, regkey, 0, KEY_QUERY_VALUE, &key)) != ERROR_SUCCESS) return APR_FROM_OS_ERROR(rv); #if APR_HAS_UNICODE_FS IF_WIN_OS_IS_UNICODE { keylen = sizeof(regkey); rv = RegQueryValueExW(key, L"ProfileImagePath", NULL, &type, (void*)regkey, &keylen); RegCloseKey(key); if (rv != ERROR_SUCCESS) return APR_FROM_OS_ERROR(rv); if (type == REG_SZ) { char retdir[MAX_PATH]; if ((rv = unicode_to_utf8_path(retdir, sizeof(retdir), (apr_wchar_t*)regkey)) != APR_SUCCESS) return rv; *dirname = apr_pstrdup(p, retdir); } else if (type == REG_EXPAND_SZ) { apr_wchar_t path[MAX_PATH]; char retdir[MAX_PATH]; ExpandEnvironmentStringsW((apr_wchar_t*)regkey, path, sizeof(path) / 2); if ((rv = unicode_to_utf8_path(retdir, sizeof(retdir), path)) != APR_SUCCESS) return rv; *dirname = apr_pstrdup(p, retdir); } else return APR_ENOENT; } #endif #if APR_HAS_ANSI_FS ELSE_WIN_OS_IS_ANSI { keylen = sizeof(regkey); rv = RegQueryValueEx(key, "ProfileImagePath", NULL, &type, (void*)regkey, &keylen); RegCloseKey(key); if (rv != ERROR_SUCCESS) return APR_FROM_OS_ERROR(rv); if (type == REG_SZ) { *dirname = apr_pstrdup(p, regkey); } else if (type == REG_EXPAND_SZ) { char path[MAX_PATH]; ExpandEnvironmentStrings(regkey, path, sizeof(path)); *dirname = apr_pstrdup(p, path); } else return APR_ENOENT; } #endif /* APR_HAS_ANSI_FS */ for (fixch = *dirname; *fixch; ++fixch) if (*fixch == '\\') *fixch = '/'; return APR_SUCCESS; #endif /* _WIN32_WCE */ }
/************************************************** * authentification phase: * verify if cookie is set and if it is known in memcache server **************************************************/ static int Auth_memCookie_check_cookie(request_rec *r) { strAuth_memCookie_config_rec *conf = NULL; char *szCookieValue = NULL; apr_table_t *pAuthSession = NULL; apr_status_t tRetStatus; char *szRemoteIP = NULL; const char *command = NULL; ap_log_rerror(APLOG_MARK,APLOG_DEBUG|APLOG_NOERRNO, 0,r,ERRTAG "ap_hook_check_user_id in"); /* get apache config */ conf = ap_get_module_config(r->per_dir_config, &mod_auth_memcookie_module); ap_log_rerror(APLOG_MARK,APLOG_DEBUG|APLOG_NOERRNO, 0,r,ERRTAG "check MatchIP_Mode:%d",conf->nAuth_memCookie_MatchIP_Mode); /* set remote ip in case of conf->nAuth_memCookie_MatchIP_Mode value */ if (conf->nAuth_memCookie_MatchIP_Mode == 2 && apr_table_get(r->headers_in, "Via") != NULL) szRemoteIP = apr_pstrdup(r->pool, apr_table_get(r->headers_in, "Via")); else if (conf->nAuth_memCookie_MatchIP_Mode == 1 && apr_table_get(r->headers_in, "X-Forwarded-For") != NULL) szRemoteIP = apr_pstrdup(r->pool, apr_table_get(r->headers_in, "X-Forwarded-For")); else szRemoteIP = apr_pstrdup(r->pool, r->connection->client_ip); if (!conf->nAuth_memCookie_Authoritative) return DECLINED; ap_log_rerror(APLOG_MARK,APLOG_DEBUG|APLOG_NOERRNO, 0,r,ERRTAG "AuthType is '%s'", ap_auth_type(r)); if (strncmp("Cookie", ap_auth_type(r), 6) != 0) { ap_log_rerror(APLOG_MARK, APLOG_ERR|APLOG_NOERRNO, 0, r, ERRTAG "Auth type not specified as 'Cookie'"); return HTTP_UNAUTHORIZED; } if (!conf->szAuth_memCookie_CookieName) { ap_log_rerror(APLOG_MARK, APLOG_ERR|APLOG_NOERRNO, 0, r, ERRTAG "No Auth_memCookie_CookieName specified"); return HTTP_UNAUTHORIZED; } if (!conf->szAuth_memCookie_memCached_addr) { ap_log_rerror(APLOG_MARK, APLOG_ERR|APLOG_NOERRNO, 0, r, ERRTAG "No Auth_memCookie_Memcached_AddrPort specified"); return HTTP_UNAUTHORIZED; } ap_log_rerror(APLOG_MARK,APLOG_DEBUG|APLOG_NOERRNO, 0,r,ERRTAG "Memcached server(s) adresse(s) are %s",conf->szAuth_memCookie_memCached_addr); pAuthSession = NULL; /* extract session cookie from headers */ szCookieValue = extract_cookie(r, conf->szAuth_memCookie_CookieName); /* if we have a cookie, get session from memcache */ if (szCookieValue) { ap_log_rerror(APLOG_MARK,APLOG_DEBUG|APLOG_NOERRNO, 0,r,ERRTAG "got cookie; value is %s", szCookieValue); if((pAuthSession = Auth_memCookie_get_session(r, conf, szCookieValue)) == NULL) { ap_log_rerror(APLOG_MARK, APLOG_INFO|APLOG_NOERRNO, 0, r, ERRTAG "AuthSession %s not found: %s", szCookieValue, r->filename); } } else { ap_log_rerror(APLOG_MARK, APLOG_INFO|APLOG_NOERRNO, 0, r, ERRTAG "cookie not found! not authorized! RemoteIP:%s", szRemoteIP); } /* unset headers sent by the client that are supposed to be set by us */ if (conf->szAuth_memCookie_SessionHeaders) { char *headers = apr_pstrdup(r->pool, conf->szAuth_memCookie_SessionHeaders); char *key, *keypos = 0; for(key = strtok_r(headers, ", ", &keypos); key; key = strtok_r(NULL, ", ", &keypos)) apr_table_unset(r->headers_in, key); } /* still no session? 
goodbye */ if (!pAuthSession) return HTTP_UNAUTHORIZED; /* check remote ip if option is enabled */ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r, ERRTAG "check ip: remote_ip=%s cookie_ip=%s", szRemoteIP ,apr_table_get(pAuthSession,"RemoteIP")); if (conf->nAuth_memCookie_MatchIP_Mode != 0) { if (strcmp(szRemoteIP, apr_table_get(pAuthSession,"RemoteIP"))) { ap_log_rerror(APLOG_MARK, APLOG_ERR|APLOG_NOERRNO, 0, r, ERRTAG "unauthorized, by ip. user:%s remote_ip:%s != cookie_ip:%s", apr_table_get(pAuthSession,"UserName"),szRemoteIP ,apr_table_get(pAuthSession,"RemoteIP")); return HTTP_UNAUTHORIZED; } } /* set env var X_ to the information session value */ apr_table_do(Auth_memCookie_DoSetEnv, r, pAuthSession, NULL); /* set REMOTE_USER var for scripts language */ apr_table_setn(r->subprocess_env, "REMOTE_USER", apr_table_get(pAuthSession,"UserName")); /* set in http header the session value */ if (conf->nAuth_memCookie_SetSessionHTTPHeader) apr_table_do(Auth_memCookie_DoSetHeader, r, pAuthSession, NULL); /* log authorisation ok */ ap_log_rerror(APLOG_MARK, APLOG_DEBUG|APLOG_NOERRNO, 0, r, ERRTAG "authentication ok"); /* fix http header for php */ if (conf->nAuth_memCookie_authbasicfix) fix_headers_in(r, apr_table_get(pAuthSession, "Password")); // do we add the X-Remote-User header? if (conf->nAuth_memCookie_Add_Remote_User_Header) { if (apr_table_get(r->headers_in, "X-Remote-User") == NULL) { apr_table_addn(r->headers_in, "X-Remote-User", r->user); } else { apr_table_set(r->headers_in, "X-Remote-User", r->user); } } /* if all is ok return auth ok */ return OK; }
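The session fields are exported through apr_table_do() callbacks (Auth_memCookie_DoSetEnv, Auth_memCookie_DoSetHeader) that are defined elsewhere in the module. Purely as a hypothetical sketch of that callback shape, assuming the X_ prefix convention mentioned in the comment above:

#include <httpd.h>
#include <apr_strings.h>
#include <apr_tables.h>

/* Hypothetical apr_table_do() callback: expose each session field to
 * scripts as an X_-prefixed subprocess environment variable.
 * Returning non-zero tells apr_table_do() to keep iterating. */
static int example_do_set_env(void *rec, const char *key, const char *value)
{
    request_rec *r = rec;
    apr_table_setn(r->subprocess_env,
                   apr_pstrcat(r->pool, "X_", key, NULL),
                   apr_pstrdup(r->pool, value));
    return 1;
}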
/* * select a specific URL entity in the cache * * It is possible to store more than one entity per URL. Content * negotiation is used to select an entity. Once an entity is * selected, details of it are stored in the per request * config to save time when serving the request later. * * This function returns OK if successful, DECLINED if no * cached entity fits the bill. */ int cache_select(cache_request_rec *cache, request_rec *r) { cache_provider_list *list; apr_status_t rv; cache_handle_t *h; if (!cache) { /* This should never happen */ ap_log_rerror(APLOG_MARK, APLOG_ERR, APR_EGENERAL, r, APLOGNO(00693) "cache: No cache request information available for key" " generation"); return DECLINED; } /* if no-cache, we can't serve from the cache, but we may store to the * cache. */ if (!ap_cache_check_no_cache(cache, r)) { return DECLINED; } if (!cache->key) { rv = cache_generate_key(r, r->pool, &cache->key); if (rv != APR_SUCCESS) { return DECLINED; } } /* go through the cache types till we get a match */ h = apr_palloc(r->pool, sizeof(cache_handle_t)); list = cache->providers; while (list) { switch ((rv = list->provider->open_entity(h, r, cache->key))) { case OK: { char *vary = NULL; int mismatch = 0; char *last = NULL; if (list->provider->recall_headers(h, r) != APR_SUCCESS) { /* try again with next cache type */ list = list->next; continue; } /* * Check Content-Negotiation - Vary * * At this point we need to make sure that the object we found in * the cache is the same object that would be delivered to the * client, when the effects of content negotiation are taken into * effect. * * In plain english, we want to make sure that a language-negotiated * document in one language is not given to a client asking for a * language negotiated document in a different language by mistake. * * This code makes the assumption that the storage manager will * cache the req_hdrs if the response contains a Vary * header. * * RFC2616 13.6 and 14.44 describe the Vary mechanism. */ vary = cache_strqtok( apr_pstrdup(r->pool, cache_table_getm(r->pool, h->resp_hdrs, "Vary")), CACHE_SEPARATOR, &last); while (vary) { const char *h1, *h2; /* * is this header in the request and the header in the cached * request identical? If not, we give up and do a straight get */ h1 = cache_table_getm(r->pool, r->headers_in, vary); h2 = cache_table_getm(r->pool, h->req_hdrs, vary); if (h1 == h2) { /* both headers NULL, so a match - do nothing */ } else if (h1 && h2 && !strcmp(h1, h2)) { /* both headers exist and are equal - do nothing */ } else { /* headers do not match, so Vary failed */ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r, APLOGNO(00694) "cache_select(): Vary header mismatch."); mismatch = 1; break; } vary = cache_strqtok(NULL, CACHE_SEPARATOR, &last); } /* no vary match, try next provider */ if (mismatch) { /* try again with next cache type */ list = list->next; continue; } cache->provider = list->provider; cache->provider_name = list->provider_name; /* * RFC2616 13.3.4 Rules for When to Use Entity Tags and Last-Modified * Dates: An HTTP/1.1 caching proxy, upon receiving a conditional request * that includes both a Last-Modified date and one or more entity tags as * cache validators, MUST NOT return a locally cached response to the * client unless that cached response is consistent with all of the * conditional header fields in the request. 
*/ if (ap_condition_if_match(r, h->resp_hdrs) == AP_CONDITION_NOMATCH || ap_condition_if_unmodified_since(r, h->resp_hdrs) == AP_CONDITION_NOMATCH || ap_condition_if_none_match(r, h->resp_hdrs) == AP_CONDITION_NOMATCH || ap_condition_if_modified_since(r, h->resp_hdrs) == AP_CONDITION_NOMATCH || ap_condition_if_range(r, h->resp_hdrs) == AP_CONDITION_NOMATCH) { mismatch = 1; } /* Is our cached response fresh enough? */ if (mismatch || !cache_check_freshness(h, cache, r)) { const char *etag, *lastmod; /* Cache-Control: only-if-cached and revalidation required, try * the next provider */ if (cache->control_in.only_if_cached) { /* try again with next cache type */ list = list->next; continue; } /* set aside the stale entry for accessing later */ cache->stale_headers = apr_table_copy(r->pool, r->headers_in); cache->stale_handle = h; /* if no existing conditionals, use conditionals of our own */ if (!mismatch) { ap_log_rerror( APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r, APLOGNO(00695) "Cached response for %s isn't fresh. Adding " "conditional request headers.", r->uri); /* Remove existing conditionals that might conflict with ours */ apr_table_unset(r->headers_in, "If-Match"); apr_table_unset(r->headers_in, "If-Modified-Since"); apr_table_unset(r->headers_in, "If-None-Match"); apr_table_unset(r->headers_in, "If-Range"); apr_table_unset(r->headers_in, "If-Unmodified-Since"); etag = apr_table_get(h->resp_hdrs, "ETag"); lastmod = apr_table_get(h->resp_hdrs, "Last-Modified"); if (etag || lastmod) { /* If we have a cached etag and/or Last-Modified add in * our own conditionals. */ if (etag) { apr_table_set(r->headers_in, "If-None-Match", etag); } if (lastmod) { apr_table_set(r->headers_in, "If-Modified-Since", lastmod); } /* * Do not do Range requests with our own conditionals: If * we get 304 the Range does not matter and otherwise the * entity changed and we want to have the complete entity */ apr_table_unset(r->headers_in, "Range"); } } /* ready to revalidate, pretend we were never here */ return DECLINED; } /* Okay, this response looks okay. Merge in our stuff and go. */ cache_accept_headers(h, r, h->resp_hdrs, r->headers_out, 0); cache->handle = h; return OK; } case DECLINED: { /* try again with next cache type */ list = list->next; continue; } default: { /* oo-er! an error */ return rv; } } } /* if Cache-Control: only-if-cached, and not cached, return 504 */ if (cache->control_in.only_if_cached) { ap_log_rerror(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r, APLOGNO(00696) "cache: 'only-if-cached' requested and no cached entity, " "returning 504 Gateway Timeout for: %s", r->uri); return HTTP_GATEWAY_TIME_OUT; } return DECLINED; }
svn_error_t * svn_fs_x__dag_clone_child(dag_node_t **child_p, dag_node_t *parent, const char *parent_path, const char *name, const svn_fs_x__id_part_t *copy_id, const svn_fs_x__id_part_t *txn_id, svn_boolean_t is_parent_copyroot, apr_pool_t *pool) { dag_node_t *cur_entry; /* parent's current entry named NAME */ const svn_fs_id_t *new_node_id; /* node id we'll put into NEW_NODE */ svn_fs_t *fs = svn_fs_x__dag_get_fs(parent); apr_pool_t *subpool = svn_pool_create(pool); /* First check that the parent is mutable. */ if (! svn_fs_x__dag_check_mutable(parent)) return svn_error_createf (SVN_ERR_FS_NOT_MUTABLE, NULL, "Attempted to clone child of non-mutable node"); /* Make sure that NAME is a single path component. */ if (! svn_path_is_single_path_component(name)) return svn_error_createf (SVN_ERR_FS_NOT_SINGLE_PATH_COMPONENT, NULL, "Attempted to make a child clone with an illegal name '%s'", name); /* Find the node named NAME in PARENT's entries list if it exists. */ SVN_ERR(svn_fs_x__dag_open(&cur_entry, parent, name, pool, subpool)); /* Check for mutability in the node we found. If it's mutable, we don't need to clone it. */ if (svn_fs_x__dag_check_mutable(cur_entry)) { /* This has already been cloned */ new_node_id = cur_entry->id; } else { node_revision_t *noderev, *parent_noderev; /* Go get a fresh NODE-REVISION for current child node. */ SVN_ERR(get_node_revision(&noderev, cur_entry)); if (is_parent_copyroot) { SVN_ERR(get_node_revision(&parent_noderev, parent)); noderev->copyroot_rev = parent_noderev->copyroot_rev; noderev->copyroot_path = apr_pstrdup(pool, parent_noderev->copyroot_path); } noderev->copyfrom_path = NULL; noderev->copyfrom_rev = SVN_INVALID_REVNUM; noderev->predecessor_id = svn_fs_x__id_copy(cur_entry->id, pool); if (noderev->predecessor_count != -1) noderev->predecessor_count++; noderev->created_path = svn_fspath__join(parent_path, name, pool); SVN_ERR(svn_fs_x__create_successor(&new_node_id, fs, cur_entry->id, noderev, copy_id, txn_id, pool)); /* Replace the ID in the parent's ENTRY list with the ID which refers to the mutable clone of this child. */ SVN_ERR(set_entry(parent, name, new_node_id, noderev->kind, txn_id, pool)); } /* Initialize the youngster. */ svn_pool_destroy(subpool); return svn_fs_x__dag_get_node(child_p, fs, new_node_id, pool); }
static int create_entity(cache_handle_t *h, cache_type_e type_e, request_rec *r, const char *key, apr_off_t len) { apr_status_t rv; apr_pool_t *pool; cache_object_t *obj, *tmp_obj; mem_cache_object_t *mobj; if (len == -1) { /* Caching a streaming response. Assume the response is * less than or equal to max_streaming_buffer_size. We will * correct all the cache size counters in store_body once * we know exactly know how much we are caching. */ len = sconf->max_streaming_buffer_size; } /* Note: cache_insert() will automatically garbage collect * objects from the cache if the max_cache_size threshold is * exceeded. This means mod_mem_cache does not need to implement * max_cache_size checks. */ if (len < sconf->min_cache_object_size || len > sconf->max_cache_object_size) { ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, "mem_cache: URL %s failed the size check and will not be cached.", key); return DECLINED; } if (type_e == CACHE_TYPE_FILE) { /* CACHE_TYPE_FILE is only valid for local content handled by the * default handler. Need a better way to check if the file is * local or not. */ if (!r->filename) { return DECLINED; } } rv = apr_pool_create(&pool, NULL); if (rv != APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_WARNING, rv, r->server, "mem_cache: Failed to create memory pool."); return DECLINED; } /* Allocate and initialize cache_object_t */ obj = apr_pcalloc(pool, sizeof(*obj)); obj->key = apr_pstrdup(pool, key); /* Allocate and init mem_cache_object_t */ mobj = apr_pcalloc(pool, sizeof(*mobj)); mobj->pool = pool; if (threaded_mpm) { apr_thread_mutex_create(&mobj->lock, APR_THREAD_MUTEX_DEFAULT, pool); } /* Finish initing the cache object */ apr_atomic_set32(&obj->refcount, 1); mobj->total_refs = 1; obj->complete = 0; obj->vobj = mobj; /* Safe cast: We tested < sconf->max_cache_object_size above */ mobj->m_len = (apr_size_t)len; mobj->type = type_e; /* Place the cache_object_t into the hash table. * Note: Perhaps we should wait to put the object in the * hash table when the object is complete? I add the object here to * avoid multiple threads attempting to cache the same content only * to discover at the very end that only one of them will succeed. * Furthermore, adding the cache object to the table at the end could * open up a subtle but easy to exploit DoS hole: someone could request * a very large file with multiple requests. Better to detect this here * rather than after the cache object has been completely built and * initialized... * XXX Need a way to insert into the cache w/o such coarse grained locking */ if (sconf->lock) { apr_thread_mutex_lock(sconf->lock); } tmp_obj = (cache_object_t *) cache_find(sconf->cache_cache, key); if (!tmp_obj) { cache_insert(sconf->cache_cache, obj); /* Add a refcount to account for the reference by the * hashtable in the cache. Refcount should be 2 now, one * for this thread, and one for the cache. */ apr_atomic_inc32(&obj->refcount); } if (sconf->lock) { apr_thread_mutex_unlock(sconf->lock); } if (tmp_obj) { /* This thread collided with another thread loading the same object * into the cache at the same time. Defer to the other thread which * is further along. */ cleanup_cache_object(obj); return DECLINED; } apr_pool_cleanup_register(r->pool, obj, decrement_refcount, apr_pool_cleanup_null); /* Populate the cache handle */ h->cache_obj = obj; return OK; }
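The lifetime of a cache object above is governed entirely by its atomic reference count: one reference for the creating request and one for the hash table. A minimal standalone sketch of that counting scheme using APR's atomics (the values are illustrative only):

#include <stdio.h>
#include <apr_general.h>
#include <apr_pools.h>
#include <apr_atomic.h>

int main(void)
{
    apr_pool_t *p;
    volatile apr_uint32_t refcount;

    apr_initialize();
    apr_pool_create(&p, NULL);
    apr_atomic_init(p);

    /* one reference for the creating thread, a second once the object
     * has been inserted into the cache's hash table */
    apr_atomic_set32(&refcount, 1);
    apr_atomic_inc32(&refcount);
    printf("refs after insert: %u\n", (unsigned)apr_atomic_read32(&refcount));

    apr_atomic_dec32(&refcount);            /* request finished: 2 -> 1 */
    if (apr_atomic_dec32(&refcount) == 0) { /* evicted from cache: 1 -> 0 */
        printf("last reference dropped, object can be freed\n");
    }

    apr_terminate();
    return 0;
}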
static int xlate_name(request_rec *r) { int i; const char *name; char *backend; apr_dbm_t *db; apr_status_t rv; apr_datum_t key, val; struct proxy_alias *ralias; proxy_dir_conf *dconf; express_server_conf *sconf; sconf = ap_get_module_config(r->server->module_config, &proxy_express_module); dconf = ap_get_module_config(r->per_dir_config, &proxy_module); if (!sconf->enabled) { return DECLINED; } ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01001) "proxy_express: Enabled"); if (!sconf->dbmfile || (r->filename && strncmp(r->filename, "proxy:", 6) == 0)) { /* it should be go on as an internal proxy request */ return DECLINED; } ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01002) "proxy_express: Opening DBM file: %s (%s)", sconf->dbmfile, sconf->dbmtype); rv = apr_dbm_open_ex(&db, sconf->dbmtype, sconf->dbmfile, APR_DBM_READONLY, APR_OS_DEFAULT, r->pool); if (rv != APR_SUCCESS) { return DECLINED; } name = ap_get_server_name(r); ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01003) "proxy_express: looking for %s", name); key.dptr = (char *)name; key.dsize = strlen(key.dptr); rv = apr_dbm_fetch(db, key, &val); apr_dbm_close(db); if (rv != APR_SUCCESS) { return DECLINED; } backend = apr_pstrmemdup(r->pool, val.dptr, val.dsize); if (!backend) { return DECLINED; } ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01004) "proxy_express: found %s -> %s", name, backend); r->filename = apr_pstrcat(r->pool, "proxy:", backend, r->uri, NULL); r->handler = "proxy-server"; r->proxyreq = PROXYREQ_REVERSE; ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01005) "proxy_express: rewritten as: %s", r->filename); ralias = (struct proxy_alias *)dconf->raliases->elts; /* * See if we have already added a ProxyPassReverse entry * for this host... If so, don't do it again. */ /* * NOTE: dconf is process specific so this wil only * work as long as we maintain that this process * or thread is handling the backend */ for (i = 0; i < dconf->raliases->nelts; i++, ralias++) { if (strcasecmp(backend, ralias->real) == 0) { ralias = NULL; break; } } /* Didn't find one... add it */ if (!ralias) { ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01006) "proxy_express: adding PPR entry"); ralias = apr_array_push(dconf->raliases); ralias->fake = "/"; ralias->real = apr_pstrdup(dconf->raliases->pool, backend); ralias->flags = 0; } return OK; }
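The lookup above assumes a DBM file that maps a requested server name to a backend URL; the stock httxt2dbm tool is the usual way to build it. A small offline sketch of generating such a map directly with apr_dbm (file name, type, and entries are hypothetical):

#include <stdio.h>
#include <string.h>
#include <apr_general.h>
#include <apr_pools.h>
#include <apr_dbm.h>

int main(void)
{
    apr_pool_t *p;
    apr_dbm_t *db;
    apr_datum_t key, val;
    apr_status_t rv;

    apr_initialize();
    apr_pool_create(&p, NULL);

    /* hypothetical map file: one entry per virtual host name */
    rv = apr_dbm_open_ex(&db, "default", "express-map", APR_DBM_RWCREATE,
                         APR_OS_DEFAULT, p);
    if (rv != APR_SUCCESS) {
        fprintf(stderr, "cannot open map file\n");
        return 1;
    }

    key.dptr = "www.example.test";
    key.dsize = strlen(key.dptr);
    val.dptr = "http://127.0.0.1:8080";
    val.dsize = strlen(val.dptr);
    apr_dbm_store(db, key, val);

    apr_dbm_close(db);
    apr_terminate();
    return 0;
}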
static int pyth_metric_init (apr_pool_t *p) { DIR *dp; struct dirent *entry; int i; char* modname; PyObject *pmod, *pinitfunc, *pobj, *pparamdict; py_metric_init_t minfo; Ganglia_25metric *gmi; mapped_info_t *mi; const char* path = python_module.module_params; cfg_t *module_cfg; /* Allocate a pool that will be used by this module */ apr_pool_create(&pool, p); metric_info = apr_array_make(pool, 10, sizeof(Ganglia_25metric)); metric_mapping_info = apr_array_make(pool, 10, sizeof(mapped_info_t)); /* Verify path exists and can be read */ if (!path) { err_msg("[PYTHON] Missing python module path.\n"); return -1; } if (access(path, F_OK)) { /* 'path' does not exist */ err_msg("[PYTHON] Can't open the python module path %s.\n", path); return -1; } if (access(path, R_OK)) { /* Don't have read access to 'path' */ err_msg("[PYTHON] Can't read from the python module path %s.\n", path); return -1; } /* Init Python environment */ /* Set up the python path to be able to load module from our module path */ Py_Initialize(); PyObject *sys_path = PySys_GetObject("path"); PyObject *addpath = PyString_FromString(path); PyList_Append(sys_path, addpath); PyEval_InitThreads(); gtstate = PyEval_SaveThread(); /* Initialize each python module */ if ((dp = opendir(path)) == NULL) { /* Error: Cannot open the directory - Shouldn't happen */ /* Log? */ err_msg("[PYTHON] Can't open the python module path %s.\n", path); return -1; } i = 0; while ((entry = readdir(dp)) != NULL) { modname = is_python_module(entry->d_name); if (modname == NULL) continue; /* Find the specified module configuration in gmond.conf If this return NULL then either the module config doesn't exist or the module is disabled. */ module_cfg = find_module_config(modname); if (!module_cfg) continue; PyEval_RestoreThread(gtstate); pmod = PyImport_ImportModule(modname); if (!pmod) { /* Failed to import module. Log? */ err_msg("[PYTHON] Can't import the metric module [%s].\n", modname); if (PyErr_Occurred()) { PyErr_Print(); } gtstate = PyEval_SaveThread(); continue; } pinitfunc = PyObject_GetAttrString(pmod, "metric_init"); if (!pinitfunc || !PyCallable_Check(pinitfunc)) { /* No metric_init function. */ err_msg("[PYTHON] Can't find the metric_init function in the python module [%s].\n", modname); Py_DECREF(pmod); gtstate = PyEval_SaveThread(); continue; } /* Build a parameter dictionary to pass to the module */ pparamdict = build_params_dict(module_cfg); if (!pparamdict || !PyDict_Check(pparamdict)) { /* No metric_init function. 
*/ err_msg("[PYTHON] Can't build the parameters dictionary for [%s].\n", modname); Py_DECREF(pmod); gtstate = PyEval_SaveThread(); continue; } /* Now call the metric_init method of the python module */ pobj = PyObject_CallFunction(pinitfunc, "(N)", pparamdict); if (!pobj) { /* failed calling metric_init */ err_msg("[PYTHON] Can't call the metric_init function in the python module [%s].\n", modname); if (PyErr_Occurred()) { PyErr_Print(); } Py_DECREF(pinitfunc); Py_DECREF(pmod); gtstate = PyEval_SaveThread(); continue; } if (PyList_Check(pobj)) { int j; int size = PyList_Size(pobj); for (j = 0; j < size; j++) { PyObject* plobj = PyList_GetItem(pobj, j); if (PyMapping_Check(plobj)) { fill_metric_info(plobj, &minfo, modname, pool); gmi = (Ganglia_25metric*)apr_array_push(metric_info); fill_gmi(gmi, &minfo); mi = (mapped_info_t*)apr_array_push(metric_mapping_info); mi->pmod = pmod; mi->mod_name = apr_pstrdup(pool, modname); mi->pcb = minfo.pcb; } } } else if (PyMapping_Check(pobj)) { fill_metric_info(pobj, &minfo, modname, pool); gmi = (Ganglia_25metric*)apr_array_push(metric_info); fill_gmi(gmi, &minfo); mi = (mapped_info_t*)apr_array_push(metric_mapping_info); mi->pmod = pmod; mi->mod_name = apr_pstrdup(pool, modname); mi->pcb = minfo.pcb; } Py_DECREF(pobj); Py_DECREF(pinitfunc); gtstate = PyEval_SaveThread(); } closedir(dp); apr_pool_cleanup_register(pool, NULL, pyth_metric_cleanup, apr_pool_cleanup_null); /* Replace the empty static metric definition array with the dynamic array that we just created */ /*XXX Need to put this into a finalize MACRO. This is just pushing a NULL entry onto the array so that the looping logic can determine the end if the array. We should probably give back a ready APR array rather than a pointer to a Ganglia_25metric array. */ gmi = apr_array_push(metric_info); memset (gmi, 0, sizeof(*gmi)); mi = apr_array_push(metric_mapping_info); memset (mi, 0, sizeof(*mi)); python_module.metrics_info = (Ganglia_25metric *)metric_info->elts; return 0; }
/** * Replace a bunch of chunks holding a request body with a single large chunk. */ static apr_status_t modsecurity_request_body_end_raw(modsec_rec *msr, char **error_msg) { msc_data_chunk **chunks, *one_chunk; char *d; int i, sofar; *error_msg = NULL; /* Allocate a buffer large enough to hold the request body. */ if (msr->msc_reqbody_length + 1 == 0) { *error_msg = apr_psprintf(msr->mp, "Internal error, request body length will overflow: %u", msr->msc_reqbody_length); return -1; } msr->msc_reqbody_buffer = malloc(msr->msc_reqbody_length + 1); if (msr->msc_reqbody_buffer == NULL) { *error_msg = apr_psprintf(msr->mp, "Unable to allocate memory to hold request body. Asked " "for %u bytes.", msr->msc_reqbody_length + 1); return -1; } msr->msc_reqbody_buffer[msr->msc_reqbody_length] = '\0'; /* Copy the data we keep in chunks into the new buffer. */ sofar = 0; d = msr->msc_reqbody_buffer; chunks = (msc_data_chunk **)msr->msc_reqbody_chunks->elts; for(i = 0; i < msr->msc_reqbody_chunks->nelts; i++) { if (sofar + chunks[i]->length <= msr->msc_reqbody_length) { memcpy(d, chunks[i]->data, chunks[i]->length); d += chunks[i]->length; sofar += chunks[i]->length; } else { *error_msg = apr_psprintf(msr->mp, "Internal error, request body buffer overflow."); return -1; } } /* Now free the memory used by the chunks. */ chunks = (msc_data_chunk **)msr->msc_reqbody_chunks->elts; for(i = 0; i < msr->msc_reqbody_chunks->nelts; i++) { free(chunks[i]->data); chunks[i]->data = NULL; } /* Create a new array with only one chunk in it. */ msr->msc_reqbody_chunks = apr_array_make(msr->msc_reqbody_mp, 2, sizeof(msc_data_chunk *)); if (msr->msc_reqbody_chunks == NULL) { *error_msg = apr_pstrdup(msr->mp, "Failed to create structure to hold request body."); return -1; } one_chunk = (msc_data_chunk *)apr_pcalloc(msr->msc_reqbody_mp, sizeof(msc_data_chunk)); one_chunk->data = msr->msc_reqbody_buffer; one_chunk->length = msr->msc_reqbody_length; one_chunk->is_permanent = 1; *(const msc_data_chunk **)apr_array_push(msr->msc_reqbody_chunks) = one_chunk; if(msr->txcfg->reqbody_limit > 0 && msr->txcfg->reqbody_limit < msr->msc_reqbody_length) { msr->msc_reqbody_length = msr->txcfg->reqbody_limit; } return 1; }
int main(int argc, const char * const argv[]) { int rc; int stat = 0; const char *ip = NULL; char *msg = NULL; qos_geo_t *geo; int size; const char *db = NULL; apr_table_t *entries; apr_pool_t *pool; const char *cmd = strrchr(argv[0], '/'); apr_app_initialize(&argc, &argv, NULL); apr_pool_create(&pool, NULL); entries = apr_table_make(pool, 100); if(cmd == NULL) { cmd = (char *)argv[0]; } else { cmd++; } argc--; argv++; while(argc >= 1) { if(strcmp(*argv, "-d") == 0) { if (--argc >= 1) { db = *(++argv); } } else if(strcmp(*argv, "-ip") == 0) { if (--argc >= 1) { ip = *(++argv); } } else if(strcmp(*argv, "-s") == 0) { stat = 1; } else if(strcmp(*argv, "-l") == 0) { m_inject = 1; } else if(strcmp(*argv,"-h") == 0) { usage(cmd, 0); } else if(strcmp(*argv,"--help") == 0) { usage(cmd, 0); } else if(strcmp(*argv,"-?") == 0) { usage(cmd, 0); } else if(strcmp(*argv,"--man") == 0) { usage(cmd, 1); } else { usage(cmd, 0); } argc--; argv++; } if(db == NULL) { usage(cmd, 0); } rc = nice(10); if(rc == -1) { fprintf(stderr, "ERROR, failed to change nice value: %s\n", strerror(errno)); } geo = qos_loadgeo(pool, db, &size, &msg); if(geo == NULL || msg != NULL) { fprintf(stderr, "failed to load database: %s\n", msg ? msg : "-"); exit(1); } if(m_inject) { exit(0); } if(ip) { qos_geo_t *pB; unsigned long search = qos_geo_str2long(pool, ip); printf("search %lu: ", search); pB = bsearch(&search, geo, size, sizeof(qos_geo_t), qos_geo_comp); if(pB) { printf("%s\n", pB->country); } else { printf("n/a\n"); } return 0; } // start reading from stdin { char prev; qos_geo_t *pB; apr_pool_t *tmp; char line[HUGE_STRING_LEN]; regex_t preg; regex_t preg2; regmatch_t ma[MAX_REG_MATCH]; apr_pool_create(&tmp, NULL); if(regcomp(&preg, IPPATTERN, REG_EXTENDED)) { exit(1); } regcomp(&preg2, IPPATTERN2, REG_EXTENDED); while(fgets(line, sizeof(line), stdin) != NULL) { int match = regexec(&preg, line, MAX_REG_MATCH, ma, 0); if(match != 0) { char *dx = strchr(line, ';'); if(dx && ((dx - line) <= 15)) { // file starts probably with <ip>; => a qslog -pc file? 
match = regexec(&preg2, line, MAX_REG_MATCH, ma, 0); } } if(match == 0) { unsigned long search; prev = line[ma[1].rm_eo]; line[ma[1].rm_eo] = '\0'; search = qos_geo_str2long(tmp, &line[ma[1].rm_so]); apr_pool_clear(tmp); pB = bsearch(&search, geo, size, sizeof(qos_geo_t), qos_geo_comp); if(stat) { /* creates a single statistic entry for each country (used to collect requests per source country) */ if(pB) { qos_geo_stat_t *s = (qos_geo_stat_t *)apr_table_get(entries, pB->country); if(s == NULL) { s = apr_pcalloc(pool, sizeof(qos_geo_stat_t)); s->num = 0; s->c = pB->c; apr_table_addn(entries, apr_pstrdup(pool, pB->country), (char *)s); } s->num++; } } else { /* modifies each log line inserting the country code */ char cr = prev; char delw[2]; char delx[2]; delw[1] = '\0'; delw[0] = ' '; delx[1] = '\0'; delx[0] = ' '; if(line[ma[1].rm_eo+1] == ' ') { delx[0] = '\0'; } if(line[ma[1].rm_eo+1] == ';') { delx[0] = ';'; } if(prev <= CR) { prev = ' '; } if(prev == ' ') { delw[0] = '\0'; } if(prev == ';') { delw[0] = '\0'; delx[0] = ';'; } if(pB) { printf("%s%c%s%s%s%s", line, prev, delw, pB->country, delx, &line[ma[1].rm_eo+1]); } else { printf("%s%c%s--%s%s", line, prev, delw, delx, &line[ma[1].rm_eo+1]); } if(cr <= CR) { printf("\n"); } } } else { printf("%s", line); } fflush(stdout); } if(stat) { int i; apr_table_entry_t *entry = (apr_table_entry_t *)apr_table_elts(entries)->elts; for(i = 0; i < apr_table_elts(entries)->nelts; i++) { qos_geo_stat_t *s = (qos_geo_stat_t *)entry[i].val; printf("%7.d %s %s\n", s->num, entry[i].key, s->c ? s->c : ""); } } } return 0; }
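The country lookup is an ordinary bsearch() over a table sorted by the start of each address range. Since qos_geo_t and qos_geo_comp are defined elsewhere in the tool, the sketch below uses a hypothetical record layout to show the same range-comparator pattern:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical record: an IP range (as 32-bit integers) plus a country code. */
typedef struct {
    unsigned long start;
    unsigned long end;
    const char *country;
} ip_range_t;

/* bsearch comparator: the key is a single address, the table entry a range */
static int range_comp(const void *key, const void *elem)
{
    unsigned long ip = *(const unsigned long *)key;
    const ip_range_t *r = elem;
    if (ip < r->start) return -1;
    if (ip > r->end)   return 1;
    return 0;
}

int main(void)
{
    /* the table must be sorted by range start for bsearch to work */
    ip_range_t table[] = {
        { 3221225984UL, 3221226239UL, "EX" },   /* 192.0.2.0/24, hypothetical */
        { 3325256704UL, 3325256959UL, "TE" },   /* 198.51.100.0/24, hypothetical */
    };
    unsigned long search = 3221226000UL;        /* an address inside the first range */
    ip_range_t *hit = bsearch(&search, table,
                              sizeof(table) / sizeof(table[0]),
                              sizeof(table[0]), range_comp);

    printf("%s\n", hit ? hit->country : "n/a");
    return 0;
}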
/** * Returns one chunk of request body data. It stores a NULL * in the chunk pointer when there is no data to return. The * return code is 1 if more calls can be made to retrieve more * data, 0 if there is no more data to retrieve, or -1 on error. * * The caller can limit the amount of data returned by providing * a non-negative value in nbytes. */ apr_status_t modsecurity_request_body_retrieve(modsec_rec *msr, msc_data_chunk **chunk, long int nbytes, char **error_msg) { msc_data_chunk **chunks; *error_msg = NULL; if (chunk == NULL) { *error_msg = apr_pstrdup(msr->mp, "Internal error, retrieving request body chunk."); return -1; } *chunk = NULL; if (msr->msc_reqbody_storage == MSC_REQBODY_MEMORY) { /* Are there any chunks left? */ if (msr->msc_reqbody_chunk_position >= msr->msc_reqbody_chunks->nelts) { /* No more chunks. */ return 0; } /* We always respond with the same chunk, just different information in it. */ *chunk = msr->msc_reqbody_disk_chunk; /* Advance to the current chunk and position on the next byte we need to send. */ chunks = (msc_data_chunk **)msr->msc_reqbody_chunks->elts; msr->msc_reqbody_disk_chunk->data = chunks[msr->msc_reqbody_chunk_position]->data + msr->msc_reqbody_chunk_offset; if (nbytes < 0) { /* Send what's left in this chunk as there is no limit on the size. */ msr->msc_reqbody_disk_chunk->length = chunks[msr->msc_reqbody_chunk_position]->length; msr->msc_reqbody_chunk_position++; msr->msc_reqbody_chunk_offset = 0; } else { /* We have a limit we must obey. */ if (chunks[msr->msc_reqbody_chunk_position]->length - msr->msc_reqbody_chunk_offset <= (unsigned int)nbytes) { /* If what's left in our chunk is less than the limit then send it all back. */ msr->msc_reqbody_disk_chunk->length = chunks[msr->msc_reqbody_chunk_position]->length - msr->msc_reqbody_chunk_offset; msr->msc_reqbody_chunk_position++; msr->msc_reqbody_chunk_offset = 0; } else { /* If we have more data in our chunk, send the maximum bytes we can (nbytes). */ msr->msc_reqbody_disk_chunk->length = nbytes; msr->msc_reqbody_chunk_offset += nbytes; } } /* If we've advanced beyond our last chunk then we have no more data to send.*/ if (msr->msc_reqbody_chunk_position >= msr->msc_reqbody_chunks->nelts) { return 0; /* No more chunks. */ } /* More data available. */ return 1; } if (msr->msc_reqbody_storage == MSC_REQBODY_DISK) { long int my_nbytes = CHUNK_CAPACITY; int i; /* Send CHUNK_CAPACITY bytes at a time unless a lower limit was requested. */ if ((nbytes != -1) && (my_nbytes > nbytes)) { my_nbytes = nbytes; } i = read(msr->msc_reqbody_fd, msr->msc_reqbody_disk_chunk->data, my_nbytes); if (i < 0) { *error_msg = apr_psprintf(msr->mp, "Input filter: Error reading from temporary file: %s", strerror(errno)); return -1; } *chunk = msr->msc_reqbody_disk_chunk; msr->msc_reqbody_disk_chunk->length = i; if (i == 0) { return 0; /* No more data available. */ } return 1; /* More data available. */ } /* Should never happen. */ *error_msg = apr_psprintf(msr->mp, "Internal error, invalid msc_reqbody_storage value: %u", msr->msc_reqbody_storage); return -1; }
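The contract documented above (1 = call again, 0 = done, -1 = error) implies a simple loop on the caller's side. A hypothetical sketch of such a caller, assuming the companion modsecurity_request_body_retrieve_start()/_end() helpers from the same source file; forwarding of each chunk is left abstract:

#include "modsecurity.h"

/* Hypothetical caller: drain the stored request body chunk by chunk. */
static apr_status_t forward_request_body(modsec_rec *msr, char **error_msg)
{
    msc_data_chunk *chunk = NULL;
    apr_status_t rc;

    modsecurity_request_body_retrieve_start(msr, error_msg);
    while ((rc = modsecurity_request_body_retrieve(msr, &chunk, -1, error_msg)) > 0) {
        if (chunk != NULL) {
            /* hand chunk->data / chunk->length to the output side here */
        }
    }
    modsecurity_request_body_retrieve_end(msr);
    return (rc < 0) ? rc : APR_SUCCESS;
}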
static int rpaf_post_read_request(request_rec *r) { char *fwdvalue, *val, *mask, *last_val; int i; apr_port_t tmpport; apr_pool_t *tmppool; const char *header_ip = NULL, *header_host = NULL, *header_https = NULL, *header_port = NULL; rpaf_server_cfg *cfg = (rpaf_server_cfg *)ap_get_module_config(r->server->module_config, &rpaf_module); if (!cfg->enable) return DECLINED; /* this overcomes an issue when mod_rewrite causes this to get called again and the environment value is lost for HTTPS. This is the only thing that is lost and we do not need to process any further after restoring the value. Note that this check uses the *per-request* note - otherwise we would shortcut here for every subsequent request */ const char *rpaf_https = apr_table_get(r->notes, "rpaf_https"); if (rpaf_https) { apr_table_set(r->subprocess_env, "HTTPS", rpaf_https); return DECLINED; } /* check if the remote_addr is in the allowed proxy IP list */ if (is_in_array(r->DEF_ADDR, cfg->proxy_ips) != 1) { if (cfg->forbid_if_not_proxy) return HTTP_FORBIDDEN; return DECLINED; } /* TODO: We should not just assume that we should fallback to X-Forwarded-For as this could pose a security risk, keeping this for now to keep our behaviour consistant */ header_ip = cfg->headername; if (header_ip) fwdvalue = (char *)apr_table_get(r->headers_in, header_ip); if (!header_ip || !fwdvalue) { header_ip = "X-Forwarded-For"; fwdvalue = (char *)apr_table_get(r->headers_in, header_ip); } /* if there was no forwarded for header then we dont do anything */ if (!fwdvalue) return DECLINED; /* split up the list of forwarded IPs */ apr_array_header_t *arr = apr_array_make(r->pool, 4, sizeof(char *)); while ((val = strsep(&fwdvalue, ",")) != NULL) { /* strip leading and trailing whitespace */ while(isspace(*val)) ++val; for (i = strlen(val) - 1; i > 0 && isspace(val[i]); i--) val[i] = '\0'; if (rpaf_looks_like_ip(val)) *(char **)apr_array_push(arr) = apr_pstrdup(r->pool, val); } /* if there were no IPs, then there is nothing to do */ if (apr_is_empty_array(arr)) return DECLINED; /* get the last IP and check if it is in our list of proxies */ if ((last_val = last_not_in_array(r, arr, cfg->proxy_ips)) == NULL) return DECLINED; /* if we are cleaning up the headers then we need to correct the forwarded IP list */ if (cfg->clean_headers) { /* pop the proxy's IP from the list */ apr_array_pop(arr); if (apr_is_empty_array(arr)) apr_table_unset(r->headers_in, header_ip); else { char *ip_list = apr_array_pstrcat(r->pool, arr, ','); apr_table_set(r->headers_in, header_ip, ip_list); } } rpaf_cleanup_rec *rcr = (rpaf_cleanup_rec *)apr_pcalloc(r->pool, sizeof(rpaf_cleanup_rec)); rcr->old_ip = apr_pstrdup(r->DEF_POOL, r->DEF_IP); rcr->r = r; apr_pool_cleanup_register(r->pool, (void *)rcr, rpaf_cleanup, apr_pool_cleanup_null); r->DEF_IP = apr_pstrdup(r->DEF_POOL, last_val); memcpy(&rcr->old_addr, r->DEF_ADDR, sizeof(apr_sockaddr_t)); tmppool = r->DEF_ADDR->pool; tmpport = r->DEF_ADDR->port; apr_sockaddr_t *tmpsa; int ret = apr_sockaddr_info_get(&tmpsa, r->DEF_IP, APR_UNSPEC, tmpport, 0, tmppool); if (ret == APR_SUCCESS) memcpy(r->DEF_ADDR, tmpsa, sizeof(apr_sockaddr_t)); if (cfg->sethostname) { const char *hostvalue; header_host = "X-Forwarded-Host"; hostvalue = apr_table_get(r->headers_in, header_host); if (!hostvalue) { header_host = "X-Host"; hostvalue = apr_table_get(r->headers_in, header_host); } if (!hostvalue) { header_host = NULL; } else { apr_array_header_t *arr = apr_array_make(r->pool, 0, sizeof(char*)); while (*hostvalue && (val = ap_get_token(r->pool, 
&hostvalue, 1))) { *(char **)apr_array_push(arr) = apr_pstrdup(r->pool, val); if (*hostvalue != '\0') ++hostvalue; } apr_table_set(r->headers_in, "Host", apr_pstrdup(r->pool, ((char **)arr->elts)[((arr->nelts)-1)])); r->hostname = apr_pstrdup(r->pool, ((char **)arr->elts)[((arr->nelts)-1)]); ap_update_vhost_from_headers(r); } } if (cfg->sethttps) { const char *httpsvalue, *scheme; header_https = "X-Forwarded-HTTPS"; httpsvalue = apr_table_get(r->headers_in, header_https); if (!httpsvalue) { header_https = "X-HTTPS"; httpsvalue = apr_table_get(r->headers_in, header_https); } if (!httpsvalue) { header_https = "X-Forwarded-Proto"; httpsvalue = apr_table_get(r->headers_in, header_https); if (!httpsvalue) { header_https = "X-Forwarded-Protocol"; httpsvalue = apr_table_get(r->headers_in, header_https); } if (httpsvalue) { if (strcmp(httpsvalue, cfg->https_scheme) == 0) { /* set a per-request note to get around an issue with mod_rewrite (explained in an earlier comment), and a per-connection note to allow our version of ssl_is_https() to work. */ apr_table_set(r->notes, "rpaf_https", "on"); apr_table_set(r->connection->notes, "rpaf_https", "on"); apr_table_set(r->subprocess_env , "HTTPS" , "on"); scheme = cfg->https_scheme; } else { scheme = cfg->orig_scheme; } } else { header_https = NULL; scheme = cfg->orig_scheme; } } else { if(strcmp(httpsvalue, "on") == 0 || strcmp(httpsvalue, "On") == 0) { apr_table_set(r->notes, "rpaf_https", "on"); apr_table_set(r->connection->notes, "rpaf_https", "on"); apr_table_set(r->subprocess_env , "HTTPS" , "on"); scheme = cfg->https_scheme; } else { scheme = cfg->orig_scheme; } } #if AP_SERVER_MINORVERSION_NUMBER > 1 && AP_SERVER_PATCHLEVEL_NUMBER > 2 r->server->server_scheme = scheme; #endif } if (cfg->setport) { const char *portvalue; header_port = "X-Forwarded-Port"; portvalue = apr_table_get(r->headers_in, header_port); if (!portvalue) { header_port = "X-Port"; portvalue = apr_table_get(r->headers_in, header_port); } if (!portvalue) { header_port = NULL; r->server->port = cfg->orig_port; } else { r->server->port = atoi(portvalue); r->parsed_uri.port = r->server->port; } } if (cfg->clean_headers) { if (header_host ) apr_table_unset(r->headers_in, header_host ); if (header_https) apr_table_unset(r->headers_in, header_https); if (header_port ) apr_table_unset(r->headers_in, header_port ); } return DECLINED; }
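The forwarded-address handling above leans on strsep() plus manual whitespace trimming. In isolation, and with a made-up header value, that splitting step looks like this:

#include <stdio.h>
#include <string.h>
#include <ctype.h>

int main(void)
{
    /* writable copy of a hypothetical X-Forwarded-For value */
    char buf[] = " 203.0.113.7 , 10.0.0.1,192.0.2.10 ";
    char *rest = buf, *val;
    size_t n;

    while ((val = strsep(&rest, ",")) != NULL) {
        while (isspace((unsigned char)*val))
            ++val;                                   /* strip leading blanks */
        n = strlen(val);
        while (n > 0 && isspace((unsigned char)val[n - 1]))
            val[--n] = '\0';                         /* strip trailing blanks */
        if (*val)
            printf("hop: %s\n", val);
    }
    return 0;
}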
/** * This function gets called whenever there is a 'HeaderReplacePattern' in the * config file. * Returns NULL if everything went alright, otherwise an error message. * * @param cmd The command record filled with general information * about the environment. * @param dummy To be ignored. * @param args The arguments passed from the pattern definition. Must * be in the following order: name, header, pattern, * replacement string. */ static const char *add_header_pattern(cmd_parms *cmd, void *dummy, const char *args) { const char *name; // the filter name const char *header; // the HTTP header field to match const char *pattern_str;// the textual representation of the pattern const char *replace; // the replacement string replace_server_t *conf; // the server configuration (hashtable) replace_filter_t *filter; // the filter configuration header_replace_pattern_t *pattern; // the pattern to add header_replace_pattern_t *previous; // the previous pattern, if any header_replace_pattern_t backup; pcre *re; // the regular expression pcre_extra *pe; // data from studying the pattern const char *error; // error text for the failed regex compilation int error_offset; // offset of the regex compilation error, if any int rc; // return count of the regex matching int i; // counter int rv; // return value for generic function calls int flags = 0; // the flags for the regex matching /* Get the configuration record */ conf = ap_get_module_config(cmd->server->module_config, &replace_module); if (conf == NULL) { return apr_pstrcat(cmd->temp_pool, "Illegal server record", NULL); } /* * Parse the arguments. */ /* Extract the name of the filter and check for its existence. */ name = ap_getword_white(cmd->pool, &args); if (!apr_hash_get(conf->h, name, APR_HASH_KEY_STRING)) { return "ReplaceFilter not defined"; } /* Extract the header field. */ header = ap_getword_conf(cmd->pool, &args); if (!header || strlen(header) == 0) { return "Header field missing"; } /* Extract the regex pattern */ pattern_str = ap_getword_conf(cmd->pool, &args); if (!pattern_str || strlen(pattern_str) == 0) { return "Pattern definition missing"; } if (!args || strlen(args) == 0) { return "Replacement pattern missing"; } /* Extract the replacement string */ replace = ap_getword_conf(cmd->pool, &args); if (!replace) { return "Replacement pattern missing"; } /* Check for additional, illegal configuration directives */ if (args && strlen(args) > 0) { return apr_psprintf(cmd->temp_pool, "Illegal conf directive: \"%s\"", args); } /* Get the filter definition */ filter = (replace_filter_t*)apr_hash_get(conf->h, name, APR_HASH_KEY_STRING); if (filter == NULL) { return apr_pstrcat(cmd->temp_pool, "Unknown filter definition for replace filter", NULL); } /* Check if we have to set the flag for case insensitive matching. */ if (filter->case_ignore == 1) { flags |= PCRE_CASELESS; } /* Compile the pattern. */ re = pcre_compile(pattern_str, flags, &error, &error_offset, NULL); /* Return ungracefully if the compilation of the regex failed. */ if (re == NULL) { return apr_pstrcat(cmd->temp_pool, "Error compiling regular expression: ", error, NULL); } /* Study the pattern. This is done for performance improvement, but most of * the time it doesn't speed up things, since the return value is simply * NULL. */ pe = pcre_study(re, 0, &error); if (error != NULL) { return apr_pstrcat(cmd->temp_pool, "Error studying compiled pattern: ", error, NULL); } /* Check for an already existing pattern. 
*/ pattern = filter->header_pattern; previous = NULL; /* Find the last pattern in the list. */ while (pattern && pattern->next != NULL) { previous = pattern; pattern = pattern->next; } /* If there has been no pattern at all, create one. Otherwise save the last * pattern and create a new one. */ if (!pattern) { pattern = (header_replace_pattern_t *)apr_pcalloc(conf->p, sizeof(header_replace_pattern_t)); filter->header_pattern = pattern; } else { previous = pattern; pattern = (header_replace_pattern_t *)apr_pcalloc(conf->p, sizeof(header_replace_pattern_t)); } /* Assign the values to the structure and add the pattern to the list. */ pattern->pattern = re; pattern->extra = pe; pattern->replacement = apr_pstrdup(conf->p, replace); pattern->header = apr_pstrdup(conf->p, header); pattern->next = NULL; if (previous) { previous->next = pattern; } ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, cmd->server, "Filter %s: Added header pattern \"%s\"", name, pattern_str); return NULL; }
/* parse_uri_components(): * Parse a given URI, fill in all supplied fields of a uri_components * structure. This eliminates the necessity of extracting host, port, * path, query info repeatedly in the modules. * Side effects: * - fills in fields of uri_components *uptr * - none on any of the r->* fields */ APU_DECLARE(apr_status_t) apr_uri_parse(apr_pool_t *p, const char *uri, apr_uri_t *uptr) { const char *s; const char *s1; const char *hostinfo; char *endstr; int port; int v6_offset1 = 0, v6_offset2 = 0; /* Initialize the structure. parse_uri() and parse_uri_components() * can be called more than once per request. */ memset (uptr, '\0', sizeof(*uptr)); uptr->is_initialized = 1; /* We assume the processor has a branch predictor like most -- * it assumes forward branches are untaken and backwards are taken. That's * the reason for the gotos. -djg */ if (uri[0] == '/') { /* RFC2396 #4.3 says that two leading slashes mean we have an * authority component, not a path! Fixing this looks scary * with the gotos here. But if the existing logic is valid, * then presumably a goto pointing to deal_with_authority works. * * RFC2396 describes this as resolving an ambiguity. In the * case of three or more slashes there would seem to be no * ambiguity, so it is a path after all. */ if (uri[1] == '/' && uri[2] != '/') { s = uri + 2 ; goto deal_with_authority ; } deal_with_path: /* we expect uri to point to first character of path ... remember * that the path could be empty -- http://foobar?query for example */ s = uri; while ((uri_delims[*(unsigned char *)s] & NOTEND_PATH) == 0) { ++s; } if (s != uri) { uptr->path = apr_pstrmemdup(p, uri, s - uri); } if (*s == 0) { return APR_SUCCESS; } if (*s == '?') { ++s; s1 = strchr(s, '#'); if (s1) { uptr->fragment = apr_pstrdup(p, s1 + 1); uptr->query = apr_pstrmemdup(p, s, s1 - s); } else { uptr->query = apr_pstrdup(p, s); } return APR_SUCCESS; } /* otherwise it's a fragment */ uptr->fragment = apr_pstrdup(p, s + 1); return APR_SUCCESS; } /* find the scheme: */ s = uri; while ((uri_delims[*(unsigned char *)s] & NOTEND_SCHEME) == 0) { ++s; } /* scheme must be non-empty and followed by :// */ if (s == uri || s[0] != ':' || s[1] != '/' || s[2] != '/') { goto deal_with_path; /* backwards predicted taken! */ } uptr->scheme = apr_pstrmemdup(p, uri, s - uri); s += 3; deal_with_authority: hostinfo = s; while ((uri_delims[*(unsigned char *)s] & NOTEND_HOSTINFO) == 0) { ++s; } uri = s; /* whatever follows hostinfo is start of uri */ uptr->hostinfo = apr_pstrmemdup(p, hostinfo, uri - hostinfo); /* If there's a username:password@host:port, the @ we want is the last @... * too bad there's no memrchr()... For the C purists, note that hostinfo * is definately not the first character of the original uri so therefore * &hostinfo[-1] < &hostinfo[0] ... and this loop is valid C. */ do { --s; } while (s >= hostinfo && *s != '@'); if (s < hostinfo) { /* again we want the common case to be fall through */ deal_with_host: /* We expect hostinfo to point to the first character of * the hostname. If there's a port it is the first colon, * except with IPv6. 
*/ if (*hostinfo == '[') { v6_offset1 = 1; v6_offset2 = 2; s = memchr(hostinfo, ']', uri - hostinfo); if (s == NULL) { return APR_EGENERAL; } if (*++s != ':') { s = NULL; /* no port */ } } else { s = memchr(hostinfo, ':', uri - hostinfo); } if (s == NULL) { /* we expect the common case to have no port */ uptr->hostname = apr_pstrmemdup(p, hostinfo + v6_offset1, uri - hostinfo - v6_offset2); goto deal_with_path; } uptr->hostname = apr_pstrmemdup(p, hostinfo + v6_offset1, s - hostinfo - v6_offset2); ++s; uptr->port_str = apr_pstrmemdup(p, s, uri - s); if (uri != s) { port = strtol(uptr->port_str, &endstr, 10); uptr->port = port; if (*endstr == '\0') { goto deal_with_path; } /* Invalid characters after ':' found */ return APR_EGENERAL; } uptr->port = apr_uri_port_of_scheme(uptr->scheme); goto deal_with_path; } /* first colon delimits username:password */ s1 = memchr(hostinfo, ':', s - hostinfo); if (s1) { uptr->user = apr_pstrmemdup(p, hostinfo, s1 - hostinfo); ++s1; uptr->password = apr_pstrmemdup(p, s1, s - s1); } else { uptr->user = apr_pstrmemdup(p, hostinfo, s - hostinfo); } hostinfo = s + 1; goto deal_with_host; }
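A short usage sketch of the parser, with a hypothetical URL exercising the scheme, userinfo, port, query, and fragment branches above; with flags 0, apr_uri_unparse() does not reveal the password:

#include <stdio.h>
#include <apr_general.h>
#include <apr_pools.h>
#include <apr_uri.h>

int main(void)
{
    apr_pool_t *p;
    apr_uri_t uri;

    apr_initialize();
    apr_pool_create(&p, NULL);

    if (apr_uri_parse(p, "http://alice:secret@example.test:8080/a/b?x=1#top",
                      &uri) == APR_SUCCESS) {
        printf("scheme=%s host=%s port=%u path=%s query=%s fragment=%s\n",
               uri.scheme, uri.hostname, (unsigned)uri.port, uri.path,
               uri.query, uri.fragment);
        printf("unparsed: %s\n", apr_uri_unparse(p, &uri, 0));
    }

    apr_terminate();
    return 0;
}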
APR_DECLARE(apr_status_t) apr_shm_create(apr_shm_t **m, apr_size_t reqsize, const char *filename, apr_pool_t *pool) { apr_shm_t *new_m; apr_status_t status; #if APR_USE_SHMEM_SHMGET || APR_USE_SHMEM_SHMGET_ANON struct shmid_ds shmbuf; apr_uid_t uid; apr_gid_t gid; #endif #if APR_USE_SHMEM_MMAP_TMP || APR_USE_SHMEM_MMAP_SHM || \ APR_USE_SHMEM_MMAP_ZERO int tmpfd; #endif #if APR_USE_SHMEM_SHMGET apr_size_t nbytes; key_t shmkey; #endif #if APR_USE_SHMEM_MMAP_ZERO || APR_USE_SHMEM_SHMGET || \ APR_USE_SHMEM_MMAP_TMP || APR_USE_SHMEM_MMAP_SHM apr_file_t *file; /* file where metadata is stored */ #endif /* Check if they want anonymous or name-based shared memory */ if (filename == NULL) { #if APR_USE_SHMEM_MMAP_ZERO || APR_USE_SHMEM_MMAP_ANON new_m = apr_palloc(pool, sizeof(apr_shm_t)); new_m->pool = pool; new_m->reqsize = reqsize; new_m->realsize = reqsize + APR_ALIGN_DEFAULT(sizeof(apr_size_t)); /* room for metadata */ new_m->filename = NULL; #if APR_USE_SHMEM_MMAP_ZERO status = apr_file_open(&file, "/dev/zero", APR_READ | APR_WRITE, APR_OS_DEFAULT, pool); if (status != APR_SUCCESS) { return status; } status = apr_os_file_get(&tmpfd, file); if (status != APR_SUCCESS) { return status; } new_m->base = mmap(NULL, new_m->realsize, PROT_READ|PROT_WRITE, MAP_SHARED, tmpfd, 0); if (new_m->base == (void *)MAP_FAILED) { return errno; } status = apr_file_close(file); if (status != APR_SUCCESS) { return status; } /* store the real size in the metadata */ *(apr_size_t*)(new_m->base) = new_m->realsize; /* metadata isn't usable */ new_m->usable = (char *)new_m->base + APR_ALIGN_DEFAULT(sizeof(apr_size_t)); apr_pool_cleanup_register(new_m->pool, new_m, shm_cleanup_owner, apr_pool_cleanup_null); *m = new_m; return APR_SUCCESS; #elif APR_USE_SHMEM_MMAP_ANON new_m->base = mmap(NULL, new_m->realsize, PROT_READ|PROT_WRITE, MAP_ANON|MAP_SHARED, -1, 0); if (new_m->base == (void *)MAP_FAILED) { return errno; } /* store the real size in the metadata */ *(apr_size_t*)(new_m->base) = new_m->realsize; /* metadata isn't usable */ new_m->usable = (char *)new_m->base + APR_ALIGN_DEFAULT(sizeof(apr_size_t)); apr_pool_cleanup_register(new_m->pool, new_m, shm_cleanup_owner, apr_pool_cleanup_null); *m = new_m; return APR_SUCCESS; #endif /* APR_USE_SHMEM_MMAP_ZERO */ #endif /* APR_USE_SHMEM_MMAP_ZERO || APR_USE_SHMEM_MMAP_ANON */ #if APR_USE_SHMEM_SHMGET_ANON new_m = apr_palloc(pool, sizeof(apr_shm_t)); new_m->pool = pool; new_m->reqsize = reqsize; new_m->realsize = reqsize; new_m->filename = NULL; if ((new_m->shmid = shmget(IPC_PRIVATE, new_m->realsize, SHM_R | SHM_W | IPC_CREAT)) < 0) { return errno; } if ((new_m->base = shmat(new_m->shmid, NULL, 0)) == (void *)-1) { return errno; } new_m->usable = new_m->base; if (shmctl(new_m->shmid, IPC_STAT, &shmbuf) == -1) { return errno; } apr_uid_current(&uid, &gid, pool); shmbuf.shm_perm.uid = uid; shmbuf.shm_perm.gid = gid; if (shmctl(new_m->shmid, IPC_SET, &shmbuf) == -1) { return errno; } /* Remove the segment once use count hits zero. * We will not attach to this segment again, since it is * anonymous memory, so it is ok to mark it for deletion. */ if (shmctl(new_m->shmid, IPC_RMID, NULL) == -1) { return errno; } apr_pool_cleanup_register(new_m->pool, new_m, shm_cleanup_owner, apr_pool_cleanup_null); *m = new_m; return APR_SUCCESS; #endif /* APR_USE_SHMEM_SHMGET_ANON */ /* It is an error if they want anonymous memory but we don't have it. 
         */
        return APR_ENOTIMPL; /* requested anonymous but we don't have it */
    }

    /* Name-based shared memory */
    else {
        new_m = apr_palloc(pool, sizeof(apr_shm_t));
        new_m->pool = pool;
        new_m->reqsize = reqsize;
        new_m->filename = apr_pstrdup(pool, filename);

#if APR_USE_SHMEM_MMAP_TMP || APR_USE_SHMEM_MMAP_SHM
        new_m->realsize = reqsize +
            APR_ALIGN_DEFAULT(sizeof(apr_size_t)); /* room for metadata */
        /* FIXME: Ignore error for now. *
         * status = apr_file_remove(file, pool);*/
        status = APR_SUCCESS;

#if APR_USE_SHMEM_MMAP_TMP
        /* FIXME: Is APR_OS_DEFAULT sufficient? */
        status = apr_file_open(&file, filename,
                               APR_READ | APR_WRITE | APR_CREATE | APR_EXCL,
                               APR_OS_DEFAULT, pool);
        if (status != APR_SUCCESS) {
            return status;
        }

        status = apr_os_file_get(&tmpfd, file);
        if (status != APR_SUCCESS) {
            apr_file_close(file); /* ignore errors, we're failing */
            apr_file_remove(new_m->filename, new_m->pool);
            return status;
        }

        status = apr_file_trunc(file, new_m->realsize);
        if (status != APR_SUCCESS) {
            apr_file_close(file); /* ignore errors, we're failing */
            apr_file_remove(new_m->filename, new_m->pool);
            return status;
        }

        new_m->base = mmap(NULL, new_m->realsize, PROT_READ | PROT_WRITE,
                           MAP_SHARED, tmpfd, 0);
        /* FIXME: check for errors */

        status = apr_file_close(file);
        if (status != APR_SUCCESS) {
            return status;
        }
#endif /* APR_USE_SHMEM_MMAP_TMP */

#if APR_USE_SHMEM_MMAP_SHM
        tmpfd = shm_open(filename, O_RDWR | O_CREAT | O_EXCL, 0644);
        if (tmpfd == -1) {
            return errno;
        }

        status = apr_os_file_put(&file, &tmpfd,
                                 APR_READ | APR_WRITE | APR_CREATE | APR_EXCL,
                                 pool);
        if (status != APR_SUCCESS) {
            return status;
        }

        status = apr_file_trunc(file, new_m->realsize);
        if (status != APR_SUCCESS) {
            shm_unlink(filename); /* we're failing, remove the object */
            return status;
        }

        /* Map the full realsize (not just reqsize) so the metadata word
         * stored at the start of the segment fits before the usable region. */
        new_m->base = mmap(NULL, new_m->realsize, PROT_READ | PROT_WRITE,
                           MAP_SHARED, tmpfd, 0);
        /* FIXME: check for errors */

        status = apr_file_close(file);
        if (status != APR_SUCCESS) {
            return status;
        }
#endif /* APR_USE_SHMEM_MMAP_SHM */

        /* store the real size in the metadata */
        *(apr_size_t*)(new_m->base) = new_m->realsize;
        /* metadata isn't usable */
        new_m->usable = (char *)new_m->base +
            APR_ALIGN_DEFAULT(sizeof(apr_size_t));

        apr_pool_cleanup_register(new_m->pool, new_m, shm_cleanup_owner,
                                  apr_pool_cleanup_null);
        *m = new_m;
        return APR_SUCCESS;
#endif /* APR_USE_SHMEM_MMAP_TMP || APR_USE_SHMEM_MMAP_SHM */

#if APR_USE_SHMEM_SHMGET
        new_m->realsize = reqsize;

        /* FIXME: APR_OS_DEFAULT is too permissive, switch to 600 I think. */
        status = apr_file_open(&file, filename,
                               APR_WRITE | APR_CREATE | APR_EXCL,
                               APR_OS_DEFAULT, pool);
        if (status != APR_SUCCESS) {
            return status;
        }

        /* ftok() (on solaris at least) requires that the file actually
         * exist before calling ftok().
         */
        shmkey = ftok(filename, 1);
        if (shmkey == (key_t)-1) {
            return errno;
        }

        if ((new_m->shmid = shmget(shmkey, new_m->realsize,
                                   SHM_R | SHM_W | IPC_CREAT | IPC_EXCL)) < 0) {
            return errno;
        }

        if ((new_m->base = shmat(new_m->shmid, NULL, 0)) == (void *)-1) {
            return errno;
        }
        new_m->usable = new_m->base;

        if (shmctl(new_m->shmid, IPC_STAT, &shmbuf) == -1) {
            return errno;
        }
        apr_uid_current(&uid, &gid, pool);
        shmbuf.shm_perm.uid = uid;
        shmbuf.shm_perm.gid = gid;
        if (shmctl(new_m->shmid, IPC_SET, &shmbuf) == -1) {
            return errno;
        }

        nbytes = sizeof(reqsize);
        status = apr_file_write(file, (const void *)&reqsize, &nbytes);
        if (status != APR_SUCCESS) {
            return status;
        }

        status = apr_file_close(file);
        if (status != APR_SUCCESS) {
            return status;
        }

        apr_pool_cleanup_register(new_m->pool, new_m, shm_cleanup_owner,
                                  apr_pool_cleanup_null);
        *m = new_m;
        return APR_SUCCESS;
#endif /* APR_USE_SHMEM_SHMGET */
    }

    return APR_ENOTIMPL;
}
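/* A minimal usage sketch for apr_shm_create() as defined above. The
 * segment name "/tmp/example-shm" and the 4096-byte size are illustrative
 * assumptions, not values taken from the source; error handling is kept
 * to the bare minimum.
 */
#include <stdio.h>
#include <string.h>
#include "apr_general.h"
#include "apr_pools.h"
#include "apr_shm.h"

int main(void)
{
    apr_pool_t *pool;
    apr_shm_t *shm;
    apr_status_t rv;

    apr_initialize();
    apr_pool_create(&pool, NULL);

    /* Passing NULL instead of a filename would take the anonymous
     * branches above. */
    rv = apr_shm_create(&shm, 4096, "/tmp/example-shm", pool);
    if (rv == APR_SUCCESS) {
        /* usable region starts past the metadata word */
        char *base = apr_shm_baseaddr_get(shm);
        strcpy(base, "hello");
        printf("segment holds: %s\n", base);
        apr_shm_destroy(shm);
    }

    apr_pool_destroy(pool);
    apr_terminate();
    return 0;
}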