/* Determine if "url" matches the hostname, scheme and port and path
 * in "filter". All but the path comparisons are case-insensitive.
 *
 * filter  - the CacheEnable/CacheDisable style filter URI
 * pathlen - number of leading bytes of filter.path that must match
 * url     - the request URI being tested
 *
 * Returns 1 if the url meets all filter conditions, 0 otherwise.
 */
int api_cache_uri_meets_conditions(apr_uri_t filter, int pathlen,
                                   apr_uri_t url)
{
    /* Compare the hostnames */
    if (filter.hostname) {
        if (!url.hostname) {
            return 0;
        }
        else if (strcasecmp(filter.hostname, url.hostname)) {
            return 0;
        }
    }

    /* Compare the schemes */
    if (filter.scheme) {
        if (!url.scheme) {
            return 0;
        }
        else if (strcasecmp(filter.scheme, url.scheme)) {
            return 0;
        }
    }

    /* Compare the ports */
    if (filter.port_str) {
        if (url.port_str) {
            /* Both sides specify an explicit port: they must agree.
             * BUGFIX: previously a matching explicit port still fell
             * through to the scheme-default comparison below, wrongly
             * rejecting e.g. filter :8080 vs url :8080 (default 80). */
            if (filter.port != url.port) {
                return 0;
            }
        }
        /* NOTE: apr_uri_port_of_scheme will return 0 if given NULL input */
        else if (filter.port != apr_uri_port_of_scheme(url.scheme)) {
            return 0;
        }
    }
    else if (url.port_str && filter.scheme) {
        /* Filter has no explicit port: the url's explicit port must be
         * the default port of the filter's scheme.
         * BUGFIX: the comparison was inverted (==), which rejected URLs
         * whose explicit port equals the scheme default and accepted
         * every other port. */
        if (apr_uri_port_of_scheme(filter.scheme) != url.port) {
            return 0;
        }
    }

    /* For HTTP caching purposes, an empty (NULL) path is equivalent to
     * a single "/" path. RFCs 3986/2396
     */
    if (!url.path) {
        if (*filter.path == '/' && pathlen == 1) {
            return 1;
        }
        else {
            return 0;
        }
    }

    /* Url has met all of the filter conditions so far, determine
     * if the paths match.
     */
    return !strncmp(filter.path, url.path, pathlen);
}
/* Populate an HTTP/2 request from an HTTP/1 request_rec: method, scheme,
 * authority and path (the RFC 7540 pseudo-header values), followed by a
 * copy of all HTTP/1 request headers.
 *
 * Returns the status of the header copy (add_all_h1_header).
 */
apr_status_t h2_request_rwrite(h2_request *req, request_rec *r)
{
    apr_status_t status;

    req->config = h2_config_rget(r);
    req->method = r->method;
    if (r->parsed_uri.scheme) {
        req->scheme = r->parsed_uri.scheme;
    }
    else {
        req->scheme = ap_http_scheme(r);
    }
    req->authority = r->hostname;
    req->path = apr_uri_unparse(r->pool, &r->parsed_uri,
                                APR_URI_UNP_OMITSITEPART);

    if (!ap_strchr_c(req->authority, ':') && r->server && r->server->port) {
        apr_port_t default_port = apr_uri_port_of_scheme(req->scheme);
        if (default_port != r->server->port) {
            /* port info missing and port is not default for scheme: append */
            req->authority = apr_psprintf(r->pool, "%s:%d", req->authority,
                                          (int)r->server->port);
        }
    }

    AP_DEBUG_ASSERT(req->scheme);
    AP_DEBUG_ASSERT(req->authority);
    AP_DEBUG_ASSERT(req->path);
    AP_DEBUG_ASSERT(req->method);

    status = add_all_h1_header(req, r->pool, r->headers_in);

    ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, r,
                  "h2_request(%d): rwrite %s host=%s://%s%s",
                  req->id, req->method, req->scheme, req->authority,
                  req->path);
    return status;
}
/* Return the responder URI object which should be used in the given
 * configuration for the given certificate, or NULL if none can be
 * determined. */
static apr_uri_t *determine_responder_uri(SSLSrvConfigRec *sc, X509 *cert,
                                          conn_rec *c, apr_pool_t *p)
{
    apr_uri_t *u = apr_palloc(p, sizeof *u);
    const char *s = NULL;
    apr_status_t rv;

    /* A configured "force default" wins outright.  Otherwise prefer the
     * responder named in the certificate, falling back to the configured
     * default when the certificate names none. */
    if (sc->server->ocsp_force_default != TRUE) {
        s = extract_responder_uri(cert, p);
    }
    if (s == NULL) {
        s = sc->server->ocsp_responder;
    }

    if (s == NULL) {
        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(01918)
                      "no OCSP responder specified in certificate and "
                      "no default configured");
        return NULL;
    }

    rv = apr_uri_parse(p, s, u);
    if (rv || !u->hostname) {
        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c, APLOGNO(01919)
                      "failed to parse OCSP responder URI '%s'", s);
        return NULL;
    }

    /* Only plain http responders are supported. */
    if (strcasecmp(u->scheme, "http") != 0) {
        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c, APLOGNO(01920)
                      "cannot handle OCSP responder URI '%s'", s);
        return NULL;
    }

    /* Fill in the scheme's default port when none was given. */
    if (!u->port) {
        u->port = apr_uri_port_of_scheme(u->scheme);
    }

    return u;
}
/* Spider-style fetcher: fetch a start URL, hand responses to a pool of
 * parser threads via a shared document queue, and keep driving the serf
 * event loop until no requests remain outstanding.
 *
 * Command line: [-a user:password] [-h] [-v] URL
 * Returns 0 on success, an APR status code on thread/mutex failures.
 */
int main(int argc, const char **argv)
{
    apr_status_t status;
    apr_pool_t *pool;
    apr_sockaddr_t *address;
    serf_context_t *context;
    serf_connection_t *connection;
    app_baton_t app_ctx;
    handler_baton_t *handler_ctx;
    apr_uri_t url;
    const char *raw_url, *method;
    int count;
    apr_getopt_t *opt;
    char opt_c;
    char *authn = NULL;
    const char *opt_arg;

    /* For the parser threads */
    apr_thread_t *thread[3];
    apr_threadattr_t *tattr;
    apr_status_t parser_status;
    parser_baton_t *parser_ctx;

    apr_initialize();
    atexit(apr_terminate);

    apr_pool_create(&pool, NULL);
    apr_atomic_init(pool);
    /* serf_initialize(); */

    /* Default to one round of fetching. */
    count = 1;
    /* Default to GET. */
    method = "GET";

    apr_getopt_init(&opt, pool, argc, argv);
    while ((status = apr_getopt(opt, "a:hv", &opt_c, &opt_arg)) ==
           APR_SUCCESS) {
        int srclen, enclen;

        switch (opt_c) {
        case 'a':
            /* Pre-encode "user:password" as a Basic Authorization value.
             * The extra 6 bytes hold the "Basic " prefix; enclen already
             * includes room for the terminating NUL. */
            srclen = strlen(opt_arg);
            enclen = apr_base64_encode_len(srclen);
            authn = apr_palloc(pool, enclen + 6);
            strcpy(authn, "Basic ");
            (void) apr_base64_encode(&authn[6], opt_arg, srclen);
            break;
        case 'h':
            print_usage(pool);
            exit(0);
            break;
        case 'v':
            puts("Serf version: " SERF_VERSION_STRING);
            exit(0);
        default:
            break;
        }
    }

    /* Exactly one positional argument (the URL) must remain. */
    if (opt->ind != opt->argc - 1) {
        print_usage(pool);
        exit(-1);
    }

    raw_url = argv[opt->ind];

    apr_uri_parse(pool, raw_url, &url);
    /* Normalise missing port/path to the scheme default and "/". */
    if (!url.port) {
        url.port = apr_uri_port_of_scheme(url.scheme);
    }
    if (!url.path) {
        url.path = "/";
    }

    if (strcasecmp(url.scheme, "https") == 0) {
        app_ctx.using_ssl = 1;
    }
    else {
        app_ctx.using_ssl = 0;
    }

    status = apr_sockaddr_info_get(&address,
                                   url.hostname, APR_UNSPEC, url.port, 0,
                                   pool);
    if (status) {
        printf("Error creating address: %d\n", status);
        exit(1);
    }

    context = serf_context_create(pool);

    /* ### Connection or Context should have an allocator? */
    app_ctx.bkt_alloc = serf_bucket_allocator_create(pool, NULL, NULL);
    app_ctx.ssl_ctx = NULL;
    app_ctx.authn = authn;

    connection = serf_connection_create(context, address,
                                        conn_setup, &app_ctx,
                                        closed_connection, &app_ctx,
                                        pool);

    handler_ctx = (handler_baton_t*)serf_bucket_mem_alloc(app_ctx.bkt_alloc,
                                                          sizeof(handler_baton_t));
    handler_ctx->allocator = app_ctx.bkt_alloc;
    handler_ctx->doc_queue = apr_array_make(pool, 1, sizeof(doc_path_t*));
    handler_ctx->doc_queue_alloc = app_ctx.bkt_alloc;

    /* Shared in-flight request counter; updated atomically by both the
     * response handler and the parser thread. */
    handler_ctx->requests_outstanding =
        (apr_uint32_t*)serf_bucket_mem_alloc(app_ctx.bkt_alloc,
                                             sizeof(apr_uint32_t));
    apr_atomic_set32(handler_ctx->requests_outstanding, 0);
    handler_ctx->hdr_read = 0;

    parser_ctx = (void*)serf_bucket_mem_alloc(app_ctx.bkt_alloc,
                                              sizeof(parser_baton_t));
    parser_ctx->requests_outstanding = handler_ctx->requests_outstanding;
    parser_ctx->connection = connection;
    parser_ctx->app_ctx = &app_ctx;
    parser_ctx->doc_queue = handler_ctx->doc_queue;
    parser_ctx->doc_queue_alloc = handler_ctx->doc_queue_alloc;
    /* Restrict ourselves to this host. */
    parser_ctx->hostinfo = url.hostinfo;

    status = apr_thread_mutex_create(&parser_ctx->mutex,
                                     APR_THREAD_MUTEX_DEFAULT, pool);
    if (status) {
        printf("Couldn't create mutex %d\n", status);
        return status;
    }

    status = apr_thread_cond_create(&parser_ctx->condvar, pool);
    if (status) {
        printf("Couldn't create condvar: %d\n", status);
        return status;
    }

    /* Let the handler know which condvar to use. */
    handler_ctx->doc_queue_condvar = parser_ctx->condvar;

    apr_threadattr_create(&tattr, pool);

    /* Start the parser thread. */
    apr_thread_create(&thread[0], tattr, parser_thread, parser_ctx, pool);

    /* Deliver the first request. */
    create_request(url.hostinfo, url.path, NULL, NULL, parser_ctx, pool);

    /* Go run our normal thread. */
    while (1) {
        int tries = 0;

        status = serf_context_run(context, SERF_DURATION_FOREVER, pool);
        if (APR_STATUS_IS_TIMEUP(status))
            continue;
        if (status) {
            char buf[200];

            printf("Error running context: (%d) %s\n", status,
                   apr_strerror(status, buf, sizeof(buf)));
            exit(1);
        }

        /* We run this check to allow our parser threads to add more
         * requests to our queue.  Poll up to three times (100ms apart);
         * if nothing becomes outstanding, assume the crawl is done. */
        for (tries = 0; tries < 3; tries++) {
            if (!apr_atomic_read32(handler_ctx->requests_outstanding)) {
#ifdef SERF_VERBOSE
                printf("Waiting...");
#endif
                apr_sleep(100000);
#ifdef SERF_VERBOSE
                printf("Done\n");
#endif
            }
            else {
                break;
            }
        }
        if (tries >= 3) {
            break;
        }
        /* Debugging purposes only! */
        serf_debug__closed_conn(app_ctx.bkt_alloc);
    }

    printf("Quitting...\n");
    serf_connection_close(connection);

    /* wake up the parser via condvar signal */
    apr_thread_cond_signal(parser_ctx->condvar);

    status = apr_thread_join(&parser_status, thread[0]);
    if (status) {
        printf("Error joining thread: %d\n", status);
        return status;
    }

    serf_bucket_mem_free(app_ctx.bkt_alloc, handler_ctx->requests_outstanding);
    serf_bucket_mem_free(app_ctx.bkt_alloc, parser_ctx);

    apr_pool_destroy(pool);
    return 0;
}
/* serf_get-style client: fetch a URL `count` times across `conn_count`
 * connections, with optional proxying, authentication, client
 * certificates, custom request headers and a request body from file.
 *
 * Returns 0 on success; errno/1 on option errors; exits on runtime
 * failures after destroying the pool.
 */
int main(int argc, const char **argv)
{
    apr_status_t status;
    apr_pool_t *pool;
    serf_bucket_alloc_t *bkt_alloc;
    serf_context_t *context;
    serf_connection_t **connections;
    app_baton_t app_ctx;
    handler_baton_t handler_ctx;
    serf_bucket_t *req_hdrs = NULL;
    apr_uri_t url;
    const char *proxy = NULL;
    const char *raw_url, *method, *req_body_path = NULL;
    int count, inflight, conn_count;
    int i;
    int print_headers, debug;
    const char *username = NULL;
    const char *password = "";
    const char *pem_path = NULL, *pem_pwd = NULL;
    apr_getopt_t *opt;
    int opt_c;
    const char *opt_arg;

    apr_initialize();
    atexit(apr_terminate);

    apr_pool_create(&pool, NULL);
    /* serf_initialize(); */
    bkt_alloc = serf_bucket_allocator_create(pool, NULL, NULL);

    /* Default to one round of fetching with no limit to max inflight reqs. */
    count = 1;
    inflight = 0;
    conn_count = 1;
    /* Default to GET. */
    method = "GET";
    /* Do not print headers by default. */
    print_headers = 0;
    /* Do not debug by default. */
    debug = 0;

    apr_getopt_init(&opt, pool, argc, argv);
    while ((status = apr_getopt_long(opt, options, &opt_c, &opt_arg)) ==
           APR_SUCCESS) {
        switch (opt_c) {
        case 'U':
            username = opt_arg;
            break;
        case 'P':
            password = opt_arg;
            break;
        case 'd':
            debug = 1;
            break;
        case 'f':
            req_body_path = opt_arg;
            break;
        case 'h':
            print_usage(pool);
            exit(0);
            break;
        case 'H':
            print_headers = 1;
            break;
        case 'm':
            method = opt_arg;
            break;
        case 'n':
            errno = 0;
            count = apr_strtoi64(opt_arg, NULL, 10);
            if (errno) {
                printf("Problem converting number of times to fetch URL (%d)\n",
                       errno);
                return errno;
            }
            break;
        case 'c':
            errno = 0;
            conn_count = apr_strtoi64(opt_arg, NULL, 10);
            if (errno) {
                printf("Problem converting number of concurrent connections to use (%d)\n",
                       errno);
                return errno;
            }
            if (conn_count <= 0) {
                printf("Invalid number of concurrent connections to use (%d)\n",
                       conn_count);
                return 1;
            }
            break;
        case 'x':
            errno = 0;
            inflight = apr_strtoi64(opt_arg, NULL, 10);
            if (errno) {
                printf("Problem converting number of requests to have outstanding (%d)\n",
                       errno);
                return errno;
            }
            break;
        case 'p':
            proxy = opt_arg;
            break;
        case 'r':
            {
                char *sep;
                char *hdr_val;

                if (req_hdrs == NULL) {
                    /* first request header, allocate bucket */
                    req_hdrs = serf_bucket_headers_create(bkt_alloc);
                }

                /* Split "Name: value", requiring a non-empty name and
                 * at least one character after the colon. */
                sep = strchr(opt_arg, ':');
                if ((sep == NULL) || (sep == opt_arg) || (strlen(sep) <= 1)) {
                    printf("Invalid request header string (%s)\n", opt_arg);
                    return EINVAL;
                }
                hdr_val = sep + 1;
                while (*hdr_val == ' ') {
                    hdr_val++;
                }
                serf_bucket_headers_setx(req_hdrs, opt_arg, (sep - opt_arg), 1,
                                         hdr_val, strlen(hdr_val), 1);
            }
            break;
        case CERTFILE:
            pem_path = opt_arg;
            break;
        case CERTPWD:
            pem_pwd = opt_arg;
            break;
        case 'v':
            puts("Serf version: " SERF_VERSION_STRING);
            exit(0);
        default:
            break;
        }
    }

    /* Exactly one positional argument (the URL) must remain. */
    if (opt->ind != opt->argc - 1) {
        print_usage(pool);
        exit(-1);
    }

    raw_url = argv[opt->ind];

    apr_uri_parse(pool, raw_url, &url);
    /* Normalise missing port/path to the scheme default and "/". */
    if (!url.port) {
        url.port = apr_uri_port_of_scheme(url.scheme);
    }
    if (!url.path) {
        url.path = "/";
    }

    if (strcasecmp(url.scheme, "https") == 0) {
        app_ctx.using_ssl = 1;
    }
    else {
        app_ctx.using_ssl = 0;
    }

    /* HEAD responses carry no body; the handler needs to know. */
    if (strcasecmp(method, "HEAD") == 0) {
        app_ctx.head_request = 1;
    }
    else {
        app_ctx.head_request = 0;
    }

    app_ctx.hostinfo = url.hostinfo;
    app_ctx.pem_path = pem_path;
    app_ctx.pem_pwd = pem_pwd;

    context = serf_context_create(pool);
    app_ctx.serf_ctx = context;

    if (proxy) {
        apr_sockaddr_t *proxy_address = NULL;
        apr_port_t proxy_port;
        char *proxy_host;
        char *proxy_scope;

        status = apr_parse_addr_port(&proxy_host, &proxy_scope,
                                     &proxy_port, proxy, pool);
        if (status) {
            printf("Cannot parse proxy hostname/port: %d\n", status);
            apr_pool_destroy(pool);
            exit(1);
        }

        if (!proxy_host) {
            printf("Proxy hostname must be specified\n");
            apr_pool_destroy(pool);
            exit(1);
        }

        if (!proxy_port) {
            printf("Proxy port must be specified\n");
            apr_pool_destroy(pool);
            exit(1);
        }

        status = apr_sockaddr_info_get(&proxy_address, proxy_host,
                                       APR_UNSPEC, proxy_port, 0, pool);
        if (status) {
            printf("Cannot resolve proxy address '%s': %d\n",
                   proxy_host, status);
            apr_pool_destroy(pool);
            exit(1);
        }

        serf_config_proxy(context, proxy_address);
    }

    /* With explicit credentials allow every auth scheme; otherwise only
     * the schemes that can authenticate without a password. */
    if (username) {
        serf_config_authn_types(context, SERF_AUTHN_ALL);
    }
    else {
        serf_config_authn_types(context, SERF_AUTHN_NTLM | SERF_AUTHN_NEGOTIATE);
    }

    serf_config_credentials_callback(context, credentials_callback);

    /* Setup debug logging */
    if (debug) {
        serf_log_output_t *output;
        apr_status_t status;

        status = serf_logging_create_stream_output(&output,
                                                   context,
                                                   SERF_LOG_DEBUG,
                                                   SERF_LOGCOMP_ALL_MSG,
                                                   SERF_LOG_DEFAULT_LAYOUT,
                                                   stderr,
                                                   pool);
        if (!status)
            serf_logging_add_output(context, output);
    }

    /* ### Connection or Context should have an allocator? */
    app_ctx.bkt_alloc = bkt_alloc;

    connections = apr_pcalloc(pool, conn_count * sizeof(serf_connection_t*));
    for (i = 0; i < conn_count; i++) {
        conn_baton_t *conn_ctx = apr_pcalloc(pool, sizeof(*conn_ctx));
        conn_ctx->app = &app_ctx;
        conn_ctx->ssl_ctx = NULL;

        status = serf_connection_create2(&connections[i], context, url,
                                         conn_setup, conn_ctx,
                                         closed_connection, conn_ctx,
                                         pool);
        if (status) {
            printf("Error creating connection: %d\n", status);
            apr_pool_destroy(pool);
            exit(1);
        }
        serf_connection_set_max_outstanding_requests(connections[i], inflight);
    }

    handler_ctx.completed_requests = 0;
    handler_ctx.print_headers = print_headers;
#if APR_VERSION_AT_LEAST(1, 3, 0)
    apr_file_open_flags_stdout(&handler_ctx.output_file, APR_BUFFERED, pool);
#else
    apr_file_open_stdout(&handler_ctx.output_file, pool);
#endif

    handler_ctx.host = url.hostinfo;
    handler_ctx.method = method;
    /* Re-attach the query string to the path, if any. */
    handler_ctx.path = apr_pstrcat(pool,
                                   url.path,
                                   url.query ? "?" : "",
                                   url.query ? url.query : "",
                                   NULL);
    handler_ctx.username = username;
    handler_ctx.password = password;
    handler_ctx.auth_attempts = 0;

    handler_ctx.req_body_path = req_body_path;

    handler_ctx.acceptor = accept_response;
    handler_ctx.acceptor_baton = &app_ctx;
    handler_ctx.handler = handle_response;
    handler_ctx.req_hdrs = req_hdrs;

    /* Distribute the requests round-robin over the connections. */
    for (i = 0; i < count; i++) {
        /* We don't need the returned request here. */
        serf_connection_request_create(connections[i % conn_count],
                                       setup_request, &handler_ctx);
    }

    while (1) {
        status = serf_context_run(context, SERF_DURATION_FOREVER, pool);
        if (APR_STATUS_IS_TIMEUP(status))
            continue;
        if (status) {
            char buf[200];
            const char *err_string;
            err_string = serf_error_string(status);
            if (!err_string) {
                err_string = apr_strerror(status, buf, sizeof(buf));
            }

            printf("Error running context: (%d) %s\n", status, err_string);
            apr_pool_destroy(pool);
            exit(1);
        }
        if (apr_atomic_read32(&handler_ctx.completed_requests) >= count) {
            break;
        }
        /* Debugging purposes only! */
        serf_debug__closed_conn(app_ctx.bkt_alloc);
    }

    apr_file_close(handler_ctx.output_file);

    for (i = 0; i < conn_count; i++) {
        serf_connection_close(connections[i]);
    }

    apr_pool_destroy(pool);
    return 0;
}
/*
 * Canonicalise http-like URLs.
 *  scheme is the scheme for the URL
 *  url is the URL starting with the first '/'
 *  def_port is the default port for this scheme.
 */
static int proxy_wstunnel_canon(request_rec *r, char *url)
{
    const char *err;
    char *host, *path, *scheme;
    char *search = NULL;
    char sport[7];              /* ":65535" + NUL */
    apr_port_t port, def_port;

    /* Map the websocket scheme onto the matching http(s) default port
     * (ap_port_of_scheme()). */
    if (strncasecmp(url, "ws:", 3) == 0) {
        scheme = "ws:";
        url += 3;
        def_port = apr_uri_port_of_scheme("http");
    }
    else if (strncasecmp(url, "wss:", 4) == 0) {
        scheme = "wss:";
        url += 4;
        def_port = apr_uri_port_of_scheme("https");
    }
    else {
        return DECLINED;
    }

    port = def_port;
    ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "canonicalising URL %s",
                  url);

    /*
     * do syntactic check.
     * We break the URL into host, port, path, search
     */
    err = ap_proxy_canon_netloc(r->pool, &url, NULL, NULL, &host, &port);
    if (err) {
        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02439)
                      "error parsing URL %s: %s", url, err);
        return HTTP_BAD_REQUEST;
    }

    /*
     * now parse path/search args, according to rfc1738:
     * process the path. With proxy-nocanon set (by
     * mod_proxy) we use the raw, unparsed uri
     */
    if (apr_table_get(r->notes, "proxy-nocanon")) {
        path = url;             /* this is the raw path */
    }
    else {
        path = ap_proxy_canonenc(r->pool, url, strlen(url), enc_path, 0,
                                 r->proxyreq);
        search = r->args;
    }
    if (path == NULL) {
        return HTTP_BAD_REQUEST;
    }

    apr_snprintf(sport, sizeof(sport), ":%d", port);

    if (ap_strchr_c(host, ':')) {
        /* if literal IPv6 address */
        host = apr_pstrcat(r->pool, "[", host, "]", NULL);
    }
    r->filename = apr_pstrcat(r->pool, "proxy:", scheme, "//", host, sport,
                              "/", path,
                              (search) ? "?" : "",
                              (search) ? search : "",
                              NULL);
    return OK;
}
/* Unparse a apr_uri_t structure to an URI string.
 * Optionally suppress the password for security reasons.
 *
 * flags is a bitmask of APR_URI_UNP_* values controlling which
 * components (site part, user, password, path info, query/fragment)
 * appear in the result.  Returns a string allocated from pool p,
 * possibly "".
 */
APU_DECLARE(char *) apr_uri_unparse(apr_pool_t *p, const apr_uri_t *uptr,
                                    unsigned flags)
{
    char *ret = "";

    /* If suppressing the site part, omit both user name & scheme://hostname */
    if (!(flags & APR_URI_UNP_OMITSITEPART)) {

        /* Construct a "user:password@" string, honoring the passed
         * APR_URI_UNP_ flags: */
        if (uptr->user || uptr->password) {
            ret = apr_pstrcat(p,
                      (uptr->user && !(flags & APR_URI_UNP_OMITUSER))
                          ? uptr->user : "",
                      (uptr->password && !(flags & APR_URI_UNP_OMITPASSWORD))
                          ? ":" : "",
                      (uptr->password && !(flags & APR_URI_UNP_OMITPASSWORD))
                          ? ((flags & APR_URI_UNP_REVEALPASSWORD)
                              ? uptr->password : "******")
                          : "",
                      ((uptr->user && !(flags & APR_URI_UNP_OMITUSER)) ||
                       (uptr->password && !(flags & APR_URI_UNP_OMITPASSWORD)))
                          ? "@" : "",
                      NULL);
        }

        /* Construct scheme://site string */
        if (uptr->hostname) {
            int is_default_port;
            const char *lbrk = "", *rbrk = "";

            /* IPv6 literals must be re-bracketed in the output. */
            if (strchr(uptr->hostname, ':')) { /* v6 literal */
                lbrk = "[";
                rbrk = "]";
            }

            /* The port is omitted when absent, zero, or equal to the
             * scheme's well-known default port. */
            is_default_port =
                (uptr->port_str == NULL ||
                 uptr->port == 0 ||
                 uptr->port == apr_uri_port_of_scheme(uptr->scheme));

            if (uptr->scheme) {
                ret = apr_pstrcat(p,
                          uptr->scheme, "://", ret,
                          lbrk, uptr->hostname, rbrk,
                          is_default_port ? "" : ":",
                          is_default_port ? "" : uptr->port_str,
                          NULL);
            }
            else {
                /* A violation of RFC2396, but it is clear from section 3.2
                 * that the : belongs above to the scheme, while // belongs
                 * to the authority, so include the authority prefix while
                 * omitting the "scheme:" that the user neglected to pass us.
                 */
                ret = apr_pstrcat(p,
                          "//", ret,
                          lbrk, uptr->hostname, rbrk,
                          is_default_port ? "" : ":",
                          is_default_port ? "" : uptr->port_str,
                          NULL);
            }
        }
    }

    /* Should we suppress all path info? */
    if (!(flags & APR_URI_UNP_OMITPATHINFO)) {

        /* Append path, query and fragment strings.
         * NOTE: when the fragment is suppressed the "#" argument becomes
         * NULL, which intentionally terminates apr_pstrcat's variadic
         * argument list early, dropping the fragment arguments after it. */
        ret = apr_pstrcat(p,
                  ret,
                  (uptr->path) ? uptr->path : "",
                  (uptr->query && !(flags & APR_URI_UNP_OMITQUERY))
                      ? "?" : "",
                  (uptr->query && !(flags & APR_URI_UNP_OMITQUERY))
                      ? uptr->query : "",
                  (uptr->fragment && !(flags & APR_URI_UNP_OMITQUERY))
                      ? "#" : NULL,
                  (uptr->fragment && !(flags & APR_URI_UNP_OMITQUERY))
                      ? uptr->fragment : NULL,
                  NULL);
    }
    return ret;
}
/* parse_uri_components():
 * Parse a given URI, fill in all supplied fields of a uri_components
 * structure. This eliminates the necessity of extracting host, port,
 * path, query info repeatedly in the modules.
 * Side effects:
 *  - fills in fields of uri_components *uptr
 *  - none on any of the r->* fields
 *
 * Returns APR_SUCCESS, or APR_EGENERAL on a malformed port or an
 * unterminated IPv6 literal.
 */
APU_DECLARE(apr_status_t) apr_uri_parse(apr_pool_t *p, const char *uri,
                                        apr_uri_t *uptr)
{
    const char *s;
    const char *s1;
    const char *hostinfo;
    char *endstr;
    int port;
    int v6_offset1 = 0, v6_offset2 = 0;

    /* Initialize the structure. parse_uri() and parse_uri_components()
     * can be called more than once per request.
     */
    memset (uptr, '\0', sizeof(*uptr));
    uptr->is_initialized = 1;

    /* We assume the processor has a branch predictor like most --
     * it assumes forward branches are untaken and backwards are taken.  That's
     * the reason for the gotos.  -djg
     */
    if (uri[0] == '/') {
        /* RFC2396 #4.3 says that two leading slashes mean we have an
         * authority component, not a path!  Fixing this looks scary
         * with the gotos here.  But if the existing logic is valid,
         * then presumably a goto pointing to deal_with_authority works.
         *
         * RFC2396 describes this as resolving an ambiguity.  In the
         * case of three or more slashes there would seem to be no
         * ambiguity, so it is a path after all.
         */
        if (uri[1] == '/' && uri[2] != '/') {
            s = uri + 2 ;
            goto deal_with_authority ;
        }

deal_with_path:
        /* we expect uri to point to first character of path ... remember
         * that the path could be empty -- http://foobar?query for example
         */
        s = uri;
        /* scan to the end of the path (stops at NUL, '?' or '#') */
        while ((uri_delims[*(unsigned char *)s] & NOTEND_PATH) == 0) {
            ++s;
        }
        if (s != uri) {
            uptr->path = apr_pstrmemdup(p, uri, s - uri);
        }
        if (*s == 0) {
            return APR_SUCCESS;
        }
        if (*s == '?') {
            ++s;
            s1 = strchr(s, '#');
            if (s1) {
                /* query ends where the fragment begins */
                uptr->fragment = apr_pstrdup(p, s1 + 1);
                uptr->query = apr_pstrmemdup(p, s, s1 - s);
            }
            else {
                uptr->query = apr_pstrdup(p, s);
            }
            return APR_SUCCESS;
        }
        /* otherwise it's a fragment */
        uptr->fragment = apr_pstrdup(p, s + 1);
        return APR_SUCCESS;
    }

    /* find the scheme: */
    s = uri;
    while ((uri_delims[*(unsigned char *)s] & NOTEND_SCHEME) == 0) {
        ++s;
    }
    /* scheme must be non-empty and followed by :// */
    if (s == uri || s[0] != ':' || s[1] != '/' || s[2] != '/') {
        goto deal_with_path;        /* backwards predicted taken! */
    }

    uptr->scheme = apr_pstrmemdup(p, uri, s - uri);
    s += 3;

deal_with_authority:
    hostinfo = s;
    while ((uri_delims[*(unsigned char *)s] & NOTEND_HOSTINFO) == 0) {
        ++s;
    }
    uri = s;        /* whatever follows hostinfo is start of uri */
    uptr->hostinfo = apr_pstrmemdup(p, hostinfo, uri - hostinfo);

    /* If there's a username:password@host:port, the @ we want is the last @...
     * too bad there's no memrchr()... For the C purists, note that hostinfo
     * is definitely not the first character of the original uri so therefore
     * &hostinfo[-1] < &hostinfo[0] ... and this loop is valid C.
     */
    do {
        --s;
    } while (s >= hostinfo && *s != '@');
    if (s < hostinfo) {
        /* again we want the common case to be fall through */
deal_with_host:
        /* We expect hostinfo to point to the first character of
         * the hostname.  If there's a port it is the first colon,
         * except with IPv6.
         */
        if (*hostinfo == '[') {
            /* v6_offset1/2 strip the surrounding brackets from the
             * stored hostname below */
            v6_offset1 = 1;
            v6_offset2 = 2;
            s = memchr(hostinfo, ']', uri - hostinfo);
            if (s == NULL) {
                return APR_EGENERAL;
            }
            if (*++s != ':') {
                s = NULL; /* no port */
            }
        }
        else {
            s = memchr(hostinfo, ':', uri - hostinfo);
        }
        if (s == NULL) {
            /* we expect the common case to have no port */
            uptr->hostname = apr_pstrmemdup(p,
                                            hostinfo + v6_offset1,
                                            uri - hostinfo - v6_offset2);
            goto deal_with_path;
        }
        uptr->hostname = apr_pstrmemdup(p,
                                        hostinfo + v6_offset1,
                                        s - hostinfo - v6_offset2);
        ++s;
        uptr->port_str = apr_pstrmemdup(p, s, uri - s);
        if (uri != s) {
            port = strtol(uptr->port_str, &endstr, 10);
            uptr->port = port;
            if (*endstr == '\0') {
                goto deal_with_path;
            }
            /* Invalid characters after ':' found */
            return APR_EGENERAL;
        }
        /* empty port string ("host:"): fall back to the scheme default */
        uptr->port = apr_uri_port_of_scheme(uptr->scheme);
        goto deal_with_path;
    }

    /* first colon delimits username:password */
    s1 = memchr(hostinfo, ':', s - hostinfo);
    if (s1) {
        uptr->user = apr_pstrmemdup(p, hostinfo, s1 - hostinfo);
        ++s1;
        uptr->password = apr_pstrmemdup(p, s1, s - s1);
    }
    else {
        uptr->user = apr_pstrmemdup(p, hostinfo, s - hostinfo);
    }
    hostinfo = s + 1;
    goto deal_with_host;
}
/* Build the canonical cache key ("scheme://host[:port]path?query") for
 * this request into *key, normalising hostname/scheme/port casing and
 * optionally stripping configured session identifiers and/or the
 * query string.  Idempotent: returns immediately if *key is already set.
 */
static apr_status_t cache_canonicalise_key(request_rec *r, apr_pool_t* p,
                                           const char *uri,
                                           apr_uri_t *parsed_uri,
                                           const char **key)
{
    cache_server_conf *conf;
    char *port_str, *hn, *lcs;
    const char *hostname, *scheme;
    int i;
    const char *path;
    char *querystring;

    if (*key) {
        /*
         * We have been here before during the processing of this request.
         */
        return APR_SUCCESS;
    }

    /*
     * Get the module configuration. We need this for the CacheIgnoreQueryString
     * option below.
     */
    conf = (cache_server_conf *) ap_get_module_config(r->server->module_config,
                                                      &cache_module);

    /*
     * Use the canonical name to improve cache hit rate, but only if this is
     * not a proxy request or if this is a reverse proxy request.
     * We need to handle both cases in the same manner as for the reverse proxy
     * case we have the following situation:
     *
     * If a cached entry is looked up by mod_cache's quick handler r->proxyreq
     * is still unset in the reverse proxy case as it only gets set in the
     * translate name hook (either by ProxyPass or mod_rewrite) which is run
     * after the quick handler hook. This is different to the forward proxy
     * case where it gets set before the quick handler is run (in the
     * post_read_request hook).
     * If a cache entry is created by the CACHE_SAVE filter we always have
     * r->proxyreq set correctly.
     * So we must ensure that in the reverse proxy case we use the same code
     * path and using the canonical name seems to be the right thing to do
     * in the reverse proxy case.
     */
    if (!r->proxyreq || (r->proxyreq == PROXYREQ_REVERSE)) {
        if (conf->base_uri && conf->base_uri->hostname) {
            hostname = conf->base_uri->hostname;
        }
        else {
            /* Use _default_ as the hostname if none present, as in mod_vhost */
            hostname = ap_get_server_name(r);
            if (!hostname) {
                hostname = "_default_";
            }
        }
    }
    else if (parsed_uri->hostname) {
        /* Copy the parsed uri hostname */
        hn = apr_pstrdup(p, parsed_uri->hostname);
        ap_str_tolower(hn);
        /* const work-around */
        hostname = hn;
    }
    else {
        /* We are a proxied request, with no hostname. Unlikely
         * to get very far - but just in case */
        hostname = "_default_";
    }

    /*
     * Copy the scheme, ensuring that it is lower case. If the parsed uri
     * contains no string or if this is not a proxy request get the http
     * scheme for this request. As r->parsed_uri.scheme is not set if this
     * is a reverse proxy request, it is ensured that the cases
     * "no proxy request" and "reverse proxy request" are handled in the same
     * manner (see above why this is needed).
     */
    if (r->proxyreq && parsed_uri->scheme) {
        /* Copy the scheme and lower-case it */
        lcs = apr_pstrdup(p, parsed_uri->scheme);
        ap_str_tolower(lcs);
        /* const work-around */
        scheme = lcs;
    }
    else {
        if (conf->base_uri && conf->base_uri->scheme) {
            scheme = conf->base_uri->scheme;
        }
        else {
            scheme = ap_http_scheme(r);
        }
    }

    /*
     * If this is a proxy request, but not a reverse proxy request (see comment
     * above why these cases must be handled in the same manner), copy the
     * URI's port-string (which may be a service name). If the URI contains
     * no port-string, use apr-util's notion of the default port for that
     * scheme - if available. Otherwise use the port-number of the current
     * server.
     */
    if (r->proxyreq && (r->proxyreq != PROXYREQ_REVERSE)) {
        if (parsed_uri->port_str) {
            /* ":" prefix plus lower-cased port string (may be a
             * service name, hence apr_tolower per character) */
            port_str = apr_pcalloc(p, strlen(parsed_uri->port_str) + 2);
            port_str[0] = ':';
            for (i = 0; parsed_uri->port_str[i]; i++) {
                port_str[i + 1] = apr_tolower(parsed_uri->port_str[i]);
            }
        }
        else if (apr_uri_port_of_scheme(scheme)) {
            port_str = apr_psprintf(p, ":%u", apr_uri_port_of_scheme(scheme));
        }
        else {
            /* No port string given in the AbsoluteUri, and we have no
             * idea what the default port for the scheme is. Leave it
             * blank and live with the inefficiency of some extra cached
             * entities.
             */
            port_str = "";
        }
    }
    else {
        if (conf->base_uri && conf->base_uri->port_str) {
            port_str = conf->base_uri->port_str;
        }
        else if (conf->base_uri && conf->base_uri->hostname) {
            port_str = "";
        }
        else {
            /* Use the server port */
            port_str = apr_psprintf(p, ":%u", ap_get_server_port(r));
        }
    }

    /*
     * Check if we need to ignore session identifiers in the URL and do so
     * if needed.
     */
    path = uri;
    querystring = parsed_uri->query;
    if (conf->ignore_session_id->nelts) {
        int i;
        char **identifier;

        identifier = (char **) conf->ignore_session_id->elts;
        for (i = 0; i < conf->ignore_session_id->nelts; i++, identifier++) {
            int len;
            const char *param;

            len = strlen(*identifier);
            /*
             * Check that we have a parameter separator in the last segment
             * of the path and that the parameter matches our identifier
             */
            if ((param = ap_strrchr_c(path, ';'))
                && !strncmp(param + 1, *identifier, len)
                && (*(param + len + 1) == '=')
                && !ap_strchr_c(param + len + 2, '/')) {
                path = apr_pstrndup(p, path, param - path);
                continue;
            }
            /*
             * Check if the identifier is in the querystring and cut it out.
             */
            if (querystring) {
                /*
                 * First check if the identifier is at the beginning of the
                 * querystring and followed by a '='
                 */
                if (!strncmp(querystring, *identifier, len)
                    && (*(querystring + len) == '=')) {
                    param = querystring;
                }
                else {
                    char *complete;

                    /*
                     * In order to avoid subkey matching (PR 48401) prepend
                     * identifier with a '&' and append a '='
                     */
                    complete = apr_pstrcat(p, "&", *identifier, "=", NULL);
                    param = strstr(querystring, complete);
                    /* If we found something we are sitting on the '&' */
                    if (param) {
                        param++;
                    }
                }
                if (param) {
                    const char *amp;

                    if (querystring != param) {
                        querystring = apr_pstrndup(p, querystring,
                                                   param - querystring);
                    }
                    else {
                        querystring = "";
                    }

                    if ((amp = ap_strchr_c(param + len + 1, '&'))) {
                        querystring = apr_pstrcat(p, querystring, amp + 1,
                                                  NULL);
                    }
                    else {
                        /*
                         * If querystring is not "", then we have the case
                         * that the identifier parameter we removed was the
                         * last one in the original querystring. Hence we have
                         * a trailing '&' which needs to be removed.
                         */
                        if (*querystring) {
                            querystring[strlen(querystring) - 1] = '\0';
                        }
                    }
                }
            }
        }
    }

    /* Key format is a URI, optionally without the query-string */
    if (conf->ignorequerystring) {
        *key = apr_pstrcat(p, scheme, "://", hostname, port_str,
                           path, "?", NULL);
    }
    else {
        *key = apr_pstrcat(p, scheme, "://", hostname, port_str,
                           path, "?", querystring, NULL);
    }

    /*
     * Store the key in the request_config for the cache as r->parsed_uri
     * might have changed in the time from our first visit here triggered by the
     * quick handler and our possible second visit triggered by the CACHE_SAVE
     * filter (e.g. r->parsed_uri got unescaped). In this case we would save the
     * resource in the cache under a key where it is never found by the quick
     * handler during following requests.
     */
    ap_log_rerror(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, r, APLOGNO(00698)
                  "cache: Key for entity %s?%s is %s", uri,
                  parsed_uri->query, *key);

    return APR_SUCCESS;
}
/*
 * Fetch "url_path" with a blocking HTTP GET via libserf.
 *
 * r                - current request; used for logging and to look up the
 *                    User-Agent header to forward.
 * ppool            - pool the returned body buffer is allocated from.
 * url_path         - URL to fetch; missing port/path/hostname components
 *                    are defaulted below.
 * set_headers_flag - when non-zero, copy the response headers into
 *                    r->headers_out and propagate Content-Type.
 * response_len     - out: length of the returned body (0 when empty).
 *
 * Returns the response body (always nul-terminated, possibly empty), or
 * NULL when hostname resolution fails.
 */
char *
default_chxj_serf_get(request_rec *r, apr_pool_t *ppool, const char *url_path,
                      int set_headers_flag, apr_size_t *response_len)
{
  apr_pool_t        *pool;
  apr_uri_t         url;
  apr_status_t      rv;
  apr_sockaddr_t    *address = NULL;
  serf_context_t    *context;
  serf_connection_t *connection;
  app_ctx_t         app_ctx;
  handler_ctx_t     handler_ctx;
  char              *ret;

  s_init(ppool, &pool);

  apr_uri_parse(pool, url_path, &url);

  /* Fill in the pieces apr_uri_parse() leaves NULL/0 for partial URLs.
   * apr_uri_port_of_scheme(NULL) returns 0, so the second check still
   * catches the no-scheme case and falls back to port 80. */
  if (!url.port) {
    url.port = apr_uri_port_of_scheme(url.scheme);
  }
  if (!url.port) {
    url.port = 80;
  }
  if (!url.path) {
    url.path = "/";
  }
  if (!url.hostname) {
    url.hostname = "localhost";
  }
  /* serf is handed path+query as a single request path. */
  if (url.query) {
    url.path = apr_psprintf(pool, "%s?%s", url.path, url.query);
  }

  rv = apr_sockaddr_info_get(&address, url.hostname, APR_UNSPEC, url.port, 0, pool);
  if (rv != APR_SUCCESS) {
    char buf[256];
    ERR(r, "REQ[%X] %s:%d apr_sockaddr_info_get() failed: rv:[%d|%s] - Please check DNS settings.",
        (unsigned int)(apr_size_t)r, __FILE__, __LINE__, rv, apr_strerror(rv, buf, 256));
    return NULL;
  }

  memset(&app_ctx, 0, sizeof(app_ctx_t));
  app_ctx.bkt_alloc = serf_bucket_allocator_create(pool, NULL, NULL);
  /* BUGFIX: url.scheme is NULL for scheme-less URLs (only port/path/hostname
   * are defaulted above); guard before strcasecmp() to avoid a NULL deref. */
  if (url.scheme && strcasecmp(url.scheme, "https") == 0) {
    app_ctx.ssl_flag = 1;
  }

  context = serf_context_create(pool);
  connection = serf_connection_create(context, address,
                                      s_connection_setup, &app_ctx,
                                      s_connection_closed, &app_ctx,
                                      pool);

  memset(&handler_ctx, 0, sizeof(handler_ctx_t));
  handler_ctx.requests_outstanding = 0;
  handler_ctx.host   = url.hostinfo;
  handler_ctx.method = "GET";
  handler_ctx.path   = url.path;
  /* Prefer the mod_chxj-specific UA header; fall back to the normal one. */
  handler_ctx.user_agent = (char *)apr_table_get(r->headers_in, CHXJ_HTTP_USER_AGENT);
  if (!handler_ctx.user_agent) {
    handler_ctx.user_agent = (char *)apr_table_get(r->headers_in, HTTP_USER_AGENT);
  }
  handler_ctx.post_data     = NULL;
  handler_ctx.post_data_len = 0;
  handler_ctx.acceptor      = s_accept_response;
  handler_ctx.acceptor_ctx  = &app_ctx;
  handler_ctx.handler       = s_handle_response;
  handler_ctx.pool          = pool;
  handler_ctx.r             = r;
  handler_ctx.response_len  = 0;
  handler_ctx.response      = NULL;

  serf_connection_request_create(connection, s_setup_request, &handler_ctx);

  /* Pump the serf event loop until the request completes or errors out. */
  while (1) {
    rv = serf_context_run(context, SERF_DURATION_FOREVER, pool);
    if (APR_STATUS_IS_TIMEUP(rv))
      continue;
    if (rv) {
      char buf[200];
      ERR(r, "Error running context: (%d) %s\n", rv, apr_strerror(rv, buf, sizeof(buf)));
      break;
    }
    if (!apr_atomic_read32(&handler_ctx.requests_outstanding)) {
      if (handler_ctx.rv != APR_SUCCESS) {
        char buf[200];
        ERR(r, "Error running context: (%d) %s\n", handler_ctx.rv,
            apr_strerror(handler_ctx.rv, buf, sizeof(buf)));
      }
      break;
    }
  }
  serf_connection_close(connection);

  /* Copy the body into the caller's pool, always nul-terminated. */
  if (handler_ctx.response) {
    ret = apr_palloc(ppool, handler_ctx.response_len + 1);
    memset(ret, 0, handler_ctx.response_len + 1);
    memcpy(ret, handler_ctx.response, handler_ctx.response_len);
  }
  else {
    ret = apr_pstrdup(ppool, "");
  }
  *response_len = handler_ctx.response_len;

  if (set_headers_flag) {
    char *contentType;
    r->headers_out = apr_table_copy(pool, handler_ctx.headers_out);
    /* (removed a duplicated *response_len assignment here) */
    contentType = (char *)apr_table_get(handler_ctx.headers_out, "Content-Type");
    if (contentType) {
      chxj_set_content_type(r, contentType);
    }
  }
  return ret;
}
/*
 * Issue a blocking request for "url_path" via libserf and return the
 * response headers, discarding the body.
 *
 * NOTE: a GET is sent instead of HEAD because (per the original comment)
 * libserf's response handling polls for a body, which a HEAD reply lacks.
 *
 * r             - current request; used for logging and User-Agent lookup.
 * ppool         - parent pool used to create the working pool.
 * url_path      - URL to probe; missing components are defaulted below.
 * response_code - out: HTTP status code of the response.
 *
 * Returns the response header table, or NULL when hostname resolution fails.
 */
apr_table_t *
default_chxj_serf_head(request_rec *r, apr_pool_t *ppool, const char *url_path,
                       int *response_code)
{
  apr_pool_t        *pool;
  apr_uri_t         url;
  apr_status_t      rv;
  apr_sockaddr_t    *address = NULL;
  serf_context_t    *context;
  serf_connection_t *connection;
  app_ctx_t         app_ctx;
  handler_ctx_t     handler_ctx;

  DBG(r, "REQ[%X] start %s()", TO_ADDR(r), __func__);

  s_init(ppool, &pool);

  apr_uri_parse(pool, url_path, &url);

  /* Fill in the pieces apr_uri_parse() leaves NULL/0 for partial URLs.
   * apr_uri_port_of_scheme(NULL) returns 0, so the second check still
   * catches the no-scheme case and falls back to port 80. */
  if (!url.port) {
    url.port = apr_uri_port_of_scheme(url.scheme);
  }
  if (!url.port) {
    url.port = 80;
  }
  if (!url.path) {
    url.path = "/";
  }
  if (!url.hostname) {
    url.hostname = "localhost";
  }
  if (url.query) {
    url.path = apr_psprintf(pool, "%s?%s", url.path, url.query);
  }

  rv = apr_sockaddr_info_get(&address, url.hostname, APR_UNSPEC, url.port, 0, pool);
  if (rv != APR_SUCCESS) {
    char buf[256];
    ERR(r, "apr_sockaddr_info_get() failed: rv:[%d|%s]", rv, apr_strerror(rv, buf, 256));
    return NULL;
  }

  memset(&app_ctx, 0, sizeof(app_ctx_t));
  app_ctx.bkt_alloc = serf_bucket_allocator_create(pool, NULL, NULL);
  /* BUGFIX: url.scheme is NULL for scheme-less URLs; guard before
   * strcasecmp() to avoid a NULL deref. */
  if (url.scheme && strcasecmp(url.scheme, "https") == 0) {
    app_ctx.ssl_flag = 1;
  }

  context = serf_context_create(pool);
  connection = serf_connection_create(context, address,
                                      s_connection_setup, &app_ctx,
                                      s_connection_closed, &app_ctx,
                                      pool);

  memset(&handler_ctx, 0, sizeof(handler_ctx_t));
  handler_ctx.requests_outstanding = 0;
  handler_ctx.host = url.hostinfo;
  /*========================================================================================================*/
  /* XXX Maybe, libserf doesn't support the HEAD request. Because the part body is waited for with polling. */
  /*========================================================================================================*/
  handler_ctx.method = "GET";
  handler_ctx.path   = url.path;
  handler_ctx.user_agent = (char *)apr_table_get(r->headers_in, CHXJ_HTTP_USER_AGENT);
  if (!handler_ctx.user_agent) {
    handler_ctx.user_agent = (char *)apr_table_get(r->headers_in, HTTP_USER_AGENT);
  }
  handler_ctx.post_data     = NULL;
  handler_ctx.post_data_len = 0;
  handler_ctx.acceptor      = s_accept_response;
  handler_ctx.acceptor_ctx  = &app_ctx;
  handler_ctx.handler       = s_handle_response;
  handler_ctx.pool          = pool;
  handler_ctx.r             = r;
  handler_ctx.response_len  = 0;
  handler_ctx.response      = NULL;

  serf_connection_request_create(connection, s_setup_request, &handler_ctx);

  /* Pump the serf event loop until the request completes or errors out. */
  while (1) {
    rv = serf_context_run(context, SERF_DURATION_FOREVER, pool);
    if (APR_STATUS_IS_TIMEUP(rv))
      continue;
    if (rv) {
      char buf[200];
      ERR(r, "Error running context: (%d) %s\n", rv, apr_strerror(rv, buf, sizeof(buf)));
      break;
    }
    if (!apr_atomic_read32(&handler_ctx.requests_outstanding)) {
      if (handler_ctx.rv != APR_SUCCESS) {
        char buf[200];
        ERR(r, "Error running context: (%d) %s\n", handler_ctx.rv,
            apr_strerror(handler_ctx.rv, buf, sizeof(buf)));
      }
      break;
    }
  }

  DBG(r, "REQ[%X] end of serf request", TO_ADDR(r));
  DBG(r, "REQ[%X] response_code:[%d]", TO_ADDR(r), handler_ctx.response_code);
  DBG(r, "REQ[%X] response:[%s][%" APR_SIZE_T_FMT "]",
      TO_ADDR(r), handler_ctx.response, handler_ctx.response_len);

  serf_connection_close(connection);

  /* NOTE: the original copied handler_ctx.response into a local "ret" that
   * was never used (the function returns the header table) - dead code
   * removed. */
  *response_code = handler_ctx.response_code;

  DBG(r, "REQ[%X] end %s()", TO_ADDR(r), __func__);
  return handler_ctx.headers_out;
}
/* Implements svn_ra__vtable_t.open_session().
 *
 * Builds an svn_ra_serf__session_t for SESSION_URL, creates the first serf
 * connection, exchanges capabilities with the server, and stores the
 * session in SESSION->priv. On a redirect, *CORRECTED_URL (when the caller
 * supplies it) receives the new URL via the capability exchange. The
 * session lives in RESULT_POOL; SCRATCH_POOL is for temporary work.
 */
static svn_error_t *
svn_ra_serf__open(svn_ra_session_t *session,
                  const char **corrected_url,
                  const char *session_URL,
                  const svn_ra_callbacks2_t *callbacks,
                  void *callback_baton,
                  svn_auth_baton_t *auth_baton,
                  apr_hash_t *config,
                  apr_pool_t *result_pool,
                  apr_pool_t *scratch_pool)
{
  apr_status_t status;
  svn_ra_serf__session_t *serf_sess;
  apr_uri_t url;
  const char *client_string = NULL;
  svn_error_t *err;

  /* Default to "no redirect detected" before any early return. */
  if (corrected_url)
    *corrected_url = NULL;

  serf_sess = apr_pcalloc(result_pool, sizeof(*serf_sess));
  serf_sess->pool = result_pool;
  /* Keep a private copy of the config so later mutation by the caller
   * cannot affect this session. */
  if (config)
    SVN_ERR(svn_config_copy_config(&serf_sess->config, config, result_pool));
  else
    serf_sess->config = NULL;
  serf_sess->wc_callbacks = callbacks;
  serf_sess->wc_callback_baton = callback_baton;
  serf_sess->auth_baton = auth_baton;
  serf_sess->progress_func = callbacks->progress_func;
  serf_sess->progress_baton = callbacks->progress_baton;
  serf_sess->cancel_func = callbacks->cancel_func;
  serf_sess->cancel_baton = callback_baton;

  /* todo: reuse serf context across sessions */
  serf_sess->context = serf_context_create(serf_sess->pool);

  SVN_ERR(svn_ra_serf__blncache_create(&serf_sess->blncache,
                                       serf_sess->pool));

  SVN_ERR(svn_ra_serf__uri_parse(&url, session_URL, serf_sess->pool));

  /* Normalize a missing port to the scheme's default so later
   * comparisons against the parsed URL are consistent. */
  if (!url.port)
    {
      url.port = apr_uri_port_of_scheme(url.scheme);
    }
  serf_sess->session_url = url;
  serf_sess->session_url_str = apr_pstrdup(serf_sess->pool, session_URL);
  serf_sess->using_ssl = (svn_cstring_casecmp(url.scheme, "https") == 0);

  serf_sess->supports_deadprop_count = svn_tristate_unknown;

  serf_sess->capabilities = apr_hash_make(serf_sess->pool);

  /* We have to assume that the server only supports HTTP/1.0. Once it's
     clear HTTP/1.1 is supported, we can upgrade. */
  serf_sess->http10 = TRUE;

  /* If we switch to HTTP/1.1, then we will use chunked requests. We may disable this, if we find an intervening proxy does not support chunked requests. */
  serf_sess->using_chunked_requests = TRUE;

  SVN_ERR(load_config(serf_sess, config, serf_sess->pool));

  /* Set up the first (and for now only) connection slot. */
  serf_sess->conns[0] = apr_pcalloc(serf_sess->pool,
                                    sizeof(*serf_sess->conns[0]));
  serf_sess->conns[0]->bkt_alloc =
          serf_bucket_allocator_create(serf_sess->pool, NULL, NULL);
  serf_sess->conns[0]->session = serf_sess;
  serf_sess->conns[0]->last_status_code = -1;

  /* create the user agent string */
  if (callbacks->get_client_string)
    SVN_ERR(callbacks->get_client_string(callback_baton, &client_string,
                                         scratch_pool));

  if (client_string)
    serf_sess->useragent = apr_pstrcat(result_pool,
                                       get_user_agent_string(scratch_pool),
                                       " ", client_string, SVN_VA_NULL);
  else
    serf_sess->useragent = get_user_agent_string(result_pool);

  /* go ahead and tell serf about the connection. */
  status =
    serf_connection_create2(&serf_sess->conns[0]->conn,
                            serf_sess->context,
                            url,
                            svn_ra_serf__conn_setup, serf_sess->conns[0],
                            svn_ra_serf__conn_closed, serf_sess->conns[0],
                            serf_sess->pool);
  if (status)
    return svn_ra_serf__wrap_err(status, NULL);

  /* Set the progress callback. */
  serf_context_set_progress_cb(serf_sess->context, svn_ra_serf__progress,
                               serf_sess);

  serf_sess->num_conns = 1;

  session->priv = serf_sess;

  /* The following code explicitly works around a bug in serf
     <= r2319 / 1.3.8 where serf doesn't report the request as failed/cancelled
     when the authorization request handler fails to handle the request.

     As long as we allocate the request in a subpool of the serf connection
     pool, we know that the handler is always cleaned before the connection.

     Luckily our caller now passes us two pools which handle this case.
   */
#if defined(SVN_DEBUG) && !SERF_VERSION_AT_LEAST(1,4,0)
  /* Currently ensured by svn_ra_open4().
     If failing causes segfault in basic_tests.py 48, "basic auth test" */
  SVN_ERR_ASSERT((serf_sess->pool != scratch_pool)
                 && apr_pool_is_ancestor(serf_sess->pool, scratch_pool));
#endif

  err = svn_ra_serf__exchange_capabilities(serf_sess, corrected_url,
                                           result_pool, scratch_pool);

  /* serf should produce a usable error code instead of APR_EGENERAL */
  if (err && err->apr_err == APR_EGENERAL)
    err = svn_error_createf(SVN_ERR_RA_DAV_REQUEST_FAILED, err,
                            _("Connection to '%s' failed"), session_URL);
  SVN_ERR(err);

  /* We have set up a useful connection (that doesn't indication a redirect).
     If we've been told there is possibly a worrisome proxy in our path to the
     server AND we switched to HTTP/1.1 (chunked requests), then probe for
     problems in any proxy.  */
  if ((corrected_url == NULL || *corrected_url == NULL)
      && serf_sess->detect_chunking && !serf_sess->http10)
    SVN_ERR(svn_ra_serf__probe_proxy(serf_sess, scratch_pool));

  return SVN_NO_ERROR;
}
static svn_error_t * handle_basic_auth(svn_ra_serf__session_t *session, svn_ra_serf__connection_t *conn, serf_request_t *request, serf_bucket_t *response, char *auth_hdr, char *auth_attr, apr_pool_t *pool) { void *creds; char *last, *realm_name; svn_auth_cred_simple_t *simple_creds; const char *tmp; apr_size_t tmp_len; apr_port_t port; int i; if (!session->realm) { char *attr; attr = apr_strtok(auth_attr, "=", &last); if (strcmp(attr, "realm") == 0) { realm_name = apr_strtok(NULL, "=", &last); if (realm_name[0] == '\"') { apr_size_t realm_len; realm_len = strlen(realm_name); if (realm_name[realm_len - 1] == '\"') { realm_name[realm_len - 1] = '\0'; realm_name++; } } } else { return svn_error_create (SVN_ERR_RA_DAV_MALFORMED_DATA, NULL, _("Missing 'realm' attribute in Authorization header.")); } if (!realm_name) { return svn_error_create (SVN_ERR_RA_DAV_MALFORMED_DATA, NULL, _("Missing 'realm' attribute in Authorization header.")); } if (session->repos_url.port_str) { port = session->repos_url.port; } else { port = apr_uri_port_of_scheme(session->repos_url.scheme); } session->realm = apr_psprintf(session->pool, "<%s://%s:%d> %s", session->repos_url.scheme, session->repos_url.hostname, port, realm_name); } if (!session->auth_state) { SVN_ERR(svn_auth_first_credentials(&creds, &session->auth_state, SVN_AUTH_CRED_SIMPLE, session->realm, session->wc_callbacks->auth_baton, session->pool)); } else { SVN_ERR(svn_auth_next_credentials(&creds, session->auth_state, session->pool)); } session->auth_attempts++; if (!creds || session->auth_attempts > 4) { /* No more credentials. 
*/ return svn_error_create(SVN_ERR_AUTHN_FAILED, NULL, "No more credentials or we tried too many times.\n" "Authentication failed"); } simple_creds = creds; tmp = apr_pstrcat(session->pool, simple_creds->username, ":", simple_creds->password, NULL); tmp_len = strlen(tmp); svn_ra_serf__encode_auth_header(session->auth_protocol->auth_name, &session->auth_value, tmp, tmp_len, pool); session->auth_header = "Authorization"; /* FIXME Come up with a cleaner way of changing the connection auth. */ for (i = 0; i < session->num_conns; i++) { session->conns[i]->auth_header = session->auth_header; session->conns[i]->auth_value = session->auth_value; } return SVN_NO_ERROR; }
/* Determine if "url" matches the hostname, scheme and port and path
 * in "filter". All but the path comparisons are case-insensitive.
 *
 * "pathlen" is the number of leading bytes of filter->path to compare
 * (a prefix match on the URL path). Returns 1 on match, 0 otherwise.
 */
static int uri_meets_conditions(const apr_uri_t *filter, const int pathlen,
                                const apr_uri_t *url)
{
    /* Scheme, hostname port and local part. The filter URI and the
     * URI we test may have the following shapes:
     *   /<path>
     *   <scheme>[:://<hostname>[:<port>][/<path>]]
     * That is, if there is no scheme then there must be only the path,
     * and we check only the path; if there is a scheme, we check the
     * scheme for equality, and then if present we match the hostname,
     * and then if present match the port, and finally the path if any.
     *
     * Note that this means that "/<path>" only matches local paths,
     * and to match proxied paths one *must* specify the scheme.
     */

    /* Is the filter is just for a local path or a proxy URI? */
    if (!filter->scheme) {
        if (url->scheme || url->hostname) {
            return 0;
        }
    }
    else {
        /* The URI scheme must be present and identical except for case. */
        if (!url->scheme || strcasecmp(filter->scheme, url->scheme)) {
            return 0;
        }

        /* If the filter hostname is null or empty it matches any hostname,
         * if it begins with a "*" it matches the _end_ of the URI hostname
         * excluding the "*", if it begins with a "." it matches the _end_
         * of the URI * hostname including the ".", otherwise it must match
         * the URI hostname exactly. */
        if (filter->hostname && filter->hostname[0]) {
            if (filter->hostname[0] == '.') {
                const size_t fhostlen = strlen(filter->hostname);
                const size_t uhostlen = url->hostname ?
                                        strlen(url->hostname) : 0;

                /* Tail match: ".example.com" must fit inside the URI host
                 * and match its suffix, leading "." included. The length
                 * check also rejects a NULL URI host (uhostlen == 0). */
                if (fhostlen > uhostlen
                    || (url->hostname
                        && strcasecmp(filter->hostname,
                                      url->hostname + uhostlen - fhostlen))) {
                    return 0;
                }
            }
            else if (filter->hostname[0] == '*') {
                /* Same tail match, but the "*" itself is excluded. */
                const size_t fhostlen = strlen(filter->hostname + 1);
                const size_t uhostlen = url->hostname ?
                                        strlen(url->hostname) : 0;

                if (fhostlen > uhostlen
                    || (url->hostname
                        && strcasecmp(filter->hostname + 1,
                                      url->hostname + uhostlen - fhostlen))) {
                    return 0;
                }
            }
            else if (!url->hostname
                     || strcasecmp(filter->hostname, url->hostname)) {
                return 0;
            }
        }

        /* If the filter port is empty it matches any URL port.
         * If the filter or URL port are missing, or the URL port is
         * empty, they default to the port for their scheme. */
        if (!(filter->port_str && !filter->port_str[0])) {
            /* NOTE: ap_port_of_scheme will return 0 if given NULL input */
            const unsigned fport = filter->port_str
                    ? filter->port : apr_uri_port_of_scheme(filter->scheme);
            const unsigned uport = (url->port_str && url->port_str[0])
                    ? url->port : apr_uri_port_of_scheme(url->scheme);

            if (fport != uport) {
                return 0;
            }
        }
    }

    /* For HTTP caching purposes, an empty (NULL) path is equivalent to
     * a single "/" path. RFCs 3986/2396 */
    if (!url->path) {
        if (*filter->path == '/' && pathlen == 1) {
            return 1;
        }
        else {
            return 0;
        }
    }

    /* Url has met all of the filter conditions so far, determine
     * if the paths match. */
    return !strncmp(filter->path, url->path, pathlen);
}
/* compute a new uri based on the old one and the current request
   such that the uri maps to the same location on a different server.
   there are three cases:
     1. globally absolute: method://old-server(:port)/path/to/file
     2. locally absolute:  /path/to/file
     3. relative:          path/to/file
   we handle the cases as follows:
     1. check from_servers to ensure that old-server(:port) is listed;
        if not, we check if old-server is the current servername; if not,
        we _do not_ replace the server in the url; note that we _do not_
        try to match urls of the form
        method://username:password@old-server(:port)/path/to/file
     2. just drop in to_server in the beginning
     3. figure out the current request_uri, and replace the most
        specific filename with path/to/file

   Returns the rewritten URL (allocated from the request pool), or NULL
   when the URL should be left untouched (non-HTTP scheme, parse failure,
   or a foreign host that matches neither the server nor from_servers). */
static char *
remap_url(saxctxt *ctx, char *uri, int add_auth, int add_qstring_ignore)
{
    apr_uri_t u, u2;
    int i;
    char *s;
    apr_pool_t *pool = ctx->f->r->pool;

    ap_log_error(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, ctx->f->r->server,
                 "remap_url: remapping %s (add_auth = %d, add_qstring_ignore = %d)",
                 uri, add_auth, add_qstring_ignore);

    if(apr_uri_parse(pool, uri, &u) != APR_SUCCESS)
        return NULL;

    /* only CDNify HTTP and HTTPS requests for now */
    if(u.scheme && strcasecmp(u.scheme, "http") != 0
       && strcasecmp(u.scheme, "https") != 0)
        return NULL;

    if(u.hostinfo) {
        /* case 1 */
        /* Compare against host:port only when the URL carries an explicit,
         * non-default port; otherwise compare the bare hostname. */
        const char *hostinfo =
            (u.port_str && u.scheme
             && u.port != apr_uri_port_of_scheme(u.scheme))
            ? u.hostinfo : u.hostname;

        /* does this hostinfo match the current ServerName or one of the
           ServerAliases? */
        if(matches_aliases(ctx->f->r->server, hostinfo))
            goto set_hostname_and_return;

        /* if not, cycle through from_servers and see if one of them
           matches */
        if(ctx->cfg->from_servers) {
            tattr *from = (tattr *)ctx->cfg->from_servers->elts;
            for(i = 0; i < ctx->cfg->from_servers->nelts; ++i)
                if(!strcasecmp(hostinfo, from[i].val))
                    goto set_hostname_and_return;
        }
    }
    else if(u.path && u.path[0] == '/') {
        /* case 2 */
        goto set_hostname_and_return;
    }
    else if(u.path) {
        /* case 3 */
        /*
         * first figure out the local absolute path, minus the filename,
         * from r->uri; then, tack the relative uri onto the end
         */
        char *my_uri = apr_pstrdup(pool, ctx->f->r->uri);
        s = strrchr(my_uri, '/');
        if(s) {
            *(s+1) = '\0';
            u.path = apr_pstrcat(pool, my_uri, u.path, NULL);
        }
        goto set_hostname_and_return;
    }
    /* Unrecognized/foreign URL: leave it alone. */
    return NULL;

 set_hostname_and_return:
    /* Graft the configured to_server's scheme/host/port onto the URL,
     * keeping the (possibly rebuilt) path, then re-serialize it. */
    if(apr_uri_parse(pool, (char *)ctx->cfg->to_server, &u2) != APR_SUCCESS)
        return NULL;
    u.scheme = u2.scheme;
    u.hostname = u2.hostname;
    u.port_str = u2.port_str;
    u.port = u2.port;
    if(add_qstring_ignore)
        u.query = add_qstring_ignore_token(pool, ctx, &u);
    if(add_auth)
        u.query = add_auth_token(pool, ctx, &u);
    return apr_uri_unparse(pool, &u, 0);
}
/* Unparse a apr_uri_t structure to an URI string.
 * Optionally suppress the password for security reasons.
 *
 * "flags" is a bitmask of APR_URI_UNP_* values controlling which parts
 * (site, user, password, path, query/fragment) appear in the result.
 * The string is allocated from pool "p"; "uptr" is not modified.
 */
APU_DECLARE(char *) apr_uri_unparse(apr_pool_t *p, const apr_uri_t *uptr,
                                    unsigned flags)
{
    char *ret = "";

    /* If suppressing the site part, omit both user name & scheme://hostname */
    if (!(flags & APR_URI_UNP_OMITSITEPART)) {

        /* Construct a "user:password@" string, honoring the passed
         * APR_URI_UNP_ flags: */
        if (uptr->user || uptr->password) {
            ret = apr_pstrcat(p,
                      (uptr->user && !(flags & APR_URI_UNP_OMITUSER))
                          ? uptr->user : "",
                      (uptr->password && !(flags & APR_URI_UNP_OMITPASSWORD))
                          ? ":" : "",
                      (uptr->password && !(flags & APR_URI_UNP_OMITPASSWORD))
                          ? ((flags & APR_URI_UNP_REVEALPASSWORD)
                              ? uptr->password : "******")
                          : "",
                      ((uptr->user && !(flags & APR_URI_UNP_OMITUSER))
                       || (uptr->password
                           && !(flags & APR_URI_UNP_OMITPASSWORD)))
                          ? "@" : "",
                      NULL);
        }

        /* Construct scheme://site string */
        if (uptr->hostname) {
            int is_default_port;
            const char *lbrk = "", *rbrk = "";

            /* An IPv6 literal contains ':' and must be bracketed. */
            if (strchr(uptr->hostname, ':')) { /* v6 literal */
                lbrk = "[";
                rbrk = "]";
            }

            /* Omit ":port" when it is absent, zero, or the scheme's
             * default port. */
            is_default_port =
                (uptr->port_str == NULL
                 || uptr->port == 0
                 || uptr->port == apr_uri_port_of_scheme(uptr->scheme));

            ret = apr_pstrcat(p, "//", ret, lbrk, uptr->hostname, rbrk,
                              is_default_port ? "" : ":",
                              is_default_port ? "" : uptr->port_str,
                              NULL);
        }
        if (uptr->scheme) {
            ret = apr_pstrcat(p, uptr->scheme, ":", ret, NULL);
        }
    }

    /* Should we suppress all path info? */
    if (!(flags & APR_URI_UNP_OMITPATHINFO)) {
        /* Append path, query and fragment strings:
         *
         * NOTE: when there is no fragment (or OMITQUERY is set), the
         * fragment arguments evaluate to NULL, which deliberately
         * terminates apr_pstrcat's vararg list early - everything before
         * that point has already been concatenated. */
        ret = apr_pstrcat(p,
                          ret,
                          (uptr->path)
                              ? uptr->path : "",
                          (uptr->query && !(flags & APR_URI_UNP_OMITQUERY))
                              ? "?" : "",
                          (uptr->query && !(flags & APR_URI_UNP_OMITQUERY))
                              ? uptr->query : "",
                          (uptr->fragment && !(flags & APR_URI_UNP_OMITQUERY))
                              ? "#" : NULL,
                          (uptr->fragment && !(flags & APR_URI_UNP_OMITQUERY))
                              ? uptr->fragment : NULL,
                          NULL);
    }
    return ret;
}