/**
 * The main Apache request handler for MapServer requests.
 *
 * Flow: fetch the per-directory config (decline when no map is configured),
 * hot-reload the mapfile when its mtime is newer than the cached copy,
 * decode GET/POST parameters, install the MapServer->Apache IO redirect,
 * and dispatch the request through the MapServer CGI machinery.
 *
 * Returns DECLINED, OK, an HTTP error code, or a redirect status set by
 * MapServer (maptemplate.c) during dispatch.
 */
static int mapserver_handler (request_rec *r)
{
    /* Acquire the appropriate configuration for this directory. */
    mapserver_dir_config *conf;
    conf = (mapserver_dir_config*) ap_get_module_config (r->per_dir_config, &mapserver_module);

    /* Decline the request if there's no map configured. */
    if (!conf || !conf->map)
        return DECLINED;

    /* Reload the map file when it changed on disk since we cached it. */
    apr_finfo_t mapstat;
    if (apr_stat (&mapstat, conf->mapfile_name, APR_FINFO_MTIME, r->pool) == APR_SUCCESS) {
        if (apr_time_sec (mapstat.mtime) > apr_time_sec (conf->mtime)) {
            mapObj *newmap = msLoadMap (conf->mapfile_name, NULL);
            if (newmap) {
                msFreeMap (conf->map);
                conf->map = newmap;
                conf->mtime = mapstat.mtime;
            } else {
                /* Keep serving the old map if the reload fails. */
                ap_log_error (APLOG_MARK, APLOG_WARNING, 0, NULL,
                              "unable to reload map file %s", conf->mapfile_name);
            }
        }
    } else {
        ap_log_error (APLOG_MARK, APLOG_WARNING, 0, NULL,
                      "%s: unable to stat file %s", __func__, conf->mapfile_name);
    }

    /* make a copy of the URI so we can modify it safely */
    char *uri = apr_pstrdup (r->pool, r->uri);
    int len = strlen (uri);
    int conf_uri_len = strlen (conf->uri);

    /* If the URI points to a subdirectory we want to decline. */
    if (len > conf_uri_len)
        return DECLINED;

    int argc = 0;
    char **ParamNames = NULL;
    char **ParamValues = NULL;
    char *post_data = NULL;
    int szMethod = -1;
    char *szContentType = NULL;
    mapservObj *mapserv = NULL;

    /* Try decoding the query string */
    if (r->method_number == M_GET) {
        argc = mapserver_decode_args (r->pool,
                                      (char*) apr_pstrdup (r->pool, r->args),
                                      &ParamNames, &ParamValues);
        szMethod = MS_GET_REQUEST;
    } else if (r->method_number == M_POST) {
        szContentType = (char*) apr_table_get (r->headers_in, "Content-Type");
        post_data = mapserver_read_post_data (r);
        szMethod = MS_POST_REQUEST;
        /* BUGFIX: apr_table_get() returns NULL when the client sent no
         * Content-Type header; passing NULL to strcmp() is undefined
         * behavior and crashed on such requests. */
        if (szContentType
            && strcmp (szContentType, "application/x-www-form-urlencoded") == 0) {
            argc = mapserver_decode_args (r->pool,
                                          (char*) apr_pstrdup (r->pool, r->args),
                                          &ParamNames, &ParamValues);
        }
    }
    else
        return HTTP_METHOD_NOT_ALLOWED;

    /* Nothing to work with: neither query args nor a POST body. */
    if (!argc && !post_data)
        return HTTP_BAD_REQUEST;

    /* Now we install the IO redirection. */
    if (msIO_installApacheRedirect (r) != MS_TRUE)
        ap_log_error (APLOG_MARK, APLOG_ERR, 0, NULL,
                      "%s: could not install apache redirect", __func__);

    mapserv = msAllocMapServObj();
    mapserv->request->NumParams = argc;
    mapserv->request->ParamNames = ParamNames;
    mapserv->request->ParamValues = ParamValues;
    mapserv->request->type = (enum MS_REQUEST_TYPE) szMethod;
    mapserv->request->postrequest = post_data;
    mapserv->request->contenttype = szContentType;

    mapserv->map = conf->map;
    if(!mapserv->map) {
        msCGIWriteError(mapserv);
        goto end_request;
    }

    if(msCGIDispatchRequest(mapserv) != MS_SUCCESS) {
        msCGIWriteError(mapserv);
        goto end_request;
    }

end_request:
    if(mapserv) {
        msCGIWriteLog(mapserv,MS_FALSE);
        /* Detach members owned by the request pool / directory config so
         * msFreeMapServObj() does not free memory it does not own. */
        mapserv->request->ParamNames = NULL;
        mapserv->request->ParamValues = NULL;
        mapserv->request->postrequest = NULL;
        mapserv->request->contenttype = NULL;
        mapserv->map = NULL;
        msFreeMapServObj(mapserv);
    }
    msResetErrorList();

    /* Check if status was set inside MapServer functions. If it was, we
     * return its value instead of simply OK. This is to support redirects
     * from maptemplate.c */
    if (r->status == HTTP_MOVED_TEMPORARILY)
        return r->status;

    return OK;
}
/**
 * Check a WebID ACL (a Turtle file named WEBID_ACL_FNAME alongside the
 * requested file) for the given access mode.
 *
 * Builds the fully-qualified URIs for the request, its parent directory
 * and the ACL document, parses the ACL into an in-memory librdf model,
 * and runs a cascade of SPARQL queries (agent, agent-class, world; with a
 * defaultForNew fallback when no explicit accessTo triple exists).
 *
 * @param r           the request; r->user is the authenticated WebID.
 * @param req_access  one of WEBID_ACCESS_READ / _WRITE / (else Control).
 * @return OK when a query grants access, HTTP_FORBIDDEN otherwise
 *         (including when the ACL file is missing or unparsable).
 */
static int check_request_acl(request_rec *r, int req_access) {
    char *dir_path, *acl_path;
    apr_finfo_t acl_finfo;
    const char *req_uri, *dir_uri, *acl_uri, *access;
    const char *port, *par_uri, *req_file;
    librdf_world *rdf_world = NULL;
    librdf_storage *rdf_storage = NULL;
    librdf_model *rdf_model = NULL;
    librdf_parser *rdf_parser = NULL;
    librdf_uri *rdf_uri_acl = NULL, *rdf_uri_base = NULL;
    int ret = HTTP_FORBIDDEN;   /* deny by default */

    // dir_path: parent directory of request filename
    // acl_path: absolute path to request ACL
    dir_path = ap_make_dirstr_parent(r->pool, r->filename);
    acl_path = ap_make_full_path(r->pool, dir_path, WEBID_ACL_FNAME);
    if (apr_filepath_merge(&acl_path, NULL, acl_path, APR_FILEPATH_NOTRELATIVE, r->pool) != APR_SUCCESS) {
        /* BUGFIX: log message previously read "is invalid or or not absolute" */
        ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r,
                      "Module bug? Request filename path %s is invalid "
                      "or not absolute for uri %s",
                      r->filename, r->uri);
        return HTTP_FORBIDDEN;
    }

    // acl_path: 403 if missing
    if ((apr_stat(&acl_finfo, acl_path, APR_FINFO_TYPE, r->pool) != APR_SUCCESS) ||
        (acl_finfo.filetype != APR_REG)) {
        return HTTP_FORBIDDEN;
    }

    // req_uri: fully qualified URI of request filename
    // dir_uri: fully qualified URI of request filename parent
    // acl_uri: fully qualified URI of request filename ACL
    // access: ACL URI of requested access
    port = ap_is_default_port(ap_get_server_port(r), r) ? "" :
           apr_psprintf(r->pool, ":%u", ap_get_server_port(r));
    req_uri = apr_psprintf(r->pool, "%s://%s%s%s%s",
                           ap_http_scheme(r), ap_get_server_name(r), port,
                           (*r->uri == '/') ? "" : "/", r->uri);
    par_uri = ap_make_dirstr_parent(r->pool, r->uri);
    dir_uri = apr_psprintf(r->pool, "%s://%s%s%s%s",
                           ap_http_scheme(r), ap_get_server_name(r), port,
                           (*par_uri == '/') ? "" : "/", par_uri);
    acl_uri = ap_make_full_path(r->pool, dir_uri, WEBID_ACL_FNAME);

    if (req_access == WEBID_ACCESS_READ) {
        access = "Read";
    } else if (req_access == WEBID_ACCESS_WRITE) {
        /* Writing the ACL file itself requires Control, not Write. */
        if ((req_file = strrchr(r->filename, '/')) != NULL &&
            strcmp(++req_file, WEBID_ACL_FNAME) == 0)
            access = "Control";
        else
            access = "Write";
    } else {
        access = "Control";
    }

    ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r,
                  "[ACL] %s (%s) %s | URI: %s | DIR: %s (%s) | ACL: %s (%s) | status: %d",
                  r->method, access, r->uri, req_uri, dir_uri, dir_path,
                  acl_uri, acl_path, r->status);

    /* Nested constructor ladder: each level is freed on the way out, and a
     * failure at any level logs and falls through with ret still FORBIDDEN. */
    if ((rdf_world = librdf_new_world()) != NULL) {
        librdf_world_open(rdf_world);
        if ((rdf_storage = librdf_new_storage(rdf_world, "memory", NULL, NULL)) != NULL) {
            if ((rdf_model = librdf_new_model(rdf_world, rdf_storage, NULL)) != NULL) {
                if ((rdf_parser = librdf_new_parser(rdf_world, "turtle", NULL, NULL)) != NULL) {
                    if ((rdf_uri_base = librdf_new_uri(rdf_world, (unsigned char*)acl_uri)) != NULL) {
                        if ((rdf_uri_acl = librdf_new_uri_from_filename(rdf_world, acl_path)) != NULL) {
                            if (!librdf_parser_parse_into_model(rdf_parser, rdf_uri_acl, rdf_uri_base, rdf_model)) {
                                //log_stream_prefix(r, librdf_model_as_stream(rdf_model), "[ACL] [model]");
                                /* Grant if: explicit agent / agent-class / world triple
                                 * matches, OR no accessTo triple exists at all and a
                                 * defaultForNew triple on the directory matches. */
                                if (query_results(r, rdf_world, rdf_model, apr_psprintf(r->pool, SPARQL_URI_MODE_AGENT, "accessTo", req_uri, access, r->user)) > 0 || \
                                    query_results(r, rdf_world, rdf_model, apr_psprintf(r->pool, SPARQL_URI_MODE_AGENTCLASS, "accessTo", req_uri, access, r->user)) > 0 || \
                                    query_results(r, rdf_world, rdf_model, apr_psprintf(r->pool, SPARQL_URI_MODE_WORLD, "accessTo", req_uri, access)) > 0 || \
                                    ( ( query_results(r, rdf_world, rdf_model, apr_psprintf(r->pool, SPARQL_URI_ACL_EXISTS, "accessTo", req_uri )) == 0 ) &&
                                      ( query_results(r, rdf_world, rdf_model, apr_psprintf(r->pool, SPARQL_URI_MODE_AGENT, "defaultForNew", dir_uri, access, r->user)) > 0 || \
                                        query_results(r, rdf_world, rdf_model, apr_psprintf(r->pool, SPARQL_URI_MODE_AGENTCLASS, "defaultForNew", dir_uri, access, r->user)) > 0 || \
                                        query_results(r, rdf_world, rdf_model, apr_psprintf(r->pool, SPARQL_URI_MODE_WORLD, "defaultForNew", dir_uri, access)) > 0 ) ) ) {
                                    /* Advertise the ACL document to the client. */
                                    apr_table_set(r->headers_out, "Link", apr_psprintf(r->pool, "%s; rel=meta", acl_uri));
                                    ret = OK;
                                }
                            } else
                                ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "librdf_parser_parse_into_model failed");
                            librdf_free_uri(rdf_uri_acl);
                        } else
                            ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "librdf_new_uri_from_filename returned NULL");
                        librdf_free_uri(rdf_uri_base);
                    } else
                        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "librdf_new_uri returned NULL");
                    librdf_free_parser(rdf_parser);
                } else
                    ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "librdf_new_parser returned NULL");
                librdf_free_model(rdf_model);
            } else
                ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "librdf_new_model returned NULL");
            librdf_free_storage(rdf_storage);
        } else
            ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "librdf_new_storage returned NULL");
        librdf_free_world(rdf_world);
    } else
        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "librdf_new_world returned NULL");

    return ret;
}
/* Perhaps this becomes nothing but a macro? */ APR_DECLARE(apr_status_t) apr_lstat(apr_finfo_t *finfo, const char *fname, apr_int32_t wanted, apr_pool_t *pool) { return apr_stat(finfo, fname, wanted | APR_FINFO_LINK, pool); }
/**
 * mod_fcgid post-config hook for the process manager side.
 *
 * Computes the PM wake-up interval, ensures the unix-socket directory
 * exists (creating it and, when running as root, chown'ing it to the
 * child user), creates the two PM<->Apache pipes plus their mutex, and
 * finally spawns the process manager.
 *
 * @return APR_SUCCESS / an APR error, or exits the process outright on
 *         unrecoverable setup failures (matching historical behavior).
 */
apr_status_t procmgr_post_config(server_rec * main_server,
                                 apr_pool_t * configpool)
{
    apr_status_t rv;
    apr_finfo_t finfo;
    fcgid_server_conf *sconf =
        ap_get_module_config(main_server->module_config, &fcgid_module);

    /* Calculate procmgr_fetch_cmd wake up interval: the smallest of the
     * three scan intervals, but never zero. */
    g_wakeup_timeout = fcgid_min(sconf->error_scan_interval,
                                 sconf->busy_scan_interval);
    g_wakeup_timeout = fcgid_min(sconf->idle_scan_interval, g_wakeup_timeout);
    if (g_wakeup_timeout == 0)
        g_wakeup_timeout = 1;   /* Make it reasonable */

    rv = apr_stat(&finfo, sconf->sockname_prefix, APR_FINFO_USER, configpool);
    if (rv != APR_SUCCESS) {
        /* Make dir for unix domain socket */
        if ((rv = apr_dir_make_recursive(sconf->sockname_prefix,
                                         APR_UREAD | APR_UWRITE | APR_UEXECUTE,
                                         configpool)) != APR_SUCCESS) {
            ap_log_error(APLOG_MARK, APLOG_ERR, rv, main_server,
                         "mod_fcgid: Can't create unix socket dir %s",
                         sconf->sockname_prefix);
            exit(1);
        }

        /* Child processes need to be able to create sockets in the unix
         * socket dir.  Change the ownership to the child user only if
         * running as root and we just successfully created the directory
         * (avoiding any concerns about changing the target of a link
         * created by another user).
         *
         * If the directory already existed and was owned by a different
         * user, FastCGI requests will fail at steady state, and manual
         * intervention will be required. */
        if (!geteuid()) {
            if (chown(sconf->sockname_prefix,
                      ap_unixd_config.user_id, -1) < 0) {
                ap_log_error(APLOG_MARK, APLOG_ERR, errno, main_server,
                             "mod_fcgid: Can't set ownership of unix socket dir %s",
                             sconf->sockname_prefix);
                exit(1);
            }
        }
    }

    /* Create pipes to communicate between process manager and apache.
     * FIX: the second pipe-create used to be tested as a bare assignment,
     * silently relying on APR_SUCCESS == 0; compare explicitly so both
     * calls are checked the same way. */
    if ((rv = apr_file_pipe_create(&g_pm_read_pipe, &g_ap_write_pipe,
                                   configpool)) != APR_SUCCESS
        || (rv = apr_file_pipe_create(&g_ap_read_pipe, &g_pm_write_pipe,
                                      configpool)) != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_ERR, rv, main_server,
                     "mod_fcgid: Can't create pipe between PM and stub");
        return rv;
    }

    /* Create mutex for pipe reading and writing */
    rv = fcgid_mutex_create(&g_pipelock, &g_pipelock_name,
                            g_pipelock_mutex_type,
                            main_server->process->pconf, main_server);
    if (rv != APR_SUCCESS) {
        exit(1);
    }

    /* Create process manager process */
    return create_process_manager(main_server, configpool);
}
bool getInfo(const char * path, apr_int32_t wanted) const { return (APR_SUCCESS == check_apr(apr_stat(&finfo, path, wanted, mPool))); }
static int gettemp(char *path, apr_file_t **doopen, apr_int32_t flags, apr_pool_t *p) { register char *start, *trv, *suffp; char *pad; apr_finfo_t sbuf; apr_status_t rv; apr_uint32_t randnum; if (randseed==0) { randseed = (int)apr_time_now(); seedrandom(randseed); } for (trv = path; *trv; ++trv) ; suffp = trv; --trv; if (trv < path) { return APR_EINVAL; } /* Fill space with random characters */ while (*trv == 'X') { randnum = arc4random() % (sizeof(padchar) - 1); *trv-- = padchar[randnum]; } start = trv + 1; /* * check the target directory. */ for (;; --trv) { if (trv <= path) break; if (*trv == '/') { *trv = '\0'; rv = apr_stat(&sbuf, path, APR_FINFO_TYPE, p); *trv = '/'; if (rv != APR_SUCCESS) return rv; if (sbuf.filetype != APR_DIR) { return APR_ENOTDIR; } break; } } for (;;) { if ((rv = apr_file_open(doopen, path, flags, APR_UREAD | APR_UWRITE, p)) == APR_SUCCESS) return APR_SUCCESS; if (!APR_STATUS_IS_EEXIST(rv)) return rv; /* If we have a collision, cycle through the space of filenames */ for (trv = start;;) { if (*trv == '\0' || trv == suffp) return APR_EINVAL; /* XXX: is this the correct return code? */ pad = strchr((char *)padchar, *trv); if (pad == NULL || !*++pad) { *trv++ = padchar[0]; } else { *trv++ = *pad; break; } } } /*NOTREACHED*/ }
/*
 * Walk the parsed Apache config tree (ap_conftree) and register every
 * DNS-SD service implied by it: the main server, each <VirtualHost>,
 * <Location> blocks carrying DNSSDService* directives, and (optionally)
 * per-user home directories.  Port/host defaults discovered along the way
 * are filled into services that did not specify their own.
 */
static void assemble_services(struct runtime_data *r) {
    ap_directive_t *v;
    const char *default_host_name = NULL;
    uint16_t default_port = 0;
    struct service_data *j;
    apr_pool_t *t;   /* scratch pool, destroyed at the end */

    ap_assert(r);

    apr_pool_create(&t, r->pool);

    /* Pass over every top-level directive in the config tree. */
    for (v = ap_conftree; v; v = v->next) {
        const char *a = v->args;

        if (strcasecmp(v->directive, "ServerName") == 0) {
            /* "ServerName host[:port]" — split off an optional port. */
            const char *tdhn = NULL;
            char *colon;
            tdhn = ap_getword_conf(t, &a);
            colon = strrchr(tdhn, ':');
            if (colon) {
                apr_size_t sz;
                if (!default_port) {
                    default_port = (uint16_t) atoi(colon+1);
                }
                sz = colon - tdhn;
                default_host_name = apr_pstrndup(t, tdhn, sz);
            } else {
                default_host_name = tdhn;
            }
        } else if (strcasecmp(v->directive, "Listen") == 0) {
            /* "Listen [ip:]port" — only the first port seen wins. */
            char *sp;
            if (!default_port) {
                char *colon;
                sp = ap_getword_conf(t, &a);
                if ((colon = strrchr(sp, ':')))
                    sp = colon + 1;
                default_port = (uint16_t) atoi(sp);
            }
        } else if (strcasecmp(v->directive, "DNSSDServicePort") == 0)
            default_port = (uint16_t) atoi(a);
        else if (strcasecmp(v->directive, "<VirtualHost") == 0) {
            const char *host_name = NULL;
            uint16_t vport = 0;
            const char *vname = NULL, *vtypes = NULL, *txt_record = NULL;
            ap_directive_t *l;
            char *colon;
            /* Remember the head of the service list so we can later fix
             * up only the services added inside this vhost. */
            struct service_data *marker = r->services;

            /* "<VirtualHost addr:port>" — port from the block header. */
            if ((colon = strrchr(v->args, ':')))
                vport = (uint16_t) atoi(colon+1);

            /* ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->main_server, "VHOST: %s ", v->directive); */
            for (l = v->first_child; l; l = l->next) {
                a = l->args;
                /* ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->main_server, "VHOST_INTERNAL %s | %s | %s | %s", l->directive, l->args, vname, vtypes); */
                if (strcasecmp(l->directive, "ServerName") == 0) {
                    const char *thn = NULL;
                    thn = ap_getword_conf(t, &a);
                    colon = strrchr(thn, ':');
                    if (colon) {
                        apr_size_t sz;
                        if (!vport)
                            vport = (uint16_t) atoi(colon+1);
                        sz = colon - thn;
                        host_name = apr_pstrndup(t, thn, sz);
                    } else {
                        host_name = thn;
                    }
                } else if (strcasecmp(l->directive, "DNSSDServiceName") == 0)
                    vname = ap_getword_conf(t, &a);
                else if (strcasecmp(l->directive, "DNSSDServiceTypes") == 0)
                    vtypes = a;
                else if (strcasecmp(l->directive, "DNSSDServicePort") == 0)
                    vport = (uint16_t) atoi(a);
                else if (strcasecmp(l->directive, "DNSSDServiceTxtRecord") == 0)
                    txt_record = a;
                else if (strcasecmp(l->directive, "<Location") == 0) {
                    /* A <Location> inside the vhost may advertise its own
                     * service via DNSSDService* directives. */
                    ap_directive_t *s;
                    const char *sname = NULL, *stypes = NULL;
                    char *path;
                    size_t i;
                    uint16_t sport = 0;

                    /* Strip the trailing '>' from "<Location /path>". */
                    path = apr_pstrdup(t, l->args);
                    if (*path != 0 && (path[(i = strlen(path) - 1)] == '>'))
                        path[i] = 0;

                    for (s = l->first_child; s; s = s->next) {
                        a = s->args;
                        if (strcasecmp(s->directive, "DNSSDServiceName") == 0)
                            sname = ap_getword_conf(t, &a);
                        else if (strcasecmp(s->directive, "DNSSDServiceTypes") == 0)
                            stypes = a;
                        else if (strcasecmp(s->directive, "DNSSDServiceTxtRecord") == 0)
                            txt_record = a;
                        else if (strcasecmp(s->directive, "DNSSDServicePort") == 0)
                            sport = (uint16_t) atoi(a);
                    }

                    /* Only register when an explicit name was given. */
                    if (sname)
                        add_service(r, NULL, sport, path, sname, stypes, 0, txt_record);
                }
            }

            /* Fill in missing data in <Location> based services.
             * NOTE(review): the condition tests j->pool but assigns
             * j->port, and j->host_name is overwritten unconditionally —
             * this looks like it was meant to be
             * "if (!j->port) ...; if (!j->host_name) ..."; confirm against
             * upstream mod_dnssd before changing. */
            for (j = r->services; j && j != marker; j = j->next) {
                if (!j->pool)
                    j->port = vport;
                j->host_name = apr_pstrdup(r->pool, host_name);
            }

            /* Register the vhost itself when configured to, or when any
             * DNSSDService* directive appeared in it. */
            if (r->global_config_data->vhost || vname || vtypes || txt_record)
                add_service(r, host_name, vport, NULL, vname ? vname : host_name, vtypes, 0, txt_record);
        }
    }

    /* ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->main_server, "ping"); */

    /* Optionally advertise every user's public_html-style directory. */
    if (r->global_config_data->user_dir) {
        struct passwd *pw;
        apr_pool_t *p_loop;
        apr_pool_create(&p_loop, t);

        while ((pw = getpwent())) {
            apr_finfo_t finfo;
            char *path;
            const char *u;

            apr_pool_clear(p_loop);

            /* Skip system accounts (uid < 500) and users without a real
             * home directory. */
            if (pw->pw_uid < 500)
                continue;
            if (*pw->pw_dir == 0 || strcmp(pw->pw_dir, "/") == 0)
                continue;

            path = apr_pstrcat(p_loop, pw->pw_dir, "/", r->global_config_data->user_dir_path, NULL);

            /* Must exist, be a directory, and be traversable. */
            if (apr_stat(&finfo, path, APR_FINFO_TYPE, p_loop) != APR_SUCCESS)
                continue;
            if (finfo.filetype != APR_DIR)
                continue;
            if (access(path, X_OK) != 0)
                continue;

            /* Use the GECOS real-name field (up to the first comma) as
             * the display name, falling back to the login name. */
            if (pw->pw_gecos && *pw->pw_gecos) {
                char *comma;
                u = apr_pstrdup(p_loop, pw->pw_gecos);
                if ((comma = strchr(u, ',')))
                    *comma = 0;
            } else
                u = pw->pw_name;

            add_service(r, NULL, 0, apr_pstrcat(p_loop, "/~", pw->pw_name, NULL), apr_pstrcat(p_loop, u, " on ", NULL), NULL, 1, NULL);
        }

        endpwent();
        apr_pool_destroy(p_loop);
    }

    /* Fall back to the standard HTTP port when none was configured. */
    if (!default_port)
        default_port = 80;

    /* Fill in missing data in all services */
    for (j = r->services; j; j = j->next) {
        if (!j->port)
            j->port = default_port;
        if (!j->host_name)
            j->host_name = apr_pstrdup(r->pool, default_host_name);
        if (!j->name)
            j->name = apr_pstrdup(r->pool, j->host_name);
    }

    apr_pool_destroy(t);
}
/*
 * Implementation of the CacheFile (mmap == 0) and MMAPFile (mmap != 0)
 * directives: validate the configured file and insert it into the
 * server's file cache hash, either as a cached open file handle or as an
 * mmap'ed region.  All failures are logged as warnings and the file is
 * simply skipped — config processing continues.
 */
static void cache_the_file(cmd_parms *cmd, const char *filename, int mmap)
{
    a_server_config *sconf;
    a_file *new_file;
    a_file tmp;
    apr_file_t *fd = NULL;
    apr_status_t rc;
    const char *fspec;

    /* Resolve the configured path relative to ServerRoot. */
    fspec = ap_server_root_relative(cmd->pool, filename);
    if (!fspec) {
        ap_log_error(APLOG_MARK, APLOG_WARNING, APR_EBADPATH, cmd->server, APLOGNO(00794)
                     "invalid file path "
                     "%s, skipping", filename);
        return;
    }
    if ((rc = apr_stat(&tmp.finfo, fspec, APR_FINFO_MIN, cmd->temp_pool)) != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_WARNING, rc, cmd->server, APLOGNO(00795)
                     "unable to stat(%s), skipping", fspec);
        return;
    }
    /* Only plain files are cacheable. */
    if (tmp.finfo.filetype != APR_REG) {
        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, cmd->server, APLOGNO(00796)
                     "%s isn't a regular file, skipping", fspec);
        return;
    }
    if (tmp.finfo.size > AP_MAX_SENDFILE) {
        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, cmd->server, APLOGNO(00797)
                     "%s is too large to cache, skipping", fspec);
        return;
    }

    /* Open from cmd->pool so the handle lives for the server lifetime;
     * APR_XTHREAD because many request threads will share it. */
    rc = apr_file_open(&fd, fspec, APR_READ | APR_BINARY | APR_XTHREAD, APR_OS_DEFAULT, cmd->pool);
    if (rc != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_WARNING, rc, cmd->server, APLOGNO(00798)
                     "unable to open(%s, O_RDONLY), skipping", fspec);
        return;
    }
    apr_file_inherit_set(fd);

    /* WooHoo, we have a file to put in the cache */
    new_file = apr_pcalloc(cmd->pool, sizeof(a_file));
    new_file->finfo = tmp.finfo;

#if APR_HAS_MMAP
    if (mmap) {
        /* MMAPFile directive. MMAP'ing the file
         * XXX: APR_HAS_LARGE_FILES issue; need to reject this request if
         * size is greater than MAX(apr_size_t) (perhaps greater than 1M?).
         */
        if ((rc = apr_mmap_create(&new_file->mm, fd, 0,
                                  (apr_size_t)new_file->finfo.size,
                                  APR_MMAP_READ, cmd->pool)) != APR_SUCCESS) {
            apr_file_close(fd);
            ap_log_error(APLOG_MARK, APLOG_WARNING, rc, cmd->server, APLOGNO(00799)
                         "unable to mmap %s, skipping", filename);
            return;
        }
        /* The mapping keeps the data alive; the descriptor can go. */
        apr_file_close(fd);
        new_file->is_mmapped = TRUE;
    }
#endif
#if APR_HAS_SENDFILE
    if (!mmap) {
        /* CacheFile directive. Caching the file handle */
        new_file->is_mmapped = FALSE;
        new_file->file = fd;
    }
#endif

    /* Precompute the headers served with the file. */
    new_file->filename = fspec;
    apr_rfc822_date(new_file->mtimestr, new_file->finfo.mtime);
    apr_snprintf(new_file->sizestr, sizeof new_file->sizestr, "%" APR_OFF_T_FMT, new_file->finfo.size);

    sconf = ap_get_module_config(cmd->server->module_config, &file_cache_module);
    apr_hash_set(sconf->fileht, new_file->filename, strlen(new_file->filename), new_file);
}
/*
 * 'Require file-owner' authorization check: grant access only when the
 * authenticated user name equals the name of the owner of the file being
 * served (r->filename).  Denied on platforms without user support, when
 * no user is authenticated, or whenever owner information cannot be
 * obtained; every denial (except the missing-user case) is logged.
 */
static authz_status fileowner_check_authorization(request_rec *r,
                                                  const char *require_args,
                                                  const void *parsed_require_args)
{
    char *why = NULL;
    apr_status_t rv = 0;

#if !APR_HAS_USER
    /* No notion of file ownership on this platform: always deny. */
    why = "'Require file-owner' is not supported on this platform.";
    ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01632)
                  "Authorization of user %s to access %s failed, reason: %s",
                  r->user, r->uri, why ? why : "unknown");
    return AUTHZ_DENIED;
#else  /* APR_HAS_USER */
    char *file_owner = NULL;
    apr_finfo_t st;

    /* Authentication has to happen first. */
    if (!r->user) {
        return AUTHZ_DENIED_NO_USER;
    }

    if (!r->filename) {
        why = "no filename available";
        ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01633)
                      "Authorization of user %s to access %s failed, reason: %s",
                      r->user, r->uri, why ? why : "unknown");
        return AUTHZ_DENIED;
    }

    rv = apr_stat(&st, r->filename, APR_FINFO_USER, r->pool);
    if (rv != APR_SUCCESS) {
        why = apr_pstrcat(r->pool, "could not stat file ", r->filename, NULL);
        ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01634)
                      "Authorization of user %s to access %s failed, reason: %s",
                      r->user, r->uri, why ? why : "unknown");
        return AUTHZ_DENIED;
    }

    if (!(st.valid & APR_FINFO_USER)) {
        why = "no file owner information available";
        ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01635)
                      "Authorization of user %s to access %s failed, reason: %s",
                      r->user, r->uri, why ? why : "unknown");
        return AUTHZ_DENIED;
    }

    rv = apr_uid_name_get(&file_owner, st.user, r->pool);
    if (rv != APR_SUCCESS || !file_owner) {
        why = "could not get name of file owner";
        ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01636)
                      "Authorization of user %s to access %s failed, reason: %s",
                      r->user, r->uri, why ? why : "unknown");
        return AUTHZ_DENIED;
    }

    if (strcmp(file_owner, r->user) != 0) {
        why = apr_psprintf(r->pool, "file owner %s does not match.",
                           file_owner);
        ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01637)
                      "Authorization of user %s to access %s failed, reason: %s",
                      r->user, r->uri, why ? why : "unknown");
        return AUTHZ_DENIED;
    }

    /* this user is authorized */
    return AUTHZ_GRANTED;
#endif /* APR_HAS_USER */
}
/**
 * Advance the cursor to the next row (file/directory entry) of the
 * virtual table.  Recursively walks the directory tree:
 *
 *  - Reads the next dirent of the current directory.  A subdirectory is
 *    descended into (and itself becomes the next row); a file becomes the
 *    next row directly.
 *  - When a directory is exhausted, pops back to its parent
 *    (move_up_directory) and continues, or, at the top level, asks
 *    next_directory() for the next search path (which also handles the
 *    "top-level entry is a plain file" case, flagged by dir == NULL, and
 *    sets eof when nothing is left).
 *
 * Fixes vs. the original: the child path is built with snprintf instead
 * of sprintf (no stack-buffer overflow on deep paths; over-long paths are
 * skipped), and malloc/strdup failures return SQLITE_NOMEM instead of
 * dereferencing NULL.
 */
static int vt_next(sqlite3_vtab_cursor *cur)
{
    vtab_cursor *p_cur = (vtab_cursor*)cur;

read_next_entry:

    /* Special case: the current top-level "directory" is actually a
     * plain file (next_directory() sets dir == NULL for that case).
     * Move on to the next top-level search path. */
    if (p_cur->current_node->dir == NULL) {
        return next_directory(p_cur);
    }

    /* Increment the current row count. */
    p_cur->count += 1;

    struct filenode* d = p_cur->current_node;
    struct filenode* prev_d = d;

reread_next_entry:

    /* Read the next entry in the directory (d->dir); fills d->dirent. */
    if ( apr_dir_read( &d->dirent,
                       APR_FINFO_DIRENT|APR_FINFO_PROT|APR_FINFO_TYPE|
                       APR_FINFO_NAME|APR_FINFO_SIZE, d->dir) != APR_SUCCESS )
    {
        /* No more entries in this directory. */
        if (d->parent == NULL)
        {
            /* Top level: next_directory() either loads the next search
             * path's first row or sets eof = 1.  Nothing left to do. */
            return next_directory(p_cur);
        }
        else
        {
            /* Free this filenode and resume iterating in the parent. */
            d = move_up_directory(p_cur);
            goto read_next_entry;
        }
    }

    /* If the current dirent is a directory, descend into it. */
    if (d->dirent.filetype == APR_DIR)
    {
        /* Skip . and .. entries */
        if (d->dirent.name != NULL)
        {
            if (strcmp(d->dirent.name, ".") == 0 ||
                strcmp(d->dirent.name, "..") == 0)
            {
                goto read_next_entry;
            }
        }

        /* Build the child's full path.  BUGFIX: bounded snprintf instead
         * of sprintf — a path deeper than the buffer is skipped rather
         * than overflowing the stack. */
        char path[1024];
        int n = snprintf(path, sizeof(path), "%s/%s", d->path, d->dirent.name);
        if (n < 0 || (size_t)n >= sizeof(path))
        {
            goto read_next_entry;
        }

        /* Allocate space for new filenode and initialize members.
         * BUGFIX: allocation failures now report SQLITE_NOMEM. */
        d = malloc(sizeof(struct filenode));
        if (d == NULL)
        {
            return SQLITE_NOMEM;
        }
        d->path = strdup(path);
        if (d->path == NULL)
        {
            free(d);
            return SQLITE_NOMEM;
        }
        d->parent = p_cur->current_node;

        /* See note ZERO-FILL DIRENT in next_directory(). */
        memset(&d->dirent, 0, sizeof(apr_finfo_t));

        /* Set current pointer to the new child node. */
        p_cur->current_node = d;

        /* Release per-row scratch memory. */
        apr_pool_clear(p_cur->tmp_pool);

        /* Open the directory. */
        if ((p_cur->status = apr_dir_open(&d->dir, d->path, p_cur->pool)) != APR_SUCCESS)
        {
            fprintf( stderr, "Failed to open directory: %s\n",
                     p_cur->current_node->path );

            /* NULL dir so move_up_directory() doesn't try to
             * apr_dir_close() it (-> core dump). */
            p_cur->current_node->dir = NULL;

            /* Skip to the next entry of the previous directory. */
            deallocate_filenode(d);
            p_cur->current_node = d = prev_d;
            goto reread_next_entry;
        }

        /* Directory opened: stat it so its own info becomes the next row
         * in the result set. */
        apr_stat( &d->dirent, d->path,
                  APR_FINFO_DIRENT|APR_FINFO_TYPE|APR_FINFO_NAME,
                  p_cur->pool );
    }

    return SQLITE_OK;
}
/* Update cursor to point to next top-level directory to search, if any.
 *
 * Pulls the next search path via next_path(); sets eof and returns
 * SQLITE_OK when the list is exhausted.  Otherwise stats the path:
 *  - nonexistent path  -> eof + zErrMsg + SQLITE_ERROR,
 *  - directory         -> opened into current_node->dir (open failure is
 *                         also eof + zErrMsg + SQLITE_ERROR),
 *  - plain file        -> dir set to NULL so vt_next() knows this
 *                         top-level entry is itself the row.
 * On success the entry's own stat info becomes the current row. */
static int next_directory(vtab_cursor *p_cur)
{
    vtab *p_vt = (vtab*)((sqlite3_vtab_cursor*)p_cur)->pVtab;

    /* Get the next path name in the search list. If there isn't next_path()
     * will return 0, as do we. */
    if (next_path(p_cur) == 0)
    {
        /* No more directories to search. End of result set. */
        p_cur->eof = 1;
        return SQLITE_OK;
    }

    /* Now try to open the directory. If it can be opened, set up the dirent
    ** to return as a row in the rowset. */

    /* ZERO-FILL DIRENT: Very important to zero-fill here, otherwise we may
    ** have dirent.fname and/or dirent.name members pointing to invalid
    ** addresses after apr_stat(). Our code depends on being able to check
    ** NULL status of these members, so all pointers must be NULL by default. */
    memset(&p_cur->current_node->dirent, 0, sizeof(apr_finfo_t));

    /* Check to see if the directory exists */
    p_cur->status = apr_stat( &p_cur->current_node->dirent,
                              p_cur->current_node->path,
                              APR_FINFO_TYPE, p_cur->pool );

    if (p_cur->status != APR_SUCCESS)
    {
        /* Directory does not exist: end the scan and report the error
         * through the vtab's zErrMsg (freeing any previous message). */
        p_cur->eof = 1;

        if (p_vt->base.zErrMsg != NULL)
        {
            sqlite3_free(p_vt->base.zErrMsg);
        }

        printf( "Invalid directory: %s\n", p_cur->current_node->path );
        p_vt->base.zErrMsg = sqlite3_mprintf( "Invalid directory: %s",
                                              p_cur->current_node->path );
        return SQLITE_ERROR;
    }
    else
    {
        /* If this entry is a directory, then open it */
        if (p_cur->current_node->dirent.filetype == APR_DIR)
        {
            p_cur->status = apr_dir_open( &p_cur->current_node->dir,
                                          p_cur->current_node->path,
                                          p_cur->pool);

            if (p_cur->status != APR_SUCCESS)
            {
                /* Could not open directory: same error protocol as above. */
                p_cur->eof = 1;

                if (p_vt->base.zErrMsg != NULL)
                {
                    sqlite3_free(p_vt->base.zErrMsg);
                }

                printf("Could not open directory: %s\n",
                       p_cur->current_node->path );
                p_vt->base.zErrMsg = sqlite3_mprintf( "Could not open directory: %s",
                                                      p_cur->current_node->path );
                return SQLITE_ERROR;
            }
        }
        else
        {
            /** Set dir to NULL to indicate that this entry is NOT a
             * directory. In this case, we have a top-level file, not a
             * top-level directory. vt_next() will pick up on this and do
             * the Right Thing. */
            p_cur->current_node->dir = NULL;
        }
    }

    /** Move cursor to first row: get the directory information on the top
     * level directory/file itself. */
    apr_stat( &p_cur->current_node->dirent, p_cur->current_node->path,
              APR_FINFO_DIRENT|APR_FINFO_TYPE|APR_FINFO_NAME, p_cur->pool );

    return SQLITE_OK;
}
/*
 * Handle the `NSSRandomSeed <context> <source> [bytes]` directive.
 *
 * arg1: seeding context — only "startup" is supported ("connect" is
 *       explicitly rejected).
 * arg2: seed source — "file:<path>", "exec:<path>", "defer:<path>",
 *       "builtin", or a bare path (treated as a file).
 * arg3: optional byte count to read from the source (whole file if absent;
 *       not allowed for the builtin source).
 *
 * Returns NULL on success, or an error string allocated from cmd->pool.
 *
 * FIX: the "defer:" branch skipped only 5 characters of the 6-character
 * prefix, so the resolved path kept a stray leading ':'.
 */
const char *nss_cmd_NSSRandomSeed(cmd_parms *cmd, void *dcfg,
                                  const char *arg1, const char *arg2,
                                  const char *arg3)
{
    SSLModConfigRec *mc = myModConfig(cmd->server);
    const char *err;
    ssl_randseed_t *seed;
    int arg2len = strlen(arg2);

    if ((err = ap_check_cmd_context(cmd, GLOBAL_ONLY))) {
        return err;
    }

    /* Only run through this once. Otherwise the random seed sources are
     * pushed into the array for each server start (and we are guaranteed 2) */
    if (mc->nInitCount >= 1) {
        return NULL;
    }

    seed = apr_array_push(mc->aRandSeed);

    if (strcEQ(arg1, "startup")) {
        seed->nCtx = SSL_RSCTX_STARTUP;
    }
    else if (strcEQ(arg1, "connect")) {
        return apr_pstrcat(cmd->pool, "NSSRandomSeed: "
                           "mod_nss doesn't do per-connection random seeding",
                           NULL);
    }
    else {
        return apr_pstrcat(cmd->pool, "NSSRandomSeed: "
                           "invalid context: `", arg1, "'", NULL);
    }

    if ((arg2len > 5) && strEQn(arg2, "file:", 5)) {
        seed->nSrc = SSL_RSSRC_FILE;
        seed->cpPath = ap_server_root_relative(mc->pPool, arg2+5);
    }
    else if ((arg2len > 5) && strEQn(arg2, "exec:", 5)) {
        seed->nSrc = SSL_RSSRC_EXEC;
        seed->cpPath = ap_server_root_relative(mc->pPool, arg2+5);
    }
    else if ((arg2len > 6) && strEQn(arg2, "defer:", 6)) {
        seed->nSrc = SSL_RSSRC_FILE;
        /* FIX: skip the full "defer:" prefix (6 chars), not 5 */
        seed->cpPath = ap_server_root_relative(mc->pPool, arg2+6);
    }
    else if (strcEQ(arg2, "builtin")) {
        seed->nSrc = SSL_RSSRC_BUILTIN;
        seed->cpPath = NULL;
    }
    else {
        /* bare path: treat as a file source */
        seed->nSrc = SSL_RSSRC_FILE;
        seed->cpPath = ap_server_root_relative(mc->pPool, arg2);
    }

    if (seed->nSrc != SSL_RSSRC_BUILTIN) {
        apr_finfo_t finfo;
        if (!seed->cpPath) {
            return apr_pstrcat(cmd->pool,
                               "Invalid NSSRandomSeed path ",
                               arg2, NULL);
        }
        if (apr_stat(&finfo, seed->cpPath,
                     APR_FINFO_TYPE|APR_FINFO_SIZE, cmd->pool) != 0) {
            return apr_pstrcat(cmd->pool,
                               "NSSRandomSeed: source path '",
                               seed->cpPath, "' does not exist", NULL);
        }
    }

    if (!arg3) {
        seed->nBytes = 0; /* read whole file */
    }
    else {
        if (seed->nSrc == SSL_RSSRC_BUILTIN) {
            return "NSSRandomSeed: byte specification not "
                   "allowed for builtin seed source";
        }
        seed->nBytes = atoi(arg3);
        if (seed->nBytes < 0) {
            return "NSSRandomSeed: invalid number of bytes specified";
        }
    }

    return NULL;
}
/* Dumps a node to stdout in svn dumpfile format.
 *
 * Emits the Node-path/Node-kind/Node-action headers, optional copyfrom
 * information, property and content lengths, then the properties and
 * (optionally deltified) contents. Returns SVN_NO_ERROR on success or a
 * propagated svn_error_t from the helpers.
 *
 * FIXES: use SVN_NO_ERROR instead of a bare `return 0` for the svn_error_t*
 * return; the stat-failure path now reports the file that was actually
 * stat'ed (the delta file when DF_USE_DELTAS is active), not always
 * node->filename. Comment typos corrected. */
static svn_error_t *delta_dump_node(de_node_baton_t *node)
{
    de_baton_t *de_baton = node->de_baton;
    session_t *session = de_baton->session;
    dump_options_t *opts = de_baton->opts;
    char *path = node->path;
    unsigned long prop_len, content_len;
    char dump_content = 0, dump_props = 0;
    apr_hash_index_t *hi;

    /* Check if the node needs to be dumped at all */
    if (!node->dump_needed) {
        DEBUG_MSG("delta_dump_node(%s): aborting: dump not needed\n", node->path);
        return SVN_NO_ERROR;
    }

    /* Check if this is a dry run */
    if (opts->flags & DF_DRY_RUN) {
        node->dump_needed = 0;
        DEBUG_MSG("delta_dump_node(%s): aborting: DF_DRY_RUN\n", node->path);
        return SVN_NO_ERROR;
    }

    /* If the node is a directory and no properties have been changed,
       we don't need to dump it */
    if ((node->action == 'M') && (node->kind == svn_node_dir) && (!node->props_changed)) {
        node->dump_needed = 0;
        DEBUG_MSG("delta_dump_node(%s): aborting: directory && action == 'M' && !props_changed\n", node->path);
        return SVN_NO_ERROR;
    }

    /* If the node's parent has been copied, we don't need to dump it if its
       contents haven't changed. Additionally, make sure the node doesn't
       contain extra copyfrom information. */
    if ((node->cp_info == CPI_COPY) && (node->action == 'A') && (node->copyfrom_path == NULL)) {
        node->dump_needed = 0;
        DEBUG_MSG("delta_dump_node(%s): aborting: cp_info == CPI_COPY && action == 'A'\n", node->path);
        return SVN_NO_ERROR;
    }

    DEBUG_MSG("delta_dump_node(%s): started\n", node->path);

    /* Check for potential copy. This is needed here because it might
       change the action. */
    delta_check_copy(node);
    if (node->action == 'R') {
        /* Special handling for replacements */
        DEBUG_MSG("delta_dump_node(%s): running delta_dump_replace()\n", node->path);
        return delta_dump_replace(node);
    }

    /* Dump node path */
    if (opts->prefix != NULL) {
        printf("%s: %s%s\n", SVN_REPOS_DUMPFILE_NODE_PATH, opts->prefix, path);
    } else {
        printf("%s: %s\n", SVN_REPOS_DUMPFILE_NODE_PATH, path);
    }

    /* Dump node kind */
    if (node->action != 'D') {
        printf("%s: %s\n", SVN_REPOS_DUMPFILE_NODE_KIND,
               node->kind == svn_node_file ? "file" : "dir");
    }

    /* Dump action */
    printf("%s: ", SVN_REPOS_DUMPFILE_NODE_ACTION);
    switch (node->action) {
        case 'M':
            printf("change\n");
            if ((de_baton->opts->verbosity > 0) && !(de_baton->opts->flags & DF_DRY_RUN)) {
                fprintf(stderr, _(" * editing path : %s ... "), path);
            }
            break;

        case 'A':
            printf("add\n");
            if ((de_baton->opts->verbosity > 0) && !(de_baton->opts->flags & DF_DRY_RUN)) {
                fprintf(stderr, _(" * adding path : %s ... "), path);
            }
            break;

        case 'D':
            printf("delete\n");
            if ((de_baton->opts->verbosity > 0) && !(de_baton->opts->flags & DF_DRY_RUN)) {
                fprintf(stderr, _(" * deleting path : %s ... "), path);
            }
            /* We can finish early here: a deletion has no kind, props
               or content. */
            printf("\n\n");
            delta_mark_node(node);
            DEBUG_MSG("delta_dump_node(%s): deleted -> finished\n", node->path);
            return SVN_NO_ERROR;

        case 'R':
            printf("replace\n");
            break;
    }

    /* Check if the node content needs to be dumped */
    if (node->kind == svn_node_file && node->applied_delta) {
        DEBUG_MSG("delta_dump_node(%s): dump_content = 1\n", node->path);
        dump_content = 1;
    }

    /* Check if the node properties need to be dumped */
    if ((node->props_changed) || (node->action == 'A')) {
        DEBUG_MSG("delta_dump_node(%s): dump_props = 1\n", node->path);
        dump_props = 1;
    }

    /* Output copy information if necessary */
    if (node->cp_info == CPI_COPY) {
        const char *copyfrom_path = delta_get_local_copyfrom_path(session->prefix, node->copyfrom_path);

        printf("%s: %ld\n", SVN_REPOS_DUMPFILE_NODE_COPYFROM_REV, node->copyfrom_rev_local);
        if (opts->prefix != NULL) {
            printf("%s: %s%s\n", SVN_REPOS_DUMPFILE_NODE_COPYFROM_PATH, opts->prefix, copyfrom_path);
        } else {
            printf("%s: %s\n", SVN_REPOS_DUMPFILE_NODE_COPYFROM_PATH, copyfrom_path);
        }

        /* Maybe we don't need to dump the contents: if the copy source has
           the same md5 sum, the data is already in the repository. */
        if ((node->action == 'A') && (node->kind == svn_node_file)) {
            unsigned char *prev_md5 = rhash_get(md5_hash, copyfrom_path, APR_HASH_KEY_STRING);
            if (prev_md5 && !memcmp(node->md5sum, prev_md5, APR_MD5_DIGESTSIZE)) {
                DEBUG_MSG("md5sum matches\n");
                dump_content = 0;
            } else {
#ifdef DEBUG
                if (prev_md5) {
                    DEBUG_MSG("md5sum doesn't match: (%s != %s)\n",
                              svn_md5_digest_to_cstring(node->md5sum, node->pool),
                              svn_md5_digest_to_cstring(prev_md5, node->pool));
                } else {
                    DEBUG_MSG("md5sum of %s not available\n", copyfrom_path);
                }
#endif
                dump_content = 1;
            }
        }

        if (!dump_content && !node->props_changed) {
            printf("\n\n");
            delta_mark_node(node);
            return SVN_NO_ERROR;  /* FIX: was `return 0` */
        } else if (node->kind == svn_node_dir) {
            dump_content = 0;
        }
    }

    /* Deltify? */
    if (dump_content && (opts->flags & DF_USE_DELTAS)) {
        svn_error_t *err;
        if ((err = delta_deltify_node(node))) {
            return err;
        }
    }

#ifdef DUMP_DEBUG
    /* Dump some extra debug info */
    if (dump_content) {
        printf("Debug-filename: %s\n", node->filename);
        if (node->old_filename) {
            printf("Debug-old-filename: %s\n", node->old_filename);
        }
        if (opts->flags & DF_USE_DELTAS) {
            printf("Debug-delta-filename: %s\n", node->delta_filename);
        }
    }
#endif

    /* Dump properties & content */
    prop_len = 0;
    content_len = 0;

    /* Dump property size */
    for (hi = apr_hash_first(node->pool, node->properties); hi; hi = apr_hash_next(hi)) {
        const char *key;
        svn_string_t *value;
        apr_hash_this(hi, (const void **)(void *)&key, NULL, (void **)(void *)&value);

        /* Don't dump the property if it has been deleted */
        if (apr_hash_get(node->del_properties, key, APR_HASH_KEY_STRING) != NULL) {
            continue;
        }
        prop_len += property_strlen(node->pool, key, value->data);
    }

    /* In dump format version 3, deleted properties should be dumped, too */
    if (opts->dump_format == 3) {
        for (hi = apr_hash_first(node->pool, node->del_properties); hi; hi = apr_hash_next(hi)) {
            const char *key;
            void *value;
            apr_hash_this(hi, (const void **)(void *)&key, NULL, &value);
            prop_len += property_del_strlen(node->pool, key);
        }
    }

    if (prop_len > 0) {
        dump_props = 1;
    }
    if (dump_props) {
        if (opts->dump_format == 3) {
            printf("%s: true\n", SVN_REPOS_DUMPFILE_PROP_DELTA);
        }
        prop_len += PROPS_END_LEN;
        printf("%s: %lu\n", SVN_REPOS_DUMPFILE_PROP_CONTENT_LENGTH, prop_len);
    }

    /* Dump content size */
    if (dump_content) {
        char *fpath = (opts->flags & DF_USE_DELTAS) ? node->delta_filename : node->filename;
        apr_finfo_t *info = apr_pcalloc(node->pool, sizeof(apr_finfo_t));
        if (apr_stat(info, fpath, APR_FINFO_SIZE, node->pool) != APR_SUCCESS) {
            /* FIX: report the file that was stat'ed (may be the delta file) */
            DEBUG_MSG("delta_dump_node: FATAL: cannot stat %s\n", fpath);
            return svn_error_create(1, NULL, apr_psprintf(session->pool, "Cannot stat %s", fpath));
        }
        content_len = (unsigned long)info->size;

        if (opts->flags & DF_USE_DELTAS) {
            printf("%s: true\n", SVN_REPOS_DUMPFILE_TEXT_DELTA);
        }
        printf("%s: %lu\n", SVN_REPOS_DUMPFILE_TEXT_CONTENT_LENGTH, content_len);

        if (*node->md5sum != 0x00) {
            printf("%s: %s\n", SVN_REPOS_DUMPFILE_TEXT_CONTENT_MD5,
                   svn_md5_digest_to_cstring(node->md5sum, node->pool));
        }
    }

    printf("%s: %lu\n\n", SVN_REPOS_DUMPFILE_CONTENT_LENGTH, (unsigned long)prop_len+content_len);

    /* Dump properties */
    if (dump_props) {
        for (hi = apr_hash_first(node->pool, node->properties); hi; hi = apr_hash_next(hi)) {
            const char *key;
            svn_string_t *value;
            apr_hash_this(hi, (const void **)(void *)&key, NULL, (void **)(void *)&value);

            /* Don't dump the property if it has been deleted */
            if (apr_hash_get(node->del_properties, key, APR_HASH_KEY_STRING) != NULL) {
                continue;
            }
            property_dump(key, value->data);
        }

        /* In dump format version 3, deleted properties should be dumped, too */
        if (opts->dump_format == 3) {
            for (hi = apr_hash_first(node->pool, node->del_properties); hi; hi = apr_hash_next(hi)) {
                const char *key;
                void *value;
                apr_hash_this(hi, (const void **)(void *)&key, NULL, &value);
                property_del_dump(key);
            }
        }

        printf(PROPS_END);
    }

    /* Dump content: cat the (possibly deltified) file to stdout, flushing
       around the raw copy so stdio buffering cannot interleave output. */
    if (dump_content) {
        svn_error_t *err;
        apr_pool_t *pool = svn_pool_create(node->pool);
        const char *fpath = (opts->flags & DF_USE_DELTAS) ? node->delta_filename : node->filename;

        fflush(stdout);
        if ((err = delta_cat_file(pool, fpath))) {
            return err;
        }
        fflush(stdout);

        svn_pool_destroy(pool);
#ifndef DUMP_DEBUG
        if (opts->flags & DF_USE_DELTAS) {
            apr_file_remove(node->delta_filename, node->pool);
        }
#endif
    }

    printf("\n\n");
    delta_mark_node(node);
    return SVN_NO_ERROR;
}
/* translate_name hook: map the request's Host header onto a virtual
 * document root directory tree.
 *
 * Returns DECLINED when the module is disabled, the host is unusable,
 * the host is on the ignore list, or the computed docroot fails the
 * optional stat check; HTTP_FORBIDDEN for forbidden hosts; OK after
 * setting r->filename to the mapped path. */
static STATUS domaintree_hook_translate_name(request_rec *r)
{
    MDT_CNF *DT;
    char *host, *path, *docroot;

    /* module disabled or not configured for this server? */
    if ((!(DT = GET_MDT_CNF(r->server))) || (DT->enabled < 1)) {
        return DECLINED;
    }

#if DBG
    ap_log_error(DT_LOG_DBG "processid = %d", (int) getpid());
#endif

    /* get a usable host name */
    if (!(host = domaintree_host(r->pool, DT, ap_get_server_name(r)))) {
        return DECLINED;
    }

    /* ignore? */
    if (domaintree_test(DT, host, DT->ignore->nelts, (const char **) DT->ignore->elts, TEST_IS_AOS, NULL, NULL)) {
        return DECLINED;
    }

    /* forbid? */
    if (domaintree_test(DT, host, DT->forbid->nelts, (const char **) DT->forbid->elts, TEST_IS_AOS, NULL, NULL)) {
        return HTTP_FORBIDDEN;
    }

    /* check cache; on miss (or cache disabled, clim < 1) build the path,
     * apply aliases, and cache the result when caching is enabled */
    if ((DT->dircache.clim < 1) || (!(path = domaintree_cache_get(DT, r->request_time, host)))) {
        /* build path */
        if (!(path = domaintree_path(r->pool, DT, host))) {
            return DECLINED;
        }

        /* apply any aliases */
        if (apr_table_elts(DT->aliases.faketable)->nelts) {
            domaintree_fake(r->pool, DT, &path);
        }

        /* add to cache */
        if (DT->dircache.clim > 0) {
            domaintree_cache_set(DT, r->request_time, host, path);
        }
    }

    /* compose virtual docroot; struniqchr presumably collapses the
     * duplicate '/' separators — confirm against its definition */
    docroot = struniqchr(apr_pstrcat(r->pool, DT->prefix, "/", path, "/", DT->suffix, "/", NULL), '/');

    /* stat docroot if configured to; APR_INCOMPLETE still counts as found */
    if (DT->statroot > 0) {
        apr_finfo_t sb;

        switch (apr_stat(&sb, docroot, APR_FINFO_MIN, r->pool)) {
            case APR_SUCCESS:
            case APR_INCOMPLETE:
                ap_log_error(DT_LOG_DBG "stat path = %s (success)", docroot);
                break;
            default:
                ap_log_error(DT_LOG_DBG "stat path = %s (failure)", docroot);
                return DECLINED;
        }
    }

    /* set virtual docroot for CGI/SSI consumers */
    apr_table_set(r->subprocess_env, "VIRTUAL_DOCUMENT_ROOT", docroot);

#ifdef HAVE_UNIX_SUEXEC
    /* set suexec note: the matched suffix is "<user>[/...]"; keep only the
     * first path segment as the user name */
    {
        const char *username, *separator;

        if (domaintree_test(DT, docroot, DT->suexec->nelts, DT->suexec->elts, TEST_IS_BOS, NULL, &username)) {
            if ((separator = strchr(username, '/'))) {
                username = apr_pstrndup(r->pool, username, separator-username);
            } else {
                username = apr_pstrdup(r->pool, username);
            }
            apr_table_setn(r->notes, "mod_domaintree.suexec", username);
        }
    }
#endif

    /* done: note the NULL in the middle of apr_pstrcat's argument list is
     * deliberate — an empty URI terminates the varargs early, leaving just
     * the docroot; otherwise a leading '/' in the URI is skipped since
     * docroot already ends with one */
    r->canonical_filename = "";
    r->filename = apr_pstrcat(r->pool, docroot, EMPTY(r->uri) ? NULL : ('/' == *r->uri ? r->uri + 1 : r->uri), NULL);
    ap_log_error(DT_LOG_DBG "path done = %s", r->filename);

    return OK;
}
///////////////////////////////////////////////////////////////////////////////// // // Function: // // Purpose: // // Parameters: // // Return value: // // Author: Komatsu Yuji(Zheng Chuyu) // ///////////////////////////////////////////////////////////////////////////////// int jhklog_rotate(void) { char errmsg[100]; char logfile_old[MAX_STRING_LEN]; int rename_flag; apr_status_t rv; apr_finfo_t finfo, finfo_log; if (jx_log == NULL) return -1; rv = apr_file_info_get(&finfo, APR_FINFO_INODE, jx_log->fp); if (rv != APR_SUCCESS) return -1; // no rotate if (jx_log->logsize == 0) { rv = apr_stat(&finfo_log, jx_log->logfile, APR_FINFO_INODE, jx_log->pool); if (rv == APR_SUCCESS) { if (finfo.inode == finfo_log.inode) return 0; } jhklog_unlock(); jhklog_close(); jhklog_open(); jhklog_lock(); return 0; } // don't rotate if (finfo.size < jx_log->logsize) return 0; // create new log file rename_flag = 0; snprintf(logfile_old, sizeof(logfile_old), "%s.old", jx_log->logfile); rv = apr_stat(&finfo_log, logfile_old, APR_FINFO_INODE, jx_log->pool); if (rv == APR_SUCCESS) { if (finfo.inode != finfo_log.inode) { rename_flag = 1; } } else { rename_flag = 1; } if (rename_flag == 1) { rv = apr_file_rename(jx_log->logfile, logfile_old, jx_log->pool); if (rv != APR_SUCCESS) { apr_strerror(rv, errmsg, sizeof(errmsg)); fprintf(stderr, "can not rename [%s] to [%s]. %s\n", jx_log->logfile, logfile_old, errmsg); return -1; } } jhklog_unlock(); jhklog_close(); jhklog_open(); jhklog_lock(); return 0; }
/* Build and send the HTTP response identified by res_id: look up the
 * Content-type and status headers, then stream the body file to the client.
 *
 * Returns EXIT_SUCCESS on success or HTTP_INTERNAL_SERVER_ERROR on any
 * lookup/open/stat/send failure.
 *
 * FIX: the DEBUG log printed an apr_off_t with "%ld"; apr_off_t is not
 * `long` on all platforms, so use APR_OFF_T_FMT instead. */
static int read_response(request_rec *r, char *res_id)
{
    apr_file_t *fd;
    apr_status_t rv = 0;
    apr_size_t bytes_sent;
    apr_finfo_t fi;
    char res_fn[256];
    const char *res_content;
    const char *res_status;
    int rc = 0;

    /***** For Http Header ******/
    rc = GetHttpHeader(res_id, "Content-type", &res_content);
    if (rc == DSO_NO_MATCH) { /* no matching key: fall back to default type */
        res_content = "text/xml";
    }
    if (rc == EXIT_FAILURE) {
        ap_log_error(APLOG_MARK, APLOG_EMERG, rv, NULL,
                     "*** mod_err *** : GetHttpHeader(res_id)");
        return HTTP_INTERNAL_SERVER_ERROR;
    }
    r->content_type = (char *) apr_pstrdup(r->pool, res_content);
#ifdef DEBUG
    ap_log_error(APLOG_MARK, APLOG_NOTICE, rv, NULL,
                 "*** mod *** : GetHttpHeader header = %s", res_content);
#endif

    rc = GetHttpHeader(res_id, "status", &res_status);
    if (rc == DSO_NO_MATCH) { /* no matching key: default to 200 */
        res_status = "200";
    }
    if (rc == EXIT_FAILURE) {
        ap_log_error(APLOG_MARK, APLOG_EMERG, rv, NULL,
                     "*** mod_err *** : GetHttpHeader(req_id)");
        return HTTP_INTERNAL_SERVER_ERROR;
    }
    r->status = atoi(res_status);
#ifdef DEBUG
    ap_log_error(APLOG_MARK, APLOG_NOTICE, rv, NULL,
                 "*** mod *** : GetHttpHeader header = %s", res_status);
#endif

    /***** For Http Body ******/
    rc = GetFileName(res_id, res_fn);
    if (rc == EXIT_FAILURE) {
        ap_log_error(APLOG_MARK, APLOG_EMERG, rv, NULL,
                     "*** mod_err *** : GetFileName(res_id)");
        return HTTP_INTERNAL_SERVER_ERROR;
    }
#ifdef DEBUG
    ap_log_error(APLOG_MARK, APLOG_NOTICE, rv, NULL,
                 "*** mod *** : GetFileName res_fn = %s", res_fn);
#endif

    rv = apr_file_open(&fd, res_fn, APR_READ, -1, r->pool);
    if (rv != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_EMERG, rv, NULL,
                     "*** mod_err *** : apr_file_open");
        return HTTP_INTERNAL_SERVER_ERROR;
    }

    /****** Get file size of response message ******/
    rv = apr_stat(&fi, res_fn, APR_FINFO_SIZE, r->pool);
    if (rv != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_EMERG, rv, NULL,
                     "*** mod_err *** : apr_stat");
        return HTTP_INTERNAL_SERVER_ERROR;
    }
#ifdef DEBUG
    /* FIX: apr_off_t needs APR_OFF_T_FMT, not "%ld" */
    ap_log_error(APLOG_MARK, APLOG_NOTICE, rv, NULL,
                 "*** mod *** : apr_stat res_size = %" APR_OFF_T_FMT, fi.size);
#endif

    WriteLog(9, "send to client start");

    /* send the file with size if known
     * NOTE(review): the proto_num < 1001 guard presumably sets an explicit
     * Content-Length only for pre-HTTP/1.1 clients (no chunked encoding) —
     * confirm against callers */
    if (r->proto_num < 1001)
        ap_set_content_length(r, fi.size);
    rv = ap_send_fd(fd, r, 0, ((fi.size > 0) ? fi.size : -1), &bytes_sent);
    if (rv != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_EMERG, rv, NULL,
                     "*** mod_err *** : ap_send_fd");
        return HTTP_INTERNAL_SERVER_ERROR;
    }

    /* Deliberately NOT closing fd here (original comment: "Must not file
     * close"); the pool cleanup owns it. */
    return EXIT_SUCCESS;
}
/** * Function used to create a lua_State instance bound into the web * server in the appropriate scope. */ lua_State *ap_lua_get_lua_state(apr_pool_t *lifecycle_pool, ap_lua_vm_spec *spec, request_rec* r) { lua_State *L = NULL; ap_lua_finfo *cache_info = NULL; int tryCache = 0; if (spec->scope == AP_LUA_SCOPE_SERVER) { char *hash; apr_reslist_t* reslist = NULL; ap_lua_server_spec* sspec = NULL; hash = apr_psprintf(r->pool, "reslist:%s", spec->file); #if APR_HAS_THREADS apr_thread_mutex_lock(ap_lua_mutex); #endif if (apr_pool_userdata_get((void **)&reslist, hash, r->server->process->pool) == APR_SUCCESS) { if (reslist != NULL) { if (apr_reslist_acquire(reslist, (void**) &sspec) == APR_SUCCESS) { L = sspec->L; cache_info = sspec->finfo; } } } if (L == NULL) { ap_lua_vm_spec* server_spec = copy_vm_spec(r->server->process->pool, spec); if ( apr_reslist_create(&reslist, spec->vm_min, spec->vm_max, spec->vm_max, 0, (apr_reslist_constructor) server_vm_construct, (apr_reslist_destructor) server_cleanup_lua, server_spec, r->server->process->pool) == APR_SUCCESS && reslist != NULL) { apr_pool_userdata_set(reslist, hash, NULL, r->server->process->pool); if (apr_reslist_acquire(reslist, (void**) &sspec) == APR_SUCCESS) { L = sspec->L; cache_info = sspec->finfo; } else { return NULL; } } } #if APR_HAS_THREADS apr_thread_mutex_unlock(ap_lua_mutex); #endif } else { if (apr_pool_userdata_get((void **)&L, spec->file, lifecycle_pool) != APR_SUCCESS) { L = NULL; } } if (L == NULL) { ap_log_perror(APLOG_MARK, APLOG_DEBUG, 0, lifecycle_pool, APLOGNO(01483) "creating lua_State with file %s", spec->file); /* not available, so create */ if (!vm_construct(&L, spec, lifecycle_pool)) { AP_DEBUG_ASSERT(L != NULL); apr_pool_userdata_set(L, spec->file, cleanup_lua, lifecycle_pool); } } if (spec->codecache == AP_LUA_CACHE_FOREVER || (spec->bytecode && spec->bytecode_len > 0)) { tryCache = 1; } else { char* mkey; if (spec->scope != AP_LUA_SCOPE_SERVER) { mkey = apr_psprintf(r->pool, 
"ap_lua_modified:%s", spec->file); apr_pool_userdata_get((void **)&cache_info, mkey, lifecycle_pool); if (cache_info == NULL) { cache_info = apr_pcalloc(lifecycle_pool, sizeof(ap_lua_finfo)); apr_pool_userdata_set((void*) cache_info, mkey, NULL, lifecycle_pool); } } if (spec->codecache == AP_LUA_CACHE_STAT) { apr_finfo_t lua_finfo; apr_stat(&lua_finfo, spec->file, APR_FINFO_MTIME|APR_FINFO_SIZE, lifecycle_pool); /* On first visit, modified will be zero, but that's fine - The file is loaded in the vm_construct function. */ if ((cache_info->modified == lua_finfo.mtime && cache_info->size == lua_finfo.size) || cache_info->modified == 0) { tryCache = 1; } cache_info->modified = lua_finfo.mtime; cache_info->size = lua_finfo.size; } else if (spec->codecache == AP_LUA_CACHE_NEVER) { if (cache_info->runs == 0) tryCache = 1; } cache_info->runs++; } if (tryCache == 0 && spec->scope != AP_LUA_SCOPE_ONCE) { int rc; ap_log_perror(APLOG_MARK, APLOG_DEBUG, 0, lifecycle_pool, APLOGNO(02332) "(re)loading lua file %s", spec->file); rc = luaL_loadfile(L, spec->file); if (rc != 0) { ap_log_perror(APLOG_MARK, APLOG_ERR, 0, lifecycle_pool, APLOGNO(02333) "Error loading %s: %s", spec->file, rc == LUA_ERRMEM ? "memory allocation error" : lua_tostring(L, 0)); return 0; } lua_pcall(L, 0, LUA_MULTRET, 0); } return L; }
/* GET handler: deliver the chunk resource to the output filter chain, or —
 * in update_only mode — just refresh the chunk's extended attributes.
 *
 * Returns NULL on success or a dav_error describing the failure. Request
 * duration statistics are recorded on every exit path.
 *
 * FIX: the update_only path called fileno(f) on the result of fopen()
 * without checking for NULL, crashing the worker when the chunk file could
 * not be opened. */
static dav_error *
dav_rawx_deliver(const dav_resource *resource, ap_filter_t *output)
{
    dav_rawx_server_conf *conf;
    apr_pool_t *pool;
    apr_bucket_brigade *bb = NULL;
    apr_status_t status;
    apr_bucket *bkt = NULL;
    dav_resource_private *ctx;
    dav_error *e = NULL;
    apr_finfo_t info;

    DAV_XDEBUG_RES(resource, 0, "%s(%s)", __FUNCTION__, resource_get_pathname(resource));

    pool = resource->pool;
    conf = resource_get_server_config(resource);

    /* Check resource type */
    if (DAV_RESOURCE_TYPE_REGULAR != resource->type) {
        e = server_create_and_stat_error(conf, pool, HTTP_CONFLICT, 0,
            "Cannot GET this type of resource.");
        goto end_deliver;
    }

    if (resource->collection) {
        e = server_create_and_stat_error(conf, pool, HTTP_CONFLICT, 0,
            "No GET on collections");
        goto end_deliver;
    }

    ctx = resource->info;

    if (ctx->update_only) {
        /* Check if it is not a busy file. We accept reads during
         * compression but not attr updates. */
        char *pending_file = apr_pstrcat(pool,
            resource_get_pathname(resource), ".pending", NULL);
        status = apr_stat(&info, pending_file, APR_FINFO_ATIME, pool);
        if (status == APR_SUCCESS) {
            e = server_create_and_stat_error(conf, pool, HTTP_FORBIDDEN, 0,
                "File in pending mode.");
            goto end_deliver;
        }

        GError *error_local = NULL;
        /* UPDATE chunk attributes and go on */
        const char *path = resource_get_pathname(resource);
        FILE *f = NULL;
        f = fopen(path, "r"); /* Try to open the file but forbids a creation */
        if (!f) {
            /* FIX: previously fileno(f) was reached with f == NULL */
            e = server_create_and_stat_error(conf, pool, HTTP_FORBIDDEN, 0,
                "File permissions deny server access.");
            goto end_deliver;
        }
        if (!set_rawx_info_to_fd(fileno(f), &error_local, &(ctx->chunk))) {
            fclose(f);
            e = server_create_and_stat_error(conf, pool, HTTP_FORBIDDEN, 0,
                apr_pstrdup(pool, gerror_get_message(error_local)));
            g_clear_error(&error_local);
            goto end_deliver;
        }
        fclose(f);
    }
    else {
        bb = apr_brigade_create(pool, output->c->bucket_alloc);

        if (!ctx->compression) {
            apr_file_t *fd = NULL;

            /* Try to open the file but forbids a creation */
            status = apr_file_open(&fd, resource_get_pathname(resource),
                APR_READ|APR_BINARY|APR_BUFFERED, 0, pool);
            if (APR_SUCCESS != status) {
                e = server_create_and_stat_error(conf, pool, HTTP_FORBIDDEN, 0,
                    "File permissions deny server access.");
                goto end_deliver;
            }

            /* FIXME this does not handle large files. but this is test code anyway */
            bkt = apr_bucket_file_create(fd, 0,
                (apr_size_t)resource->info->finfo.size,
                pool, output->c->bucket_alloc);
        }
        else {
            DAV_DEBUG_RES(resource, 0, "Building a compressed resource bucket");
            gint i64;
            i64 = g_ascii_strtoll(ctx->cp_chunk.uncompressed_size, NULL, 10);

            /* creation of compression specific bucket */
            bkt = apr_pcalloc(pool, sizeof(struct apr_bucket));
            bkt->type = &chunk_bucket_type;
            bkt->length = i64;
            bkt->start = 0;
            bkt->data = ctx;
            bkt->free = chunk_bucket_free_noop;
            bkt->list = output->c->bucket_alloc;
        }

        APR_BRIGADE_INSERT_TAIL(bb, bkt);

        /* as soon as the chunk has been sent, end of stream! */
        bkt = apr_bucket_eos_create(output->c->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(bb, bkt);

        if ((status = ap_pass_brigade(output, bb)) != APR_SUCCESS) {
            e = server_create_and_stat_error(conf, pool, HTTP_FORBIDDEN, 0,
                "Could not write contents to filter.");
            /* close file */
            if (ctx->cp_chunk.fd) {
                fclose(ctx->cp_chunk.fd);
            }
            goto end_deliver;
        }
        if (ctx->cp_chunk.buf) {
            g_free(ctx->cp_chunk.buf);
            ctx->cp_chunk.buf = NULL;
        }
        if (ctx->cp_chunk.uncompressed_size) {
            g_free(ctx->cp_chunk.uncompressed_size);
            ctx->cp_chunk.uncompressed_size = NULL;
        }

        /* close file */
        if (ctx->cp_chunk.fd) {
            fclose(ctx->cp_chunk.fd);
        }

        server_inc_stat(conf, RAWX_STATNAME_REP_2XX, 0);
        server_add_stat(conf, RAWX_STATNAME_REP_BWRITTEN,
            resource->info->finfo.size, 0);
    }

end_deliver:

    if (bb) {
        apr_brigade_destroy(bb);
        bb = NULL;
    }

    /* Now we pass here even if an error occured, for process request duration */
    server_inc_request_stat(resource_get_server_config(resource),
        RAWX_STATNAME_REQ_CHUNKGET,
        request_get_duration(resource->info->request));

    return e;
}
/* Append a full record of a failed CGI request to the script log: request
 * line, request headers, POST/PUT body, response headers, and whatever the
 * script produced on stdout and stderr. When logging is disabled, the log
 * is over its size limit, or it cannot be opened, the script output is
 * simply drained. Always returns `ret` unchanged. */
static int log_script(request_rec *r, cgi_server_conf * conf, int ret,
                      char *dbuf, const char *sbuf, apr_bucket_brigade *bb,
                      apr_file_t *script_err)
{
    const apr_array_header_t *hdr_list = apr_table_elts(r->headers_in);
    const apr_table_entry_t *hdr = (const apr_table_entry_t *) hdr_list->elts;
    char linebuf[HUGE_STRING_LEN];
    apr_file_t *log = NULL;
    apr_bucket *b;
    const char *data;
    apr_size_t bytes;
    apr_status_t rv;
    int wrote_stdout_marker;
    int idx;
    apr_finfo_t finfo;
    char timestamp[APR_CTIME_LEN];

    /* XXX Very expensive mainline case! Open, then getfileinfo!
     * Bail out — draining the script's output — when logging is off, the
     * log has exceeded its configured size, or it cannot be opened. */
    if (!conf->logname
        || ((apr_stat(&finfo, conf->logname, APR_FINFO_SIZE,
                      r->pool) == APR_SUCCESS)
            && (finfo.size > conf->logbytes))
        || (apr_file_open(&log, conf->logname,
                          APR_APPEND|APR_WRITE|APR_CREATE, APR_OS_DEFAULT,
                          r->pool) != APR_SUCCESS)) {
        /* Soak up script output */
        discard_script_output(bb);
        log_script_err(r, script_err);
        return ret;
    }

    /* "%% [Wed Jun 19 10:53:21 1996] GET /cgi-bin/printenv HTTP/1.0" */
    apr_ctime(timestamp, apr_time_now());
    apr_file_printf(log, "%%%% [%s] %s %s%s%s %s\n", timestamp, r->method,
                    r->uri, r->args ? "?" : "", r->args ? r->args : "",
                    r->protocol);

    /* "%% 500 /usr/local/apache/cgi-bin" */
    apr_file_printf(log, "%%%% %d %s\n", ret, r->filename);

    /* Request headers, plus the body for POST/PUT */
    apr_file_puts("%request\n", log);
    for (idx = 0; idx < hdr_list->nelts; ++idx) {
        if (!hdr[idx].key)
            continue;
        apr_file_printf(log, "%s: %s\n", hdr[idx].key, hdr[idx].val);
    }
    if ((r->method_number == M_POST || r->method_number == M_PUT) && *dbuf) {
        apr_file_printf(log, "\n%s\n", dbuf);
    }

    /* Response (error) headers and status buffer */
    apr_file_puts("%response\n", log);
    hdr_list = apr_table_elts(r->err_headers_out);
    hdr = (const apr_table_entry_t *) hdr_list->elts;
    for (idx = 0; idx < hdr_list->nelts; ++idx) {
        if (!hdr[idx].key)
            continue;
        apr_file_printf(log, "%s: %s\n", hdr[idx].key, hdr[idx].val);
    }
    if (sbuf && *sbuf)
        apr_file_printf(log, "%s\n", sbuf);

    /* Copy the script's stdout from the brigade, stopping at EOS or a read
     * failure; the "%stdout" marker is only written when there is data. */
    wrote_stdout_marker = 0;
    for (b = APR_BRIGADE_FIRST(bb);
         b != APR_BRIGADE_SENTINEL(bb);
         b = APR_BUCKET_NEXT(b)) {
        if (APR_BUCKET_IS_EOS(b)) {
            break;
        }
        rv = apr_bucket_read(b, &data, &bytes, APR_BLOCK_READ);
        if (rv != APR_SUCCESS || (bytes == 0)) {
            break;
        }
        if (!wrote_stdout_marker) {
            apr_file_puts("%stdout\n", log);
            wrote_stdout_marker = 1;
        }
        apr_file_write(log, data, &bytes);
        apr_file_puts("\n", log);
    }

    /* Likewise the script's stderr, line by line */
    if (apr_file_gets(linebuf, HUGE_STRING_LEN, script_err) == APR_SUCCESS) {
        apr_file_puts("%stderr\n", log);
        apr_file_puts(linebuf, log);
        while (apr_file_gets(linebuf, HUGE_STRING_LEN,
                             script_err) == APR_SUCCESS) {
            apr_file_puts(linebuf, log);
        }
        apr_file_puts("\n", log);
    }

    apr_brigade_destroy(bb);
    apr_file_close(script_err);
    apr_file_close(log);
    return ret;
}
/**
 * Try obtain a cache wide lock on the given cache key.
 *
 * If we return APR_SUCCESS, we obtained the lock, and we are clear to
 * proceed to the backend. If we return APR_EEXIST, then the lock is
 * already locked, someone else has gone to refresh the backend data
 * already, so we must return stale data with a warning in the mean
 * time. If we return anything else, then something has gone pear
 * shaped, and we allow the request through to the backend regardless.
 *
 * This lock is created from the request pool, meaning that should
 * something go wrong and the lock isn't deleted on return of the
 * request headers from the backend for whatever reason, at worst the
 * lock will be cleaned up when the request dies or finishes.
 *
 * If something goes truly bananas and the lock isn't deleted when the
 * request dies, the lock will be trashed when its max-age is reached,
 * or when a request arrives containing a Cache-Control: no-cache. At
 * no point is it possible for this lock to permanently deny access to
 * the backend.
 */
apr_status_t cache_try_lock(cache_server_conf *conf, cache_request_rec *cache,
        request_rec *r)
{
    apr_status_t status;
    const char *lockname;
    const char *path;
    char dir[5];
    apr_time_t now = apr_time_now();
    apr_finfo_t finfo;
    apr_file_t *lockfile;
    void *dummy;

    /* defensive default; overwritten by apr_stat() when it succeeds */
    finfo.mtime = 0;

    if (!conf || !conf->lock || !conf->lockpath) {
        /* no locks configured, leave */
        return APR_SUCCESS;
    }

    /* lock already obtained earlier? if so, success */
    apr_pool_userdata_get(&dummy, CACHE_LOCKFILE_KEY, r->pool);
    if (dummy) {
        return APR_SUCCESS;
    }

    /* create the key if it doesn't exist */
    if (!cache->key) {
        cache_generate_key(r, r->pool, &cache->key);
    }

    /* create a hashed filename from the key, and save it for later */
    lockname = ap_cache_generate_name(r->pool, 0, 0, cache->key);

    /* lock files represent discrete just-went-stale URLs "in flight", so
     * we support a simple two level directory structure, more is overkill.
     * The two levels are taken from the first two characters of the
     * hashed name: "/<c0>/<c1>". */
    dir[0] = '/';
    dir[1] = lockname[0];
    dir[2] = '/';
    dir[3] = lockname[1];
    dir[4] = 0;

    /* make the directories */
    path = apr_pstrcat(r->pool, conf->lockpath, dir, NULL);
    if (APR_SUCCESS != (status = apr_dir_make_recursive(path,
            APR_UREAD|APR_UWRITE|APR_UEXECUTE, r->pool))) {
        ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(00778)
                "Could not create a cache lock directory: %s", path);
        return status;
    }
    lockname = apr_pstrcat(r->pool, path, "/", lockname, NULL);
    /* remember the full lock path so it can be removed later */
    apr_pool_userdata_set(lockname, CACHE_LOCKNAME_KEY, NULL, r->pool);

    /* is an existing lock file too old? (ENOENT is fine: no lock yet) */
    status = apr_stat(&finfo, lockname,
                APR_FINFO_MTIME | APR_FINFO_NLINK, r->pool);
    if (!(APR_STATUS_IS_ENOENT(status)) && APR_SUCCESS != status) {
        /* NOTE(review): this log call hardcodes APR_EEXIST as the status
         * while the function returns the real `status` — looks like the
         * actual stat error was intended here; confirm upstream. */
        ap_log_rerror(APLOG_MARK, APLOG_ERR, APR_EEXIST, r, APLOGNO(00779)
                "Could not stat a cache lock file: %s", lockname);
        return status;
    }
    /* remove a stale lock: older than lockmaxage, or mtime in the future */
    if ((status == APR_SUCCESS) && (((now - finfo.mtime) > conf->lockmaxage)
                                  || (now < finfo.mtime))) {
        ap_log_rerror(APLOG_MARK, APLOG_INFO, status, r, APLOGNO(00780)
                "Cache lock file for '%s' too old, removing: %s",
                r->uri, lockname);
        apr_file_remove(lockname, r->pool);
    }

    /* try obtain a lock on the file: APR_EXCL makes creation atomic
     * (fails with EEXIST if someone else holds the lock) and
     * APR_DELONCLOSE cleans the lock up when the request pool dies */
    if (APR_SUCCESS == (status = apr_file_open(&lockfile, lockname,
            APR_WRITE | APR_CREATE | APR_EXCL | APR_DELONCLOSE,
            APR_UREAD | APR_UWRITE, r->pool))) {
        apr_pool_userdata_set(lockfile, CACHE_LOCKFILE_KEY, NULL, r->pool);
    }
    return status;
}