/* Merges per-server configurations: the override configuration wins
 * wholesale and the base is ignored, a fresh conf is built from it. */
static void *
dav_rainx_merge_server_config(apr_pool_t *p, void *base, void *overrides)
{
	dav_rainx_server_conf *src;
	dav_rainx_server_conf *merged;

	(void) base;
	DAV_XDEBUG_POOL(p, 0, "%s()", __FUNCTION__);

	src = overrides;
	merged = apr_pcalloc(p, sizeof(*merged));
	merged->pool = p;
	merged->cleanup = NULL;
	merged->hash_depth = src->hash_depth;
	merged->hash_width = src->hash_width;
	merged->fsync_on_close = src->fsync_on_close;
	merged->headers_scheme = src->headers_scheme;
	merged->FILE_buffer_size = src->FILE_buffer_size;
	memcpy(merged->docroot, src->docroot, sizeof(merged->docroot));
	memcpy(merged->ns_name, src->ns_name, sizeof(merged->ns_name));

	/* The merged conf gets its own lock and a refreshed namespace conf */
	apr_thread_mutex_create(&(merged->rainx_conf_lock),
			APR_THREAD_MUTEX_DEFAULT, merged->pool);
	update_rainx_conf(p, &(merged->rainx_conf), merged->ns_name);

	DAV_DEBUG_POOL(p, 0, "Configuration merged!");
	return merged;
}
/* Attaches a freshly forked child process to the shared statistics
 * resources created by the master: first the cross-process mutex, then
 * the SHM segment (skipped when a handle was inherited already).
 * Both are keyed on conf->shm.path.
 * Returns APR_SUCCESS, or the APR error of the failing attach step. */
apr_status_t
server_init_child_stat(dav_rawx_server_conf *conf, apr_pool_t *pool, apr_pool_t *plog)
{
	char buff[256];
	apr_status_t rc;

	DAV_XDEBUG_POOL(plog, 0, "%s()", __FUNCTION__);

	/* Attaches the mutex */
	DAV_DEBUG_POOL(plog, 0, "%s : Attaching the SHM global_mutex at [%s]",
			__FUNCTION__, conf->shm.path);
	rc = apr_global_mutex_child_init(&(conf->lock.handle), conf->shm.path, pool);
	if (APR_SUCCESS != rc) {
		DAV_ERROR_POOL(plog, 0, "%s : Failed to attach the SHM global_mutex at [%s] rc=%d : %s",
				__FUNCTION__, conf->shm.path, rc, apr_strerror(rc, buff, sizeof(buff)));
		return rc;
	}
	DAV_DEBUG_POOL(plog, 0, "%s : globalmutex attached at [%s]", __FUNCTION__, conf->shm.path);

	/* Attaches the SHM (only when no handle is already present) */
	if (!conf->shm.handle) {
		DAV_DEBUG_POOL(plog, 0, "%s : Attaching the SHM segment at [%s]",
				__FUNCTION__, conf->shm.path);
		rc = apr_shm_attach(&(conf->shm.handle), conf->shm.path, pool);
		if (APR_SUCCESS != rc) {
			DAV_ERROR_POOL(plog, 0, "%s : Failed to attach the SHM segment at [%s] rc=%d : %s",
					__FUNCTION__, conf->shm.path, rc, apr_strerror(rc, buff, sizeof(buff)));
			conf->shm.handle = NULL;
			return rc;
		}
	}
	DAV_DEBUG_POOL(plog, 0, "%s : SHM segment attached at [%s]", __FUNCTION__, conf->shm.path);

	return APR_SUCCESS;
}
/* Directive handler for the chunks docroot: validates that the path
 * exists and is a directory, then stores it in the server conf.
 * Returns NULL on success or an error string allocated in temp_pool. */
static const char *
dav_rainx_cmd_gridconfig_docroot(cmd_parms *cmd, void *config, const char *arg1)
{
	apr_finfo_t finfo;
	dav_rainx_server_conf *conf;

	(void) config;
	DAV_XDEBUG_POOL(cmd->pool, 0, "%s()", __FUNCTION__);

	/* Check the directory exists */
	do {
		apr_status_t status = apr_stat(&(finfo), arg1, APR_FINFO_NORM, cmd->pool);
		if (status != APR_SUCCESS) {
			DAV_DEBUG_POOL(cmd->temp_pool, 0,
					"Invalid docroot for GridStorage chunks : %s", arg1);
			return apr_pstrcat(cmd->temp_pool,
					"Invalid docroot for GridStorage chunks : ", arg1, NULL);
		}
		if (finfo.filetype != APR_DIR) {
			DAV_DEBUG_POOL(cmd->temp_pool, 0,
					"Docroot for GridStorage chunks must be a directory : %s", arg1);
			return apr_pstrcat(cmd->temp_pool,
					"Docroot for GridStorage chunks must be a directory : ", arg1, NULL);
		}
	} while (0);

	conf = ap_get_module_config(cmd->server->module_config, &dav_rainx_module);
	memset(conf->docroot, 0x00, sizeof(conf->docroot));
	/* BUGFIX: apr_cpystrn()'s size argument already accounts for the
	 * terminating NUL, so passing sizeof-1 needlessly dropped one usable
	 * character (and was inconsistent with the ns_name handler). */
	apr_cpystrn(conf->docroot, arg1, sizeof(conf->docroot));
	DAV_DEBUG_POOL(cmd->pool, 0, "DOCROOT=[%s]", conf->docroot);

	return NULL;
}
/* Directive handler for the socket timeout (microseconds): parses arg1
 * as a base-10 integer; on any parse error the current (default) value
 * is kept and an error is logged. Always returns NULL (non-fatal). */
static const char *
dav_rainx_cmd_gridconfig_sock_timeout(cmd_parms *cmd, void *config, const char *arg1)
{
	dav_rainx_server_conf *conf;
	apr_int64_t socket_timeout;
	char *endstr;

	(void) config;
	DAV_XDEBUG_POOL(cmd->pool, 0, "%s()", __FUNCTION__);

	conf = ap_get_module_config(cmd->server->module_config, &dav_rainx_module);

	/* BUGFIX: errno must be cleared before the conversion; apr_strtoi64
	 * reports range errors through errno, and a stale value from an
	 * earlier call made perfectly valid timeouts be rejected. */
	errno = 0;
	socket_timeout = apr_strtoi64(arg1, &endstr, 10);
	if (errno == 0 && *endstr == '\0') {
		conf->socket_timeout = socket_timeout;
	} else {
		DAV_ERROR_POOL(cmd->pool, 0, "Invalid socket timeout, default value will be used");
	}

	DAV_DEBUG_POOL(cmd->pool, 0,
			"Socket timeout for rawx request is %" APR_TIME_T_FMT " microseconds",
			conf->socket_timeout);
	return NULL;
}
/* Master-side init (attach variant): attaches to an already existing
 * shared statistics segment, then creates the cross-process mutex that
 * guards it. On mutex failure the segment is destroyed (rollback).
 * NOTE(review): the mutex is keyed on conf->shm.path here, matching
 * server_init_child_stat() which re-attaches with the same key.
 * Returns APR_SUCCESS or the APR error of the failing step; the SHM
 * handle is reset to NULL on every failure path. */
apr_status_t
server_init_master_stat(dav_rawx_server_conf *conf, apr_pool_t *pool, apr_pool_t *plog)
{
	char buff[256];
	apr_status_t rc;

	DAV_XDEBUG_POOL(plog, 0, "%s()", __FUNCTION__);

	/* Try to attach to the already existing SHM segment */
	rc = apr_shm_attach(&(conf->shm.handle), conf->shm.path, pool);
	if (APR_SUCCESS != rc) {
		DAV_DEBUG_POOL(plog, 0, "%s: Failed to attach to SHM segment at [%s]: %s",
				__FUNCTION__, conf->shm.path, apr_strerror(rc, buff, sizeof(buff)));
		conf->shm.handle = NULL;
		return rc;
	}
	DAV_DEBUG_POOL(plog, 0, "%s: Attached to existing SHM segment at [%s]",
			__FUNCTION__, conf->shm.path);

	/* Create a processus lock*/
	rc = apr_global_mutex_create(&(conf->lock.handle), conf->shm.path, APR_LOCK_DEFAULT, pool);
	if (rc != APR_SUCCESS) {
		DAV_ERROR_POOL(plog, 0, "%s : Cannot create a global_mutex at [%s] rc=%d : %s",
				__FUNCTION__, conf->shm.path, rc, apr_strerror(rc, buff, sizeof(buff)));
		/* rollback: do not keep a segment we cannot lock */
		(void) apr_shm_destroy(conf->shm.handle);
		conf->shm.handle = NULL;
		return rc;
	}
	DAV_DEBUG_POOL(plog, 0, "%s : globalmutex created at [%s]", __FUNCTION__, conf->shm.path);

	return APR_SUCCESS;
}
/* DAV hook: the "parent" of a special resource is simply rebuilt from
 * the originating request. Never fails. */
static dav_error *
dav_rainx_get_parent_resource_SPECIAL(const dav_resource *resource,
		dav_resource **result_parent)
{
	DAV_XDEBUG_POOL(resource->info->pool, 0, "%s(...)", __FUNCTION__);
	dav_resource *parent = __build_req_resource(resource->info->request,
			resource->hooks, resource->info->generator);
	*result_parent = parent;
	return NULL;
}
/* DAV hook: the "parent" of a special rawx resource is derived from the
 * chunk-update resource of the originating request. Never fails. */
static dav_error *
dav_rawx_get_parent_resource_SPECIAL(const dav_resource *resource,
		dav_resource **result_parent)
{
	DAV_XDEBUG_POOL(resource->info->pool, 0, "%s(...)", __FUNCTION__);
	dav_resource *parent = __get_chunkupdate_resource(resource->info->request,
			resource->hooks);
	*result_parent = parent;
	return NULL;
}
/* Directive handler for the namespace name: stores it in the conf, then
 * eagerly loads the namespace info (storage policy + ACLs) under the
 * conf lock. Returns NULL on success or an error string in temp_pool. */
static const char *
dav_rainx_cmd_gridconfig_namespace(cmd_parms *cmd, void *config, const char *arg1)
{
	dav_rainx_server_conf *conf;

	(void) config;
	DAV_XDEBUG_POOL(cmd->pool, 0, "%s()", __FUNCTION__);

	conf = ap_get_module_config(cmd->server->module_config, &dav_rainx_module);
	memset(conf->ns_name, 0x00, sizeof(conf->ns_name));
	apr_cpystrn(conf->ns_name, arg1, sizeof(conf->ns_name));
	DAV_DEBUG_POOL(cmd->pool, 0, "NS=[%s]", conf->ns_name);

	/* Prepare COMPRESSION / ACL CONF when we get ns name */
	namespace_info_t* ns_info;
	GError *local_error = NULL;
	ns_info = get_namespace_info(conf->ns_name, &local_error);
	if (!ns_info) {
		/* BUGFIX: a GError out-parameter may legitimately stay NULL on
		 * failure; dereferencing local_error->message unconditionally
		 * could crash the configuration parser. */
		const char *cause = (local_error && local_error->message)
				? local_error->message : "unknown error";
		DAV_DEBUG_POOL(cmd->temp_pool, 0,
				"Failed to get namespace info from ns [%s]", conf->ns_name);
		char *msg = apr_pstrcat(cmd->temp_pool,
				"Failed to get namespace info from ns: ", conf->ns_name,
				" ", cause, NULL);
		/* BUGFIX: also clear the error on this path (it leaked before) */
		g_clear_error(&local_error);
		return msg;
	}

	/* Lazily create the conf lock on first use */
	if (!conf->rainx_conf_lock) {
		apr_thread_mutex_create(&(conf->rainx_conf_lock),
				APR_THREAD_MUTEX_DEFAULT, conf->pool);
	}

	apr_thread_mutex_lock(conf->rainx_conf_lock);
	conf->rainx_conf = apr_palloc(cmd->pool, sizeof(rawx_conf_t));

	char * stgpol = NULL;
	stgpol = namespace_storage_policy(ns_info, ns_info->name);
	if(NULL != stgpol) {
		conf->rainx_conf->sp = storage_policy_init(ns_info, stgpol);
	} else {
		conf->rainx_conf->sp = NULL;
	}

	conf->rainx_conf->ni = ns_info;
	conf->rainx_conf->acl = _get_acl(cmd->pool, ns_info);
	conf->rainx_conf->last_update = time(0);
	apr_thread_mutex_unlock(conf->rainx_conf_lock);

	if(local_error)
		g_clear_error(&local_error);

	return NULL;
}
/* Directive handler for the chunk-path hash depth. */
static const char *
dav_rainx_cmd_gridconfig_hash_depth(cmd_parms *cmd, void *config, const char *arg1)
{
	(void) config;
	DAV_XDEBUG_POOL(cmd->pool, 0, "%s()", __FUNCTION__);

	dav_rainx_server_conf *conf =
			ap_get_module_config(cmd->server->module_config, &dav_rainx_module);
	conf->hash_depth = atoi(arg1);
	DAV_DEBUG_POOL(cmd->pool, 0, "hash_depth=[%d]", conf->hash_depth);
	return NULL;
}
/* Child-init hook: registers the per-child cleanup callback and attaches
 * the child to the shared statistics resources. A failed attach is
 * logged but not fatal (the child runs without stats). */
static void
rainx_hook_child_init(apr_pool_t *pchild, server_rec *s)
{
	apr_status_t status;
	dav_rainx_server_conf *conf;

	DAV_XDEBUG_POOL(pchild, 0, "%s()", __FUNCTION__);
	conf = ap_get_module_config(s->module_config, &dav_rainx_module);

	/* Register the child cleanup before attaching, so whatever was
	 * successfully attached gets released on pool destruction.
	 * (Was redundantly assigned a second time after the init call.) */
	conf->cleanup = _stat_cleanup_child;

	status = server_init_child_stat(conf, pchild, pchild);
	if (APR_SUCCESS != status)
		DAV_ERROR_POOL(pchild, 0, "Failed to attach the RAWX statistics support");
}
/* Directive handler toggling the "fsync the chunk directory on close"
 * bit in the fsync policy mask. */
static const char *
dav_rawx_cmd_gridconfig_fsync_dir(cmd_parms *cmd, void *config, const char *arg1)
{
	(void) config;
	DAV_XDEBUG_POOL(cmd->pool, 0, "%s()", __FUNCTION__);

	dav_rawx_server_conf *conf =
			ap_get_module_config(cmd->server->module_config, &dav_rawx_module);
	if (_str_to_boolean(arg1)) {
		conf->fsync_on_close |= FSYNC_ON_CHUNK_DIR;
	} else {
		conf->fsync_on_close &= ~FSYNC_ON_CHUNK_DIR;
	}
	return NULL;
}
/* Directive handler for the ACL switch: any of "on", "true", "yes" or
 * "enabled" (case-insensitive) enables ACLs; anything else disables. */
static const char *
dav_rainx_cmd_gridconfig_acl(cmd_parms *cmd, void *config, const char *arg1)
{
	static const char *on_values[] = { "on", "true", "yes", "enabled", NULL };

	(void) config;
	DAV_XDEBUG_POOL(cmd->pool, 0, "%s()", __FUNCTION__);

	dav_rainx_server_conf *conf =
			ap_get_module_config(cmd->server->module_config, &dav_rainx_module);
	conf->enabled_acl = 0;
	for (const char **v = on_values; *v != NULL; v++) {
		if (0 == apr_strnatcasecmp(arg1, *v))
			conf->enabled_acl = 1;
	}
	return NULL;
}
/* Allocates and zero-fills the per-server rawx configuration with its
 * defaults: 2/2 hashing, chunk fsync enabled, unbuffered FILE I/O. */
static void *
dav_rawx_create_server_config(apr_pool_t *p, server_rec *s)
{
	(void) s;
	DAV_XDEBUG_POOL(p, 0, "%s()", __FUNCTION__);

	dav_rawx_server_conf *conf = apr_pcalloc(p, sizeof(dav_rawx_server_conf));
	conf->pool = p;
	conf->cleanup = NULL;
	conf->hash_width = 2;
	conf->hash_depth = 2;
	conf->fsync_on_close = FSYNC_ON_CHUNK;
	conf->FILE_buffer_size = 0;
	return conf;
}
/* Seeks the stream's backing FILE* to the given absolute position.
 * Returns NULL on success, or a 500 dav_error when fseek() fails.
 * NOTE(review): abs_pos is an apr_off_t but fseek() takes a long, so
 * offsets beyond LONG_MAX would be truncated on 32-bit builds — confirm
 * whether fseeko()/apr_file_seek() should be used instead. */
static dav_error *
dav_rawx_seek_stream(dav_stream *stream, apr_off_t abs_pos)
{
	DAV_XDEBUG_POOL(stream->p, 0, "%s(%s)", __FUNCTION__, stream->pathname);
	TRACE("Seek stream: START please contact CDR if you get this TRACE");

	if (fseek(stream->f, abs_pos, SEEK_SET) != 0) {
		/* ### should check whether apr_file_seek set abs_pos was set to the
		 * correct position? */
		/* ### use something besides 500? */
		return server_create_and_stat_error(resource_get_server_config(stream->r),
				stream->p, HTTP_INTERNAL_SERVER_ERROR, 0,
				"Could not seek to specified position in the "
				"resource.");
	}
	return NULL;
}
/* Master-side init (create variant): creates the shared statistics
 * segment at conf->shm.path and the cross-process mutex at
 * conf->lock.path, then zeroes the counters and resets every RRD.
 * On mutex failure the segment is destroyed (rollback).
 * Returns APR_SUCCESS or the APR error of the failing step. */
apr_status_t
server_init_master_stat(dav_rainx_server_conf *conf, apr_pool_t *pool, apr_pool_t *plog)
{
	char buff[256];
	apr_status_t rc;

	DAV_XDEBUG_POOL(plog, 0, "%s()", __FUNCTION__);

	/* Create and attach the segment */
	rc = apr_shm_create(&(conf->shm.handle), sizeof(struct shm_stats_s),
			conf->shm.path, pool);
	if (APR_SUCCESS != rc) {
		DAV_ERROR_POOL(plog, 0, "%s : Cannot create a SHM segment at [%s] rc=%d : %s",
				__FUNCTION__, conf->shm.path, rc, apr_strerror(rc, buff, sizeof(buff)));
		conf->shm.handle = NULL;
		return rc;
	}
	DAV_DEBUG_POOL(plog, 0, "%s : SHM segment created at [%s]",
			__FUNCTION__, conf->shm.path);

	/* Create a processus lock*/
	rc = apr_global_mutex_create(&(conf->lock.handle), conf->lock.path,
			APR_LOCK_DEFAULT, pool);
	if (rc != APR_SUCCESS) {
		DAV_ERROR_POOL(plog, 0, "%s : Cannot create a global_mutex at [%s] rc=%d : %s",
				__FUNCTION__, conf->lock.path, rc, apr_strerror(rc, buff, sizeof(buff)));
		/* rollback: do not keep a segment we cannot lock */
		(void) apr_shm_destroy(conf->shm.handle);
		conf->shm.handle = NULL;
		return rc;
	}
	DAV_DEBUG_POOL(plog, 0, "%s : globalmutex created at [%s]",
			__FUNCTION__, conf->lock.path);

	/* Init the SHM: zero all counters then reset the round-robin DBs.
	 * memset() replaces the legacy bzero() (removed from POSIX.1-2008),
	 * and the typed local pointer removes the repeated casts. */
	struct shm_stats_s *stats = apr_shm_baseaddr_get(conf->shm.handle);
	if (stats) {
		memset(stats, 0, sizeof(struct shm_stats_s));
		rainx_stats_rrd_init(&(stats->body.rrd_req_sec));
		rainx_stats_rrd_init(&(stats->body.rrd_duration));
		rainx_stats_rrd_init(&(stats->body.rrd_req_put_sec));
		rainx_stats_rrd_init(&(stats->body.rrd_put_duration));
		rainx_stats_rrd_init(&(stats->body.rrd_req_get_sec));
		rainx_stats_rrd_init(&(stats->body.rrd_get_duration));
		rainx_stats_rrd_init(&(stats->body.rrd_req_del_sec));
		rainx_stats_rrd_init(&(stats->body.rrd_del_duration));
	}

	return APR_SUCCESS;
}
/* Directive handler for the run directory: derives the shared key
 * "<dir>/httpd-shm.<pid>" used for the statistics segment.
 * NOTE(review): mutex_key is logged from shm.path too — consistent with
 * server_init_child_stat(), which keys the global mutex on the SHM path. */
static const char *
dav_rawx_cmd_gridconfig_dirrun(cmd_parms *cmd, void *config, const char *arg1)
{
	dav_rawx_server_conf *conf;

	(void) config;
	DAV_XDEBUG_POOL(cmd->pool, 0, "%s()", __FUNCTION__);

	conf = ap_get_module_config(cmd->server->module_config, &dav_rawx_module);
	apr_snprintf(conf->shm.path, sizeof(conf->shm.path),
			"%s/httpd-shm.%d", arg1, getpid());
	DAV_DEBUG_POOL(cmd->pool, 0, "mutex_key=[%s]", conf->shm.path);
	DAV_DEBUG_POOL(cmd->pool, 0, "shm_key=[%s]", conf->shm.path);

	return NULL;
}
/* Allocates and zero-fills a per-server rainx configuration with the
 * default socket timeout and pid-derived lock/SHM paths. */
static void *
dav_rainx_create_server_config(apr_pool_t *p, server_rec *s)
{
	(void) s;
	DAV_XDEBUG_POOL(p, 0, "%s()", __FUNCTION__);

	dav_rainx_server_conf *conf = apr_pcalloc(p, sizeof(dav_rainx_server_conf));
	conf->pool = p;
	conf->cleanup = NULL;
	conf->socket_timeout = RAINX_DEFAULT_SOCKET_TIMEOUT;
	apr_snprintf(conf->lock.path, sizeof(conf->lock.path),
			"/var/run/httpd-lock.%d", getpid());
	apr_snprintf(conf->shm.path, sizeof(conf->shm.path),
			"/var/run/httpd-shm.%d", getpid());
	return conf;
}
/* Directive handler for the upload FILE buffer size: a non-empty value
 * is parsed and clamped into [8192, 131072]; 0 keeps buffering off. */
static const char *
dav_rainx_cmd_gridconfig_upblock(cmd_parms *cmd, void *config, const char *arg1)
{
	(void) config;
	DAV_XDEBUG_POOL(cmd->pool, 0, "%s()", __FUNCTION__);

	dav_rainx_server_conf *conf =
			ap_get_module_config(cmd->server->module_config, &dav_rainx_module);
	if (arg1 && *arg1) {
		int size = atoi(arg1);
		if (size > 0 && size < 8192)
			size = 8192;
		else if (size > 131072)
			size = 131072;
		conf->FILE_buffer_size = size;
	}
	return NULL;
}
/* Master-side teardown: destroys the global mutex, then detaches the
 * shared statistics segment; both handles are reset to NULL. */
void
server_master_stat_fini(dav_rawx_server_conf *conf, apr_pool_t *plog)
{
	DAV_XDEBUG_POOL(plog, 0, "%s()", __FUNCTION__);

	if (conf->lock.handle != NULL) {
		DAV_DEBUG_POOL(plog, 0, "%s: Destroying the globalmutex at [%s]",
				__FUNCTION__, conf->shm.path);
		if (apr_global_mutex_destroy(conf->lock.handle) != APR_SUCCESS)
			DAV_ERROR_POOL(plog, 0, "Failed to destroy the global_mutex");
		conf->lock.handle = NULL;
	}

	if (conf->shm.handle != NULL) {
		DAV_DEBUG_POOL(plog, 0, "%s: Detaching the SHM segment at [%s]",
				__FUNCTION__, conf->shm.path);
		if (apr_shm_detach(conf->shm.handle) != APR_SUCCESS)
			DAV_ERROR_POOL(plog, 0, "Failed to detach the SHM segment");
		conf->shm.handle = NULL;
	}
}
/* Renders the shared statistics counters as a "key value" text body.
 * NOTE(review): the global mutex is held only around the base-address
 * lookup; the STR_KV() expansions presumably read the shared counters
 * through `stats` after the unlock, so values may be mid-update —
 * confirm this best-effort read is intended. */
static const char *
__gen_stats(const dav_resource *resource, apr_pool_t *pool)
{
	DAV_XDEBUG_POOL(pool, 0, "%s()", __FUNCTION__);

	dav_rainx_server_conf *c = resource_get_server_config(resource);

	apr_global_mutex_lock(c->lock.handle);
	struct shm_stats_s *stats = apr_shm_baseaddr_get(c->shm.handle);
	apr_global_mutex_unlock(c->lock.handle);

	return apr_pstrcat(pool,
			STR_KV(time_all, "counter req.time"),
			STR_KV(time_put, "counter req.time.put"),
			STR_KV(time_get, "counter req.time.get"),
			STR_KV(time_del, "counter req.time.del"),
			STR_KV(time_stat, "counter req.time.stat"),
			STR_KV(time_info, "counter req.time.info"),
			STR_KV(time_raw, "counter req.time.raw"),
			STR_KV(time_other, "counter req.time.other"),
			STR_KV(req_all, "counter req.hits"),
			STR_KV(req_chunk_put, "counter req.hits.put"),
			STR_KV(req_chunk_get, "counter req.hits.get"),
			STR_KV(req_chunk_del, "counter req.hits.del"),
			STR_KV(req_stat, "counter req.hits.stat"),
			STR_KV(req_info, "counter req.hits.info"),
			STR_KV(req_raw, "counter req.hits.raw"),
			STR_KV(req_other, "counter req.hits.other"),
			STR_KV(rep_2XX, "counter rep.hits.2xx"),
			STR_KV(rep_4XX, "counter rep.hits.4xx"),
			STR_KV(rep_5XX, "counter rep.hits.5xx"),
			STR_KV(rep_other, "counter rep.hits.other"),
			STR_KV(rep_403, "counter rep.hits.403"),
			STR_KV(rep_404, "counter rep.hits.404"),
			STR_KV(rep_bread, "counter rep.bread"),
			STR_KV(rep_bwritten, "counter rep.bwritten"),
			NULL);
}
/* Child-side teardown: detaches the shared statistics segment (if any)
 * and resets the handle. Returns APR_SUCCESS, or the detach error. */
apr_status_t
server_child_stat_fini(dav_rawx_server_conf *conf, apr_pool_t *plog)
{
	char buff[256];

	DAV_XDEBUG_POOL(plog, 0, "%s()", __FUNCTION__);

	/* Detaches the segment */
	if (conf->shm.handle) {
		apr_status_t rc = apr_shm_detach(conf->shm.handle);
		if (rc != APR_SUCCESS) {
			DAV_ERROR_POOL(plog, 0, "Failed to detach SHM segment at [%s] rc=%d : %s",
					conf->shm.path, rc, apr_strerror(rc, buff, sizeof(buff)));
			return rc;
		}
		conf->shm.handle = NULL;
	}

	DAV_DEBUG_POOL(plog, 0, "%s: SHM segment at [%s] detached",
			__FUNCTION__, conf->shm.path);
	return APR_SUCCESS;
}
/* Allocates and zero-fills a per-server rainx configuration: 2/2
 * hashing, header scheme V1, every fsync bit set, unbuffered FILE I/O,
 * default socket timeout, and pid-derived lock/SHM paths. */
static void *
dav_rainx_create_server_config(apr_pool_t *p, server_rec *s)
{
	(void) s;
	DAV_XDEBUG_POOL(p, 0, "%s()", __FUNCTION__);

	dav_rainx_server_conf *conf = apr_pcalloc(p, sizeof(dav_rainx_server_conf));
	conf->pool = p;
	conf->cleanup = NULL;
	conf->hash_width = 2;
	conf->hash_depth = 2;
	conf->headers_scheme = HEADER_SCHEME_V1;
	conf->fsync_on_close = ~0;
	conf->FILE_buffer_size = 0;
	conf->socket_timeout = RAINX_DEFAULT_SOCKET_TIMEOUT;
	apr_snprintf(conf->lock.path, sizeof(conf->lock.path),
			"/var/run/httpd-lock.%d", getpid());
	apr_snprintf(conf->shm.path, sizeof(conf->shm.path),
			"/var/run/httpd-shm.%d", getpid());
	return conf;
}
/* Directive handler for the header scheme: "1", "2" or "both"
 * (case-insensitive). Any other value is a configuration error. */
static const char *
dav_rainx_cmd_gridconfig_headers(cmd_parms *cmd, void *config, const char *arg1)
{
	(void) config;
	DAV_XDEBUG_POOL(cmd->pool, 0, "%s()", __FUNCTION__);

	dav_rainx_server_conf *conf =
			ap_get_module_config(cmd->server->module_config, &dav_rainx_module);

	/* ensure a right default value */
	conf->headers_scheme = HEADER_SCHEME_V1;

	if (0 == apr_strnatcasecmp(arg1, "1")) {
		conf->headers_scheme = HEADER_SCHEME_V1;
	} else if (0 == apr_strnatcasecmp(arg1, "2")) {
		conf->headers_scheme = HEADER_SCHEME_V2;
	} else if (0 == apr_strnatcasecmp(arg1, "both")) {
		conf->headers_scheme = HEADER_SCHEME_V1 | HEADER_SCHEME_V2;
	} else {
		return apr_psprintf(cmd->pool,
				"Grid Headers scheme : invalid value [%s]", arg1);
	}
	return NULL;
}
/* Merges per-server rawx configurations: the override wins wholesale
 * and the base is ignored; a fresh conf is built from the override. */
static void *
dav_rawx_merge_server_config(apr_pool_t *p, void *base, void *overrides)
{
	dav_rawx_server_conf *src;
	dav_rawx_server_conf *merged;

	DAV_XDEBUG_POOL(p, 0, "%s()", __FUNCTION__);
	(void) base;

	src = overrides;
	merged = apr_pcalloc(p, sizeof(*merged));
	merged->pool = p;
	merged->cleanup = NULL;
	merged->hash_depth = src->hash_depth;
	merged->hash_width = src->hash_width;
	merged->fsync_on_close = src->fsync_on_close;
	merged->FILE_buffer_size = src->FILE_buffer_size;
	memcpy(merged->docroot, src->docroot, sizeof(merged->docroot));
	memcpy(merged->ns_name, src->ns_name, sizeof(merged->ns_name));

	/* Refresh the namespace-dependent part of the merged conf */
	update_rawx_conf(p, &(merged->rawx_conf), merged->ns_name);

	DAV_DEBUG_POOL(p, 0, "Configuration merged!");
	return merged;
}
/* Post-config hook (rawx): only the second configuration pass does real
 * work. For every VirtualHost address it checks/claims the docroot
 * XATTR lock, then initializes the shared statistics support.
 * Returns OK on success, DONE to abort server startup. */
static int
rawx_hook_post_config(apr_pool_t *pconf, apr_pool_t *plog, apr_pool_t *ptemp, server_rec *server)
{
	apr_status_t status;
	enum lock_state_e state;
	server_rec *s;
	server_addr_rec *a;
	dav_rawx_server_conf *conf;
	GError *gerr;
	int volume_validated = 0;

	(void) ptemp;
	DAV_XDEBUG_POOL(plog, 0, "%s(%lx)", __FUNCTION__, (long)server);

	/* Apache runs post_config twice; skip the first pass */
	if (__rawx_is_first_call(server)) {
		DAV_DEBUG_POOL(plog, 0, "First call detected");
		return OK;
	}
	DAV_DEBUG_POOL(plog, 0, "Second call detected");

	gerr = NULL;
	conf = ap_get_module_config(server->module_config, &dav_rawx_module);
	DAV_XDEBUG_POOL(plog, 0, "Checking the docroot XATTR lock for [%s]", conf->docroot);

	/* Runs the configured servers and check they do not serve
	 * the grid docroot with an unauthorized IP:PORT couple */
	for (s = server ; s ; s = s->next) {
		for (a = s->addrs ; a ; a = a->next) {
			apr_status_t status2;
			char *host = NULL, url[512];

			if (gerr)
				g_clear_error(&gerr);
			if (a->host_port == 0)
				continue;

			host = NULL;
			status2 = apr_getnameinfo(&host, a->host_addr,
					NI_NUMERICSERV|NI_NUMERICHOST|NI_NOFQDN);
			if (status2 != APR_SUCCESS || host == NULL) {
				DAV_ERROR_POOL(plog, 0, "getnameinfo() failed : %d", status2);
				continue;
			}

			apr_snprintf(url, sizeof(url), "%s:%d", host, a->host_port);
			DAV_DEBUG_POOL(plog, 0, "xattr-lock : testing addr [%s]", url);

			state = rawx_get_volume_lock_state(conf->docroot, conf->ns_name, url, &gerr);
			switch (state) {

				case ERROR_LS:
					DAV_ERROR_POOL(plog, 0, "Failed to check the docroot ownership: %s",
							gerror_get_message(gerr));
					goto label_error;

				case NOLOCK_LS:
					/* no lock yet: claim the docroot for this IP:PORT */
					if (!rawx_lock_volume(conf->docroot, conf->ns_name, url, 0, &gerr)) {
						DAV_ERROR_POOL(plog, 0, "Failed to grab the docroot ownership: %s",
								gerror_get_message(gerr));
						goto label_error;
					}
					DAV_DEBUG_POOL(plog, 0, "Docroot now owned");
					volume_validated = ~0;
					break;

				case OWN_LS:
					DAV_DEBUG_POOL(plog, 0, "Docroot already owned by the current server");
					/* NOTE(review): an overwrite failure is only logged — the
					 * volume is still marked validated below; confirm intended */
					if (!rawx_lock_volume(conf->docroot, conf->ns_name, url,
							RAWXLOCK_FLAG_OVERWRITE, &gerr))
						DAV_ERROR_POOL(plog, 0, "Failed to complete the docroot ownership: %s",
								gerror_get_message(gerr));
					volume_validated = ~0;
					break;

				case OTHER_LS:
					DAV_ERROR_POOL(plog, 0, "Another RAWX already used the docroot (see XATTR)"
							" : %s", gerror_get_message(gerr));
					goto label_error;
			}
		}
	}

	if (gerr)
		g_clear_error(&gerr);

	if (!volume_validated) {
		DAV_ERROR_POOL(plog, 0, "No server found, could not validate the RAWX volume. "
				"Did you declare at least one VirtualHost ?");
		goto label_error;
	}

	if (_create_shm_if_needed(conf->shm.path, server, plog) != APR_SUCCESS) {
		DAV_ERROR_POOL(plog, 0, "Failed to init the RAWX statistics support");
		return DONE;
	}

	/* Init the stat support : doing this so late avoids letting orphan
	 * SHM segments in the nature in case of previous errors */
	status = server_init_master_stat(conf, pconf, plog);
	if (APR_SUCCESS != status) {
		DAV_ERROR_POOL(plog, 0, "Failed to init the RAWX statistics support");
		return DONE;
	}
	else {
		/* This will be overwritten by the child_init */
		conf->cleanup = _stat_cleanup_master;
		/* `i` is a file-scope counter (declared outside this chunk) */
		apr_pool_userdata_set(conf, apr_psprintf(pconf, "RAWX-config-to-be-cleaned-%d", i++),
				_stat_cleanup_to_register, pconf);
	}

	return OK;

label_error:
	if (gerr)
		g_clear_error(&gerr);
	return DONE;
}
/* Post-config hook (rainx): only the second configuration pass does
 * real work. Checks option consistency (at least one header scheme
 * enabled), logs the addresses that would be checked for the xattr
 * lock, then initializes the shared statistics support.
 * Returns OK on success, DONE to abort server startup. */
static int
rainx_hook_post_config(apr_pool_t *pconf, apr_pool_t *plog, apr_pool_t *ptemp, server_rec *server)
{
	apr_status_t status;
	server_rec *s;
	server_addr_rec *a;
	dav_rainx_server_conf *conf;
	GError *gerr;

	(void) ptemp;
	DAV_XDEBUG_POOL(plog, 0, "%s(%lx)", __FUNCTION__, (long)server);

	/* Apache runs post_config twice; skip the first pass */
	if (__rainx_is_first_call(server)) {
		DAV_DEBUG_POOL(plog, 0, "First call detected");
		return OK;
	}
	DAV_DEBUG_POOL(plog, 0, "Second call detected");

	gerr = NULL;
	conf = ap_get_module_config(server->module_config, &dav_rainx_module);

	/* perform some options consistency checks */
	if (!(conf->headers_scheme & HEADER_SCHEME_V1) &&
			!(conf->headers_scheme & HEADER_SCHEME_V2)) {
		DAV_ERROR_POOL(plog, 0, "You cannot disable both V1 and V2 header scheme");
		return DONE;
	}

	DAV_XDEBUG_POOL(plog, 0, "Checking the docroot XATTR lock for [%s]", conf->docroot);

	/* Runs the configured servers and check they do not serve
	 * the grid docroot with an unauthorized IP:PORT couple */
	for (s = server ; s ; s = s->next) {
		for (a = s->addrs ; a ; a = a->next) {
			char *host = NULL, url[512];
			if (gerr)
				g_clear_error(&gerr);
			if (a->host_port == 0)
				continue;
			host = NULL;
			status = apr_getnameinfo(&host, a->host_addr,
					NI_NUMERICSERV|NI_NUMERICHOST|NI_NOFQDN);
			if (status != APR_SUCCESS || host == NULL) {
				DAV_ERROR_POOL(plog, 0, "getnameinfo() failed : %d", status);
				continue;
			}
			apr_snprintf(url, sizeof(url), "%s:%d", host, a->host_port);
			/* NOTE(review): unlike the rawx hook, no lock state is actually
			 * checked here — the candidate address is only logged */
			DAV_DEBUG_POOL(plog, 0, "xattr-lock : testing addr [%s]", url);
		}
	}

	if (gerr)
		g_clear_error(&gerr);

	/* Init the stat support : doing this so late avoids letting orphan
	 * SHM segments in the nature in case of previous errors */
	status = server_init_master_stat(conf, pconf, plog);
	if (APR_SUCCESS != status) {
		DAV_ERROR_POOL(plog, 0, "Failed to init the RAINX statistics support");
		return DONE;
	}
	else {
		/* This will be overwritten by the child_init */
		conf->cleanup = _stat_cleanup_master;
		/* `i` is a file-scope counter (declared outside this chunk) */
		apr_pool_userdata_set(conf, apr_psprintf(pconf, "RAINX-config-to-be-cleaned-%d", i++),
				_stat_cleanup_to_register, pconf);
	}

	return OK;
}
/* Renders the statistics report: raw counters plus request rates and
 * average request times computed from the 4-second RRD deltas.
 * NOTE(review): the mutex only protects the base-address lookup; the
 * counters themselves are read after the unlock — confirm this
 * best-effort (racy but lock-free) read path is intended. */
static const char *
__gen_stats(const dav_resource *resource, apr_pool_t *pool)
{
	struct shm_stats_s *stats = NULL;

	DAV_XDEBUG_POOL(pool, 0, "%s()", __FUNCTION__);

	/* BUGFIX(cleanup): removed `memset(&stats, 0, sizeof(stats))` — it
	 * zeroed the pointer *variable* (already NULL), not the shared
	 * stats, and the pointer is reassigned just below anyway. */
	dav_rainx_server_conf *c = resource_get_server_config(resource);

	apr_global_mutex_lock(c->lock.handle);
	stats = apr_shm_baseaddr_get(c->shm.handle);
	apr_global_mutex_unlock(c->lock.handle);

	/* deltas over the last 4 seconds of each round-robin database */
	apr_uint64_t req = rainx_stats_rrd_get_delta(&(stats->body.rrd_req_sec), 4);
	apr_uint64_t reqavgtime = rainx_stats_rrd_get_delta(&(stats->body.rrd_duration), 4);
	apr_uint64_t req_put = rainx_stats_rrd_get_delta(&(stats->body.rrd_req_put_sec), 4);
	apr_uint64_t reqavgtime_put = rainx_stats_rrd_get_delta(&(stats->body.rrd_put_duration), 4);
	apr_uint64_t req_get = rainx_stats_rrd_get_delta(&(stats->body.rrd_req_get_sec), 4);
	apr_uint64_t reqavgtime_get = rainx_stats_rrd_get_delta(&(stats->body.rrd_get_duration), 4);
	apr_uint64_t req_del = rainx_stats_rrd_get_delta(&(stats->body.rrd_req_del_sec), 4);
	apr_uint64_t reqavgtime_del = rainx_stats_rrd_get_delta(&(stats->body.rrd_del_duration), 4);

	/* average time per request, guarded against division by zero */
	apr_uint64_t r_time = 0, r_put_time = 0, r_get_time = 0, r_del_time = 0;
	if(req > 0)
		r_time = reqavgtime / req;
	if(req_put > 0)
		r_put_time = reqavgtime_put / req_put;
	if(req_get > 0)
		r_get_time = reqavgtime_get / req_get;
	if(req_del > 0)
		r_del_time = reqavgtime_del / req_del;

	/* requests per second over the 4-second window */
	double r_rate = 0, r_put_rate = 0, r_get_rate = 0, r_del_rate = 0;
	r_rate = (double)req / 4;
	r_put_rate = (double)req_put / 4;
	r_get_rate = (double)req_get / 4;
	r_del_rate = (double)req_del / 4;

	return apr_pstrcat(pool,
			STR_KV(req_all, "req.all"),
			STR_KV(req_chunk_put, "req.put"),
			STR_KV(req_chunk_get, "req.get"),
			STR_KV(req_chunk_del, "req.del"),
			STR_KV(req_stat, "req.stat"),
			STR_KV(req_info, "req.info"),
			STR_KV(req_raw, "req.raw"),
			STR_KV(req_other, "req.other"),
			STR_KV(rep_2XX, "rep.2xx"),
			STR_KV(rep_4XX, "rep.4xx"),
			STR_KV(rep_5XX, "rep.5xx"),
			STR_KV(rep_other, "rep.other"),
			STR_KV(rep_403, "rep.403"),
			STR_KV(rep_404, "rep.404"),
			STR_KV(rep_bread, "rep.bread"),
			STR_KV(rep_bwritten, "rep.bwritten"),
			apr_psprintf(pool, "rainx.reqpersec %f\n", r_rate),
			apr_psprintf(pool, "rainx.avreqtime %"APR_UINT64_T_FMT"\n", r_time),
			apr_psprintf(pool, "rainx.reqputpersec %f\n", r_put_rate),
			apr_psprintf(pool, "rainx.avputreqtime %"APR_UINT64_T_FMT"\n", r_put_time),
			apr_psprintf(pool, "rainx.reqgetpersec %f\n", r_get_rate),
			apr_psprintf(pool, "rainx.avgetreqtime %"APR_UINT64_T_FMT"\n", r_get_time),
			apr_psprintf(pool, "rainx.reqdelpersec %f\n", r_del_rate),
			apr_psprintf(pool, "rainx.avdelreqtime %"APR_UINT64_T_FMT"\n", r_del_time),
			NULL);
}
/* Writes bufsize bytes into the stream, staging them in a blocksize
 * buffer; every full block is written to the backing FILE* as-is, or
 * compressed first when stream->compression is set.
 * Returns NULL on success, or a 500 dav_error on write/compress failure.
 * Side effects: updates the MD5 digest, the compression checksum,
 * total_size and (when compressing) compressed_size. */
static dav_error *
dav_rawx_write_stream(dav_stream *stream, const void *buf, apr_size_t bufsize)
{
	DAV_XDEBUG_POOL(stream->p, 0, "%s(%s)", __FUNCTION__, stream->pathname);

	guint written = 0;
	gulong checksum = stream->compress_checksum;

	while (written < bufsize) {
		/* copy as much as fits in the staging buffer */
		memcpy(stream->buffer + stream->bufsize, buf + written,
				MIN(bufsize - written, stream->blocksize - stream->bufsize));
		guint tmp = MIN(bufsize - written, stream->blocksize - stream->bufsize);
		written += tmp;
		stream->bufsize += tmp;

		/* If buffer full, compress if needed and write to distant file */
		if (stream->blocksize - stream->bufsize <=0){
			gsize nb_write = 0;
			if (!stream->compression) {
				nb_write = fwrite(stream->buffer, stream->bufsize, 1, stream->f);
				if (nb_write != 1) {
					/* ### use something besides 500? */
					return server_create_and_stat_error(resource_get_server_config(stream->r),
							stream->p, HTTP_INTERNAL_SERVER_ERROR, 0,
							"An error occurred while writing to a "
							"resource.");
				}
			}
			else {
				/* compress the block into a temporary byte array */
				GByteArray *gba = g_byte_array_new();
				if (stream->comp_ctx.data_compressor(stream->buffer,
						stream->bufsize, gba, &checksum)!=0) {
					if (gba)
						g_byte_array_free(gba, TRUE);
					/* ### use something besides 500? */
					return server_create_and_stat_error(resource_get_server_config(stream->r),
							stream->p, HTTP_INTERNAL_SERVER_ERROR, 0,
							"An error occurred while compressing data.");
				}
				nb_write = fwrite(gba->data, gba->len, 1, stream->f);
				if (nb_write != 1) {
					if (gba)
						g_byte_array_free(gba, TRUE);
					/* ### use something besides 500?
					 */
					return server_create_and_stat_error(resource_get_server_config(stream->r),
							stream->p, HTTP_INTERNAL_SERVER_ERROR, 0,
							"An error occurred while writing to a "
							"resource.");
				}
				stream->compressed_size += gba->len;
				if (gba)
					g_byte_array_free(gba, TRUE);
			}
			/* NOTE(review): a fresh block buffer is pool-allocated at every
			 * flush, so pool memory grows with the stream — confirm intended */
			stream->buffer = apr_pcalloc(stream->p, stream->blocksize);
			stream->bufsize = 0;
		}
	}

	stream->compress_checksum = checksum;

	/* update the hash and the stats */
	g_checksum_update(stream->md5, buf, bufsize);

	/* update total_size */
	stream->total_size += bufsize;
	return NULL;
}