/*
 * Store a verification status for `key` in the shared-memory API cache.
 *
 * Returns  1 on success (record stored),
 *          0 when nothing needs storing (negative status, or key already cached),
 *         -1 on shm/mutex failure.
 */
static int set_status_to_api_cache_shm( request_rec *r, const char* key, int status)
{
    int i;
    int st;
    apr_status_t ret;

    if (status < 0) {
        return 0; /* no record set or update */
    }
    if (!api_cache_shm) {
        return -1; /* some problem on shm init!! */
    }

    ret = apr_global_mutex_lock(global_lock);
    if (ret != APR_SUCCESS) {
        ap_log_rerror(APLOG_MARK, APLOG_CRIT, ret, r,
                      "apr_global_mutex_lock failed!");
        return -1; /* some problem on mutex */
    }

    if (get_status_from_api_cache_shm(r, key, &st) == 1) {
        /* BUG FIX: the original returned here while still holding the
         * global mutex, deadlocking every other worker on the next
         * lock attempt.  Release the lock before returning. */
        ret = apr_global_mutex_unlock(global_lock);
        if (ret != APR_SUCCESS) {
            ap_log_rerror(APLOG_MARK, APLOG_CRIT, ret, r,
                          "apr_global_mutex_unlock failed!");
            return -1;
        }
        return 0; /* no record set or update */
    }

    /* Claim the first free bucket; status == -1 marks an unused slot.
     * NOTE(review): apr_cpystrn() is bounded by strlen(key)+1, not by
     * sizeof(key_verified_infos[i].key) — confirm keys cannot exceed
     * the field size.  If every bucket is in use the entry is silently
     * dropped yet we still return 1 — confirm that is intended. */
    for (i = 0; i < NUM_BUCKETS; i++) {
        if (key_verified_infos[i].status == -1) {
            apr_cpystrn(key_verified_infos[i].key, key, strlen(key)+1);
            key_verified_infos[i].status = status;
            break;
        }
    }

    ret = apr_global_mutex_unlock(global_lock);
    if (ret != APR_SUCCESS) {
        ap_log_rerror(APLOG_MARK, APLOG_CRIT, ret, r,
                      "apr_global_mutex_unlock failed!");
        return -1;
    }
    return 1; /* good */
}
/*
 * Acquire the mutex appropriate for the test mode (process mutex for
 * TEST_PROC, global mutex otherwise).
 *
 * BUG FIX: the lock calls were previously made *inside* assert(); when
 * compiled with -DNDEBUG the asserts expand to nothing and the lock is
 * never taken.  Perform the call unconditionally and assert on the
 * saved result instead (CERT PRE31-C).
 */
static void lock_grab(test_mode_e test_mode)
{
    apr_status_t rv;

    if (test_mode == TEST_PROC) {
        rv = apr_proc_mutex_lock(proc_mutex);
    }
    else {
        rv = apr_global_mutex_lock(global_mutex);
    }
    assert(rv == APR_SUCCESS);
    (void)rv; /* silence "unused" warning under NDEBUG */
}
/*
 * Acquire the mapcache global mutex and arrange for it to be released
 * automatically when gctx->pool is cleaned up.
 *
 * On lock failure a 500 error is recorded on the context and no cleanup
 * is registered (the caller is expected to check gctx for an error).
 *
 * NOTE(review): apr_global_mutex_unlock is cast to the pool-cleanup
 * signature apr_status_t (*)(void *); this relies on the mutex pointer
 * round-tripping through void* — common APR practice, but technically a
 * function-pointer type mismatch.  Confirm no child cleanup is needed.
 */
void mapcache_util_mutex_aquire(mapcache_context *gctx) {
  int ret;
  mapcache_context_apache_request *ctx = (mapcache_context_apache_request*)gctx;
  /* per-server config holds the cross-process mutex */
  mapcache_server_cfg *cfg = ap_get_module_config(ctx->request->server->module_config, &mapcache_module);
  ret = apr_global_mutex_lock(cfg->mutex);
  if(ret != APR_SUCCESS) {
    gctx->set_error(gctx,500,"failed to lock mutex");
    return;
  }
  /* unlock happens automatically at pool cleanup time */
  apr_pool_cleanup_register(gctx->pool, cfg->mutex, (void*)apr_global_mutex_unlock, apr_pool_cleanup_null);
}
/*
 * Take the zcache global mutex; a no-op when the configured mutex mode
 * is ZCACHE_MUTEXMODE_NONE.  Returns TRUE on success, FALSE otherwise.
 */
int zcache_mutex_on(MCConfigRecord *mc)
{
    apr_status_t rv;

    if (mc->nMutexMode == ZCACHE_MUTEXMODE_NONE)
        return TRUE;

    rv = apr_global_mutex_lock(mc->pMutex);
    if (rv != APR_SUCCESS) {
        /* ap_log_error(APLOG_MARK, APLOG_WARNING, rv, s,
               "Failed to acquire global mutex lock");*/
        return FALSE;
    }
    return TRUE;
}
/*
 * Take the SSL module's global mutex for server `s`.  Returns TRUE on
 * success or when no mutex is configured, FALSE when the lock fails
 * (after logging a warning).
 */
int ssl_mutex_on(server_rec *s)
{
    SSLModConfigRec *mc = myModConfig(s);
    apr_status_t rv = APR_SUCCESS;

    if (mc->nMutexMode != SSL_MUTEXMODE_NONE) {
        rv = apr_global_mutex_lock(mc->pMutex);
        if (rv != APR_SUCCESS) {
            ap_log_error(APLOG_MARK, APLOG_WARNING, rv, s,
                         "Failed to acquire global mutex lock");
            return FALSE;
        }
    }
    return TRUE;
}
/*
 * post_config hook: set up the mod_psgi global mutex and shared memory.
 *
 * Runs twice in httpd's startup sequence; the userdata flag on the
 * process pool makes the real work happen only on the second pass.
 *
 * NOTE(review): several suspicious points to confirm —
 *  - key/file/app/hi locals are unused;
 *  - /tmp/psgi_mutex.<pid> and the fixed /tmp/psgi_shm name are
 *    predictable paths in a world-writable directory;
 *  - psgi_apps->apps is an apr_hash_t allocated from pconf (process-
 *    local memory) but its pointer is stored inside the shared segment,
 *    so it is not usable from other processes;
 *  - on total shm failure the function still returns OK with
 *    psgi_apps == NULL, silently disabling the app table.
 */
static int psgi_post_config(apr_pool_t *pconf, apr_pool_t *plog, apr_pool_t *ptemp, server_rec *s)
{
    dTHX;
    const void *key;
    char *file;
    SV *app;
    apr_hash_index_t *hi;
    void *data;
    const char *userdata_key = "psgi_post_config";
    psgi_apps_t *psgi_apps = NULL;
    apr_status_t rc;

    /* skip the first (configuration test) invocation of post_config */
    apr_pool_userdata_get(&data, userdata_key, s->process->pool);
    if (data == NULL) {
        apr_pool_userdata_set((const void *)1, userdata_key, apr_pool_cleanup_null, s->process->pool);
        return OK;
    }
    ap_add_version_component(pconf, apr_psprintf(pconf, "mod_psgi/%s", MOD_PSGI_VERSION));

    mutex_name = apr_psprintf(pconf, "/tmp/psgi_mutex.%ld", (long int) getpid());
    rc = apr_global_mutex_create(&psgi_mutex, (const char *) mutex_name, APR_LOCK_DEFAULT, pconf);
    if (rc != APR_SUCCESS) {
        return DECLINED;
    }
    rc = apr_global_mutex_lock(psgi_mutex);
    if (rc != APR_SUCCESS) {
        return DECLINED;
    }
    /* shared name to store apps */
    shm_name = apr_pstrdup(pconf, "/tmp/psgi_shm");
    /* attach to an existing segment first, create it if that fails */
    rc = apr_shm_attach(&psgi_shm, (const char *) shm_name, pconf);
    if (rc != APR_SUCCESS) {
        rc = apr_shm_create(&psgi_shm, sizeof(psgi_apps_t), (const char *) shm_name, pconf);
    }
    if (rc == APR_SUCCESS) {
        psgi_apps = (psgi_apps_t *)apr_shm_baseaddr_get(psgi_shm);
        psgi_apps->apps = apr_hash_make(pconf);
    }
    apr_global_mutex_unlock(psgi_mutex);
    return OK;
}
/*
 * get a value from the shared memory cache
 *
 * Looks up `key` in the shm slot table under the global mutex.  On a
 * non-expired hit, refreshes the entry's access timestamp and returns
 * the cached value through *value.  Returns TRUE on a hit, FALSE on a
 * miss or lock failure (with *value == NULL).
 */
static apr_byte_t oidc_cache_shm_get(request_rec *r, const char *key, const char **value) {

	ap_log_rerror(APLOG_MARK, OIDC_DEBUG, 0, r,
			"oidc_cache_shm_get: entering \"%s\"", key);

	oidc_cfg *cfg = ap_get_module_config(r->server->module_config,
			&auth_openidc_module);
	oidc_cache_cfg_shm_t *context = (oidc_cache_cfg_shm_t *) cfg->cache_cfg;

	apr_status_t rv;
	int i;
	*value = NULL;

	/* grab the global lock */
	if ((rv = apr_global_mutex_lock(context->mutex)) != APR_SUCCESS) {
		ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
				"oidc_cache_shm_get: apr_global_mutex_lock() failed [%d]", rv);
		return FALSE;
	}

	/* get the pointer to the start of the shared memory block */
	oidc_cache_shm_entry_t *table = apr_shm_baseaddr_get(context->shm);

	/* loop over the block, looking for the key */
	for (i = 0; i < cfg->cache_shm_size_max; i++) {
		const char *tablekey = table[i].key;
		if (tablekey == NULL)
			continue;
		if (strcmp(tablekey, key) == 0) {
			/* found a match, check if it has expired */
			if (table[i].expires > apr_time_now()) {
				/* update access timestamp */
				table[i].access = apr_time_now();
				*value = table[i].value;
			}
			/* IMPROVEMENT: keys are unique in the table, so stop
			 * scanning once the key is found (expired or not)
			 * instead of walking the whole segment. */
			break;
		}
	}

	/* release the global lock */
	apr_global_mutex_unlock(context->mutex);

	return (*value == NULL) ? FALSE : TRUE;
}
/*
 * Create (or join) the dosdetector client table in shared memory and
 * initialize its free list.  Also creates the cross-process lock.
 *
 * NOTE(review): tmpnam() is race-prone; apr_temp_dir_get() plus a
 * unique name would be safer.  Kept to preserve existing behavior.
 */
static void create_shm(server_rec *s, apr_pool_t *p)
{
    int threaded_mpm;
    ap_mpm_query(AP_MPMQ_IS_THREADED, &threaded_mpm);

    tmpnam(lock_name);
    apr_global_mutex_create(&lock, lock_name, APR_THREAD_MUTEX_DEFAULT, p);

    size_t size;
    size = sizeof(client_list_t) + table_size * sizeof(client_t);
    ap_log_error(APLOG_MARK, APLOG_STARTUP, 0, NULL,
                 "Create or Joining shmem. name: %s, size: %zd", shmname, size);

    if (lock) apr_global_mutex_lock(lock);

    apr_status_t rc = apr_shm_attach(&shm, shmname, p);
    if (APR_SUCCESS != rc) {
        DEBUGLOG("dosdetector: Creating shared memory");
        apr_shm_remove(shmname, p);
        rc = apr_shm_create(&shm, size, shmname, p);
        if (APR_SUCCESS != rc) {
            ap_log_error(APLOG_MARK, APLOG_ERR, 0, 0,
                         "dosdetector: failed to create shared memory %s\n", shmname);
            /* BUG FIX: the original fell through and dereferenced
             * client_list (never assigned -> NULL) below.  Unlock and
             * bail out instead. */
            if (lock) apr_global_mutex_unlock(lock);
            return;
        }
        client_list = apr_shm_baseaddr_get(shm);
        memset(client_list, 0, size);
    }
    else {
        DEBUGLOG("dosdetector: Joining shared memory");
        client_list = apr_shm_baseaddr_get(shm);
    }

    apr_shm_remove(shmname, p); // Just to set destroy flag.

    /* build the singly-linked free list over the client array */
    client_list->head = client_list->base;
    client_t *c = client_list->base;
    int i;
    for (i = 1; i < table_size; i++) {
        c->next = (c + 1);
        c++;
    }
    c->next = NULL;

    if (lock) apr_global_mutex_unlock(lock);
}
/*
 * Insert a host -> path mapping into the directory cache, evicting the
 * least-recently-accessed entry when the configured cache limit is hit.
 *
 * Each entry gets its own sub-pool so eviction can free it with a single
 * apr_pool_destroy().  The hash map itself lives in DT->dircache.pool.
 */
local void domaintree_cache_set(MDT_CNF *DT, apr_time_t atime, const char *host, const char *path)
{
	apr_pool_t *pool;
	dircache_entry_t *cache_entry;

	/* per-entry sub-pool: destroyed wholesale on eviction */
	apr_pool_create(&pool, DT->dircache.pool);
	cache_entry = apr_palloc(pool, sizeof(dircache_entry_t));

	cache_entry->pool = pool;
	cache_entry->lacc = atime;
	cache_entry->host = apr_pstrdup(pool, host);
	cache_entry->path = apr_pstrdup(pool, path);

	apr_global_mutex_lock(DT->dircache.lock);

	if (apr_hash_count(DT->dircache.hmap) >= DT->dircache.clim) {
		apr_hash_index_t *idx;
		dircache_entry_t *purge_this = NULL;

		ap_log_error(DT_LOG_WRN "reached cache limit (%ld)", DT->dircache.clim);

		/* linear scan for the entry with the oldest access time */
		for (idx = apr_hash_first(DT->dircache.pool, DT->dircache.hmap); idx; idx = apr_hash_next(idx)) {
			dircache_entry_t *current;

			apr_hash_this(idx, NULL, NULL, (void *) &current);
			if ((!purge_this) || (purge_this->lacc > current->lacc)) {
				purge_this = current;
			}
		}

		if (purge_this) {
			ap_log_error(DT_LOG_DBG "cache del = %s", purge_this->host);
			/* remove from the map first, then free its sub-pool */
			apr_hash_set(DT->dircache.hmap, purge_this->host, APR_HASH_KEY_STRING, NULL);
			apr_pool_destroy(purge_this->pool);
		}
	}
	apr_hash_set(DT->dircache.hmap, cache_entry->host, APR_HASH_KEY_STRING, cache_entry);

	apr_global_mutex_unlock(DT->dircache.lock);

	ap_log_error(DT_LOG_DBG "cache set = %s for %s", path, host);
}
/*
 * Run `iterator_p` over every entry in the storage's socache, holding
 * the global mutex for the duration.
 *
 * Returns true only when the mutex was acquired AND the iteration
 * completed successfully.
 *
 * BUG FIX: the original initialised the result to true and never
 * cleared it when apr_global_mutex_lock() failed, so a lock failure
 * reported success without iterating at all.
 */
bool IterateOverAPRGlobalStorage (APRGlobalStorage *storage_p, ap_socache_iterator_t *iterator_p, void *data_p)
{
	bool did_all_elements_flag = false;
	apr_status_t status = apr_global_mutex_lock (storage_p -> ags_mutex_p);

	if (status == APR_SUCCESS)
		{
			status = storage_p -> ags_socache_provider_p -> iterate (storage_p -> ags_socache_instance_p,
			                                                         storage_p -> ags_server_p,
			                                                         data_p,
			                                                         iterator_p,
			                                                         storage_p -> ags_pool_p);

			did_all_elements_flag = (status == APR_SUCCESS);

			/* unlock result intentionally not folded into the flag:
			 * the iteration itself already completed */
			status = apr_global_mutex_unlock (storage_p -> ags_mutex_p);
		}		/* if (status == APR_SUCCESS) */

	return did_all_elements_flag;
}
/*
 * Look up `host` in the directory cache.  On a hit the entry's access
 * time is refreshed and its cached path returned; otherwise NULL.
 */
local char *domaintree_cache_get(MDT_CNF *DT, apr_time_t atime, const char *host)
{
	dircache_entry_t *hit;
	char *found = NULL;

	apr_global_mutex_lock(DT->dircache.lock);

	hit = apr_hash_get(DT->dircache.hmap, host, APR_HASH_KEY_STRING);
	if (hit) {
		hit->lacc = atime;
		found = hit->path;
	}

	apr_global_mutex_unlock(DT->dircache.lock);

	if (found) {
		ap_log_error(DT_LOG_DBG "cache hit = %s for %s", found, host);
	}

	return found;
}
/*
 * Render the rainx statistics counters as a newline-separated text blob
 * allocated from `pool`.
 *
 * NOTE(review): the mutex only guards apr_shm_baseaddr_get(); the
 * STR_KV() reads of `stats` happen after unlock, presumably relying on
 * the counters being apr_atomic_* elsewhere — confirm.
 */
static const char * __gen_stats(const dav_resource *resource, apr_pool_t *pool)
{
	DAV_XDEBUG_POOL(pool, 0, "%s()", __FUNCTION__);

	dav_rainx_server_conf *c = resource_get_server_config(resource);

	apr_global_mutex_lock(c->lock.handle);
	struct shm_stats_s *stats = apr_shm_baseaddr_get(c->shm.handle);
	apr_global_mutex_unlock(c->lock.handle);

	/* STR_KV(field, label) expands to "label value\n" for stats->body.field */
	return apr_pstrcat(pool,
			STR_KV(time_all,       "counter req.time"),
			STR_KV(time_put,       "counter req.time.put"),
			STR_KV(time_get,       "counter req.time.get"),
			STR_KV(time_del,       "counter req.time.del"),
			STR_KV(time_stat,      "counter req.time.stat"),
			STR_KV(time_info,      "counter req.time.info"),
			STR_KV(time_raw,       "counter req.time.raw"),
			STR_KV(time_other,     "counter req.time.other"),
			STR_KV(req_all,        "counter req.hits"),
			STR_KV(req_chunk_put,  "counter req.hits.put"),
			STR_KV(req_chunk_get,  "counter req.hits.get"),
			STR_KV(req_chunk_del,  "counter req.hits.del"),
			STR_KV(req_stat,       "counter req.hits.stat"),
			STR_KV(req_info,       "counter req.hits.info"),
			STR_KV(req_raw,        "counter req.hits.raw"),
			STR_KV(req_other,      "counter req.hits.other"),
			STR_KV(rep_2XX,        "counter rep.hits.2xx"),
			STR_KV(rep_4XX,        "counter rep.hits.4xx"),
			STR_KV(rep_5XX,        "counter rep.hits.5xx"),
			STR_KV(rep_other,      "counter rep.hits.other"),
			STR_KV(rep_403,        "counter rep.hits.403"),
			STR_KV(rep_404,        "counter rep.hits.404"),
			STR_KV(rep_bread,      "counter rep.bread"),
			STR_KV(rep_bwritten,   "counter rep.bwritten"),
			NULL);
}
/*
 * Send a spawn command to the FastCGI process manager over the shared
 * pipe and wait for the single-byte completion notify, serialised by
 * the global pipe mutex.
 *
 * NOTE(review): write/read failures are only logged — the function
 * still returns APR_SUCCESS; confirm callers do not rely on the return
 * value to detect spawn failure.  Also, exit(0) on a mutex failure
 * kills the whole child without an error exit code — intentional?
 */
apr_status_t procmgr_send_spawn_cmd(fcgid_command * command, request_rec * r)
{
    apr_status_t rv;
    char notifybyte;
    apr_size_t nbytes = sizeof(*command);

    /* Get the global mutex before posting the request */
    if ((rv = apr_global_mutex_lock(g_pipelock)) != APR_SUCCESS) {
        ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r,
                      "mod_fcgid: can't get pipe mutex");
        exit(0);
    }

    if ((rv = apr_file_write_full(g_ap_write_pipe, command, nbytes,
                                  NULL)) != APR_SUCCESS) {
        /* Just print some error log and fall through */
        ap_log_rerror(APLOG_MARK, APLOG_WARNING, rv, r,
                      "mod_fcgid: can't write spawn command");
    } else {
        /* Wait the finish notify while send the request successfully */
        nbytes = sizeof(notifybyte);
        if ((rv = apr_file_read(g_ap_read_pipe, &notifybyte, &nbytes)) != APR_SUCCESS) {
            ap_log_rerror(APLOG_MARK, APLOG_WARNING, rv, r,
                          "mod_fcgid: can't get notify from process manager");
        }
    }

    /* Release the lock */
    if ((rv = apr_global_mutex_unlock(g_pipelock)) != APR_SUCCESS) {
        ap_log_rerror(APLOG_MARK, APLOG_EMERG, rv, r,
                      "mod_fcgid: can't release pipe mutex");
        exit(0);
    }

    return APR_SUCCESS;
}
/*
 * Look up an object in the global storage by raw key, optionally
 * removing it afterwards.
 *
 * The key is transformed via ags_make_key_fn when configured.  The
 * value is copied into a freshly allocated buffer sized by the largest
 * entry seen so far; on success the caller owns that buffer.
 *
 * Returns the value buffer, or NULL on any failure.
 *
 * BUG FIXES vs. original:
 *  - the "Failed to lock/unlock mutex" messages used "%s" for the
 *    integer apr_status_t (undefined behavior), and the unlock message
 *    had three conversion specifiers but only two arguments;
 *  - apr_global_mutex_unlock() was called even when the lock was never
 *    acquired; it is now only released on the lock-success path.
 */
static void *FindObjectFromAPRGlobalStorage (APRGlobalStorage *storage_p, const void *raw_key_p, unsigned int raw_key_length, const bool remove_flag)
{
	void *result_p = NULL;
	unsigned int key_len = 0;
	unsigned char *key_p = NULL;

	/* derive the cache key, using the custom key function if provided */
	if (storage_p -> ags_make_key_fn)
		{
			key_p = storage_p -> ags_make_key_fn (raw_key_p, raw_key_length, &key_len);
		}
	else
		{
			key_p = (unsigned char *) raw_key_p;
			key_len = raw_key_length;
		}

	if (key_p)
		{
			apr_status_t status;
			bool alloc_key_flag = false;
			char *key_s = GetKeyAsValidString ((char *) key_p, key_len, &alloc_key_flag);

			#if APR_GLOBAL_STORAGE_DEBUG >= STM_LEVEL_FINEST
			PrintLog (STM_LEVEL_FINEST, __FILE__, __LINE__, "Made key: %s", key_s);
			#endif

			status = apr_global_mutex_lock (storage_p -> ags_mutex_p);

			if (status == APR_SUCCESS)
				{
					unsigned char *temp_p = NULL;
					unsigned int array_size = 0;

					#if APR_GLOBAL_STORAGE_DEBUG >= STM_LEVEL_FINEST
					PrintLog (STM_LEVEL_FINEST, __FILE__, __LINE__, "Locked mutex");
					#endif

					if (GetLargestEntrySize (storage_p, &array_size))
						{
							/* We don't know how big the value might be so allocate
							 * the largest value that we've seen so far */
							temp_p = (unsigned char *) AllocMemoryArray (array_size, sizeof (unsigned char));

							if (temp_p)
								{
									/* get the value */
									status = storage_p -> ags_socache_provider_p -> retrieve (storage_p -> ags_socache_instance_p, storage_p -> ags_server_p, key_p, key_len, temp_p, &array_size, storage_p -> ags_pool_p);

									#if APR_GLOBAL_STORAGE_DEBUG >= STM_LEVEL_FINEST
									PrintLog (STM_LEVEL_FINEST, __FILE__, __LINE__, "status %d key %s length %u result_p %0.16X remove_flag %d", status, key_s, key_len, temp_p, remove_flag);
									#endif

									if (status == APR_SUCCESS)
										{
											result_p = temp_p;

											if (remove_flag == true)
												{
													status = storage_p -> ags_socache_provider_p -> remove (storage_p -> ags_socache_instance_p, storage_p -> ags_server_p, key_p, key_len, storage_p -> ags_pool_p);

													#if APR_GLOBAL_STORAGE_DEBUG >= STM_LEVEL_FINEST
													PrintLog (STM_LEVEL_FINEST, __FILE__, __LINE__, "status after removal %d", status);
													#endif
												}
										}
									else
										{
											PrintErrors (STM_LEVEL_SEVERE, __FILE__, __LINE__, "status %d key %s result_p %0.16X remove_flag %d", status, key_s, temp_p, remove_flag);
											FreeMemory (temp_p);
										}
								}		/* if (temp_p) */
						}		/* if (GetLargestEntrySize (storage_p, &array_size)) */
					else
						{
							PrintErrors (STM_LEVEL_SEVERE, __FILE__, __LINE__, "Failed to get largest entry size when adding \"%s\"", key_s);
						}

					/* only release the mutex on the path where we acquired it */
					status = apr_global_mutex_unlock (storage_p -> ags_mutex_p);

					if (status != APR_SUCCESS)
						{
							PrintErrors (STM_LEVEL_SEVERE, __FILE__, __LINE__, "Failed to unlock mutex for %s, status %d", key_s, status);
						}		/* if (status != APR_SUCCESS) */
				}		/* if (status == APR_SUCCESS) */
			else
				{
					PrintErrors (STM_LEVEL_SEVERE, __FILE__, __LINE__, "Failed to lock mutex for %s, status %d", key_s, status);
				}

			/* a generated key is owned by us and must be released */
			if (key_p != raw_key_p)
				{
					FreeMemory (key_p);
				}

			if (alloc_key_flag)
				{
					FreeCopiedString (key_s);
				}
		}		/* if (key_p) */

	#if APR_GLOBAL_STORAGE_DEBUG >= STM_LEVEL_FINEST
	PrintAPRGlobalStorage (storage_p);
	#endif

	return result_p;
}
/*
 * Store `value_p` (value_length bytes) in the global storage under the
 * given raw key, transformed via ags_make_key_fn when configured.
 * Entries are stored with expiry APR_INT64_MAX, i.e. effectively never
 * expiring.  Also grows the recorded largest-entry size so later
 * retrievals can size their buffers.
 *
 * Returns true on success, false on any failure.
 *
 * BUG FIX vs. original: the lock/unlock failure messages used "%s" for
 * the integer apr_status_t (undefined behavior); they now use "%d".
 */
bool AddObjectToAPRGlobalStorage (APRGlobalStorage *storage_p, const void *raw_key_p, unsigned int raw_key_length, unsigned char *value_p, unsigned int value_length)
{
	bool success_flag = false;
	unsigned int key_len = 0;
	unsigned char *key_p = NULL;

	/* derive the cache key, using the custom key function if provided */
	if (storage_p -> ags_make_key_fn)
		{
			key_p = storage_p -> ags_make_key_fn (raw_key_p, raw_key_length, &key_len);
		}
	else
		{
			key_p = (unsigned char *) raw_key_p;
			key_len = raw_key_length;
		}

	if (key_p)
		{
			bool alloc_key_flag = false;
			char *key_s = GetKeyAsValidString ((char *) key_p, key_len, &alloc_key_flag);
			apr_status_t status = apr_global_mutex_lock (storage_p -> ags_mutex_p);

			if (status == APR_SUCCESS)
				{
					apr_time_t end_of_time = APR_INT64_MAX;

					/* store it */
					status = storage_p -> ags_socache_provider_p -> store (storage_p -> ags_socache_instance_p, storage_p -> ags_server_p, key_p, key_len, end_of_time, value_p, value_length, storage_p -> ags_pool_p);

					if (status == APR_SUCCESS)
						{
							success_flag = true;

							if (!SetLargestEntrySize (storage_p, value_length))
								{
									PrintErrors (STM_LEVEL_FINE, __FILE__, __LINE__, "Failed to possibly set largest entry size to %u", value_length);
								}

							#if APR_GLOBAL_STORAGE_DEBUG >= STM_LEVEL_FINE
							PrintLog (STM_LEVEL_FINE, __FILE__, __LINE__, "Added \"%s\" length %u, value %.16X length %u to global store", key_s, key_len, value_p, value_length);
							#endif
						}
					else
						{
							PrintErrors (STM_LEVEL_FINE, __FILE__, __LINE__, "Failed to add \"%s\" length %u, value %.16X length %u to global store", key_s, key_len, value_p, value_length);
						}

					status = apr_global_mutex_unlock (storage_p -> ags_mutex_p);

					if (status != APR_SUCCESS)
						{
							PrintErrors (STM_LEVEL_SEVERE, __FILE__, __LINE__, "Failed to unlock mutex, status %d after adding %s", status, key_s);
						}		/* if (status != APR_SUCCESS) */
				}		/* if (status == APR_SUCCESS) */
			else
				{
					PrintErrors (STM_LEVEL_SEVERE, __FILE__, __LINE__, "Failed to lock mutex, status %d to add %s", status, key_s);
				}

			if (alloc_key_flag)
				{
					FreeCopiedString (key_s);
				}

			/*
			 * If the key_p isn't pointing to the same address
			 * as raw_key_p it must be new, so delete it.
			 */
			if (key_p != raw_key_p)
				{
					FreeMemory (key_p);
				}
		}		/* if (key_p) */
	else
		{
			PrintErrors (STM_LEVEL_SEVERE, __FILE__, __LINE__, "Failed to make key");
		}

	#if APR_GLOBAL_STORAGE_DEBUG >= STM_LEVEL_FINEST
	PrintAPRGlobalStorage (storage_p);
	#endif

	return success_flag;
}
/* This function locks the session table and locates a session entry.
 * Unlocks the table and returns NULL if the entry wasn't found.
 * If a entry was found, then you _must_ unlock it with am_cache_unlock
 * after you are done with it.
 *
 * Parameters:
 *  server_rec *s        The current server.
 *  const char *key      The session key or user
 *  am_cache_key_t type  AM_CACHE_SESSION or AM_CACHE_NAMEID
 *
 * Returns:
 *  The session entry on success or NULL on failure.
 */
am_cache_entry_t *am_cache_lock(server_rec *s,
                                am_cache_key_t type,
                                const char *key)
{
    am_mod_cfg_rec *mod_cfg;
    void *table;
    apr_size_t i;
    int rv;
    char buffer[512];

    /* Check if we have a valid session key. We abort if we don't. */
    if (key == NULL)
        return NULL;

    switch (type) {
    case AM_CACHE_SESSION:
        /* session keys have a fixed length */
        if (strlen(key) != AM_ID_LENGTH)
            return NULL;
        break;
    case AM_CACHE_NAMEID:
        break;
    default:
        return NULL;
        break;
    }

    mod_cfg = am_get_mod_cfg(s);

    /* Lock the table. */
    if((rv = apr_global_mutex_lock(mod_cfg->lock)) != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
                     "apr_global_mutex_lock() failed [%d]: %s",
                     rv, apr_strerror(rv, buffer, sizeof(buffer)));
        return NULL;
    }

    table = apr_shm_baseaddr_get(mod_cfg->cache);

    /* linear scan of the fixed-size entry table */
    for(i = 0; i < mod_cfg->init_cache_size; i++) {
        am_cache_entry_t *e = am_cache_entry_ptr(mod_cfg, table, i);
        const char *tablekey;

        if (e->key[0] == '\0') {
            /* This entry is empty. Skip it. */
            continue;
        }

        switch (type) {
        case AM_CACHE_SESSION:
            tablekey = e->key;
            break;
        case AM_CACHE_NAMEID:
            /* tablekey may be NULL */
            tablekey = am_cache_env_fetch_first(e, "NAME_ID");
            break;
        default:
            tablekey = NULL;
            break;
        }

        if (tablekey == NULL)
            continue;

        if(strcmp(tablekey, key) == 0) {
            /* We found the entry. */
            if(e->expires > apr_time_now()) {
                /* And it hasn't expired.
                 * NOTE: returning here is deliberate — the table stays
                 * locked until the caller runs am_cache_unlock(). */
                return e;
            }
        }
    }

    /* We didn't find a entry matching the key. Unlock the table and
     * return NULL;
     */
    apr_global_mutex_unlock(mod_cfg->lock);
    return NULL;
}
/* This function locks the session table and creates a new session entry.
 * It will first attempt to locate a free session. If it doesn't find a
 * free session, then it will take the least recentry used session.
 *
 * Remember to unlock the table with am_cache_unlock(...) afterwards.
 *
 * Parameters:
 *  server_rec *s        The current server.
 *  const char *key      The key of the session to allocate.
 *
 * Returns:
 *  The new session entry on success. NULL if key is a invalid session
 *  key.
 */
am_cache_entry_t *am_cache_new(server_rec *s, const char *key)
{
    am_cache_entry_t *t;
    am_mod_cfg_rec *mod_cfg;
    void *table;
    apr_time_t current_time;
    int i;
    apr_time_t age;
    int rv;
    char buffer[512];

    /* Check if we have a valid session key. We abort if we don't. */
    if(key == NULL || strlen(key) != AM_ID_LENGTH) {
        return NULL;
    }

    mod_cfg = am_get_mod_cfg(s);

    /* Lock the table.
     * NOTE: on success this function returns with the lock still held;
     * the caller must am_cache_unlock(). */
    if((rv = apr_global_mutex_lock(mod_cfg->lock)) != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
                     "apr_global_mutex_lock() failed [%d]: %s",
                     rv, apr_strerror(rv, buffer, sizeof(buffer)));
        return NULL;
    }

    table = apr_shm_baseaddr_get(mod_cfg->cache);

    /* Get current time. If we find a entry with expires <= the current
     * time, then we can use it.
     */
    current_time = apr_time_now();

    /* We will use 't' to remember the best/oldest entry. We
     * initalize it to the first entry in the table to simplify the
     * following code (saves test for t == NULL).
     */
    t = am_cache_entry_ptr(mod_cfg, table, 0);;

    /* Iterate over the session table. Update 't' to match the "best"
     * entry (the least recently used). 't' will point a free entry
     * if we find one. Otherwise, 't' will point to the least recently
     * used entry.
     */
    for(i = 0; i < mod_cfg->init_cache_size; i++) {
        am_cache_entry_t *e = am_cache_entry_ptr(mod_cfg, table, i);

        if (e->key[0] == '\0') {
            /* This entry is free. Update 't' to this entry
             * and exit loop.
             */
            t = e;
            break;
        }

        if (e->expires <= current_time) {
            /* This entry is expired, and is therefore free.
             * Update 't' and exit loop.
             */
            t = e;
            break;
        }

        if (e->access < t->access) {
            /* This entry is older than 't' - update 't'. */
            t = e;
        }
    }

    if(t->key[0] != '\0' && t->expires > current_time) {
        /* We dropped a LRU entry. Calculate the age in seconds. */
        age = (current_time - t->access) / 1000000;

        if(age < 3600) {
            ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, s,
                         "Dropping LRU entry entry with age = %" APR_TIME_T_FMT
                         "s, which is less than one hour. It may be a good"
                         " idea to increase MellonCacheSize.",
                         age);
        }
    }

    /* Now 't' points to the entry we are going to use. We initialize
     * it and returns it.
     */
    strcpy(t->key, key);

    /* Far far into the future. */
    t->expires = 0x7fffffffffffffffLL;

    t->logged_in = 0;
    t->size = 0;

    am_cache_storage_null(&t->user);
    am_cache_storage_null(&t->lasso_identity);
    am_cache_storage_null(&t->lasso_session);
    am_cache_storage_null(&t->lasso_saml_response);
    am_cache_entry_env_null(t);

    t->pool_size = am_cache_entry_pool_size(mod_cfg);
    t->pool[0] = '\0';
    t->pool_used = 1;

    return t;
}
/*
 * Bump the rainx request/response counters identified by the two-char
 * code `n` ('qN' = request counters, 'rN' = reply counters) by `value`,
 * and the matching time counter by `duration` when duration > 0.
 *
 * BUG FIX: cases '4'..'7' of the 'q' family previously added `value`
 * into the time_* counters instead of `duration`, unlike cases
 * '0'..'3' — a copy-paste error that corrupted the timing stats.
 *
 * NOTE(review): the mutex only protects apr_shm_baseaddr_get(); the
 * updates themselves rely on apr_atomic_add32 for cross-process safety.
 */
void server_add_stat(dav_rainx_server_conf *conf, const char *n, apr_uint32_t value, apr_uint32_t duration)
{
	EXTRA_ASSERT(NULL != conf->shm.handle);
	EXTRA_ASSERT(NULL != conf->lock.handle);
	EXTRA_ASSERT(n && n[0] && n[1]);

	apr_global_mutex_lock(conf->lock.handle);
	struct shm_stats_s *shm_stats = apr_shm_baseaddr_get(conf->shm.handle);
	apr_global_mutex_unlock(conf->lock.handle);

	if (!shm_stats)
		return;

	switch (*n) {
		case 'q':
			switch (n[1]) {
				case '0':
					apr_atomic_add32(&(shm_stats->body.req_all), value);
					if (duration > 0)
						apr_atomic_add32(&(shm_stats->body.time_all), duration);
					break;
				case '1':
					apr_atomic_add32(&(shm_stats->body.req_chunk_get), value);
					if (duration > 0)
						apr_atomic_add32(&(shm_stats->body.time_get), duration);
					break;
				case '2':
					apr_atomic_add32(&(shm_stats->body.req_chunk_put), value);
					if (duration > 0)
						apr_atomic_add32(&(shm_stats->body.time_put), duration);
					break;
				case '3':
					apr_atomic_add32(&(shm_stats->body.req_chunk_del), value);
					if (duration > 0)
						apr_atomic_add32(&(shm_stats->body.time_del), duration);
					break;
				case '4':
					apr_atomic_add32(&(shm_stats->body.req_stat), value);
					if (duration > 0)
						apr_atomic_add32(&(shm_stats->body.time_stat), duration);
					break;
				case '5':
					apr_atomic_add32(&(shm_stats->body.req_info), value);
					if (duration > 0)
						apr_atomic_add32(&(shm_stats->body.time_info), duration);
					break;
				case '6':
					apr_atomic_add32(&(shm_stats->body.req_raw), value);
					if (duration > 0)
						apr_atomic_add32(&(shm_stats->body.time_raw), duration);
					break;
				case '7':
					apr_atomic_add32(&(shm_stats->body.req_other), value);
					if (duration > 0)
						apr_atomic_add32(&(shm_stats->body.time_other), duration);
					break;
			}
			break;
		case 'r':
			switch (n[1]) {
				case '1':
					apr_atomic_add32(&(shm_stats->body.rep_2XX), value);
					break;
				case '2':
					apr_atomic_add32(&(shm_stats->body.rep_4XX), value);
					break;
				case '3':
					apr_atomic_add32(&(shm_stats->body.rep_5XX), value);
					break;
				case '4':
					apr_atomic_add32(&(shm_stats->body.rep_other), value);
					break;
				case '5':
					apr_atomic_add32(&(shm_stats->body.rep_403), value);
					break;
				case '6':
					apr_atomic_add32(&(shm_stats->body.rep_404), value);
					break;
				case '7':
					apr_atomic_add32(&(shm_stats->body.rep_bread), value);
					break;
				case '8':
					apr_atomic_add32(&(shm_stats->body.rep_bwritten), value);
					break;
			}
			break;
	}
}
/*
 * Per-request handler: resolve the client address (optionally from
 * X-Forwarded-For), skip ignored content types, then update the shared
 * per-client counter and flag suspected DoS clients via subprocess_env
 * ("SuspectDoS" / "SuspectHardDoS").  Always returns DECLINED or OK so
 * the request proceeds normally.
 *
 * NOTE(review): `if(cfg->detection) return DECLINED;` looks inverted —
 * one would expect detection *disabled* to bail out; confirm the flag's
 * polarity.  Also the inner `const char *i` shadows the outer `int i`,
 * and `last_count` is only declared under #ifdef _DEBUG while the
 * DEBUGLOG below references it — presumably DEBUGLOG compiles away
 * without _DEBUG; verify.
 */
static int dosdetector_handler(request_rec *r)
{
    //DEBUGLOG("dosdetector_handler is called");

    dosdetector_dir_config *cfg = (dosdetector_dir_config *) ap_get_module_config(r->per_dir_config, &dosdetector_module);
    if(cfg->detection) return DECLINED;
    if (!ap_is_initial_req(r)) return DECLINED;

    //char **ignore_contenttype = (char **) cfg->ignore_contenttype->elts;
    const char *content_type;
    const char *address_tmp;
    const char *address = NULL;
    int i;

    /* content type as a sub-request lookup would see it */
    content_type = ap_sub_req_lookup_uri(r->uri, r, NULL)->content_type;
    if (!content_type) {
#if (AP_SERVER_MINORVERSION_NUMBER > 2)
        content_type = DefaultContentType;
#else
        content_type = ap_default_type(r);
#endif
    }

    if (cfg->forwarded){
        /* take the first (client-most) address from X-Forwarded-For */
        if ((address_tmp = apr_table_get(r->headers_in, "X-Forwarded-For")) != NULL){
            const char *i = address_tmp;
            while(*i != 0 && *i != ',')
                i++;
            address = apr_pstrndup(r->pool, address_tmp, i - address_tmp);
        }
    }
    if (address == NULL) {
#if (AP_SERVER_MINORVERSION_NUMBER > 2)
        address = r->connection->client_ip;
#else
        address = r->connection->remote_ip;
#endif
    }

    /* skip requests whose content type matches an ignore pattern */
    ap_regmatch_t regmatch[AP_MAX_REG_MATCH];
    ap_regex_t **contenttype_regexp = (ap_regex_t **) cfg->contenttype_regexp->elts;
    for (i = 0; i < cfg->contenttype_regexp->nelts; i++) {
        if(!ap_regexec(contenttype_regexp[i], content_type, AP_MAX_REG_MATCH, regmatch, 0)){
            //ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, 0, "ignoring content-type: %s", content_type);
            return OK;
        }
    }
    DEBUGLOG("dosdetector: processing content-type: %s", content_type);

    struct in_addr addr;
    if(!cfg->forwarded) {
#if (AP_SERVER_MINORVERSION_NUMBER > 2)
        addr = r->connection->client_addr->sa.sin.sin_addr;
#else
        addr = r->connection->remote_addr->sa.sin.sin_addr;
#endif
    }
    if(cfg->forwarded || addr.s_addr == 0){
        if (inet_aton(address, &addr) == 0) {
            TRACELOG("dosdetector: '%s' is not a valid IP addresss", address);
            return DECLINED;
        }
    }

    /* fetch/create the per-client record under the cross-process lock */
    if (lock) apr_global_mutex_lock(lock);
    client_t *client = get_client(client_list, addr, cfg->period);
    if (lock) apr_global_mutex_unlock(lock);

#ifdef _DEBUG
    int last_count = client->count;
#endif
    count_increment(client, cfg->threshold);
    DEBUGLOG("dosdetector: %s, count: %d -> %d, interval: %d", address, last_count, client->count, (int)client->interval);
    //DEBUGLOG("dosdetector: %s, count: %d -> %d, interval: %d on tid %d, pid %d", address, last_count, client->count, (int)client->interval, gettid(), getpid());

    time_t now = time((time_t *)0);
    if(client->suspected > 0 && client->suspected + cfg->ban_period > now){
        /* still within the ban window of a previous suspicion */
        apr_table_setn(r->subprocess_env, "SuspectDoS", "1");
        //apr_table_setn(r->notes, "SuspectDoS", "1");
        DEBUGLOG("dosdetector: '%s' has been still suspected as DoS attack! (suspected %d sec ago)", address, now - client->suspected);

        if(client->count > cfg->ban_threshold){
            if(client->hard_suspected == 0)
                TRACELOG("dosdetector: '%s' is suspected as Hard DoS attack! (counter: %d)", address, client->count);
            client->hard_suspected = now;
            apr_table_setn(r->subprocess_env, "SuspectHardDoS", "1");
            //apr_table_setn(r->notes, "SuspectHardDoS", "1");
        }
    } else {
        if(client->suspected > 0){
            /* ban window elapsed: reset the client's state */
            client->suspected = 0;
            client->hard_suspected = 0;
            client->count = 0;
        }

        //int last_count = client->count;
        //client->count = client->count - client->interval * cfg->threshold;
        //if(client->count < 0)
        //    client->count = 0;
        //client->count ++;
        //DEBUGLOG("client address: %s, count: %d -> %d, interval: %d", address, last_count, client->count, client->interval);

        if(client->count > cfg->threshold){
            client->suspected = now;
            apr_table_setn(r->subprocess_env, "SuspectDoS", "1");
            //apr_table_setn(r->notes, "SuspectDoS", "1");
            TRACELOG("dosdetector: '%s' is suspected as DoS attack! (counter: %d)", address, client->count);
        }
    }

    return DECLINED;
}
/*
 * Render the rainx statistics, including 4-second rate/latency figures
 * derived from the round-robin counters, as a newline-separated text
 * blob allocated from `pool`.
 *
 * BUG FIX: removed `memset(&stats, 0, sizeof(stats))` — `stats` is a
 * pointer, so that call zeroed the pointer variable itself (pointer
 * size, not struct size) and was immediately overwritten by the
 * apr_shm_baseaddr_get() assignment below.  It was dead, misleading
 * code.
 *
 * NOTE(review): the mutex only guards apr_shm_baseaddr_get(); the
 * rrd/counter reads happen after unlock — confirm they are safe.
 */
static const char * __gen_stats(const dav_resource *resource, apr_pool_t *pool)
{
	struct shm_stats_s *stats = NULL;

	DAV_XDEBUG_POOL(pool, 0, "%s()", __FUNCTION__);

	dav_rainx_server_conf *c = NULL;
	c = resource_get_server_config(resource);

	apr_global_mutex_lock(c->lock.handle);
	stats = apr_shm_baseaddr_get(c->shm.handle);
	apr_global_mutex_unlock(c->lock.handle);

	/* deltas over the last 4 seconds of the round-robin windows */
	apr_uint64_t req = rainx_stats_rrd_get_delta(&(stats->body.rrd_req_sec), 4);
	apr_uint64_t reqavgtime = rainx_stats_rrd_get_delta(&(stats->body.rrd_duration), 4);
	apr_uint64_t req_put = rainx_stats_rrd_get_delta(&(stats->body.rrd_req_put_sec), 4);
	apr_uint64_t reqavgtime_put = rainx_stats_rrd_get_delta(&(stats->body.rrd_put_duration), 4);
	apr_uint64_t req_get = rainx_stats_rrd_get_delta(&(stats->body.rrd_req_get_sec), 4);
	apr_uint64_t reqavgtime_get = rainx_stats_rrd_get_delta(&(stats->body.rrd_get_duration), 4);
	apr_uint64_t req_del = rainx_stats_rrd_get_delta(&(stats->body.rrd_req_del_sec), 4);
	apr_uint64_t reqavgtime_del = rainx_stats_rrd_get_delta(&(stats->body.rrd_del_duration), 4);

	/* average latency per request class (guard against division by zero) */
	apr_uint64_t r_time = 0, r_put_time = 0, r_get_time = 0, r_del_time = 0;
	if(req > 0)
		r_time = reqavgtime / req;
	if(req_put > 0)
		r_put_time = reqavgtime_put / req_put;
	if(req_get > 0)
		r_get_time = reqavgtime_get / req_get;
	if(req_del > 0)
		r_del_time = reqavgtime_del / req_del;

	/* requests per second over the 4-second window */
	double r_rate = 0, r_put_rate = 0, r_get_rate = 0, r_del_rate = 0;
	r_rate = (double)req / 4;
	r_put_rate = (double)req_put / 4;
	r_get_rate = (double)req_get / 4;
	r_del_rate = (double)req_del / 4;

	return apr_pstrcat(pool,
			STR_KV(req_all,       "req.all"),
			STR_KV(req_chunk_put, "req.put"),
			STR_KV(req_chunk_get, "req.get"),
			STR_KV(req_chunk_del, "req.del"),
			STR_KV(req_stat,      "req.stat"),
			STR_KV(req_info,      "req.info"),
			STR_KV(req_raw,       "req.raw"),
			STR_KV(req_other,     "req.other"),
			STR_KV(rep_2XX,       "rep.2xx"),
			STR_KV(rep_4XX,       "rep.4xx"),
			STR_KV(rep_5XX,       "rep.5xx"),
			STR_KV(rep_other,     "rep.other"),
			STR_KV(rep_403,       "rep.403"),
			STR_KV(rep_404,       "rep.404"),
			STR_KV(rep_bread,     "rep.bread"),
			STR_KV(rep_bwritten,  "rep.bwritten"),
			apr_psprintf(pool, "rainx.reqpersec %f\n", r_rate),
			apr_psprintf(pool, "rainx.avreqtime %"APR_UINT64_T_FMT"\n", r_time),
			apr_psprintf(pool, "rainx.reqputpersec %f\n", r_put_rate),
			apr_psprintf(pool, "rainx.avputreqtime %"APR_UINT64_T_FMT"\n", r_put_time),
			apr_psprintf(pool, "rainx.reqgetpersec %f\n", r_get_rate),
			apr_psprintf(pool, "rainx.avgetreqtime %"APR_UINT64_T_FMT"\n", r_get_time),
			apr_psprintf(pool, "rainx.reqdelpersec %f\n", r_del_rate),
			apr_psprintf(pool, "rainx.avdelreqtime %"APR_UINT64_T_FMT"\n", r_del_time),
			NULL);
}
/*
 * mod_python: acquire one of the module's global mutexes.
 *
 * Args: (server, key[, index]).  With index == -1 (the default) the
 * lock slot is derived from hash(key), mapped into [1, nlocks-1] so
 * that slot 0 stays reserved for dbm locking (see Session.py).
 *
 * BUG FIX: the hash was previously stored in an `int`, truncating
 * PyObject_Hash()'s wider return value, and passed through abs(),
 * which is undefined behavior for INT_MIN.  Use `long` (the historical
 * PyObject_Hash return type) and an unsigned modulo instead.
 */
static PyObject *_global_lock(PyObject *self, PyObject *args)
{
    PyObject *server;
    PyObject *key;
    server_rec *s;
    py_global_config *glb;
    int index = -1;
    apr_status_t rv;

    if (! PyArg_ParseTuple(args, "OO|i", &server, &key, &index))
        return NULL;

    if (! MpServer_Check(server)) {
        PyErr_SetString(PyExc_TypeError,
                        "First argument must be a server object");
        return NULL;
    }

    s = ((serverobject *)server)->server;

    apr_pool_userdata_get((void **)&glb, MP_CONFIG_KEY, s->process->pool);

    if ((index >= (glb->nlocks)) || (index < -1)) {
        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
                     "Index %d is out of range for number of global mutex locks", index);
        PyErr_SetString(PyExc_ValueError,
                        "Lock index is out of range for number of global mutex locks");
        return NULL;
    }

    if (index == -1) {
        long hash = PyObject_Hash(key);
        if (hash == -1) {
            return NULL;
        }
        /* note that this will never result in 0,
         * which is reserved for things like dbm
         * locking (see Session.py) */
        index = (int)((unsigned long)hash % (unsigned long)(glb->nlocks-1)) + 1;
    }

    /* ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, */
    /*              "_global_lock at index %d from pid %d", index, getpid()); */
    Py_BEGIN_ALLOW_THREADS
    rv = apr_global_mutex_lock(glb->g_locks[index]);
    Py_END_ALLOW_THREADS
    if (rv != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_WARNING, rv, s,
                     "Failed to acquire global mutex lock at index %d", index);
        PyErr_SetString(PyExc_ValueError,
                        "Failed to acquire global mutex lock");
        return NULL;
    }
    /* ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, */
    /*              "_global_lock DONE at index %d from pid %d", index, getpid()); */
    Py_INCREF(Py_None);
    return Py_None;
}
/*
 * Fold one statistics sample into the shared-memory stats block.
 *
 * conf     - server configuration holding the shm segment and global lock
 * n        - two-character counter code: first char selects the family
 *            ('q' = request counters, 'r' = reply counters), second char
 *            selects the counter within that family (see switch below)
 * value    - amount added to the selected counter
 * duration - request duration; when > 0 it is also folded into the
 *            matching time counter and RRD series (q0-q3 only)
 *
 * The global mutex is held only around apr_shm_baseaddr_get(); the
 * counter updates themselves rely on apr_atomic_add32().
 */
void server_add_stat(dav_rawx_server_conf *conf, const char *n, apr_uint32_t value, apr_uint32_t duration)
{
    struct shm_stats_s *shm_stats;

    if (!n)
        return;
    if (!conf->shm.handle || !conf->lock.handle) {
        /* This should never happen! */
#ifdef HAVE_EXTRA_DEBUG
        abort();
#else
        return;
#endif
    }
    if (!n[0] || !n[1] || n[2]!='\0') { /* strlen(n)!=2 */
#ifdef HAVE_EXTRA_DEBUG
        abort();
#else
        return;
#endif
    }

    /* Lock only to map the segment base address; updates are atomic. */
    apr_global_mutex_lock(conf->lock.handle);
    shm_stats = apr_shm_baseaddr_get(conf->shm.handle);
    apr_global_mutex_unlock(conf->lock.handle);

    /* increase the appropriated counter */
    if (shm_stats) {
        switch (*n) {
            case 'q': /* request counters */
                switch (n[1]) {
                    case '0': /* q0: all requests */
                        apr_atomic_add32(&(shm_stats->body.req_all), value);
                        if(duration > 0) {
                            apr_atomic_add32(&(shm_stats->body.time_all), duration);
                            rawx_stats_rrd_push(&(shm_stats->body.rrd_req_sec), shm_stats->body.req_all);
                            rawx_stats_rrd_push(&(shm_stats->body.rrd_duration), shm_stats->body.time_all);
                        }
                        break;
                    case '1': /* q1: chunk GET */
                        apr_atomic_add32(&(shm_stats->body.req_chunk_get), value);
                        if(duration > 0) {
                            apr_atomic_add32(&(shm_stats->body.time_get), duration);
                            rawx_stats_rrd_push(&(shm_stats->body.rrd_req_get_sec), shm_stats->body.req_chunk_get);
                            rawx_stats_rrd_push(&(shm_stats->body.rrd_get_duration), shm_stats->body.time_get);
                        }
                        break;
                    case '2': /* q2: chunk PUT */
                        apr_atomic_add32(&(shm_stats->body.req_chunk_put), value);
                        if(duration > 0) {
                            apr_atomic_add32(&(shm_stats->body.time_put), duration);
                            rawx_stats_rrd_push(&(shm_stats->body.rrd_req_put_sec), shm_stats->body.req_chunk_put);
                            rawx_stats_rrd_push(&(shm_stats->body.rrd_put_duration), shm_stats->body.time_put);
                        }
                        break;
                    case '3': /* q3: chunk DELETE */
                        apr_atomic_add32(&(shm_stats->body.req_chunk_del), value);
                        if(duration > 0) {
                            apr_atomic_add32(&(shm_stats->body.time_del), duration);
                            rawx_stats_rrd_push(&(shm_stats->body.rrd_req_del_sec), shm_stats->body.req_chunk_del);
                            rawx_stats_rrd_push(&(shm_stats->body.rrd_del_duration), shm_stats->body.time_del);
                        }
                        break;
                    case '4': /* q4: stat requests */
                        apr_atomic_add32(&(shm_stats->body.req_stat), value);
                        break;
                    case '5': /* q5: info requests */
                        apr_atomic_add32(&(shm_stats->body.req_info), value);
                        break;
                    case '6': /* q6: raw requests */
                        apr_atomic_add32(&(shm_stats->body.req_raw), value);
                        break;
                    case '7': /* q7: any other request */
                        apr_atomic_add32(&(shm_stats->body.req_other), value);
                        break;
                }
                break;
            case 'r': /* reply counters */
                switch (n[1]) {
                    case '1': /* r1: 2XX replies */
                        apr_atomic_add32(&(shm_stats->body.rep_2XX), value);
                        break;
                    case '2': /* r2: 4XX replies */
                        apr_atomic_add32(&(shm_stats->body.rep_4XX), value);
                        break;
                    case '3': /* r3: 5XX replies */
                        apr_atomic_add32(&(shm_stats->body.rep_5XX), value);
                        break;
                    case '4': /* r4: other replies */
                        apr_atomic_add32(&(shm_stats->body.rep_other), value);
                        break;
                    case '5': /* r5: 403 replies */
                        apr_atomic_add32(&(shm_stats->body.rep_403), value);
                        break;
                    case '6': /* r6: 404 replies */
                        apr_atomic_add32(&(shm_stats->body.rep_404), value);
                        break;
                    case '7': /* r7: bytes read */
                        apr_atomic_add32(&(shm_stats->body.rep_bread), value);
                        break;
                    case '8': /* r8: bytes written */
                        apr_atomic_add32(&(shm_stats->body.rep_bwritten), value);
                        break;
                }
                break;
        }
    }
}
/*
 * store a value in the shared memory cache
 *
 * When value != NULL the pair is inserted/updated: an existing entry for
 * the key is overwritten, otherwise a free/expired slot is used, and as a
 * last resort the least-recently-used entry is evicted.  When value ==
 * NULL the entry for the key is deleted if (and only if) it exists.
 *
 * Returns TRUE on success, FALSE on invalid input or locking failure.
 */
static apr_byte_t oidc_cache_shm_set(request_rec *r, const char *key, const char *value, apr_time_t expiry) {

    oidc_cfg *cfg = ap_get_module_config(r->server->module_config, &auth_openidc_module);
    oidc_cache_cfg_shm_t *context = (oidc_cache_cfg_shm_t *)cfg->cache_cfg;

    ap_log_rerror(APLOG_MARK, OIDC_DEBUG, 0, r, "oidc_cache_shm_set: entering \"%s\" (value size=(%llu)", key, value ? (unsigned long long)strlen(value) : 0);

    oidc_cache_shm_entry_t *match, *vacant, *lru;
    oidc_cache_shm_entry_t *table;
    apr_time_t current_time;
    int i;
    apr_time_t age;

    /* check that the passed in key is valid */
    if (key == NULL || strlen(key) > OIDC_CACHE_SHM_KEY_MAX) {
        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "oidc_cache_shm_set: could not set value since key is NULL or too long (%s)", key);
        return FALSE;
    }

    /* check that the passed in value is valid */
    if ( (value != NULL) && strlen(value) > OIDC_CACHE_SHM_VALUE_MAX) {
        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "oidc_cache_shm_set: could not set value since value is too long (%zu > %d)", strlen(value), OIDC_CACHE_SHM_VALUE_MAX);
        return FALSE;
    }

    /* grab the global lock */
    if (apr_global_mutex_lock(context->mutex) != APR_SUCCESS) {
        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "oidc_cache_shm_set: apr_global_mutex_lock() failed");
        return FALSE;
    }

    /* get a pointer to the shared memory block */
    table = apr_shm_baseaddr_get(context->shm);

    /* get the current time */
    current_time = apr_time_now();

    /* loop over the block, looking for the key; meanwhile remember the
     * first vacant (empty or expired) slot and the LRU entry */
    match = NULL;
    vacant = NULL;
    lru = &table[0];
    for (i = 0; i < cfg->cache_shm_size_max; i++) {

        /* see if this slot is free */
        if (table[i].key[0] == '\0') {
            if (vacant == NULL)
                vacant = &table[i];
            continue;
        }

        /* see if a value already exists for this key */
        if (strcmp(table[i].key, key) == 0) {
            match = &table[i];
            break;
        }

        /* see if this slot has expired */
        if (table[i].expires <= current_time) {
            if (vacant == NULL)
                vacant = &table[i];
            continue;
        }

        /* see if this slot was less recently used than the current pointer */
        if (table[i].access < lru->access) {
            lru = &table[i];
        }
    }

    if (value != NULL) {

        /* if we have no free slots, issue a warning about the LRU entry we
         * are about to evict */
        if (match == NULL && vacant == NULL) {
            age = (current_time - lru->access) / 1000000;
            if (age < 3600) {
                ap_log_rerror(APLOG_MARK, APLOG_NOTICE, 0, r, "oidc_cache_shm_set: dropping LRU entry with age = %" APR_TIME_T_FMT "s, which is less than one hour; consider increasing the shared memory caching space (which is %d now) with the (global) OIDCCacheShmMax setting.", age, cfg->cache_shm_size_max);
            }
        }

        /* fill out the entry with the provided data; lengths were
         * validated against the slot sizes above */
        oidc_cache_shm_entry_t *t = match ? match : (vacant ? vacant : lru);
        strcpy(t->key, key);
        strcpy(t->value, value);
        t->expires = expiry;
        t->access = current_time;

    } else if (match != NULL) {

        /* delete: clear only the slot that actually holds this key.
         * (Previously a delete of a non-cached key could evict an
         * unrelated LRU entry when the cache was full.) */
        match->key[0] = '\0';
    }

    /* release the global lock */
    apr_global_mutex_unlock(context->mutex);

    return TRUE;
}
/*
 * End-of-response hook: under the global vlimit mutex, decrement the
 * per-file and/or per-IP counters that were incremented for this request
 * (as flagged by cfg->file_match / cfg->ip_match), clearing the flags so
 * the decrement happens at most once.  Always returns OK so the request
 * completes even when locking fails.
 */
static int vlimit_response_end(request_rec *r)
{
    /* -2 is a sentinel meaning "no counter was decremented" */
    int counter_stat = -2;

    VLIMIT_DEBUG_SYSLOG("vlimit_response_end: ", "start", r->pool);

    vlimit_config *cfg = (vlimit_config *) ap_get_module_config(r->per_dir_config, &vlimit_module);

    /* slot for this configuration in the shared-memory segment */
    SHM_DATA *limit_stat;
    limit_stat = shm_base + cfg->conf_id;

    // vlimit_mutex lock
    VLIMIT_DEBUG_SYSLOG("vlimit_response_end: ", "vlimit_mutex locked.", r->pool);
    if (apr_global_mutex_lock(vlimit_mutex) != APR_SUCCESS) {
        VLIMIT_DEBUG_SYSLOG("vlimit_response_end: ", "vlimit_mutex lock failed.", r->pool);
        return OK;
    }

    /* decrement the file counter if this request incremented it */
    if (cfg->conf_id != 0 && cfg->file_match == 1) {
        VLIMIT_DEBUG_SYSLOG("vlimit_response_end: ", "type FILE: file_count--", r->pool);
        if (get_file_counter(limit_stat, r) > 0)
            counter_stat = dec_file_counter(limit_stat, r);
        if (get_file_counter(limit_stat, r) == 0)
            unset_file_counter(limit_stat, r);
        cfg->file_match = 0;
        if (counter_stat != -2)
            vlimit_logging("RESULT: END DEC", r, cfg, limit_stat);
    }

    /* decrement the IP counter if this request incremented it */
    if (cfg->conf_id != 0 && cfg->ip_match == 1) {
        VLIMIT_DEBUG_SYSLOG("vlimit_response_end: ", "type IP: ip_count--", r->pool);
        if (get_ip_counter(limit_stat, r) > 0)
            counter_stat = dec_ip_counter(limit_stat, r);
        if (get_ip_counter(limit_stat, r) == 0)
            unset_ip_counter(limit_stat, r);
        cfg->ip_match = 0;
        if (counter_stat != -2)
            vlimit_logging("RESULT: END DEC", r, cfg, limit_stat);
    }

    // vlimit_mutex unlock
    VLIMIT_DEBUG_SYSLOG("vlimit_response_end: ", "vlimit_mutex unlocked.", r->pool);
    if (apr_global_mutex_unlock(vlimit_mutex) != APR_SUCCESS) {
        VLIMIT_DEBUG_SYSLOG("vlimit_response_end: ", "vlimit_mutex unlock failed.", r->pool);
        return OK;
    }

    /* NOTE(review): the counters are re-read here after the mutex was
     * released, so the debug output may race with other workers. */
    vlimit_debug_log_buf = apr_psprintf(r->pool
        , "conf_id: %d name: %s uri: %s ip_count: %d/%d file_count: %d/%d"
        , cfg->conf_id
        , r->server->server_hostname
        , r->filename
        , get_ip_counter(limit_stat, r)
        , cfg->ip_limit
        , get_file_counter(limit_stat, r)
        , cfg->file_limit
    );
    VLIMIT_DEBUG_SYSLOG("vlimit_response_end: ", vlimit_debug_log_buf, r->pool);

    VLIMIT_DEBUG_SYSLOG("vlimit_response_end: ", "end", r->pool);

    return OK;
}
/*
 * Apache output filter. Return values:
 *   HTTP_*                HTTP status code for errors
 *   ap_pass_brigade()     to pass request down the filter chain
 *
 * This function parses the http-response headers from a backend system.
 * We want to find out if the response-header has a
 * a) Set-Cookie header which should be stored to the session store
 * b) Set-Cookie header which is configured as "free" cookie
 * c) Set-Cookie header which has a special meaning to us (Auth=ok)
 *
 * The whole body runs under the global but_mutex; every early return
 * below must (and does) unlock it first, except the paths taken before
 * the lock is acquired.
 */
static apr_status_t but_output_filter(ap_filter_t *f, apr_bucket_brigade *bb_in) {
    request_rec *r = f->r;
    mod_but_server_t *config;
    session_t session;
    cookie_res *cr;
    apr_status_t status;

    config = ap_get_module_config(r->server->module_config, &but_module);
    if (config == NULL) {
        ERRLOG_CRIT("Could not get configuration from apache");
        return HTTP_INTERNAL_SERVER_ERROR;
    }
    /* module disabled: pass the brigade through untouched */
    if (!config->enabled) {
        return ap_pass_brigade(f->next, bb_in);
    }
    if (apr_global_mutex_lock(but_mutex) != APR_SUCCESS) {
        ERRLOG_CRIT("Could not acquire mutex.");
        return HTTP_INTERNAL_SERVER_ERROR;
    }
    but_session_init(&session, r, config);
    /* get session from request notes (stored by the access checker) */
    {
        const char *indexstr = apr_table_get(r->notes, "BUTSESS");
        if (indexstr) {
            /*OPEN*/
            if (but_session_open(&session, atoi(indexstr)) != STATUS_OK) {
                apr_global_mutex_unlock(but_mutex);
                ERRLOG_CRIT("Session not found!");
                return HTTP_INTERNAL_SERVER_ERROR; /* XXX this may happen in some race conditions. Handle gracefully. */
            }
        }
    }
    /*
     * If no session was found for this response, then this is a free URL and
     * we have no way to store cookies. Skip cookie filtering.
     */
    if (!but_session_isnull(&session)) {
        /*
         * Do Header Parsing for all Set-Cookie Response Headers. We are looking for
         * a) Session cookie
         * b) Free cookies
         * c) Service list cookies
         * d) Other cookies
         */
        cr = apr_pcalloc(r->pool, sizeof(cookie_res));
        if (!cr) {
            apr_global_mutex_unlock(but_mutex);
            ERRLOG_CRIT("Out of memory!");
            return HTTP_INTERNAL_SERVER_ERROR;
        }
        cr->r = r;
        cr->session = &session;
        cr->status = STATUS_OK;
        cr->headers = apr_table_make(r->pool, 0);
        /*SET*/
        apr_table_do(mod_but_filter_response_cookies_cb, cr, r->headers_out, "Set-Cookie", NULL);
        if (cr->status != STATUS_OK) {
            if (cr->status == STATUS_ESHMFULL) {
                status = mod_but_redirect_to_shm_error(r, config);
                apr_global_mutex_unlock(but_mutex);
                return status;
            }
            ERRLOG_CRIT("Error filtering the response cookies!");
            apr_global_mutex_unlock(but_mutex);
            return HTTP_INTERNAL_SERVER_ERROR;
        }
        /* Remove all Set-Cookie headers from response. */
        apr_table_unset(r->headers_out, "Set-Cookie");
        apr_table_unset(r->err_headers_out, "Set-Cookie");
        /* Add selected Set-Cookie headers back into r->headers_out. */
        apr_table_do(but_add_to_headers_out_cb, r, cr->headers, NULL);
        /*
         * If iteration detected a valid LOGON=ok Set-Cookie header, cr->must_renew is set.
         */
        if (cr->must_renew) {
            const char *session_handle_str;
            apr_status_t status; /* NOTE: shadows the outer 'status' on purpose within this scope */
            ERRLOG_INFO("=============================== START RENEW SESSION ====================================");
            ERRLOG_INFO("Renewing session after login.");
            /*RENEW*/
            status = but_session_renew(&session);
            if (status != STATUS_OK) {
                if (status == STATUS_ESHMFULL) {
                    status = mod_but_redirect_to_shm_error(r, config);
                    apr_global_mutex_unlock(but_mutex);
                    return status;
                }
                apr_global_mutex_unlock(but_mutex);
                ERRLOG_INFO("Error renewing session");
                return HTTP_INTERNAL_SERVER_ERROR;
            }
            if (but_add_session_cookie_to_headers(r, config, r->headers_out, &session) != STATUS_OK) {
                apr_global_mutex_unlock(but_mutex);
                return HTTP_INTERNAL_SERVER_ERROR;
            }
            /*
             * renew_mod_but_session returned the new session index we have to update in r->notes.
             */
            session_handle_str = apr_itoa(r->pool, session.handle);
            if (!session_handle_str) {
                apr_global_mutex_unlock(but_mutex);
                ERRLOG_CRIT("Out of memory!");
                return HTTP_INTERNAL_SERVER_ERROR;
            }
            apr_table_set(r->notes, "BUTSESS", session_handle_str);
            // REDIRECT TO MOD_BUT_REDIRECT IF ORIG_URL HANDLING IS DISABLED
            if (!config->but_config_enabled_return_to_orig_url) {
                ERRLOG_INFO("REDIRECT TO ORIG URL IS DISABLED: REDIRECT TO MOD_BUT_REDIRECT [%s]", session.data->url);
                ERRLOG_INFO("Redirect to MOD_BUT_REDIRECT if LOGON=ok");
                r->status = mod_but_redirect_to_relurl(r, session.data->redirect_url_after_login);
            }
        } /* must renew */
    } /* have session */
    apr_global_mutex_unlock(but_mutex);
    /* run once per response: remove ourselves before passing on */
    ap_remove_output_filter(f);
    return ap_pass_brigade(f->next, bb_in);
}
/*
 * Apache access hook. Return values:
 *   OK        we have handled the request, do not pass it on
 *   DECLINED  we have not handled the request, pass it on to next module
 *   HTTP_*    HTTP status codes for redirection or errors
 *
 * This is the most important function in mod_but. It is the core for handling
 * requests from the Internet client. It implements:
 *
 * a) MOD_BUT session is required for the requesting URL
 *    if a) is true
 *      a1) Check if the user is sending a MOD_BUT session
 *      a2) If an invalid session is sent -> redirect client to error page
 *      a3) If no session is sent -> create new session and go ahead
 *      a4) If an old session is sent -> redirect client ot error page
 *
 *      a5) If the client is sending some "free" cookies
 *
 *    if a) is false
 *      b1) Check
 *
 * The bulk of the function runs under the global but_mutex; every return
 * after the lock is taken must unlock first.
 */
static int but_access_checker(request_rec *r) {
    mod_but_dir_t *dconfig;
    mod_but_server_t *config;
    session_t session;
    cookie_res *cr;
    apr_status_t status;

    config = ap_get_module_config(r->server->module_config, &but_module);
    if (!config) {
        ERRLOG_CRIT("Could not get configuration from apache");
        return HTTP_INTERNAL_SERVER_ERROR;
    }
    if (!config->enabled) {
        ERRLOG_INFO("mod_but is not enabled, skip request (DECLINED)");
        return DECLINED;
    }
    /* get per-directory configuration */
    dconfig = ap_get_module_config(r->per_dir_config, &but_module);
    if (!dconfig) {
        /* NOTE(review): only logged, not treated as fatal; dconfig is
         * dereferenced later in the auth branch — confirm it can never
         * actually be NULL there. */
        ERRLOG_INFO("Illegal Directory Config");
    }
    if (apr_global_mutex_lock(but_mutex) != APR_SUCCESS) {
        ERRLOG_CRIT("Could not acquire mutex.");
        return HTTP_INTERNAL_SERVER_ERROR;
    }
    but_session_init(&session, r, config);
    ERRLOG_INFO("Request %s", r->uri);
    /****************************** PART 1 *******************************************************
     * Handle special URLs which do not require a session.
     */
    /*
     * Session renewal?
     */
    switch (mod_but_regexp_match(r, config->session_renew_url, r->uri)) {
    case STATUS_MATCH:
        ERRLOG_INFO("Renew URL found [%s]", r->uri);
        /*CREATE*/
        switch (but_session_create(&session)) {
        case STATUS_OK:
            /* session renewed, set cookie and redirect */
            if (but_add_session_cookie_to_headers(r, config, r->err_headers_out, &session) != STATUS_OK) {
                apr_global_mutex_unlock(but_mutex);
                return HTTP_INTERNAL_SERVER_ERROR;
            }
            status = mod_but_redirect_to_relurl(r, config->url_after_renew); /* XXX make configurable; default URL */
            apr_global_mutex_unlock(but_mutex);
            return status;
        case STATUS_ESHMFULL:
            status = mod_but_redirect_to_shm_error(r, config);
            apr_global_mutex_unlock(but_mutex);
            return status;
        case STATUS_ERROR:
        default:
            apr_global_mutex_unlock(but_mutex);
            ERRLOG_CRIT("Error creating new session");
            return HTTP_INTERNAL_SERVER_ERROR;
        }
        break; /* not reached */
    case STATUS_NOMATCH:
        /* do nothing */
        break;
    case STATUS_ERROR:
    default:
        apr_global_mutex_unlock(but_mutex);
        ERRLOG_CRIT("Error while matching MOD_BUT_SESSION_RENEW_URL");
        return HTTP_INTERNAL_SERVER_ERROR;
    }
    /*
     * Session free URL?
     */
    switch (mod_but_regexp_match(r, config->session_free_url, r->uri)) {
    case STATUS_MATCH:
        apr_global_mutex_unlock(but_mutex);
        ERRLOG_INFO("Session free URL [%s]", r->uri);
        return DECLINED;
    case STATUS_NOMATCH:
        /* do nothing */
        break;
    case STATUS_ERROR:
    default:
        apr_global_mutex_unlock(but_mutex);
        ERRLOG_CRIT("Error while matching MOD_BUT_SESSION_FREE_URL");
        return HTTP_INTERNAL_SERVER_ERROR;
    }
    /****************************** PART 2 *******************************************************
     * Check of the mod_but session
     * a) mod_but session is not sent by client
     * b) mod_but session sent is invalid
     * c) mod_but session sent is ok
     * The code below will only be executed if the requesting URI
     * requires a mod_but session
     */
    /*
     * BUT-1 (coming from BUT-0) -> session is required
     * Here we will first parse the request headers for
     *
     * a) MOD_BUT_SESSION (will be unset, because we don't want to have it in the backend)
     * b) FREE COOKIES (will be accepted, if configured in httpd.conf)
     * c) OTHER COOKIES (will be unset)
     */
    /*
     * iterate over all Cookie headers and unset them;
     * cookies for backend are now in r->notes
     */
    cr = apr_pcalloc(r->pool, sizeof(cookie_res));
    if (!cr) {
        apr_global_mutex_unlock(but_mutex);
        ERRLOG_CRIT("Out of memory!");
        return HTTP_INTERNAL_SERVER_ERROR;
    }
    cr->r = r;
    cr->status = STATUS_OK;
    cr->headers = apr_table_make(r->pool, 0);
    apr_table_do(mod_but_filter_request_cookies_cb, cr, r->headers_in, "Cookie", NULL);
    if (cr->status != STATUS_OK) {
        apr_global_mutex_unlock(but_mutex);
        ERRLOG_CRIT("Error while iterating Cookie headers.");
        return HTTP_INTERNAL_SERVER_ERROR;
    }
    apr_table_unset(r->headers_in, "Cookie");
    ERRLOG_INFO("Session ID [%s]", cr->sessionid);
    /*
     * If the client has sent no session cookie, create a new session
     * and redirect to cookie try.
     */
    if (!cr->sessionid) {
        ERRLOG_INFO("Client did not send mod_but session");
        /*CREATE*/
        switch (but_session_create(&session)) {
        case STATUS_OK:
            /* session created, set cookie and redirect */
            if (but_add_session_cookie_to_headers(r, config, r->err_headers_out, &session) != STATUS_OK) {
                apr_global_mutex_unlock(but_mutex);
                return HTTP_INTERNAL_SERVER_ERROR;
            }
            status = mod_but_redirect_to_cookie_try(r, config);
            apr_global_mutex_unlock(but_mutex);
            return status;
        case STATUS_ESHMFULL:
            status = mod_but_redirect_to_shm_error(r, config);
            apr_global_mutex_unlock(but_mutex);
            return status;
        case STATUS_ERROR:
        default:
            apr_global_mutex_unlock(but_mutex);
            ERRLOG_CRIT("Error creating new session");
            return HTTP_INTERNAL_SERVER_ERROR;
        }
        /* not reached */
    }
    /*
     * The client has sent a session (valid or invalid)
     */
    /* Initialize the session struct. */
    but_session_init(&session, r, config);
    /* Look up the session. */
    /*FIND*/switch (but_session_find(&session, config->cookie_name, cr->sessionid)) {
    case STATUS_OK:
        break;
    case STATUS_ENOEXIST:
        /* session not found */
        ERRLOG_INFO("Session timed out or invalid");
        if (!config->session_expired_url) {
            apr_global_mutex_unlock(but_mutex);
            ERRLOG_INFO("MOD_BUT_SESSION_TIMEOUT_URL not configured");
            return HTTP_INTERNAL_SERVER_ERROR;
        }
        status = mod_but_redirect_to_relurl(r, config->session_expired_url);
        apr_global_mutex_unlock(but_mutex);
        return status;
    case STATUS_ERROR:
    default:
        apr_global_mutex_unlock(but_mutex);
        ERRLOG_CRIT("Error finding session!");
        return HTTP_INTERNAL_SERVER_ERROR;
    }
    /* Validate the session, time it out if necessary, updating atime.
     */
    /*UNLINK,SET*/switch (but_session_validate(&session, config->session_hard_timeout, config->session_inactivity_timeout)) {
    case STATUS_OK:
        break;
    case STATUS_ENOEXIST:
        /* the sent session has reached its hard or soft timeout */
        ERRLOG_INFO("Session timed out.");
        if (!config->session_expired_url) {
            apr_global_mutex_unlock(but_mutex);
            ERRLOG_INFO("MOD_BUT_SESSION_TIMEOUT_URL not configured");
            return HTTP_INTERNAL_SERVER_ERROR;
        }
        status = mod_but_redirect_to_relurl(r, config->session_expired_url);
        apr_global_mutex_unlock(but_mutex);
        return status;
    case STATUS_ERROR:
    default:
        apr_global_mutex_unlock(but_mutex);
        ERRLOG_CRIT("Error validating session!");
        return HTTP_INTERNAL_SERVER_ERROR;
    }
    /*
     * If we are here, the client has sent a valid mod_but session
     */
    ERRLOG_INFO("Client has sent a valid mod_but session");
    /*
     * We will first check, if the requesting URI asks for the session destroy function
     * This implements the "logout" functionality.
     */
    switch (mod_but_regexp_match(r, config->session_destroy, r->uri)) {
    case STATUS_MATCH:
        ERRLOG_INFO("Session destroy URL matched, destroying session");
        /*UNLINK*/
        but_session_unlink(&session);
        status = mod_but_redirect_to_relurl(r, config->session_destroy_url);
        apr_global_mutex_unlock(but_mutex);
        return status;
    case STATUS_NOMATCH:
        ERRLOG_INFO("r->uri does not match session destroy URL [%s]", r->uri);
        break;
    case STATUS_ERROR:
    default:
        apr_global_mutex_unlock(but_mutex);
        ERRLOG_CRIT("Error matching session destroy URL");
        return HTTP_INTERNAL_SERVER_ERROR;
    }
    /*
     * If we are here, the requesting URL does not want to be destroyed and we analyze
     * the request for the cookie_try. If we are still in the cookie test phase, we
     * have to give the client the Original URI (from the first request) as redirect
     */
    /* store session index into request notes for output filter to process */
    {
        const char *session_index_str = apr_itoa(r->pool, session.handle);
        if (!session_index_str) {
            apr_global_mutex_unlock(but_mutex);
            ERRLOG_INFO("Out of memory!");
            return HTTP_INTERNAL_SERVER_ERROR;
        }
        ERRLOG_INFO("Setting r->notes[BUTSESS] to [%s]", session_index_str);
        apr_table_set(r->notes, "BUTSESS", session_index_str);
    }
    /*
     * Cookie is sent by the client, it is a valid session and the
     * requesting URL contains the cookie_try parameter.
     * session.data->url was set before redirecting to cookie_try.
     */
    if (mod_but_find_cookie_try(r) > 0) {
        /*GET*/
        if (!apr_strnatcmp(session.data->url, "empty")) {
            apr_global_mutex_unlock(but_mutex);
            ERRLOG_CRIT("Session contains no URL!");
            return HTTP_INTERNAL_SERVER_ERROR;
        }
        ERRLOG_INFO("Client session is valid and cookie test succeeded");
        /*GET*/
        status = mod_but_redirect_to_relurl(r, session.data->url);
        apr_global_mutex_unlock(but_mutex);
        return status;
    }
    ERRLOG_INFO("Client session is valid and no cookie try in URL");
    /*
     * If we are here, the request will be authorized.
     */
    /*
     * Now let's do the authorization stuff, if enabled by config.
     */
    if (config->authorization_enabled) {
        ERRLOG_INFO("Authorization checks are enabled");
        /*GET*/
        switch (but_access_control(r, &session, config, dconfig)) {
        case STATUS_ELOGIN:
            ERRLOG_INFO("URI requires auth, but user not logged in yet [%s]", r->unparsed_uri);
            /* use r->unparsed_uri instead of r->uri to safeguard against HTTP Response Splitting */
            /*SET*/
            apr_cpystrn(session.data->url, r->unparsed_uri, sizeof(session.data->url));
            ERRLOG_INFO("Storing original URL before logon [%s]", session.data->url);
            /*SET*/
            session.data->redirect_on_auth_flag = 1;
            ERRLOG_INFO("Setting redirect on auth flag to [%d]", session.data->redirect_on_auth_flag);
            if (dconfig->logon_server_url) {
                /* login server is configured for this Location */
                status = mod_but_redirect_to_relurl(r, dconfig->logon_server_url);
                apr_global_mutex_unlock(but_mutex);
                return status;
            } else {
                /* No login server is configured for this Location */
                if (!config->global_logon_server_url) {
                    apr_global_mutex_unlock(but_mutex);
                    ERRLOG_CRIT("Global logon server URL is not set");
                    return HTTP_INTERNAL_SERVER_ERROR;
                }
                status = mod_but_redirect_to_relurl(r, config->global_logon_server_url);
                apr_global_mutex_unlock(but_mutex);
                return status;
            }
            break; /* not reached */
        case STATUS_OK:
            ERRLOG_INFO("client is sufficiently authorized or no auth required");
            break;
        case STATUS_EDENIED:
            ERRLOG_CRIT("Client authenticated but not authorized for this URL");
            if (!config->service_list_error_url) {
                apr_global_mutex_unlock(but_mutex);
                ERRLOG_CRIT("Service list error URL not set");
                return HTTP_INTERNAL_SERVER_ERROR;
            }
            status = mod_but_redirect_to_relurl(r, config->service_list_error_url);
            apr_global_mutex_unlock(but_mutex);
            return status;
        case STATUS_ESTEPUP1:
            ERRLOG_INFO("Client authenticated but auth_strength too low for this URL");
            if (!config->global_logon_server_url_1) {
                apr_global_mutex_unlock(but_mutex);
                ERRLOG_CRIT("Gobal logon server URL 1 not set");
                return HTTP_INTERNAL_SERVER_ERROR;
            }
            /* use r->unparsed_uri instead of r->uri to safeguard against HTTP Response Splitting */
            /*SET*/
            apr_cpystrn(session.data->url, r->unparsed_uri, sizeof(session.data->url));
            ERRLOG_INFO("Storing original URL before logon [%s]", session.data->url);
            /*SET*/
            session.data->redirect_on_auth_flag = 1;
            ERRLOG_INFO("Setting redirect on auth flag to [%d]", session.data->redirect_on_auth_flag);
            status = mod_but_redirect_to_relurl(r, config->global_logon_server_url_1);
            apr_global_mutex_unlock(but_mutex);
            return status;
        case STATUS_ESTEPUP2:
            ERRLOG_INFO("Client authenticated but auth_strength too low for this URL");
            if (!config->global_logon_server_url_2) {
                apr_global_mutex_unlock(but_mutex);
                ERRLOG_CRIT("Global logon server URL 2 not set");
                return HTTP_INTERNAL_SERVER_ERROR;
            }
            /* use r->unparsed_uri instead of r->uri to safeguard against HTTP Response Splitting */
            /*SET*/
            apr_cpystrn(session.data->url, r->unparsed_uri, sizeof(session.data->url));
            ERRLOG_INFO("Storing original URL before logon [%s]", session.data->url);
            /*SET*/
            session.data->redirect_on_auth_flag = 1;
            ERRLOG_INFO("Setting redirect on auth flag to [%d]", session.data->redirect_on_auth_flag);
            status = mod_but_redirect_to_relurl(r, config->global_logon_server_url_2);
            apr_global_mutex_unlock(but_mutex);
            return status;
        case STATUS_ERROR:
        default:
            apr_global_mutex_unlock(but_mutex);
            ERRLOG_CRIT("Error while checking authorization");
            return HTTP_INTERNAL_SERVER_ERROR;
        }
    } else {
        ERRLOG_INFO("Authorization checks are disabled");
    }
    /*
     * If we are here, the client is properly authenticated and we start proceeding
     * the request.
     */
    /*
     * This is the redirection to the original protected URL function after login.
     * If the user was successfully authenticated and the session_data->redirect_on_auth_flag is 1,
     * we need to redirect the user to his original URL or / if none was found.
     * That can happen when the user directly enters the site on the login URL.
     */
    ERRLOG_INFO("Redirect on auth flag [%d] logon state [%d]", session.data->redirect_on_auth_flag, session.data->logon_state);
    /*GET*/
    if (session.data->redirect_on_auth_flag == 1 && session.data->logon_state == 1) {
        /*SET*/
        session.data->redirect_on_auth_flag = 0;
        if (config->but_config_enabled_return_to_orig_url) {
            ERRLOG_INFO("REDIRECT TO ORIG URL IS ENABLED: Redirect to [%s]", session.data->url);
            /*GET*/
            if (!apr_strnatcmp(session.data->url, "empty")) {
                ERRLOG_INFO("============ REDIRECT TO [/] because orig_url was empty ");
                status = mod_but_redirect_to_relurl(r, "/"); /* XXX make URL configurable: default rel URL */
                apr_global_mutex_unlock(but_mutex);
                return status;
            } else {
                ERRLOG_INFO("============ REDIRECT TO [%s] to orig_url", session.data->url);
                /*GET*/
                status = mod_but_redirect_to_relurl(r, session.data->url);
                apr_global_mutex_unlock(but_mutex);
                return status;
            }
        } else {
            /* NOTE(review): the redirect below is commented out, so this
             * branch only logs and falls through to normal processing. */
            ERRLOG_INFO("REDIRECT TO ORIG URL IS DISABLED: Redirect to = [%s]", session.data->redirect_url_after_login);
            /*GET*/
            //status = mod_but_redirect_to_relurl(r, session.data->redirect_url_after_login);
            //apr_global_mutex_unlock(but_mutex);
            //return status;
        }
    } else {
        ERRLOG_INFO("Logon state or redirect on auth flag was 0, not redirecting");
    }
    /* Add cookies from cookie store to request headers. */
    /*GET*/
    if (session.data->cookiestore_index != -1) {
        /*GET*/
        const char *cookie = but_session_get_cookies(&session);
        if (cookie) {
            apr_table_set(r->headers_in, "Cookie", cookie);
        }
    }
    apr_global_mutex_unlock(but_mutex);
    /* Add selected Cookie headers back into r->headers_in. */
    apr_table_do(but_add_to_headers_in_cb, r, cr->headers, NULL);
    /* Hand request down to the next module. */
    return OK;
}
/**
 * Perform geographical lookup on target.
 *
 * Resolves target (IPv4 only) to an address, walks the GeoIP binary-tree
 * index under the geo_lock global mutex, and fills *georec with country
 * (and, for city databases, region/city/postal/lat/long/dma) data.
 *
 * Returns 1 on success, 0 on failure; *error_msg is always set to a
 * description of the outcome.
 */
int geo_lookup(modsec_rec *msr, geo_rec *georec, const char *target, char **error_msg) {
    apr_sockaddr_t *addr;
    long ipnum = 0;
    char *targetip = NULL;
    geo_db *geo = msr->txcfg->geo;
    char errstr[1024];
    unsigned char buf[2 * GEO_MAX_RECORD_LEN];
    const int reclen = 3; /* Algorithm needs changed if this changes */
    apr_size_t nbytes;
    unsigned int rec_val = 0;
    apr_off_t seekto = 0;
    apr_status_t ret;
    int rc;
    int country = 0;
    int level;
    double dtmp;
    int itmp;

    *error_msg = NULL;

    /* init: preload the "unknown" entries (index 0) */
    georec->country_code = geo_country_code[0];
    georec->country_code3 = geo_country_code3[0];
    georec->country_name = geo_country_name[0];
    georec->country_continent = geo_country_continent[0];
    georec->region = "";
    georec->city = "";
    georec->postal_code = "";
    georec->latitude = 0;
    georec->longitude = 0;
    georec->dma_code = 0;
    georec->area_code = 0;

    if (msr->txcfg->debuglog_level >= 9) {
        msr_log(msr, 9, "GEO: Looking up \"%s\".", log_escape(msr->mp, target));
    }

    /* NOTE: This only works with ipv4 */
    if ((rc = apr_sockaddr_info_get(&addr, target, APR_INET, 0, 0, msr->mp)) != APR_SUCCESS) {
        *error_msg = apr_psprintf(msr->mp, "Geo lookup for \"%s\" failed: %s", log_escape(msr->mp, target), apr_strerror(rc, errstr, 1024));
        msr_log(msr, 4, "%s", *error_msg);
        return 0;
    }
    if ((rc = apr_sockaddr_ip_get(&targetip, addr)) != APR_SUCCESS) {
        *error_msg = apr_psprintf(msr->mp, "Geo lookup for \"%s\" failed: %s", log_escape(msr->mp, target), apr_strerror(rc, errstr, 1024));
        msr_log(msr, 4, "%s", *error_msg);
        return 0;
    }

    /* Why is this in host byte order? */
    ipnum = ntohl(addr->sa.sin.sin_addr.s_addr);

    if (msr->txcfg->debuglog_level >= 9) {
        msr_log(msr, 9, "GEO: Using address \"%s\" (0x%08lx). %lu", targetip, ipnum, ipnum);
    }

    /* Serialize access to the database file handle across processes. */
    ret = apr_global_mutex_lock(msr->modsecurity->geo_lock);
    if (ret != APR_SUCCESS) {
        msr_log(msr, 1, "Geo Lookup: Failed to lock proc mutex: %s",
            get_apr_error(msr->mp, ret));
    }

    /* Walk the binary tree one address bit at a time, MSB first. */
    for (level = 31; level >= 0; level--) {
        /* Read the record */
        seekto = 2 * reclen * rec_val;
        apr_file_seek(geo->db, APR_SET, &seekto);
        /* TODO: check rc */
        rc = apr_file_read_full(geo->db, &buf, (2 * reclen), &nbytes);

        /* NOTE: This is hard-coded for size 3 records */
        /* Left */
        if ((ipnum & (1 << level)) == 0) {
            rec_val = (buf[3*0 + 0] << (0*8)) +
                      (buf[3*0 + 1] << (1*8)) +
                      (buf[3*0 + 2] << (2*8));
        }
        /* Right */
        else {
            rec_val = (buf[3*1 + 0] << (0*8)) +
                      (buf[3*1 + 1] << (1*8)) +
                      (buf[3*1 + 2] << (2*8));
        }

        /* If we are past the country offset, then we are done */
        if (rec_val >= geo->ctry_offset) {
            break;
        }
    }

    if (rec_val == geo->ctry_offset) {
        *error_msg = apr_psprintf(msr->mp, "No geo data for \"%s\").", log_escape(msr->mp, target));
        msr_log(msr, 4, "%s", *error_msg);
        ret = apr_global_mutex_unlock(msr->modsecurity->geo_lock);
        if (ret != APR_SUCCESS) {
            msr_log(msr, 1, "Geo Lookup: Failed to unlock proc mutex: %s",
                get_apr_error(msr->mp, ret));
        }
        return 0;
    }

    if (geo->dbtype == GEO_COUNTRY_DATABASE) {
        country = rec_val;
        country -= geo->ctry_offset;
        if ((country <= 0) || (country > GEO_COUNTRY_LAST)) {
            *error_msg = apr_psprintf(msr->mp, "No geo data for \"%s\" (country %d).", log_escape(msr->mp, target), country);
            msr_log(msr, 4, "%s", *error_msg);
            ret = apr_global_mutex_unlock(msr->modsecurity->geo_lock);
            if (ret != APR_SUCCESS) {
                msr_log(msr, 1, "Geo Lookup: Failed to unlock proc mutex: %s",
                    get_apr_error(msr->mp, ret));
            }
            return 0;
        }

        /* Country */
        georec->country_code = geo_country_code[country];
        georec->country_code3 = geo_country_code3[country];
        georec->country_name = geo_country_name[country];
        georec->country_continent = geo_country_continent[country];
    }
    else {
        int field_len = 0;
        int rec_offset = 0;
        int remaining = GEO_CITY_RECORD_LEN;
        unsigned char cbuf[GEO_CITY_RECORD_LEN];

        seekto = rec_val + (2 * reclen - 1) * geo->ctry_offset;
        apr_file_seek(geo->db, APR_SET, &seekto);
        /* TODO: check rc */
        rc = apr_file_read_full(geo->db, &cbuf, sizeof(cbuf), &nbytes);

        country = cbuf[0];
        if ((country <= 0) || (country > GEO_COUNTRY_LAST)) {
            *error_msg = apr_psprintf(msr->mp, "No geo data for \"%s\" (country %d).", log_escape(msr->mp, target), country);
            msr_log(msr, 4, "%s", *error_msg);
            ret = apr_global_mutex_unlock(msr->modsecurity->geo_lock);
            if (ret != APR_SUCCESS) {
                msr_log(msr, 1, "Geo Lookup: Failed to unlock proc mutex: %s",
                    get_apr_error(msr->mp, ret));
            }
            return 0;
        }
        if (msr->txcfg->debuglog_level >= 9) {
            msr_log(msr, 9, "GEO: rec=\"%s\"", log_escape_raw(msr->mp, cbuf, sizeof(cbuf)));
        }

        /* Country */
        if (msr->txcfg->debuglog_level >= 9) {
            msr_log(msr, 9, "GEO: country=\"%.*s\"", (1*4), log_escape_raw(msr->mp, cbuf, sizeof(cbuf)));
        }
        georec->country_code = geo_country_code[country];
        georec->country_code3 = geo_country_code3[country];
        georec->country_name = geo_country_name[country];
        georec->country_continent = geo_country_continent[country];
        rec_offset++;
        remaining -= rec_offset;

        /* Region.  NOTE(review): apr_pstrmemdup() with 'remaining' copies
         * the rest of the record, but embedded NULs terminate the field
         * when read as a C string. */
        field_len = field_length((const char *)cbuf+rec_offset, remaining);
        if (msr->txcfg->debuglog_level >= 9) {
            msr_log(msr, 9, "GEO: region=\"%.*s\"", ((field_len+1)*4), log_escape_raw(msr->mp, cbuf, sizeof(cbuf))+(rec_offset*4));
        }
        georec->region = apr_pstrmemdup(msr->mp, (const char *)cbuf+rec_offset, (remaining));
        rec_offset += field_len + 1;
        remaining -= field_len + 1;

        /* City */
        field_len = field_length((const char *)cbuf+rec_offset, remaining);
        if (msr->txcfg->debuglog_level >= 9) {
            msr_log(msr, 9, "GEO: city=\"%.*s\"", ((field_len+1)*4), log_escape_raw(msr->mp, cbuf, sizeof(cbuf))+(rec_offset*4));
        }
        georec->city = apr_pstrmemdup(msr->mp, (const char *)cbuf+rec_offset, (remaining));
        rec_offset += field_len + 1;
        remaining -= field_len + 1;

        /* Postal Code */
        field_len = field_length((const char *)cbuf+rec_offset, remaining);
        if (msr->txcfg->debuglog_level >= 9) {
            msr_log(msr, 9, "GEO: postal_code=\"%.*s\"", ((field_len+1)*4), log_escape_raw(msr->mp, cbuf, sizeof(cbuf))+(rec_offset*4));
        }
        georec->postal_code = apr_pstrmemdup(msr->mp, (const char *)cbuf+rec_offset, (remaining));
        rec_offset += field_len + 1;
        remaining -= field_len + 1;

        /* Latitude: 3-byte little-endian fixed point */
        if (msr->txcfg->debuglog_level >= 9) {
            msr_log(msr, 9, "GEO: latitude=\"%.*s\"", (3*4), log_escape_raw(msr->mp, cbuf, sizeof(cbuf))+(rec_offset*4));
        }
        dtmp = cbuf[rec_offset] + (cbuf[rec_offset+1] << 8) + (cbuf[rec_offset+2] << 16);
        georec->latitude = dtmp/10000 - 180;
        rec_offset += 3;
        remaining -= 3;

        /* Longitude: 3-byte little-endian fixed point */
        if (msr->txcfg->debuglog_level >= 9) {
            msr_log(msr, 9, "GEO: longitude=\"%.*s\"", (3*4), log_escape_raw(msr->mp, cbuf, sizeof(cbuf))+(rec_offset*4));
        }
        dtmp = cbuf[rec_offset] + (cbuf[rec_offset+1] << 8) + (cbuf[rec_offset+2] << 16);
        georec->longitude = dtmp/10000 - 180;
        rec_offset += 3;
        remaining -= 3;

        /* dma/area codes are in city rev1 and US only */
        if (msr->txcfg->debuglog_level >= 9) {
            msr_log(msr, 9, "GEO: dma/area=\"%.*s\"", (3*4), log_escape_raw(msr->mp, cbuf, sizeof(cbuf))+(rec_offset*4));
        }
        if (geo->dbtype == GEO_CITY_DATABASE_1
            && georec->country_code[0] == 'U'
            && georec->country_code[1] == 'S')
        {
            /* DMA Code */
            itmp = cbuf[rec_offset] + (cbuf[rec_offset+1] << 8) + (cbuf[rec_offset+2] << 16);
            georec->dma_code = itmp / 1000;
            georec->area_code = itmp % 1000;
            rec_offset += 6;
            remaining -= 6;
        }
    }

    *error_msg = apr_psprintf(msr->mp, "Geo lookup for \"%s\" succeeded.", log_escape(msr->mp, target));

    ret = apr_global_mutex_unlock(msr->modsecurity->geo_lock);
    if (ret != APR_SUCCESS) {
        /* fixed: previously logged "Failed to lock" on unlock failures */
        msr_log(msr, 1, "Geo Lookup: Failed to unlock proc mutex: %s",
            get_apr_error(msr->mp, ret));
    }

    return 1;
}
/* The sample content handler.
 *
 * Increments a counter in the shared memory segment under the protection
 * of the global mutex, then reports the new value (and how long we camped
 * out waiting for the lock) to the client as an HTML page.
 *
 * Returns DECLINED for requests not mapped to this handler, OK otherwise
 * (even when the lock could not be acquired; the page says so instead).
 */
static int exipc_handler(request_rec *r)
{
    int gotlock = 0;        /* did we end up owning exipc_mutex? */
    int camped;             /* number of trylock attempts so far */
    apr_time_t startcamp;
    apr_int64_t timecamped; /* microseconds spent waiting for the lock */
    apr_status_t rs;
    exipc_data *base;

    if (strcmp(r->handler, "example_ipc")) {
        return DECLINED;
    }

    /*
     * The main function of the handler, aside from sending the
     * status page to the client, is to increment the counter in
     * the shared memory segment. This action needs to be mutexed
     * out using the global mutex.
     */

    /*
     * First, acquire the lock. This code is a lot more involved than
     * it usually needs to be, because the process based trylock
     * routine is not implemented on unix platforms. I left it in to
     * show how it would work if trylock worked, and for situations
     * and platforms where trylock works.
     */
    for (camped = 0, timecamped = 0; camped < MAXCAMP; camped++) {
        rs = apr_global_mutex_trylock(exipc_mutex);
        if (APR_STATUS_IS_EBUSY(rs)) {
            apr_sleep(CAMPOUT);
        }
        else if (APR_SUCCESS == rs) {
            gotlock = 1;
            break; /* Get out of the loop */
        }
        else if (APR_STATUS_IS_ENOTIMPL(rs)) {
            /* If it's not implemented, just hang in the mutex. */
            startcamp = apr_time_now();
            rs = apr_global_mutex_lock(exipc_mutex);
            timecamped = (apr_int64_t) (apr_time_now() - startcamp);
            if (APR_SUCCESS == rs) {
                gotlock = 1;
                break; /* Out of the loop */
            }
            else {
                /* Some error, log and bail */
                ap_log_error(APLOG_MARK, APLOG_ERR, rs, r->server,
                             "Child %ld failed to acquire lock",
                             (long int)getpid());
                break; /* Out of the loop without having the lock */
            }
        }
        else {
            /* Some other error, log and bail */
            ap_log_error(APLOG_MARK, APLOG_ERR, rs, r->server,
                         "Child %ld failed to try and acquire lock",
                         (long int)getpid());
            break; /* Out of the loop without having the lock */
        }
        /*
         * The only way to get to this point is if the trylock worked
         * and returned BUSY. So, bump the time and try again.
         */
        timecamped += CAMPOUT;
        /* FIX: timecamped is apr_int64_t; passing it for a "%d" conversion
         * is undefined behavior (format/argument mismatch). Use APR's
         * 64-bit integer format macro instead. */
        ap_log_error(APLOG_MARK, APLOG_NOERRNO | APLOG_NOTICE, 0, r->server,
                     "Child %ld camping out on mutex for %" APR_INT64_T_FMT
                     " microseconds",
                     (long int) getpid(), timecamped);
    } /* Lock acquisition loop */

    /* Sleep for a millisecond to make it a little harder for
     * httpd children to acquire the lock.
     */
    apr_sleep(SLEEPYTIME);

    r->content_type = "text/html";

    if (!r->header_only) {
        ap_rputs(HTML_HEADER, r);
        if (gotlock) {
            /* Increment the counter */
            base = (exipc_data *)apr_shm_baseaddr_get(exipc_shm);
            base->counter++;
            /* Send a page with our pid and the new value of the counter.
             * (FIX: corrected the "microseoncds" typo in the output.) */
            ap_rprintf(r, "<p>Lock acquired after %ld microseconds.</p>\n",
                       (long int) timecamped);
            ap_rputs("<table border=\"1\">\n", r);
            ap_rprintf(r, "<tr><td>Child pid:</td><td>%d</td></tr>\n",
                       (int) getpid());
            ap_rprintf(r, "<tr><td>Counter:</td><td>%u</td></tr>\n",
                       (unsigned int)base->counter);
            ap_rputs("</table>\n", r);
        }
        else {
            /*
             * Send a page saying that we couldn't get the lock. Don't say
             * what the counter is, because without the lock the value could
             * race.
             */
            ap_rprintf(r, "<p>Child %d failed to acquire lock "
                       "after camping out for %d microseconds.</p>\n",
                       (int) getpid(), (int) timecamped);
        }
        ap_rputs(HTML_FOOTER, r);
    } /* r->header_only */

    /* Release the lock */
    if (gotlock)
        rs = apr_global_mutex_unlock(exipc_mutex);
    /* Swallowing the result because what are we going to do with it at
     * this stage?
     */

    return OK;
}
/* Generic function to check a request against a config.
 *
 * Tracks the number of concurrent requests per source IP or per target
 * file in shared memory (guarded by vlimit_mutex) and rejects the request
 * with HTTP_SERVICE_UNAVAILABLE once the configured limit is exceeded.
 *
 * Returns DECLINED when the check does not apply, OK when the request is
 * allowed (or on internal lock failures, which fail open), and
 * HTTP_SERVICE_UNAVAILABLE when a limit or slot capacity is hit.
 */
static int vlimit_check_limit(request_rec *r, vlimit_config *cfg)
{
    const char *header_name;
    int ip_count = 0;
    int file_count = 0;
    int counter_stat = 0;

    /* Only police initial requests, not subrequests/internal redirects. */
    if (!ap_is_initial_req(r)) {
        VLIMIT_DEBUG_SYSLOG("vlimit_check_limit: ", "SKIPPED: Initial Reqeusts.", r->pool);
        return DECLINED;
    }

    /* Nothing to enforce when neither limit is configured. */
    if (cfg->ip_limit <= 0 && cfg->file_limit <= 0) {
        VLIMIT_DEBUG_SYSLOG("vlimit_check_limit: ", "SKIPPED: cfg->ip_limit <= 0 && cfg->file_limit <= 0", r->pool);
        return DECLINED;
    }

    header_name = apr_table_get(r->headers_in, "HOST");

    vlimit_debug_log_buf = apr_psprintf(r->pool
        , "client info: address=(%s) header_name=(%s)"
        , r->connection->remote_ip
        , header_name
    );
    VLIMIT_DEBUG_SYSLOG("vlimit_check_limit: ", vlimit_debug_log_buf, r->pool);

    /* Per-config slot block inside the shared memory segment. */
    SHM_DATA *limit_stat;
    limit_stat = shm_base + cfg->conf_id;

    if (make_ip_slot_list(limit_stat, r) != -1)
        VLIMIT_DEBUG_SYSLOG("vlimit_check_limit: ", "make_ip_slot_list exec. create list(" VLIMIT_IP_STAT_FILE ").", r->pool);
    if (make_file_slot_list(limit_stat, r) != -1)
        VLIMIT_DEBUG_SYSLOG("vlimit_check_limit: ", "make_file_slot_list exec. create list(" VLIMIT_FILE_STAT_FILE ").", r->pool);

    if (check_virtualhost_name(r)) {
        VLIMIT_DEBUG_SYSLOG("vlimit_check_limit: ", "header_name != server_hostname. return OK.", r->pool);
        return OK;
    }

    /* vlimit_mutex lock: the counters in shared memory are updated by
     * multiple children, so all increments happen under the global mutex. */
    VLIMIT_DEBUG_SYSLOG("vlimit_check_limit: ", "vlimit_mutex locked.", r->pool);
    if (apr_global_mutex_lock(vlimit_mutex) != APR_SUCCESS) {
        VLIMIT_DEBUG_SYSLOG("vlimit_check_limit: ", "vlimit_mutex lock failed.", r->pool);
        return OK;
    }

    if (cfg->file_limit > 0) {
        VLIMIT_DEBUG_SYSLOG("vlimit_check_limit: ", "type File: file_count++", r->pool);
        counter_stat = inc_file_counter(limit_stat, r);
        if (counter_stat == -1) {
            VLIMIT_DEBUG_SYSLOG("vlimit_check_limit: ", "file counter slot full. maxclients?", r->pool);
            /* FIX: release the global mutex before bailing out. The original
             * code returned with vlimit_mutex still held, deadlocking every
             * other child on the next lock attempt. */
            if (apr_global_mutex_unlock(vlimit_mutex) != APR_SUCCESS)
                VLIMIT_DEBUG_SYSLOG("vlimit_check_limit: ", "vlimit_mutex unlock failed.", r->pool);
            return HTTP_SERVICE_UNAVAILABLE;
        }
        file_count = get_file_counter(limit_stat, r);
        cfg->file_match = 1;
    } else if (cfg->ip_limit > 0) {
        VLIMIT_DEBUG_SYSLOG("vlimit_check_limit: ", "type IP: ip_count++", r->pool);
        counter_stat = inc_ip_counter(limit_stat, r);
        if (counter_stat == -1) {
            VLIMIT_DEBUG_SYSLOG("vlimit_check_limit: ", "ip counter slot full. maxclients?", r->pool);
            /* FIX: same mutex leak as the file branch above. */
            if (apr_global_mutex_unlock(vlimit_mutex) != APR_SUCCESS)
                VLIMIT_DEBUG_SYSLOG("vlimit_check_limit: ", "vlimit_mutex unlock failed.", r->pool);
            return HTTP_SERVICE_UNAVAILABLE;
        }
        ip_count = get_ip_counter(limit_stat, r);
        cfg->ip_match = 1;
    }

    /* vlimit_mutex unlock */
    VLIMIT_DEBUG_SYSLOG("vlimit_check_limit: ", "vlimit_mutex unlocked.", r->pool);
    if (apr_global_mutex_unlock(vlimit_mutex) != APR_SUCCESS){
        VLIMIT_DEBUG_SYSLOG("vlimit_check_limit: ", "vlimit_mutex unlock failed.", r->pool);
        return OK;
    }

    vlimit_debug_log_buf = apr_psprintf(r->pool
        , "conf_id: %d name: %s uri: %s ip_count: %d/%d file_count: %d/%d"
        , cfg->conf_id
        , r->server->server_hostname
        , r->filename
        , ip_count
        , cfg->ip_limit
        , file_count
        , cfg->file_limit
    );
    VLIMIT_DEBUG_SYSLOG("vlimit_check_limit: ", vlimit_debug_log_buf, r->pool);

    /* Evaluate the counters gathered above against the configured limits. */
    if (cfg->ip_limit > 0 && ip_count > cfg->ip_limit) {
        vlimit_debug_log_buf = apr_psprintf(r->pool
            , "Rejected, too many connections from this host(%s) to the file(%s) by VlimitIP[ip_limig=(%d) docroot=(%s)]."
            , r->connection->remote_ip
            , header_name
            , cfg->ip_limit
            , cfg->full_path
        );
        VLIMIT_DEBUG_SYSLOG("vlimit_check_limit: ", vlimit_debug_log_buf, r->pool);
        if (counter_stat != -2)
            vlimit_logging("RESULT: 503 INC", r, cfg, limit_stat);
        return HTTP_SERVICE_UNAVAILABLE;
    } else if (cfg->file_limit > 0 && file_count > cfg->file_limit) {
        vlimit_debug_log_buf = apr_psprintf(r->pool
            , "Rejected, too many connections to the file(%s) by VlimitFile[limit=(%d) docroot=(%s)]."
            , header_name
            , cfg->file_limit
            , cfg->full_path
        );
        VLIMIT_DEBUG_SYSLOG("vlimit_check_limit: ", vlimit_debug_log_buf, r->pool);
        if (counter_stat != -2)
            vlimit_logging("RESULT: 503 INC", r, cfg, limit_stat);
        return HTTP_SERVICE_UNAVAILABLE;
        //return HTTP_NOT_FOUND;
    } else {
        VLIMIT_DEBUG_SYSLOG("vlimit_check_limit: ", "OK: Passed all checks", r->pool);
        if (counter_stat != -2)
            vlimit_logging("RESULT: OK INC", r, cfg, limit_stat);
        return OK;
    }

    return OK;
}
/* Per-request DoS check hook.
 *
 * Looks up the client's address in the shared-memory client table (under
 * the global mutex), updates its request count/interval, and exports
 * "SuspectDoS" / "SuspectHardDoS" into subprocess_env when the configured
 * thresholds are exceeded. Returns OK when the check is skipped entirely,
 * DECLINED otherwise so later hooks still run.
 */
static int dosdetector_read_request(request_rec *r)
{
    /* Module not fully initialized (no shm / no mutex): skip silently. */
    if(!shm || !lock) {
        DEBUGLOG_R("shared memory or global mutex is null; skip DoS check");
        return OK;
    }

    dosdetector_dir_config *cfg = (dosdetector_dir_config *) ap_get_module_config(r->per_dir_config, &dosdetector_module);

    /* NOTE(review): 'detection' reads like an enable flag, yet a truthy
     * value causes DECLINED (no check) here — confirm the intended polarity
     * against the config directive handling. */
    if(cfg->detection) return DECLINED;
    if(!ap_is_initial_req(r)) return DECLINED;

    /* Per-request opt-out via environment (e.g. SetEnvIf). */
    if(apr_table_get(r->subprocess_env, "NoCheckDoS")) {
        DEBUGLOG_R("'NoCheckDoS' is set, skipping DoS check for %s", r->uri);
        return OK;
    }

    /* Skip responses whose Content-Type matches an ignore pattern. */
    if(cfg->contenttype_regexp->nelts > 0 && is_contenttype_ignored(cfg, r)) {
        return OK;
    }

    const char *address;
    address = r->useragent_ip;

    /* Take the binary IPv4 address directly from the connection; fall back
     * to parsing the textual address when it is all-zero. */
    struct in_addr addr;
    addr = r->useragent_addr->sa.sin.sin_addr;
    if(addr.s_addr == 0) {
        inet_aton(address, &addr);
    }

    time_t now = time(NULL);

    /* enter the critical section: the client table lives in shared memory
     * and is mutated by every child process. */
    if (lock) apr_global_mutex_lock(lock);

    client_t *client = get_client(client_list, addr, cfg->period);

#ifdef _DEBUG
    int last_count = client->count;
#endif

    client_status_e status = update_client_status(client, cfg, now);
    /* NOTE(review): last_count only exists under _DEBUG; presumably
     * DEBUGLOG_R compiles to nothing otherwise — confirm the macro. */
    DEBUGLOG_R("%s, count: %d -> %d, interval: %d", address, last_count, client->count, (int)client->interval);

    if (lock) apr_global_mutex_unlock(lock);
    /* leave the critical section. NOTE(review): client->count/suspected are
     * read below after the unlock, so the logged values may race with other
     * children updating the same slot. */

    switch(status) {
      case SUSPECTED_FIRST:
        /* First time over the soft threshold: log once, tag the request. */
        ap_log_rerror(APLOG_MARK, APLOG_NOTICE, 0, r, "'%s' is suspected as DoS attack! (counter: %d)", address, client->count);
        apr_table_setn(r->subprocess_env, "SuspectDoS", "1");
        break;
      case SUSPECTED_HARD_FIRST:
        /* First time over the hard threshold: log once, tag with both vars. */
        ap_log_rerror(APLOG_MARK, APLOG_NOTICE, 0, r, "'%s' is suspected as Hard DoS attack! (counter: %d)", address, client->count);
        apr_table_setn(r->subprocess_env, "SuspectHardDoS", "1");
        apr_table_setn(r->subprocess_env, "SuspectDoS", "1");
        break;
      case SUSPECTED_HARD:
        apr_table_setn(r->subprocess_env, "SuspectHardDoS", "1");
        /* fall through: a hard suspect is also a soft suspect */
      case SUSPECTED:
        apr_table_setn(r->subprocess_env, "SuspectDoS", "1");
        DEBUGLOG_R("'%s' has been still suspected as DoS attack! (suspected %d sec ago)", address, (int)(now - client->suspected));
        break;
      case NORMAL:
        break;
    }

    return DECLINED;
}