/*
 * Allocate (or forcibly re-create) a double-buffered shared-memory page.
 *
 * Layout: [shared_page header][segment 0][segment 1], each segment being
 * 'segmentsize' bytes.  If creation is blocked by a stale segment or a
 * corrupt backing file, the function escalates: attach+destroy the old
 * segment, then remove the backing file, retrying apr_shm_create() after
 * each step.
 *
 * Returns the initialised page on success, NULL on failure.
 */
shared_page* shmdata_getNewSharedPage(pool* p, apr_shm_t** shm_t, int segmentsize, char* path){
    int overheadsize,usersize, x;
    shared_page* page;
    apr_status_t rv;
    overheadsize=sizeof(shared_page);
    usersize=segmentsize*2;   /* two segments: front + back buffer */
    rv=apr_shm_create(shm_t,overheadsize+usersize,path,p);
#ifdef WIN32
    if(rv==APR_EEXIST)
    {
        // try to attach it; seems windows specific behaviour
        rv = apr_shm_attach(shm_t,path,p);
    }
#endif
    if(rv!=APR_SUCCESS){
        //if failure then try to remove shm segment and try again
        if(apr_shm_attach(shm_t,path,p)==APR_SUCCESS){
            apr_shm_destroy(*shm_t);
            rv=apr_shm_create(shm_t,overheadsize+usersize,path,p);
            //if again failure..possible bad shm file...remove and retry
            if(rv!=APR_SUCCESS){
                rv=apr_file_remove(path,p);
                if(rv==APR_SUCCESS){
                    rv=apr_shm_create(shm_t,overheadsize+usersize,path,p);
                }
            }
        }else{
            //if cannot attach blow file away and try again
            rv=apr_file_remove(path,p);
            if(rv==APR_SUCCESS){
                rv=apr_shm_create(shm_t,overheadsize+usersize,path,p);
            }
        }
    }
    if(rv==APR_SUCCESS){
        APACHE_LOG_DEBUG1("SHARED PAGES CREATED: Path=%s",path);
        page=apr_shm_baseaddr_get(*shm_t);
        page->itemmax=MAX_PAGE_ITEMS;
        page->segmentsize=segmentsize;
        page->timestamp=SHM_TIMESTAMP_INIT;
        page->flipcount=0;
        page->frontsegment=1;          /* writers start on segment 1 */
        page->backsegment=0;
        page->data=(char*)(page+1);    /* user data begins right after the header */
        page->cursor=page->data;
        for(x=0;x<SEGMENTS_PER_PAGE;x++){
            page->segments[x].itemcount=0;
        }
        return page;
    }
    APACHE_LOG_DEBUG("SHARED PAGE BAD");
    return NULL;
}
apr_status_t mod_but_shm_initialize(apr_pool_t *p, apr_pool_t *plog, apr_pool_t *ptemp, server_rec *s) { apr_status_t rv; apr_pool_t *mypool; apr_status_t sts; apr_size_t size; int i; rv = apr_pool_create(&mypool, p); if (rv != APR_SUCCESS) { ERRLOG_SRV_INFO("(SHM) Unable to create client pool for SHM"); return rv; } size = (apr_size_t)MOD_BUT_SESSION_COUNT * sizeof(mod_but_cookie) + apr_rmm_overhead_get(MOD_BUT_SESSION_COUNT + 1); ERRLOG_SRV_INFO("(SHM) Size of the shared memory allocation: %d kBytes", size/1024); sts = apr_shm_create(&cs_shm, size, tmpnam(NULL), p); if (sts != APR_SUCCESS) { ERRLOG_SRV_INFO("(SHM) Failed to create shared memory"); return sts; } else { ERRLOG_SRV_INFO("(SHM) Successfully created shared memory"); } sts = apr_rmm_init(&cs_rmm, NULL, apr_shm_baseaddr_get(cs_shm), size, p); if (sts != APR_SUCCESS) { ERRLOG_SRV_INFO("(SHM) Failed to initialize the RMM segment"); return sts; } else { ERRLOG_SRV_INFO("(SHM) Initialized RMM successfully"); } ERRLOG_SRV_INFO("(SHM) STARTING to malloc offsets in RMM"); off = apr_palloc(p, MOD_BUT_SESSION_COUNT * sizeof(apr_rmm_off_t)); for (i = 0; i < MOD_BUT_SESSION_COUNT; i++) { //ap_log_error(PC_LOG_INFO, s, "mod_but_shm.c: Malloc cs_rmm %d", i); off[i] = apr_rmm_malloc(cs_rmm, sizeof(mod_but_cookie)); } /* * Init of RMM with default values */ ERRLOG_SRV_INFO("(SHM) STARTING to give every session the default values"); for (i = 0; i < MOD_BUT_SESSION_COUNT; i++) { mod_but_cookie *c = apr_rmm_addr_get(cs_rmm, off[i]); apr_cpystrn(c->session_name, "empty", sizeof(c->session_name)); apr_cpystrn(c->session_value, "empty", sizeof(c->session_value)); apr_cpystrn(c->service_list, "empty", sizeof(c->service_list)); c->link_to_cookiestore = -1; c->logon_state = 0; c->logon_flag = 0; // used for redirect to ORIG_URL after successful authentication c->auth_strength = 0; } ERRLOG_SRV_INFO("(SHM) END to give every session the default values"); ERRLOG_SRV_INFO("(SHM) Execution of mod_but_shm_initialize was 
successfully"); apr_pool_cleanup_register(mypool, NULL, shm_cleanup, apr_pool_cleanup_null); return OK; }
/*
 * Initialise the SHM cache block in the parent process (post-config hook):
 * create the segment, mark every entry slot empty, and finish the mutex
 * post-config.  Returns OK/APR_SUCCESS (both 0) on success,
 * HTTP_INTERNAL_SERVER_ERROR otherwise.
 */
int oidc_cache_shm_post_config(server_rec *s) {
    oidc_cfg *cfg = (oidc_cfg *) ap_get_module_config(s->module_config,
            &auth_openidc_module);

    /* post_config can run more than once; only initialise the first time */
    if (cfg->cache_cfg != NULL)
        return APR_SUCCESS;
    oidc_cache_cfg_shm_t *context = oidc_cache_shm_cfg_create(s->process->pool);
    cfg->cache_cfg = context;

    /* create the shared memory segment */
    apr_status_t rv = apr_shm_create(&context->shm,
            cfg->cache_shm_entry_size_max * cfg->cache_shm_size_max,
            NULL, s->process->pool);
    if (rv != APR_SUCCESS) {
        oidc_serror(s, "apr_shm_create failed to create shared memory segment");
        return HTTP_INTERNAL_SERVER_ERROR;
    }

    /* initialize the whole segment to '\0': walk the entries by the
     * configured entry stride and mark each slot empty/unused */
    int i;
    oidc_cache_shm_entry_t *t = apr_shm_baseaddr_get(context->shm);
    for (i = 0; i < cfg->cache_shm_size_max;
            i++, OIDC_CACHE_SHM_ADD_OFFSET(t, cfg->cache_shm_entry_size_max)) {
        t->section_key[0] = '\0';
        t->access = 0;
    }

    if (oidc_cache_mutex_post_config(s, context->mutex, "shm") == FALSE)
        return HTTP_INTERNAL_SERVER_ERROR;

    oidc_sdebug(s,
            "initialized shared memory with a cache size (# entries) of: %d, and a max (single) entry size of: %d",
            cfg->cache_shm_size_max, cfg->cache_shm_entry_size_max);

    return OK;
}
/***************************************************************************** * Cookie Store Functionality */ apr_status_t mod_but_shm_initialize_cookiestore(apr_pool_t *p, apr_pool_t *plog, apr_pool_t *ptemp, server_rec *s) { apr_status_t rv; apr_pool_t *mypool; apr_status_t sts; apr_size_t size; int i; rv = apr_pool_create(&mypool, p); if (rv != APR_SUCCESS) { ERRLOG_SRV_INFO("(SHM COOKIESTORE) Unable to create client pool for SHM cookiestore"); return rv; } size = (apr_size_t)MOD_BUT_COOKIESTORE_COUNT * sizeof(mod_but_cookie_cookiestore) + apr_rmm_overhead_get(MOD_BUT_COOKIESTORE_COUNT + 1); ERRLOG_SRV_INFO("(SHM COOKIESTORE) Size of the shared cookiestore memory allocation: %d kBytes", size/1024); sts = apr_shm_create(&cs_shm_cookiestore, size, tmpnam(NULL), p); if (sts != APR_SUCCESS) { ERRLOG_SRV_INFO("(SHM COOKIESTORE) Failed to create shared cookiestore memory"); return sts; } else { ERRLOG_SRV_INFO("(SHM COOKIESTORE) Successfully created shared cookiestore memory"); } sts = apr_rmm_init(&cs_rmm_cookiestore, NULL, apr_shm_baseaddr_get(cs_shm_cookiestore), size, p); if (sts != APR_SUCCESS) { ERRLOG_SRV_INFO("(SHM COOKIESTORE) Failed to initialize the RMM segment"); return sts; } else { ERRLOG_SRV_INFO("(SHM COOKIESTORE) Initialized RMM successfully"); } ERRLOG_SRV_INFO("(SHM COOKIESTORE) STARTING to malloc offsets in RMM"); off_cookiestore = apr_palloc(p, MOD_BUT_COOKIESTORE_COUNT * sizeof(apr_rmm_off_t)); for (i = 0; i < MOD_BUT_COOKIESTORE_COUNT; i++) { //ERRLOG_SRV_INFO("Malloc cs_rmm_cookiestore %d", i); off_cookiestore[i] = apr_rmm_malloc(cs_rmm_cookiestore, sizeof(mod_but_cookie_cookiestore)); } /* * Init of RMM with default values */ ERRLOG_SRV_INFO("(SHM COOKIESTORE) STARTING to give every session the default values"); for (i = 0; i < MOD_BUT_COOKIESTORE_COUNT; i++) { mod_but_cookie_cookiestore *c = apr_rmm_addr_get(cs_rmm_cookiestore, off_cookiestore[i]); apr_cpystrn(c->cookie_name, "empty", sizeof(c->cookie_name)); apr_cpystrn(c->cookie_value, 
"empty", sizeof(c->cookie_value)); c->cookie_next = -1; c->cookie_before = -1; c->cookie_slot_used = -1; c->location_id = -1; } ERRLOG_SRV_INFO("(SHM COOKIESTORE) END to give every session the default values"); ERRLOG_SRV_INFO("(SHM COOKIESTORE) Execution of mod_but_shm_initialize_cookiestore was successfully"); apr_pool_cleanup_register(mypool, NULL, shm_cleanup_cookiestore, apr_pool_cleanup_null); return OK; }
/*
 * Initialise the shared memory cache block in the parent process:
 * create the segment, zero every entry, and set up the global mutex
 * (with child-accessible permissions on Unix) that guards it.
 */
int oidc_cache_shm_post_config(server_rec *s) {
    oidc_cfg *cfg = (oidc_cfg *) ap_get_module_config(s->module_config,
            &auth_openidc_module);

    /* post_config can run more than once; only initialise the first time */
    if (cfg->cache_cfg != NULL)
        return APR_SUCCESS;
    oidc_cache_cfg_shm_t *context = oidc_cache_shm_cfg_create(s->process->pool);
    cfg->cache_cfg = context;

    /* create the shared memory segment */
    apr_status_t rv = apr_shm_create(&context->shm,
            sizeof(oidc_cache_shm_entry_t) * cfg->cache_shm_size_max,
            NULL, s->process->pool);
    if (rv != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_ERR, rv, s,
                "oidc_cache_shm_post_config: apr_shm_create failed to create shared memory segment");
        return HTTP_INTERNAL_SERVER_ERROR;
    }

    /* initialize the whole segment to '\0': mark every slot empty */
    int i;
    oidc_cache_shm_entry_t *table = apr_shm_baseaddr_get(context->shm);
    for (i = 0; i < cfg->cache_shm_size_max; i++) {
        table[i].key[0] = '\0';
        table[i].access = 0;
    }

    const char *dir;
    apr_temp_dir_get(&dir, s->process->pool);
    /* construct the mutex filename (pid + server_rec address make it unique) */
    context->mutex_filename = apr_psprintf(s->process->pool,
            "%s/httpd_mutex.%ld.%pp", dir, (long int) getpid(), s);

    /* create the mutex lock */
    rv = apr_global_mutex_create(&context->mutex,
            (const char *) context->mutex_filename, APR_LOCK_DEFAULT,
            s->process->pool);
    if (rv != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_ERR, rv, s,
                "oidc_cache_shm_post_config: apr_global_mutex_create failed to create mutex on file %s",
                context->mutex_filename);
        return HTTP_INTERNAL_SERVER_ERROR;
    }

    /* need this on Linux so child processes can acquire the mutex */
#ifdef AP_NEED_SET_MUTEX_PERMS
#if MODULE_MAGIC_NUMBER_MAJOR >= 20081201
    rv = ap_unixd_set_global_mutex_perms(context->mutex);
#else
    rv = unixd_set_global_mutex_perms(context->mutex);
#endif
    if (rv != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_ERR, rv, s,
                "oidc_cache_shm_post_config: unixd_set_global_mutex_perms failed; could not set permissions ");
        return HTTP_INTERNAL_SERVER_ERROR;
    }
#endif

    return OK;
}
/*
 * Extended-create shim: this implementation has no extended shm
 * attributes, so 'flags' is accepted for API compatibility and
 * intentionally ignored; the call is forwarded to apr_shm_create().
 */
APR_DECLARE(apr_status_t) apr_shm_create_ex(apr_shm_t **m,
                                            apr_size_t reqsize,
                                            const char *filename,
                                            apr_pool_t *p,
                                            apr_int32_t flags)
{
    (void)flags; /* silence unused-parameter warnings; see note above */
    return apr_shm_create(m, reqsize, filename, p);
}
/*
 * SHM init; called in the ap_hook_post_config phase.
 * (code partially from mod_auth_digest.c)
 */
static int shm_init(apr_pool_t *p, server_rec *s)
{
    apr_status_t ret;
    void *data;
    const char *userdata_key = "akismet_dummy_key";

    /* initialize_module() will be called twice, and if it's a DSO
     * then all static data from the first call will be lost. Only
     * set up our static data on the second call. */
    apr_pool_userdata_get(&data, userdata_key, s->process->pool);
    if (!data) {
        apr_pool_userdata_set((const void *)1, userdata_key,
                              apr_pool_cleanup_null, s->process->pool);
        return OK; /* This would be the first time through */
    }

    /* FIX: the original read "ret = call() != APR_SUCCESS", which due to
     * operator precedence stored the comparison result (0/1) in 'ret'
     * instead of the real status code; parenthesise the assignment. */
    if ((ret = apr_shm_create(&api_cache_shm, API_CACHE_SHM_SIZE,
                              api_cache_shm_file, p)) != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_CRIT, ret, s,
                     "Failed to create shared segment file '%s'",
                     api_cache_shm_file);
        return HTTP_INTERNAL_SERVER_ERROR;
    }

    if ((ret = apr_global_mutex_create(&global_lock, global_lock_file,
                                       APR_LOCK_DEFAULT, p)) != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_CRIT, ret, s,
                     "Failed to create global mutex file '%s'",
                     global_lock_file);
        return HTTP_INTERNAL_SERVER_ERROR;
    }

#ifdef AP_NEED_SET_MUTEX_PERMS
    if ((ret = unixd_set_global_mutex_perms(global_lock)) != APR_SUCCESS) {
        /* FIX: the format string contained a "%s" with no matching
         * argument (undefined behaviour); drop the stray specifier. */
        ap_log_perror(APLOG_MARK, APLOG_CRIT, ret, p,
                      "Failed to set mutex permission. "
                      "please check out parent process's privileges!");
        return HTTP_INTERNAL_SERVER_ERROR;
    }
#endif

    key_verified_infos = apr_shm_baseaddr_get(api_cache_shm);
    if (!key_verified_infos) {
        ap_log_error(APLOG_MARK, APLOG_CRIT, -1, s,
                     "failed to allocate shared memory");
        return HTTP_INTERNAL_SERVER_ERROR;
    }

    /* Clear all key_verified_info */
    int i;
    for (i = 0; i < NUM_BUCKETS; i++) {
        key_verified_infos[i].status = -1;
        /* NOTE(review): assumes key[] is exactly 1024 bytes - prefer
         * sizeof(key_verified_infos[i].key) if the declaration allows. */
        memset(key_verified_infos[i].key, 0, 1024);
    }

    /* Register a cleanup function */
    apr_pool_cleanup_register(p, NULL, cleanup_shm_resources,
                              apr_pool_cleanup_null);

    return OK;
}
/* Verify that an anonymous SHM segment can be created and destroyed. */
static void test_anon_create(abts_case *tc, void *data)
{
    apr_shm_t *seg = NULL;
    apr_status_t status;

    status = apr_shm_create(&seg, SHARED_SIZE, NULL, p);
    APR_ASSERT_SUCCESS(tc, "Error allocating shared memory block", status);
    ABTS_PTR_NOTNULL(tc, seg);

    status = apr_shm_destroy(seg);
    APR_ASSERT_SUCCESS(tc, "Error destroying shared memory block", status);
}
/* Verify named-segment removal semantics: after apr_shm_remove() a new
 * segment of the same name can be created, and once everything has been
 * destroyed there is no leftover resource to attach to. */
static void test_named_remove(abts_case *tc, void *data)
{
    apr_status_t status;
    apr_shm_t *first, *second;

    apr_shm_remove(SHARED_FILENAME, p);

    status = apr_shm_create(&first, SHARED_SIZE, SHARED_FILENAME, p);
    APR_ASSERT_SUCCESS(tc, "Error allocating shared memory block", status);
    if (status != APR_SUCCESS)
        return;
    ABTS_PTR_NOTNULL(tc, first);

    status = apr_shm_remove(SHARED_FILENAME, p);

    /* On platforms which acknowledge the removal of the shared resource,
     * ensure another of the same name may be created after removal; */
    if (status == APR_SUCCESS) {
        status = apr_shm_create(&second, SHARED_SIZE, SHARED_FILENAME, p);
        APR_ASSERT_SUCCESS(tc, "Error allocating shared memory block", status);
        if (status != APR_SUCCESS)
            return;
        ABTS_PTR_NOTNULL(tc, second);

        status = apr_shm_destroy(second);
        APR_ASSERT_SUCCESS(tc, "Error destroying shared memory block", status);
    }

    status = apr_shm_destroy(first);
    APR_ASSERT_SUCCESS(tc, "Error destroying shared memory block", status);

    /* Now ensure no named resource remains which we may attach to */
    status = apr_shm_attach(&first, SHARED_FILENAME, p);
    ABTS_TRUE(tc, status != 0);
}
/*
 * Run the exclusive process-mutex tests with the shared counter 'x'
 * placed in shared memory so forked children operate on the same value.
 * Prefers an anonymous segment and falls back to a named one when
 * anonymous shm is unavailable on this platform.
 */
static void proc_mutex(CuTest *tc)
{
#if APR_HAS_FORK
    apr_status_t rv;
    const char *shmname = "tpm.shm";
    apr_shm_t *shm;

    /* Use anonymous shm if available. */
    rv = apr_shm_create(&shm, sizeof(int), NULL, p);
    if (rv == APR_ENOTIMPL) {
        /* remove any stale backing file, then create a named segment */
        apr_file_remove(shmname, p);
        rv = apr_shm_create(&shm, sizeof(int), shmname, p);
    }

    apr_assert_success(tc, "create shm segment", rv);

    /* 'x' is the shared counter used by test_exclusive() */
    x = apr_shm_baseaddr_get(shm);
    test_exclusive(tc, NULL);
#else
    CuNotImpl(tc, "APR lacks fork() support");
#endif
}
/* ToDo: This function should be made to handle setting up
 * a scoreboard shared between processes using any IPC technique,
 * not just a shared memory segment
 */
/*
 * Open the scoreboard: use a name-based segment when configured,
 * otherwise try anonymous shared memory and fall back to a name-based
 * segment (DEFAULT_SCOREBOARD) when anonymous shm is unimplemented.
 */
static apr_status_t open_scoreboard(apr_pool_t *pconf)
{
#if APR_HAS_SHARED_MEMORY
    apr_status_t rv;
    char *fname = NULL;
    apr_pool_t *global_pool;

    /* We don't want to have to recreate the scoreboard after
     * restarts, so we'll create a global pool and never clean it.
     */
    rv = apr_pool_create(&global_pool, NULL);
    if (rv != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_CRIT, rv, NULL,
                     "Fatal error: unable to create global pool "
                     "for use with the scoreboard");
        return rv;
    }

    /* The config says to create a name-based shmem */
    if (ap_scoreboard_fname) {
        /* make sure it's an absolute pathname */
        fname = ap_server_root_relative(pconf, ap_scoreboard_fname);
        if (!fname) {
            ap_log_error(APLOG_MARK, APLOG_CRIT, APR_EBADPATH, NULL,
                         "Fatal error: Invalid Scoreboard path %s",
                         ap_scoreboard_fname);
            return APR_EBADPATH;
        }
        return create_namebased_scoreboard(global_pool, fname);
    }
    else { /* config didn't specify, we get to choose shmem type */
        rv = apr_shm_create(&ap_scoreboard_shm, scoreboard_size, NULL,
                            global_pool); /* anonymous shared memory */
        if ((rv != APR_SUCCESS) && (rv != APR_ENOTIMPL)) {
            ap_log_error(APLOG_MARK, APLOG_CRIT, rv, NULL,
                         "Unable to create or access scoreboard "
                         "(anonymous shared memory failure)");
            return rv;
        }
        /* Make up a filename and do name-based shmem */
        else if (rv == APR_ENOTIMPL) {
            /* Make sure it's an absolute pathname */
            ap_scoreboard_fname = DEFAULT_SCOREBOARD;
            fname = ap_server_root_relative(pconf, ap_scoreboard_fname);
            return create_namebased_scoreboard(global_pool, fname);
        }
    }
#endif /* APR_HAS_SHARED_MEMORY */
    return APR_SUCCESS;
}
/*
 * Initialise the upload-progress cache: create (or re-create) the SHM
 * segment, wrap it in an RMM allocator, and allocate the cache header
 * inside shared memory.  Returns APR_SUCCESS or the failing status.
 */
apr_status_t upload_progress_cache_init(apr_pool_t *pool, ServerConfig *config)
{
#if APR_HAS_SHARED_MEMORY
    apr_status_t result;
    apr_size_t size;
    upload_progress_cache_t *cache;
    apr_rmm_off_t block;

    if (config->cache_file) {
        /* Remove any existing shm segment with this name. */
        apr_shm_remove(config->cache_file, config->pool);
    }

    size = APR_ALIGN_DEFAULT(config->cache_bytes);
    result = apr_shm_create(&config->cache_shm, size, config->cache_file,
                            config->pool);
    if (result != APR_SUCCESS) {
        return result;
    }

    /* Determine the usable size of the shm segment. */
    size = apr_shm_size_get(config->cache_shm);

    /* This will create a rmm "handler" to get into the shared memory area */
    result = apr_rmm_init(&config->cache_rmm, NULL,
                          apr_shm_baseaddr_get(config->cache_shm), size,
                          config->pool);
    if (result != APR_SUCCESS) {
        return result;
    }

    apr_pool_cleanup_register(config->pool, config,
                              upload_progress_cache_module_kill,
                              apr_pool_cleanup_null);

    /* init cache object */
    CACHE_LOCK();
    block = apr_rmm_calloc(config->cache_rmm, sizeof(upload_progress_cache_t));
    cache = block
            ? (upload_progress_cache_t *)apr_rmm_addr_get(config->cache_rmm, block)
            : NULL;
    if (cache == NULL) {
        CACHE_UNLOCK();
        /* FIX: the original returned 0 here, which equals APR_SUCCESS and
         * silently reported success on an out-of-memory failure. */
        return APR_ENOMEM;
    }
    cache->head = NULL;
    config->cache_offset = block;
    config->cache = cache;
    CACHE_UNLOCK();
#endif

    return APR_SUCCESS;
}
/*
 * post_config hook: on the second invocation (post_config runs twice),
 * create the global mutex and the shared segment holding the PSGI apps
 * table, attaching instead of creating when another process got there
 * first.
 */
static int psgi_post_config(apr_pool_t *pconf, apr_pool_t *plog,
                            apr_pool_t *ptemp, server_rec *s)
{
    dTHX;
    const void *key;
    char *file;
    SV *app;
    apr_hash_index_t *hi;
    void *data;
    const char *userdata_key = "psgi_post_config";
    psgi_apps_t *psgi_apps = NULL;
    apr_status_t rc;

    /* skip the first invocation; only initialise on the second pass */
    apr_pool_userdata_get(&data, userdata_key, s->process->pool);
    if (data == NULL) {
        apr_pool_userdata_set((const void *)1, userdata_key,
                apr_pool_cleanup_null, s->process->pool);
        return OK;
    }
    ap_add_version_component(pconf,
            apr_psprintf(pconf, "mod_psgi/%s", MOD_PSGI_VERSION));

    mutex_name = apr_psprintf(pconf, "/tmp/psgi_mutex.%ld",
            (long int) getpid());
    rc = apr_global_mutex_create(&psgi_mutex,
            (const char *) mutex_name, APR_LOCK_DEFAULT, pconf);
    if (rc != APR_SUCCESS) {
        return DECLINED;
    }

    rc = apr_global_mutex_lock(psgi_mutex);
    if (rc != APR_SUCCESS) {
        return DECLINED;
    }

    /* shared name to store apps */
    shm_name = apr_pstrdup(pconf, "/tmp/psgi_shm");
    /* attach first (another process may already have created it), else create */
    rc = apr_shm_attach(&psgi_shm, (const char *) shm_name, pconf);
    if (rc != APR_SUCCESS) {
        rc = apr_shm_create(&psgi_shm, sizeof(psgi_apps_t),
                (const char *) shm_name, pconf);
    }

    if (rc == APR_SUCCESS) {
        psgi_apps = (psgi_apps_t *)apr_shm_baseaddr_get(psgi_shm);
        /* NOTE(review): apr_hash_make(pconf) allocates from this process's
         * own pool; storing that pointer inside the shared segment does
         * not make the hash contents visible to other processes - confirm
         * this is the intended behaviour. */
        psgi_apps->apps = apr_hash_make(pconf);
    }
    /* NOTE(review): failure to attach or create the segment is silently
     * ignored here (function still returns OK); consider logging it. */
    apr_global_mutex_unlock(psgi_mutex);

    return OK;
}
/*
 * Fork a child and pass N_MESSAGES messages through message boxes that
 * live in an anonymous shared memory segment; the child's exit status
 * carries the number of messages it received, which the parent checks.
 */
static void test_anon(abts_case *tc, void *data)
{
    apr_proc_t proc;
    apr_status_t rv;
    apr_shm_t *shm;
    apr_size_t retsize;
    int cnt, i;
    int recvd;

    rv = apr_shm_create(&shm, SHARED_SIZE, NULL, p);
    APR_ASSERT_SUCCESS(tc, "Error allocating shared memory block", rv);
    ABTS_PTR_NOTNULL(tc, shm);

    retsize = apr_shm_size_get(shm);
    ABTS_INT_EQUAL(tc, SHARED_SIZE, retsize);

    boxes = apr_shm_baseaddr_get(shm);
    ABTS_PTR_NOTNULL(tc, boxes);

    rv = apr_proc_fork(&proc, p);
    if (rv == APR_INCHILD) { /* child */
        int num = msgwait(5, 0, N_BOXES);
        /* exit with the number of messages received so that the parent
         * can check that all messages were received.
         */
        exit(num);
    }
    else if (rv == APR_INPARENT) { /* parent */
        i = N_BOXES;
        cnt = 0;
        while (cnt++ < N_MESSAGES) {
            /* step backwards through the boxes in strides of 3,
             * wrapping back to the top when we run off the bottom */
            if ((i-=3) < 0) {
                i += N_BOXES; /* start over at the top */
            }
            msgput(i, MSG);
            apr_sleep(apr_time_make(0, 10000));
        }
    }
    else {
        ABTS_FAIL(tc, "apr_proc_fork failed");
    }
    /* wait for the child */
    rv = apr_proc_wait(&proc, &recvd, NULL, APR_WAIT);
    ABTS_INT_EQUAL(tc, N_MESSAGES, recvd);

    rv = apr_shm_destroy(shm);
    APR_ASSERT_SUCCESS(tc, "Error destroying shared memory block", rv);
}
/* Create an anonymous segment and confirm the reported size matches. */
static void test_check_size(abts_case *tc, void *data)
{
    apr_shm_t *seg = NULL;
    apr_size_t reported;
    apr_status_t status;

    status = apr_shm_create(&seg, SHARED_SIZE, NULL, p);
    APR_ASSERT_SUCCESS(tc, "Error allocating shared memory block", status);
    ABTS_PTR_NOTNULL(tc, seg);

    reported = apr_shm_size_get(seg);
    ABTS_SIZE_EQUAL(tc, SHARED_SIZE, reported);

    status = apr_shm_destroy(seg);
    APR_ASSERT_SUCCESS(tc, "Error destroying shared memory block", status);
}
/*
 * Create or join the dosdetector client table in shared memory and link
 * its free list of client slots.  Joins an existing segment when one is
 * already present; otherwise creates and zeroes a new one.
 */
static void create_shm(server_rec *s, apr_pool_t *p)
{
    int threaded_mpm;
    ap_mpm_query(AP_MPMQ_IS_THREADED, &threaded_mpm);
    /* NOTE(review): tmpnam() is race-prone and deprecated; consider
     * apr_temp_dir_get() plus a pid-based unique name instead. */
    tmpnam(lock_name);
    apr_global_mutex_create(&lock, lock_name, APR_THREAD_MUTEX_DEFAULT, p);

    size_t size;
    size = sizeof(client_list_t) + table_size * sizeof(client_t);
    ap_log_error(APLOG_MARK, APLOG_STARTUP, 0, NULL,
                 "Create or Joining shmem. name: %s, size: %zd", shmname, size);

    if (lock) apr_global_mutex_lock(lock);
    apr_status_t rc = apr_shm_attach(&shm, shmname, p);
    if (APR_SUCCESS != rc) {
        DEBUGLOG("dosdetector: Creating shared memory");
        apr_shm_remove(shmname, p);
        rc = apr_shm_create(&shm, size, shmname, p);
        if (APR_SUCCESS != rc) {
            ap_log_error(APLOG_MARK, APLOG_ERR, 0, 0,
                         "dosdetector: failed to create shared memory %s\n", shmname);
            /* FIX: the original fell through here and dereferenced the
             * NULL client_list below; unlock and bail out instead. */
            if (lock) apr_global_mutex_unlock(lock);
            return;
        }
        client_list = apr_shm_baseaddr_get(shm);
        memset(client_list, 0, size);
    } else {
        DEBUGLOG("dosdetector: Joining shared memory");
        client_list = apr_shm_baseaddr_get(shm);
    }

    apr_shm_remove(shmname, p); // Just to set destroy flag.

    /* chain every slot into a singly linked free list */
    client_list->head = client_list->base;
    client_t *c = client_list->base;
    int i;
    for (i = 1; i < table_size; i++) {
        c->next = (c + 1);
        c++;
    }
    c->next = NULL;
    if (lock) apr_global_mutex_unlock(lock);
}
// Create a new named shared-memory region of the requested size.
// Returns false if the APR allocation fails; on success the segment is
// flagged for destruction on teardown and map() completes the setup.
bool LLPluginSharedMemory::create(size_t size)
{
    mSize = size;
    mName = APR_SHARED_MEMORY_PREFIX_STRING;
    mName += createName();

    apr_status_t rv = apr_shm_create(&(mImpl->mAprSharedMemory), mSize,
                                     mName.c_str(), gAPRPoolp);
    if (ll_apr_warn_status(rv))
    {
        return false;
    }

    mNeedsDestroy = true;
    return map();
}
/*
 * Set up the LDAP shared-memory cache: when caching is enabled, carve
 * out an SHM segment sized from the configuration and hand it to an RMM
 * allocator, then register the module cleanup and build the URL cache.
 */
apr_status_t util_ldap_cache_init(apr_pool_t *pool, util_ldap_state_t *st)
{
#if APR_HAS_SHARED_MEMORY
    apr_status_t rv;
    apr_size_t seg_size;

    if (st->cache_bytes > 0) {
        if (st->cache_file) {
            /* Remove any existing shm segment with this name. */
            apr_shm_remove(st->cache_file, st->pool);
        }

        seg_size = APR_ALIGN_DEFAULT(st->cache_bytes);
        rv = apr_shm_create(&st->cache_shm, seg_size, st->cache_file, st->pool);
        if (rv != APR_SUCCESS) {
            return rv;
        }

        /* Determine the usable size of the shm segment. */
        seg_size = apr_shm_size_get(st->cache_shm);

        /* Wrap the segment in an rmm "handler" so we can allocate from it */
        rv = apr_rmm_init(&st->cache_rmm, NULL,
                          apr_shm_baseaddr_get(st->cache_shm), seg_size,
                          st->pool);
        if (rv != APR_SUCCESS) {
            return rv;
        }
    }
#endif

    apr_pool_cleanup_register(st->pool, st, util_ldap_cache_module_kill,
                              apr_pool_cleanup_null);

    st->util_ldap_cache = util_ald_create_cache(st,
                                                st->search_cache_size,
                                                st->search_cache_ttl,
                                                util_ldap_url_node_hash,
                                                util_ldap_url_node_compare,
                                                util_ldap_url_node_copy,
                                                util_ldap_url_node_free,
                                                util_ldap_url_node_display);
    return APR_SUCCESS;
}
/*
 * Create the master-stat SHM segment and its guarding global mutex,
 * then zero the stats block and initialise every RRD ring inside it.
 * On mutex failure the segment is torn down; the APR status is returned.
 */
apr_status_t server_init_master_stat(dav_rainx_server_conf *conf,
        apr_pool_t *pool, apr_pool_t *plog)
{
    char buff[256];
    apr_status_t rc;

    DAV_XDEBUG_POOL(plog, 0, "%s()", __FUNCTION__);

    /* Create and attach the segment */
    rc = apr_shm_create(&(conf->shm.handle), sizeof(struct shm_stats_s),
            conf->shm.path, pool);
    if (APR_SUCCESS != rc) {
        DAV_ERROR_POOL(plog, 0,
                "%s : Cannot create a SHM segment at [%s] rc=%d : %s",
                __FUNCTION__, conf->shm.path, rc,
                apr_strerror(rc, buff, sizeof(buff)));
        conf->shm.handle = NULL;
        return rc;
    }
    DAV_DEBUG_POOL(plog, 0, "%s : SHM segment created at [%s]",
            __FUNCTION__, conf->shm.path);

    /* Create a processus lock*/
    rc = apr_global_mutex_create(&(conf->lock.handle), conf->lock.path,
            APR_LOCK_DEFAULT, pool);
    if (rc != APR_SUCCESS) {
        DAV_ERROR_POOL(plog, 0,
                "%s : Cannot create a global_mutex at [%s] rc=%d : %s",
                __FUNCTION__, conf->lock.path, rc,
                apr_strerror(rc, buff, sizeof(buff)));
        /* roll back the segment created above */
        (void) apr_shm_destroy(conf->shm.handle);
        conf->shm.handle = NULL;
        return rc;
    }
    DAV_DEBUG_POOL(plog, 0, "%s : globalmutex created at [%s]",
            __FUNCTION__, conf->lock.path);

    /* Init the SHM: zero the stats then reset each RRD ring */
    void *ptr_counter = apr_shm_baseaddr_get(conf->shm.handle);
    if (ptr_counter) {
        bzero(ptr_counter, sizeof(struct shm_stats_s));
        /* init rrd's */
        rainx_stats_rrd_init(&(((struct shm_stats_s *) ptr_counter)->body.rrd_req_sec));
        rainx_stats_rrd_init(&(((struct shm_stats_s *) ptr_counter)->body.rrd_duration));
        rainx_stats_rrd_init(&(((struct shm_stats_s *) ptr_counter)->body.rrd_req_put_sec));
        rainx_stats_rrd_init(&(((struct shm_stats_s *) ptr_counter)->body.rrd_put_duration));
        rainx_stats_rrd_init(&(((struct shm_stats_s *) ptr_counter)->body.rrd_req_get_sec));
        rainx_stats_rrd_init(&(((struct shm_stats_s *) ptr_counter)->body.rrd_get_duration));
        rainx_stats_rrd_init(&(((struct shm_stats_s *) ptr_counter)->body.rrd_req_del_sec));
        rainx_stats_rrd_init(&(((struct shm_stats_s *) ptr_counter)->body.rrd_del_duration));
    }

    return APR_SUCCESS;
}
/*
 * Lazily create the per-process stats SHM segment.  The handle is cached
 * in the process pool's userdata, so a second call reuses the existing
 * segment; a pre-cleanup callback destroys it before the pool goes away.
 */
static apr_status_t _create_shm_if_needed(char *shm_path, server_rec *server,
        apr_pool_t *plog)
{
    apr_pool_t *ppool = server->process->pool;
    apr_shm_t *shm = NULL;
    apr_status_t rc;

    // Test if an SHM segment already exists
    apr_pool_userdata_get((void**)&shm, SHM_HANDLE_KEY, ppool);
    if (shm == NULL) {
        DAV_DEBUG_POOL(plog, 0, "%s: Creating SHM segment at [%s]",
                __FUNCTION__, shm_path);
        // Create a new SHM segment
        rc = apr_shm_create(&shm, sizeof(struct shm_stats_s), shm_path, ppool);
        if (rc != APR_SUCCESS) {
            char buff[256];
            DAV_ERROR_POOL(plog, 0,
                    "Failed to create the SHM segment at [%s]: %s",
                    shm_path, apr_strerror(rc, buff, sizeof(buff)));
            return rc;
        }
        /* Init the SHM: zero the stats then reset each RRD ring */
        void *ptr_counter = apr_shm_baseaddr_get(shm);
        if (ptr_counter) {
            memset(ptr_counter, 0, sizeof(struct shm_stats_s));
            /* init rrd's */
            rawx_stats_rrd_init(&(((struct shm_stats_s *) ptr_counter)->body.rrd_req_sec));
            rawx_stats_rrd_init(&(((struct shm_stats_s *) ptr_counter)->body.rrd_duration));
            rawx_stats_rrd_init(&(((struct shm_stats_s *) ptr_counter)->body.rrd_req_put_sec));
            rawx_stats_rrd_init(&(((struct shm_stats_s *) ptr_counter)->body.rrd_put_duration));
            rawx_stats_rrd_init(&(((struct shm_stats_s *) ptr_counter)->body.rrd_req_get_sec));
            rawx_stats_rrd_init(&(((struct shm_stats_s *) ptr_counter)->body.rrd_get_duration));
            rawx_stats_rrd_init(&(((struct shm_stats_s *) ptr_counter)->body.rrd_req_del_sec));
            rawx_stats_rrd_init(&(((struct shm_stats_s *) ptr_counter)->body.rrd_del_duration));
        }
        // Save the SHM handle in the process' pool, without cleanup callback
        apr_pool_userdata_set(shm, SHM_HANDLE_KEY, NULL, ppool);
        // Register the cleanup callback to be executed BEFORE pool cleanup
        apr_pool_pre_cleanup_register(ppool, shm, _destroy_shm_cb);
    } else {
        DAV_DEBUG_POOL(plog, 0, "%s: Found an already created SHM segment",
                __FUNCTION__);
    }
    return APR_SUCCESS;
}
/*
 * Initialise the LDAP shared-memory cache, tolerating an already
 * existing segment (a child process attaches instead of creating), then
 * hand the memory to an RMM allocator and build the URL cache.
 */
apr_status_t util_ldap_cache_init(apr_pool_t *pool, util_ldap_state_t *st)
{
#if APR_HAS_SHARED_MEMORY
    apr_status_t status;
    apr_size_t seg_size = APR_ALIGN_DEFAULT(st->cache_bytes);

    status = apr_shm_create(&st->cache_shm, seg_size, st->cache_file, st->pool);
    if (status == APR_EEXIST) {
        /*
         * The cache could have already been created (i.e. we may be a child
         * process). See if we can attach to the existing shared memory.
         */
        status = apr_shm_attach(&st->cache_shm, st->cache_file, st->pool);
    }
    if (status != APR_SUCCESS) {
        return status;
    }

    /* Determine the usable size of the shm segment. */
    seg_size = apr_shm_size_get(st->cache_shm);

    /* Wrap the segment in an rmm "handler" so we can allocate from it */
    status = apr_rmm_init(&st->cache_rmm, NULL,
                          apr_shm_baseaddr_get(st->cache_shm), seg_size,
                          st->pool);
    if (status != APR_SUCCESS) {
        return status;
    }
#endif

    apr_pool_cleanup_register(st->pool, st, util_ldap_cache_module_kill,
                              apr_pool_cleanup_null);

    st->util_ldap_cache = util_ald_create_cache(st,
                                                util_ldap_url_node_hash,
                                                util_ldap_url_node_compare,
                                                util_ldap_url_node_copy,
                                                util_ldap_url_node_free,
                                                util_ldap_url_node_display);
    return APR_SUCCESS;
}
/**
 * Create a name-based scoreboard in the given pool using the
 * given filename.
 */
static apr_status_t create_namebased_scoreboard(apr_pool_t *pool,
                                                const char *fname)
{
#if APR_HAS_SHARED_MEMORY
    apr_status_t status;

    /* The shared memory file must not exist before we create the
     * segment. */
    apr_shm_remove(fname, pool); /* ignore errors */

    status = apr_shm_create(&ap_scoreboard_shm, scoreboard_size, fname, pool);
    if (status != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_CRIT, status, NULL,
                     "unable to create or access scoreboard \"%s\" "
                     "(name-based shared memory failure)", fname);
        return status;
    }
#endif /* APR_HAS_SHARED_MEMORY */
    return APR_SUCCESS;
}
/*
 * Allocate the client table in shared memory (named when shmname is set,
 * anonymous otherwise) and chain every slot into the free list.
 */
static apr_status_t create_shm(server_rec *s, apr_pool_t *p)
{
    apr_status_t status;
    size_t bytes = sizeof(client_list_t) + table_size * sizeof(client_t);

    if (shmname != NULL) {
        ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, s,
                     "creating named shared memory '%s'", shmname);
        status = apr_shm_remove(shmname, p);
        if (APR_SUCCESS == status) {
            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
                         "removed the existing shared memory segment named '%s'",
                         shmname);
        }
    } else {
        ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, s,
                     "creating anonymous shared memory");
    }

    status = apr_shm_create(&shm, bytes, shmname, p);
    if (APR_SUCCESS != status) {
        return status;
    }

    client_list = apr_shm_baseaddr_get(shm);
    memset(client_list, 0, bytes);

    if (shmname != NULL) {
        /* prevent other processes from accessing the segment */
        apr_shm_remove(shmname, p);
    }

    /* chain every slot into a singly linked free list */
    client_list->head = client_list->base;
    client_t *slot = client_list->base;
    int idx;
    for (idx = 1; idx < table_size; idx++) {
        slot->next = slot + 1;
        slot++;
    }
    slot->next = NULL;

    return APR_SUCCESS;
}
/*
 * Lua binding: shm_create(filename, reqsize) -> shm object.
 * A nil filename requests anonymous shared memory.  Pushes the new shm
 * userdata on success; otherwise pushes the error via the push_error_*
 * helpers (their return value is propagated to Lua).
 */
int lua_apr_shm_create(lua_State *L)
{
    apr_status_t status;
    lua_apr_shm *object;
    const char *filename;
    apr_size_t reqsize;

    /* nil filename means anonymous shared memory */
    filename = lua_isnil(L, 1) ? NULL : luaL_checkstring(L, 1);
    reqsize = luaL_checkinteger(L, 2);
    object = new_object(L, &lua_apr_shm_type);
    if (object == NULL)
        return push_error_memory(L);
    status = apr_pool_create(&object->pool, NULL);
    if (status != APR_SUCCESS)
        return push_error_status(L, status);
    /* NOTE(review): if this create fails, object->pool is presumably only
     * reclaimed when the userdata is garbage-collected - confirm the
     * __gc metamethod handles a partially initialised object. */
    status = apr_shm_create(&object->handle, reqsize, filename, object->pool);
    if (status != APR_SUCCESS)
        return push_error_status(L, status);
    init_shm(L, object);
    return 1;
}
/*
 * Smoke-test the relocatable memory manager (RMM) on top of an anonymous
 * shared memory segment: fragment allocation, out-of-memory detection,
 * 8-byte alignment, read/write integrity, free ordering (forward and
 * backward), calloc of one large block, and the realloc
 * offset-calculation regression.  Progress is printed to stdout; any
 * failure returns a non-APR_SUCCESS status.
 */
static apr_status_t test_rmm(apr_pool_t *parpool)
{
    apr_status_t rv;
    apr_pool_t *pool;
    apr_shm_t *shm;
    apr_rmm_t *rmm;
    apr_size_t size, fragsize;
    apr_rmm_off_t *off;
    int i;
    void *entity;

    rv = apr_pool_create(&pool, parpool);
    if (rv != APR_SUCCESS) {
        fprintf(stderr, "Error creating child pool\n");
        return rv;
    }

    /* We're going to want 10 blocks of data from our target rmm. */
    size = SHARED_SIZE + apr_rmm_overhead_get(FRAG_COUNT + 1);
    printf("Creating anonymous shared memory (%" APR_SIZE_T_FMT " bytes).....", size);
    rv = apr_shm_create(&shm, size, NULL, pool);
    if (rv != APR_SUCCESS) {
        fprintf(stderr, "Error allocating shared memory block\n");
        return rv;
    }
    fprintf(stdout, "OK\n");

    printf("Creating rmm segment.............................");
    rv = apr_rmm_init(&rmm, NULL, apr_shm_baseaddr_get(shm), size, pool);
    if (rv != APR_SUCCESS) {
        fprintf(stderr, "Error allocating rmm..............\n");
        return rv;
    }
    fprintf(stdout, "OK\n");

    fragsize = SHARED_SIZE / FRAG_COUNT;
    printf("Creating each fragment of size %" APR_SIZE_T_FMT "................", fragsize);
    off = apr_palloc(pool, FRAG_COUNT * sizeof(apr_rmm_off_t));
    for (i = 0; i < FRAG_COUNT; i++) {
        off[i] = apr_rmm_malloc(rmm, fragsize);
    }
    fprintf(stdout, "OK\n");

    /* the segment is now full, so a further allocation must fail
     * (apr_rmm_malloc reports failure as offset 0) */
    printf("Checking for out of memory allocation............");
    if (apr_rmm_malloc(rmm, FRAG_SIZE * FRAG_COUNT) == 0) {
        fprintf(stdout, "OK\n");
    }
    else {
        return APR_EGENERAL;
    }

    printf("Checking each fragment for address alignment.....");
    for (i = 0; i < FRAG_COUNT; i++) {
        char *c = apr_rmm_addr_get(rmm, off[i]);
        apr_size_t sc = (apr_size_t)c;

        if (off[i] == 0) {
            printf("allocation failed for offset %d\n", i);
            return APR_ENOMEM;
        }

        /* every fragment must be 8-byte aligned */
        if (sc & 7) {
            printf("Bad alignment for fragment %d; %p not %p!\n",
                   i, c, (void *)APR_ALIGN_DEFAULT((apr_size_t)c));
            return APR_EGENERAL;
        }
    }
    fprintf(stdout, "OK\n");

    printf("Setting each fragment to a unique value..........");
    for (i = 0; i < FRAG_COUNT; i++) {
        int j;
        char **c = apr_rmm_addr_get(rmm, off[i]);
        for (j = 0; j < FRAG_SIZE; j++, c++) {
            *c = apr_itoa(pool, i + j);
        }
    }
    fprintf(stdout, "OK\n");

    printf("Checking each fragment for its unique value......");
    for (i = 0; i < FRAG_COUNT; i++) {
        int j;
        char **c = apr_rmm_addr_get(rmm, off[i]);
        for (j = 0; j < FRAG_SIZE; j++, c++) {
            char *d = apr_itoa(pool, i + j);
            if (strcmp(*c, d) != 0) {
                return APR_EGENERAL;
            }
        }
    }
    fprintf(stdout, "OK\n");

    printf("Freeing each fragment............................");
    for (i = 0; i < FRAG_COUNT; i++) {
        rv = apr_rmm_free(rmm, off[i]);
        if (rv != APR_SUCCESS) {
            return rv;
        }
    }
    fprintf(stdout, "OK\n");

    printf("Creating one large segment.......................");
    off[0] = apr_rmm_calloc(rmm, SHARED_SIZE);
    fprintf(stdout, "OK\n");

    printf("Setting large segment............................");
    for (i = 0; i < FRAG_COUNT * FRAG_SIZE; i++) {
        char **c = apr_rmm_addr_get(rmm, off[0]);
        c[i] = apr_itoa(pool, i);
    }
    fprintf(stdout, "OK\n");

    printf("Freeing large segment............................");
    apr_rmm_free(rmm, off[0]);
    fprintf(stdout, "OK\n");

    printf("Creating each fragment of size %" APR_SIZE_T_FMT " (again)........", fragsize);
    for (i = 0; i < FRAG_COUNT; i++) {
        off[i] = apr_rmm_malloc(rmm, fragsize);
    }
    fprintf(stdout, "OK\n");

    printf("Freeing each fragment backwards..................");
    for (i = FRAG_COUNT - 1; i >= 0; i--) {
        rv = apr_rmm_free(rmm, off[i]);
        if (rv != APR_SUCCESS) {
            return rv;
        }
    }
    fprintf(stdout, "OK\n");

    printf("Creating one large segment (again)...............");
    off[0] = apr_rmm_calloc(rmm, SHARED_SIZE);
    fprintf(stdout, "OK\n");
    printf("Freeing large segment............................");
    apr_rmm_free(rmm, off[0]);
    fprintf(stdout, "OK\n");

    printf("Checking realloc.................................");
    off[0] = apr_rmm_calloc(rmm, SHARED_SIZE - 100);
    off[1] = apr_rmm_calloc(rmm, 100);
    if (off[0] == 0 || off[1] == 0) {
        printf("FAILED\n");
        return APR_EINVAL;
    }
    entity = apr_rmm_addr_get(rmm, off[1]);
    rv = apr_rmm_free(rmm, off[0]);
    if (rv != APR_SUCCESS) {
        printf("FAILED\n");
        return rv;
    }

    {
        unsigned char *c = entity;

        /* Fill in the region; the first half with zereos, which will
         * likely catch the apr_rmm_realloc offset calculation bug by
         * making it think the old region was zero length. */
        for (i = 0; i < 100; i++) {
            c[i] = (i < 50) ? 0 : i;
        }
    }

    /* now we can realloc off[1] and get many more bytes */
    off[0] = apr_rmm_realloc(rmm, entity, SHARED_SIZE - 100);
    if (off[0] == 0) {
        printf("FAILED\n");
        return APR_EINVAL;
    }

    {
        unsigned char *c = apr_rmm_addr_get(rmm, off[0]);

        /* verify the realloc preserved the original 100 bytes */
        for (i = 0; i < 100; i++) {
            if (c[i] != (i < 50 ? 0 : i)) {
                printf("FAILED at offset %d: %hx\n", i, c[i]);
                return APR_EGENERAL;
            }
        }
    }
    fprintf(stdout, "OK\n");

    printf("Destroying rmm segment...........................");
    rv = apr_rmm_destroy(rmm);
    if (rv != APR_SUCCESS) {
        printf("FAILED\n");
        return rv;
    }
    printf("OK\n");

    printf("Destroying shared memory segment.................");
    rv = apr_shm_destroy(shm);
    if (rv != APR_SUCCESS) {
        printf("FAILED\n");
        return rv;
    }
    printf("OK\n");

    apr_pool_destroy(pool);

    return APR_SUCCESS;
}
/*
 * post_config hook: one-time parent-process setup of the shared counter
 * segment and the global mutex protecting it.
 *
 * Returns OK on success (or on the deliberately-skipped first invocation),
 * HTTP_INTERNAL_SERVER_ERROR on any APR failure.
 *
 * NOTE(review): exipc_shm, shmfilename, exipc_mutex and mutexfilename are
 * module-level globals defined elsewhere in this file; children inherit
 * the filenames stashed in them.
 */
static int exipc_post_config(apr_pool_t *pconf, apr_pool_t *plog,
                             apr_pool_t *ptemp, server_rec *s)
{
    void *data; /* These two help ensure that we only init once. */
    const char *userdata_key;
    apr_status_t rs;
    exipc_data *base;
    const char *tempdir;

    /*
     * The following checks if this routine has been called before.
     * This is necessary because the parent process gets initialized
     * a couple of times as the server starts up, and we don't want
     * to create any more mutexes and shared memory segments than
     * we're actually going to use.
     *
     * The key needs to be unique for the entire web server, so put
     * the module name in it.
     */
    userdata_key = "example_ipc_init_module";
    apr_pool_userdata_get(&data, userdata_key, s->process->pool);
    if (!data) {
        /*
         * If no data was found for our key, this must be the first
         * time the module is initialized. Put some data under that
         * key and return.
         */
        apr_pool_userdata_set((const void *) 1, userdata_key,
                              apr_pool_cleanup_null, s->process->pool);
        return OK;
    } /* Kilroy was here */

    /*
     * Both the shared memory and mutex allocation routines take a
     * file name. Depending on system-specific implementation of these
     * routines, that file may or may not actually be created. We'd
     * like to store those files in the operating system's designated
     * temporary directory, which APR can point us to.
     */
    rs = apr_temp_dir_get(&tempdir, pconf);
    if (APR_SUCCESS != rs) {
        ap_log_error(APLOG_MARK, APLOG_ERR, rs, s,
                     "Failed to find temporary directory");
        return HTTP_INTERNAL_SERVER_ERROR;
    }

    /* Create the shared memory segment */

    /*
     * Create a unique filename using our pid. This information is
     * stashed in the global variable so the children inherit it.
     */
    shmfilename = apr_psprintf(pconf, "%s/httpd_shm.%ld", tempdir,
                               (long int)getpid());

    /* Now create that segment */
    rs = apr_shm_create(&exipc_shm, sizeof(exipc_data),
                        (const char *) shmfilename, pconf);
    if (APR_SUCCESS != rs) {
        ap_log_error(APLOG_MARK, APLOG_ERR, rs, s,
                     "Failed to create shared memory segment on file %s",
                     shmfilename);
        return HTTP_INTERNAL_SERVER_ERROR;
    }

    /* Created it, now let's zero it out */
    base = (exipc_data *)apr_shm_baseaddr_get(exipc_shm);
    base->counter = 0;

    /* Create global mutex */

    /*
     * Create another unique filename to lock upon. Note that
     * depending on OS and locking mechanism of choice, the file
     * may or may not be actually created.
     */
    mutexfilename = apr_psprintf(pconf, "%s/httpd_mutex.%ld", tempdir,
                                 (long int) getpid());

    rs = apr_global_mutex_create(&exipc_mutex, (const char *) mutexfilename,
                                 APR_LOCK_DEFAULT, pconf);
    if (APR_SUCCESS != rs) {
        ap_log_error(APLOG_MARK, APLOG_ERR, rs, s,
                     "Failed to create mutex on file %s", mutexfilename);
        return HTTP_INTERNAL_SERVER_ERROR;
    }

    /*
     * After the mutex is created, its permissions need to be adjusted
     * on unix platforms so that the child processe can acquire
     * it. This call takes care of that. The preprocessor define was
     * set up early in this source file since Apache doesn't provide
     * it.
     */
#ifdef MOD_EXIPC_SET_MUTEX_PERMS
    rs = unixd_set_global_mutex_perms(exipc_mutex);
    if (APR_SUCCESS != rs) {
        ap_log_error(APLOG_MARK, APLOG_CRIT, rs, s,
                     "Parent could not set permissions on Example IPC "
                     "mutex: check User and Group directives");
        return HTTP_INTERNAL_SERVER_ERROR;
    }
#endif /* MOD_EXIPC_SET_MUTEX_PERMS */

    /*
     * Destroy the shm segment when the configuration pool gets destroyed. This
     * happens on server restarts. The parent will then (above) allocate a new
     * shm segment that the new children will bind to.
     */
    apr_pool_cleanup_register(pconf, NULL, shm_cleanup_wrapper,
                              apr_pool_cleanup_null);
    return OK;
}
/*
 * Exercise the apr_rmm (relocatable memory manager) API end to end on an
 * anonymous shared-memory segment: fragment alloc/free in forward and
 * reverse order, one full-size allocation, and the realloc offset bug check.
 * Uses the module-global pool `p` and the FRAG_COUNT / FRAG_SIZE /
 * SHARED_SIZE constants defined elsewhere in this file.
 */
static void test_rmm(abts_case *tc, void *data)
{
    apr_status_t rv;
    apr_pool_t *pool;
    apr_shm_t *shm;
    apr_rmm_t *rmm;
    apr_size_t size, fragsize;
    apr_rmm_off_t *off, off2;
    int i;
    void *entity;

    rv = apr_pool_create(&pool, p);
    ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);

    /* We're going to want 10 blocks of data from our target rmm. */
    size = SHARED_SIZE + apr_rmm_overhead_get(FRAG_COUNT + 1);
    rv = apr_shm_create(&shm, size, NULL, pool);
    ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);
    if (rv != APR_SUCCESS)
        return; /* no segment, nothing further can run */
    rv = apr_rmm_init(&rmm, NULL, apr_shm_baseaddr_get(shm), size, pool);
    ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);
    if (rv != APR_SUCCESS)
        return;

    /* Creating each fragment of size fragsize */
    fragsize = SHARED_SIZE / FRAG_COUNT;
    off = apr_palloc(pool, FRAG_COUNT * sizeof(apr_rmm_off_t));
    for (i = 0; i < FRAG_COUNT; i++) {
        off[i] = apr_rmm_malloc(rmm, fragsize);
    }

    /* Checking for out of memory allocation: the segment is full, so this
     * oversized request must fail (offset 0 is the failure sentinel). */
    off2 = apr_rmm_malloc(rmm, FRAG_SIZE * FRAG_COUNT);
    ABTS_TRUE(tc, !off2);

    /* Checking each fragment for address alignment (8-byte) */
    for (i = 0; i < FRAG_COUNT; i++) {
        char *c = apr_rmm_addr_get(rmm, off[i]);
        apr_size_t sc = (apr_size_t)c;

        ABTS_TRUE(tc, !!off[i]);
        ABTS_TRUE(tc, !(sc & 7));
    }

    /* Setting each fragment to a unique value */
    for (i = 0; i < FRAG_COUNT; i++) {
        int j;
        char **c = apr_rmm_addr_get(rmm, off[i]);
        for (j = 0; j < FRAG_SIZE; j++, c++) {
            *c = apr_itoa(pool, i + j);
        }
    }

    /* Checking each fragment for its unique value */
    for (i = 0; i < FRAG_COUNT; i++) {
        int j;
        char **c = apr_rmm_addr_get(rmm, off[i]);
        for (j = 0; j < FRAG_SIZE; j++, c++) {
            char *d = apr_itoa(pool, i + j);
            ABTS_STR_EQUAL(tc, d, *c);
        }
    }

    /* Freeing each fragment */
    for (i = 0; i < FRAG_COUNT; i++) {
        rv = apr_rmm_free(rmm, off[i]);
        ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);
    }

    /* Creating one large segment */
    off[0] = apr_rmm_calloc(rmm, SHARED_SIZE);

    /* Setting large segment */
    for (i = 0; i < FRAG_COUNT * FRAG_SIZE; i++) {
        char **c = apr_rmm_addr_get(rmm, off[0]);
        c[i] = apr_itoa(pool, i);
    }

    /* Freeing large segment */
    rv = apr_rmm_free(rmm, off[0]);
    ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);

    /* Creating each fragment of size fragsize (again, after full free) */
    for (i = 0; i < FRAG_COUNT; i++) {
        off[i] = apr_rmm_malloc(rmm, fragsize);
    }

    /* Freeing each fragment backwards (exercises block coalescing order) */
    for (i = FRAG_COUNT - 1; i >= 0; i--) {
        rv = apr_rmm_free(rmm, off[i]);
        ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);
    }

    /* Creating one large segment (again) */
    off[0] = apr_rmm_calloc(rmm, SHARED_SIZE);

    /* Freeing large segment */
    rv = apr_rmm_free(rmm, off[0]);
    ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);

    /* Checking realloc */
    off[0] = apr_rmm_calloc(rmm, SHARED_SIZE - 100);
    off[1] = apr_rmm_calloc(rmm, 100);
    ABTS_TRUE(tc, !!off[0]);
    ABTS_TRUE(tc, !!off[1]);

    entity = apr_rmm_addr_get(rmm, off[1]);
    rv = apr_rmm_free(rmm, off[0]);
    ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);

    {
        unsigned char *c = entity;

        /* Fill in the region; the first half with zereos, which will
         * likely catch the apr_rmm_realloc offset calculation bug by
         * making it think the old region was zero length. */
        for (i = 0; i < 100; i++) {
            c[i] = (i < 50) ? 0 : i;
        }
    }

    /* now we can realloc off[1] and get many more bytes */
    off[0] = apr_rmm_realloc(rmm, entity, SHARED_SIZE - 100);
    ABTS_TRUE(tc, !!off[0]);

    {
        unsigned char *c = apr_rmm_addr_get(rmm, off[0]);

        /* fill in the region: verify the original 100 bytes survived */
        for (i = 0; i < 100; i++) {
            ABTS_TRUE(tc, c[i] == (i < 50 ? 0 : i));
        }
    }

    rv = apr_rmm_destroy(rmm);
    ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);

    rv = apr_shm_destroy(shm);
    ABTS_INT_EQUAL(tc, APR_SUCCESS, rv);

    apr_pool_destroy(pool);
}
/*
 * Integration test for named shared memory: create a named segment, then
 * spawn external producer/consumer helper binaries that attach to it by
 * name. The consumer's exit code reports how many messages it received;
 * the producer's how many it sent — the two must match.
 * `boxes` and `p` are module-level globals defined elsewhere in this file.
 */
static void test_named(abts_case *tc, void *data)
{
    apr_status_t rv;
    apr_shm_t *shm = NULL;
    apr_size_t retsize;
    apr_proc_t pidproducer, pidconsumer;
    apr_procattr_t *attr1 = NULL, *attr2 = NULL;
    int sent, received;
    apr_exit_why_e why;
    const char *args[4];

    /* Remove any stale segment left over from a previous failed run. */
    apr_shm_remove(SHARED_FILENAME, p);

    rv = apr_shm_create(&shm, SHARED_SIZE, SHARED_FILENAME, p);
    APR_ASSERT_SUCCESS(tc, "Error allocating shared memory block", rv);
    if (rv != APR_SUCCESS) {
        return;
    }
    ABTS_PTR_NOTNULL(tc, shm);

    retsize = apr_shm_size_get(shm);
    ABTS_SIZE_EQUAL(tc, SHARED_SIZE, retsize);

    boxes = apr_shm_baseaddr_get(shm);
    ABTS_PTR_NOTNULL(tc, boxes);

    /* Launch the producer child process. */
    rv = apr_procattr_create(&attr1, p);
    ABTS_PTR_NOTNULL(tc, attr1);
    APR_ASSERT_SUCCESS(tc, "Couldn't create attr1", rv);
    rv = apr_procattr_cmdtype_set(attr1, APR_PROGRAM_ENV);
    APR_ASSERT_SUCCESS(tc, "Couldn't set copy environment", rv);
    args[0] = apr_pstrdup(p, "testshmproducer" EXTENSION);
    args[1] = NULL;
    rv = apr_proc_create(&pidproducer, TESTBINPATH "testshmproducer" EXTENSION,
                         args, NULL, attr1, p);
    APR_ASSERT_SUCCESS(tc, "Couldn't launch producer", rv);

    /* Launch the consumer child process. */
    rv = apr_procattr_create(&attr2, p);
    ABTS_PTR_NOTNULL(tc, attr2);
    APR_ASSERT_SUCCESS(tc, "Couldn't create attr2", rv);
    rv = apr_procattr_cmdtype_set(attr2, APR_PROGRAM_ENV);
    APR_ASSERT_SUCCESS(tc, "Couldn't set copy environment", rv);
    args[0] = apr_pstrdup(p, "testshmconsumer" EXTENSION);
    rv = apr_proc_create(&pidconsumer, TESTBINPATH "testshmconsumer" EXTENSION,
                         args, NULL, attr2, p);
    APR_ASSERT_SUCCESS(tc, "Couldn't launch consumer", rv);

    /* Reap both children; exit codes carry the message counts. */
    rv = apr_proc_wait(&pidconsumer, &received, &why, APR_WAIT);
    ABTS_INT_EQUAL(tc, APR_CHILD_DONE, rv);
    ABTS_INT_EQUAL(tc, APR_PROC_EXIT, why);

    rv = apr_proc_wait(&pidproducer, &sent, &why, APR_WAIT);
    ABTS_INT_EQUAL(tc, APR_CHILD_DONE, rv);
    ABTS_INT_EQUAL(tc, APR_PROC_EXIT, why);

    /* Cleanup before testing that producer and consumer worked correctly.
     * This way, if they didn't succeed, we can just run this test again
     * without having to cleanup manually.
     */
    APR_ASSERT_SUCCESS(tc, "Error destroying shared memory",
                       apr_shm_destroy(shm));

    ABTS_INT_EQUAL(tc, sent, received);
}
/* Set up startup-time initialization */
/*
 * post_config-style hook: opens the vlimit log file, creates the global
 * mutex, and (re)creates the anonymous shared memory segment holding one
 * SHM_DATA slot per configuration (conf_counter + 1 slots), zeroing every
 * per-client entry.
 *
 * Returns OK on success, an apr_status_t / HTTP_INTERNAL_SERVER_ERROR on
 * failure. Uses the module globals vlimit_log_fp, vlimit_mutex, shm,
 * shm_base, conf_counter and vlimit_debug_log_buf.
 */
static int vlimit_init(apr_pool_t *p, apr_pool_t *plog, apr_pool_t *ptemp, server_rec *s)
{
    apr_status_t status;
    apr_size_t retsize;
    apr_size_t shm_size;
    int t;
    int i;
    SHM_DATA *shm_data = NULL;

    VLIMIT_DEBUG_SYSLOG("vlimit_init: ", MODULE_NAME " " MODULE_VERSION " started.", p);

    /* NOTE(review): preserved behavior — if the log file cannot be opened we
     * return OK and skip mutex/shm setup entirely. Confirm this is intended;
     * request-time code presumably expects shm_base to be initialized. */
    if (apr_file_open(&vlimit_log_fp, VLIMIT_LOG_FILE,
                      APR_WRITE|APR_APPEND|APR_CREATE, APR_OS_DEFAULT, p) != APR_SUCCESS) {
        return OK;
    }

    /*
     * BUGFIX: the segment was previously sized with sizeof(shm_data) —
     * the size of a *pointer* — plus the two array members. The init loop
     * below walks the segment as an array of SHM_DATA (shm_base + t),
     * i.e. in strides of sizeof(SHM_DATA), so the segment must be sized
     * in whole SHM_DATA units or the last slots overrun the mapping.
     */
    shm_size = (apr_size_t)sizeof(SHM_DATA) * (conf_counter + 1);

    /* Create global mutex (NULL fname: let APR pick the mechanism/file). */
    status = apr_global_mutex_create(&vlimit_mutex, NULL, APR_LOCK_DEFAULT, p);
    if (status != APR_SUCCESS) {
        VLIMIT_DEBUG_SYSLOG("vlimit_init: ", "Error creating global mutex.", p);
        return status;
    }

#ifdef AP_NEED_SET_MUTEX_PERMS
    /* Drop-privilege platforms: let children acquire the mutex. */
    status = unixd_set_global_mutex_perms(vlimit_mutex);
    if (status != APR_SUCCESS) {
        VLIMIT_DEBUG_SYSLOG("vlimit_init: ", "Error xrent could not set permissions on global mutex.", p);
        return status;
    }
#endif

    if (apr_global_mutex_child_init(&vlimit_mutex, NULL, p))
        VLIMIT_DEBUG_SYSLOG("vlimit_init: ", "global mutex attached.", p);

    /* If there was a memory block already assigned (server restart),
     * destroy it before creating a fresh one. */
    if (shm) {
        status = apr_shm_destroy(shm);
        if (status != APR_SUCCESS) {
            VLIMIT_DEBUG_SYSLOG("vlimit_init: ", "Couldn't destroy old memory block", p);
            return status;
        } else {
            VLIMIT_DEBUG_SYSLOG("vlimit_init: ", "Old Shared memory block, destroyed.", p);
        }
    }

    /* Create anonymous shared memory block */
    status = apr_shm_create(&shm, shm_size, NULL, p);
    if (status != APR_SUCCESS) {
        VLIMIT_DEBUG_SYSLOG("vlimit_init: ", "Error creating shm block", p);
        return status;
    }

    /* Check size of shared memory block */
    retsize = apr_shm_size_get(shm);
    if (retsize != shm_size) {
        VLIMIT_DEBUG_SYSLOG("vlimit_init: ", "Error allocating shared memory block", p);
        /* BUGFIX: previously returned 'status', which is APR_SUCCESS here,
         * silently masking the failure. */
        return HTTP_INTERNAL_SERVER_ERROR;
    }

    /* Init shm block */
    shm_base = apr_shm_baseaddr_get(shm);
    if (shm_base == NULL) {
        VLIMIT_DEBUG_SYSLOG("vlimit_init", "Error creating status block.", p);
        /* BUGFIX: previously returned 'status' (APR_SUCCESS) on failure. */
        return HTTP_INTERNAL_SERVER_ERROR;
    }
    memset(shm_base, 0, retsize);

    vlimit_debug_log_buf = apr_psprintf(p
        , "Memory Allocated %d bytes (each conf takes %d bytes) MaxClient:%d"
        , (int)retsize
        , (int)sizeof(SHM_DATA)   /* BUGFIX: was sizeof(pointer) + members */
        , MAX_CLIENTS
    );
    VLIMIT_DEBUG_SYSLOG("vlimit_init: ", vlimit_debug_log_buf, p);

    /* Paranoia check — with the corrected sizing this cannot fail, but it
     * guards against future changes to the allocation above. */
    if (retsize < (sizeof(SHM_DATA) * conf_counter)) {
        VLIMIT_DEBUG_SYSLOG("vlimit_init ", "Not enough memory allocated!! Giving up", p);
        return HTTP_INTERNAL_SERVER_ERROR;
    }

    /* Give every session slot its default (empty) values. */
    for (t = 0; t <= conf_counter; t++) {
        shm_data = shm_base + t;
        for (i = 0; i < MAX_CLIENTS; i++) {
            shm_data->file_stat_shm[i].filename[0] = '\0';
            shm_data->ip_stat_shm[i].address[0] = '\0';
            shm_data->file_stat_shm[i].counter = 0;
            shm_data->ip_stat_shm[i].counter = 0;
        }
    }

    vlimit_debug_log_buf = apr_psprintf(p
        , "%s Version %s - Initialized [%d Conf]"
        , MODULE_NAME
        , MODULE_VERSION
        , conf_counter
    );
    VLIMIT_DEBUG_SYSLOG("vlimit_init: ", vlimit_debug_log_buf, p);

    return OK;
}
void ssl_scache_shmht_init(server_rec *s, apr_pool_t *p) { SSLModConfigRec *mc = myModConfig(s); table_t *ta; int ta_errno; apr_size_t avail; int n; apr_status_t rv; /* * Create shared memory segment */ if (mc->szSessionCacheDataFile == NULL) { ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, "SSLSessionCache required"); ssl_die(); } if ((rv = apr_shm_create(&(mc->pSessionCacheDataMM), mc->nSessionCacheDataSize, mc->szSessionCacheDataFile, mc->pPool)) != APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, "Cannot allocate shared memory"); ssl_die(); } if ((rv = apr_rmm_init(&(mc->pSessionCacheDataRMM), NULL, apr_shm_baseaddr_get(mc->pSessionCacheDataMM), mc->nSessionCacheDataSize, mc->pPool)) != APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, "Cannot initialize rmm"); ssl_die(); } ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, "initialize MM %pp RMM %pp", mc->pSessionCacheDataMM, mc->pSessionCacheDataRMM); /* * Create hash table in shared memory segment */ avail = mc->nSessionCacheDataSize; n = (avail/2) / 1024; n = n < 10 ? 10 : n; /* * Passing server_rec as opt_param to table_alloc so that we can do * logging if required ssl_util_table. Otherwise, mc is sufficient. */ if ((ta = table_alloc(n, &ta_errno, ssl_scache_shmht_malloc, ssl_scache_shmht_calloc, ssl_scache_shmht_realloc, ssl_scache_shmht_free, s )) == NULL) { ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, "Cannot allocate hash table in shared memory: %s", table_strerror(ta_errno)); ssl_die(); } table_attr(ta, TABLE_FLAG_AUTO_ADJUST|TABLE_FLAG_ADJUST_DOWN); table_set_data_alignment(ta, sizeof(char *)); table_clear(ta); mc->tSessionCacheDataTable = ta; /* * Log the done work */ ap_log_error(APLOG_MARK, APLOG_INFO, 0, s, "Init: Created hash-table (%d buckets) " "in shared memory (%" APR_SIZE_T_FMT " bytes) for SSL session cache", n, avail); return; }