void LLThreadLocalPointerBase::initStorage( ) { apr_status_t result = apr_threadkey_private_create(&mThreadKey, NULL, gAPRPoolp); if (result != APR_SUCCESS) { ll_apr_warn_status(result); LL_ERRS() << "Failed to allocate thread local data" << LL_ENDL; } }
/* Apache child-init hook: create the pool that backs this child's sockets
 * and hook its teardown into the child's configuration pool.  Note the same
 * destroy_socket_pool callback is registered for both the plain- and the
 * child-cleanup slot, exactly as before. */
static void lisp_child_init(apr_pool_t *pool, server_rec *s)
{
    if (apr_pool_create(&socket_pool, 0) != APR_SUCCESS)
        return;

    apr_pool_cleanup_register(pool, 0, destroy_socket_pool, destroy_socket_pool);
#if APR_HAS_THREADS
    /* Per-thread configuration slot; no destructor, allocated from socket_pool. */
    apr_threadkey_private_create(&cfg_key, NULL, socket_pool);
#endif
}
// This should be called exactly once, before the first call to createLocalAPRFilePool. // static void LLVolatileAPRPool::initLocalAPRFilePool() { apr_status_t status = apr_threadkey_private_create(&sLocalAPRFilePoolKey, &destroyLocalAPRFilePool, gAPRPoolp); ll_apr_assert_status(status); // Or out of memory, or system-imposed limit on the // total number of keys per process {PTHREAD_KEYS_MAX} // has been exceeded. // Create the thread-local pool for the main thread (this function is called by the main thread). createLocalAPRFilePool(); #ifdef SHOW_ASSERT gIsMainThread = getLocalAPRFilePool(); #endif }
/**
 * Initialize a threading library.
 *
 * @note This must only be called once.
 *
 * If any OS threads were created before calling this function, they must be attached using
 * hythread_attach before accessing any thread library functions.
 *
 * @param[in] lib pointer to the thread library to be initialized (non-NULL)
 * @return The thread library's initStatus will be set to 0 on success or
 *         a negative value on failure.
 */
void VMCALL hythread_init(hythread_library_t lib)
{
    apr_status_t apr_status;
    IDATA status;
    hythread_monitor_t *mon;

    // Current implementation doesn't support more than one library instance.
    if (TM_LIBRARY == NULL) {
        TM_LIBRARY = lib;
    }
    assert(TM_LIBRARY == lib);

    // Latch: a second call (or a call on an already-initialized library)
    // returns immediately.  The state is flipped BEFORE the work below,
    // so re-entrant callers will not repeat the initialization.
    if (hythread_library_state != TM_LIBRARY_STATUS_NOT_INITIALIZED)
        return;
    hythread_library_state = TM_LIBRARY_STATUS_INITIALIZED;

    apr_status = apr_initialize();
    assert(apr_status == APR_SUCCESS);

    // TM_POOL will be NULL if hythread_lib_create was not used to create the library
    if (TM_POOL == NULL) {
        apr_status = apr_pool_create(&TM_POOL, NULL);
        assert(apr_status == APR_SUCCESS);
    }

    // Thread-local key that maps an OS thread to its hythread structure.
    // No destructor is registered for the key.
    apr_status = apr_threadkey_private_create(&TM_THREAD_KEY, NULL, TM_POOL);
    assert(apr_status == APR_SUCCESS);

    // Library-wide locks; both are nested (recursive) mutexes.
    status = port_mutex_create(&lib->TM_LOCK, APR_THREAD_MUTEX_NESTED);
    assert(status == TM_ERROR_NONE);
    status = port_mutex_create(&TM_START_LOCK, APR_THREAD_MUTEX_NESTED);
    assert(status == TM_ERROR_NONE);

    status = init_group_list();
    assert(status == TM_ERROR_NONE);

    // Create default group - hosts any thread created with NULL group
    status = hythread_group_create(&TM_DEFAULT_GROUP);
    assert(status == TM_ERROR_NONE);

    // nondaemon thread barrier: counter plus condition variable used to
    // wait for all non-daemon threads to terminate.
    lib->nondaemon_thread_count = 0;
    status = hycond_create(&lib->nondaemon_thread_cond);
    assert(status == TM_ERROR_NONE);

    // init global monitor and publish it through the global storage slot.
    status = hythread_monitor_init_with_name(&p_global_monitor, 0, "Thread Global Monitor");
    assert(status == TM_ERROR_NONE);
    mon = (hythread_monitor_t*)hythread_global(GLOBAL_MONITOR_NAME);
    *mon = p_global_monitor;
    // NOTE(review): `mon` is dereferenced on the line above before this
    // assert runs, so the check fires too late if hythread_global ever
    // returned NULL — consider asserting before the store.
    assert(mon);
}
/* Create a thread-pool context.
 *
 * tp_name             - optional name (copied with strdup; may be NULL)
 * min_threads         - minimum pool size; <=0 keeps the default from
 *                       default_thread_pool_config()
 * max_threads         - maximum pool size; <=0 keeps the default
 * max_recursion_depth - maximum operation recursion depth supported
 *
 * The first call (guarded by an atomic counter) also creates the shared
 * APR pool/lock and initializes stats collection.
 */
gop_thread_pool_context_t *gop_tp_context_create(char *tp_name, int min_threads, int max_threads, int max_recursion_depth)
{
    //    char buffer[1024];
    gop_thread_pool_context_t *tpc;
    apr_interval_time_t dt;
    int i;

    log_printf(15, "count=%d\n", _tp_context_count);

    tbx_type_malloc_clear(tpc, gop_thread_pool_context_t, 1);

    // One-time global setup: only the caller that increments the counter
    // from 0 creates the shared pool, lock, and stats state.
    if (tbx_atomic_inc(_tp_context_count) == 0) {
        apr_pool_create(&_tp_pool, NULL);
        apr_thread_mutex_create(&_tp_lock, APR_THREAD_MUTEX_DEFAULT, _tp_pool);
        thread_pool_stats_init();
    }

    // Lazily create the per-thread recursion-depth key.
    // NOTE(review): this check is outside the one-time guard above, so a
    // second thread racing the first could see _tp_pool still NULL or
    // create the key twice — confirm callers serialize context creation.
    if (thread_local_depth_key == NULL) apr_threadkey_private_create(&thread_local_depth_key,_thread_pool_destructor, _tp_pool);

    tpc->pc = gop_hp_context_create(&_tp_base_portal);  //** Really just used for the submit

    // Start from defaults, then apply any caller-supplied overrides.
    default_thread_pool_config(tpc);
    if (min_threads > 0) tpc->min_threads = min_threads;
    if (max_threads > 0) tpc->max_threads = max_threads + 1;  //** Add one for the recursion depth starting offset being 1
    tpc->recursion_depth = max_recursion_depth + 1;  //** The min recursion normally starts at 1 so just slap an extra level and we don't care about 0|1 starting location
    tpc->max_concurrency = tpc->max_threads - tpc->recursion_depth;
    if (tpc->max_concurrency <= 0) {
        tpc->max_threads += 5 - tpc->max_concurrency;  //** Make sure we have at least 5 threads for work
        tpc->max_concurrency = tpc->max_threads - tpc->recursion_depth;
        log_printf(0, "Specified max threads and recursion depth don't work. Adjusting max_threads=%d\n", tpc->max_threads);
    }

    // Idle timeout for pool threads, converted from seconds to microseconds.
    dt = tpc->min_idle * 1000000;
    assert_result(apr_thread_pool_create(&(tpc->tp), tpc->min_threads, tpc->max_threads, _tp_pool), APR_SUCCESS);
    apr_thread_pool_idle_wait_set(tpc->tp, dt);
    apr_thread_pool_threshold_set(tpc->tp, 0);

    tpc->name = (tp_name == NULL) ? NULL : strdup(tp_name);

    // Zero all of the operation counters.
    tbx_atomic_set(tpc->n_ops, 0);
    tbx_atomic_set(tpc->n_completed, 0);
    tbx_atomic_set(tpc->n_started, 0);
    tbx_atomic_set(tpc->n_submitted, 0);
    tbx_atomic_set(tpc->n_running, 0);

    // Per-depth overflow bookkeeping and reserve stacks; -1 marks an
    // unused overflow slot.
    tbx_type_malloc(tpc->overflow_running_depth, int, tpc->recursion_depth);
    tbx_type_malloc(tpc->reserve_stack, tbx_stack_t *, tpc->recursion_depth);
    for (i=0; i<tpc->recursion_depth; i++) {
        tpc->overflow_running_depth[i] = -1;
        tpc->reserve_stack[i] = tbx_stack_new();
    }

    return(tpc);
}
//static void LLThreadLocalData::init(void) { // Only do this once. if (sThreadLocalDataKey) { return; } // This function is called by the main thread (these values are also needed in the next line). AIThreadID::set_main_thread_id(); AIThreadID::set_current_thread_id(); apr_status_t status = apr_threadkey_private_create(&sThreadLocalDataKey, &LLThreadLocalData::destroy, LLAPRRootPool::get()()); ll_apr_assert_status(status); // Or out of memory, or system-imposed limit on the // total number of keys per process {PTHREAD_KEYS_MAX} // has been exceeded. // Create the thread-local data for the main thread (this function is called by the main thread). LLThreadLocalData::create(NULL); }
void thread_pool_stats_init() { int i; char *eval; //** Check if we are enabling stat collection eval = NULL; apr_env_get(&eval, "GOP_TP_STATS", _tp_pool); if (eval != NULL) { i = atol(eval); if (i > 0) { _tp_stats = i; if (thread_local_stats_key == NULL) { apr_threadkey_private_create(&thread_local_stats_key,_thread_pool_destructor, _tp_pool); thread_pool_stats_make(); } } } }
/** * Initialize the thread local storage. * @return success or failure */ bool JNIThreadData::initThreadData() { // If already initialized -> nothing to do. if (g_key != NULL) return false; // Request a key for the thread local storage from the global pool // and register a callback function called when the thread is // deleted. apr_status_t apr_err = apr_threadkey_private_create(&g_key, del, JNIUtil::getPool()); if (apr_err) { JNIUtil::handleAPRError(apr_err, "apr_threadkey_private_create"); return false; } return true; }
//static void AIThreadLocalData::init(void) { // Only do this once. if (sThreadLocalDataKey) { return; } apr_status_t status = apr_threadkey_private_create(&sThreadLocalDataKey, &AIThreadLocalData::destroy, AIAPRRootPool::get()()); ll_apr_assert_status(status); // Or out of memory, or system-imposed limit on the // total number of keys per process {PTHREAD_KEYS_MAX} // has been exceeded. // Create the thread-local data for the main thread (this function is called by the main thread). AIThreadLocalData::create(NULL); #ifdef SHOW_ASSERT // This function is called by the main thread. main_thread_id = apr_os_thread_current(); #endif }
/* Create a Berkeley DB environment.
 *
 * Allocate a bdb_env_t describing the environment at PATH (storing both
 * the UTF-8 path and its local-style encoding in the same heap block),
 * register its cleanup on POOL, create the per-thread error-info TLS key,
 * and create the BDB environment handle.  On success *BDBP points at the
 * new descriptor; on most failures the descriptor is still set so callers
 * can consult it while checking the returned error. */
static svn_error_t *
create_env(bdb_env_t **bdbp, const char *path, apr_pool_t *pool)
{
  int db_err;
  bdb_env_t *bdb;
  const char *path_bdb;
  char *tmp_path, *tmp_path_bdb;
  apr_size_t path_size, path_bdb_size;

#if SVN_BDB_PATH_UTF8
  path_bdb = svn_dirent_local_style(path, pool);
#else
  SVN_ERR(svn_utf_cstring_from_utf8(&path_bdb,
                                    svn_dirent_local_style(path, pool),
                                    pool));
#endif

  /* Allocate the whole structure, including strings, from the heap,
     because it must survive the cache pool cleanup. */
  path_size = strlen(path) + 1;
  path_bdb_size = strlen(path_bdb) + 1;
  /* Using calloc() to ensure the padding bytes in bdb->key (which is used
   * as a hash key) are zeroed. */
  bdb = calloc(1, sizeof(*bdb) + path_size + path_bdb_size);
  if (bdb == NULL)
    {
      /* Fix: a failed allocation was previously dereferenced below.
         Report the OOM instead, with a NULL descriptor for the caller. */
      *bdbp = NULL;
      return svn_error_create(APR_ENOMEM, NULL,
                              "Can't allocate memory for the Berkeley DB"
                              " environment descriptor");
    }

  /* We must initialize this now, as our callers may assume their bdb
     pointer is valid when checking for errors.  */
  apr_pool_cleanup_register(pool, bdb, cleanup_env, apr_pool_cleanup_null);
  apr_cpystrn(bdb->errpfx_string, BDB_ERRPFX_STRING,
              sizeof(bdb->errpfx_string));

  /* Both path copies live in the tail of the single heap allocation. */
  bdb->path = tmp_path = (char*)(bdb + 1);
  bdb->path_bdb = tmp_path_bdb = tmp_path + path_size;
  apr_cpystrn(tmp_path, path, path_size);
  apr_cpystrn(tmp_path_bdb, path_bdb, path_bdb_size);
  bdb->pool = pool;
  *bdbp = bdb;

#if APR_HAS_THREADS
  {
    /* Per-thread error accumulation for this environment. */
    apr_status_t apr_err = apr_threadkey_private_create(&bdb->error_info,
                                                        cleanup_error_info,
                                                        pool);
    if (apr_err)
      return svn_error_create(apr_err, NULL,
                              "Can't allocate thread-specific storage"
                              " for the Berkeley DB environment descriptor");
  }
#endif /* APR_HAS_THREADS */

  db_err = db_env_create(&(bdb->env), 0);
  if (!db_err)
    {
      /* See the documentation at bdb_env_t's definition why the
         (char *) cast is safe and why it is done. */
      bdb->env->set_errpfx(bdb->env, (char *) bdb);

      /* bdb_error_gatherer is in parens to stop macro expansion. */
      bdb->env->set_errcall(bdb->env, (bdb_error_gatherer));

      /* Needed on Windows in case Subversion and Berkeley DB are using
         different C runtime libraries */
      db_err = bdb->env->set_alloc(bdb->env, malloc, realloc, free);

      /* If we detect a deadlock, select a transaction to abort at random
         from those participating in the deadlock. */
      if (!db_err)
        db_err = bdb->env->set_lk_detect(bdb->env, DB_LOCK_RANDOM);
    }
  return convert_bdb_error(bdb, db_err);
}
/* Child-init hook: create the `thread_lock` thread-local key from the
 * child's pool (no destructor) and hand back the APR status. */
apr_status_t h2_mplx_child_init(apr_pool_t *pool, server_rec *s)
{
    apr_status_t rv;

    rv = apr_threadkey_private_create(&thread_lock, NULL, pool);
    return rv;
}