// static LLVolatileAPRPool* LLVolatileAPRPool::getLocalAPRFilePool() { void* thread_local_data; apr_status_t status = apr_threadkey_private_get(&thread_local_data, sLocalAPRFilePoolKey); llassert_always(status == APR_SUCCESS); return reinterpret_cast<LLVolatileAPRPool*>(thread_local_data); }
WSGIThreadInfo *wsgi_thread_info(int create, int request) { WSGIThreadInfo *thread_handle = NULL; apr_threadkey_private_get((void**)&thread_handle, wsgi_thread_key); if (!thread_handle && create) { WSGIThreadInfo **entry = NULL; if (!wsgi_thread_details) { wsgi_thread_details = apr_array_make( wsgi_server->process->pool, 3, sizeof(char*)); } thread_handle = (WSGIThreadInfo *)apr_pcalloc( wsgi_server->process->pool, sizeof(WSGIThreadInfo)); thread_handle->log_buffer = NULL; thread_handle->thread_id = wsgi_total_threads++; entry = (WSGIThreadInfo **)apr_array_push(wsgi_thread_details); *entry = thread_handle; apr_threadkey_private_set(thread_handle, wsgi_thread_key); } if (thread_handle && request && !thread_handle->request_thread) { thread_handle->request_thread = 1; wsgi_request_threads++; } return thread_handle; }
/**
 * Get the thread local storage for this thread, allocating it on
 * first access.
 * @return thread local storage, or NULL if the key is missing or an
 *         APR error occurred
 */
JNIThreadData *JNIThreadData::getThreadData()
{
  // We should never be called before initThreadData
  if (g_key == NULL)
    return NULL;

  // Retrieve the thread local storage from APR.
  JNIThreadData *data = NULL;
  apr_status_t apr_err = apr_threadkey_private_get
    (reinterpret_cast<void**>(&data), g_key);
  if (apr_err)
    {
      JNIUtil::handleAPRError(apr_err, "apr_threadkey_private_get");
      return NULL;
    }

  // Not already allocated.
  if (data == NULL)
    {
      // Allocate and store to APR.
      data = new JNIThreadData;
      apr_err = apr_threadkey_private_set(data, g_key);
      if (apr_err)
        {
          // FIX: the freshly allocated data was leaked here when the
          // TLS store failed; release it before bailing out.
          delete data;
          JNIUtil::handleAPRError(apr_err, "apr_threadkey_private_set");
          return NULL;
        }
    }
  return data;
}
//static AIThreadLocalData& AIThreadLocalData::tldata(void) { if (!sThreadLocalDataKey) AIThreadLocalData::init(); void* data; apr_status_t status = apr_threadkey_private_get(&data, sThreadLocalDataKey); llassert_always(status == APR_SUCCESS); return *static_cast<AIThreadLocalData*>(data); }
hythread_t hythread_self_slow() { hythread_t thread; apr_status_t UNUSED apr_status; // Extract hythread_t from TLS apr_status = apr_threadkey_private_get((void **)(&thread), TM_THREAD_KEY); assert(apr_status == APR_SUCCESS); return thread; }
/* Return a pointer to this thread's recursion-depth counter, allocating
 * a zero-initialized one and registering it in TLS on first use.
 *
 * NOTE(review): this definition appears truncated in this chunk — the
 * trailing "return my_depth;" and closing brace are not visible here.
 * Confirm against the full source file before relying on it. */
int *_thread_local_depth_ptr() {
    int *my_depth = NULL;
    /* Return status is ignored; a failed lookup leaves my_depth NULL and
     * is treated the same as "not yet allocated". */
    apr_threadkey_private_get((void *)&my_depth, thread_local_depth_key);
    if (my_depth == NULL ) {
        tbx_type_malloc_clear(my_depth, int, 1);
        *my_depth = 0;
        apr_threadkey_private_set(my_depth, thread_local_depth_key);
    }
/* Get the thread-specific error info from a bdb_env_t. */ static bdb_error_info_t * get_error_info(const bdb_env_t *bdb) { void *priv; apr_threadkey_private_get(&priv, bdb->error_info); if (!priv) { priv = calloc(1, sizeof(bdb_error_info_t)); apr_threadkey_private_set(priv, bdb->error_info); } return priv; }
void* LLThreadLocalPointerBase::get() const { // llassert(sInitialized); void* ptr; apr_status_t result = apr_threadkey_private_get(&ptr, mThreadKey); if (result != APR_SUCCESS) { ll_apr_warn_status(result); LL_ERRS() << "Failed to get thread local data" << LL_ENDL; } return ptr; }
thread_local_stats_t *_thread_local_stats_ptr() { thread_local_stats_t *my = NULL; apr_threadkey_private_get((void *)&my, thread_local_stats_key); if (my == NULL ) { tbx_type_malloc_clear(my, thread_local_stats_t, 1); apr_thread_mutex_lock(_tp_lock); my->concurrent_max = _tp_concurrent_max; memcpy(my->depth_concurrent, _tp_depth_concurrent_max, sizeof(_tp_depth_concurrent_max)); //** Set to the current global apr_thread_mutex_unlock(_tp_lock); apr_threadkey_private_set(my, thread_local_stats_key); } return(my); }
/* Return this thread's private copy of CFG, creating it on first use.
 * When a cached copy already exists it is validated against CFG via
 * check_cfg_for_reuse() before being returned. */
static lisp_cfg_t *
local_lisp_cfg (lisp_cfg_t *cfg)
{
  void *cached = NULL;

  apr_threadkey_private_get(&cached, cfg_key);
  if (cached != NULL)
    {
      check_cfg_for_reuse(cached, cfg);
      return (lisp_cfg_t*) cached;
    }

  /* First use on this thread: copy the config and stash it in TLS. */
  cached = copy_lisp_cfg (socket_pool, cfg);
  apr_threadkey_private_set(cached, cfg_key);
  return cached;
}
/** * Allocate a new ThreadData for the current call from Java and push * it on the stack */ void JNIThreadData::pushNewThreadData() { JNIThreadData *data = NULL; apr_status_t apr_err = apr_threadkey_private_get (reinterpret_cast<void**>(&data), g_key); if (apr_err) { JNIUtil::handleAPRError(apr_err, "apr_threadkey_private_get"); return; } JNIThreadData *newData = new JNIThreadData(); newData->m_previous =data; apr_err = apr_threadkey_private_set(newData, g_key); if (apr_err) { JNIUtil::handleAPRError(apr_err, "apr_threadkey_private_set"); return; } }
/** * Pop the current ThreadData from the stack, because the call * completed. */ void JNIThreadData::popThreadData() { JNIThreadData *data = NULL; apr_status_t apr_err = apr_threadkey_private_get (reinterpret_cast<void**>(&data), g_key); if (apr_err) { JNIUtil::handleAPRError(apr_err, "apr_threadkey_private_get"); return; } if (data == NULL) return; JNIThreadData *oldData = data->m_previous; delete data; apr_err = apr_threadkey_private_set(oldData, g_key); if (apr_err) { JNIUtil::handleAPRError(apr_err, "apr_threadkey_private_set"); return; } }
/* Enter M's mutex unless this thread already holds it.
 * On success *PACQUIRED is set to 1 when the lock was acquired here
 * (and must therefore be released by the matching leave), or 0 when
 * this thread already held it. */
static apr_status_t enter_mutex(h2_mplx *m, int *pacquired)
{
    apr_status_t status;
    void *mutex = NULL;

    /* Enter the mutex if this thread already holds the lock or
     * if we can acquire it. Only in the latter case do we unlock
     * on leaving the mutex.
     * This allows recursive entering of the mutex from the same thread,
     * which is what we need in certain situations involving callbacks
     */
    apr_threadkey_private_get(&mutex, thread_lock);
    if (mutex == m->lock) {
        /* This thread already holds m->lock: treat as a recursive enter. */
        *pacquired = 0;
        return APR_SUCCESS;
    }

    status = apr_thread_mutex_lock(m->lock);
    *pacquired = (status == APR_SUCCESS);
    if (*pacquired) {
        /* Remember which lock this thread holds so a reentrant call
         * can detect it in the check above. */
        apr_threadkey_private_set(m->lock, thread_lock);
    }
    return status;
}