/* Release m->lock if this call actually acquired it, clearing the
 * thread-local "held lock" marker first so a later enter_mutex() on
 * this thread no longer sees the mutex as already owned. */
static void leave_mutex(h2_mplx *m, int acquired)
{
    if (!acquired) {
        return;
    }
    apr_threadkey_private_set(NULL, thread_lock);
    apr_thread_mutex_unlock(m->lock);
}
WSGIThreadInfo *wsgi_thread_info(int create, int request) { WSGIThreadInfo *thread_handle = NULL; apr_threadkey_private_get((void**)&thread_handle, wsgi_thread_key); if (!thread_handle && create) { WSGIThreadInfo **entry = NULL; if (!wsgi_thread_details) { wsgi_thread_details = apr_array_make( wsgi_server->process->pool, 3, sizeof(char*)); } thread_handle = (WSGIThreadInfo *)apr_pcalloc( wsgi_server->process->pool, sizeof(WSGIThreadInfo)); thread_handle->log_buffer = NULL; thread_handle->thread_id = wsgi_total_threads++; entry = (WSGIThreadInfo **)apr_array_push(wsgi_thread_details); *entry = thread_handle; apr_threadkey_private_set(thread_handle, wsgi_thread_key); } if (thread_handle && request && !thread_handle->request_thread) { thread_handle->request_thread = 1; wsgi_request_threads++; } return thread_handle; }
/**
 * Get the thread local storage for this thread, allocating it on first
 * use.
 * @return thread local storage, or NULL on APR error
 */
JNIThreadData *JNIThreadData::getThreadData()
{
  // We should never be called before initThreadData
  if (g_key == NULL)
    return NULL;

  // Retrieve the thread local storage from APR.
  JNIThreadData *data = NULL;
  apr_status_t apr_err = apr_threadkey_private_get
    (reinterpret_cast<void**>(&data), g_key);
  if (apr_err)
    {
      JNIUtil::handleAPRError(apr_err, "apr_threadkey_private_get");
      return NULL;
    }

  // Not already allocated.
  if (data == NULL)
    {
      // Allocate and store to APR.
      data = new JNIThreadData;
      apr_err = apr_threadkey_private_set(data, g_key);
      if (apr_err)
        {
          // Fix: don't leak the object we failed to store in TLS.
          delete data;
          JNIUtil::handleAPRError(apr_err, "apr_threadkey_private_set");
          return NULL;
        }
    }
  return data;
}
/* Close the BDB environment baton BDB_BATON, releasing this thread's
 * error-info reference and, when the last reference is dropped, the
 * thread-local error record itself.  The actual environment teardown is
 * delegated to svn_fs_bdb__close_internal() under the cache lock. */
svn_error_t * svn_fs_bdb__close(bdb_env_baton_t *bdb_baton)
{
    bdb_env_t *bdb = bdb_baton->bdb;

    SVN_ERR_ASSERT(bdb_baton->env == bdb_baton->bdb->env);
    SVN_ERR_ASSERT(bdb_baton->error_info->refcount > 0);

    /* Neutralize bdb_baton's pool cleanup to prevent double-close. See
       cleanup_env_baton(). */
    bdb_baton->bdb = NULL;

    /* Note that we only bother with this cleanup if the pool is non-NULL, to
       guard against potential races between this and the cleanup_env cleanup
       callback. It's not clear if that can actually happen, but better safe
       than sorry. */
    if (0 == --bdb_baton->error_info->refcount && bdb->pool)
    {
        svn_error_clear(bdb_baton->error_info->pending_errors);
#if APR_HAS_THREADS
        /* error_info was malloc'd (not pool-allocated), so free it and
           clear the thread-key slot that pointed at it. */
        free(bdb_baton->error_info);
        apr_threadkey_private_set(NULL, bdb->error_info);
#endif
    }

    /* This may run during final pool cleanup when the lock is NULL. */
    SVN_MUTEX__WITH_LOCK(bdb_cache_lock, svn_fs_bdb__close_internal(bdb));

    return SVN_NO_ERROR;
}
//static void AIThreadLocalData::create(LLThread* threadp) { AIThreadLocalData* new_tld = new AIThreadLocalData; if (threadp) { threadp->mThreadLocalData = new_tld; } apr_status_t status = apr_threadkey_private_set(new_tld, sThreadLocalDataKey); llassert_always(status == APR_SUCCESS); }
/* Return a pointer to this thread's recursion-depth counter, allocating
 * and zero-initializing it on first use and caching it in APR TLS.
 * NOTE(review): this definition appears truncated in this chunk -- the
 * function's closing brace and return statement are not visible here. */
int *_thread_local_depth_ptr() { int *my_depth = NULL; apr_threadkey_private_get((void *)&my_depth, thread_local_depth_key); if (my_depth == NULL ) { tbx_type_malloc_clear(my_depth, int, 1); *my_depth = 0; apr_threadkey_private_set(my_depth, thread_local_depth_key); }
//static void LLThreadLocalData::create(LLThread* threadp) { LLThreadLocalData* new_tld = new LLThreadLocalData(threadp ? threadp->mName.c_str() : "main thread"); if (threadp) { threadp->mThreadLocalData = new_tld; } apr_status_t status = apr_threadkey_private_set(new_tld, sThreadLocalDataKey); llassert_always(status == APR_SUCCESS); }
// Store VALUE in this instance's APR thread-local slot; fatal on failure.
void LLThreadLocalPointerBase::set( void* value )
{
    // The TLS machinery must be initialized and this instance must own a key.
    llassert(sInitialized && mThreadKey);

    apr_status_t result = apr_threadkey_private_set((void*)value, mThreadKey);
    if (result == APR_SUCCESS)
        return;

    ll_apr_warn_status(result);
    LL_ERRS() << "Failed to set thread local data" << LL_ENDL;
}
/* Get the thread-specific error info from a bdb_env_t. */ static bdb_error_info_t * get_error_info(const bdb_env_t *bdb) { void *priv; apr_threadkey_private_get(&priv, bdb->error_info); if (!priv) { priv = calloc(1, sizeof(bdb_error_info_t)); apr_threadkey_private_set(priv, bdb->error_info); } return priv; }
thread_local_stats_t *_thread_local_stats_ptr() { thread_local_stats_t *my = NULL; apr_threadkey_private_get((void *)&my, thread_local_stats_key); if (my == NULL ) { tbx_type_malloc_clear(my, thread_local_stats_t, 1); apr_thread_mutex_lock(_tp_lock); my->concurrent_max = _tp_concurrent_max; memcpy(my->depth_concurrent, _tp_depth_concurrent_max, sizeof(_tp_depth_concurrent_max)); //** Set to the current global apr_thread_mutex_unlock(_tp_lock); apr_threadkey_private_set(my, thread_local_stats_key); } return(my); }
/* Return this thread's private copy of CFG, creating the copy on first
 * use and otherwise re-validating the cached copy against CFG. */
static lisp_cfg_t *
local_lisp_cfg (lisp_cfg_t *cfg)
{
  void *cached = NULL;

  apr_threadkey_private_get(&cached, cfg_key);
  if (cached != NULL)
    {
      /* Already have a thread-local copy; check it is still usable. */
      check_cfg_for_reuse(cached, cfg);
      return (lisp_cfg_t*) cached;
    }

  /* First use on this thread: copy the master config and cache it. */
  cached = copy_lisp_cfg (socket_pool, cfg);
  apr_threadkey_private_set(cached, cfg_key);
  return cached;
}
/** * Allocate a new ThreadData for the current call from Java and push * it on the stack */ void JNIThreadData::pushNewThreadData() { JNIThreadData *data = NULL; apr_status_t apr_err = apr_threadkey_private_get (reinterpret_cast<void**>(&data), g_key); if (apr_err) { JNIUtil::handleAPRError(apr_err, "apr_threadkey_private_get"); return; } JNIThreadData *newData = new JNIThreadData(); newData->m_previous =data; apr_err = apr_threadkey_private_set(newData, g_key); if (apr_err) { JNIUtil::handleAPRError(apr_err, "apr_threadkey_private_set"); return; } }
/** * Pop the current ThreadData from the stack, because the call * completed. */ void JNIThreadData::popThreadData() { JNIThreadData *data = NULL; apr_status_t apr_err = apr_threadkey_private_get (reinterpret_cast<void**>(&data), g_key); if (apr_err) { JNIUtil::handleAPRError(apr_err, "apr_threadkey_private_get"); return; } if (data == NULL) return; JNIThreadData *oldData = data->m_previous; delete data; apr_err = apr_threadkey_private_set(oldData, g_key); if (apr_err) { JNIUtil::handleAPRError(apr_err, "apr_threadkey_private_set"); return; } }
/* Acquire m->lock unless the calling thread already holds it.  On
 * return, *pacquired is 1 when this call actually locked the mutex
 * (and the caller must later leave_mutex() with acquired=1), 0 when
 * the lock was already held by this thread. */
static apr_status_t enter_mutex(h2_mplx *m, int *pacquired)
{
    apr_status_t status;
    void *mutex = NULL;

    /* Enter the mutex if this thread already holds the lock or
     * if we can acquire it. Only in the latter case do we unlock
     * on leaving the mutex.
     * This allows recursive entering of the mutex from the same thread,
     * which is what we need in certain situations involving callbacks.
     */
    apr_threadkey_private_get(&mutex, thread_lock);
    if (mutex == m->lock) {
        /* TLS says this thread stored m->lock on a previous (outer)
         * enter_mutex(), so it already owns the mutex. */
        *pacquired = 0;
        return APR_SUCCESS;
    }
    status = apr_thread_mutex_lock(m->lock);
    *pacquired = (status == APR_SUCCESS);
    if (*pacquired) {
        /* Remember the held lock so a nested call is recognized above. */
        apr_threadkey_private_set(m->lock, thread_lock);
    }
    return status;
}
/* Win32 service entry point: initializes nxlog (when launched by the
 * service manager), loads configuration and modules, registers with the
 * service control manager, runs the main loop, and reports
 * SERVICE_STOPPED on shutdown or fatal error.
 * NOTE(review): thread_context is a stack local whose address is stored
 * in TLS via nx_get_context_key(); this is only safe while this function
 * is running -- confirm nothing reads it after return.
 * NOTE(review): nx_init() is invoked inside ASSERT(); if ASSERT compiles
 * away in release builds the initialization would be skipped -- confirm
 * ASSERT always evaluates its argument in this codebase. */
static void WINAPI nx_win32_svc_main(DWORD argc, LPTSTR *argv)
{
    nx_context_t thread_context;
    nx_exception_t e;

    if ( _nxlog_initializer == 0 )
    { // running from service manager
        ASSERT(nx_init(&argc, &argv, NULL) == TRUE);
        nxlog_init(&nxlog);
        nx_logger_disable_foreground();
    }
    else if ( _nxlog_initializer != apr_os_thread_current() )
    { // service dispatcher runs in a new thread, we need
      // to initialize the exception context.
        _nxlog_initializer = apr_os_thread_current();
        memset(&thread_context, 0, sizeof(nx_context_t));
        init_exception_context(&thread_context.exception_context);
        apr_threadkey_private_set(&thread_context, nx_get_context_key());
    }

    log_debug("nx_win32_svc_main");

    try
    {
        // read config cache
        nx_config_cache_read();
        log_debug("nxlog cache read");

        // load DSO and read and verify module config
        nx_ctx_config_modules(nxlog.ctx);
        log_debug("nxlog config OK");

        // initialize modules
        nx_ctx_init_modules(nxlog.ctx);

        // initialize log routes
        nx_ctx_init_routes(nxlog.ctx);
        nx_ctx_init_jobs(nxlog.ctx);
        nx_ctx_restore_queues(nxlog.ctx);

        // setup threadpool
        nxlog_create_threads(&nxlog);

        // start modules
        nx_ctx_start_modules(nxlog.ctx);

        if ( nxlog.foreground != TRUE )
        {
            // register to service manager
            svc_status_handle = RegisterServiceCtrlHandler("nxlog", nx_win32_svc_change);
            if ( svc_status_handle == 0 )
            {
                nx_win32_error("RegisterServiceCtrlHandler() failed, couldn't register the service control handler");
            }
            // Signal to svc manager that we are running
            svc_status.dwWin32ExitCode = 0;
            svc_status.dwServiceSpecificExitCode = 0;
            svc_status.dwCheckPoint = 0;
            svc_status.dwWaitHint = 0;
            svc_status.dwServiceType = SERVICE_WIN32;
            svc_status.dwCurrentState = SERVICE_RUNNING;
            svc_status.dwControlsAccepted = SERVICE_ACCEPT_STOP;
            if ( SetServiceStatus(svc_status_handle, &svc_status) == FALSE )
            {
                nx_win32_error("Cannot send start service status update");
            }
        }
        log_info(PACKAGE"-"VERSION_STRING" started");
    }
    catch(e)
    {
        // Fatal startup failure: report stopped state and exit with the
        // exception's code.
        log_exception(e);
        log_error("exiting...");
        svc_status.dwCurrentState = SERVICE_STOPPED;
        SetServiceStatus(svc_status_handle, &svc_status);
        exit(e.code);
    }

    // mainloop
    nxlog_mainloop(&nxlog, FALSE);

    nxlog_shutdown(&nxlog);

    if ( nxlog.foreground != TRUE )
    {
        // Signal back that we are stopped
        svc_status.dwCurrentState = SERVICE_STOPPED;
        SetServiceStatus(svc_status_handle, &svc_status);
        log_debug("service stopped");
    }
    nxlog_exit_function();
}
// This should be called once for every thread, before it uses getLocalAPRFilePool. // static void LLVolatileAPRPool::createLocalAPRFilePool() { void* thread_local_data = new LLVolatileAPRPool; apr_status_t status = apr_threadkey_private_set(thread_local_data, sLocalAPRFilePoolKey); llassert_always(status == APR_SUCCESS); }
/* Publish THREAD as the calling thread's hythread_t via APR TLS;
 * the APR status is intentionally ignored. */
void VMCALL hythread_set_self(hythread_t thread)
{
    (void)apr_threadkey_private_set(thread, TM_THREAD_KEY);
}