/*
 * Try to start exactly one new pool thread.
 *
 * Returns TRUE if a slot was reserved and a thread was spawned, FALSE if the
 * runtime is shutting down or the pool is already at max_threads.  The slot
 * is reserved lock-free by CAS-incrementing tp->nthreads; on CAS failure we
 * retry with a freshly re-read count.
 */
static gboolean threadpool_start_thread (ThreadPool *tp) {
	gint n;
	guint32 stack_size;
	MonoInternalThread *thread;
	/* I/O pool threads get a small stack; worker threads use the default (0). */
	stack_size = (!tp->is_io) ? 0 : SMALL_STACK;
	while (!mono_runtime_is_shutting_down () && (n = tp->nthreads) < tp->max_threads) {
		/* Reserve one thread slot; loser of the race re-reads and retries. */
		if (InterlockedCompareExchange (&tp->nthreads, n + 1, n) == n) {
#ifndef DISABLE_PERFCOUNTERS
			mono_perfcounter_update_value (tp->pc_nthreads, TRUE, 1);
#endif
			if (tp->is_io) {
				/* I/O threads are not tracked in the `threads` array. */
				thread = mono_thread_create_internal (mono_get_root_domain (), tp->async_invoke, tp, TRUE, stack_size);
			} else {
				/* Worker threads are recorded in the global `threads` array
				 * under threads_lock (presumably for later management/shutdown
				 * — the consumer is outside this view). */
				mono_mutex_lock (&threads_lock);
				thread = mono_thread_create_internal (mono_get_root_domain (), tp->async_invoke, tp, TRUE, stack_size);
				g_assert (threads != NULL);
				g_ptr_array_add (threads, thread);
				mono_mutex_unlock (&threads_lock);
			}
			return TRUE;
		}
	}
	return FALSE;
}
/*
 * Enqueue `njobs` work items into `tp` and make sure threads exist to run them.
 *
 * On the first call for this pool (pool_status CAS 0 -> 1) it lazily starts the
 * monitor thread and the first worker.  Jobs belonging to unloading domains are
 * skipped.  Non-I/O jobs are pushed to the calling thread's work-stealing queue
 * when possible, otherwise onto the shared concurrent queue.
 */
static void threadpool_append_jobs (ThreadPool *tp, MonoObject **jobs, gint njobs) {
	MonoObject *ar;
	gint i;
	if (mono_runtime_is_shutting_down ())
		return;
	/* One-time pool startup: only the thread that wins the CAS runs this. */
	if (tp->pool_status == 0 && InterlockedCompareExchange (&tp->pool_status, 1, 0) == 0) {
		if (!tp->is_io) {
			monitor_internal_thread = mono_thread_create_internal (mono_get_root_domain (), monitor_thread, NULL, TRUE, SMALL_STACK);
			/* The monitor thread must survive forced thread management. */
			monitor_internal_thread->flags |= MONO_THREAD_FLAG_DONT_MANAGE;
			threadpool_start_thread (tp);
		}
		/* Create on demand up to min_threads to avoid startup penalty for apps that don't use
		 * the threadpool that much */
		if (mono_config_is_server_mode ()) {
			mono_thread_create_internal (mono_get_root_domain (), threadpool_start_idle_threads, tp, TRUE, SMALL_STACK);
		}
	}
	InterlockedAdd (&monitor_njobs, njobs);
	/* Wake the monitor if it is sleeping; if it is in the process of falling
	 * asleep, flip it back to AWAKE so it notices the new jobs instead. */
	if (monitor_state == MONITOR_STATE_SLEEPING && InterlockedCompareExchange (&monitor_state, MONITOR_STATE_AWAKE, MONITOR_STATE_SLEEPING) == MONITOR_STATE_SLEEPING)
		MONO_SEM_POST (&monitor_sem);
	if (monitor_state == MONITOR_STATE_FALLING_ASLEEP)
		InterlockedCompareExchange (&monitor_state, MONITOR_STATE_AWAKE, MONITOR_STATE_FALLING_ASLEEP);
	for (i = 0; i < njobs; i++) {
		ar = jobs [i];
		if (ar == NULL || mono_domain_is_unloading (ar->vtable->domain))
			continue; /* Might happen when cleaning domain jobs */
		threadpool_jobs_inc (ar);
#ifndef DISABLE_PERFCOUNTERS
		mono_perfcounter_update_value (tp->pc_nitems, TRUE, 1);
#endif
		/* Prefer the per-thread work-stealing queue for CPU work; fall back
		 * to the shared queue if the local push is not possible. */
		if (!tp->is_io && mono_wsq_local_push (ar))
			continue;
		mono_cq_enqueue (tp->queue, ar);
	}
#if DEBUG
	InterlockedAdd (&tp->njobs, njobs);
#endif
	/* Signal at most one waiter per new job (capped at max_threads). */
	for (i = 0; tp->waiting > 0 && i < MIN(njobs, tp->max_threads); i++)
		pulse_on_new_job (tp);
}
/*
 * One-time setup of the I/O threadpool state.
 *
 * Allocates the singleton ThreadPoolIO, registers its updates array as a GC
 * root, selects the event backend (poll by default; epoll/kqueue only when
 * MONO_ENABLE_AIO is set and the platform supports it), creates the wakeup
 * pipes, initializes the backend, and spawns the selector thread.
 * Must be called exactly once: asserts threadpool_io is still NULL.
 */
static void initialize (void) {
	g_assert (!threadpool_io);
	threadpool_io = g_new0 (ThreadPoolIO, 1);
	g_assert (threadpool_io);
	mono_mutex_init_recursive (&threadpool_io->updates_lock);
	mono_cond_init (&threadpool_io->updates_cond, 0);
	/* The updates array holds managed pointers, so the GC must scan it. */
	mono_gc_register_root ((void*)&threadpool_io->updates [0], sizeof (threadpool_io->updates), MONO_GC_DESCRIPTOR_NULL, MONO_ROOT_SOURCE_THREAD_POOL, "i/o thread pool updates list");
	threadpool_io->updates_size = 0;
	/* poll is the portable default; async backends are opt-in. */
	threadpool_io->backend = backend_poll;
	if (g_getenv ("MONO_ENABLE_AIO") != NULL) {
#if defined(HAVE_EPOLL)
		threadpool_io->backend = backend_epoll;
#elif defined(HAVE_KQUEUE)
		threadpool_io->backend = backend_kqueue;
#endif
	}
	/* Pipes must exist before backend init: the read end is registered below. */
	wakeup_pipes_init ();
	if (!threadpool_io->backend.init (threadpool_io->wakeup_pipes [0]))
		g_error ("initialize: backend->init () failed");
	if (!mono_thread_create_internal (mono_get_root_domain (), selector_thread, NULL, TRUE, SMALL_STACK))
		g_error ("initialize: mono_thread_create_internal () failed");
}
/*
 * Lazily initialize the socket I/O machinery (double-checked init).
 *
 * data->inited states: 0 = untouched, 1 = initializing, 2 = initialized,
 * 3 = cleaned up.  The CAS 0 -> 1 elects one initializer; losers spin-sleep
 * until inited reaches 2 (or 3).  The winner builds the socket-state table,
 * picks the event backend, starts the wait thread, and finally publishes
 * inited = 2.
 */
static void socket_io_init (SocketIOData *data) {
	int inited;
	if (data->inited >= 2) // 2 -> initialized, 3-> cleaned up
		return;
	inited = InterlockedCompareExchange (&data->inited, 1, 0);
	if (inited >= 1) {
		/* Another thread is (or was) the initializer: wait for it to finish. */
		while (TRUE) {
			if (data->inited >= 2)
				return;
			SleepEx (1, FALSE);
		}
	}
	EnterCriticalSection (&data->io_lock);
	/* Values are managed objects, so use a GC-aware hash table. */
	data->sock_to_state = mono_g_hash_table_new_type (g_direct_hash, g_direct_equal, MONO_HASH_VALUE_GC);
#ifdef HAVE_EPOLL
	data->event_system = EPOLL_BACKEND;
#elif defined(HAVE_KQUEUE)
	data->event_system = KQUEUE_BACKEND;
#else
	data->event_system = POLL_BACKEND;
#endif
	if (g_getenv ("MONO_DISABLE_AIO") != NULL)
		data->event_system = POLL_BACKEND;
	init_event_system (data);
	mono_thread_create_internal (mono_get_root_domain (), data->wait, data, TRUE, SMALL_STACK);
	LeaveCriticalSection (&data->io_lock);
	/* NOTE(review): plain store of inited = 2 happens after the lock is
	 * released, so spinning waiters may observe it without a memory barrier
	 * ordering it against the writes above — looks intentional (waiters only
	 * read after inited >= 2), but worth confirming on weakly-ordered CPUs. */
	data->inited = 2;
	threadpool_start_thread (&async_io_tp);
}
/*
 * Enqueue `njobs` work items into `tp` (variant without the monitor state
 * machine).  First call for the pool (pool_status CAS 0 -> 1) lazily starts
 * the monitor thread and the first worker.  Roughly every 10th non-I/O job is
 * timestamped (add_time) — presumably sampled for queue-latency monitoring;
 * the consumer is outside this view.
 */
static void threadpool_append_jobs (ThreadPool *tp, MonoObject **jobs, gint njobs) {
	static int job_counter;
	MonoObject *ar;
	gint i;
	if (mono_runtime_is_shutting_down ())
		return;
	/* One-time pool startup: only the thread that wins the CAS runs this. */
	if (tp->pool_status == 0 && InterlockedCompareExchange (&tp->pool_status, 1, 0) == 0) {
		if (!tp->is_io) {
			mono_thread_create_internal (mono_get_root_domain (), monitor_thread, NULL, TRUE, SMALL_STACK);
			threadpool_start_thread (tp);
		}
		/* Create on demand up to min_threads to avoid startup penalty for apps that don't use
		 * the threadpool that much */
		if (mono_config_is_server_mode ()) {
			mono_thread_create_internal (mono_get_root_domain (), threadpool_start_idle_threads, tp, TRUE, SMALL_STACK);
		}
	}
	for (i = 0; i < njobs; i++) {
		ar = jobs [i];
		if (ar == NULL || mono_domain_is_unloading (ar->vtable->domain))
			continue; /* Might happen when cleaning domain jobs */
		/* Sample ~1 in 10 CPU jobs with an enqueue timestamp. */
		if (!tp->is_io && (InterlockedIncrement (&job_counter) % 10) == 0) {
			MonoAsyncResult *o = (MonoAsyncResult *) ar;
			o->add_time = mono_100ns_ticks ();
		}
		threadpool_jobs_inc (ar);
#ifndef DISABLE_PERFCOUNTERS
		mono_perfcounter_update_value (tp->pc_nitems, TRUE, 1);
#endif
		/* Prefer the per-thread work-stealing queue for CPU work; fall back
		 * to the shared queue if the local push is not possible. */
		if (!tp->is_io && mono_wsq_local_push (ar))
			continue;
		mono_cq_enqueue (tp->queue, ar);
	}
	/* Signal at most one waiter per new job (capped at max_threads). */
	for (i = 0; tp->waiting > 0 && i < MIN(njobs, tp->max_threads); i++)
		pulse_on_new_job (tp);
}
/*
 * Managed icall backing ThreadPool.SetMinThreads.
 *
 * Validates both requested minimums against their pools' current maximums
 * (each must be in (0, max]), publishes them with interlocked stores, and, if
 * either pool currently has fewer threads than the new minimum, spawns a
 * helper thread that grows the pool up to min_threads in the background.
 * Returns FALSE without changing anything when a value is out of range.
 */
MonoBoolean ves_icall_System_Threading_ThreadPool_SetMinThreads (gint workerThreads, gint completionPortThreads) {
	gint worker_cap;
	gint io_cap;

	worker_cap = async_tp.max_threads;
	if (workerThreads <= 0 || workerThreads > worker_cap)
		return FALSE;

	io_cap = async_io_tp.max_threads;
	if (completionPortThreads <= 0 || completionPortThreads > io_cap)
		return FALSE;

	/* Publish the new minimums atomically. */
	InterlockedExchange (&async_tp.min_threads, workerThreads);
	InterlockedExchange (&async_io_tp.min_threads, completionPortThreads);

	/* Grow each pool asynchronously if it is below its new minimum. */
	if (workerThreads > async_tp.nthreads)
		mono_thread_create_internal (mono_get_root_domain (), threadpool_start_idle_threads, &async_tp, TRUE, SMALL_STACK);
	if (completionPortThreads > async_io_tp.nthreads)
		mono_thread_create_internal (mono_get_root_domain (), threadpool_start_idle_threads, &async_io_tp, TRUE, SMALL_STACK);

	return TRUE;
}
/*
 * Try to start exactly one new pool thread.
 *
 * Returns TRUE if a slot was reserved and a thread was spawned, FALSE if the
 * runtime is shutting down or the pool is already at max_threads.  The slot
 * is reserved lock-free by CAS-incrementing tp->nthreads; on CAS failure we
 * retry with a freshly re-read count.
 *
 * Fix: guard the perfcounter update with DISABLE_PERFCOUNTERS, matching the
 * other call sites in this file — the unguarded call breaks builds configured
 * with --disable-perfcounters.
 */
static gboolean threadpool_start_thread (ThreadPool *tp) {
	gint n;
	guint32 stack_size;
	/* I/O pool threads get a small stack; worker threads use the default (0). */
	stack_size = (!tp->is_io) ? 0 : SMALL_STACK;
	while (!mono_runtime_is_shutting_down () && (n = tp->nthreads) < tp->max_threads) {
		/* Reserve one thread slot; loser of the race re-reads and retries. */
		if (InterlockedCompareExchange (&tp->nthreads, n + 1, n) == n) {
#ifndef DISABLE_PERFCOUNTERS
			mono_perfcounter_update_value (tp->pc_nthreads, TRUE, 1);
#endif
			mono_thread_create_internal (mono_get_root_domain (), tp->async_invoke, tp, TRUE, stack_size);
			return TRUE;
		}
	}
	return FALSE;
}
/*
 * Install the statistical (sampling) profiler: pick a sampling signal,
 * install its handler, register the diagnostic counters, and start the
 * sampling thread, recording its native thread id in `sampling_thread`.
 */
void mono_runtime_setup_stat_profiler (void) {
	/*
	 * Use a real-time signal when possible. This gives us roughly a 99% signal
	 * delivery rate in all cases. On the other hand, using a regular signal
	 * tends to result in awful delivery rates when the application is heavily
	 * loaded.
	 *
	 * We avoid real-time signals on Android as they're super broken in certain
	 * API levels (too small sigset_t, nonsensical SIGRTMIN/SIGRTMAX values,
	 * etc).
	 *
	 * TODO: On Mac, we should explore using the Mach thread suspend/resume
	 * functions and doing the stack walk from the sampling thread. This would
	 * get us a 100% sampling rate. However, this may interfere with the GC's
	 * STW logic. Could perhaps be solved by taking the suspend lock.
	 */
#if defined (USE_POSIX_BACKEND) && defined (SIGRTMIN) && !defined (HOST_ANDROID)
	/* Just take the first real-time signal we can get. */
	profiler_signal = mono_threads_suspend_search_alternative_signal ();
#else
	profiler_signal = SIGPROF;
#endif
	/* SA_RESTART keeps interrupted syscalls in the target from failing with EINTR. */
	add_signal_handler (profiler_signal, profiler_signal_handler, SA_RESTART);
	mono_counters_register ("Sampling signals sent", MONO_COUNTER_UINT | MONO_COUNTER_PROFILER | MONO_COUNTER_MONOTONIC, &profiler_signals_sent);
	mono_counters_register ("Sampling signals received", MONO_COUNTER_UINT | MONO_COUNTER_PROFILER | MONO_COUNTER_MONOTONIC, &profiler_signals_received);
	mono_counters_register ("Sampling signals accepted", MONO_COUNTER_UINT | MONO_COUNTER_PROFILER | MONO_COUNTER_MONOTONIC, &profiler_signals_accepted);
	mono_counters_register ("Shutdown signals received", MONO_COUNTER_UINT | MONO_COUNTER_PROFILER | MONO_COUNTER_MONOTONIC, &profiler_interrupt_signals_received);
	mono_os_event_init (&sampling_thread_exited, FALSE);
	/* Mark the sampler as running before the thread starts so it doesn't
	 * immediately observe a stopped state. */
	mono_atomic_store_i32 (&sampling_thread_running, 1);
	MonoError error;
	MonoInternalThread *thread = mono_thread_create_internal (mono_get_root_domain (), sampling_thread_func, NULL, MONO_THREAD_CREATE_FLAGS_NONE, &error);
	mono_error_assert_ok (&error);
	sampling_thread = MONO_UINT_TO_NATIVE_THREAD_ID (thread->tid);
}
static void threadpool_start_idle_threads (ThreadPool *tp) { int n; guint32 stack_size; stack_size = (!tp->is_io) ? 0 : SMALL_STACK; do { while (1) { n = tp->nthreads; if (n >= tp->min_threads) return; if (InterlockedCompareExchange (&tp->nthreads, n + 1, n) == n) break; } mono_perfcounter_update_value (tp->pc_nthreads, TRUE, 1); mono_thread_create_internal (mono_get_root_domain (), tp->async_invoke, tp, TRUE, stack_size); SleepEx (100, TRUE); } while (1); }