/*
 * monitor_thread:
 * Background thread that periodically (every ~500ms, tolerating a bounded
 * number of spurious wakeups) checks both pools (worker + IO) and starts a
 * new thread in a pool that has queued work but no waiting threads.
 * Runs until the runtime shuts down; while `suspended` it keeps sleeping
 * without rebalancing.
 */
static void
monitor_thread (gpointer unused)
{
	ThreadPool *pools [2];
	MonoInternalThread *thread;
	guint32 ms;
	gboolean need_one;
	int i;

	pools [0] = &async_tp;
	pools [1] = &async_io_tp;
	thread = mono_thread_internal_current ();
	ves_icall_System_Threading_Thread_SetName_internal (thread, mono_string_new (mono_domain_get (), "Threadpool monitor"));
	while (1) {
		ms = 500;
		i = 10; /* number of spurious awakes we tolerate before doing a round of rebalancing */
		do {
			guint32 ts;
			ts = mono_msec_ticks ();
			if (SleepEx (ms, TRUE) == 0)
				break; /* full timeout elapsed: time to rebalance */
			/* NOTE(review): if more than `ms` msec elapsed before the alertable
			 * wakeup, this unsigned subtraction wraps to a huge value; the
			 * `i--` bound still caps the number of retries, so it is benign. */
			ms -= (mono_msec_ticks () - ts);
			if (mono_runtime_is_shutting_down ())
				break;
			if (THREAD_WANTS_A_BREAK (thread))
				mono_thread_interruption_checkpoint ();
		} while (ms > 0 && i--);

		if (mono_runtime_is_shutting_down ())
			break;

		if (suspended)
			continue;

		for (i = 0; i < 2; i++) {
			ThreadPool *tp;
			tp = pools [i];
			if (tp->waiting > 0)
				continue; /* someone is already idle and will pick up work */
			need_one = (mono_cq_count (tp->queue) > 0);
			if (!need_one && !tp->is_io) {
				int j;
				EnterCriticalSection (&wsqs_lock);
				/* FIX: use a separate index here. The original reused `i`,
				 * clobbering the outer pool loop: after scanning the wsqs the
				 * outer condition `i < 2` could already be false and the IO
				 * pool (pools[1]) was silently skipped. */
				for (j = 0; wsqs != NULL && j < wsqs->len; j++) {
					MonoWSQ *wsq;
					wsq = g_ptr_array_index (wsqs, j);
					if (mono_wsq_count (wsq) != 0) {
						need_one = TRUE;
						break;
					}
				}
				LeaveCriticalSection (&wsqs_lock);
			}
			if (need_one)
				threadpool_start_thread (tp);
		}
	}
}
static void async_invoke_thread (gpointer data) { MonoDomain *domain; MonoInternalThread *thread; MonoWSQ *wsq; ThreadPool *tp; gboolean must_die; const gchar *name; tp = data; wsq = NULL; if (!tp->is_io) wsq = add_wsq (); thread = mono_thread_internal_current (); mono_profiler_thread_start (thread->tid); name = (tp->is_io) ? "IO Threadpool worker" : "Threadpool worker"; mono_thread_set_name_internal (thread, mono_string_new (mono_domain_get (), name), FALSE); if (tp_start_func) tp_start_func (tp_hooks_user_data); data = NULL; for (;;) { MonoAsyncResult *ar; MonoClass *klass; gboolean is_io_task; gboolean is_socket; int n_naps = 0; is_io_task = FALSE; ar = (MonoAsyncResult *) data; if (ar) { InterlockedIncrement (&tp->busy_threads); domain = ((MonoObject *)ar)->vtable->domain; #ifndef DISABLE_SOCKETS klass = ((MonoObject *) data)->vtable->klass; is_io_task = !is_corlib_asyncresult (domain, klass); is_socket = FALSE; if (is_io_task) { MonoSocketAsyncResult *state = (MonoSocketAsyncResult *) data; is_socket = is_socketasyncresult (domain, klass); ar = state->ares; switch (state->operation) { case AIO_OP_RECEIVE: state->total = ICALL_RECV (state); break; case AIO_OP_SEND: state->total = ICALL_SEND (state); break; } } #endif /* worker threads invokes methods in different domains, * so we need to set the right domain here */ g_assert (domain); if (mono_domain_is_unloading (domain) || mono_runtime_is_shutting_down ()) { threadpool_jobs_dec ((MonoObject *)ar); data = NULL; ar = NULL; InterlockedDecrement (&tp->busy_threads); } else { mono_thread_push_appdomain_ref (domain); if (threadpool_jobs_dec ((MonoObject *)ar)) { data = NULL; ar = NULL; mono_thread_pop_appdomain_ref (); InterlockedDecrement (&tp->busy_threads); continue; } if (mono_domain_set (domain, FALSE)) { MonoObject *exc; if (tp_item_begin_func) tp_item_begin_func (tp_item_user_data); if (!is_io_task && ar->add_time > 0) process_idle_times (tp, ar->add_time); exc = mono_async_invoke (tp, ar); if 
(tp_item_end_func) tp_item_end_func (tp_item_user_data); if (exc) mono_internal_thread_unhandled_exception (exc); if (is_socket && tp->is_io) { MonoSocketAsyncResult *state = (MonoSocketAsyncResult *) data; if (state->completed && state->callback) { MonoAsyncResult *cb_ares; cb_ares = create_simple_asyncresult ((MonoObject *) state->callback, (MonoObject *) state); icall_append_job ((MonoObject *) cb_ares); } } mono_domain_set (mono_get_root_domain (), TRUE); } mono_thread_pop_appdomain_ref (); InterlockedDecrement (&tp->busy_threads); /* If the callee changes the background status, set it back to TRUE */ mono_thread_clr_state (thread , ~ThreadState_Background); if (!mono_thread_test_state (thread , ThreadState_Background)) ves_icall_System_Threading_Thread_SetState (thread, ThreadState_Background); } } ar = NULL; data = NULL; must_die = should_i_die (tp); if (!must_die && (tp->is_io || !mono_wsq_local_pop (&data))) dequeue_or_steal (tp, &data, wsq); n_naps = 0; while (!must_die && !data && n_naps < 4) { gboolean res; InterlockedIncrement (&tp->waiting); // Another thread may have added a job into its wsq since the last call to dequeue_or_steal // Check all the queues again before entering the wait loop dequeue_or_steal (tp, &data, wsq); if (data) { InterlockedDecrement (&tp->waiting); break; } mono_gc_set_skip_thread (TRUE); #if defined(__OpenBSD__) while (mono_cq_count (tp->queue) == 0 && (res = mono_sem_wait (&tp->new_job, TRUE)) == -1) {// && errno == EINTR) { #else while (mono_cq_count (tp->queue) == 0 && (res = mono_sem_timedwait (&tp->new_job, 2000, TRUE)) == -1) {// && errno == EINTR) { #endif if (mono_runtime_is_shutting_down ()) break; if (THREAD_WANTS_A_BREAK (thread)) mono_thread_interruption_checkpoint (); } InterlockedDecrement (&tp->waiting); mono_gc_set_skip_thread (FALSE); if (mono_runtime_is_shutting_down ()) break; must_die = should_i_die (tp); dequeue_or_steal (tp, &data, wsq); n_naps++; } if (!data && !tp->is_io && 
!mono_runtime_is_shutting_down ()) { mono_wsq_local_pop (&data); if (data && must_die) { InterlockedCompareExchange (&tp->destroy_thread, 1, 0); pulse_on_new_job (tp); } } if (!data) { gint nt; gboolean down; while (1) { nt = tp->nthreads; down = mono_runtime_is_shutting_down (); if (!down && nt <= tp->min_threads) break; if (down || InterlockedCompareExchange (&tp->nthreads, nt - 1, nt) == nt) { mono_perfcounter_update_value (tp->pc_nthreads, TRUE, -1); if (!tp->is_io) { remove_wsq (wsq); } mono_profiler_thread_end (thread->tid); if (tp_finish_func) tp_finish_func (tp_hooks_user_data); return; } } } } g_assert_not_reached (); } void ves_icall_System_Threading_ThreadPool_GetAvailableThreads (gint *workerThreads, gint *completionPortThreads) { *workerThreads = async_tp.max_threads - async_tp.busy_threads; *completionPortThreads = async_io_tp.max_threads - async_io_tp.busy_threads; }
/*
 * monitor_thread:
 * Heuristic-driven monitor: samples both pools every SAMPLES_PERIOD msec.
 * For the IO pool it simply starts a thread when work is queued and nobody
 * waits; for the worker pool it feeds a sliding sample history into
 * monitor_heuristic() and starts/kills one thread per tick as advised.
 * Falls asleep on monitor_sem after NUM_WAITING_ITERATIONS idle ticks
 * (MONITOR_STATE_FALLING_ASLEEP -> MONITOR_STATE_SLEEPING via CAS) and
 * resets its history when woken.
 *
 * NOTE(review): this file also contains an earlier definition of
 * monitor_thread — one of the two must be removed or the file will not link.
 */
static void
monitor_thread (gpointer unused)
{
	ThreadPool *pools [2];
	MonoInternalThread *thread;
	int i;
	guint32 ms;
	gint8 num_waiting_iterations = 0;
	gint16 history_size = 0, current = -1;
	SamplesHistory *history = malloc (sizeof (SamplesHistory) * HISTORY_SIZE);

	/* FIX: malloc result was used unchecked; indexing a NULL history in
	 * monitor_heuristic would crash much later and far from the cause. */
	g_assert (history != NULL);

	pools [0] = &async_tp;
	pools [1] = &async_io_tp;
	thread = mono_thread_internal_current ();
	ves_icall_System_Threading_Thread_SetName_internal (thread, mono_string_new (mono_domain_get (), "Threadpool monitor"));
	while (1) {
		ms = SAMPLES_PERIOD;
		i = 10; /* number of spurious awakes we tolerate before doing a round of rebalancing */
		do {
			guint32 ts;
			ts = mono_msec_ticks ();
			if (SleepEx (ms, TRUE) == 0)
				break; /* full sampling period elapsed */
			ms -= (mono_msec_ticks () - ts);
			if (mono_runtime_is_shutting_down ())
				break;
			if (THREAD_WANTS_A_BREAK (thread))
				mono_thread_interruption_checkpoint ();
		} while (ms > 0 && i--);

		if (mono_runtime_is_shutting_down ())
			break;

		if (suspended)
			continue;

		/* threadpool is cleaning up */
		if (async_tp.pool_status == 2 || async_io_tp.pool_status == 2)
			break;

		switch (monitor_state) {
		case MONITOR_STATE_AWAKE:
			num_waiting_iterations = 0;
			break;
		case MONITOR_STATE_FALLING_ASLEEP:
			if (++num_waiting_iterations == NUM_WAITING_ITERATIONS) {
				/* CAS so a concurrent wake (state flipped back to AWAKE)
				 * cancels the transition to SLEEPING */
				if (monitor_state == MONITOR_STATE_FALLING_ASLEEP && InterlockedCompareExchange (&monitor_state, MONITOR_STATE_SLEEPING, MONITOR_STATE_FALLING_ASLEEP) == MONITOR_STATE_FALLING_ASLEEP) {
					MONO_SEM_WAIT (&monitor_sem);
					/* stale samples are useless after sleeping: reset history */
					num_waiting_iterations = 0;
					current = -1;
					history_size = 0;
				}
			}
			break;
		case MONITOR_STATE_SLEEPING:
			/* a sleeping monitor cannot be executing this loop */
			g_assert_not_reached ();
		}

		for (i = 0; i < 2; i++) {
			ThreadPool *tp;
			tp = pools [i];

			if (tp->is_io) {
				if (!tp->waiting && mono_cq_count (tp->queue) > 0)
					threadpool_start_thread (tp);
			} else {
				/* FIX: "&current" had been mangled to "¤t" by an HTML-entity
				 * round-trip ("&curren" -> U+00A4 '¤'); restored the
				 * address-of expression so the heuristic can update its cursor. */
				gint8 nthreads_diff = monitor_heuristic (&current, &history_size, history, tp);

				if (nthreads_diff == 1)
					threadpool_start_thread (tp);
				else if (nthreads_diff == -1)
					threadpool_kill_thread (tp);
			}
		}
	}
}