/*
 * socket_io_init:
 * One-time initialization of the socket I/O layer for @data.
 *
 * data->inited is a small state machine: 0 = not started, 1 = init in
 * progress, 2 = initialized, 3 = cleaned up (see comment below).  Exactly
 * one thread wins the CAS and performs the initialization; every other
 * caller spins (sleeping 1 ms per iteration) until the winner publishes
 * inited == 2.
 */
static void
socket_io_init (SocketIOData *data)
{
	int inited;

	if (data->inited >= 2) // 2 -> initialized, 3-> cleaned up
		return;

	/* Race to be the initializer: 0 -> 1.  A non-zero old value means
	 * someone else is (or was) initializing, so wait for them. */
	inited = InterlockedCompareExchange (&data->inited, 1, 0);
	if (inited >= 1) {
		while (TRUE) {
			if (data->inited >= 2)
				return;
			SleepEx (1, FALSE);
		}
	}

	EnterCriticalSection (&data->io_lock);
	data->sock_to_state = mono_g_hash_table_new_type (g_direct_hash, g_direct_equal, MONO_HASH_VALUE_GC);
	/* Pick the best available event backend at compile time... */
#ifdef HAVE_EPOLL
	data->event_system = EPOLL_BACKEND;
#elif defined(HAVE_KQUEUE)
	data->event_system = KQUEUE_BACKEND;
#else
	data->event_system = POLL_BACKEND;
#endif
	/* ...but allow forcing plain poll() at run time. */
	if (g_getenv ("MONO_DISABLE_AIO") != NULL)
		data->event_system = POLL_BACKEND;
	init_event_system (data);
	/* Spawn the wait thread that services the chosen event backend. */
	mono_thread_create_internal (mono_get_root_domain (), data->wait, data, TRUE, SMALL_STACK);
	LeaveCriticalSection (&data->io_lock);
	/* NOTE(review): plain store with no explicit memory barrier — spinners
	 * above may observe inited == 2 before the stores inside the critical
	 * section are visible on weakly-ordered CPUs; presumably LeaveCriticalSection
	 * provides the needed release semantics here — confirm. */
	data->inited = 2;
	threadpool_start_thread (&async_io_tp);
}
/*
 * monitor_thread:
 * Background thread that periodically (every ~500 ms, tolerating up to 10
 * spurious wakeups per period) rebalances both threadpools: if a pool has
 * no waiting worker but has pending jobs (in its concurrent queue or, for
 * the CPU pool, in any work-stealing queue), it starts one more worker.
 *
 * FIX: the scan over the work-stealing queues reused the outer loop index
 * 'i'; after the scan 'i' could be >= 2, terminating the pool loop early
 * and skipping the I/O pool.  The inner loop now uses its own index 'j'.
 */
static void
monitor_thread (gpointer unused)
{
	ThreadPool *pools [2];
	MonoInternalThread *thread;
	guint32 ms;
	gboolean need_one;
	int i;

	pools [0] = &async_tp;
	pools [1] = &async_io_tp;
	thread = mono_thread_internal_current ();
	ves_icall_System_Threading_Thread_SetName_internal (thread, mono_string_new (mono_domain_get (), "Threadpool monitor"));
	while (1) {
		ms = 500;
		i = 10; //number of spurious awakes we tolerate before doing a round of rebalancing.
		do {
			guint32 ts;
			ts = mono_msec_ticks ();
			/* SleepEx returns 0 when the full interval elapsed;
			 * non-zero means an APC interrupted the sleep. */
			if (SleepEx (ms, TRUE) == 0)
				break;
			ms -= (mono_msec_ticks () - ts);
			if (mono_runtime_is_shutting_down ())
				break;
			if (THREAD_WANTS_A_BREAK (thread))
				mono_thread_interruption_checkpoint ();
		} while (ms > 0 && i--);

		if (mono_runtime_is_shutting_down ())
			break;
		if (suspended)
			continue;

		for (i = 0; i < 2; i++) {
			ThreadPool *tp;
			tp = pools [i];
			/* A waiting worker will pick up new jobs; nothing to do. */
			if (tp->waiting > 0)
				continue;
			need_one = (mono_cq_count (tp->queue) > 0);
			if (!need_one && !tp->is_io) {
				int j;
				EnterCriticalSection (&wsqs_lock);
				/* Use a dedicated index so the pool loop counter
				 * is not clobbered (see FIX note above). */
				for (j = 0; wsqs != NULL && j < wsqs->len; j++) {
					MonoWSQ *wsq;
					wsq = g_ptr_array_index (wsqs, j);
					if (mono_wsq_count (wsq) != 0) {
						need_one = TRUE;
						break;
					}
				}
				LeaveCriticalSection (&wsqs_lock);
			}
			if (need_one)
				threadpool_start_thread (tp);
		}
	}
}
/*
 * threadpool_append_jobs:
 * Enqueues @njobs async jobs onto @tp, lazily bootstrapping the pool
 * infrastructure on first use, waking the sleeping monitor thread, and
 * pulsing any waiting workers.
 */
static void
threadpool_append_jobs (ThreadPool *tp, MonoObject **jobs, gint njobs)
{
	MonoObject *ar;
	gint i;

	if (mono_runtime_is_shutting_down ())
		return;

	/* First caller flips pool_status 0 -> 1 and bootstraps: the CPU pool
	 * also owns starting the shared monitor thread. */
	if (tp->pool_status == 0 && InterlockedCompareExchange (&tp->pool_status, 1, 0) == 0) {
		if (!tp->is_io) {
			monitor_internal_thread = mono_thread_create_internal (mono_get_root_domain (), monitor_thread, NULL, TRUE, SMALL_STACK);
			/* NOTE(review): the flag is set after the thread already
			 * started running — there is a small window where it can be
			 * observed without DONT_MANAGE; confirm this is benign. */
			monitor_internal_thread->flags |= MONO_THREAD_FLAG_DONT_MANAGE;
			threadpool_start_thread (tp);
		}
		/* Create on demand up to min_threads to avoid startup penalty for apps that don't use
		 * the threadpool that much */
		if (mono_config_is_server_mode ()) {
			mono_thread_create_internal (mono_get_root_domain (), threadpool_start_idle_threads, tp, TRUE, SMALL_STACK);
		}
	}

	/* Account the new work and wake the monitor if it is (falling) asleep.
	 * SLEEPING -> AWAKE requires posting the semaphore it blocks on;
	 * FALLING_ASLEEP -> AWAKE only needs the state flip. */
	InterlockedAdd (&monitor_njobs, njobs);

	if (monitor_state == MONITOR_STATE_SLEEPING && InterlockedCompareExchange (&monitor_state, MONITOR_STATE_AWAKE, MONITOR_STATE_SLEEPING) == MONITOR_STATE_SLEEPING)
		MONO_SEM_POST (&monitor_sem);

	if (monitor_state == MONITOR_STATE_FALLING_ASLEEP)
		InterlockedCompareExchange (&monitor_state, MONITOR_STATE_AWAKE, MONITOR_STATE_FALLING_ASLEEP);

	for (i = 0; i < njobs; i++) {
		ar = jobs [i];
		if (ar == NULL || mono_domain_is_unloading (ar->vtable->domain))
			continue; /* Might happen when cleaning domain jobs */
		threadpool_jobs_inc (ar);
#ifndef DISABLE_PERFCOUNTERS
		mono_perfcounter_update_value (tp->pc_nitems, TRUE, 1);
#endif
		/* CPU-pool jobs go to the caller's work-stealing queue when
		 * possible; everything else lands in the shared queue. */
		if (!tp->is_io && mono_wsq_local_push (ar))
			continue;

		mono_cq_enqueue (tp->queue, ar);
	}

	/* NOTE(review): '#if DEBUG' (not '#ifdef') — relies on DEBUG being
	 * defined to a non-zero value; verify against the build flags. */
#if DEBUG
	InterlockedAdd (&tp->njobs, njobs);
#endif
	/* Wake at most one waiting worker per job (capped at max_threads). */
	for (i = 0; tp->waiting > 0 && i < MIN(njobs, tp->max_threads); i++)
		pulse_on_new_job (tp);
}
/*
 * threadpool_append_jobs:
 * Enqueues @njobs async jobs onto @tp, lazily bootstrapping the pool on
 * first use and pulsing waiting workers.  For the CPU pool, roughly every
 * 10th job is timestamped so the monitor can sample queue latency.
 */
static void
threadpool_append_jobs (ThreadPool *tp, MonoObject **jobs, gint njobs)
{
	/* Shared across all pools/threads; bumped atomically below. */
	static int job_counter;
	MonoObject *ar;
	gint i;

	if (mono_runtime_is_shutting_down ())
		return;

	/* First caller flips pool_status 0 -> 1 and bootstraps: the CPU pool
	 * also owns starting the monitor thread and one worker. */
	if (tp->pool_status == 0 && InterlockedCompareExchange (&tp->pool_status, 1, 0) == 0) {
		if (!tp->is_io) {
			mono_thread_create_internal (mono_get_root_domain (), monitor_thread, NULL, TRUE, SMALL_STACK);
			threadpool_start_thread (tp);
		}
		/* Create on demand up to min_threads to avoid startup penalty for apps that don't use
		 * the threadpool that much */
		if (mono_config_is_server_mode ()) {
			mono_thread_create_internal (mono_get_root_domain (), threadpool_start_idle_threads, tp, TRUE, SMALL_STACK);
		}
	}

	for (i = 0; i < njobs; i++) {
		ar = jobs [i];
		if (ar == NULL || mono_domain_is_unloading (ar->vtable->domain))
			continue; /* Might happen when cleaning domain jobs */
		/* Sample ~1 in 10 CPU-pool jobs: record the enqueue time for the
		 * monitor's latency heuristic.  NOTE(review): assumes 'ar' is a
		 * MonoAsyncResult for non-I/O pools — confirm against callers. */
		if (!tp->is_io && (InterlockedIncrement (&job_counter) % 10) == 0) {
			MonoAsyncResult *o = (MonoAsyncResult *) ar;
			o->add_time = mono_100ns_ticks ();
		}
		threadpool_jobs_inc (ar);
#ifndef DISABLE_PERFCOUNTERS
		mono_perfcounter_update_value (tp->pc_nitems, TRUE, 1);
#endif
		/* CPU-pool jobs go to the caller's work-stealing queue when
		 * possible; everything else lands in the shared queue. */
		if (!tp->is_io && mono_wsq_local_push (ar))
			continue;

		mono_cq_enqueue (tp->queue, ar);
	}

	/* Wake at most one waiting worker per job (capped at max_threads). */
	for (i = 0; tp->waiting > 0 && i < MIN(njobs, tp->max_threads); i++)
		pulse_on_new_job (tp);
}
/*
 * monitor_thread:
 * Background thread that samples both pools every SAMPLES_PERIOD ms and
 * rebalances them: the I/O pool gets a new worker whenever it has queued
 * jobs but no waiting worker; the CPU pool is grown/shrunk one thread at a
 * time according to monitor_heuristic().  The thread parks itself on
 * monitor_sem after NUM_WAITING_ITERATIONS idle periods and exits when the
 * runtime shuts down or a pool starts cleaning up (pool_status == 2).
 *
 * FIXES over the previous revision:
 *  - 'monitor_heuristic (&current, ...)' had been garbled to '¤t'
 *    (a mangled '&curren' HTML entity), which does not compile;
 *  - the malloc result was used unchecked (CERT MEM32-C);
 *  - the history buffer was leaked on every exit path.
 */
static void
monitor_thread (gpointer unused)
{
	ThreadPool *pools [2];
	MonoInternalThread *thread;
	int i;
	guint32 ms;
	gint8 num_waiting_iterations = 0;
	gint16 history_size = 0, current = -1;
	SamplesHistory *history;

	/* Fail loudly on OOM instead of dereferencing NULL later. */
	history = malloc (sizeof (SamplesHistory) * HISTORY_SIZE);
	g_assert (history != NULL);

	pools [0] = &async_tp;
	pools [1] = &async_io_tp;
	thread = mono_thread_internal_current ();
	ves_icall_System_Threading_Thread_SetName_internal (thread, mono_string_new (mono_domain_get (), "Threadpool monitor"));
	while (1) {
		ms = SAMPLES_PERIOD;
		i = 10; //number of spurious awakes we tolerate before doing a round of rebalancing.
		do {
			guint32 ts;
			ts = mono_msec_ticks ();
			/* SleepEx returns 0 when the full interval elapsed;
			 * non-zero means an APC interrupted the sleep. */
			if (SleepEx (ms, TRUE) == 0)
				break;
			ms -= (mono_msec_ticks () - ts);
			if (mono_runtime_is_shutting_down ())
				break;
			if (THREAD_WANTS_A_BREAK (thread))
				mono_thread_interruption_checkpoint ();
		} while (ms > 0 && i--);

		if (mono_runtime_is_shutting_down ())
			break;
		if (suspended)
			continue;

		/* threadpool is cleaning up */
		if (async_tp.pool_status == 2 || async_io_tp.pool_status == 2)
			break;

		switch (monitor_state) {
		case MONITOR_STATE_AWAKE:
			num_waiting_iterations = 0;
			break;
		case MONITOR_STATE_FALLING_ASLEEP:
			/* After enough consecutive idle periods, try to park:
			 * the CAS can lose to a producer flipping us back AWAKE. */
			if (++num_waiting_iterations == NUM_WAITING_ITERATIONS) {
				if (monitor_state == MONITOR_STATE_FALLING_ASLEEP && InterlockedCompareExchange (&monitor_state, MONITOR_STATE_SLEEPING, MONITOR_STATE_FALLING_ASLEEP) == MONITOR_STATE_FALLING_ASLEEP) {
					MONO_SEM_WAIT (&monitor_sem);
					/* Woken by a producer: restart sampling from scratch. */
					num_waiting_iterations = 0;
					current = -1;
					history_size = 0;
				}
			}
			break;
		case MONITOR_STATE_SLEEPING:
			/* We are the only thread that sets SLEEPING, and we do so
			 * right before blocking — seeing it here is impossible. */
			g_assert_not_reached ();
		}

		for (i = 0; i < 2; i++) {
			ThreadPool *tp;
			tp = pools [i];
			if (tp->is_io) {
				if (!tp->waiting && mono_cq_count (tp->queue) > 0)
					threadpool_start_thread (tp);
			} else {
				/* +1 = grow by one worker, -1 = retire one, 0 = hold. */
				gint8 nthreads_diff = monitor_heuristic (&current, &history_size, history, tp);
				if (nthreads_diff == 1)
					threadpool_start_thread (tp);
				else if (nthreads_diff == -1)
					threadpool_kill_thread (tp);
			}
		}
	}

	/* Release the sample buffer on every exit path. */
	free (history);
}