static void
initialize (void)
{
	g_assert (!threadpool_io);
	threadpool_io = g_new0 (ThreadPoolIO, 1);
	g_assert (threadpool_io);

	mono_mutex_init_recursive (&threadpool_io->updates_lock);
	mono_cond_init (&threadpool_io->updates_cond, 0);
	mono_gc_register_root ((void*)&threadpool_io->updates [0], sizeof (threadpool_io->updates), MONO_GC_DESCRIPTOR_NULL, MONO_ROOT_SOURCE_THREAD_POOL, "i/o thread pool updates list");

	threadpool_io->updates_size = 0;

	threadpool_io->backend = backend_poll;
	if (g_getenv ("MONO_ENABLE_AIO") != NULL) {
#if defined(HAVE_EPOLL)
		threadpool_io->backend = backend_epoll;
#elif defined(HAVE_KQUEUE)
		threadpool_io->backend = backend_kqueue;
#endif
	}

	wakeup_pipes_init ();

	if (!threadpool_io->backend.init (threadpool_io->wakeup_pipes [0]))
		g_error ("initialize: backend->init () failed");

	if (!mono_thread_create_internal (mono_get_root_domain (), selector_thread, NULL, TRUE, SMALL_STACK))
		g_error ("initialize: mono_thread_create_internal () failed");
}
static void*
codechunk_valloc (void *preferred, guint32 size)
{
	void *ptr;
	GSList *freelist;

	if (!valloc_freelists) {
		mono_mutex_init_recursive (&valloc_mutex);
		valloc_freelists = g_hash_table_new (NULL, NULL);
	}

	/*
	 * Keep a small freelist of memory blocks to reduce pressure on the kernel
	 * memory subsystem (see #3321).
	 */
	mono_mutex_lock (&valloc_mutex);
	freelist = (GSList *) g_hash_table_lookup (valloc_freelists, GUINT_TO_POINTER (size));
	if (freelist) {
		/* reuse a cached block of the right size; zero it before handing it out */
		ptr = freelist->data;
		memset (ptr, 0, size);
		freelist = g_slist_delete_link (freelist, freelist);
		g_hash_table_insert (valloc_freelists, GUINT_TO_POINTER (size), freelist);
	} else {
		ptr = mono_valloc (preferred, size, MONO_PROT_RWX | ARCH_MAP_FLAGS);
		/* if the preferred address is unavailable, retry anywhere */
		if (!ptr && preferred)
			ptr = mono_valloc (NULL, size, MONO_PROT_RWX | ARCH_MAP_FLAGS);
	}
	mono_mutex_unlock (&valloc_mutex);
	return ptr;
}
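/*
 * For reference, a minimal sketch of the matching free path. This is an
 * assumption, not part of the excerpt above: codechunk_vfree and the
 * VALLOC_FREELIST_SIZE cap are hypothetical names here. Blocks go back on
 * the per-size freelist until it reaches the cap, after which they are
 * returned to the OS.
 */
static void
codechunk_vfree (void *ptr, guint32 size)
{
	GSList *freelist;

	mono_mutex_lock (&valloc_mutex);
	freelist = (GSList *) g_hash_table_lookup (valloc_freelists, GUINT_TO_POINTER (size));
	if (!freelist || g_slist_length (freelist) < VALLOC_FREELIST_SIZE) {
		/* cache the block for reuse by a later codechunk_valloc of the same size */
		freelist = g_slist_prepend (freelist, ptr);
		g_hash_table_insert (valloc_freelists, GUINT_TO_POINTER (size), freelist);
	} else {
		/* freelist is full: give the memory back to the OS */
		mono_vfree (ptr, size);
	}
	mono_mutex_unlock (&valloc_mutex);
}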
void
mono_unwind_init (void)
{
	mono_mutex_init_recursive (&unwind_mutex);

	mono_counters_register ("Unwind info size", MONO_COUNTER_JIT | MONO_COUNTER_INT, &unwind_info_size);
}
/**
 * mono_profiler_install:
 * @prof: a MonoProfiler structure pointer, or a pointer to a derived structure.
 * @callback: the function to invoke at shutdown
 *
 * Use mono_profiler_install to activate profiling in the Mono runtime.
 * Typically developers of new profilers will create a new structure whose
 * first field is a MonoProfiler and put any extra information that they need
 * to access from the various profiling callbacks there.
 */
void
mono_profiler_install (MonoProfiler *prof, MonoProfileFunc callback)
{
	ProfilerDesc *desc = g_new0 (ProfilerDesc, 1);
	if (!prof_list)
		mono_mutex_init_recursive (&profiler_coverage_mutex);
	desc->profiler = prof;
	desc->shutdown_callback = callback;
	desc->next = prof_list;
	prof_list = desc;
}
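/*
 * A minimal sketch of the pattern the doc comment above describes, under the
 * legacy profiler API where MonoProfiler is an opaque struct that the
 * profiler module itself completes. The names below (sample_shutdown,
 * sample_profiler, shutdown_count) are illustrative, not part of Mono.
 */
struct _MonoProfiler {
	int shutdown_count; /* example per-profiler state */
};

static MonoProfiler sample_profiler;

static void
sample_shutdown (MonoProfiler *prof)
{
	/* invoked by the runtime at shutdown with the pointer passed to install */
	g_print ("profiler shutting down, count = %d\n", prof->shutdown_count);
}

static void
sample_profiler_startup (void)
{
	mono_profiler_install (&sample_profiler, sample_shutdown);
}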
void
mono_tasklets_init (void)
{
	mono_mutex_init_recursive (&tasklets_mutex);

	mono_add_internal_call ("Mono.Tasklets.Continuation::alloc", continuation_alloc);
	mono_add_internal_call ("Mono.Tasklets.Continuation::free", continuation_free);
	mono_add_internal_call ("Mono.Tasklets.Continuation::mark", continuation_mark_frame);
	mono_add_internal_call ("Mono.Tasklets.Continuation::store", continuation_store);
	mono_add_internal_call ("Mono.Tasklets.Continuation::restore", continuation_restore);
}
void
mono_thread_smr_init (void)
{
	int i;

	mono_mutex_init_recursive (&small_id_mutex);
	mono_counters_register ("Hazardous pointers", MONO_COUNTER_JIT | MONO_COUNTER_INT, &hazardous_pointer_count);

	/* pre-allocate the first HAZARD_TABLE_OVERFLOW small ids, checking they are handed out sequentially from 0 */
	for (i = 0; i < HAZARD_TABLE_OVERFLOW; ++i) {
		int small_id = mono_thread_small_id_alloc ();
		g_assert (small_id == i);
	}
}
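/*
 * For context, a simplified sketch of how the hazard-pointer machinery set up
 * above is typically used when reading a lock-free shared pointer, assuming
 * the mono_hazard_pointer_get / mono_hazard_pointer_clear helpers and
 * get_hazardous_pointer from mono/utils/hazard-pointer.h. The shared_ptr
 * variable and read_shared function are hypothetical.
 */
static gpointer shared_ptr;

static void
read_shared (void)
{
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
	/* pin the current value in hazard slot 0 so it cannot be freed under us */
	gpointer val = get_hazardous_pointer ((gpointer volatile *)&shared_ptr, hp, 0);
	if (val) {
		/* ... safely dereference val here ... */
	}
	mono_hazard_pointer_clear (hp, 0);
}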
void
sgen_section_gray_queue_init (SgenSectionGrayQueue *queue, gboolean locked, GrayQueueEnqueueCheckFunc enqueue_check_func)
{
	g_assert (sgen_section_gray_queue_is_empty (queue));

	queue->locked = locked;
	if (locked) {
		mono_mutex_init_recursive (&queue->lock);
	}

#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
	queue->enqueue_check_func = enqueue_check_func;
#endif
}
static void
mono_backtrace (int size)
{
	void *array [BACKTRACE_DEPTH];
	char **names;
	int i, symbols;
	static gboolean inited;

	if (!inited) {
		mono_mutex_init_recursive (&mempool_tracing_lock);
		inited = TRUE;
	}

	mono_mutex_lock (&mempool_tracing_lock);
	g_print ("Allocating %d bytes\n", size);
	/* backtrace () and backtrace_symbols () are provided by <execinfo.h> */
	symbols = backtrace (array, BACKTRACE_DEPTH);
	names = backtrace_symbols (array, symbols);
	/* start at 1 to skip the mono_backtrace () frame itself */
	for (i = 1; i < symbols; ++i) {
		g_print ("\t%s\n", names [i]);
	}
	free (names);
	mono_mutex_unlock (&mempool_tracing_lock);
}
static void
ensure_initialized (gboolean *enable_worker_tracking)
{
	ThreadPoolHillClimbing *hc;
	const char *threads_per_cpu_env;
	gint threads_per_cpu;
	gint threads_count;

	if (enable_worker_tracking) {
		// TODO implement some kind of switch to have the possibility to use it
		*enable_worker_tracking = FALSE;
	}

	if (status >= STATUS_INITIALIZED)
		return;
	if (status == STATUS_INITIALIZING || InterlockedCompareExchange (&status, STATUS_INITIALIZING, STATUS_NOT_INITIALIZED) != STATUS_NOT_INITIALIZED) {
		/* another thread won the CAS; wait for it to finish initializing */
		while (status == STATUS_INITIALIZING)
			mono_thread_info_yield ();
		g_assert (status >= STATUS_INITIALIZED);
		return;
	}

	g_assert (!threadpool);
	threadpool = g_new0 (ThreadPool, 1);
	g_assert (threadpool);

	threadpool->domains = g_ptr_array_new ();
	mono_mutex_init_recursive (&threadpool->domains_lock);

	threadpool->parked_threads = g_ptr_array_new ();
	mono_mutex_init (&threadpool->parked_threads_lock);

	threadpool->working_threads = g_ptr_array_new ();
	mono_mutex_init (&threadpool->working_threads_lock);

	threadpool->heuristic_adjustment_interval = 10;
	mono_mutex_init (&threadpool->heuristic_lock);

	mono_rand_open ();

	hc = &threadpool->heuristic_hill_climbing;

	hc->wave_period = HILL_CLIMBING_WAVE_PERIOD;
	hc->max_thread_wave_magnitude = HILL_CLIMBING_MAX_WAVE_MAGNITUDE;
	hc->thread_magnitude_multiplier = (gdouble) HILL_CLIMBING_WAVE_MAGNITUDE_MULTIPLIER;
	hc->samples_to_measure = hc->wave_period * HILL_CLIMBING_WAVE_HISTORY_SIZE;
	hc->target_throughput_ratio = (gdouble) HILL_CLIMBING_BIAS;
	hc->target_signal_to_noise_ratio = (gdouble) HILL_CLIMBING_TARGET_SIGNAL_TO_NOISE_RATIO;
	hc->max_change_per_second = (gdouble) HILL_CLIMBING_MAX_CHANGE_PER_SECOND;
	hc->max_change_per_sample = (gdouble) HILL_CLIMBING_MAX_CHANGE_PER_SAMPLE;
	hc->sample_interval_low = HILL_CLIMBING_SAMPLE_INTERVAL_LOW;
	hc->sample_interval_high = HILL_CLIMBING_SAMPLE_INTERVAL_HIGH;
	hc->throughput_error_smoothing_factor = (gdouble) HILL_CLIMBING_ERROR_SMOOTHING_FACTOR;
	hc->gain_exponent = (gdouble) HILL_CLIMBING_GAIN_EXPONENT;
	hc->max_sample_error = (gdouble) HILL_CLIMBING_MAX_SAMPLE_ERROR_PERCENT;
	hc->current_control_setting = 0;
	hc->total_samples = 0;
	hc->last_thread_count = 0;
	hc->average_throughput_noise = 0;
	hc->elapsed_since_last_change = 0;
	hc->accumulated_completion_count = 0;
	hc->accumulated_sample_duration = 0;
	hc->samples = g_new0 (gdouble, hc->samples_to_measure);
	hc->thread_counts = g_new0 (gdouble, hc->samples_to_measure);
	hc->random_interval_generator = rand_create ();
	hc->current_sample_interval = rand_next (&hc->random_interval_generator, hc->sample_interval_low, hc->sample_interval_high);

	if (!(threads_per_cpu_env = g_getenv ("MONO_THREADS_PER_CPU")))
		threads_per_cpu = 1;
	else
		threads_per_cpu = CLAMP (atoi (threads_per_cpu_env), 1, 50);

	threads_count = mono_cpu_count () * threads_per_cpu;

	threadpool->limit_worker_min = threadpool->limit_io_min = threads_count;
	threadpool->limit_worker_max = threadpool->limit_io_max = threads_count * 100;

	threadpool->counters._.max_working = threadpool->limit_worker_min;

	threadpool->cpu_usage_state = g_new0 (MonoCpuUsageState, 1);

	threadpool->suspended = FALSE;

	status = STATUS_INITIALIZED;
}
void
mono_thread_pool_init (void)
{
	gint threads_per_cpu = 1;
	gint thread_count;
	gint cpu_count = mono_cpu_count ();
	int result;

	if (tp_inited == 2)
		return;

	result = InterlockedCompareExchange (&tp_inited, 1, 0);
	if (result == 1) {
		/* another thread is initializing; spin until it finishes */
		while (1) {
			SleepEx (1, FALSE);
			if (tp_inited == 2)
				return;
		}
	}

	MONO_GC_REGISTER_ROOT_FIXED (socket_io_data.sock_to_state);
	mono_mutex_init_recursive (&socket_io_data.io_lock);
	if (g_getenv ("MONO_THREADS_PER_CPU") != NULL) {
		threads_per_cpu = atoi (g_getenv ("MONO_THREADS_PER_CPU"));
		if (threads_per_cpu < 1)
			threads_per_cpu = 1;
	}

	thread_count = MIN (cpu_count * threads_per_cpu, 100 * cpu_count);
	threadpool_init (&async_tp, thread_count, MAX (100 * cpu_count, thread_count), async_invoke_thread);
	threadpool_init (&async_io_tp, cpu_count * 2, cpu_count * 4, async_invoke_thread);
	async_io_tp.is_io = TRUE;

	async_call_klass = mono_class_from_name (mono_defaults.corlib, "System", "MonoAsyncCall");
	g_assert (async_call_klass);

	mono_mutex_init (&threads_lock);
	threads = g_ptr_array_sized_new (thread_count);
	g_assert (threads);

	mono_mutex_init_recursive (&wsqs_lock);
	wsqs = g_ptr_array_sized_new (MAX (100 * cpu_count, thread_count));

#ifndef DISABLE_PERFCOUNTERS
	async_tp.pc_nitems = init_perf_counter ("Mono Threadpool", "Work Items Added");
	g_assert (async_tp.pc_nitems);

	async_io_tp.pc_nitems = init_perf_counter ("Mono Threadpool", "IO Work Items Added");
	g_assert (async_io_tp.pc_nitems);

	async_tp.pc_nthreads = init_perf_counter ("Mono Threadpool", "# of Threads");
	g_assert (async_tp.pc_nthreads);

	async_io_tp.pc_nthreads = init_perf_counter ("Mono Threadpool", "# of IO Threads");
	g_assert (async_io_tp.pc_nthreads);
#endif
	tp_inited = 2;
#ifdef DEBUG
	signal (SIGALRM, signal_handler);
	alarm (2);
#endif

	MONO_SEM_INIT (&monitor_sem, 0);
	monitor_state = MONITOR_STATE_AWAKE;
	monitor_njobs = 0;
}
void
mono_gc_base_init (void)
{
	MonoThreadInfoCallbacks cb;
	const char *env;
	int dummy;

	if (gc_initialized)
		return;

	mono_counters_init ();

	/*
	 * Handle the case when we are called from a thread different from the main thread,
	 * confusing libgc.
	 * FIXME: Move this to libgc where it belongs.
	 *
	 * we used to do this only when running on valgrind,
	 * but it happens also in other setups.
	 */
#if defined(HAVE_PTHREAD_GETATTR_NP) && defined(HAVE_PTHREAD_ATTR_GETSTACK) && !defined(__native_client__)
	{
		size_t size;
		void *sstart;
		pthread_attr_t attr;

		pthread_getattr_np (pthread_self (), &attr);
		pthread_attr_getstack (&attr, &sstart, &size);
		pthread_attr_destroy (&attr);
		/*g_print ("stackbottom pth is: %p\n", (char*)sstart + size);*/
#ifdef __ia64__
		/*
		 * The calculation above doesn't seem to work on ia64, also we need to set
		 * GC_register_stackbottom as well, but don't know how.
		 */
#else
		/* apparently with some linuxthreads implementations sstart can be NULL,
		 * fallback to the more imprecise method (bug# 78096).
		 */
		if (sstart) {
			GC_stackbottom = (char*)sstart + size;
		} else {
			int dummy;
			gsize stack_bottom = (gsize)&dummy;
			stack_bottom += 4095;
			stack_bottom &= ~4095;
			GC_stackbottom = (char*)stack_bottom;
		}
#endif
	}
#elif defined(HAVE_PTHREAD_GET_STACKSIZE_NP) && defined(HAVE_PTHREAD_GET_STACKADDR_NP)
	GC_stackbottom = (char*)pthread_get_stackaddr_np (pthread_self ());
#elif defined(__OpenBSD__)
#  include <pthread_np.h>
	{
		stack_t ss;
		int rslt;

		rslt = pthread_stackseg_np (pthread_self (), &ss);
		g_assert (rslt == 0);

		GC_stackbottom = (char*)ss.ss_sp;
	}
#elif defined(__native_client__)
	/* Do nothing, GC_stackbottom is set correctly in libgc */
#else
	{
		int dummy;
		gsize stack_bottom = (gsize)&dummy;
		stack_bottom += 4095;
		stack_bottom &= ~4095;
		/*g_print ("stackbottom is: %p\n", (char*)stack_bottom);*/
		GC_stackbottom = (char*)stack_bottom;
	}
#endif

#if !defined(PLATFORM_ANDROID)
	/*
	 * Setting GC_no_dls to TRUE prevents GC_find_limit from being called;
	 * skipping that call causes a seg fault on Android, so leave GC_no_dls
	 * unset there.
	 */
	GC_no_dls = TRUE;
#endif
	{
		if ((env = g_getenv ("MONO_GC_DEBUG"))) {
			char **opts = g_strsplit (env, ",", -1);
			for (char **ptr = opts; ptr && *ptr; ptr ++) {
				char *opt = *ptr;
				if (!strcmp (opt, "do-not-finalize")) {
					mono_do_not_finalize = 1;
				} else if (!strcmp (opt, "log-finalizers")) {
					log_finalizers = 1;
				}
			}
			g_strfreev (opts);
		}
	}

	GC_init ();

	GC_oom_fn = mono_gc_out_of_memory;
	GC_set_warn_proc (mono_gc_warning);
	GC_finalize_on_demand = 1;
	GC_finalizer_notifier = mono_gc_finalize_notify;

	GC_init_gcj_malloc (5, NULL);

	if ((env = g_getenv ("MONO_GC_PARAMS"))) {
		char **ptr, **opts = g_strsplit (env, ",", -1);
		for (ptr = opts; *ptr; ++ptr) {
			char *opt = *ptr;
			if (g_str_has_prefix (opt, "max-heap-size=")) {
				size_t max_heap;

				opt = strchr (opt, '=') + 1;
				if (*opt && mono_gc_parse_environment_string_extract_number (opt, &max_heap)) {
					if (max_heap < MIN_BOEHM_MAX_HEAP_SIZE) {
						fprintf (stderr, "max-heap-size must be at least %dMb.\n", MIN_BOEHM_MAX_HEAP_SIZE_IN_MB);
						exit (1);
					}
					GC_set_max_heap_size (max_heap);
				} else {
					fprintf (stderr, "max-heap-size must be an integer.\n");
					exit (1);
				}
				continue;
			} else if (g_str_has_prefix (opt, "toggleref-test")) {
				register_test_toggleref_callback ();
				continue;
			} else {
				/* Could be a parameter for sgen */
				/*
				fprintf (stderr, "MONO_GC_PARAMS must be a comma-delimited list of one or more of the following:\n");
				fprintf (stderr, "  max-heap-size=N (where N is an integer, possibly with a k, m or a g suffix)\n");
				exit (1);
				*/
			}
		}
		g_strfreev (opts);
	}

	memset (&cb, 0, sizeof (cb));
	cb.thread_register = boehm_thread_register;
	cb.thread_unregister = boehm_thread_unregister;
	cb.mono_method_is_critical = (gpointer)mono_runtime_is_critical_method;

	mono_threads_init (&cb, sizeof (MonoThreadInfo));
	mono_mutex_init (&mono_gc_lock);
	mono_mutex_init_recursive (&handle_section);

	mono_thread_info_attach (&dummy);

	mono_gc_enable_events ();

	MONO_GC_REGISTER_ROOT_FIXED (gc_handles [HANDLE_NORMAL].entries, MONO_ROOT_SOURCE_GC_HANDLE, "gc handles table");
	MONO_GC_REGISTER_ROOT_FIXED (gc_handles [HANDLE_PINNED].entries, MONO_ROOT_SOURCE_GC_HANDLE, "gc handles table");

	gc_initialized = TRUE;
}
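/*
 * Usage example for the environment variables parsed above (the values are
 * illustrative): the following caps the Boehm heap at 512 MB and enables
 * finalizer logging.
 *
 *   MONO_GC_PARAMS=max-heap-size=512m MONO_GC_DEBUG=log-finalizers mono app.exe
 */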
void
mono_debugger_initialize (void)
{
	mono_mutex_init_recursive (&debugger_lock_mutex);
	initialized = 1;
}