MonoStringHandle
mono_string_new_handle (MonoDomain *domain, const char *data, MonoError *error)
{
	return MONO_HANDLE_NEW (MonoString, mono_string_new_checked (domain, data, error));
}
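
/*
 * Body of the profiler sampling thread (descriptive summary of the code
 * below): the thread names itself "Profiler Sampler", sets
 * MONO_THREAD_INFO_FLAGS_NO_GC | MONO_THREAD_INFO_FLAGS_NO_SAMPLE on itself,
 * attempts to switch to real-time scheduling, and then, at the currently
 * configured sampling frequency, sends profiler_signal to every other
 * attached thread that is not flagged MONO_THREAD_INFO_FLAGS_NO_SAMPLE,
 * re-reading the sample mode on every iteration and looping until
 * sampling_thread_running is cleared.
 */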
static gsize
sampling_thread_func (gpointer unused)
{
	MonoInternalThread *thread = mono_thread_internal_current ();

	thread->flags |= MONO_THREAD_FLAG_DONT_MANAGE;

	ERROR_DECL (error);

	MonoString *name = mono_string_new_checked (mono_get_root_domain (), "Profiler Sampler", error);
	mono_error_assert_ok (error);
	mono_thread_set_name_internal (thread, name, FALSE, FALSE, error);
	mono_error_assert_ok (error);

	mono_thread_info_set_flags (MONO_THREAD_INFO_FLAGS_NO_GC | MONO_THREAD_INFO_FLAGS_NO_SAMPLE);

	int old_policy;
	struct sched_param old_sched;
	pthread_getschedparam (pthread_self (), &old_policy, &old_sched);

	/*
	 * Attempt to switch the thread to real time scheduling. This will not
	 * necessarily work on all OSs; for example, most Linux systems will give
	 * us EPERM here unless configured to allow this.
	 *
	 * TODO: This does not work on Mac (and maybe some other OSs). On Mac, we
	 * have to use the Mach thread policy routines to switch to real-time
	 * scheduling. This is quite tricky as we need to specify how often we'll
	 * be doing work (easy), the normal processing time needed (also easy),
	 * and the maximum amount of processing time needed (hard). This is
	 * further complicated by the fact that if we misbehave and take too long
	 * to do our work, the kernel may knock us back down to the normal thread
	 * scheduling policy without telling us.
	 */
	struct sched_param sched = { .sched_priority = sched_get_priority_max (SCHED_FIFO) };
	pthread_setschedparam (pthread_self (), SCHED_FIFO, &sched);

	MonoProfilerSampleMode mode;

init:
	mono_profiler_get_sample_mode (NULL, &mode, NULL);

	if (mode == MONO_PROFILER_SAMPLE_MODE_NONE) {
		mono_profiler_sampling_thread_wait ();

		if (!mono_atomic_load_i32 (&sampling_thread_running))
			goto done;

		goto init;
	}

	clock_init (mode);

	for (guint64 sleep = clock_get_time_ns (); mono_atomic_load_i32 (&sampling_thread_running); clock_sleep_ns_abs (sleep)) {
		uint32_t freq;
		MonoProfilerSampleMode new_mode;

		mono_profiler_get_sample_mode (NULL, &new_mode, &freq);

		if (new_mode != mode) {
			clock_cleanup ();
			goto init;
		}

		sleep += 1000000000 / freq;

		FOREACH_THREAD_SAFE_EXCLUDE (info, MONO_THREAD_INFO_FLAGS_NO_SAMPLE) {
			g_assert (mono_thread_info_get_tid (info) != sampling_thread);

			/*
			 * Require an ack for the last sampling signal sent to the thread
			 * so that we don't overflow the signal queue, leading to all
			 * sorts of problems (e.g. GC STW failing).
			 */
			if (profiler_signal != SIGPROF && !mono_atomic_cas_i32 (&info->profiler_signal_ack, 0, 1))
				continue;

			mono_threads_pthread_kill (info, profiler_signal);
			mono_atomic_inc_i32 (&profiler_signals_sent);
		} FOREACH_THREAD_SAFE_END
	}
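
/*
 * Illustrative sketch, not part of the original source: the loop above paces
 * itself with an absolute deadline -- it adds the period (1000000000 / freq
 * nanoseconds) to `sleep` and then sleeps until that absolute timestamp, so
 * the time spent signalling threads does not accumulate as drift. The
 * clock_get_time_ns () and clock_sleep_ns_abs () helpers it uses are defined
 * elsewhere in the runtime, outside this excerpt. The standalone equivalent
 * below uses plain POSIX clock_gettime ()/clock_nanosleep () with
 * TIMER_ABSTIME on CLOCK_MONOTONIC (available on Linux and the BSDs, but not
 * on macOS); the names deadline_now_ns, deadline_sleep_ns_abs and pace_demo
 * are hypothetical and do not exist in Mono.
 */

#include <errno.h>
#include <stdint.h>
#include <time.h>

static uint64_t
deadline_now_ns (void)
{
	struct timespec ts;
	clock_gettime (CLOCK_MONOTONIC, &ts);
	return (uint64_t) ts.tv_sec * 1000000000 + (uint64_t) ts.tv_nsec;
}

static void
deadline_sleep_ns_abs (uint64_t ns)
{
	struct timespec ts = { .tv_sec = (time_t) (ns / 1000000000), .tv_nsec = (long) (ns % 1000000000) };

	/* Sleep until an absolute point in time; a late wakeup here does not
	 * push later deadlines back, it only shortens the next sleep. */
	while (clock_nanosleep (CLOCK_MONOTONIC, TIMER_ABSTIME, &ts, NULL) == EINTR);
}

static void
pace_demo (uint32_t freq, int iterations)
{
	/* Same shape as the sampling loop: advance the deadline by one period,
	 * do one round of work, then sleep until the deadline. */
	for (uint64_t deadline = deadline_now_ns (); iterations-- > 0; deadline_sleep_ns_abs (deadline)) {
		deadline += 1000000000 / freq;

		/* ... one round of work goes here, e.g. signalling target threads ... */
	}
}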