Example #1
static void
clock_sleep_ns_abs (guint64 ns_abs)
{
#ifdef HAVE_CLOCK_NANOSLEEP
	int ret;
	struct timespec then;

	then.tv_sec = ns_abs / 1000000000;
	then.tv_nsec = ns_abs % 1000000000;

	do {
		ret = clock_nanosleep (sampling_posix_clock, TIMER_ABSTIME, &then, NULL);

		if (ret != 0 && ret != EINTR)
			g_error ("%s: clock_nanosleep () returned %d", __func__, ret);
	} while (ret == EINTR && mono_atomic_load_i32 (&sampling_thread_running));
#else
	int ret;
	gint64 diff;
	struct timespec req;

	/*
	 * What follows is a crude attempt at emulating clock_nanosleep () on OSs
	 * which don't provide it (e.g. FreeBSD).
	 *
	 * The problem with nanosleep () is that if it is interrupted by a signal,
	 * time will drift as a result of having to restart the call after the
	 * signal handler has finished. For this reason, we avoid using the rem
	 * argument of nanosleep (). Instead, before every nanosleep () call, we
	 * check if enough time has passed to satisfy the sleep request. If yes, we
	 * simply return. If not, we calculate the difference and do another sleep.
	 *
	 * This should reduce the amount of drift that happens because we account
	 * for the time spent executing the signal handler, which nanosleep () is
	 * not guaranteed to do for the rem argument.
	 *
	 * The downside to this approach is that it is slightly expensive: We have
	 * to make an extra system call to retrieve the current time whenever we're
	 * going to restart a nanosleep () call. This is unlikely to be a problem
	 * in practice since the sampling thread won't be receiving many signals in
	 * the first place (it's a tools thread, so no STW), and because typical
	 * sleep periods for the thread are many orders of magnitude bigger than
	 * the time it takes to actually perform that system call (just a few
	 * nanoseconds).
	 */
	do {
		diff = (gint64) ns_abs - (gint64) clock_get_time_ns ();

		if (diff <= 0)
			break;

		req.tv_sec = diff / 1000000000;
		req.tv_nsec = diff % 1000000000;

		if ((ret = nanosleep (&req, NULL)) == -1 && errno != EINTR)
			g_error ("%s: nanosleep () returned -1, errno = %d", __func__, errno);
	} while (ret == -1 && mono_atomic_load_i32 (&sampling_thread_running));
#endif
}
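The key idea above is to sleep until an absolute deadline rather than for a relative duration, so restarts after EINTR cannot accumulate drift. Below is a minimal, self-contained sketch of the same pattern, assuming CLOCK_MONOTONIC in place of Mono's sampling_posix_clock; sleep_until_ns is a hypothetical name, not a Mono function.

#include <errno.h>
#include <stdint.h>
#include <time.h>

/* Sleep until an absolute deadline (nanoseconds on CLOCK_MONOTONIC).
 * Because the deadline is absolute, restarting after EINTR does not drift. */
static void
sleep_until_ns (uint64_t deadline_ns)
{
	struct timespec then;
	int ret;

	then.tv_sec = deadline_ns / 1000000000;
	then.tv_nsec = deadline_ns % 1000000000;

	do {
		ret = clock_nanosleep (CLOCK_MONOTONIC, TIMER_ABSTIME, &then, NULL);
	} while (ret == EINTR);
}

A periodic caller would simply advance the deadline by a fixed period on each iteration, exactly as the sampling loop in Example #5 does with sleep += 1000000000 / freq.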
Example #2
static inline void
request_interrupt (gpointer thread_info, HANDLE native_thread_handle, gint32 pending_apc_slot, PAPCFUNC apc_callback, DWORD tid)
{
	/*
	 * On Windows platforms, an async interrupt/abort request queues an APC
	 * that needs to be processed by the target thread before it can return from
	 * an alertable OS wait call and complete the mono interrupt/abort request.
	 * Uncontrolled queuing of APCs could flood the APC queue, preventing the
	 * target thread from returning from its alertable OS wait call and blocking
	 * the interrupt/abort requests from completing. This check makes sure that
	 * only one APC per type gets queued, preventing potential flooding of the
	 * APC queue. NOTE: this code executes regardless of whether the targeted
	 * thread is currently in an alertable wait or not. This is done to prevent
	 * races between interrupt/abort requests and alertable wait calls. Threads
	 * already in an alertable wait should handle WAIT_IO_COMPLETION return
	 * scenarios and restart the alertable wait operation if needed, or take
	 * other actions (like servicing the interrupt/abort request).
	 */
	MonoThreadInfo *info = (MonoThreadInfo *)thread_info;
	gint32 old_wait_info, new_wait_info;

	do {
		old_wait_info = mono_atomic_load_i32 (&info->thread_wait_info);
		if (old_wait_info & pending_apc_slot)
			return;

		new_wait_info = old_wait_info | pending_apc_slot;
	} while (mono_atomic_cas_i32 (&info->thread_wait_info, new_wait_info, old_wait_info) != old_wait_info);

	THREADS_INTERRUPT_DEBUG ("%06d - Interrupting/Aborting syscall in thread %06d", GetCurrentThreadId (), tid);
	QueueUserAPC (apc_callback, native_thread_handle, (ULONG_PTR)NULL);
}
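The compare-and-swap loop above sets the pending-APC bit exactly once, no matter how many threads race to request an interrupt. A minimal sketch of the same set-a-flag-bit-once pattern using C11 atomics (try_set_flag and its parameters are hypothetical names, not Mono API):

#include <stdatomic.h>
#include <stdbool.h>

/* Returns true only for the caller that actually flipped the bit from 0 to 1,
 * mirroring the single-APC-per-type guarantee in request_interrupt (). */
static bool
try_set_flag (atomic_int *wait_info, int flag)
{
	int old_val = atomic_load (wait_info);

	do {
		if (old_val & flag)
			return false; /* this APC type is already pending */
	} while (!atomic_compare_exchange_weak (wait_info, &old_val, old_val | flag));

	return true;
}

Only the caller that sees true would go on to call QueueUserAPC (), which is what keeps the APC queue from being flooded.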
Example #3
static void
clock_sleep_ns_abs (guint64 ns_abs)
{
	kern_return_t ret;
	mach_timespec_t then, remain_unused;

	then.tv_sec = ns_abs / 1000000000;
	then.tv_nsec = ns_abs % 1000000000;

	do {
		ret = clock_sleep (sampling_clock_service, TIME_ABSOLUTE, then, &remain_unused);

		if (ret != KERN_SUCCESS && ret != KERN_ABORTED)
			g_error ("%s: clock_sleep () returned %d", __func__, ret);
	} while (ret == KERN_ABORTED && mono_atomic_load_i32 (&sampling_thread_running));
}
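sampling_clock_service here is a Mach clock handle that has to be obtained from the host before clock_sleep () can be used. A hedged sketch of how such a handle is typically acquired with the standard host_get_clock_service () call (the init_sampling_clock wrapper is an assumption for illustration, not the Mono function):

#include <mach/clock.h>
#include <mach/mach.h>
#include <stdio.h>
#include <stdlib.h>

static clock_serv_t sampling_clock_service;

/* Obtain a send right to the system clock so that clock_sleep () can later
 * be called with TIME_ABSOLUTE deadlines. */
static void
init_sampling_clock (void)
{
	kern_return_t ret = host_get_clock_service (mach_host_self (), SYSTEM_CLOCK, &sampling_clock_service);

	if (ret != KERN_SUCCESS) {
		fprintf (stderr, "%s: host_get_clock_service () returned %d\n", __func__, ret);
		abort ();
	}
}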
Example #4
void
mono_runtime_shutdown_stat_profiler (void)
{
	mono_atomic_store_i32 (&sampling_thread_running, 0);

	mono_profiler_sampling_thread_post ();

#ifndef HOST_DARWIN
	/*
	 * There is a slight problem when we're using CLOCK_PROCESS_CPUTIME_ID: If
	 * we're shutting down and there's largely no activity in the process other
	 * than waiting for the sampler thread to shut down, it can take upwards of
	 * 20 seconds (depending on a lot of factors) for us to shut down because
	 * the sleep progresses very slowly as a result of the low CPU activity.
	 *
	 * We fix this by repeatedly sending the profiler signal to the sampler
	 * thread in order to interrupt the sleep. clock_sleep_ns_abs () will check
	 * sampling_thread_running upon an interrupt and return immediately if it's
	 * zero. profiler_signal_handler () has a special case to ignore the signal
	 * for the sampler thread.
	 */
	MonoThreadInfo *info;

	// Did it shut down already?
	if ((info = mono_thread_info_lookup (sampling_thread))) {
		while (!mono_atomic_load_i32 (&sampling_thread_exiting)) {
			mono_threads_pthread_kill (info, profiler_signal);
			mono_thread_info_usleep (10 * 1000 /* 10ms */);
		}

		// Make sure info can be freed.
		mono_hazard_pointer_clear (mono_hazard_pointer_get (), 1);
	}
#endif

	mono_os_event_wait_one (&sampling_thread_exited, MONO_INFINITE_WAIT, FALSE);
	mono_os_event_destroy (&sampling_thread_exited);

	/*
	 * We can't safely remove the signal handler because we have no guarantee
	 * that all pending signals have been delivered at this point. This should
	 * not really be a problem anyway.
	 */
	//remove_signal_handler (profiler_signal);
}
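The loop above repeatedly signals the sampler thread purely to break it out of its sleep; clock_sleep_ns_abs () then notices that sampling_thread_running is zero and returns. A stripped-down sketch of that poke-until-acknowledged pattern with plain pthreads (poke_until_exiting and its arguments are hypothetical names, not Mono identifiers):

#include <pthread.h>
#include <signal.h>
#include <stdatomic.h>
#include <unistd.h>

/* Keep interrupting the target thread's sleep until it confirms, via the
 * exiting flag, that it has observed the shutdown request. */
static void
poke_until_exiting (pthread_t sampler, int sig, atomic_int *exiting)
{
	while (!atomic_load (exiting)) {
		pthread_kill (sampler, sig);
		usleep (10 * 1000); /* 10 ms, same cadence as the loop above */
	}
}

The 10 ms pause keeps the shutdown path from spamming signals while still bounding how long a slow, low-CPU-activity sleep can delay process exit.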
Example #5
static gsize
sampling_thread_func (gpointer unused)
{
	MonoInternalThread *thread = mono_thread_internal_current ();

	thread->flags |= MONO_THREAD_FLAG_DONT_MANAGE;

	ERROR_DECL (error);

	MonoString *name = mono_string_new_checked (mono_get_root_domain (), "Profiler Sampler", error);
	mono_error_assert_ok (error);
	mono_thread_set_name_internal (thread, name, FALSE, FALSE, error);
	mono_error_assert_ok (error);

	mono_thread_info_set_flags (MONO_THREAD_INFO_FLAGS_NO_GC | MONO_THREAD_INFO_FLAGS_NO_SAMPLE);

	int old_policy;
	struct sched_param old_sched;
	pthread_getschedparam (pthread_self (), &old_policy, &old_sched);

	/*
	 * Attempt to switch the thread to real time scheduling. This will not
	 * necessarily work on all OSs; for example, most Linux systems will give
	 * us EPERM here unless configured to allow this.
	 *
	 * TODO: This does not work on Mac (and maybe some other OSs). On Mac, we
	 * have to use the Mach thread policy routines to switch to real-time
	 * scheduling. This is quite tricky as we need to specify how often we'll
	 * be doing work (easy), the normal processing time needed (also easy),
	 * and the maximum amount of processing time needed (hard). This is
	 * further complicated by the fact that if we misbehave and take too long
	 * to do our work, the kernel may knock us back down to the normal thread
	 * scheduling policy without telling us.
	 */
	struct sched_param sched = { .sched_priority = sched_get_priority_max (SCHED_FIFO) };
	pthread_setschedparam (pthread_self (), SCHED_FIFO, &sched);

	MonoProfilerSampleMode mode;

init:
	mono_profiler_get_sample_mode (NULL, &mode, NULL);

	if (mode == MONO_PROFILER_SAMPLE_MODE_NONE) {
		mono_profiler_sampling_thread_wait ();

		if (!mono_atomic_load_i32 (&sampling_thread_running))
			goto done;

		goto init;
	}

	clock_init (mode);

	for (guint64 sleep = clock_get_time_ns (); mono_atomic_load_i32 (&sampling_thread_running); clock_sleep_ns_abs (sleep)) {
		uint32_t freq;
		MonoProfilerSampleMode new_mode;

		mono_profiler_get_sample_mode (NULL, &new_mode, &freq);

		if (new_mode != mode) {
			clock_cleanup ();
			goto init;
		}

		sleep += 1000000000 / freq;

		FOREACH_THREAD_SAFE_EXCLUDE (info, MONO_THREAD_INFO_FLAGS_NO_SAMPLE) {
			g_assert (mono_thread_info_get_tid (info) != sampling_thread);

			/*
			 * Require an ack for the last sampling signal sent to the thread
			 * so that we don't overflow the signal queue, leading to all sorts
			 * of problems (e.g. GC STW failing).
			 */
			if (profiler_signal != SIGPROF && !mono_atomic_cas_i32 (&info->profiler_signal_ack, 0, 1))
				continue;

			mono_threads_pthread_kill (info, profiler_signal);
			mono_atomic_inc_i32 (&profiler_signals_sent);
		} FOREACH_THREAD_SAFE_END
	}