/*
 * GC event callback registered with the Boehm collector.  Bridges collector
 * notifications to the runtime: brackets stop/restart-of-the-world with the
 * thread-info suspend lock, fires dtrace probes, and updates perf counters
 * and aggregate GC statistics.
 */
static void
on_gc_notification (GCEventType event)
{
	MonoGCEvent e = (MonoGCEvent)event;

	switch (e) {
	case MONO_GC_EVENT_PRE_STOP_WORLD:
		MONO_GC_WORLD_STOP_BEGIN ();
		/* Keep runtime-side suspension from racing with the collector. */
		mono_thread_info_suspend_lock ();
		break;

	case MONO_GC_EVENT_POST_STOP_WORLD:
		MONO_GC_WORLD_STOP_END ();
		break;

	case MONO_GC_EVENT_PRE_START_WORLD:
		MONO_GC_WORLD_RESTART_BEGIN (1);
		break;

	case MONO_GC_EVENT_POST_START_WORLD:
		MONO_GC_WORLD_RESTART_END (1);
		mono_thread_info_suspend_unlock ();
		break;

	case MONO_GC_EVENT_START:
		MONO_GC_BEGIN (1);
#ifndef DISABLE_PERFCOUNTERS
		if (mono_perfcounters)
			mono_perfcounters->gc_collections0++;
#endif
		gc_stats.major_gc_count ++;
		gc_start_time = mono_100ns_ticks ();
		break;

	case MONO_GC_EVENT_END:
		MONO_GC_END (1);
#if defined(ENABLE_DTRACE) && defined(__sun__)
		/* This works around a dtrace -G problem on Solaris.
		   Limit its actual use to when the probe is enabled. */
		if (MONO_GC_END_ENABLED ())
			sleep(0);
#endif
#ifndef DISABLE_PERFCOUNTERS
		if (mono_perfcounters) {
			guint64 heap_size = GC_get_heap_size ();
			guint64 used_size = heap_size - GC_get_free_bytes ();
			mono_perfcounters->gc_total_bytes = used_size;
			mono_perfcounters->gc_committed_bytes = heap_size;
			mono_perfcounters->gc_reserved_bytes = heap_size;
			/* Boehm is non-generational: report the whole heap as gen0. */
			mono_perfcounters->gc_gen0size = heap_size;
		}
#endif
		{
			/* Compute the elapsed time once so the accumulated stat and
			 * the trace message agree, and print it with a 64-bit format:
			 * the previous "%d" with a gint64 argument was undefined
			 * behavior. */
			gint64 elapsed_usecs = (mono_100ns_ticks () - gc_start_time) / 10;
			gc_stats.major_gc_time_usecs += elapsed_usecs;
			mono_trace_message (MONO_TRACE_GC, "gc took %" G_GINT64_FORMAT " usecs", elapsed_usecs);
		}
		break;

	default:
		/* Other events need no runtime-side handling. */
		break;
	}

	mono_profiler_gc_event (e, 0);
}
/*
 * GC event callback: takes/releases the thread-info suspend lock around
 * stop/start-of-the-world and maintains perf counters and aggregate GC
 * statistics for collection count and duration.
 */
static void
on_gc_notification (GCEventType event)
{
	MonoGCEvent e = (MonoGCEvent)event;

	if (e == MONO_GC_EVENT_PRE_STOP_WORLD)
		mono_thread_info_suspend_lock ();
	else if (e == MONO_GC_EVENT_POST_START_WORLD)
		mono_thread_info_suspend_unlock ();

	if (e == MONO_GC_EVENT_START) {
		if (mono_perfcounters)
			mono_perfcounters->gc_collections0++;
		mono_stats.major_gc_count ++;
		gc_start_time = mono_100ns_ticks ();
	} else if (e == MONO_GC_EVENT_END) {
		gint64 elapsed_usecs;

		if (mono_perfcounters) {
			guint64 heap_size = GC_get_heap_size ();
			guint64 used_size = heap_size - GC_get_free_bytes ();
			mono_perfcounters->gc_total_bytes = used_size;
			mono_perfcounters->gc_committed_bytes = heap_size;
			mono_perfcounters->gc_reserved_bytes = heap_size;
			/* Boehm is non-generational: report the whole heap as gen0. */
			mono_perfcounters->gc_gen0size = heap_size;
		}

		/* Compute the elapsed time once so the accumulated stat and the
		 * trace message agree, and use a 64-bit format: the previous
		 * "%d" with a gint64 argument was undefined behavior. */
		elapsed_usecs = (mono_100ns_ticks () - gc_start_time) / 10;
		mono_stats.major_gc_time_usecs += elapsed_usecs;
		mono_trace_message (MONO_TRACE_GC, "gc took %" G_GINT64_FORMAT " usecs", elapsed_usecs);
	}

	mono_profiler_gc_event (e, 0);
}
static gint64 get_boot_time (void) { #if defined (HAVE_SYS_PARAM_H) && defined (KERN_BOOTTIME) int mib [2]; size_t size; time_t now; struct timeval boottime; (void)time(&now); mib [0] = CTL_KERN; mib [1] = KERN_BOOTTIME; size = sizeof(boottime); if (sysctl(mib, 2, &boottime, &size, NULL, 0) != -1) return (gint64)((now - boottime.tv_sec) * MTICKS_PER_SEC); #else FILE *uptime = fopen ("/proc/uptime", "r"); if (uptime) { double upt; if (fscanf (uptime, "%lf", &upt) == 1) { gint64 now = mono_100ns_ticks (); fclose (uptime); return now - (gint64)(upt * MTICKS_PER_SEC); } fclose (uptime); } #endif /* a made up uptime of 300 seconds */ return (gint64)300 * MTICKS_PER_SEC; }
/*
 * sleep_interruptable:
 * @ms: duration in milliseconds, or INFINITE to sleep until interrupted
 * @alerted: out parameter; set to TRUE when the sleep was interrupted
 *
 * Sleeps on a coop condition variable so the thread stays interruptible.
 * Returns WAIT_IO_COMPLETION when interrupted, 0 when the full duration
 * elapsed.
 */
static inline guint32
sleep_interruptable (guint32 ms, gboolean *alerted)
{
	gint64 now, end;

	g_assert (INFINITE == G_MAXUINT32);
	g_assert (alerted);

	*alerted = FALSE;

	if (ms != INFINITE) {
		/* Widen before multiplying: ms * 10000 overflows guint32 for
		 * timeouts of roughly 71 minutes or more. */
		end = mono_100ns_ticks () + (gint64) ms * 1000 * 10;
	}

	mono_lazy_initialize (&sleep_init, sleep_initialize);

	mono_coop_mutex_lock (&sleep_mutex);

	for (;;) {
		if (ms != INFINITE) {
			now = mono_100ns_ticks ();
			if (now > end)
				break;
		}

		/* Register the interrupt token; if an interrupt already arrived,
		 * bail out before waiting. */
		mono_thread_info_install_interrupt (sleep_interrupt, NULL, alerted);
		if (*alerted) {
			mono_coop_mutex_unlock (&sleep_mutex);
			return WAIT_IO_COMPLETION;
		}

		if (ms != INFINITE)
			mono_coop_cond_timedwait (&sleep_cond, &sleep_mutex, (end - now) / 10 / 1000); /* 100ns ticks -> ms */
		else
			mono_coop_cond_wait (&sleep_cond, &sleep_mutex);

		mono_thread_info_uninstall_interrupt (alerted);
		if (*alerted) {
			mono_coop_mutex_unlock (&sleep_mutex);
			return WAIT_IO_COMPLETION;
		}
	}

	mono_coop_mutex_unlock (&sleep_mutex);

	return 0;
}
/*
 * mono_cpu_usage:
 * @prev: previous sample state, or NULL to measure since process start
 *
 * Returns the process CPU usage as a percentage (0-100) of the total CPU
 * time available across all cores since the @prev sample was taken.
 * When @prev is non-NULL it is updated with the current sample so the next
 * call measures the interval in between.  Returns -1 on failure.
 */
gint32
mono_cpu_usage (MonoCpuUsageState *prev)
{
	gint32 cpu_usage = 0;
	gint64 cpu_total_time;
	gint64 cpu_busy_time;

#ifndef HOST_WIN32
	struct rusage resource_usage;
	gint64 current_time;
	gint64 kernel_time;
	gint64 user_time;

	if (getrusage (RUSAGE_SELF, &resource_usage) == -1) {
		g_error ("getrusage() failed, errno is %d (%s)\n", errno, strerror (errno));
		return -1;
	}

	current_time = mono_100ns_ticks ();
	/* Widen tv_sec before multiplying: on platforms where the rusage
	 * fields are 32-bit, seconds * 10^7 would overflow before the gint64
	 * assignment.  tv_usec < 10^6, so tv_usec * 10 cannot overflow. */
	kernel_time = (gint64) resource_usage.ru_stime.tv_sec * 1000 * 1000 * 10 + resource_usage.ru_stime.tv_usec * 10;
	user_time = (gint64) resource_usage.ru_utime.tv_sec * 1000 * 1000 * 10 + resource_usage.ru_utime.tv_usec * 10;

	cpu_busy_time = (user_time - (prev ? prev->user_time : 0)) + (kernel_time - (prev ? prev->kernel_time : 0));
	/* Wall time elapsed, scaled by core count = total CPU time available. */
	cpu_total_time = (current_time - (prev ? prev->current_time : 0)) * mono_cpu_count ();

	if (prev) {
		prev->kernel_time = kernel_time;
		prev->user_time = user_time;
		prev->current_time = current_time;
	}
#else
	guint64 idle_time;
	guint64 kernel_time;
	guint64 user_time;

	if (!GetSystemTimes ((FILETIME*) &idle_time, (FILETIME*) &kernel_time, (FILETIME*) &user_time)) {
		g_error ("GetSystemTimes() failed, error code is %d\n", GetLastError ());
		return -1;
	}

	/* On Windows, kernel_time already includes idle time, so busy time is
	 * the total minus the idle delta. */
	cpu_total_time = (gint64)((user_time - (prev ? prev->user_time : 0)) + (kernel_time - (prev ? prev->kernel_time : 0)));
	cpu_busy_time = (gint64)(cpu_total_time - (idle_time - (prev ? prev->idle_time : 0)));

	if (prev) {
		prev->idle_time = idle_time;
		prev->kernel_time = kernel_time;
		prev->user_time = user_time;
	}
#endif

	if (cpu_total_time > 0 && cpu_busy_time > 0)
		cpu_usage = (gint32)(cpu_busy_time * 100 / cpu_total_time);

	g_assert (cpu_usage >= 0);
	g_assert (cpu_usage <= 100);

	return cpu_usage;
}
/*
 * Emit the trace-line prefix "[thread: seconds depth] " and adjust the
 * nesting depth by @diff.  A negative @diff is applied before printing
 * (scope exit), a positive one after (scope entry).
 */
static void
indent (int diff)
{
	/* Lazily capture the trace epoch on first use. */
	if (start_time == 0)
		start_time = mono_100ns_ticks ();

	/* Closing a scope: pull the level back before emitting the prefix. */
	if (diff < 0)
		indent_level += diff;

	printf ("[%p: %.5f %d] ", (void*)GetCurrentThreadId (), seconds_since_start (), indent_level);

	/* Opening a scope: push the level after emitting the prefix. */
	if (diff > 0)
		indent_level += diff;
}
/*
 * Emit the trace-line prefix "[thread: seconds depth] " and adjust the
 * nesting depth by @diff.  A negative @diff is applied before printing
 * (scope exit), a positive one after (scope entry).
 */
static void
indent (int diff)
{
	/* Lazily capture the trace epoch on first use. */
	if (start_time == 0)
		start_time = mono_100ns_ticks ();

	/* Closing a scope: pull the level back before emitting the prefix. */
	if (diff < 0)
		indent_level += diff;

	printf ("[%p: %.5f %d] ", (void*)mono_native_thread_id_get (), seconds_since_start (), indent_level);

	/* Opening a scope: push the level after emitting the prefix. */
	if (diff > 0)
		indent_level += diff;
}
/*
 * GC event callback: maintains perf counters and aggregate GC statistics
 * for collection count, heap sizes and collection duration, then forwards
 * the event to the profiler.
 */
static void
on_gc_notification (GCEventType event)
{
	if (event == MONO_GC_EVENT_START) {
		/* NULL-guard added: mono_perfcounters may be uninitialized.
		 * The other GC notification paths already guard this access. */
		if (mono_perfcounters)
			mono_perfcounters->gc_collections0++;
		mono_stats.major_gc_count ++;
		gc_start_time = mono_100ns_ticks ();
	} else if (event == MONO_GC_EVENT_END) {
		gint64 elapsed_usecs;

		if (mono_perfcounters) {
			guint64 heap_size = GC_get_heap_size ();
			guint64 used_size = heap_size - GC_get_free_bytes ();
			mono_perfcounters->gc_total_bytes = used_size;
			mono_perfcounters->gc_committed_bytes = heap_size;
			mono_perfcounters->gc_reserved_bytes = heap_size;
			/* Boehm is non-generational: report the whole heap as gen0. */
			mono_perfcounters->gc_gen0size = heap_size;
		}

		/* Compute the elapsed time once so the accumulated stat and the
		 * trace message agree, and use a 64-bit format: the previous
		 * "%d" with a gint64 argument was undefined behavior. */
		elapsed_usecs = (mono_100ns_ticks () - gc_start_time) / 10;
		mono_stats.major_gc_time_usecs += elapsed_usecs;
		mono_trace_message (MONO_TRACE_GC, "gc took %" G_GINT64_FORMAT " usecs", elapsed_usecs);
	}

	mono_profiler_gc_event ((MonoGCEvent) event, 0);
}
/* Returns the number of milliseconds from boot time: this should be monotonic */ guint32 mono_msec_ticks (void) { static gint64 boot_time = 0; gint64 now; if (!boot_time) boot_time = get_boot_time (); now = mono_100ns_ticks (); /*printf ("now: %llu (boot: %llu) ticks: %llu\n", (gint64)now, (gint64)boot_time, (gint64)(now - boot_time));*/ return (now - boot_time)/10000; }
/*
 * threadpool_append_jobs:
 * @tp: the thread pool (worker or IO pool)
 * @jobs: array of work items to queue
 * @njobs: number of entries in @jobs
 *
 * Queues @njobs work items on @tp, lazily bootstrapping the pool's threads
 * on first use, and wakes up waiting workers.  Jobs belonging to an
 * unloading domain are skipped.
 */
static void
threadpool_append_jobs (ThreadPool *tp, MonoObject **jobs, gint njobs)
{
	static int job_counter;
	MonoObject *ar;
	gint i;

	if (mono_runtime_is_shutting_down ())
		return;

	/* Exactly one caller wins the 0 -> 1 CAS on pool_status and
	 * bootstraps the pool's monitor and first worker thread. */
	if (tp->pool_status == 0 && InterlockedCompareExchange (&tp->pool_status, 1, 0) == 0) {
		if (!tp->is_io) {
			mono_thread_create_internal (mono_get_root_domain (), monitor_thread, NULL, TRUE, SMALL_STACK);
			threadpool_start_thread (tp);
		}
		/* Create on demand up to min_threads to avoid startup penalty for apps that don't use
		 * the threadpool that much */
		if (mono_config_is_server_mode ()) {
			mono_thread_create_internal (mono_get_root_domain (), threadpool_start_idle_threads, tp, TRUE, SMALL_STACK);
		}
	}

	for (i = 0; i < njobs; i++) {
		ar = jobs [i];
		if (ar == NULL || mono_domain_is_unloading (ar->vtable->domain))
			continue; /* Might happen when cleaning domain jobs */
		/* Sample the enqueue time on roughly 1 in 10 non-IO jobs; this
		 * feeds the pool's latency-based sizing heuristic. */
		if (!tp->is_io && (InterlockedIncrement (&job_counter) % 10) == 0) {
			MonoAsyncResult *o = (MonoAsyncResult *) ar;
			o->add_time = mono_100ns_ticks ();
		}
		threadpool_jobs_inc (ar);
#ifndef DISABLE_PERFCOUNTERS
		mono_perfcounter_update_value (tp->pc_nitems, TRUE, 1);
#endif
		/* Prefer the current worker's local work-stealing queue; fall
		 * back to the shared concurrent queue. */
		if (!tp->is_io && mono_wsq_local_push (ar))
			continue;
		mono_cq_enqueue (tp->queue, ar);
	}

	/* Wake at most one waiting worker per queued job, capped by the
	 * pool's maximum thread count. */
	for (i = 0; tp->waiting > 0 && i < MIN(njobs, tp->max_threads); i++)
		pulse_on_new_job (tp);
}
/* Seconds elapsed since start_time was captured (100ns ticks -> seconds). */
static double
seconds_since_start (void)
{
	guint64 elapsed_ticks = mono_100ns_ticks () - start_time;

	return elapsed_ticks / 10000000.0;
}
/*
 * process_idle_times:
 * @tp: the thread pool
 * @t: 100ns timestamp recorded when the just-finished job was enqueued
 *     (0 or negative means "no sample")
 *
 * Folds one job's queue-to-execution latency into the pool's running
 * statistics.  Once more than ~0.5s has passed since the last check, the
 * caller that wins the race computes a new average latency and compares it
 * against the previous two averages to decide whether the pool should
 * shrink by one thread (grow decisions are recorded in new_threads but
 * only the shrink path acts here).
 */
static void
process_idle_times (ThreadPool *tp, gint64 t)
{
	gint64 ticks;
	gint64 avg;
	gboolean compute_avg;
	gint new_threads;
	gint64 per1;

	if (tp->ignore_times || t <= 0)
		return;

	compute_avg = FALSE;
	ticks = mono_100ns_ticks ();
	t = ticks - t; /* latency of this job, in 100ns units */

	SPIN_LOCK (tp->sp_lock);
	/* ignore_times doubles as a "someone is computing the average" flag;
	 * re-check it under the lock and drop this sample if set. */
	if (tp->ignore_times) {
		SPIN_UNLOCK (tp->sp_lock);
		return;
	}
	tp->time_sum += t;
	tp->n_sum++;
	if (tp->last_check == 0)
		tp->last_check = ticks;
	else if (tp->last_check > 0 && (ticks - tp->last_check) > 5000000) {
		/* More than 0.5s of samples accumulated: claim the average
		 * computation for this thread. */
		tp->ignore_times = 1;
		compute_avg = TRUE;
	}
	SPIN_UNLOCK (tp->sp_lock);

	if (!compute_avg)
		return;

	//printf ("Items: %d Time elapsed: %.3fs\n", tp->n_sum, (ticks - tp->last_check) / 10000.0);
	tp->last_check = ticks;
	new_threads = 0;
	avg = tp->time_sum / tp->n_sum;
	if (tp->averages [1] == 0) {
		/* First window: just seed the history. */
		tp->averages [1] = avg;
	} else {
		/* Percentage change of the new average vs. the previous one. */
		per1 = ((100 * (ABS (avg - tp->averages [1]))) / tp->averages [1]);
		if (per1 > 5) {
			if (avg > tp->averages [1]) {
				/* Latency rising: if the previous trend was also
				 * rising, adding threads didn't help -> shrink;
				 * otherwise try growing. */
				if (tp->averages [1] < tp->averages [0]) {
					new_threads = -1;
				} else {
					new_threads = 1;
				}
			} else if (avg < tp->averages [1] && tp->averages [1] < tp->averages [0]) {
				/* Latency falling consistently: keep growing. */
				new_threads = 1;
			}
		} else {
			int min, n;
			min = tp->min_threads;
			n = tp->nthreads;
			/* Latency stable: grow while below 2*min_threads and all
			 * current workers are busy. */
			if ((n - min) < min && tp->busy_threads == n)
				new_threads = 1;
		}
		/*
		if (new_threads != 0) {
			printf ("n: %d per1: %lld avg=%lld avg1=%lld avg0=%lld\n", new_threads, per1, avg, tp->averages [1], tp->averages [0]);
		}
		*/
	}
	/* Reset the sampling window and shift the averages history. */
	tp->time_sum = 0;
	tp->n_sum = 0;
	tp->averages [0] = tp->averages [1];
	tp->averages [1] = avg;
	tp->ignore_times = 0;

	if (new_threads == -1) {
		/* Shrink: flag one worker for destruction and wake it up. */
		if (tp->destroy_thread == 0 && InterlockedCompareExchange (&tp->destroy_thread, 1, 0) == 0)
			pulse_on_new_job (tp);
	}
}
/** * WaitForSingleObjectEx: * @handle: an object to wait for * @timeout: the maximum time in milliseconds to wait for * @alertable: if TRUE, the wait can be interrupted by an APC call * * This function returns when either @handle is signalled, or @timeout * ms elapses. If @timeout is zero, the object's state is tested and * the function returns immediately. If @timeout is %INFINITE, the * function waits forever. * * Return value: %WAIT_ABANDONED - @handle is a mutex that was not * released by the owning thread when it exited. Ownership of the * mutex object is granted to the calling thread and the mutex is set * to nonsignalled. %WAIT_OBJECT_0 - The state of @handle is * signalled. %WAIT_TIMEOUT - The @timeout interval elapsed and * @handle's state is still not signalled. %WAIT_FAILED - an error * occurred. %WAIT_IO_COMPLETION - the wait was ended by an APC. */ guint32 WaitForSingleObjectEx(gpointer handle, guint32 timeout, gboolean alertable) { guint32 ret, waited; int thr_ret; gboolean apc_pending = FALSE; gpointer current_thread = wapi_get_current_thread_handle (); gint64 now, end; if (current_thread == NULL) { SetLastError (ERROR_INVALID_HANDLE); return(WAIT_FAILED); } if (handle == _WAPI_THREAD_CURRENT) { handle = wapi_get_current_thread_handle (); if (handle == NULL) { SetLastError (ERROR_INVALID_HANDLE); return(WAIT_FAILED); } } if ((GPOINTER_TO_UINT (handle) & _WAPI_PROCESS_UNHANDLED) == _WAPI_PROCESS_UNHANDLED) { SetLastError (ERROR_INVALID_HANDLE); return(WAIT_FAILED); } if (_wapi_handle_test_capabilities (handle, WAPI_HANDLE_CAP_WAIT) == FALSE) { MONO_TRACE (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER, "%s: handle %p can't be waited for", __func__, handle); return(WAIT_FAILED); } _wapi_handle_ops_prewait (handle); if (_wapi_handle_test_capabilities (handle, WAPI_HANDLE_CAP_SPECIAL_WAIT) == TRUE) { MONO_TRACE (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER, "%s: handle %p has special wait", __func__, handle); ret = _wapi_handle_ops_special_wait (handle, timeout, 
alertable); if (alertable && _wapi_thread_cur_apc_pending ()) ret = WAIT_IO_COMPLETION; return ret; } MONO_TRACE (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER, "%s: locking handle %p", __func__, handle); thr_ret = _wapi_handle_lock_handle (handle); g_assert (thr_ret == 0); if (_wapi_handle_test_capabilities (handle, WAPI_HANDLE_CAP_OWN) == TRUE) { if (own_if_owned (handle) == TRUE) { MONO_TRACE (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER, "%s: handle %p already owned", __func__, handle); ret = WAIT_OBJECT_0; goto done; } } if (own_if_signalled (handle) == TRUE) { MONO_TRACE (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER, "%s: handle %p already signalled", __func__, handle); ret=WAIT_OBJECT_0; goto done; } if (timeout == 0) { ret = WAIT_TIMEOUT; goto done; } if (timeout != INFINITE) end = mono_100ns_ticks () + timeout * 1000 * 10; do { /* Check before waiting on the condition, just in case */ _wapi_handle_ops_prewait (handle); if (own_if_signalled (handle)) { MONO_TRACE (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER, "%s: handle %p signalled", __func__, handle); ret = WAIT_OBJECT_0; goto done; } if (timeout == INFINITE) { waited = _wapi_handle_timedwait_signal_handle (handle, INFINITE, alertable, FALSE, &apc_pending); } else { now = mono_100ns_ticks (); if (end < now) { ret = WAIT_TIMEOUT; goto done; } waited = _wapi_handle_timedwait_signal_handle (handle, (end - now) / 10 / 1000, alertable, FALSE, &apc_pending); } if(waited==0 && !apc_pending) { /* Condition was signalled, so hopefully * handle is signalled now. (It might not be * if someone else got in before us.) */ if (own_if_signalled (handle)) { MONO_TRACE (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER, "%s: handle %p signalled", __func__, handle); ret=WAIT_OBJECT_0; goto done; } /* Better luck next time */ } } while(waited == 0 && !apc_pending); /* Timeout or other error */ MONO_TRACE (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER, "%s: wait on handle %p error: %s", __func__, handle, strerror (waited)); ret = apc_pending ? 
WAIT_IO_COMPLETION : WAIT_TIMEOUT; done: MONO_TRACE (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER, "%s: unlocking handle %p", __func__, handle); thr_ret = _wapi_handle_unlock_handle (handle); g_assert (thr_ret == 0); return(ret); }
/** * WaitForMultipleObjectsEx: * @numobjects: The number of objects in @handles. The maximum allowed * is %MAXIMUM_WAIT_OBJECTS. * @handles: An array of object handles. Duplicates are not allowed. * @waitall: If %TRUE, this function waits until all of the handles * are signalled. If %FALSE, this function returns when any object is * signalled. * @timeout: The maximum time in milliseconds to wait for. * @alertable: if TRUE, the wait can be interrupted by an APC call * * This function returns when either one or more of @handles is * signalled, or @timeout ms elapses. If @timeout is zero, the state * of each item of @handles is tested and the function returns * immediately. If @timeout is %INFINITE, the function waits forever. * * Return value: %WAIT_OBJECT_0 to %WAIT_OBJECT_0 + @numobjects - 1 - * if @waitall is %TRUE, indicates that all objects are signalled. If * @waitall is %FALSE, the return value minus %WAIT_OBJECT_0 indicates * the first index into @handles of the objects that are signalled. * %WAIT_ABANDONED_0 to %WAIT_ABANDONED_0 + @numobjects - 1 - if * @waitall is %TRUE, indicates that all objects are signalled, and at * least one object is an abandoned mutex object (See * WaitForSingleObject() for a description of abandoned mutexes.) If * @waitall is %FALSE, the return value minus %WAIT_ABANDONED_0 * indicates the first index into @handles of an abandoned mutex. * %WAIT_TIMEOUT - The @timeout interval elapsed and no objects in * @handles are signalled. %WAIT_FAILED - an error occurred. * %WAIT_IO_COMPLETION - the wait was ended by an APC. 
*/ guint32 WaitForMultipleObjectsEx(guint32 numobjects, gpointer *handles, gboolean waitall, guint32 timeout, gboolean alertable) { gboolean duplicate = FALSE, bogustype = FALSE, done; guint32 count, lowest; guint i; guint32 ret; int thr_ret; gpointer current_thread = wapi_get_current_thread_handle (); guint32 retval; gboolean poll; gpointer sorted_handles [MAXIMUM_WAIT_OBJECTS]; gboolean apc_pending = FALSE; gint64 now, end; if (current_thread == NULL) { SetLastError (ERROR_INVALID_HANDLE); return(WAIT_FAILED); } if (numobjects > MAXIMUM_WAIT_OBJECTS) { MONO_TRACE (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER, "%s: Too many handles: %d", __func__, numobjects); return(WAIT_FAILED); } if (numobjects == 1) { return WaitForSingleObjectEx (handles [0], timeout, alertable); } /* Check for duplicates */ for (i = 0; i < numobjects; i++) { if (handles[i] == _WAPI_THREAD_CURRENT) { handles[i] = wapi_get_current_thread_handle (); if (handles[i] == NULL) { MONO_TRACE (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER, "%s: Handle %d bogus", __func__, i); bogustype = TRUE; break; } } if ((GPOINTER_TO_UINT (handles[i]) & _WAPI_PROCESS_UNHANDLED) == _WAPI_PROCESS_UNHANDLED) { MONO_TRACE (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER, "%s: Handle %d pseudo process", __func__, i); bogustype = TRUE; break; } if (_wapi_handle_test_capabilities (handles[i], WAPI_HANDLE_CAP_WAIT) == FALSE) { MONO_TRACE (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER, "%s: Handle %p can't be waited for", __func__, handles[i]); bogustype = TRUE; break; } sorted_handles [i] = handles [i]; _wapi_handle_ops_prewait (handles[i]); } qsort (sorted_handles, numobjects, sizeof (gpointer), g_direct_equal); for (i = 1; i < numobjects; i++) { if (sorted_handles [i - 1] == sorted_handles [i]) { duplicate = TRUE; break; } } if (duplicate == TRUE) { MONO_TRACE (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER, "%s: Returning due to duplicates", __func__); return(WAIT_FAILED); } if (bogustype == TRUE) { MONO_TRACE (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER, "%s: 
Returning due to bogus type", __func__); return(WAIT_FAILED); } poll = FALSE; for (i = 0; i < numobjects; ++i) if (_wapi_handle_type (handles [i]) == WAPI_HANDLE_PROCESS || _WAPI_SHARED_HANDLE (_wapi_handle_type (handles[i]))) /* Can't wait for a process handle + another handle without polling */ poll = TRUE; done = test_and_own (numobjects, handles, waitall, &count, &lowest); if (done == TRUE) { return(WAIT_OBJECT_0+lowest); } if (timeout == 0) { return WAIT_TIMEOUT; } if (timeout != INFINITE) end = mono_100ns_ticks () + timeout * 1000 * 10; /* Have to wait for some or all handles to become signalled */ for (i = 0; i < numobjects; i++) { /* Add a reference, as we need to ensure the handle wont * disappear from under us while we're waiting in the loop * (not lock, as we don't want exclusive access here) */ _wapi_handle_ref (handles[i]); } while(1) { /* Prod all handles with prewait methods and * special-wait handles that aren't already signalled */ for (i = 0; i < numobjects; i++) { _wapi_handle_ops_prewait (handles[i]); if (_wapi_handle_test_capabilities (handles[i], WAPI_HANDLE_CAP_SPECIAL_WAIT) == TRUE && _wapi_handle_issignalled (handles[i]) == FALSE) { _wapi_handle_ops_special_wait (handles[i], 0, alertable); } } MONO_TRACE (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER, "%s: locking signal mutex", __func__); thr_ret = _wapi_handle_lock_signal_mutex (); g_assert (thr_ret == 0); /* Check the signalled state of handles inside the critical section */ if (waitall) { done = TRUE; for (i = 0; i < numobjects; i++) if (!_wapi_handle_issignalled (handles [i])) done = FALSE; } else { done = FALSE; for (i = 0; i < numobjects; i++) if (_wapi_handle_issignalled (handles [i])) done = TRUE; } if (!done) { /* Enter the wait */ if (timeout == INFINITE) { ret = _wapi_handle_timedwait_signal (INFINITE, poll, &apc_pending); } else { now = mono_100ns_ticks (); if (end < now) { ret = WAIT_TIMEOUT; } else { ret = _wapi_handle_timedwait_signal ((end - now) / 10 / 1000, poll, &apc_pending); 
} } } else { /* No need to wait */ ret = 0; } MONO_TRACE (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER, "%s: unlocking signal mutex", __func__); thr_ret = _wapi_handle_unlock_signal_mutex (NULL); g_assert (thr_ret == 0); if (alertable && apc_pending) { retval = WAIT_IO_COMPLETION; break; } /* Check if everything is signalled, as we can't * guarantee to notice a shared signal even if the * wait timed out */ done = test_and_own (numobjects, handles, waitall, &count, &lowest); if (done == TRUE) { retval = WAIT_OBJECT_0+lowest; break; } else if (ret != 0) { /* Didn't get all handles, and there was a * timeout or other error */ MONO_TRACE (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER, "%s: wait returned error: %s", __func__, strerror (ret)); if(ret==ETIMEDOUT) { retval = WAIT_TIMEOUT; } else { retval = WAIT_FAILED; } break; } } for (i = 0; i < numobjects; i++) { /* Unref everything we reffed above */ _wapi_handle_unref (handles[i]); } return retval; }
/** * SignalObjectAndWait: * @signal_handle: An object to signal * @wait: An object to wait for * @timeout: The maximum time in milliseconds to wait for * @alertable: Specifies whether the function returnes when the system * queues an I/O completion routine or an APC for the calling thread. * * Atomically signals @signal and waits for @wait to become signalled, * or @timeout ms elapses. If @timeout is zero, the object's state is * tested and the function returns immediately. If @timeout is * %INFINITE, the function waits forever. * * @signal can be a semaphore, mutex or event object. * * If @alertable is %TRUE and the system queues an I/O completion * routine or an APC for the calling thread, the function returns and * the thread calls the completion routine or APC function. If * %FALSE, the function does not return, and the thread does not call * the completion routine or APC function. A completion routine is * queued when the ReadFileEx() or WriteFileEx() function in which it * was specified has completed. The calling thread is the thread that * initiated the read or write operation. An APC is queued when * QueueUserAPC() is called. Currently completion routines and APC * functions are not supported. * * Return value: %WAIT_ABANDONED - @wait is a mutex that was not * released by the owning thread when it exited. Ownershop of the * mutex object is granted to the calling thread and the mutex is set * to nonsignalled. %WAIT_IO_COMPLETION - the wait was ended by one * or more user-mode asynchronous procedure calls queued to the * thread. %WAIT_OBJECT_0 - The state of @wait is signalled. * %WAIT_TIMEOUT - The @timeout interval elapsed and @wait's state is * still not signalled. %WAIT_FAILED - an error occurred. 
*/ guint32 SignalObjectAndWait(gpointer signal_handle, gpointer wait, guint32 timeout, gboolean alertable) { guint32 ret = 0, waited; int thr_ret; gboolean apc_pending = FALSE; gpointer current_thread = wapi_get_current_thread_handle (); gint64 wait_start, timeout_in_ticks; if (current_thread == NULL) { SetLastError (ERROR_INVALID_HANDLE); return(WAIT_FAILED); } if (signal_handle == _WAPI_THREAD_CURRENT) { signal_handle = wapi_get_current_thread_handle (); if (signal_handle == NULL) { SetLastError (ERROR_INVALID_HANDLE); return(WAIT_FAILED); } } if (wait == _WAPI_THREAD_CURRENT) { wait = wapi_get_current_thread_handle (); if (wait == NULL) { SetLastError (ERROR_INVALID_HANDLE); return(WAIT_FAILED); } } if ((GPOINTER_TO_UINT (signal_handle) & _WAPI_PROCESS_UNHANDLED) == _WAPI_PROCESS_UNHANDLED) { SetLastError (ERROR_INVALID_HANDLE); return(WAIT_FAILED); } if ((GPOINTER_TO_UINT (wait) & _WAPI_PROCESS_UNHANDLED) == _WAPI_PROCESS_UNHANDLED) { SetLastError (ERROR_INVALID_HANDLE); return(WAIT_FAILED); } if (mono_w32handle_test_capabilities (signal_handle, MONO_W32HANDLE_CAP_SIGNAL)==FALSE) { return(WAIT_FAILED); } if (mono_w32handle_test_capabilities (wait, MONO_W32HANDLE_CAP_WAIT)==FALSE) { return(WAIT_FAILED); } mono_w32handle_ops_prewait (wait); if (mono_w32handle_test_capabilities (wait, MONO_W32HANDLE_CAP_SPECIAL_WAIT) == TRUE) { g_warning ("%s: handle %p has special wait, implement me!!", __func__, wait); return (WAIT_FAILED); } MONO_TRACE (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER, "%s: locking handle %p", __func__, wait); thr_ret = mono_w32handle_lock_handle (wait); g_assert (thr_ret == 0); mono_w32handle_ops_signal (signal_handle); if (mono_w32handle_test_capabilities (wait, MONO_W32HANDLE_CAP_OWN)==TRUE) { if (own_if_owned (wait)) { MONO_TRACE (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER, "%s: handle %p already owned", __func__, wait); ret = WAIT_OBJECT_0; goto done; } } if (own_if_signalled (wait)) { MONO_TRACE (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER, "%s: handle %p 
already signalled", __func__, wait); ret = WAIT_OBJECT_0; goto done; } if (timeout != INFINITE) { wait_start = mono_100ns_ticks (); timeout_in_ticks = (gint64)timeout * 10 * 1000; //can't overflow as timeout is 32bits } do { /* Check before waiting on the condition, just in case */ mono_w32handle_ops_prewait (wait); if (own_if_signalled (wait)) { MONO_TRACE (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER, "%s: handle %p signalled", __func__, wait); ret = WAIT_OBJECT_0; goto done; } if (timeout == INFINITE) { waited = mono_w32handle_timedwait_signal_handle (wait, INFINITE, FALSE, alertable ? &apc_pending : NULL); } else { gint64 elapsed = mono_100ns_ticks () - wait_start; if (elapsed >= timeout_in_ticks) { ret = WAIT_TIMEOUT; goto done; } waited = mono_w32handle_timedwait_signal_handle (wait, (timeout_in_ticks - elapsed) / 10 / 1000, FALSE, alertable ? &apc_pending : NULL); } if (waited==0 && !apc_pending) { /* Condition was signalled, so hopefully * handle is signalled now. (It might not be * if someone else got in before us.) */ if (own_if_signalled (wait)) { MONO_TRACE (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER, "%s: handle %p signalled", __func__, wait); ret = WAIT_OBJECT_0; goto done; } /* Better luck next time */ } } while(waited == 0 && !apc_pending); /* Timeout or other error */ MONO_TRACE (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER, "%s: wait on handle %p error: %s", __func__, wait, strerror (ret)); ret = apc_pending ? WAIT_IO_COMPLETION : WAIT_TIMEOUT; done: MONO_TRACE (G_LOG_LEVEL_DEBUG, MONO_TRACE_IO_LAYER, "%s: unlocking handle %p", __func__, wait); thr_ret = mono_w32handle_unlock_handle (wait); g_assert (thr_ret == 0); return(ret); }