/*
 * Suspend the thread with native id @id, retrying until it stops outside of a
 * critical region.  If @interrupt_kernel is TRUE the target is also broken out
 * of kernel waits.  Caller is expected to hold the suspend lock (nolock suffix).
 *
 * Returns the suspended MonoThreadInfo, or NULL when the target could not be
 * suspended (e.g. it is dead or detaching) or a retry-resume failed.  On the
 * NULL paths hazard pointer slot 1 (set by the lookup inside suspend_sync) is
 * cleared here.
 */
static MonoThreadInfo*
suspend_sync_nolock (MonoNativeThreadId id, gboolean interrupt_kernel)
{
	MonoThreadInfo *info = NULL;
	int sleep_duration = 0;
	for (;;) {
		if (!(info = suspend_sync (id, interrupt_kernel))) {
			mono_hazard_pointer_clear (mono_hazard_pointer_get (), 1);
			return NULL;
		}

		/*WARNING: We now are in interrupt context until we resume the thread. */
		if (!is_thread_in_critical_region (info))
			break;

		/* Target stopped inside a critical region: let it run again and retry. */
		if (!mono_thread_info_core_resume (info)) {
			mono_hazard_pointer_clear (mono_hazard_pointer_get (), 1);
			return NULL;
		}
		THREADS_SUSPEND_DEBUG ("RESTARTED thread tid %p\n", (void*)id);

		/* Wait for the pending resume to finish */
		mono_threads_wait_pending_operations ();

		/* Back off: yield on the first retry, then sleep progressively longer. */
		if (sleep_duration == 0)
			mono_thread_info_yield ();
		else
			g_usleep (sleep_duration);

		sleep_duration += 10;
	}
	return info;
}
/*
 * Resume the suspended thread with native id @tid.
 *
 * Takes the global suspend lock, looks the thread up (which parks it on
 * hazard pointer 1), issues the resume and waits for the pending resume
 * operation to complete.  HP1 is cleared before returning.
 *
 * Returns FALSE if the thread could not be found or the resume failed.
 */
gboolean
mono_thread_info_resume (MonoNativeThreadId tid)
{
	gboolean result; /* don't initialize it so the compiler can catch uninitialized paths. */
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
	MonoThreadInfo *info;

	THREADS_SUSPEND_DEBUG ("RESUMING tid %p\n", (void*)tid);

	mono_thread_info_suspend_lock ();

	info = mono_thread_info_lookup (tid); /*info on HP1*/
	if (!info) {
		result = FALSE;
		goto cleanup;
	}

	result = mono_thread_info_core_resume (info);

	//Wait for the pending resume to finish
	mono_threads_wait_pending_operations ();

cleanup:
	mono_thread_info_suspend_unlock ();
	mono_hazard_pointer_clear (hp, 1);

	return result;
}
/*
 * Decrement the suspend count of thread @tid; when it drops to zero, resume
 * the underlying native thread.  The per-thread critical section serializes
 * this against concurrent suspend/detach.
 *
 * Returns FALSE when the thread is unknown or not currently suspended;
 * otherwise TRUE, or the result of mono_threads_core_resume when the final
 * count was released.  Clears hazard pointer 1 on all exits after lookup.
 */
gboolean
mono_thread_info_resume (MonoNativeThreadId tid)
{
	gboolean result = TRUE;
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
	MonoThreadInfo *info = mono_thread_info_lookup (tid); /*info on HP1*/
	if (!info)
		return FALSE;

	EnterCriticalSection (&info->suspend_lock);

	THREADS_DEBUG ("resume %x IN COUNT %d\n", tid, info->suspend_count);

	if (info->suspend_count <= 0) {
		LeaveCriticalSection (&info->suspend_lock);
		mono_hazard_pointer_clear (hp, 1);
		return FALSE;
	}

	/*
	 * The theory here is that if we manage to suspend the thread it means it did not
	 * start cleanup since it take the same lock.
	 */
	g_assert (mono_thread_info_get_tid (info));

	if (--info->suspend_count == 0)
		result = mono_threads_core_resume (info);

	LeaveCriticalSection (&info->suspend_lock);
	mono_hazard_pointer_clear (hp, 1);

	return result;
}
/*
 * Suspend thread @id, run @callback on it while it is stopped, then act on the
 * callback's verdict: MonoResumeThread resumes the target, KeepSuspended leaves
 * it stopped (the callback then owns the eventual resume).
 *
 * The whole operation happens under the suspend lock and inside a global
 * suspend bracket.  Must not be called on the current thread (asserted).
 */
void
mono_thread_info_safe_suspend_and_run (MonoNativeThreadId id, gboolean interrupt_kernel, MonoSuspendThreadCallback callback, gpointer user_data)
{
	int result;
	MonoThreadInfo *info = NULL;
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();

	THREADS_SUSPEND_DEBUG ("SUSPENDING tid %p\n", (void*)id);
	/*FIXME: unify this with self-suspend*/
	g_assert (id != mono_native_thread_id_get ());

	mono_thread_info_suspend_lock ();
	mono_threads_begin_global_suspend ();

	info = suspend_sync_nolock (id, interrupt_kernel);
	if (!info)
		goto done;

	switch (result = callback (info, user_data)) {
	case MonoResumeThread:
		/* Re-park info on HP1 so it stays valid across the resume. */
		mono_hazard_pointer_set (hp, 1, info);
		mono_thread_info_core_resume (info);
		mono_threads_wait_pending_operations ();
		break;
	case KeepSuspended:
		break;
	default:
		g_error ("Invalid suspend_and_run callback return value %d", result);
	}

done:
	mono_hazard_pointer_clear (hp, 1);
	mono_threads_end_global_suspend ();
	mono_thread_info_suspend_unlock ();
}
/*
 * Return the MonoThreadInfo of the calling thread.
 *
 * Fast path reads the TLS slot; if that is already cleared (thread cleanup is
 * in progress) we fall back to a registry lookup, which must succeed.
 */
MonoThreadInfo*
mono_thread_info_current (void)
{
	MonoThreadInfo *result;

	result = (MonoThreadInfo*)mono_native_tls_get_value (thread_info_key);
	if (!result) {
		result = mono_thread_info_lookup (mono_native_thread_id_get ()); /*info on HP1*/

		/*
		We might be called during thread cleanup, but we cannot be called after cleanup as happened.
		The way to distinguish between before, during and after cleanup is the following:

		-If the TLS key is set, cleanup has not begun;
		-If the TLS key is clean, but the thread remains registered, cleanup is in progress;
		-If the thread is nowhere to be found, cleanup has finished.

		We cannot function after cleanup since there's no way to ensure what will happen.
		*/
		g_assert (result);

		/*
		 * We're looking up the current thread, which will not be freed until we
		 * finish running, so there is no need to keep it on a hazard pointer.
		 */
		mono_hazard_pointer_clear (mono_hazard_pointer_get (), 1);
	}

	return result;
}
/* The return value is only valid until a matching mono_thread_info_resume is called */
/*
 * Request an async suspension of thread @tid and wait for it to take effect.
 *
 * Drives the suspend state machine: an already-suspended target is returned
 * immediately; otherwise the suspend is initiated (or an in-flight one is
 * joined) and we block until the pending operation set drains.
 *
 * Returns the target's MonoThreadInfo on success (left on hazard pointer 1 by
 * the lookup, except for the AlreadySuspended path which clears it), or NULL
 * if the thread is unknown or the suspend could not be started/verified.
 */
static MonoThreadInfo*
suspend_sync (MonoNativeThreadId tid, gboolean interrupt_kernel)
{
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
	MonoThreadInfo *info = mono_thread_info_lookup (tid); /*info on HP1*/
	if (!info)
		return NULL;

	switch (mono_threads_transition_request_async_suspension (info)) {
	case AsyncSuspendAlreadySuspended:
		mono_hazard_pointer_clear (hp, 1); //XXX this is questionable we got to clean the suspend/resume nonsense of critical sections
		return info;
	case AsyncSuspendWait:
		/* Another suspend is in flight; just wait for it below. */
		mono_threads_add_to_pending_operation_set (info);
		break;
	case AsyncSuspendInitSuspend:
		if (!begin_async_suspend (info, interrupt_kernel)) {
			mono_hazard_pointer_clear (hp, 1);
			return NULL;
		}
	}

	//Wait for the pending suspend to finish
	mono_threads_wait_pending_operations ();

	if (!check_async_suspend (info)) {
		mono_hazard_pointer_clear (hp, 1);
		return NULL;
	}
	return info;
}
/*
 * This is a very specific function whose only purpose is to
 * break a given thread from socket syscalls.
 *
 * This only exists because linux won't fail a call to connect
 * if the underlying is closed.
 *
 * TODO We should cleanup and unify this with the other syscall abort
 * facility.
 */
void
mono_thread_info_abort_socket_syscall_for_close (MonoNativeThreadId tid)
{
	MonoThreadHazardPointers *hp;
	MonoThreadInfo *info;

	/* Never self-abort; bail early when the backend has no abort support. */
	if (tid == mono_native_thread_id_get () || !mono_threads_core_needs_abort_syscall ())
		return;

	hp = mono_hazard_pointer_get ();
	info = mono_thread_info_lookup (tid); /*info on HP1*/
	if (!info)
		return;

	/* Thread is past RUNNING (detaching/dead); nothing to interrupt. */
	if (mono_thread_info_run_state (info) > STATE_RUNNING) {
		mono_hazard_pointer_clear (hp, 1);
		return;
	}

	mono_thread_info_suspend_lock ();

	mono_threads_core_abort_syscall (info);

	mono_hazard_pointer_clear (hp, 1);
	mono_thread_info_suspend_unlock ();
}
/*
 * Decrement the suspend count of thread @tid; when it drops to zero, resume
 * the underlying native thread.  Serialized by the per-thread suspend
 * semaphore.
 *
 * Returns FALSE when the thread is unknown or not currently suspended;
 * otherwise TRUE, or the result of the internal resume when the final count
 * was released.  Also marks the calling thread as outside its critical
 * region, matching the flag set by the corresponding suspend.
 */
gboolean
mono_thread_info_resume (MonoNativeThreadId tid)
{
	gboolean result = TRUE;
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
	MonoThreadInfo *info = mono_thread_info_lookup (tid); /*info on HP1*/
	if (!info)
		return FALSE;

	MONO_SEM_WAIT_UNITERRUPTIBLE (&info->suspend_semaphore);

	THREADS_DEBUG ("resume %x IN COUNT %d\n", tid, info->suspend_count);

	if (info->suspend_count <= 0) {
		MONO_SEM_POST (&info->suspend_semaphore);
		mono_hazard_pointer_clear (hp, 1);
		return FALSE;
	}

	/*
	 * The theory here is that if we manage to suspend the thread it means it did not
	 * start cleanup since it take the same lock.
	 */
	g_assert (mono_thread_info_get_tid (info));

	if (--info->suspend_count == 0)
		result = mono_thread_info_resume_internal (info);

	MONO_SEM_POST (&info->suspend_semaphore);
	mono_hazard_pointer_clear (hp, 1);

	/* We are done touching the target; leave our own critical region. */
	mono_atomic_store_release (&mono_thread_info_current ()->inside_critical_region, FALSE);

	return result;
}
/*
 * This is a very specific function whose only purpose is to
 * break a given thread from socket syscalls.
 *
 * This only exists because linux won't fail a call to connect
 * if the underlying is closed.
 *
 * TODO We should cleanup and unify this with the other syscall abort
 * facility.
 */
void
mono_thread_info_abort_socket_syscall_for_close (MonoNativeThreadId tid)
{
	MonoThreadHazardPointers *hp;
	MonoThreadInfo *info;

	/* Never self-abort; bail early when the backend has no abort support. */
	if (tid == mono_native_thread_id_get () || !mono_threads_core_needs_abort_syscall ())
		return;

	hp = mono_hazard_pointer_get ();
	info = mono_thread_info_lookup (tid); /* info left on HP1 by the lookup */
	if (!info)
		return;

	/* Already detached; nothing to interrupt. */
	if (mono_thread_info_run_state (info) == STATE_DETACHED) {
		mono_hazard_pointer_clear (hp, 1);
		return;
	}

	mono_thread_info_suspend_lock ();
	mono_threads_begin_global_suspend ();

	mono_threads_core_abort_syscall (info);
	mono_threads_wait_pending_operations ();

	mono_hazard_pointer_clear (hp, 1);

	mono_threads_end_global_suspend ();
	mono_thread_info_suspend_unlock ();
}
void mono_hazard_pointer_restore_for_signal_handler (int small_id) { MonoThreadHazardPointers *hp = mono_hazard_pointer_get (); MonoThreadHazardPointers *hp_overflow; int i; if (small_id < 0) return; g_assert (small_id < HAZARD_TABLE_OVERFLOW); g_assert (overflow_busy [small_id]); for (i = 0; i < HAZARD_POINTER_COUNT; ++i) g_assert (!hp->hazard_pointers [i]); hp_overflow = &hazard_table [small_id]; *hp = *hp_overflow; mono_memory_write_barrier (); memset (hp_overflow, 0, sizeof (MonoThreadHazardPointers)); mono_memory_write_barrier (); overflow_busy [small_id] = 0; }
/*
 * Look up @key in the concurrent hash table and return its value, or NULL if
 * absent.  Lock-free for readers: the table pointer is protected with hazard
 * pointer slot 0, and open addressing (linear probing) is used within the
 * table.  Restarts from the top if the table was swapped out underneath us
 * or we observe a half-deleted entry.
 */
gpointer
mono_conc_hashtable_lookup (MonoConcurrentHashTable *hash_table, gpointer key)
{
	MonoThreadHazardPointers* hp;
	conc_table *table;
	int hash, i, table_mask;
	key_value_pair *kvs;
	hash = mix_hash (hash_table->hash_func (key));
	hp = mono_hazard_pointer_get ();

retry:
	table = (conc_table *)get_hazardous_pointer ((gpointer volatile*)&hash_table->table, hp, 0);
	table_mask = table->table_size - 1;
	kvs = table->kvs;
	i = hash & table_mask;

	if (G_LIKELY (!hash_table->equal_func)) {
		/* Pointer-identity keys: compare directly. */
		while (kvs [i].key) {
			if (key == kvs [i].key) {
				gpointer value;
				/* The read of keys must happen before the read of values */
				mono_memory_barrier ();
				value = kvs [i].value;
				/* FIXME check for NULL if we add suppport for removal */
				mono_hazard_pointer_clear (hp, 0);
				return value;
			}
			i = (i + 1) & table_mask;
		}
	} else {
		/* User-supplied equality: skip tombstones left by deletions. */
		GEqualFunc equal = hash_table->equal_func;

		while (kvs [i].key) {
			if (kvs [i].key != TOMBSTONE && equal (key, kvs [i].key)) {
				gpointer value;
				/* The read of keys must happen before the read of values */
				mono_memory_barrier ();
				value = kvs [i].value;

				/* We just read a value been deleted, try again. */
				if (G_UNLIKELY (!value))
					goto retry;

				mono_hazard_pointer_clear (hp, 0);
				return value;
			}
			i = (i + 1) & table_mask;
		}
	}

	/* The table might have expanded and the value is now on the newer table */
	mono_memory_barrier ();
	if (hash_table->table != table)
		goto retry;

	mono_hazard_pointer_clear (hp, 0);
	return NULL;
}
/*
 * Stress-test worker for the lock-free linked-list set (lls).
 *
 * Walks the shared nodes array with a per-thread stride and repeatedly flips
 * each node between STATE_OUT (not in the list) and STATE_IN (in the list).
 * Node ownership during a transition is claimed with a CAS to STATE_BUSY so
 * only one worker mutates a given node at a time; a node found BUSY is used
 * as an opportunity to help free retired hazardous memory.
 */
static void*
worker (void *arg)
{
	thread_data_t *thread_data = (thread_data_t *)arg;
	MonoThreadHazardPointers *hp;
	int skip = thread_data->skip;
	int i, j;
	gboolean result;

	mono_thread_info_register_small_id ();
	hp = mono_hazard_pointer_get ();

	i = 0;
	for (j = 0; j < NUM_ITERS; ++j) {
		switch (nodes [i].state) {
		case STATE_BUSY:
			mono_thread_hazardous_try_free_some ();
			break;
		case STATE_OUT:
			/* Claim the node, verify it is absent, then insert it. */
			if (InterlockedCompareExchange (&nodes [i].state, STATE_BUSY, STATE_OUT) == STATE_OUT) {
				result = mono_lls_find (&lls, hp, i, HAZARD_FREE_SAFE_CTX);
				assert (!result);
				mono_hazard_pointer_clear_all (hp, -1);

				result = mono_lls_insert (&lls, hp, &nodes [i].node, HAZARD_FREE_SAFE_CTX);
				mono_hazard_pointer_clear_all (hp, -1);

				assert (nodes [i].state == STATE_BUSY);
				nodes [i].state = STATE_IN;

				++thread_data->num_adds;
			}
			break;
		case STATE_IN:
			/* Claim the node, verify it is present, then remove it. */
			if (InterlockedCompareExchange (&nodes [i].state, STATE_BUSY, STATE_IN) == STATE_IN) {
				result = mono_lls_find (&lls, hp, i, HAZARD_FREE_SAFE_CTX);
				assert (result);
				assert (mono_hazard_pointer_get_val (hp, 1) == &nodes [i].node);
				mono_hazard_pointer_clear_all (hp, -1);

				result = mono_lls_remove (&lls, hp, &nodes [i].node, HAZARD_FREE_SAFE_CTX);
				mono_hazard_pointer_clear_all (hp, -1);

				++thread_data->num_removes;
			}
			break;
		default:
			assert (FALSE);
		}

		/* Advance by the per-thread stride, wrapping around the array. */
		i += skip;
		if (i >= N)
			i -= N;
	}

	return NULL;
}
/*
 * Append @node to lock-free queue @q (Michael-Scott style enqueue).
 *
 * The tail is protected with hazard pointer slot 0.  We link the node onto
 * tail->next with a CAS; if the tail lags behind we help advance it and
 * retry.  After linking, a final best-effort CAS swings q->tail to the new
 * node (failure is fine — some other thread advanced it for us).
 */
void
mono_lock_free_queue_enqueue (MonoLockFreeQueue *q, MonoLockFreeQueueNode *node)
{
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
	MonoLockFreeQueueNode *tail;

#ifdef QUEUE_DEBUG
	g_assert (!node->in_queue);
	node->in_queue = TRUE;
	mono_memory_write_barrier ();
#endif

	g_assert (node->next == FREE_NEXT);
	node->next = END_MARKER;
	for (;;) {
		MonoLockFreeQueueNode *next;

		tail = (MonoLockFreeQueueNode *) get_hazardous_pointer ((gpointer volatile*)&q->tail, hp, 0);
		mono_memory_read_barrier ();
		/*
		 * We never dereference next so we don't need a
		 * hazardous load.
		 */
		next = tail->next;
		mono_memory_read_barrier ();

		/* Are tail and next consistent? */
		if (tail == q->tail) {
			g_assert (next != INVALID_NEXT && next != FREE_NEXT);
			g_assert (next != tail);
			if (next == END_MARKER) {
				/*
				 * Here we require that nodes that
				 * have been dequeued don't have
				 * next==END_MARKER.  If they did, we
				 * might append to a node that isn't
				 * in the queue anymore here.
				 */
				if (InterlockedCompareExchangePointer ((gpointer volatile*)&tail->next, node, END_MARKER) == END_MARKER)
					break;
			} else {
				/* Try to advance tail */
				InterlockedCompareExchangePointer ((gpointer volatile*)&q->tail, next, tail);
			}
		}

		mono_memory_write_barrier ();
		mono_hazard_pointer_clear (hp, 0);
	}

	/* Try to advance tail */
	InterlockedCompareExchangePointer ((gpointer volatile*)&q->tail, node, tail);

	mono_memory_write_barrier ();
	mono_hazard_pointer_clear (hp, 0);
}
/*
 * Unlink @info from the global thread list.
 * Returns TRUE if the node was present and got removed.
 */
static gboolean
mono_thread_info_remove (MonoThreadInfo *info)
{
	gboolean removed;
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();

	THREADS_DEBUG ("removing info %p\n", info);
	removed = mono_lls_remove (&thread_list, hp, (MonoLinkedListSetNode*)info);
	mono_hazard_pointer_clear_all (hp, -1);
	return removed;
}
/*
 * Finish a suspend initiated elsewhere by resuming @info.
 *
 * Also clears the calling thread's inside_critical_region flag, matching the
 * flag set when the suspend was performed.
 */
void
mono_thread_info_finish_suspend_and_resume (MonoThreadInfo *info)
{
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();

	/*Resume can access info after the target has resumed, so we must ensure it won't touch freed memory. */
	mono_hazard_pointer_set (hp, 1, info);
	mono_thread_info_core_resume (info);
	mono_hazard_pointer_clear (hp, 1);

	mono_atomic_store_release (&mono_thread_info_current ()->inside_critical_region, FALSE);
}
/*
 * Link @info into the global thread list.
 * Returns TRUE on success, FALSE if an equal node was already present.
 */
static gboolean
mono_thread_info_insert (MonoThreadInfo *info)
{
	gboolean inserted;
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();

	inserted = mono_lls_insert (&thread_list, hp, (MonoLinkedListSetNode*)info);
	/* The hazard pointers are cleared on every outcome. */
	mono_hazard_pointer_clear_all (hp, -1);
	return inserted;
}
/*
 * Suspend the thread with native id @id, retrying until it stops outside of a
 * critical region.  If @interrupt_kernel is TRUE the target is also broken out
 * of kernel waits.  Caller is expected to hold the suspend lock (nolock suffix).
 *
 * Returns the suspended MonoThreadInfo, or NULL when the suspend or a
 * retry-resume failed; the NULL paths clear hazard pointer 1 here.
 */
static MonoThreadInfo*
suspend_sync_nolock (MonoNativeThreadId id, gboolean interrupt_kernel)
{
	MonoThreadInfo *info = NULL;
	int sleep_duration = 0;
	for (;;) {
		const char *suspend_error = "Unknown error";
		if (!(info = mono_thread_info_suspend_sync (id, interrupt_kernel, &suspend_error))) {
			mono_hazard_pointer_clear (mono_hazard_pointer_get (), 1);
			return NULL;
		}

		/*WARNING: We now are in interrupt context until we resume the thread. */
		if (!is_thread_in_critical_region (info))
			break;

		/* Target stopped inside a critical region: let it run again and retry. */
		if (!mono_thread_info_core_resume (info)) {
			mono_hazard_pointer_clear (mono_hazard_pointer_get (), 1);
			return NULL;
		}
		THREADS_SUSPEND_DEBUG ("RESTARTED thread tid %p\n", (void*)id);

		/* Wait for the pending resume to finish */
		mono_threads_wait_pending_operations ();

		/* Back off: yield on the first retry, then sleep progressively longer. */
		if (!sleep_duration) {
#ifdef HOST_WIN32
			SwitchToThread ();
#else
			sched_yield ();
#endif
		} else {
			g_usleep (sleep_duration);
		}
		sleep_duration += 10;
	}
	return info;
}
/* If return non null Hazard Pointer 1 holds the return value. */
/*
 * Find the registered MonoThreadInfo for native thread @id.
 * On success the result is parked on hazard pointer slot 1 (all other slots
 * are cleared); on failure all slots are cleared and NULL is returned.
 */
MonoThreadInfo*
mono_thread_info_lookup (MonoNativeThreadId id)
{
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();

	if (mono_lls_find (&thread_list, hp, (uintptr_t)id)) {
		/* Keep slot 1 (the match), drop everything else. */
		mono_hazard_pointer_clear_all (hp, 1);
		return mono_hazard_pointer_get_val (hp, 1);
	}

	mono_hazard_pointer_clear_all (hp, -1);
	return NULL;
}
/*
 * Pop a Descriptor off the global lock-free free list, or, when the list is
 * empty, valloc a fresh batch of NUM_DESC_BATCH descriptors and publish the
 * remainder of the batch as the new free list.
 *
 * The free-list head is protected with hazard pointer slot 1 while we race
 * the CAS; on CAS failure (someone else popped or published first) we loop
 * and retry, releasing any batch we failed to publish.
 */
static Descriptor*
desc_alloc (MonoMemAccountType type)
{
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
	Descriptor *desc;

	for (;;) {
		gboolean success;

		desc = (Descriptor *) mono_get_hazardous_pointer ((volatile gpointer *)&desc_avail, hp, 1);
		if (desc) {
			Descriptor *next = desc->next;
			success = (mono_atomic_cas_ptr ((volatile gpointer *)&desc_avail, next, desc) == desc);
		} else {
			size_t desc_size = sizeof (Descriptor);
			Descriptor *d;
			int i;

			desc = (Descriptor *) mono_valloc (NULL, desc_size * NUM_DESC_BATCH, prot_flags_for_activate (TRUE), type);
			g_assertf (desc, "Failed to allocate memory for the lock free allocator");

			/* Organize into linked list. */
			d = desc;
			for (i = 0; i < NUM_DESC_BATCH; ++i) {
				Descriptor *next = (i == (NUM_DESC_BATCH - 1)) ? NULL : (Descriptor*)((char*)desc + ((i + 1) * desc_size));
				d->next = next;
				mono_lock_free_queue_node_init (&d->node, TRUE);
				d = next;
			}

			/* Writes to the batch must be visible before it is published. */
			mono_memory_write_barrier ();

			/* Publish all but the first descriptor, which we keep. */
			success = (mono_atomic_cas_ptr ((volatile gpointer *)&desc_avail, desc->next, NULL) == NULL);

			if (!success)
				mono_vfree (desc, desc_size * NUM_DESC_BATCH, type);
		}

		mono_hazard_pointer_clear (hp, 1);

		if (success)
			break;
	}

	g_assert (!desc->in_use);
	desc->in_use = TRUE;

	return desc;
}
/*
WARNING:
If we are trying to suspend a target that is on a critical region
and running a syscall we risk looping forever if @interrupt_kernel is FALSE.
So, be VERY carefull in calling this with @interrupt_kernel == FALSE.

Info is not put on a hazard pointer as a suspended thread cannot exit and be freed.

This function MUST be matched with mono_thread_info_finish_suspend or mono_thread_info_finish_suspend_and_resume
*/
MonoThreadInfo*
mono_thread_info_safe_suspend_sync (MonoNativeThreadId id, gboolean interrupt_kernel)
{
	MonoThreadInfo *info = NULL;
	int sleep_duration = 0;

	/*FIXME: unify this with self-suspend*/
	g_assert (id != mono_native_thread_id_get ());

	mono_thread_info_suspend_lock ();

	for (;;) {
		const char *suspend_error = "Unknown error";
		if (!(info = mono_thread_info_suspend_sync (id, interrupt_kernel, &suspend_error))) {
			g_warning ("failed to suspend thread %p due to %s, hopefully it is dead", (gpointer)id, suspend_error);
			mono_thread_info_suspend_unlock ();
			return NULL;
		}
		/*WARNING: We now are in interrupt context until we resume the thread. */
		if (!is_thread_in_critical_region (info))
			break;

		/* Target stopped inside a critical region: let it run again and retry. */
		if (!mono_thread_info_core_resume (info)) {
			g_warning ("failed to resume thread %p, hopefully it is dead", (gpointer)id);
			mono_hazard_pointer_clear (mono_hazard_pointer_get (), 1);
			mono_thread_info_suspend_unlock ();
			return NULL;
		}
		THREADS_DEBUG ("restarted thread %p\n", (gpointer)id);

		/* Back off: yield on the first retry, then sleep progressively longer. */
		if (!sleep_duration) {
#ifdef HOST_WIN32
			SwitchToThread ();
#else
			sched_yield ();
#endif
		} else {
			g_usleep (sleep_duration);
		}
		sleep_duration += 10;
	}

	/* XXX this clears HP 1, so we restated it again */
	mono_atomic_store_release (&mono_thread_info_current ()->inside_critical_region, TRUE);

	mono_thread_info_suspend_unlock ();

	return info;
}
static Descriptor* desc_alloc (void) { MonoThreadHazardPointers *hp = mono_hazard_pointer_get (); Descriptor *desc; for (;;) { gboolean success; desc = (Descriptor *) get_hazardous_pointer ((gpointer * volatile)&desc_avail, hp, 1); if (desc) { Descriptor *next = desc->next; success = (InterlockedCompareExchangePointer ((gpointer * volatile)&desc_avail, next, desc) == desc); } else { size_t desc_size = sizeof (Descriptor); Descriptor *d; int i; desc = (Descriptor *) mono_valloc (0, desc_size * NUM_DESC_BATCH, prot_flags_for_activate (TRUE)); /* Organize into linked list. */ d = desc; for (i = 0; i < NUM_DESC_BATCH; ++i) { Descriptor *next = (i == (NUM_DESC_BATCH - 1)) ? NULL : (Descriptor*)((char*)desc + ((i + 1) * desc_size)); d->next = next; mono_lock_free_queue_node_init (&d->node, TRUE); d = next; } mono_memory_write_barrier (); success = (InterlockedCompareExchangePointer ((gpointer * volatile)&desc_avail, desc->next, NULL) == NULL); if (!success) mono_vfree (desc, desc_size * NUM_DESC_BATCH); } mono_hazard_pointer_clear (hp, 1); if (success) break; } g_assert (!desc->in_use); desc->in_use = TRUE; return desc; }
/*
 * Resume the suspended thread with native id @tid.
 *
 * Looks the thread up (parking it on hazard pointer 1), issues the resume,
 * and clears HP1 before returning.  Returns FALSE when the thread could not
 * be found or the resume failed.
 */
gboolean
mono_thread_info_resume (MonoNativeThreadId tid)
{
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
	MonoThreadInfo *info;
	gboolean result = FALSE;

	info = mono_thread_info_lookup (tid); /*info on HP1*/
	if (info)
		result = mono_thread_info_core_resume (info);

	mono_hazard_pointer_clear (hp, 1);
	return result;
}
/*
 * Shut down the statistical (sampling) profiler: flag the sampler thread to
 * stop, wake it, and wait for it to exit.  On non-Darwin hosts we keep
 * poking the sampler with the profiler signal so a CPU-time clock sleep
 * cannot stall the shutdown (see the comment below).
 */
void
mono_runtime_shutdown_stat_profiler (void)
{
	mono_atomic_store_i32 (&sampling_thread_running, 0);

	mono_profiler_sampling_thread_post ();

#ifndef HOST_DARWIN
	/*
	 * There is a slight problem when we're using CLOCK_PROCESS_CPUTIME_ID: If
	 * we're shutting down and there's largely no activity in the process other
	 * than waiting for the sampler thread to shut down, it can take upwards of
	 * 20 seconds (depending on a lot of factors) for us to shut down because
	 * the sleep progresses very slowly as a result of the low CPU activity.
	 *
	 * We fix this by repeatedly sending the profiler signal to the sampler
	 * thread in order to interrupt the sleep. clock_sleep_ns_abs () will check
	 * sampling_thread_running upon an interrupt and return immediately if it's
	 * zero. profiler_signal_handler () has a special case to ignore the signal
	 * for the sampler thread.
	 */
	MonoThreadInfo *info;

	// Did it shut down already?
	if ((info = mono_thread_info_lookup (sampling_thread))) {
		while (!mono_atomic_load_i32 (&sampling_thread_exiting)) {
			mono_threads_pthread_kill (info, profiler_signal);
			mono_thread_info_usleep (10 * 1000 /* 10ms */);
		}

		// Make sure info can be freed.
		mono_hazard_pointer_clear (mono_hazard_pointer_get (), 1);
	}
#endif

	mono_os_event_wait_one (&sampling_thread_exited, MONO_INFINITE_WAIT, FALSE);
	mono_os_event_destroy (&sampling_thread_exited);

	/*
	 * We can't safely remove the signal handler because we have no guarantee
	 * that all pending signals have been delivered at this point. This should
	 * not really be a problem anyway.
	 */
	//remove_signal_handler (profiler_signal);
}
/* The return value is only valid until a matching mono_thread_info_resume is called */
/*
 * Suspend the thread with native id @tid, incrementing its suspend count.
 * Serialized by the per-thread suspend semaphore.  If the thread was already
 * suspended only the count is bumped; otherwise the core suspend is issued
 * (optionally interrupting kernel waits when @interrupt_kernel).
 *
 * On failure returns NULL and points *@error_condition at a static
 * description of what went wrong; hazard pointer 1 is cleared on all
 * failure paths after the lookup succeeds.
 */
static MonoThreadInfo*
mono_thread_info_suspend_sync (MonoNativeThreadId tid, gboolean interrupt_kernel, const char **error_condition)
{
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
	MonoThreadInfo *info = mono_thread_info_lookup (tid); /*info on HP1*/
	if (!info) {
		*error_condition = "Thread not found";
		return NULL;
	}

	MONO_SEM_WAIT_UNITERRUPTIBLE (&info->suspend_semaphore);

	/*thread is on the process of detaching*/
	if (mono_thread_info_run_state (info) > STATE_RUNNING) {
		mono_hazard_pointer_clear (hp, 1);
		*error_condition = "Thread is detaching";
		return NULL;
	}

	THREADS_DEBUG ("suspend %x IN COUNT %d\n", tid, info->suspend_count);

	if (info->suspend_count) {
		/* Already suspended: just account for the extra request. */
		++info->suspend_count;
		mono_hazard_pointer_clear (hp, 1);
		MONO_SEM_POST (&info->suspend_semaphore);
		return info;
	}

	if (!mono_threads_core_suspend (info)) {
		MONO_SEM_POST (&info->suspend_semaphore);
		mono_hazard_pointer_clear (hp, 1);
		*error_condition = "Could not suspend thread";
		return NULL;
	}

	if (interrupt_kernel)
		mono_threads_core_interrupt (info);

	++info->suspend_count;
	info->thread_state |= STATE_SUSPENDED;
	MONO_SEM_POST (&info->suspend_semaphore);

	return info;
}
/*
 * mono_jit_info_table_find_internal:
 *
 * If TRY_AOT is FALSE, avoid loading information for missing methods from AOT images, which is currently not async safe.
 * In this case, only those AOT methods will be found whose jit info is already loaded.
 * If ALLOW_TRAMPOLINES is TRUE, this can return a MonoJitInfo which represents a trampoline (ji->is_trampoline is true).
 * ASYNC SAFETY: When called in an async context (mono_thread_info_is_async_context ()), this is async safe.
 * In this case, the returned MonoJitInfo might not have metadata information, in particular,
 * mono_jit_info_get_method () could fail.
 */
MonoJitInfo*
mono_jit_info_table_find_internal (MonoDomain *domain, char *addr, gboolean try_aot, gboolean allow_trampolines)
{
	MonoJitInfoTable *table;
	MonoJitInfo *ji, *module_ji;
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();

	++mono_stats.jit_info_table_lookup_count;

	/* First we have to get the domain's jit_info_table.  This is
	   complicated by the fact that a writer might substitute a
	   new table and free the old one.  What the writer guarantees
	   us is that it looks at the hazard pointers after it has
	   changed the jit_info_table pointer.  So, if we guard the
	   table by a hazard pointer and make sure that the pointer is
	   still there after we've made it hazardous, we don't have to
	   worry about the writer freeing the table. */
	table = (MonoJitInfoTable *)get_hazardous_pointer ((gpointer volatile*)&domain->jit_info_table, hp, JIT_INFO_TABLE_HAZARD_INDEX);

	ji = jit_info_table_find (table, hp, (gint8*)addr);
	if (hp)
		mono_hazard_pointer_clear (hp, JIT_INFO_TABLE_HAZARD_INDEX);
	if (ji && ji->is_trampoline && !allow_trampolines)
		return NULL;
	if (ji)
		return ji;

	/* Maybe its an AOT module */
	if (try_aot && mono_get_root_domain () && mono_get_root_domain ()->aot_modules) {
		/* Same hazard-pointer dance for the root domain's AOT module table. */
		table = (MonoJitInfoTable *)get_hazardous_pointer ((gpointer volatile*)&mono_get_root_domain ()->aot_modules, hp, JIT_INFO_TABLE_HAZARD_INDEX);
		module_ji = jit_info_table_find (table, hp, (gint8*)addr);
		if (module_ji)
			ji = jit_info_find_in_aot_func (domain, module_ji->d.image, addr);
		if (hp)
			mono_hazard_pointer_clear (hp, JIT_INFO_TABLE_HAZARD_INDEX);
	}

	if (ji && ji->is_trampoline && !allow_trampolines)
		return NULL;

	return ji;
}
/*
 * Stash this thread's hazard pointers into a free overflow slot so a signal
 * handler can use the regular record, returning the claimed slot index for a
 * later mono_hazard_pointer_restore_for_signal_handler.
 *
 * Returns -1 when no hazard pointers are set (nothing to save).  Slots are
 * claimed with a CAS on overflow_busy; a lost race restarts the scan.
 */
int
mono_hazard_pointer_save_for_signal_handler (void)
{
	int small_id, i;
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
	MonoThreadHazardPointers *hp_overflow;

	for (i = 0; i < HAZARD_POINTER_COUNT; ++i)
		if (hp->hazard_pointers [i])
			goto search;
	return -1;

search:
	for (small_id = 0; small_id < HAZARD_TABLE_OVERFLOW; ++small_id) {
		if (!overflow_busy [small_id])
			break;
	}

	/*
	 * If this assert fails we don't have enough overflow slots.
	 * We should contemplate adding them dynamically.  If we can
	 * make mono_thread_small_id_alloc() lock-free we can just
	 * allocate them on-demand.
	 */
	g_assert (small_id < HAZARD_TABLE_OVERFLOW);

	/* Claim the slot; if someone else won the race, rescan. */
	if (mono_atomic_cas_i32 (&overflow_busy [small_id], 1, 0) != 0)
		goto search;

	hp_overflow = &hazard_table [small_id];

	for (i = 0; i < HAZARD_POINTER_COUNT; ++i)
		g_assert (!hp_overflow->hazard_pointers [i]);
	*hp_overflow = *hp;

	mono_memory_write_barrier ();

	memset (hp, 0, sizeof (MonoThreadHazardPointers));

	return small_id;
}
/* The return value is only valid until a matching mono_thread_info_resume is called */
/*
 * Suspend the thread with native id @tid, incrementing its suspend count.
 * Serialized by the per-thread critical section.  If the thread was already
 * suspended only the count is bumped; otherwise the core suspend is issued
 * (optionally interrupting kernel waits when @interrupt_kernel).
 *
 * Returns NULL when the thread is unknown, detaching, or the core suspend
 * failed.  Hazard pointer 1 is cleared on every exit after a successful
 * lookup.
 */
static MonoThreadInfo*
mono_thread_info_suspend_sync (MonoNativeThreadId tid, gboolean interrupt_kernel)
{
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
	MonoThreadInfo *info = mono_thread_info_lookup (tid); /*info on HP1*/
	if (!info)
		return NULL;

	EnterCriticalSection (&info->suspend_lock);

	/*thread is on the process of detaching*/
	if (mono_thread_info_run_state (info) > STATE_RUNNING) {
		mono_hazard_pointer_clear (hp, 1);
		return NULL;
	}

	THREADS_DEBUG ("suspend %x IN COUNT %d\n", tid, info->suspend_count);

	if (info->suspend_count) {
		/* Already suspended: just account for the extra request. */
		++info->suspend_count;
		mono_hazard_pointer_clear (hp, 1);
		LeaveCriticalSection (&info->suspend_lock);
		return info;
	}

	if (!mono_threads_core_suspend (info)) {
		LeaveCriticalSection (&info->suspend_lock);
		mono_hazard_pointer_clear (hp, 1);
		return NULL;
	}

	if (interrupt_kernel)
		mono_threads_core_interrupt (info);

	++info->suspend_count;
	info->thread_state |= STATE_SUSPENDED;
	LeaveCriticalSection (&info->suspend_lock);
	mono_hazard_pointer_clear (hp, 1);

	return info;
}
/*
 * Remove and return the head node of lock-free queue @q, or NULL when the
 * queue is empty (Michael-Scott style dequeue with a reenqueued dummy node).
 *
 * The head is protected with hazard pointer slot 0.  When head == tail we
 * either report empty (helping reenqueue the dummy so other threads are not
 * waited on) or help advance the lagging tail.  A dequeued dummy is freed
 * hazardously and the dequeue retried; a real node is returned and must be
 * hazardously freed by the caller.
 *
 * FIX: use #ifdef QUEUE_DEBUG like the enqueue path — the original used
 * "#if QUEUE_DEBUG", which breaks when QUEUE_DEBUG is defined empty.
 */
MonoLockFreeQueueNode*
mono_lock_free_queue_dequeue (MonoLockFreeQueue *q)
{
	MonoThreadHazardPointers *hp = mono_hazard_pointer_get ();
	MonoLockFreeQueueNode *head;

 retry:
	for (;;) {
		MonoLockFreeQueueNode *tail, *next;

		head = (MonoLockFreeQueueNode *) get_hazardous_pointer ((gpointer volatile*)&q->head, hp, 0);
		tail = (MonoLockFreeQueueNode*)q->tail;
		mono_memory_read_barrier ();
		next = head->next;
		mono_memory_read_barrier ();

		/* Are head, tail and next consistent? */
		if (head == q->head) {
			g_assert (next != INVALID_NEXT && next != FREE_NEXT);
			g_assert (next != head);

			/* Is queue empty or tail behind? */
			if (head == tail) {
				if (next == END_MARKER) {
					/* Queue is empty */
					mono_hazard_pointer_clear (hp, 0);

					/*
					 * We only continue if we
					 * reenqueue the dummy
					 * ourselves, so as not to
					 * wait for threads that might
					 * not actually run.
					 */
					if (!is_dummy (q, head) && try_reenqueue_dummy (q))
						continue;

					return NULL;
				}

				/* Try to advance tail */
				InterlockedCompareExchangePointer ((gpointer volatile*)&q->tail, next, tail);
			} else {
				g_assert (next != END_MARKER);
				/* Try to dequeue head */
				if (InterlockedCompareExchangePointer ((gpointer volatile*)&q->head, next, head) == head)
					break;
			}
		}

		mono_memory_write_barrier ();
		mono_hazard_pointer_clear (hp, 0);
	}

	/*
	 * The head is dequeued now, so we know it's this thread's
	 * responsibility to free it - no other thread can.
	 */
	mono_memory_write_barrier ();
	mono_hazard_pointer_clear (hp, 0);

	g_assert (head->next);
	/*
	 * Setting next here isn't necessary for correctness, but we
	 * do it to make sure that we catch dereferencing next in a
	 * node that's not in the queue anymore.
	 */
	head->next = INVALID_NEXT;
#ifdef QUEUE_DEBUG
	g_assert (head->in_queue);
	head->in_queue = FALSE;
	mono_memory_write_barrier ();
#endif

	if (is_dummy (q, head)) {
		g_assert (q->has_dummy);
		q->has_dummy = 0;
		mono_memory_write_barrier ();
		mono_thread_hazardous_free_or_queue (head, free_dummy, FALSE, TRUE);
		if (try_reenqueue_dummy (q))
			goto retry;
		return NULL;
	}

	/* The caller must hazardously free the node. */
	return head;
}