static void jl_exit_thread0(int state)
{
    jl_ptls_t ptls2 = jl_all_tls_states[0];
    thread0_exit_state = state;
    jl_atomic_store_release(&ptls2->signal_request, 3);
    pthread_kill(ptls2->system_id, SIGUSR2);
}
// Throw jl_interrupt_exception if the master thread is in a signal async region
// or if SIGINT happens too often.
static void jl_try_deliver_sigint(void)
{
    jl_tls_states_t *ptls = jl_all_task_states[0].ptls;
    jl_safepoint_enable_sigint();
    jl_wake_libuv();
    jl_atomic_store_release(&ptls->signal_request, 2);
    // This also makes sure `sleep` is aborted.
    pthread_kill(jl_all_task_states[0].system_id, SIGUSR2);
}
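// The two functions above share one delivery pattern: publish a request code
// into `signal_request` with a release store, then interrupt the target thread
// with SIGUSR2 so its handler can consume the code with an acquire load. What
// follows is a minimal, self-contained sketch of that pattern, not the actual
// Julia handler; the names `request`, `received`, and `worker_main` are
// illustrative only, and the handler sticks to lock-free atomics since little
// else is async-signal-safe.
#include <pthread.h>
#include <signal.h>
#include <stdatomic.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static _Atomic int request;   // written with release by the sender
static _Atomic int received;  // set by the handler once the request is consumed

static void sigusr2_handler(int sig)
{
    (void)sig;
    // Consume the request code; a real handler would dispatch on it
    // (2 = deliver SIGINT, 3 = exit thread 0, ... in the functions above).
    int req = atomic_exchange_explicit(&request, 0, memory_order_acq_rel);
    atomic_store_explicit(&received, req, memory_order_release);
}

static void *worker_main(void *arg)
{
    (void)arg;
    // The signal also interrupts sleep(), mirroring the "makes sure `sleep`
    // is aborted" comment above.
    while (atomic_load_explicit(&received, memory_order_acquire) == 0)
        sleep(1);
    return NULL;
}

int main(void)
{
    struct sigaction act;
    memset(&act, 0, sizeof(act));
    act.sa_handler = sigusr2_handler;
    sigemptyset(&act.sa_mask);
    sigaction(SIGUSR2, &act, NULL);

    pthread_t worker;
    pthread_create(&worker, NULL, worker_main, NULL);

    atomic_store_explicit(&request, 2, memory_order_release); // e.g. "deliver SIGINT"
    pthread_kill(worker, SIGUSR2);
    pthread_join(worker, NULL);
    printf("request %d delivered\n", atomic_load_explicit(&received, memory_order_acquire));
    return 0;
}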
static void jl_thread_resume(int tid, int sig)
{
    (void)sig;
    jl_tls_states_t *ptls = jl_all_task_states[tid].ptls;
    jl_atomic_store_release(&ptls->signal_request, 1);
    pthread_cond_broadcast(&exit_signal_cond);
    pthread_cond_wait(&signal_caught_cond, &in_signal_lock); // wait for thread to acknowledge
    assert(jl_atomic_load_acquire(&ptls->signal_request) == 0);
    pthread_mutex_unlock(&in_signal_lock);
}
static void jl_thread_suspend_and_get_state(int tid, unw_context_t **ctx)
{
    pthread_mutex_lock(&in_signal_lock);
    jl_tls_states_t *ptls = jl_all_task_states[tid].ptls;
    jl_atomic_store_release(&ptls->signal_request, 1);
    pthread_kill(jl_all_task_states[tid].system_id, SIGUSR2);
    pthread_cond_wait(&signal_caught_cond, &in_signal_lock); // wait for thread to acknowledge
    assert(jl_atomic_load_acquire(&ptls->signal_request) == 0);
    *ctx = signal_context;
}
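// jl_thread_suspend_and_get_state and jl_thread_resume are the requesting half
// of a handshake; the acknowledging half lives in the target thread's SIGUSR2
// handler. Below is a self-contained sketch of that handshake only, not the
// actual Julia handler: polling stands in for signal delivery, the context
// handoff is omitted, and guarded condition waits tolerate spurious wakeups
// where the real code relies on an assert.
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t in_signal_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  signal_caught_cond = PTHREAD_COND_INITIALIZER;
static pthread_cond_t  exit_signal_cond = PTHREAD_COND_INITIALIZER;
static _Atomic int     signal_request; // 1 = suspend (or resume) requested
static _Atomic int     done;

static void *target_thread(void *arg)
{
    (void)arg;
    while (!atomic_load_explicit(&done, memory_order_acquire)) {
        if (atomic_load_explicit(&signal_request, memory_order_acquire) == 1) {
            pthread_mutex_lock(&in_signal_lock);
            atomic_store_explicit(&signal_request, 0, memory_order_release);
            pthread_cond_broadcast(&signal_caught_cond); // acknowledge the suspend
            while (atomic_load_explicit(&signal_request, memory_order_acquire) != 1)
                pthread_cond_wait(&exit_signal_cond, &in_signal_lock); // parked until resumed
            atomic_store_explicit(&signal_request, 0, memory_order_release);
            pthread_cond_broadcast(&signal_caught_cond); // acknowledge the resume
            pthread_mutex_unlock(&in_signal_lock);
        }
        usleep(1000); // the "mutator" doing ordinary work
    }
    return NULL;
}

int main(void)
{
    pthread_t tid;
    pthread_create(&tid, NULL, target_thread, NULL);

    // Suspend, mirroring jl_thread_suspend_and_get_state.
    pthread_mutex_lock(&in_signal_lock);
    atomic_store_explicit(&signal_request, 1, memory_order_release);
    while (atomic_load_explicit(&signal_request, memory_order_acquire) != 0)
        pthread_cond_wait(&signal_caught_cond, &in_signal_lock);
    printf("target suspended\n");

    // Resume, mirroring jl_thread_resume.
    atomic_store_explicit(&signal_request, 1, memory_order_release);
    pthread_cond_broadcast(&exit_signal_cond);
    while (atomic_load_explicit(&signal_request, memory_order_acquire) != 0)
        pthread_cond_wait(&signal_caught_cond, &in_signal_lock);
    pthread_mutex_unlock(&in_signal_lock);
    printf("target resumed\n");

    atomic_store_explicit(&done, 1, memory_order_release);
    pthread_join(tid, NULL);
    return 0;
}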
int ti_threadgroup_fork(ti_threadgroup_t *tg, int16_t ext_tid, void **bcast_val)
{
    if (tg->tid_map[ext_tid] == 0) {
        tg->envelope = bcast_val ? *bcast_val : NULL;
        // synchronize `tg->envelope` and `tg->group_sense`
        jl_atomic_store_release(&tg->group_sense, tg->thread_sense[0]->sense);
        // if it's possible that threads are sleeping, signal them
        if (tg->sleep_threshold) {
            uv_mutex_lock(&tg->alarm_lock);
            uv_cond_broadcast(&tg->alarm);
            uv_mutex_unlock(&tg->alarm_lock);
        }
    }
    else {
        // spin up to threshold ns (count sheep), then sleep
        uint64_t spin_ns;
        uint64_t spin_start = 0;
        // synchronize `tg->envelope` and `tg->group_sense`
        while (jl_atomic_load_acquire(&tg->group_sense) !=
               tg->thread_sense[tg->tid_map[ext_tid]]->sense) {
            if (tg->sleep_threshold) {
                if (!spin_start) {
                    // Lazily initialize spin_start since uv_hrtime is expensive
                    spin_start = uv_hrtime();
                    continue;
                }
                spin_ns = uv_hrtime() - spin_start;
                // In case uv_hrtime is not monotonic, we'll sleep earlier
                if (spin_ns >= tg->sleep_threshold) {
                    uv_mutex_lock(&tg->alarm_lock);
                    if (tg->group_sense != tg->thread_sense[tg->tid_map[ext_tid]]->sense) {
                        uv_cond_wait(&tg->alarm, &tg->alarm_lock);
                    }
                    uv_mutex_unlock(&tg->alarm_lock);
                    spin_start = 0;
                    continue;
                }
            }
            jl_cpu_pause();
        }
        if (bcast_val)
            *bcast_val = tg->envelope;
    }
    return 0;
}
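// ti_threadgroup_fork pairs a release store of `group_sense` (made after the
// envelope is written) with acquire loads in the spinning workers, and re-checks
// the sense under `alarm_lock` before sleeping so a broadcast cannot be lost.
// Below is a minimal, self-contained sketch of that spin-then-sleep broadcast,
// not the Julia threadgroup API: the per-thread sense bookkeeping is collapsed
// into an explicit `my_sense` argument and an iteration count stands in for the
// nanosecond threshold.
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t alarm_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  alarm_cond = PTHREAD_COND_INITIALIZER;
static _Atomic int group_sense; // flipped by the leader each round
static int payload;             // the "envelope": plain data published before the flip

static void leader_broadcast(int value, int new_sense)
{
    payload = value;                                              // publish the envelope first
    atomic_store_explicit(&group_sense, new_sense, memory_order_release);
    pthread_mutex_lock(&alarm_lock);                              // wake any sleepers
    pthread_cond_broadcast(&alarm_cond);
    pthread_mutex_unlock(&alarm_lock);
}

static int follower_wait(int my_sense, uint64_t spin_limit)
{
    uint64_t spins = 0;
    while (atomic_load_explicit(&group_sense, memory_order_acquire) != my_sense) {
        if (++spins >= spin_limit) {                              // give up spinning, go to sleep
            pthread_mutex_lock(&alarm_lock);
            // Re-check under the lock: the leader may have flipped the sense and
            // broadcast between our last load and acquiring the lock.
            if (atomic_load_explicit(&group_sense, memory_order_acquire) != my_sense)
                pthread_cond_wait(&alarm_cond, &alarm_lock);
            pthread_mutex_unlock(&alarm_lock);
            spins = 0;
        }
    }
    return payload; // safe to read: ordered after the acquire load of group_sense
}

static void *follower_thread(void *arg)
{
    (void)arg;
    printf("follower got %d\n", follower_wait(1, 1000000));
    return NULL;
}

int main(void)
{
    pthread_t tid;
    pthread_create(&tid, NULL, follower_thread, NULL);
    leader_broadcast(42, 1); // publish 42, then flip the sense to 1
    pthread_join(tid, NULL);
    return 0;
}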
static jl_sym_t *_jl_symbol(const char *str, size_t len)
{
    jl_sym_t *volatile *slot;
    jl_sym_t *node = symtab_lookup(&symtab, str, len, &slot);
    if (node == NULL) {
        JL_LOCK(&symbol_table_lock); // Might GC
        // Someone might have updated it, check and look up again
        if (*slot != NULL && (node = symtab_lookup(slot, str, len, &slot))) {
            JL_UNLOCK(&symbol_table_lock); // Might GC
            return node;
        }
        node = mk_symbol(str, len);
        jl_atomic_store_release(slot, node);
        JL_UNLOCK(&symbol_table_lock); // Might GC
    }
    return node;
}
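// _jl_symbol is double-checked locking: a lock-free lookup first, the same
// lookup again under the lock, and finally a release store to publish the new
// node so that concurrent lock-free readers never observe a half-initialized
// symbol. A minimal, self-contained sketch of that pattern follows (a one-entry
// "table", not the Julia symbol table; `intern` and `node_t` are illustrative
// names only).
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>

typedef struct node {
    char *name;
} node_t;

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static _Atomic(node_t *) slot; // the single "table" entry

static node_t *intern(const char *name)
{
    node_t *n = atomic_load_explicit(&slot, memory_order_acquire); // fast path, no lock
    if (n == NULL) {
        pthread_mutex_lock(&table_lock);
        n = atomic_load_explicit(&slot, memory_order_acquire); // someone may have won the race
        if (n == NULL) {
            n = malloc(sizeof(node_t));
            n->name = strdup(name);                                // fully initialize first...
            atomic_store_explicit(&slot, n, memory_order_release); // ...then publish
        }
        pthread_mutex_unlock(&table_lock);
    }
    return n;
}

int main(void)
{
    // Both calls must return the same node; the second takes the lock-free path.
    return intern("foo") == intern("foo") ? 0 : 1;
}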
void jl_safepoint_end_gc(void)
{
    assert(jl_gc_running);
#ifdef JULIA_ENABLE_THREADING
    jl_mutex_lock_nogc(&safepoint_lock);
    // Need to reset the page protection before resetting the flag since
    // the thread will trigger a segfault immediately after returning from
    // the signal handler.
    jl_safepoint_disable(2);
    jl_safepoint_disable(1);
    jl_atomic_store_release(&jl_gc_running, 0);
#  ifdef __APPLE__
    // This wakes up other threads on mac.
    jl_mach_gc_end();
#  endif
    jl_mutex_unlock_nogc(&safepoint_lock);
#else
    jl_gc_running = 0;
#endif
}