// Try to become the thread that runs the garbage collection.
// Returns 1 if this thread won the race and should run the GC
// (after it returns, both safepoint pages 1 and 2 are armed);
// returns 0 if another thread is already running the GC — in that
// case this call blocks in jl_safepoint_wait_gc() until that
// collection finishes.
// NOTE(review): caller is expected to have transitioned its own
// gc_state to JL_GC_STATE_WAITING beforehand (see assert below).
int jl_safepoint_start_gc(void) {
#ifdef JULIA_ENABLE_THREADING
    // The thread should have set this already
    assert(jl_get_ptls_states()->gc_state == JL_GC_STATE_WAITING);
    jl_mutex_lock_nogc(&safepoint_lock);
    // In case multiple threads enter the GC at the same time, only allow
    // one of them to actually run the collection. We can't just let the
    // master thread do the GC since it might be running unmanaged code
    // and can take arbitrarily long time before hitting a safe point.
    if (jl_atomic_compare_exchange(&jl_gc_running, 0, 1) != 0) {
        // Lost the race: someone else set jl_gc_running first.
        // Drop the lock before blocking so the winner can proceed.
        jl_mutex_unlock_nogc(&safepoint_lock);
        jl_safepoint_wait_gc();
        return 0;
    }
    // We are the GC master: arm both safepoint pages so other threads
    // trap at their next safepoint.
    jl_safepoint_enable(1);
    jl_safepoint_enable(2);
    jl_mutex_unlock_nogc(&safepoint_lock);
    return 1;
#else
    // For single thread, GC should not call itself (in finalizers) before
    // setting `jl_gc_running` to false so this should never happen.
    assert(!jl_gc_running);
    jl_gc_running = 1;
    return 1;
#endif
}
/* multiq_insert() */
// Insert `task` into a randomly chosen priority heap, tagging it with
// `priority`. Returns 0 on success; raises a Julia error (and would
// return -1) when the chosen heap is already full.
static inline int multiq_insert(jl_task_t *task, int16_t priority)
{
    jl_ptls_t ptls = jl_get_ptls_states();

    task->prio = priority;

    // Pick random heaps until we manage to grab one's lock; trylock
    // (rather than lock) keeps contended inserts spreading out.
    uint64_t which;
    for (;;) {
        which = cong(heap_p, cong_unbias, &ptls->rngseed);
        if (jl_mutex_trylock_nogc(&heaps[which].lock))
            break;
    }

    if (heaps[which].ntasks >= tasks_per_heap) {
        // Release the lock before raising so we don't leave it held.
        jl_mutex_unlock_nogc(&heaps[which].lock);
        jl_error("multiq insertion failed, increase #tasks per heap");
        return -1;  // defensive; jl_error does not return
    }

    // Append at the end and restore the heap invariant.
    heaps[which].tasks[heaps[which].ntasks++] = task;
    sift_up(&heaps[which], heaps[which].ntasks - 1);
    jl_mutex_unlock_nogc(&heaps[which].lock);

    // Publish the heap's new minimum priority if this task lowered it.
    // A failed CAS is fine: someone else updated it concurrently.
    int16_t seen = jl_atomic_load(&heaps[which].prio);
    if (task->prio < seen)
        jl_atomic_compare_exchange(&heaps[which].prio, seen, task->prio);
    return 0;
}