/*
 * End an unmanaged-thread delay previously started by
 * erts_thr_progress_unmanaged_delay__().  'handle' is the umrefc index
 * handed back by that call; the matching decrement happens inside
 * unmanaged_continue().
 */
void
erts_thr_progress_unmanaged_continue__(ErtsThrPrgrDelayHandle handle)
{
#ifdef ERTS_ENABLE_LOCK_CHECK
    /* Drop the lock-checker's "currently delaying" flag that was
     * raised when the delay began, then release the temporary
     * per-thread progress data. */
    ErtsThrPrgrData *lc_tpd = perhaps_thr_prgr_data(NULL);
    ERTS_LC_ASSERT(lc_tpd && lc_tpd->is_delaying);
    lc_tpd->is_delaying = 0;
    return_tmp_thr_prgr_data(lc_tpd);
#endif
    /* The delay/continue protocol is for unmanaged threads only. */
    ASSERT(!erts_thr_progress_is_managed_thread());

    unmanaged_continue(handle);
}
static void system_cleanup(int flush_async) { /* * Make sure only one thread exits the runtime system. */ if (erts_atomic_inc_read_nob(&exiting) != 1) { /* * Another thread is currently exiting the system; * wait for it to do its job. */ #ifdef ERTS_SMP if (erts_thr_progress_is_managed_thread()) { /* * The exiting thread might be waiting for * us to block; need to update status... */ erts_thr_progress_active(NULL, 0); erts_thr_progress_prepare_wait(NULL); } #endif /* Wait forever... */ while (1) erts_milli_sleep(10000000); } /* No cleanup wanted if ... * 1. we are about to do an abnormal exit * 2. we haven't finished initializing, or * 3. another thread than the main thread is performing the exit * (in threaded non smp case). */ if (!flush_async || !erts_initialized #if defined(USE_THREADS) && !defined(ERTS_SMP) || !erts_equal_tids(main_thread, erts_thr_self()) #endif ) return; #ifdef ERTS_SMP #ifdef ERTS_ENABLE_LOCK_CHECK erts_lc_check_exact(NULL, 0); #endif #endif erts_exit_flush_async(); }
/*
 * Begin a thread-progress delay from an unmanaged thread.  Takes a
 * reference on the currently active umrefc slot and returns its index
 * as the delay handle; the reference is released later via
 * erts_thr_progress_unmanaged_continue__().
 *
 * The inc-then-reread retry loop below is ordering-sensitive: the
 * current slot index may be switched concurrently, so after bumping
 * refc we re-read the index, and on a mismatch we release the stale
 * reference (unmanaged_continue) and retry on the new slot.
 */
ErtsThrPrgrDelayHandle
erts_thr_progress_unmanaged_delay__(void)
{
    int umrefc_ix;
    /* Managed threads must not use this protocol. */
    ASSERT(!erts_thr_progress_is_managed_thread());
    umrefc_ix = (int) erts_atomic32_read_acqb(&intrnl->misc.data.umrefc_ix.current);
    while (1) {
	int tmp_ix;
	/* Take a reference on the slot we believe is current... */
	erts_atomic_inc_acqb(&intrnl->umrefc[umrefc_ix].refc);
	/* ...then confirm it is still the current slot. */
	tmp_ix = (int) erts_atomic32_read_acqb(&intrnl->misc.data.umrefc_ix.current);
	if (tmp_ix == umrefc_ix)
	    break;
	/* Slot switched underneath us: drop the stale reference and
	 * retry against the newly observed slot. */
	unmanaged_continue(umrefc_ix);
	umrefc_ix = tmp_ix;
    }
#ifdef ERTS_ENABLE_LOCK_CHECK
    {
	/* Record for the lock checker that this thread is delaying. */
	ErtsThrPrgrData *tpd = tmp_thr_prgr_data(NULL);
	tpd->is_delaying = 1;
    }
#endif
    return (ErtsThrPrgrDelayHandle) umrefc_ix;
}
/*
 * Perform pre-exit cleanup of the runtime system (variant with HYBRID
 * and INCREMENTAL GC teardown).  Only the first thread to arrive
 * performs the shutdown; later arrivals park forever.
 *
 * NOTE(review): a sibling definition of system_cleanup without the
 * HYBRID/INCREMENTAL sections also appears in this file — likely two
 * revisions joined; confirm which one the build actually compiles.
 */
static void system_cleanup(int flush_async) {
    /*
     * Make sure only one thread exits the runtime system.
     */
    if (erts_atomic_inc_read_nob(&exiting) != 1) {
	/*
	 * Another thread is currently exiting the system;
	 * wait for it to do its job.
	 */
#ifdef ERTS_SMP
	if (erts_thr_progress_is_managed_thread()) {
	    /*
	     * The exiting thread might be waiting for
	     * us to block; need to update status...
	     */
	    erts_thr_progress_active(NULL, 0);
	    erts_thr_progress_prepare_wait(NULL);
	}
#endif
	/* Wait forever... */
	while (1)
	    erts_milli_sleep(10000000);
    }

    /* No cleanup wanted if ...
     * 1. we are about to do an abnormal exit
     * 2. we haven't finished initializing, or
     * 3. another thread than the main thread is performing the exit
     *    (in threaded non smp case).
     */
    if (!flush_async
	|| !erts_initialized
#if defined(USE_THREADS) && !defined(ERTS_SMP)
	|| !erts_equal_tids(main_thread, erts_thr_self())
#endif
	)
	return;

#ifdef ERTS_SMP
#ifdef ERTS_ENABLE_LOCK_CHECK
    /* Verify no locks are still held at shutdown. */
    erts_lc_check_exact(NULL, 0);
#endif
#endif

#ifdef HYBRID
    /* Release hybrid-heap message-area GC stacks, then the global
     * off-heap data; stacks are freed before the off-heap cleanup and
     * NULLed to guard against reuse during teardown. */
    if (ma_src_stack) erts_free(ERTS_ALC_T_OBJECT_STACK, (void *)ma_src_stack);
    if (ma_dst_stack) erts_free(ERTS_ALC_T_OBJECT_STACK, (void *)ma_dst_stack);
    if (ma_offset_stack) erts_free(ERTS_ALC_T_OBJECT_STACK, (void *)ma_offset_stack);
    ma_src_stack = NULL;
    ma_dst_stack = NULL;
    ma_offset_stack = NULL;
    erts_cleanup_offheap(&erts_global_offheap);
#endif

#if defined(HYBRID) && !defined(INCREMENTAL)
    /* Non-incremental hybrid build owns the global heap directly. */
    if (global_heap) {
	ERTS_HEAP_FREE(ERTS_ALC_T_HEAP,
		       (void*) global_heap,
		       sizeof(Eterm) * global_heap_sz);
    }
    global_heap = NULL;
#endif

#ifdef INCREMENTAL
    /* Incremental GC tears down its own heap structures. */
    erts_cleanup_incgc();
#endif

    erts_exit_flush_async();
}