/*
 * Finish a code-staging operation: either commit or abort the staged
 * code index, then return 'res' to the caller (a BIF result term).
 *
 * c_p         - calling process (holds ERTS_PROC_LOCK_MAIN on entry in the
 *               non-blocking path; re-acquires it in the blocking path)
 * commit      - nonzero to commit the staged code_ix, zero to abort it
 * res         - result term to return (or to deliver after suspension)
 * is_blocking - nonzero if the system was blocked via thread progress;
 *               in that case we unblock before returning
 * loaded      - optional array of loaded modules; freed here in all paths.
 *               Ownership transfers to this function.
 * nloaded     - number of entries in 'loaded'
 *
 * NOTE(review): on SMP with commit and no blocking, the function does NOT
 * return normally — it suspends c_p and yields; handle_code_ix_activation()
 * later completes the commit and resumes the process with 'res'.
 */
static Eterm
staging_epilogue(Process* c_p, int commit, Eterm res, int is_blocking,
                 struct m* loaded, int nloaded)
{
#ifdef ERTS_SMP
    /* Synchronous path: either the system is already blocked, or we are
     * aborting (abort needs no scheduler synchronization). Without SMP
     * this braced block is unconditional. */
    if (is_blocking || !commit)
#endif
    {
        if (commit) {
            /* Commit immediately: end staging and flip the active code_ix. */
            erts_end_staging_code_ix();
            erts_commit_staging_code_ix();
            if (loaded) {
                int i;
                /* Apply default trace patterns to each newly loaded module. */
                for (i=0; i < nloaded; i++) {
                    set_default_trace_pattern(loaded[i].module);
                }
            }
        }
        else {
            /* Discard everything staged so far. */
            erts_abort_staging_code_ix();
        }
        if (loaded) {
            erts_free(ERTS_ALC_T_LOADER_TMP, loaded);
        }
        if (is_blocking) {
            /* Undo the thread-progress block and re-take our main lock. */
            erts_smp_thr_progress_unblock();
            erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
        }
        erts_release_code_write_permission();
        return res;
    }
#ifdef ERTS_SMP
    else {
        /* Asynchronous commit path (SMP, not blocking). */
        ErtsThrPrgrVal later;
        ASSERT(is_value(res));
        if (loaded) {
            erts_free(ERTS_ALC_T_LOADER_TMP, loaded);
        }
        erts_end_staging_code_ix();
        /*
         * Now we must wait for all schedulers to do a memory barrier before
         * we can activate and let them access the new staged code. This allows
         * schedulers to read active code_ix in a safe way while executing
         * without any memory barriers at all.
         */
        later = erts_thr_progress_later();
        erts_thr_progress_wakeup(c_p->scheduler_data, later);
        erts_notify_code_ix_activation(c_p, later);
        erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
        /*
         * handle_code_ix_activation() will do the rest "later"
         * and resume this process to return 'res'.
         */
        ERTS_BIF_YIELD_RETURN(c_p, res);
    }
#endif
}
/*
 * Dequeue the next completed async job for an async worker thread,
 * blocking (via the thread-sleep event 'tse') until one is available.
 *
 * q        - the thread queue to consume from
 * tse      - this thread's sleep/wake event, reset before each wait
 * prep_enq - out: prepared-enqueue state taken from the dequeued job
 *            (only when ERTS_USE_ASYNC_READY_Q is enabled)
 *
 * Returns the dequeued job; never returns NULL (loops/waits instead).
 *
 * While waiting, the loop also drives queue cleanup: it collects
 * finalize-dequeue state in 'fin_deq' (flagged by 'saved_fin_deq') and
 * hands any pending state to the next dequeued job before returning it.
 */
static ERTS_INLINE ErtsAsync *async_get(ErtsThrQ_t *q,
                                        erts_tse_t *tse,
                                        ErtsThrQPrepEnQ_t **prep_enq)
{
#if ERTS_USE_ASYNC_READY_Q
    int saved_fin_deq = 0;       /* nonzero when 'fin_deq' holds pending state */
    ErtsThrQFinDeQ_t fin_deq;
#endif
#ifdef USE_VM_PROBES
    int len;
#endif

    while (1) {
        ErtsAsync *a = (ErtsAsync *) erts_thr_q_dequeue(q);
        if (a) {
#if ERTS_USE_ASYNC_READY_Q
            /* Pass queue finalize-dequeue state along with the job, merging
             * in anything we accumulated while waiting. */
            *prep_enq = a->q.prep_enq;
            erts_thr_q_get_finalize_dequeue_data(q, &a->q.fin_deq);
            if (saved_fin_deq)
                erts_thr_q_append_finalize_dequeue_data(&a->q.fin_deq, &fin_deq);
#endif
#ifdef USE_LTTNG_VM_TRACEPOINTS
            if (LTTNG_ENABLED(aio_pool_get)) {
                lttng_decl_portbuf(port_str);
                int length = erts_thr_q_length_dirty(q);
                lttng_portid_to_str(a->port, port_str);
                LTTNG2(aio_pool_get, port_str, length);
            }
#endif
#ifdef USE_VM_PROBES
            if (DTRACE_ENABLED(aio_pool_get)) {
                DTRACE_CHARBUF(port_str, 16);
                erts_snprintf(port_str, sizeof(DTRACE_CHARBUF_NAME(port_str)),
                              "%T", a->port);
                /* DTRACE TODO: Get the length from erts_thr_q_dequeue() ? */
                len = -1;
                DTRACE2(aio_pool_get, port_str, len);
            }
#endif
            return a;
        }

        /* Queue was empty. Try to clean it; if not still dirty, prepare
         * to sleep until a new job is enqueued. */
        if (ERTS_THR_Q_DIRTY != erts_thr_q_clean(q)) {
            ErtsThrQFinDeQ_t tmp_fin_deq;

            erts_tse_reset(tse);

#if ERTS_USE_ASYNC_READY_Q
        chk_fin_deq:
            /* Accumulate any finalize-dequeue work into 'fin_deq'. */
            if (erts_thr_q_get_finalize_dequeue_data(q, &tmp_fin_deq)) {
                if (!saved_fin_deq) {
                    erts_thr_q_finalize_dequeue_state_init(&fin_deq);
                    saved_fin_deq = 1;
                }
                erts_thr_q_append_finalize_dequeue_data(&fin_deq, &tmp_fin_deq);
            }
#endif

            switch (erts_thr_q_inspect(q, 1)) {
            case ERTS_THR_Q_DIRTY:
                /* New work appeared; retry the dequeue immediately. */
                break;
            case ERTS_THR_Q_NEED_THR_PRGR:
#ifdef ERTS_SMP
            {
                ErtsThrPrgrVal prgr = erts_thr_q_need_thr_progress(q);
                erts_thr_progress_wakeup(NULL, prgr);
                /*
                 * We do no dequeue finalizing in hope that a new async
                 * job will arrive before we are woken due to thread
                 * progress...
                 */
                erts_tse_wait(tse);
                break;
            }
#endif
            /* Without ERTS_SMP the case above is empty and falls through
             * to the CLEAN handling below. */
            case ERTS_THR_Q_CLEAN:
#if ERTS_USE_ASYNC_READY_Q
                /* Queue fully clean: flush pending finalize-dequeue state
                 * before sleeping; re-check if finalizing yielded. */
                if (saved_fin_deq) {
                    if (erts_thr_q_finalize_dequeue(&fin_deq))
                        goto chk_fin_deq;
                    else
                        saved_fin_deq = 0;
                }
#endif
                erts_tse_wait(tse);
                break;
            default:
                ASSERT(0);
                break;
            }
        }
    }
}