/*
 * Finalize a previously scheduled switch of c_p's message queue to
 * off-heap management.  Runs with the main process lock held and with
 * the F_OFF_HEAP_MSGQ_CHNG flag set on the process.
 *
 * The user may have toggled the requested management state (multiple
 * times) since the job was scheduled; the process flags hold the most
 * recently requested state, and this function makes the process state
 * consistent with that request.
 *
 * Returns the number of reductions consumed.
 */
Sint erts_complete_off_heap_message_queue_change(Process *c_p)
{
    int reds = 1;

    ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(c_p));
    ASSERT(c_p->flags & F_OFF_HEAP_MSGQ_CHNG);
    ASSERT(erts_atomic32_read_nob(&c_p->state) & ERTS_PSFLG_OFF_HEAP_MSGQ);

    if (c_p->flags & F_OFF_HEAP_MSGQ) {
        /*
         * Off-heap still requested: fetch the in-queue into the private
         * queue and then move the queued messages off heap.
         */
        reds += 2;
        erts_proc_lock(c_p, ERTS_PROC_LOCK_MSGQ);
        ERTS_MSGQ_MV_INQ2PRIVQ(c_p);
        erts_proc_unlock(c_p, ERTS_PROC_LOCK_MSGQ);
        reds += erts_move_messages_off_heap(c_p);
    }
    else {
        /*
         * The user switched back to on-heap since this job was
         * scheduled; just drop the off-heap state flag.
         */
        erts_atomic32_read_band_nob(&c_p->state, ~ERTS_PSFLG_OFF_HEAP_MSGQ);
    }

    /* The change is now complete; clear the change-in-progress flag. */
    c_p->flags &= ~F_OFF_HEAP_MSGQ_CHNG;

    return reds;
}
/*
 * Leader pass of the thread-progress protocol.
 *
 * If this thread is not the current leader, it may simply need to block.
 * Otherwise the leader checks whether all managed threads (and any
 * referenced unmanaged-reference-count slot) have confirmed the pending
 * progress value, and if so advances global thread progress and handles
 * wakeup requests.  Returns the (possibly updated) leader status of tpd.
 *
 * NOTE(review): this is lock-free code with explicit memory barriers
 * (set_mb, ETHR_MEMBAR, read_bor_relb); the exact statement ordering is
 * significant and must not be rearranged.
 */
static ERTS_INLINE int
leader_update(ErtsThrPrgrData *tpd)
{
#ifdef ERTS_ENABLE_LOCK_CHECK
    erts_lc_check_exact(NULL, 0);
#endif
    if (!tpd->leader) {
	/* Probably need to block... */
	block_thread(tpd);
    }
    else {
	ErtsThrPrgrVal current;
	int ix, chk_next_ix, umrefc_ix, my_ix, no_managed, waiting_unmanaged;
	erts_aint32_t lflgs;
	ErtsThrPrgrVal next;
	erts_aint_t refc;

	my_ix = tpd->id;

	if (tpd->leader_state.current == ERTS_THR_PRGR_VAL_WAITING) {
	    /*
	     * Took over as leader from another thread: initialize our
	     * leader state from the shared/global state.
	     */
	    tpd->leader_state.current = read_nob(&erts_thr_prgr__.current);
	    tpd->leader_state.next = tpd->leader_state.current;
	    tpd->leader_state.next++;
	    /* Progress values wrap, skipping the reserved WAITING value. */
	    if (tpd->leader_state.next == ERTS_THR_PRGR_VAL_WAITING)
		tpd->leader_state.next = 0;
	    tpd->leader_state.chk_next_ix = intrnl->misc.data.chk_next_ix;
	    tpd->leader_state.umrefc_ix.waiting = intrnl->misc.data.umrefc_ix.waiting;
	    tpd->leader_state.umrefc_ix.current =
		(int) erts_atomic32_read_nob(&intrnl->misc.data.umrefc_ix.current);

	    if (tpd->confirmed == tpd->leader_state.current) {
		/* Publish our own confirmation of the next value. */
		ErtsThrPrgrVal val = tpd->leader_state.current + 1;
		if (val == ERTS_THR_PRGR_VAL_WAITING)
		    val = 0;
		tpd->confirmed = val;
		set_mb(&intrnl->thr[my_ix].data.current, val);
	    }
	}

	next = tpd->leader_state.next;

	waiting_unmanaged = 0;
	umrefc_ix = -1; /* Shut up annoying warning */

	chk_next_ix = tpd->leader_state.chk_next_ix;
	no_managed = intrnl->managed.no;
	ASSERT(0 <= chk_next_ix && chk_next_ix <= no_managed);
	/* Check manged threads */
	if (chk_next_ix < no_managed) {
	    for (ix = chk_next_ix; ix < no_managed; ix++) {
		ErtsThrPrgrVal tmp;
		if (ix == my_ix)
		    continue;
		tmp = read_nob(&intrnl->thr[ix].data.current);
		if (tmp != next && tmp != ERTS_THR_PRGR_VAL_WAITING) {
		    /*
		     * Thread ix has not yet confirmed 'next'; remember
		     * where to resume checking and bail out.
		     */
		    tpd->leader_state.chk_next_ix = ix;
		    ASSERT(erts_thr_progress_has_passed__(next, tmp));
		    goto done;
		}
	    }
	}

	/* Check unmanged threads */
	waiting_unmanaged = tpd->leader_state.umrefc_ix.waiting != -1;
	umrefc_ix = (waiting_unmanaged
		     ? tpd->leader_state.umrefc_ix.waiting
		     : tpd->leader_state.umrefc_ix.current);
	refc = erts_atomic_read_nob(&intrnl->umrefc[umrefc_ix].refc);
	ASSERT(refc >= 0);
	if (refc != 0) {
	    int new_umrefc_ix;

	    /* Already waiting on this refc slot to drain; nothing new. */
	    if (waiting_unmanaged)
		goto done;

	    /*
	     * Unmanaged references outstanding: flip the active refc
	     * slot (two-slot scheme) and wait for the old one to drain.
	     */
	    new_umrefc_ix = (umrefc_ix + 1) & 0x1;
	    tpd->leader_state.umrefc_ix.waiting = umrefc_ix;
	    tpd->leader_state.chk_next_ix = no_managed;
	    erts_atomic32_set_nob(&intrnl->misc.data.umrefc_ix.current,
				  (erts_aint32_t) new_umrefc_ix);
	    /* Order the slot switch before re-reading the refc. */
	    ETHR_MEMBAR(ETHR_StoreLoad);
	    refc = erts_atomic_read_nob(&intrnl->umrefc[umrefc_ix].refc);
	    ASSERT(refc >= 0);
	    waiting_unmanaged = 1;
	    if (refc != 0)
		goto done;
	}

	/* Make progress */
	current = next;

	next++;
	if (next == ERTS_THR_PRGR_VAL_WAITING)
	    next = 0;

	/* Publish our confirmation of the new 'next', then advance the
	 * global progress value with a full barrier. */
	set_nob(&intrnl->thr[my_ix].data.current, next);
	set_mb(&erts_thr_prgr__.current, current);
	tpd->confirmed = next;
	tpd->leader_state.next = next;
	tpd->leader_state.current = current;

#if ERTS_THR_PRGR_PRINT_VAL
	if (current % 1000 == 0)
	    erts_fprintf(stderr, "%b64u\n", current);
#endif
	handle_wakeup_requests(current);

	if (waiting_unmanaged) {
	    /* The slot we were waiting for has drained; stop waiting. */
	    waiting_unmanaged = 0;
	    tpd->leader_state.umrefc_ix.waiting = -1;
	    erts_atomic32_read_band_nob(&intrnl->misc.data.lflgs,
					~ERTS_THR_PRGR_LFLG_WAITING_UM);
	}
	tpd->leader_state.chk_next_ix = 0;

    done:

	if (tpd->active) {
	    lflgs = erts_atomic32_read_nob(&intrnl->misc.data.lflgs);
	    if (lflgs & ERTS_THR_PRGR_LFLG_BLOCK)
		(void) block_thread(tpd);
	}
	else {
	    /* We are inactive: hand over leadership. */
	    int force_wakeup_check = 0;
	    erts_aint32_t set_flags = ERTS_THR_PRGR_LFLG_NO_LEADER;
	    tpd->leader = 0;
	    tpd->leader_state.current = ERTS_THR_PRGR_VAL_WAITING;
#if ERTS_THR_PRGR_PRINT_LEADER
	    erts_fprintf(stderr, "L <- %d\n", tpd->id);
#endif
	    ERTS_THR_PROGRESS_STATE_DEBUG_SET_LEADER(tpd->id, 0);

	    intrnl->misc.data.umrefc_ix.waiting
		= tpd->leader_state.umrefc_ix.waiting;
	    if (waiting_unmanaged)
		set_flags |= ERTS_THR_PRGR_LFLG_WAITING_UM;

	    /* Release-ordered publish of the no-leader (and possibly
	     * waiting-unmanaged) flags. */
	    lflgs = erts_atomic32_read_bor_relb(&intrnl->misc.data.lflgs,
						set_flags);
	    lflgs |= set_flags;
	    if (lflgs & ERTS_THR_PRGR_LFLG_BLOCK)
		lflgs = block_thread(tpd);

	    if (waiting_unmanaged) {
		/* Need to check umrefc again */
		ETHR_MEMBAR(ETHR_StoreLoad);
		refc = erts_atomic_read_nob(&intrnl->umrefc[umrefc_ix].refc);
		if (refc == 0) {
		    /* Need to force wakeup check */
		    force_wakeup_check = 1;
		}
	    }

	    if ((force_wakeup_check
		 || ((lflgs & (ERTS_THR_PRGR_LFLG_NO_LEADER
			       | ERTS_THR_PRGR_LFLG_WAITING_UM
			       | ERTS_THR_PRGR_LFLG_ACTIVE_MASK))
		     == ERTS_THR_PRGR_LFLG_NO_LEADER))
		&& got_sched_wakeups()) {
		/* Someone need to make progress */
		wakeup_managed(0);
	    }
	}
    }

    return tpd->leader;
}
/*
 * Change c_p's message queue management between on-heap and off-heap.
 *
 * new_state is am_on_heap or am_off_heap.  Returns the *previous* state
 * as an atom, THE_NON_VALUE on a bad new_state (badarg), or am_error on
 * an internal inconsistency.
 *
 * Switching to off-heap may schedule an asynchronous completion job
 * (change_off_heap_msgq) via thread progress; the switch is then
 * finished by erts_complete_off_heap_message_queue_change().
 */
Eterm erts_change_message_queue_management(Process *c_p, Eterm new_state)
{
    Eterm res;

#ifdef DEBUG
    /* Invariant check: ERTS_PSFLG_OFF_HEAP_MSGQ must be set iff the
     * process either uses off-heap msgq or has a change in flight. */
    if (c_p->flags & F_OFF_HEAP_MSGQ) {
	ASSERT(erts_atomic32_read_nob(&c_p->state)
	       & ERTS_PSFLG_OFF_HEAP_MSGQ);
    }
    else {
	if (c_p->flags & F_OFF_HEAP_MSGQ_CHNG) {
	    ASSERT(erts_atomic32_read_nob(&c_p->state)
		   & ERTS_PSFLG_OFF_HEAP_MSGQ);
	}
	else {
	    ASSERT(!(erts_atomic32_read_nob(&c_p->state)
		     & ERTS_PSFLG_OFF_HEAP_MSGQ));
	}
    }
#endif

    switch (c_p->flags & (F_OFF_HEAP_MSGQ|F_ON_HEAP_MSGQ)) {

    case F_OFF_HEAP_MSGQ:
	res = am_off_heap;

	switch (new_state) {
	case am_off_heap:
	    /* No change requested. */
	    break;
	case am_on_heap:
	    c_p->flags |= F_ON_HEAP_MSGQ;
	    c_p->flags &= ~F_OFF_HEAP_MSGQ;
	    erts_atomic32_read_bor_nob(&c_p->state,
				       ERTS_PSFLG_ON_HEAP_MSGQ);
	    /*
	     * We are not allowed to clear ERTS_PSFLG_OFF_HEAP_MSGQ
	     * if a off heap change is ongoing. It will be adjusted
	     * when the change completes...
	     */
	    if (!(c_p->flags & F_OFF_HEAP_MSGQ_CHNG)) {
		/* Safe to clear ERTS_PSFLG_OFF_HEAP_MSGQ... */
		erts_atomic32_read_band_nob(&c_p->state,
					    ~ERTS_PSFLG_OFF_HEAP_MSGQ);
	    }
	    break;
	default:
	    res = THE_NON_VALUE; /* badarg */
	    break;
	}
	break;

    case F_ON_HEAP_MSGQ:
	res = am_on_heap;

	switch (new_state) {
	case am_on_heap:
	    /* No change requested. */
	    break;
	case am_off_heap:
	    c_p->flags &= ~F_ON_HEAP_MSGQ;
	    erts_atomic32_read_band_nob(&c_p->state,
					~ERTS_PSFLG_ON_HEAP_MSGQ);
	    goto change_to_off_heap;
	default:
	    res = THE_NON_VALUE; /* badarg */
	    break;
	}
	break;

    default:
	/* Exactly one of the two flags must be set. */
	res = am_error;
	ERTS_INTERNAL_ERROR("Inconsistent message queue management state");
	break;
    }

    return res;

change_to_off_heap:

    c_p->flags |= F_OFF_HEAP_MSGQ;

    /*
     * We do not have to schedule a change if
     * we have an ongoing off heap change...
     */
    if (!(c_p->flags & F_OFF_HEAP_MSGQ_CHNG)) {
	ErtsChangeOffHeapMessageQueue *cohmq;
	/*
	 * Need to set ERTS_PSFLG_OFF_HEAP_MSGQ and wait
	 * thread progress before completing the change in
	 * order to ensure that all senders observe that
	 * messages should be passed off heap. When the
	 * change has completed, GC does not need to inspect
	 * the message queue at all.
	 */
	erts_atomic32_read_bor_nob(&c_p->state,
				   ERTS_PSFLG_OFF_HEAP_MSGQ);
	c_p->flags |= F_OFF_HEAP_MSGQ_CHNG;
	cohmq = erts_alloc(ERTS_ALC_T_MSGQ_CHNG,
			   sizeof(ErtsChangeOffHeapMessageQueue));
	cohmq->pid = c_p->common.id;
	/* Completion runs after thread progress has been made. */
	erts_schedule_thr_prgr_later_op(change_off_heap_msgq,
					(void *) cohmq,
					&cohmq->lop);
    }

    return res;
}