/*
 * Caller _must_ yield if we return 0
 */
int erts_try_seize_code_write_permission(Process* c_p)
{
    int success;
#ifdef ERTS_SMP
    ASSERT(!erts_smp_thr_progress_is_blocking()); /* to avoid deadlock */
#endif
    ASSERT(c_p != NULL);

    erts_smp_mtx_lock(&code_write_permission_mtx);
    success = (code_writing_process == NULL);
    if (success) {
        code_writing_process = c_p;
#ifdef ERTS_ENABLE_LOCK_CHECK
        erts_tsd_set(has_code_write_permission, (void *) 1);
#endif
    }
    else { /* Already locked */
        struct code_write_queue_item* qitem;
        ASSERT(code_writing_process != c_p);
        qitem = erts_alloc(ERTS_ALC_T_CODE_IX_LOCK_Q, sizeof(*qitem));
        qitem->p = c_p;
        erts_proc_inc_refc(c_p);
        qitem->next = code_write_queue;
        code_write_queue = qitem;
        erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
    }
    erts_smp_mtx_unlock(&code_write_permission_mtx);
    return success;
}
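/*
 * Illustrative sketch (not part of the original source, hence guarded by
 * #if 0): the caller pattern the comment above assumes. A BIF that needs
 * code write permission tries to seize it; on failure it has already been
 * queued and suspended by the function above, so it must yield and later
 * retry with the same arguments, as erts_internal_copy_literals_2() below
 * does. The BIF name used here is hypothetical.
 */
#if 0
BIF_RETTYPE example_code_mod_bif_1(BIF_ALIST_1)
{
    if (!erts_try_seize_code_write_permission(BIF_P)) {
        /* Queued and suspended; yield so we are rescheduled and retry. */
        ERTS_BIF_YIELD1(bif_export[BIF_example_code_mod_bif_1],
                        BIF_P, BIF_ARG_1);
    }
    /* ... perform the code modification under exclusive permission ... */
    erts_release_code_write_permission();
    BIF_RET(am_ok);
}
#endif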
static int
try_seize_update_permission(Process* c_p)
{
    int success;

    ASSERT(!erts_thr_progress_is_blocking()); /* to avoid deadlock */
    ASSERT(c_p != NULL);

    erts_mtx_lock(&update_table_permission_mtx);
    ASSERT(updater_process != c_p);
    success = (updater_process == NULL);
    if (success) {
        updater_process = c_p;
    } else {
        struct update_queue_item* qitem;
        qitem = erts_alloc(ERTS_ALC_T_PERSISTENT_LOCK_Q, sizeof(*qitem));
        qitem->p = c_p;
        erts_proc_inc_refc(c_p);
        qitem->next = update_queue;
        update_queue = qitem;
        erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
    }
    erts_mtx_unlock(&update_table_permission_mtx);
    return success;
}
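/*
 * Note (added): this mirrors erts_try_seize_code_write_permission() above.
 * A caller that gets 0 back has already been queued on update_queue and
 * suspended, so it is expected to yield and retry. The matching release
 * function (not shown here) presumably clears updater_process under
 * update_table_permission_mtx, resumes every queued process and drops the
 * reference counts taken above.
 */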
static Eterm
staging_epilogue(Process* c_p, int commit, Eterm res, int is_blocking,
                 struct m* loaded, int nloaded)
{
#ifdef ERTS_SMP
    if (is_blocking || !commit)
#endif
    {
        if (commit) {
            erts_end_staging_code_ix();
            erts_commit_staging_code_ix();
            if (loaded) {
                int i;
                for (i=0; i < nloaded; i++) {
                    set_default_trace_pattern(loaded[i].module);
                }
            }
        }
        else {
            erts_abort_staging_code_ix();
        }
        if (loaded) {
            erts_free(ERTS_ALC_T_LOADER_TMP, loaded);
        }
        if (is_blocking) {
            erts_smp_thr_progress_unblock();
            erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
        }
        erts_release_code_write_permission();
        return res;
    }
#ifdef ERTS_SMP
    else {
        ASSERT(is_value(res));

        if (loaded) {
            erts_free(ERTS_ALC_T_LOADER_TMP, loaded);
        }
        erts_end_staging_code_ix();
        /*
         * Now we must wait for all schedulers to do a memory barrier before
         * we can commit and let them access the new staged code. This allows
         * schedulers to read active code_ix in a safe way while executing
         * without any memory barriers at all.
         */
        ASSERT(committer_state.stager == NULL);
        committer_state.stager = c_p;
        erts_schedule_thr_prgr_later_op(smp_code_ix_commiter, NULL,
                                        &committer_state.lop);
        erts_proc_inc_refc(c_p);
        erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
        /*
         * smp_code_ix_commiter() will do the rest "later"
         * and resume this process to return 'res'.
         */
        ERTS_BIF_YIELD_RETURN(c_p, res);
    }
#endif
}
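/*
 * Note (added): in the non-blocking SMP path above, the commit is deferred
 * to a thread-progress "later op". The scheduled smp_code_ix_commiter()
 * (not shown here) runs once all managed threads have passed a
 * thread-progress point, which is what lets schedulers read the active
 * code index without memory barriers; it is then expected to commit the
 * staged code index, resume the suspended stager and drop the reference
 * count taken above.
 */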
static void
process_killer(void)
{
    int i, j, max = erts_ptab_max(&erts_proc);
    Process* rp;

    erts_printf("\n\nProcess Information\n\n");
    erts_printf("--------------------------------------------------\n");
    for (i = max-1; i >= 0; i--) {
        rp = erts_pix2proc(i);
        if (rp && rp->i != ENULL) {
            int br;
            print_process_info(ERTS_PRINT_STDOUT, NULL, rp);
            erts_printf("(k)ill (n)ext (r)eturn:\n");
            while(1) {
                if ((j = sys_get_key(0)) <= 0)
                    erts_exit(0, "");
                switch(j) {
                case 'k':
                {
                    ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND;
                    erts_aint32_t state;
                    erts_proc_inc_refc(rp);
                    erts_smp_proc_lock(rp, rp_locks);
                    state = erts_smp_atomic32_read_acqb(&rp->state);
                    if (state & (ERTS_PSFLG_FREE
                                 | ERTS_PSFLG_EXITING
                                 | ERTS_PSFLG_ACTIVE
                                 | ERTS_PSFLG_ACTIVE_SYS
                                 | ERTS_PSFLG_IN_RUNQ
                                 | ERTS_PSFLG_RUNNING
                                 | ERTS_PSFLG_RUNNING_SYS
                                 | ERTS_PSFLG_DIRTY_RUNNING
                                 | ERTS_PSFLG_DIRTY_RUNNING_SYS)) {
                        erts_printf("Can only kill WAITING processes this way\n");
                    }
                    else {
                        (void) erts_send_exit_signal(NULL, NIL, rp, &rp_locks,
                                                     am_kill, NIL, NULL, 0);
                    }
                    erts_smp_proc_unlock(rp, rp_locks);
                    erts_proc_dec_refc(rp);
                }
                /* fall through: after killing, advance to the next process */
                case 'n': br = 1; break;
                case 'r': return;
                default: return;
                }
                if (br == 1) break;
            }
        }
    }
}
BIF_RETTYPE erts_internal_copy_literals_2(BIF_ALIST_2)
{
    ErtsCodeIndex code_ix;
    Eterm res = am_true;

    if (is_not_atom(BIF_ARG_1) || (am_true != BIF_ARG_2 && am_false != BIF_ARG_2)) {
        BIF_ERROR(BIF_P, BADARG);
    }

    if (!erts_try_seize_code_write_permission(BIF_P)) {
        ERTS_BIF_YIELD2(bif_export[BIF_erts_internal_copy_literals_2],
                        BIF_P, BIF_ARG_1, BIF_ARG_2);
    }

    code_ix = erts_active_code_ix();

    if (BIF_ARG_2 == am_true) {
        Module* modp = erts_get_module(BIF_ARG_1, code_ix);
        if (!modp || !modp->old.code_hdr) {
            res = am_false;
            goto done;
        }
        if (erts_clrange.ptr != NULL
            && !(BIF_P->static_flags & ERTS_STC_FLG_SYSTEM_PROC)) {
            res = am_aborted;
            goto done;
        }
        erts_clrange.ptr = modp->old.code_hdr->literals_start;
        erts_clrange.sz  = modp->old.code_hdr->literals_end - erts_clrange.ptr;
        erts_clrange.pid = BIF_P->common.id;
    } else if (BIF_ARG_2 == am_false) {
        if (erts_clrange.pid != BIF_P->common.id) {
            res = am_false;
            goto done;
        }
        erts_clrange.ptr = NULL;
        erts_clrange.sz  = 0;
        erts_clrange.pid = THE_NON_VALUE;
    }

#ifdef ERTS_SMP
    ASSERT(committer_state.stager == NULL);
    committer_state.stager = BIF_P;
    erts_schedule_thr_prgr_later_op(copy_literals_commit, NULL,
                                    &committer_state.lop);
    erts_proc_inc_refc(BIF_P);
    erts_suspend(BIF_P, ERTS_PROC_LOCK_MAIN, NULL);
    ERTS_BIF_YIELD_RETURN(BIF_P, am_true);
#endif
done:
    erts_release_code_write_permission();
    BIF_RET(res);
}
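/*
 * Note (added): the SMP branch above reuses committer_state, so it relies
 * on the code write permission seized at the top of the BIF to guarantee
 * that no other staging operation is in flight (hence the ASSERT on
 * committer_state.stager). The scheduled copy_literals_commit() (not shown
 * here) presumably resumes BIF_P only after all schedulers have passed a
 * thread-progress point and can be assumed to see the updated erts_clrange.
 */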
static void
suspend_until_thr_prg(Process* p)
{
    Binary* state_bin;
    ErtsFlxCtrWakeUpLaterInfo* info;
    state_bin = erts_create_magic_binary(sizeof(ErtsFlxCtrWakeUpLaterInfo),
                                         erts_flxctr_wait_dtor);
    info = ERTS_MAGIC_BIN_DATA(state_bin);
    info->process = p;
    erts_refc_inctest(&state_bin->intern.refc, 1);
    erts_suspend(p, ERTS_PROC_LOCK_MAIN, NULL);
    erts_proc_inc_refc(p);
    ERTS_VBUMP_ALL_REDS(p);
    erts_schedule_thr_prgr_later_op(thr_prg_wake_up_later, state_bin,
                                    &info->later_op);
}
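/*
 * Note (added): the magic binary carries the later-op state and keeps it
 * alive until thread progress has been made. The scheduled
 * thr_prg_wake_up_later() callback (not shown here) is expected to resume
 * the suspended process and release the references taken above once all
 * managed threads have passed a thread-progress point.
 */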
Process *
erts_pid2proc_opt(Process *c_p,
                  ErtsProcLocks c_p_have_locks,
                  Eterm pid,
                  ErtsProcLocks pid_need_locks,
                  int flags)
{
    Process *dec_refc_proc = NULL;
    ErtsThrPrgrDelayHandle dhndl;
    ErtsProcLocks need_locks;
    Uint pix;
    Process *proc;
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
    ErtsProcLocks lcnt_locks;
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
    if (c_p) {
        ErtsProcLocks might_unlock = c_p_have_locks & pid_need_locks;
        if (might_unlock)
            erts_proc_lc_might_unlock(c_p, might_unlock);
    }
#endif

    if (is_not_internal_pid(pid))
        return NULL;
    pix = internal_pid_index(pid);

    ERTS_LC_ASSERT((pid_need_locks & ERTS_PROC_LOCKS_ALL) == pid_need_locks);
    need_locks = pid_need_locks;

    if (c_p && c_p->common.id == pid) {
        ASSERT(c_p->common.id != ERTS_INVALID_PID);
        ASSERT(c_p == erts_pix2proc(pix));

        if (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
            && ERTS_PROC_IS_EXITING(c_p))
            return NULL;
        need_locks &= ~c_p_have_locks;
        if (!need_locks) {
            if (flags & ERTS_P2P_FLG_INC_REFC)
                erts_proc_inc_refc(c_p);
            return c_p;
        }
    }

    dhndl = erts_thr_progress_unmanaged_delay();

    proc = (Process *) erts_ptab_pix2intptr_ddrb(&erts_proc, pix);

    if (proc) {
        if (proc->common.id != pid)
            proc = NULL;
        else if (!need_locks) {
            if (flags & ERTS_P2P_FLG_INC_REFC)
                erts_proc_inc_refc(proc);
        }
        else {
            int busy;

#if ERTS_PROC_LOCK_OWN_IMPL
#ifdef ERTS_ENABLE_LOCK_COUNT
            lcnt_locks = need_locks;
            if (!(flags & ERTS_P2P_FLG_TRY_LOCK)) {
                erts_lcnt_proc_lock(&proc->lock, need_locks);
            }
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
            /* Make sure erts_pid2proc_safelock() is enough to handle
               a potential lock order violation situation... */
            busy = erts_proc_lc_trylock_force_busy(proc, need_locks);
            if (!busy)
#endif
#endif /* ERTS_PROC_LOCK_OWN_IMPL */
            {
                /* Try a quick trylock to grab all the locks we need. */
                busy = (int) erts_smp_proc_raw_trylock__(proc, need_locks);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_CHECK)
                erts_proc_lc_trylock(proc, need_locks, !busy,
                                     __FILE__, __LINE__);
#endif
#ifdef ERTS_PROC_LOCK_DEBUG
                if (!busy)
                    erts_proc_lock_op_debug(proc, need_locks, 1);
#endif
            }

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
            if (flags & ERTS_P2P_FLG_TRY_LOCK)
                erts_lcnt_proc_trylock(&proc->lock, need_locks,
                                       busy ? EBUSY : 0);
#endif

            if (!busy) {
                if (flags & ERTS_P2P_FLG_INC_REFC)
                    erts_proc_inc_refc(proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
                /* all is great */
                if (!(flags & ERTS_P2P_FLG_TRY_LOCK))
                    erts_lcnt_proc_lock_post_x(&proc->lock, lcnt_locks,
                                               __FILE__, __LINE__);
#endif
            }
            else {
                if (flags & ERTS_P2P_FLG_TRY_LOCK)
                    proc = ERTS_PROC_LOCK_BUSY;
                else {
                    int managed;
                    if (flags & ERTS_P2P_FLG_INC_REFC)
                        erts_proc_inc_refc(proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
                    erts_lcnt_proc_lock_unaquire(&proc->lock, lcnt_locks);
#endif

                    managed = dhndl == ERTS_THR_PRGR_DHANDLE_MANAGED;
                    if (!managed) {
                        erts_proc_inc_refc(proc);
                        erts_thr_progress_unmanaged_continue(dhndl);
                        dec_refc_proc = proc;

                        /*
                         * We don't want to call
                         * erts_thr_progress_unmanaged_continue()
                         * again.
                         */
                        dhndl = ERTS_THR_PRGR_DHANDLE_MANAGED;
                    }

                    proc_safelock(managed,
                                  c_p,
                                  c_p_have_locks,
                                  c_p_have_locks,
                                  proc,
                                  0,
                                  need_locks);
                }
            }
        }
    }

    if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
        erts_thr_progress_unmanaged_continue(dhndl);

    if (need_locks
        && proc
        && proc != ERTS_PROC_LOCK_BUSY
        && (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
            ? ERTS_PROC_IS_EXITING(proc)
            : (proc
               != (Process *) erts_ptab_pix2intptr_nob(&erts_proc, pix)))) {

        erts_smp_proc_unlock(proc, need_locks);

        if (flags & ERTS_P2P_FLG_INC_REFC)
            dec_refc_proc = proc;
        proc = NULL;
    }

    if (dec_refc_proc)
        erts_proc_dec_refc(dec_refc_proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_PROC_LOCK_DEBUG)
    ERTS_LC_ASSERT(!proc
                   || proc == ERTS_PROC_LOCK_BUSY
                   || (pid_need_locks ==
                       (ERTS_PROC_LOCK_FLGS_READ_(&proc->lock)
                        & pid_need_locks)));
#endif

    return proc;
}
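/*
 * Illustrative sketch (not part of the original source, hence guarded by
 * #if 0): a hypothetical caller of erts_pid2proc_opt(), assuming pid refers
 * to some process other than c_p. With ERTS_P2P_FLG_INC_REFC the caller
 * owns an extra reference and must drop it, and it must release the locks
 * it asked for.
 */
#if 0
    {
        Process *rp = erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
                                        pid, ERTS_PROC_LOCK_MAIN,
                                        ERTS_P2P_FLG_INC_REFC);
        if (rp) {
            /* ... use rp with its main lock held ... */
            erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
            erts_proc_dec_refc(rp);
        }
    }
#endif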
static void
proc_safelock(int is_managed,
              Process *a_proc,
              ErtsProcLocks a_have_locks,
              ErtsProcLocks a_need_locks,
              Process *b_proc,
              ErtsProcLocks b_have_locks,
              ErtsProcLocks b_need_locks)
{
    Process *p1, *p2;
#ifdef ERTS_ENABLE_LOCK_CHECK
    Eterm pid1, pid2;
#endif
    ErtsProcLocks need_locks1, have_locks1, need_locks2, have_locks2;
    ErtsProcLocks unlock_mask;
    int lock_no, refc1 = 0, refc2 = 0;

    ERTS_LC_ASSERT(b_proc);

    /* Determine inter process lock order...
     * Locks with the same lock order should be locked on p1 before p2.
     */
    if (a_proc) {
        if (a_proc->common.id < b_proc->common.id) {
            p1 = a_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
            pid1 = a_proc->common.id;
#endif
            need_locks1 = a_need_locks;
            have_locks1 = a_have_locks;
            p2 = b_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
            pid2 = b_proc->common.id;
#endif
            need_locks2 = b_need_locks;
            have_locks2 = b_have_locks;
        }
        else if (a_proc->common.id > b_proc->common.id) {
            p1 = b_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
            pid1 = b_proc->common.id;
#endif
            need_locks1 = b_need_locks;
            have_locks1 = b_have_locks;
            p2 = a_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
            pid2 = a_proc->common.id;
#endif
            need_locks2 = a_need_locks;
            have_locks2 = a_have_locks;
        }
        else {
            ERTS_LC_ASSERT(a_proc == b_proc);
            ERTS_LC_ASSERT(a_proc->common.id == b_proc->common.id);
            p1 = a_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
            pid1 = a_proc->common.id;
#endif
            need_locks1 = a_need_locks | b_need_locks;
            have_locks1 = a_have_locks | b_have_locks;
            p2 = NULL;
#ifdef ERTS_ENABLE_LOCK_CHECK
            pid2 = 0;
#endif
            need_locks2 = 0;
            have_locks2 = 0;
        }
    }
    else {
        p1 = b_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
        pid1 = b_proc->common.id;
#endif
        need_locks1 = b_need_locks;
        have_locks1 = b_have_locks;
        p2 = NULL;
#ifdef ERTS_ENABLE_LOCK_CHECK
        pid2 = 0;
#endif
        need_locks2 = 0;
        have_locks2 = 0;
#ifdef ERTS_ENABLE_LOCK_CHECK
        a_need_locks = 0;
        a_have_locks = 0;
#endif
    }

#ifdef ERTS_ENABLE_LOCK_CHECK
    if (p1)
        erts_proc_lc_chk_proc_locks(p1, have_locks1);
    if (p2)
        erts_proc_lc_chk_proc_locks(p2, have_locks2);

    if ((need_locks1 & have_locks1) != have_locks1)
        erts_lc_fail("Thread tries to release process lock(s) "
                     "on %T via erts_proc_safelock().", pid1);
    if ((need_locks2 & have_locks2) != have_locks2)
        erts_lc_fail("Thread tries to release process lock(s) "
                     "on %T via erts_proc_safelock().", pid2);
#endif

    need_locks1 &= ~have_locks1;
    need_locks2 &= ~have_locks2;

    /* Figure out the range of locks that needs to be unlocked... */
    unlock_mask = ERTS_PROC_LOCKS_ALL;
    for (lock_no = 0; lock_no <= ERTS_PROC_LOCK_MAX_BIT; lock_no++) {
        ErtsProcLocks lock = (1 << lock_no);
        if (lock & need_locks1)
            break;
        unlock_mask &= ~lock;
        if (lock & need_locks2)
            break;
    }

    /* ... and unlock locks in that range... */
    if (have_locks1 || have_locks2) {
        ErtsProcLocks unlock_locks;
        unlock_locks = unlock_mask & have_locks1;
        if (unlock_locks) {
            have_locks1 &= ~unlock_locks;
            need_locks1 |= unlock_locks;
            if (!is_managed && !have_locks1) {
                refc1 = 1;
                erts_proc_inc_refc(p1);
            }
            erts_smp_proc_unlock(p1, unlock_locks);
        }
        unlock_locks = unlock_mask & have_locks2;
        if (unlock_locks) {
            have_locks2 &= ~unlock_locks;
            need_locks2 |= unlock_locks;
            if (!is_managed && !have_locks2) {
                refc2 = 1;
                erts_proc_inc_refc(p2);
            }
            erts_smp_proc_unlock(p2, unlock_locks);
        }
    }

    /*
     * lock_no equals the number of the first lock to lock on
     * either p1 *or* p2.
     */

#ifdef ERTS_ENABLE_LOCK_CHECK
    if (p1)
        erts_proc_lc_chk_proc_locks(p1, have_locks1);
    if (p2)
        erts_proc_lc_chk_proc_locks(p2, have_locks2);
#endif

    /* Lock locks in lock order... */
    while (lock_no <= ERTS_PROC_LOCK_MAX_BIT) {
        ErtsProcLocks locks;
        ErtsProcLocks lock = (1 << lock_no);
        ErtsProcLocks lock_mask = 0;
        if (need_locks1 & lock) {
            do {
                lock = (1 << lock_no++);
                lock_mask |= lock;
            } while (lock_no <= ERTS_PROC_LOCK_MAX_BIT
                     && !(need_locks2 & lock));
            if (need_locks2 & lock)
                lock_no--;
            locks = need_locks1 & lock_mask;
            erts_smp_proc_lock(p1, locks);
            have_locks1 |= locks;
            need_locks1 &= ~locks;
        }
        else if (need_locks2 & lock) {
            while (lock_no <= ERTS_PROC_LOCK_MAX_BIT
                   && !(need_locks1 & lock)) {
                lock_mask |= lock;
                lock = (1 << ++lock_no);
            }
            locks = need_locks2 & lock_mask;
            erts_smp_proc_lock(p2, locks);
            have_locks2 |= locks;
            need_locks2 &= ~locks;
        }
        else
            lock_no++;
    }

#ifdef ERTS_ENABLE_LOCK_CHECK
    if (p1)
        erts_proc_lc_chk_proc_locks(p1, have_locks1);
    if (p2)
        erts_proc_lc_chk_proc_locks(p2, have_locks2);

    if (p1 && p2) {
        if (p1 == a_proc) {
            ERTS_LC_ASSERT(a_need_locks == have_locks1);
            ERTS_LC_ASSERT(b_need_locks == have_locks2);
        }
        else {
            ERTS_LC_ASSERT(a_need_locks == have_locks2);
            ERTS_LC_ASSERT(b_need_locks == have_locks1);
        }
    }
    else {
        ERTS_LC_ASSERT(p1);
        if (a_proc) {
            ERTS_LC_ASSERT(have_locks1 == (a_need_locks | b_need_locks));
        }
        else {
            ERTS_LC_ASSERT(have_locks1 == b_need_locks);
        }
    }
#endif

    if (!is_managed) {
        if (refc1)
            erts_proc_dec_refc(p1);
        if (refc2)
            erts_proc_dec_refc(p2);
    }
}
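/*
 * Worked example (added for illustration; lock numbers are abstract bit
 * positions, not specific ERTS lock names). Assume p1 has the lower pid,
 * the caller already holds lock bit 2 on p1, and still needs lock bits 0
 * and 2 on p2:
 *
 *   - unlock_mask ends up containing all bits >= 1 (bit 0 is the first
 *     lock needed, and it is needed on p2 only), so the held bit-2 lock
 *     on p1 is released and moved back into need_locks1.
 *   - The locking loop then acquires bit 0 on p2, bit 2 on p1 and finally
 *     bit 2 on p2, i.e. strictly in ascending lock order and, for equal
 *     lock numbers, p1 before p2.
 *
 * Releasing the already-held bit-2 lock first is what makes it safe to
 * take the lower-ordered bit-0 lock on p2 without risking deadlock.
 */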
void
erts_whereis_name(Process *c_p,
                  ErtsProcLocks c_p_locks,
                  Eterm name,
                  Process** proc,
                  ErtsProcLocks need_locks,
                  int flags,
                  Port** port,
                  int lock_port)
{
    RegProc* rp = NULL;
    HashValue hval;
    int ix;
    HashBucket* b;
#ifdef ERTS_SMP
    ErtsProcLocks current_c_p_locks;
    Port *pending_port = NULL;

    if (!c_p)
        c_p_locks = 0;
    current_c_p_locks = c_p_locks;

 restart:

    reg_safe_read_lock(c_p, &current_c_p_locks);

    /* Locked locks:
     * - port lock on pending_port if pending_port != NULL
     * - read reg lock
     * - current_c_p_locks (either c_p_locks or 0) on c_p
     */
#endif

    hval = REG_HASH(name);
    ix = hval % process_reg.size;
    b = process_reg.bucket[ix];

    /*
     * Note: We have inlined the code from hash.c for speed.
     */

    while (b) {
        if (((RegProc *) b)->name == name) {
            rp = (RegProc *) b;
            break;
        }
        b = b->next;
    }

    if (proc) {
        if (!rp)
            *proc = NULL;
        else {
#ifdef ERTS_SMP
            if (!rp->p)
                *proc = NULL;
            else {
                if (need_locks) {
                    erts_proc_safelock(c_p,
                                       current_c_p_locks,
                                       c_p_locks,
                                       rp->p,
                                       0,
                                       need_locks);
                    current_c_p_locks = c_p_locks;
                }
                if ((flags & ERTS_P2P_FLG_ALLOW_OTHER_X) || is_proc_alive(rp->p))
                    *proc = rp->p;
                else {
                    if (need_locks)
                        erts_smp_proc_unlock(rp->p, need_locks);
                    *proc = NULL;
                }
            }
#else
            if (rp->p
                && ((flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
                    || is_proc_alive(rp->p)))
                *proc = rp->p;
            else
                *proc = NULL;
#endif
            if (*proc && (flags & ERTS_P2P_FLG_INC_REFC))
                erts_proc_inc_refc(*proc);
        }
    }

    if (port) {
        if (!rp || !rp->pt)
            *port = NULL;
        else {
#ifdef ERTS_SMP
            if (lock_port) {
                if (pending_port == rp->pt)
                    pending_port = NULL;
                else {
                    if (pending_port) {
                        /* Ahh! Registered port changed while reg lock
                           was unlocked... */
                        erts_port_release(pending_port);
                        pending_port = NULL;
                    }

                    if (erts_smp_port_trylock(rp->pt) == EBUSY) {
                        Eterm id = rp->pt->common.id; /* id read only... */
                        /* Unlock all locks, acquire port lock, and restart... */
                        if (current_c_p_locks) {
                            erts_smp_proc_unlock(c_p, current_c_p_locks);
                            current_c_p_locks = 0;
                        }
                        reg_read_unlock();
                        pending_port = erts_id2port(id);
                        goto restart;
                    }
                }
                ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(rp->pt));
            }
#endif
            *port = rp->pt;
        }
    }

#ifdef ERTS_SMP
    if (c_p && !current_c_p_locks)
        erts_smp_proc_lock(c_p, c_p_locks);
    if (pending_port)
        erts_port_release(pending_port);
#endif

    reg_read_unlock();
}
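/*
 * Note (added): the port-locking path above never blocks on a port lock
 * while holding the registry read lock. If the trylock fails it releases
 * all locks, acquires the port lock on its own (pending_port) and restarts
 * the lookup, dropping the pending port again if the registration changed
 * in the meantime; presumably this is to respect the established lock
 * order between port locks and the registry lock.
 */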