/*
 * erts_pid2proc_safelock() is called from erts_pid2proc_opt() when
 * it wasn't possible to trylock all locks needed.
 *   c_p            - current process
 *   c_p_have_locks - locks held on c_p
 *   pid            - process id of process we are looking up
 *   proc           - process struct of process we are looking
 *                    up (both in and out argument)
 *   need_locks     - all locks we need (including have_locks)
 *   pix_lock       - pix lock for process we are looking up
 *   flags          - option flags
 */
void
erts_pid2proc_safelock(Process *c_p,
                       ErtsProcLocks c_p_have_locks,
                       Process **proc,
                       ErtsProcLocks need_locks,
                       erts_pix_lock_t *pix_lock,
                       int flags)
{
    Process *p = *proc;
    ERTS_LC_ASSERT(p->lock.refc > 0);
    ERTS_LC_ASSERT(process_tab[internal_pid_index(p->id)] == p);
    p->lock.refc++;
    erts_pix_unlock(pix_lock);

    proc_safelock(c_p,
                  c_p ? ERTS_PID2PIXLOCK(c_p->id) : NULL,
                  c_p_have_locks,
                  c_p_have_locks,
                  p,
                  pix_lock,
                  0,
                  need_locks);

    erts_pix_lock(pix_lock);

    if (!p->is_exiting
        || ((flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
            && process_tab[internal_pid_index(p->id)] == p)) {
        ERTS_LC_ASSERT(p->lock.refc > 1);
        p->lock.refc--;
    }
    else {
        /* No proc. Note, we need to keep refc until after process unlock */
        erts_pix_unlock(pix_lock);
        erts_smp_proc_unlock__(p, pix_lock, need_locks);
        *proc = NULL;
        erts_pix_lock(pix_lock);
        ERTS_LC_ASSERT(p->lock.refc > 0);
        if (--p->lock.refc == 0) {
            erts_pix_unlock(pix_lock);
            erts_free_proc(p);
            erts_pix_lock(pix_lock);
        }
    }
}
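/*
 * Illustrative sketch, not part of ERTS: the deadlock-avoidance idea behind
 * proc_safelock()/erts_pid2proc_safelock() above, shown with plain POSIX
 * mutexes. If the trylock on the lock we still need fails, everything
 * already held is released and both locks are re-acquired in one fixed
 * global order (address order here). The helper name lock_pair() and the
 * address-order rule are assumptions made only for this example.
 */
#include <pthread.h>

static void
lock_pair(pthread_mutex_t *held /* already locked by the caller */,
          pthread_mutex_t *wanted)
{
    if (pthread_mutex_trylock(wanted) == 0)
        return;                 /* fast path: got the second lock at once */

    /* Slow path: drop what we hold, then take both locks in a fixed
     * order so that no two threads can wait on each other in a cycle. */
    pthread_mutex_unlock(held);
    if (held < wanted) {
        pthread_mutex_lock(held);
        pthread_mutex_lock(wanted);
    }
    else {
        pthread_mutex_lock(wanted);
        pthread_mutex_lock(held);
    }
}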
Eterm
check_process_code_2(BIF_ALIST_2)
{
    Process* rp;
    Module* modp;

    if (is_not_atom(BIF_ARG_2)) {
        goto error;
    }
    if (is_internal_pid(BIF_ARG_1)) {
        Eterm res;
        if (internal_pid_index(BIF_ARG_1) >= erts_max_processes)
            goto error;
        modp = erts_get_module(BIF_ARG_2);
        if (modp == NULL) {             /* Doesn't exist. */
            return am_false;
        } else if (modp->old_code == NULL) { /* No old code. */
            return am_false;
        }
#ifdef ERTS_SMP
        rp = erts_pid2proc_suspend(BIF_P, ERTS_PROC_LOCK_MAIN,
                                   BIF_ARG_1, ERTS_PROC_LOCK_MAIN);
#else
        rp = erts_pid2proc(BIF_P, 0, BIF_ARG_1, 0);
#endif
        if (!rp) {
            BIF_RET(am_false);
        }
        if (rp == ERTS_PROC_LOCK_BUSY) {
            ERTS_BIF_YIELD2(bif_export[BIF_check_process_code_2], BIF_P,
                            BIF_ARG_1, BIF_ARG_2);
        }
        res = check_process_code(rp, modp);
#ifdef ERTS_SMP
        if (BIF_P != rp) {
            erts_resume(rp, ERTS_PROC_LOCK_MAIN);
            erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
        }
#endif
        BIF_RET(res);
    }
    else if (is_external_pid(BIF_ARG_1)
             && external_pid_dist_entry(BIF_ARG_1) == erts_this_dist_entry) {
        BIF_RET(am_false);
    }

 error:
    BIF_ERROR(BIF_P, BADARG);
}
Eterm
check_process_code_2(BIF_ALIST_2)
{
    Process* rp;
    Module* modp;

    if (is_not_atom(BIF_ARG_2)) {
        goto error;
    }
    if (is_internal_pid(BIF_ARG_1)) {
        Eterm res;
        if (internal_pid_index(BIF_ARG_1) >= erts_max_processes)
            goto error;
        rp = erts_pid2proc_not_running(BIF_P, ERTS_PROC_LOCK_MAIN,
                                       BIF_ARG_1, ERTS_PROC_LOCK_MAIN);
        if (!rp) {
            BIF_RET(am_false);
        }
        if (rp == ERTS_PROC_LOCK_BUSY) {
            ERTS_BIF_YIELD2(bif_export[BIF_check_process_code_2], BIF_P,
                            BIF_ARG_1, BIF_ARG_2);
        }
        modp = erts_get_module(BIF_ARG_2);
        res = check_process_code(rp, modp);
#ifdef ERTS_SMP
        if (BIF_P != rp)
            erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
#endif
        BIF_RET(res);
    }
    else if (is_external_pid(BIF_ARG_1)
             && external_pid_dist_entry(BIF_ARG_1) == erts_this_dist_entry) {
        BIF_RET(am_false);
    }

 error:
    BIF_ERROR(BIF_P, BADARG);
}
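/*
 * Sketch of the calling pattern used by the check_process_code_2() variants
 * above (following the erts_pid2proc_suspend() variant), assuming an SMP
 * build. The BIF name example_bif_2 and its export index BIF_example_bif_2
 * are hypothetical; everything else is taken from the code above. A lookup
 * that cannot get the target's main lock right away returns
 * ERTS_PROC_LOCK_BUSY, and the BIF yields and retries instead of blocking
 * while holding its own main lock.
 */
Eterm
example_bif_2(BIF_ALIST_2)
{
    Eterm res = am_false;
    Process *rp = erts_pid2proc_suspend(BIF_P, ERTS_PROC_LOCK_MAIN,
                                        BIF_ARG_1, ERTS_PROC_LOCK_MAIN);
    if (!rp)                            /* target no longer alive */
        BIF_RET(am_false);
    if (rp == ERTS_PROC_LOCK_BUSY)      /* locks busy; yield and retry */
        ERTS_BIF_YIELD2(bif_export[BIF_example_bif_2], BIF_P,
                        BIF_ARG_1, BIF_ARG_2);

    /* ... inspect rp while holding its main lock; set res ... */

    if (BIF_P != rp) {                  /* never resume/unlock ourselves */
        erts_resume(rp, ERTS_PROC_LOCK_MAIN);
        erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
    }
    BIF_RET(res);
}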
Process *
erts_pid2proc_opt(Process *c_p,
                  ErtsProcLocks c_p_have_locks,
                  Eterm pid,
                  ErtsProcLocks pid_need_locks,
                  int flags)
{
    Process *dec_refc_proc = NULL;
    ErtsThrPrgrDelayHandle dhndl;
    ErtsProcLocks need_locks;
    Uint pix;
    Process *proc;
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
    ErtsProcLocks lcnt_locks;
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
    if (c_p) {
        ErtsProcLocks might_unlock = c_p_have_locks & pid_need_locks;
        if (might_unlock)
            erts_proc_lc_might_unlock(c_p, might_unlock);
    }
#endif

    if (is_not_internal_pid(pid))
        return NULL;
    pix = internal_pid_index(pid);

    ERTS_LC_ASSERT((pid_need_locks & ERTS_PROC_LOCKS_ALL) == pid_need_locks);
    need_locks = pid_need_locks;

    if (c_p && c_p->common.id == pid) {
        ASSERT(c_p->common.id != ERTS_INVALID_PID);
        ASSERT(c_p == erts_pix2proc(pix));

        if (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
            && ERTS_PROC_IS_EXITING(c_p))
            return NULL;
        need_locks &= ~c_p_have_locks;
        if (!need_locks) {
            if (flags & ERTS_P2P_FLG_INC_REFC)
                erts_proc_inc_refc(c_p);
            return c_p;
        }
    }

    dhndl = erts_thr_progress_unmanaged_delay();

    proc = (Process *) erts_ptab_pix2intptr_ddrb(&erts_proc, pix);

    if (proc) {
        if (proc->common.id != pid)
            proc = NULL;
        else if (!need_locks) {
            if (flags & ERTS_P2P_FLG_INC_REFC)
                erts_proc_inc_refc(proc);
        }
        else {
            int busy;

#if ERTS_PROC_LOCK_OWN_IMPL
#ifdef ERTS_ENABLE_LOCK_COUNT
            lcnt_locks = need_locks;
            if (!(flags & ERTS_P2P_FLG_TRY_LOCK)) {
                erts_lcnt_proc_lock(&proc->lock, need_locks);
            }
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
            /* Make sure erts_pid2proc_safelock() is enough to handle
               a potential lock order violation situation... */
            busy = erts_proc_lc_trylock_force_busy(proc, need_locks);
            if (!busy)
#endif
#endif /* ERTS_PROC_LOCK_OWN_IMPL */
            {
                /* Try a quick trylock to grab all the locks we need. */
                busy = (int) erts_smp_proc_raw_trylock__(proc, need_locks);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_CHECK)
                erts_proc_lc_trylock(proc, need_locks, !busy,
                                     __FILE__, __LINE__);
#endif
#ifdef ERTS_PROC_LOCK_DEBUG
                if (!busy)
                    erts_proc_lock_op_debug(proc, need_locks, 1);
#endif
            }

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
            if (flags & ERTS_P2P_FLG_TRY_LOCK)
                erts_lcnt_proc_trylock(&proc->lock, need_locks,
                                       busy ? EBUSY : 0);
#endif

            if (!busy) {
                if (flags & ERTS_P2P_FLG_INC_REFC)
                    erts_proc_inc_refc(proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
                /* all is great */
                if (!(flags & ERTS_P2P_FLG_TRY_LOCK))
                    erts_lcnt_proc_lock_post_x(&proc->lock, lcnt_locks,
                                               __FILE__, __LINE__);
#endif

            }
            else {
                if (flags & ERTS_P2P_FLG_TRY_LOCK)
                    proc = ERTS_PROC_LOCK_BUSY;
                else {
                    int managed;
                    if (flags & ERTS_P2P_FLG_INC_REFC)
                        erts_proc_inc_refc(proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
                    erts_lcnt_proc_lock_unaquire(&proc->lock, lcnt_locks);
#endif

                    managed = dhndl == ERTS_THR_PRGR_DHANDLE_MANAGED;
                    if (!managed) {
                        erts_proc_inc_refc(proc);
                        erts_thr_progress_unmanaged_continue(dhndl);
                        dec_refc_proc = proc;

                        /*
                         * We don't want to call
                         * erts_thr_progress_unmanaged_continue()
                         * again.
                         */
                        dhndl = ERTS_THR_PRGR_DHANDLE_MANAGED;
                    }

                    proc_safelock(managed,
                                  c_p,
                                  c_p_have_locks,
                                  c_p_have_locks,
                                  proc,
                                  0,
                                  need_locks);
                }
            }
        }
    }

    if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
        erts_thr_progress_unmanaged_continue(dhndl);

    if (need_locks
        && proc
        && proc != ERTS_PROC_LOCK_BUSY
        && (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
            ? ERTS_PROC_IS_EXITING(proc)
            : (proc
               != (Process *) erts_ptab_pix2intptr_nob(&erts_proc, pix)))) {

        erts_smp_proc_unlock(proc, need_locks);

        if (flags & ERTS_P2P_FLG_INC_REFC)
            dec_refc_proc = proc;
        proc = NULL;
    }

    if (dec_refc_proc)
        erts_proc_dec_refc(dec_refc_proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_PROC_LOCK_DEBUG)
    ERTS_LC_ASSERT(!proc
                   || proc == ERTS_PROC_LOCK_BUSY
                   || (pid_need_locks ==
                       (ERTS_PROC_LOCK_FLGS_READ_(&proc->lock)
                        & pid_need_locks)));
#endif

    return proc;
}
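/*
 * Usage sketch, not taken from the sources above: looking a process up while
 * holding our own main lock and keeping a reference so the Process struct
 * stays valid; the helper name example_lookup() and the commented-out work
 * are assumptions. ERTS_P2P_FLG_INC_REFC must be balanced with
 * erts_proc_dec_refc(), and the acquired lock is only released if the target
 * is not the caller itself (in that case the caller already held its own
 * main lock and no extra lock was taken).
 */
static void
example_lookup(Process *c_p, Eterm pid)
{
    Process *rp = erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
                                    pid, ERTS_PROC_LOCK_MAIN,
                                    ERTS_P2P_FLG_INC_REFC);
    if (!rp)
        return;                         /* no such (live) process */

    /* ... work on rp while holding its main lock ... */

    if (rp != c_p)
        erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
    erts_proc_dec_refc(rp);             /* balance ERTS_P2P_FLG_INC_REFC */
}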
Process *
erts_pid2proc_opt(Process *c_p,
                  ErtsProcLocks c_p_have_locks,
                  Eterm pid,
                  ErtsProcLocks pid_need_locks,
                  int flags)
{
    Process *dec_refc_proc = NULL;
    int need_ptl;
    ErtsProcLocks need_locks;
    Uint pix;
    Process *proc;
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
    ErtsProcLocks lcnt_locks;
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
    if (c_p) {
        ErtsProcLocks might_unlock = c_p_have_locks & pid_need_locks;
        if (might_unlock)
            erts_proc_lc_might_unlock(c_p, might_unlock);
    }
#endif

    if (is_not_internal_pid(pid))
        return NULL;
    pix = internal_pid_index(pid);

    ERTS_LC_ASSERT((pid_need_locks & ERTS_PROC_LOCKS_ALL) == pid_need_locks);
    need_locks = pid_need_locks;

    if (c_p && c_p->id == pid) {
        ASSERT(c_p->id != ERTS_INVALID_PID);
        ASSERT(c_p == erts_pix2proc(pix));

        if (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
            && ERTS_PROC_IS_EXITING(c_p))
            return NULL;
        need_locks &= ~c_p_have_locks;
        if (!need_locks) {
            if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
                erts_smp_proc_inc_refc(c_p);
            return c_p;
        }
    }

    need_ptl = !erts_get_scheduler_id();

    if (need_ptl)
        erts_smp_rwmtx_rwlock(&erts_proc_tab_rwmtx);

    proc = (Process *) erts_smp_atomic_read_ddrb(&erts_proc.tab[pix]);

    if (proc) {
        if (proc->id != pid)
            proc = NULL;
        else if (!need_locks) {
            if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
                erts_smp_proc_inc_refc(proc);
        }
        else {
            int busy;

#if ERTS_PROC_LOCK_OWN_IMPL
#ifdef ERTS_ENABLE_LOCK_COUNT
            lcnt_locks = need_locks;
            if (!(flags & ERTS_P2P_FLG_TRY_LOCK)) {
                erts_lcnt_proc_lock(&proc->lock, need_locks);
            }
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
            /* Make sure erts_pid2proc_safelock() is enough to handle
               a potential lock order violation situation... */
            busy = erts_proc_lc_trylock_force_busy(proc, need_locks);
            if (!busy)
#endif
#endif /* ERTS_PROC_LOCK_OWN_IMPL */
            {
                /* Try a quick trylock to grab all the locks we need. */
                busy = (int) erts_smp_proc_raw_trylock__(proc, need_locks);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_CHECK)
                erts_proc_lc_trylock(proc, need_locks, !busy);
#endif
#ifdef ERTS_PROC_LOCK_DEBUG
                if (!busy)
                    erts_proc_lock_op_debug(proc, need_locks, 1);
#endif
            }

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
            if (flags & ERTS_P2P_FLG_TRY_LOCK)
                erts_lcnt_proc_trylock(&proc->lock, need_locks,
                                       busy ? EBUSY : 0);
#endif

            if (!busy) {
                if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
                    erts_smp_proc_inc_refc(proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
                /* all is great */
                if (!(flags & ERTS_P2P_FLG_TRY_LOCK))
                    erts_lcnt_proc_lock_post_x(&proc->lock, lcnt_locks,
                                               __FILE__, __LINE__);
#endif

            }
            else {
                if (flags & ERTS_P2P_FLG_TRY_LOCK)
                    proc = ERTS_PROC_LOCK_BUSY;
                else {
                    if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
                        erts_smp_proc_inc_refc(proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
                    erts_lcnt_proc_lock_unaquire(&proc->lock, lcnt_locks);
#endif

                    if (need_ptl) {
                        erts_smp_proc_inc_refc(proc);
                        dec_refc_proc = proc;
                        erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx);
                        need_ptl = 0;
                    }

                    proc_safelock(!need_ptl,
                                  c_p,
                                  c_p_have_locks,
                                  c_p_have_locks,
                                  proc,
                                  0,
                                  need_locks);
                }
            }
        }
    }

    if (need_ptl)
        erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx);

    if (need_locks
        && proc
        && proc != ERTS_PROC_LOCK_BUSY
        && (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
            ? ERTS_PROC_IS_EXITING(proc)
            : (proc
               != (Process *) erts_smp_atomic_read_nob(&erts_proc.tab[pix])))) {

        erts_smp_proc_unlock(proc, need_locks);

        if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
            dec_refc_proc = proc;
        proc = NULL;
    }

    if (dec_refc_proc)
        erts_smp_proc_dec_refc(dec_refc_proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_PROC_LOCK_DEBUG)
    ERTS_LC_ASSERT(!proc
                   || proc == ERTS_PROC_LOCK_BUSY
                   || (pid_need_locks ==
                       (ERTS_PROC_LOCK_FLGS_READ_(&proc->lock)
                        & pid_need_locks)));
#endif

    return proc;
}
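/*
 * Sketch for the older variant above, not taken from the sources: a
 * non-blocking lookup. ERTS_P2P_FLG_TRY_LOCK makes erts_pid2proc_opt()
 * return ERTS_PROC_LOCK_BUSY instead of waiting (and in that case no
 * reference is taken), while ERTS_P2P_FLG_SMP_INC_REFC must be balanced
 * with erts_smp_proc_dec_refc(). The helper name try_peek() is hypothetical.
 */
static int
try_peek(Process *c_p, Eterm pid)
{
    Process *rp = erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
                                    pid, ERTS_PROC_LOCK_MAIN,
                                    ERTS_P2P_FLG_TRY_LOCK
                                    | ERTS_P2P_FLG_SMP_INC_REFC);
    if (!rp)
        return 0;                       /* no such (live) process */
    if (rp == ERTS_PROC_LOCK_BUSY)
        return -1;                      /* locks busy; caller should retry */

    /* ... inspect rp under its main lock ... */

    if (rp != c_p)
        erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_MAIN);
    erts_smp_proc_dec_refc(rp);         /* balance ERTS_P2P_FLG_SMP_INC_REFC */
    return 1;
}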