static ERTS_INLINE void
reg_safe_write_lock(Process *c_p, ErtsProcLocks *c_p_locks)
{
    if (*c_p_locks) {
        ASSERT(c_p);
        ASSERT(c_p_locks);
        ASSERT(*c_p_locks);
        if (reg_try_write_lock() != EBUSY) {
#ifdef ERTS_ENABLE_LOCK_CHECK
            erts_proc_lc_might_unlock(c_p, *c_p_locks);
#endif
            return;
        }
        /* Release process locks in order to avoid deadlock */
        erts_smp_proc_unlock(c_p, *c_p_locks);
        *c_p_locks = 0;
    }
    reg_write_lock();
}
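/*
 * A minimal caller sketch (hypothetical, not part of the source above)
 * showing the contract reg_safe_write_lock() imposes on its caller.
 * Because the function may drop all process locks (zeroing *c_p_locks)
 * before blocking on the register write lock, the caller cannot assume
 * its locks are still held afterwards and must re-take whatever it
 * needs. safe_register_example() and do_register() are invented names
 * for illustration only.
 */
static void
safe_register_example(Process *c_p)
{
    ErtsProcLocks locks = ERTS_PROC_LOCK_MAIN;  /* locks currently held */

    reg_safe_write_lock(c_p, &locks);

    if (!locks) {
        /* Our process locks were released to avoid a lock order
           violation; re-acquire them now that the registry is held. */
        erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
        locks = ERTS_PROC_LOCK_MAIN;
    }

    /* ... operate on the registry, e.g. do_register(c_p, name) ... */

    reg_write_unlock();
}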
Process *
erts_pid2proc_opt(Process *c_p,
                  ErtsProcLocks c_p_have_locks,
                  Eterm pid,
                  ErtsProcLocks pid_need_locks,
                  int flags)
{
    Process *dec_refc_proc = NULL;
    ErtsThrPrgrDelayHandle dhndl;
    ErtsProcLocks need_locks;
    Uint pix;
    Process *proc;
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
    ErtsProcLocks lcnt_locks;
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
    if (c_p) {
        ErtsProcLocks might_unlock = c_p_have_locks & pid_need_locks;
        if (might_unlock)
            erts_proc_lc_might_unlock(c_p, might_unlock);
    }
#endif

    if (is_not_internal_pid(pid))
        return NULL;
    pix = internal_pid_index(pid);

    ERTS_LC_ASSERT((pid_need_locks & ERTS_PROC_LOCKS_ALL) == pid_need_locks);
    need_locks = pid_need_locks;

    if (c_p && c_p->common.id == pid) {
        ASSERT(c_p->common.id != ERTS_INVALID_PID);
        ASSERT(c_p == erts_pix2proc(pix));

        if (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
            && ERTS_PROC_IS_EXITING(c_p))
            return NULL;
        need_locks &= ~c_p_have_locks;
        if (!need_locks) {
            if (flags & ERTS_P2P_FLG_INC_REFC)
                erts_proc_inc_refc(c_p);
            return c_p;
        }
    }

    dhndl = erts_thr_progress_unmanaged_delay();

    proc = (Process *) erts_ptab_pix2intptr_ddrb(&erts_proc, pix);

    if (proc) {
        if (proc->common.id != pid)
            proc = NULL;
        else if (!need_locks) {
            if (flags & ERTS_P2P_FLG_INC_REFC)
                erts_proc_inc_refc(proc);
        }
        else {
            int busy;

#if ERTS_PROC_LOCK_OWN_IMPL
#ifdef ERTS_ENABLE_LOCK_COUNT
            lcnt_locks = need_locks;
            if (!(flags & ERTS_P2P_FLG_TRY_LOCK)) {
                erts_lcnt_proc_lock(&proc->lock, need_locks);
            }
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
            /* Make sure erts_pid2proc_safelock() is enough to handle
               a potential lock order violation situation... */
            busy = erts_proc_lc_trylock_force_busy(proc, need_locks);
            if (!busy)
#endif
#endif /* ERTS_PROC_LOCK_OWN_IMPL */
            {
                /* Try a quick trylock to grab all the locks we need. */
                busy = (int) erts_smp_proc_raw_trylock__(proc, need_locks);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_CHECK)
                erts_proc_lc_trylock(proc, need_locks, !busy,
                                     __FILE__, __LINE__);
#endif
#ifdef ERTS_PROC_LOCK_DEBUG
                if (!busy)
                    erts_proc_lock_op_debug(proc, need_locks, 1);
#endif
            }

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
            if (flags & ERTS_P2P_FLG_TRY_LOCK)
                erts_lcnt_proc_trylock(&proc->lock, need_locks,
                                       busy ? EBUSY : 0);
#endif

            if (!busy) {
                if (flags & ERTS_P2P_FLG_INC_REFC)
                    erts_proc_inc_refc(proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
                /* all is great */
                if (!(flags & ERTS_P2P_FLG_TRY_LOCK))
                    erts_lcnt_proc_lock_post_x(&proc->lock, lcnt_locks,
                                               __FILE__, __LINE__);
#endif
            }
            else {
                if (flags & ERTS_P2P_FLG_TRY_LOCK)
                    proc = ERTS_PROC_LOCK_BUSY;
                else {
                    int managed;
                    if (flags & ERTS_P2P_FLG_INC_REFC)
                        erts_proc_inc_refc(proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
                    erts_lcnt_proc_lock_unaquire(&proc->lock, lcnt_locks);
#endif

                    managed = dhndl == ERTS_THR_PRGR_DHANDLE_MANAGED;
                    if (!managed) {
                        erts_proc_inc_refc(proc);
                        erts_thr_progress_unmanaged_continue(dhndl);
                        dec_refc_proc = proc;
                        /*
                         * We don't want to call
                         * erts_thr_progress_unmanaged_continue()
                         * again.
                         */
                        dhndl = ERTS_THR_PRGR_DHANDLE_MANAGED;
                    }

                    proc_safelock(managed,
                                  c_p,
                                  c_p_have_locks,
                                  c_p_have_locks,
                                  proc,
                                  0,
                                  need_locks);
                }
            }
        }
    }

    if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
        erts_thr_progress_unmanaged_continue(dhndl);

    if (need_locks
        && proc
        && proc != ERTS_PROC_LOCK_BUSY
        && (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
            ? ERTS_PROC_IS_EXITING(proc)
            : (proc
               != (Process *) erts_ptab_pix2intptr_nob(&erts_proc, pix)))) {

        erts_smp_proc_unlock(proc, need_locks);

        if (flags & ERTS_P2P_FLG_INC_REFC)
            dec_refc_proc = proc;
        proc = NULL;
    }

    if (dec_refc_proc)
        erts_proc_dec_refc(dec_refc_proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_PROC_LOCK_DEBUG)
    ERTS_LC_ASSERT(!proc
                   || proc == ERTS_PROC_LOCK_BUSY
                   || (pid_need_locks ==
                       (ERTS_PROC_LOCK_FLGS_READ_(&proc->lock)
                        & pid_need_locks)));
#endif

    return proc;
}
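/*
 * A hedged usage sketch (hypothetical, not from the source): how a
 * caller might combine ERTS_P2P_FLG_TRY_LOCK with ERTS_P2P_FLG_INC_REFC.
 * ERTS_PROC_LOCK_BUSY is a sentinel distinct from both NULL and a valid
 * Process pointer, so all three outcomes must be told apart.
 * try_signal_example() and send_signal() are invented names for
 * illustration only.
 */
static int
try_signal_example(Process *c_p, Eterm to_pid)
{
    Process *rp = erts_pid2proc_opt(c_p, ERTS_PROC_LOCK_MAIN,
                                    to_pid, ERTS_PROC_LOCK_STATUS,
                                    ERTS_P2P_FLG_TRY_LOCK
                                    | ERTS_P2P_FLG_INC_REFC);
    if (!rp)
        return 0;                   /* no such (live) process */
    if (rp == ERTS_PROC_LOCK_BUSY)
        return -1;                  /* locks contended; retry later */

    /* ... send_signal(c_p, rp) ... */

    erts_smp_proc_unlock(rp, ERTS_PROC_LOCK_STATUS);
    erts_proc_dec_refc(rp);         /* balance ERTS_P2P_FLG_INC_REFC */
    return 1;
}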
Process *
erts_pid2proc_opt(Process *c_p,
                  ErtsProcLocks c_p_have_locks,
                  Eterm pid,
                  ErtsProcLocks pid_need_locks,
                  int flags)
{
    Process *dec_refc_proc = NULL;
    int need_ptl;
    ErtsProcLocks need_locks;
    Uint pix;
    Process *proc;
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
    ErtsProcLocks lcnt_locks;
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
    if (c_p) {
        ErtsProcLocks might_unlock = c_p_have_locks & pid_need_locks;
        if (might_unlock)
            erts_proc_lc_might_unlock(c_p, might_unlock);
    }
#endif

    if (is_not_internal_pid(pid))
        return NULL;
    pix = internal_pid_index(pid);

    ERTS_LC_ASSERT((pid_need_locks & ERTS_PROC_LOCKS_ALL) == pid_need_locks);
    need_locks = pid_need_locks;

    if (c_p && c_p->id == pid) {
        ASSERT(c_p->id != ERTS_INVALID_PID);
        ASSERT(c_p == erts_pix2proc(pix));

        if (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
            && ERTS_PROC_IS_EXITING(c_p))
            return NULL;
        need_locks &= ~c_p_have_locks;
        if (!need_locks) {
            if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
                erts_smp_proc_inc_refc(c_p);
            return c_p;
        }
    }

    need_ptl = !erts_get_scheduler_id();

    if (need_ptl)
        erts_smp_rwmtx_rwlock(&erts_proc_tab_rwmtx);

    proc = (Process *) erts_smp_atomic_read_ddrb(&erts_proc.tab[pix]);

    if (proc) {
        if (proc->id != pid)
            proc = NULL;
        else if (!need_locks) {
            if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
                erts_smp_proc_inc_refc(proc);
        }
        else {
            int busy;

#if ERTS_PROC_LOCK_OWN_IMPL
#ifdef ERTS_ENABLE_LOCK_COUNT
            lcnt_locks = need_locks;
            if (!(flags & ERTS_P2P_FLG_TRY_LOCK)) {
                erts_lcnt_proc_lock(&proc->lock, need_locks);
            }
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
            /* Make sure erts_pid2proc_safelock() is enough to handle
               a potential lock order violation situation... */
            busy = erts_proc_lc_trylock_force_busy(proc, need_locks);
            if (!busy)
#endif
#endif /* ERTS_PROC_LOCK_OWN_IMPL */
            {
                /* Try a quick trylock to grab all the locks we need. */
                busy = (int) erts_smp_proc_raw_trylock__(proc, need_locks);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_CHECK)
                erts_proc_lc_trylock(proc, need_locks, !busy);
#endif
#ifdef ERTS_PROC_LOCK_DEBUG
                if (!busy)
                    erts_proc_lock_op_debug(proc, need_locks, 1);
#endif
            }

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
            if (flags & ERTS_P2P_FLG_TRY_LOCK)
                erts_lcnt_proc_trylock(&proc->lock, need_locks,
                                       busy ? EBUSY : 0);
#endif

            if (!busy) {
                if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
                    erts_smp_proc_inc_refc(proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
                /* all is great */
                if (!(flags & ERTS_P2P_FLG_TRY_LOCK))
                    erts_lcnt_proc_lock_post_x(&proc->lock, lcnt_locks,
                                               __FILE__, __LINE__);
#endif
            }
            else {
                if (flags & ERTS_P2P_FLG_TRY_LOCK)
                    proc = ERTS_PROC_LOCK_BUSY;
                else {
                    if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
                        erts_smp_proc_inc_refc(proc);
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
                    erts_lcnt_proc_lock_unaquire(&proc->lock, lcnt_locks);
#endif
                    if (need_ptl) {
                        erts_smp_proc_inc_refc(proc);
                        dec_refc_proc = proc;
                        erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx);
                        need_ptl = 0;
                    }
                    proc_safelock(!need_ptl,
                                  c_p,
                                  c_p_have_locks,
                                  c_p_have_locks,
                                  proc,
                                  0,
                                  need_locks);
                }
            }
        }
    }

    if (need_ptl)
        erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx);

    if (need_locks
        && proc
        && proc != ERTS_PROC_LOCK_BUSY
        && (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
            ? ERTS_PROC_IS_EXITING(proc)
            : (proc
               != (Process *) erts_smp_atomic_read_nob(&erts_proc.tab[pix])))) {

        erts_smp_proc_unlock(proc, need_locks);

        if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
            dec_refc_proc = proc;
        proc = NULL;
    }

    if (dec_refc_proc)
        erts_smp_proc_dec_refc(dec_refc_proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_PROC_LOCK_DEBUG)
    ERTS_LC_ASSERT(!proc
                   || proc == ERTS_PROC_LOCK_BUSY
                   || (pid_need_locks ==
                       (ERTS_PROC_LOCK_FLGS_READ_(&proc->lock)
                        & pid_need_locks)));
#endif

    return proc;
}
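/*
 * The blocking path above follows a general "pin, unlock, then block"
 * pattern: before blocking in proc_safelock(), a thread that must not
 * hold the table-wide lock across a blocking acquisition takes a
 * reference on the object, releases the table lock, blocks, and drops
 * the reference when done. A minimal generic sketch of that pattern
 * follows; it uses plain pthreads and invented types (struct obj,
 * tab_lock, lookup_and_lock), not the ERTS API.
 */
#include <pthread.h>
#include <stdatomic.h>

struct obj {
    atomic_int refc;            /* keeps the object alive while pinned */
    pthread_mutex_t lock;       /* per-object lock we may block on */
};

/* Protects the table mapping indices to objects. */
static pthread_rwlock_t tab_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct obj *tab[1024];

static struct obj *
lookup_and_lock(unsigned ix)
{
    struct obj *o;

    pthread_rwlock_rdlock(&tab_lock);
    o = tab[ix];
    if (o)
        atomic_fetch_add(&o->refc, 1);  /* pin before dropping tab_lock */
    pthread_rwlock_unlock(&tab_lock);

    if (!o)
        return NULL;

    pthread_mutex_lock(&o->lock);       /* may block; object is pinned */

    /* As in erts_pid2proc_opt(), the caller should re-validate the
       object after blocking (it may have been removed from the table
       or be exiting) and, when done, unlock and drop the reference. */
    return o;
}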