/*
 * Try to grab exclusive code write permission on behalf of c_p.
 *
 * Returns non-zero on success. On failure (0) the caller _must_ yield:
 * c_p has been enqueued on code_write_queue and suspended, and will be
 * resumed when the current holder releases the permission.
 */
int erts_try_seize_code_write_permission(Process* c_p)
{
    int got_it;

#ifdef ERTS_SMP
    ASSERT(!erts_smp_thr_progress_is_blocking()); /* to avoid deadlock */
#endif
    ASSERT(c_p != NULL);

    erts_smp_mtx_lock(&code_write_permission_mtx);
    got_it = (code_writing_process == NULL);
    if (!got_it) {
        /* Permission already held by another process:
         * push c_p on the wait queue and suspend it. */
        struct code_write_queue_item* waiter;

        ASSERT(code_writing_process != c_p);
        waiter = erts_alloc(ERTS_ALC_T_CODE_IX_LOCK_Q, sizeof(*waiter));
        waiter->p = c_p;
        erts_smp_proc_inc_refc(c_p); /* keep c_p alive while queued */
        waiter->next = code_write_queue;
        code_write_queue = waiter;
        erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
    }
    else {
        /* Free: record c_p as the holder. */
        code_writing_process = c_p;
#ifdef ERTS_ENABLE_LOCK_CHECK
        erts_tsd_set(has_code_write_permission, (void *) 1);
#endif
    }
    erts_smp_mtx_unlock(&code_write_permission_mtx);
    return got_it;
}
/*
 * Interactive debug helper: walk the process table from the highest
 * index downwards, print each live process, and let the operator
 * (k)ill it, go to the (n)ext one, or (r)eturn. Any read error from
 * sys_get_key() terminates the emulator via erl_exit().
 */
static void
process_killer(void)
{
    int i, j, max = erts_ptab_max(&erts_proc);
    Process* rp;

    erts_printf("\n\nProcess Information\n\n");
    erts_printf("--------------------------------------------------\n");
    for (i = max-1; i >= 0; i--) {
	rp = erts_pix2proc(i);
	if (rp && rp->i != ENULL) {
	    int br;
	    print_process_info(ERTS_PRINT_STDOUT, NULL, rp);
	    erts_printf("(k)ill (n)ext (r)eturn:\n");
	    while(1) {
		if ((j = sys_get_key(0)) <= 0)
		    erl_exit(0, "");
		switch(j) {
		case 'k': {
		    ErtsProcLocks rp_locks = ERTS_PROC_LOCKS_XSIG_SEND;
		    erts_aint32_t state;
		    /* Hold a reference and the exit-signal locks while
		     * inspecting the state and (possibly) sending the
		     * kill signal. */
		    erts_smp_proc_inc_refc(rp);
		    erts_smp_proc_lock(rp, rp_locks);
		    state = erts_smp_atomic32_read_acqb(&rp->state);
		    if (state & (ERTS_PSFLG_FREE
				 | ERTS_PSFLG_EXITING
				 | ERTS_PSFLG_ACTIVE
				 | ERTS_PSFLG_ACTIVE_SYS
				 | ERTS_PSFLG_IN_RUNQ
				 | ERTS_PSFLG_RUNNING
				 | ERTS_PSFLG_RUNNING_SYS)) {
			/* Refuse to kill anything that is not purely
			 * waiting; such processes may be in use by a
			 * scheduler right now. */
			erts_printf("Can only kill WAITING processes this way\n");
		    }
		    else {
			(void) erts_send_exit_signal(NULL,
						     NIL,
						     rp,
						     &rp_locks,
						     am_kill,
						     NIL,
						     NULL,
						     0);
		    }
		    erts_smp_proc_unlock(rp, rp_locks);
		    erts_smp_proc_dec_refc(rp);
		}
		/* Fall through: after a kill attempt we also advance
		 * to the next process, same as 'n'. */
		case 'n': br = 1; break;
		case 'r': return;
		default: return;
		}
		/* br is set on the 'k'/'n' paths before this test;
		 * 'r' and default return directly. */
		if (br == 1) break;
	    }
	}
    }
}
/*
 * Try to take the global code_ix lock for c_p.
 *
 * Returns non-zero on success. On failure (0) the caller _must_ yield:
 * c_p has been put on the_code_ix_queue and suspended, to be resumed
 * when the lock is released.
 */
int erts_try_seize_code_write_permission(Process* c_p)
{
    int acquired;

#ifdef ERTS_SMP
    ASSERT(!erts_smp_thr_progress_is_blocking()); /* to avoid deadlock */
#endif

    erts_smp_mtx_lock(&the_code_ix_queue_lock);
    acquired = !the_code_ix_lock;
    if (!acquired) {
        /* Lock is taken: enqueue c_p and suspend it until release. */
        struct code_ix_queue_item* waiter;

        waiter = erts_alloc(ERTS_ALC_T_CODE_IX_LOCK_Q, sizeof(*waiter));
        waiter->p = c_p;
        erts_smp_proc_inc_refc(c_p); /* keep c_p alive while queued */
        waiter->next = the_code_ix_queue;
        the_code_ix_queue = waiter;
        erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
    }
    else {
        the_code_ix_lock = 1;
    }
    erts_smp_mtx_unlock(&the_code_ix_queue_lock);
    return acquired;
}
/*
 * Safely acquire process locks on up to two processes without risking
 * a lock-order deadlock. The caller states, for each process, which
 * locks it already holds (*_have_locks) and which it ultimately needs
 * (*_need_locks). The function establishes a canonical order (smaller
 * pid first), unlocks just enough of the currently held locks to make
 * an in-order acquisition possible, then locks everything needed in
 * lock-number order across both processes.
 *
 * a_proc may be NULL (then only b_proc is handled); b_proc must not be.
 * If a_proc == b_proc the two lock sets are merged onto one process.
 *
 * Reference counts are bumped around a process whose last held lock is
 * released, so it cannot be deallocated while temporarily unlocked.
 */
static void
proc_safelock(Process *a_proc,
	      erts_pix_lock_t *a_pix_lck,
	      ErtsProcLocks a_have_locks,
	      ErtsProcLocks a_need_locks,
	      Process *b_proc,
	      erts_pix_lock_t *b_pix_lck,
	      ErtsProcLocks b_have_locks,
	      ErtsProcLocks b_need_locks)
{
    Process *p1, *p2;
#ifdef ERTS_ENABLE_LOCK_CHECK
    Eterm pid1, pid2;
#endif
    erts_pix_lock_t *pix_lck1, *pix_lck2;
    ErtsProcLocks need_locks1, have_locks1, need_locks2, have_locks2;
    ErtsProcLocks unlock_mask;
    int lock_no, refc1 = 0, refc2 = 0;

    ERTS_LC_ASSERT(b_proc);

    /* Determine inter process lock order...
     * Locks with the same lock order should be locked on p1 before p2.
     */
    if (a_proc) {
	if (a_proc->id < b_proc->id) {
	    /* a_proc orders first. */
	    p1 = a_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
	    pid1 = a_proc->id;
#endif
	    pix_lck1 = a_pix_lck;
	    need_locks1 = a_need_locks;
	    have_locks1 = a_have_locks;
	    p2 = b_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
	    pid2 = b_proc->id;
#endif
	    pix_lck2 = b_pix_lck;
	    need_locks2 = b_need_locks;
	    have_locks2 = b_have_locks;
	}
	else if (a_proc->id > b_proc->id) {
	    /* b_proc orders first. */
	    p1 = b_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
	    pid1 = b_proc->id;
#endif
	    pix_lck1 = b_pix_lck;
	    need_locks1 = b_need_locks;
	    have_locks1 = b_have_locks;
	    p2 = a_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
	    pid2 = a_proc->id;
#endif
	    pix_lck2 = a_pix_lck;
	    need_locks2 = a_need_locks;
	    have_locks2 = a_have_locks;
	}
	else {
	    /* Same process passed twice: merge the lock sets onto p1. */
	    ERTS_LC_ASSERT(a_proc == b_proc);
	    ERTS_LC_ASSERT(a_proc->id == b_proc->id);
	    p1 = a_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
	    pid1 = a_proc->id;
#endif
	    pix_lck1 = a_pix_lck;
	    need_locks1 = a_need_locks | b_need_locks;
	    have_locks1 = a_have_locks | b_have_locks;
	    p2 = NULL;
#ifdef ERTS_ENABLE_LOCK_CHECK
	    pid2 = 0;
#endif
	    pix_lck2 = NULL;
	    need_locks2 = 0;
	    have_locks2 = 0;
	}
    }
    else {
	/* No a_proc: only b_proc is involved. */
	p1 = b_proc;
#ifdef ERTS_ENABLE_LOCK_CHECK
	pid1 = b_proc->id;
#endif
	pix_lck1 = b_pix_lck;
	need_locks1 = b_need_locks;
	have_locks1 = b_have_locks;
	p2 = NULL;
#ifdef ERTS_ENABLE_LOCK_CHECK
	pid2 = 0;
#endif
	pix_lck2 = NULL;
	need_locks2 = 0;
	have_locks2 = 0;
#ifdef ERTS_ENABLE_LOCK_CHECK
	a_need_locks = 0;
	a_have_locks = 0;
#endif
    }

#ifdef ERTS_ENABLE_LOCK_CHECK
    if (p1)
	erts_proc_lc_chk_proc_locks(p1, have_locks1);
    if (p2)
	erts_proc_lc_chk_proc_locks(p2, have_locks2);

    /* Held locks must be a subset of needed locks: this function only
     * acquires locks, it never nets out a release. */
    if ((need_locks1 & have_locks1) != have_locks1)
	erts_lc_fail("Thread tries to release process lock(s) "
		     "on %T via erts_proc_safelock().", pid1);
    if ((need_locks2 & have_locks2) != have_locks2)
	erts_lc_fail("Thread tries to release process lock(s) "
		     "on %T via erts_proc_safelock().", pid2);
#endif

    /* Reduce "need" to locks not yet held. */
    need_locks1 &= ~have_locks1;
    need_locks2 &= ~have_locks2;

    /* Figure out the range of locks that needs to be unlocked... */
    unlock_mask = ERTS_PROC_LOCKS_ALL;
    for (lock_no = 0; lock_no <= ERTS_PROC_LOCK_MAX_BIT; lock_no++) {
	ErtsProcLocks lock = (1 << lock_no);
	if (lock & need_locks1)
	    break;
	unlock_mask &= ~lock;
	if (lock & need_locks2)
	    break;
    }

    /* ... and unlock locks in that range... */
    if (have_locks1 || have_locks2) {
	ErtsProcLocks unlock_locks;
	unlock_locks = unlock_mask & have_locks1;
	if (unlock_locks) {
	    have_locks1 &= ~unlock_locks;
	    need_locks1 |= unlock_locks;
	    if (!have_locks1) {
		/* Dropping the last lock on p1: pin it with a refc so it
		 * cannot go away while unlocked. */
		refc1 = 1;
		erts_smp_proc_inc_refc(p1);
	    }
	    erts_smp_proc_unlock__(p1, pix_lck1, unlock_locks);
	}
	unlock_locks = unlock_mask & have_locks2;
	if (unlock_locks) {
	    have_locks2 &= ~unlock_locks;
	    need_locks2 |= unlock_locks;
	    if (!have_locks2) {
		refc2 = 1;
		erts_smp_proc_inc_refc(p2);
	    }
	    erts_smp_proc_unlock__(p2, pix_lck2, unlock_locks);
	}
    }

    /*
     * lock_no equals the number of the first lock to lock on
     * either p1 *or* p2.
     */

#ifdef ERTS_ENABLE_LOCK_CHECK
    if (p1)
	erts_proc_lc_chk_proc_locks(p1, have_locks1);
    if (p2)
	erts_proc_lc_chk_proc_locks(p2, have_locks2);
#endif

    /* Lock locks in lock order... for each lock number, p1's lock is
     * always taken before p2's; runs of consecutive lock numbers needed
     * by only one process are batched into a single lock call. */
    while (lock_no <= ERTS_PROC_LOCK_MAX_BIT) {
	ErtsProcLocks locks;
	ErtsProcLocks lock = (1 << lock_no);
	ErtsProcLocks lock_mask = 0;
	if (need_locks1 & lock) {
	    /* Batch p1 locks until a lock p2 also needs is reached. */
	    do {
		lock = (1 << lock_no++);
		lock_mask |= lock;
	    } while (lock_no <= ERTS_PROC_LOCK_MAX_BIT
		     && !(need_locks2 & lock));
	    if (need_locks2 & lock)
		lock_no--;
	    locks = need_locks1 & lock_mask;
	    erts_smp_proc_lock__(p1, pix_lck1, locks);
	    have_locks1 |= locks;
	    need_locks1 &= ~locks;
	}
	else if (need_locks2 & lock) {
	    /* Batch p2 locks until a lock p1 also needs is reached. */
	    while (lock_no <= ERTS_PROC_LOCK_MAX_BIT
		   && !(need_locks1 & lock)) {
		lock_mask |= lock;
		lock = (1 << ++lock_no);
	    }
	    locks = need_locks2 & lock_mask;
	    erts_smp_proc_lock__(p2, pix_lck2, locks);
	    have_locks2 |= locks;
	    need_locks2 &= ~locks;
	}
	else
	    lock_no++;
    }

#ifdef ERTS_ENABLE_LOCK_CHECK
    if (p1)
	erts_proc_lc_chk_proc_locks(p1, have_locks1);
    if (p2)
	erts_proc_lc_chk_proc_locks(p2, have_locks2);

    if (p1 && p2) {
	if (p1 == a_proc) {
	    ERTS_LC_ASSERT(a_need_locks == have_locks1);
	    ERTS_LC_ASSERT(b_need_locks == have_locks2);
	}
	else {
	    ERTS_LC_ASSERT(a_need_locks == have_locks2);
	    ERTS_LC_ASSERT(b_need_locks == have_locks1);
	}
    }
    else {
	ERTS_LC_ASSERT(p1);
	if (a_proc) {
	    ERTS_LC_ASSERT(have_locks1 == (a_need_locks | b_need_locks));
	}
	else {
	    ERTS_LC_ASSERT(have_locks1 == b_need_locks);
	}
    }
#endif

    /* Release the refcounts taken while a process was fully unlocked. */
    if (refc1)
	erts_smp_proc_dec_refc(p1);
    if (refc2)
	erts_smp_proc_dec_refc(p2);
}
/*
 * Look up the process identified by pid and acquire pid_need_locks on
 * it, taking care not to violate the lock order with respect to the
 * locks already held on c_p (c_p_have_locks).
 *
 * Returns:
 *   - the locked Process*, or
 *   - NULL if pid is invalid, the process is gone, or it is exiting
 *     (unless ERTS_P2P_FLG_ALLOW_OTHER_X is set), or
 *   - ERTS_PROC_LOCK_BUSY if ERTS_P2P_FLG_TRY_LOCK was given and the
 *     locks could not be taken immediately.
 *
 * With ERTS_P2P_FLG_SMP_INC_REFC the returned process has its
 * reference count incremented on behalf of the caller.
 */
Process *
erts_pid2proc_opt(Process *c_p,
		  ErtsProcLocks c_p_have_locks,
		  Eterm pid,
		  ErtsProcLocks pid_need_locks,
		  int flags)
{
    Process *dec_refc_proc = NULL;
    ErtsThrPrgrDelayHandle dhndl;
    ErtsProcLocks need_locks;
    Uint pix;
    Process *proc;
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
    ErtsProcLocks lcnt_locks;
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
    if (c_p) {
	/* Overlapping locks on c_p may be temporarily released below. */
	ErtsProcLocks might_unlock = c_p_have_locks & pid_need_locks;
	if (might_unlock)
	    erts_proc_lc_might_unlock(c_p, might_unlock);
    }
#endif

    if (is_not_internal_pid(pid))
	return NULL;
    pix = internal_pid_index(pid);

    ERTS_LC_ASSERT((pid_need_locks & ERTS_PROC_LOCKS_ALL) == pid_need_locks);
    need_locks = pid_need_locks;

    if (c_p && c_p->common.id == pid) {
	/* Looking up ourselves: only the missing locks are needed. */
	ASSERT(c_p->common.id != ERTS_INVALID_PID);
	ASSERT(c_p == erts_pix2proc(pix));

	if (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
	    && ERTS_PROC_IS_EXITING(c_p))
	    return NULL;
	need_locks &= ~c_p_have_locks;
	if (!need_locks) {
	    if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
		erts_smp_proc_inc_refc(c_p);
	    return c_p;
	}
    }

    /* Delay thread progress so the table entry cannot be freed while
     * we inspect it. */
    dhndl = erts_thr_progress_unmanaged_delay();

    proc = (Process *) erts_ptab_pix2intptr_ddrb(&erts_proc, pix);

    if (proc) {
	if (proc->common.id != pid)
	    proc = NULL; /* slot was reused for another process */
	else if (!need_locks) {
	    if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
		erts_smp_proc_inc_refc(proc);
	}
	else {
	    int busy;

#if ERTS_PROC_LOCK_OWN_IMPL
#ifdef ERTS_ENABLE_LOCK_COUNT
	    lcnt_locks = need_locks;
	    if (!(flags & ERTS_P2P_FLG_TRY_LOCK)) {
		erts_lcnt_proc_lock(&proc->lock, need_locks);
	    }
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
	    /* Make sure erts_pid2proc_safelock() is enough to handle
	       a potential lock order violation situation... */
	    busy = erts_proc_lc_trylock_force_busy(proc, need_locks);
	    if (!busy)
#endif
#endif /* ERTS_PROC_LOCK_OWN_IMPL */
	    {
		/* Try a quick trylock to grab all the locks we need. */
		busy = (int) erts_smp_proc_raw_trylock__(proc, need_locks);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_CHECK)
		erts_proc_lc_trylock(proc, need_locks, !busy);
#endif
#ifdef ERTS_PROC_LOCK_DEBUG
		if (!busy)
		    erts_proc_lock_op_debug(proc, need_locks, 1);
#endif
	    }

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
	    if (flags & ERTS_P2P_FLG_TRY_LOCK)
		erts_lcnt_proc_trylock(&proc->lock, need_locks,
				       busy ? EBUSY : 0);
#endif

	    if (!busy) {
		if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
		    erts_smp_proc_inc_refc(proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
		/* all is great */
		if (!(flags & ERTS_P2P_FLG_TRY_LOCK))
		    erts_lcnt_proc_lock_post_x(&proc->lock, lcnt_locks,
					       __FILE__, __LINE__);
#endif
	    }
	    else {
		if (flags & ERTS_P2P_FLG_TRY_LOCK)
		    proc = ERTS_PROC_LOCK_BUSY;
		else {
		    /* Slow path: fall back on a blocking, lock-order
		     * safe acquisition. */
		    int managed;
		    if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
			erts_smp_proc_inc_refc(proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
		    erts_lcnt_proc_lock_unaquire(&proc->lock, lcnt_locks);
#endif

		    managed = dhndl == ERTS_THR_PRGR_DHANDLE_MANAGED;
		    if (!managed) {
			/* Unmanaged thread: pin proc with a refc before
			 * ending the thread-progress delay, since we may
			 * block in proc_safelock() below. */
			erts_smp_proc_inc_refc(proc);
			erts_thr_progress_unmanaged_continue(dhndl);
			dec_refc_proc = proc;

			/*
			 * We don't want to call
			 * erts_thr_progress_unmanaged_continue()
			 * again.
			 */
			dhndl = ERTS_THR_PRGR_DHANDLE_MANAGED;
		    }

		    proc_safelock(managed,
				  c_p,
				  c_p_have_locks,
				  c_p_have_locks,
				  proc,
				  0,
				  need_locks);
		}
	    }
	}
    }

    if (dhndl != ERTS_THR_PRGR_DHANDLE_MANAGED)
	erts_thr_progress_unmanaged_continue(dhndl);

    /* Re-validate after locking: the process may have started exiting
     * (or, with ALLOW_OTHER_X, been removed from the table) while we
     * were acquiring its locks. */
    if (need_locks
	&& proc
	&& proc != ERTS_PROC_LOCK_BUSY
	&& (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
	    ? ERTS_PROC_IS_EXITING(proc)
	    : (proc
	       != (Process *) erts_ptab_pix2intptr_nob(&erts_proc, pix)))) {

	erts_smp_proc_unlock(proc, need_locks);

	if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
	    dec_refc_proc = proc;
	proc = NULL;

    }

    if (dec_refc_proc)
	erts_smp_proc_dec_refc(dec_refc_proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_PROC_LOCK_DEBUG)
    ERTS_LC_ASSERT(!proc
		   || proc == ERTS_PROC_LOCK_BUSY
		   || (pid_need_locks ==
		       (ERTS_PROC_LOCK_FLGS_READ_(&proc->lock)
			& pid_need_locks)));
#endif

    return proc;
}
void erts_whereis_name(Process *c_p, ErtsProcLocks c_p_locks, Eterm name, Process** proc, ErtsProcLocks need_locks, int flags, Port** port) { RegProc* rp = NULL; HashValue hval; int ix; HashBucket* b; #ifdef ERTS_SMP ErtsProcLocks current_c_p_locks; Port *pending_port = NULL; if (!c_p) c_p_locks = 0; current_c_p_locks = c_p_locks; restart: reg_safe_read_lock(c_p, ¤t_c_p_locks); /* Locked locks: * - port lock on pending_port if pending_port != NULL * - read reg lock * - current_c_p_locks (either c_p_locks or 0) on c_p */ #endif hval = REG_HASH(name); ix = hval % process_reg.size; b = process_reg.bucket[ix]; /* * Note: We have inlined the code from hash.c for speed. */ while (b) { if (((RegProc *) b)->name == name) { rp = (RegProc *) b; break; } b = b->next; } if (proc) { if (!rp) *proc = NULL; else { #ifdef ERTS_SMP if (!rp->p) *proc = NULL; else { if (need_locks) { erts_proc_safelock(c_p, current_c_p_locks, c_p_locks, rp->p, 0, need_locks); current_c_p_locks = c_p_locks; } if ((flags & ERTS_P2P_FLG_ALLOW_OTHER_X) || is_proc_alive(rp->p)) *proc = rp->p; else { if (need_locks) erts_smp_proc_unlock(rp->p, need_locks); *proc = NULL; } if (*proc && (flags & ERTS_P2P_FLG_SMP_INC_REFC)) erts_smp_proc_inc_refc(rp->p); } #else if (rp->p && ((flags & ERTS_P2P_FLG_ALLOW_OTHER_X) || rp->p->status != P_EXITING)) *proc = rp->p; else *proc = NULL; #endif } } if (port) { if (!rp || !rp->pt) *port = NULL; else { #ifdef ERTS_SMP if (pending_port == rp->pt) pending_port = NULL; else { if (pending_port) { /* Ahh! Registered port changed while reg lock was unlocked... */ erts_smp_port_unlock(pending_port); pending_port = NULL; } if (erts_smp_port_trylock(rp->pt) == EBUSY) { Eterm id = rp->pt->id; /* id read only... */ /* Unlock all locks, acquire port lock, and restart... 
*/ if (current_c_p_locks) { erts_smp_proc_unlock(c_p, current_c_p_locks); current_c_p_locks = 0; } reg_read_unlock(); pending_port = erts_id2port(id, NULL, 0); goto restart; } } #endif *port = rp->pt; ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(*port)); } } #ifdef ERTS_SMP if (c_p && !current_c_p_locks) erts_smp_proc_lock(c_p, c_p_locks); if (pending_port) erts_smp_port_unlock(pending_port); #endif reg_read_unlock(); }
/*
 * Look up the process identified by pid and acquire pid_need_locks on
 * it without violating the lock order relative to the locks already
 * held on c_p (c_p_have_locks).
 *
 * Returns:
 *   - the locked Process*, or
 *   - NULL if pid is invalid, the process is gone, or it is exiting
 *     (unless ERTS_P2P_FLG_ALLOW_OTHER_X is set), or
 *   - ERTS_PROC_LOCK_BUSY if ERTS_P2P_FLG_TRY_LOCK was given and the
 *     locks could not be taken immediately.
 *
 * With ERTS_P2P_FLG_SMP_INC_REFC the returned process has its
 * reference count incremented on behalf of the caller.
 */
Process *
erts_pid2proc_opt(Process *c_p,
		  ErtsProcLocks c_p_have_locks,
		  Eterm pid,
		  ErtsProcLocks pid_need_locks,
		  int flags)
{
    Process *dec_refc_proc = NULL;
    int need_ptl;
    ErtsProcLocks need_locks;
    Uint pix;
    Process *proc;
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
    ErtsProcLocks lcnt_locks;
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
    if (c_p) {
	/* Overlapping locks on c_p may be temporarily released below. */
	ErtsProcLocks might_unlock = c_p_have_locks & pid_need_locks;
	if (might_unlock)
	    erts_proc_lc_might_unlock(c_p, might_unlock);
    }
#endif

    if (is_not_internal_pid(pid))
	return NULL;
    pix = internal_pid_index(pid);

    ERTS_LC_ASSERT((pid_need_locks & ERTS_PROC_LOCKS_ALL) == pid_need_locks);
    need_locks = pid_need_locks;

    if (c_p && c_p->id == pid) {
	/* Looking up ourselves: only the missing locks are needed. */
	ASSERT(c_p->id != ERTS_INVALID_PID);
	ASSERT(c_p == erts_pix2proc(pix));

	if (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
	    && ERTS_PROC_IS_EXITING(c_p))
	    return NULL;
	need_locks &= ~c_p_have_locks;
	if (!need_locks) {
	    if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
		erts_smp_proc_inc_refc(c_p);
	    return c_p;
	}
    }

    /* Non-scheduler threads must hold the process table lock to keep
     * the entry from being freed under us. */
    need_ptl = !erts_get_scheduler_id();

    if (need_ptl)
	erts_smp_rwmtx_rwlock(&erts_proc_tab_rwmtx);

    proc = (Process *) erts_smp_atomic_read_ddrb(&erts_proc.tab[pix]);

    if (proc) {
	if (proc->id != pid)
	    proc = NULL; /* slot was reused for another process */
	else if (!need_locks) {
	    if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
		erts_smp_proc_inc_refc(proc);
	}
	else {
	    int busy;

#if ERTS_PROC_LOCK_OWN_IMPL
#ifdef ERTS_ENABLE_LOCK_COUNT
	    lcnt_locks = need_locks;
	    if (!(flags & ERTS_P2P_FLG_TRY_LOCK)) {
		erts_lcnt_proc_lock(&proc->lock, need_locks);
	    }
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
	    /* Make sure erts_pid2proc_safelock() is enough to handle
	       a potential lock order violation situation... */
	    busy = erts_proc_lc_trylock_force_busy(proc, need_locks);
	    if (!busy)
#endif
#endif /* ERTS_PROC_LOCK_OWN_IMPL */
	    {
		/* Try a quick trylock to grab all the locks we need. */
		busy = (int) erts_smp_proc_raw_trylock__(proc, need_locks);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_CHECK)
		erts_proc_lc_trylock(proc, need_locks, !busy);
#endif
#ifdef ERTS_PROC_LOCK_DEBUG
		if (!busy)
		    erts_proc_lock_op_debug(proc, need_locks, 1);
#endif
	    }

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
	    if (flags & ERTS_P2P_FLG_TRY_LOCK)
		erts_lcnt_proc_trylock(&proc->lock, need_locks,
				       busy ? EBUSY : 0);
#endif

	    if (!busy) {
		if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
		    erts_smp_proc_inc_refc(proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
		/* all is great */
		if (!(flags & ERTS_P2P_FLG_TRY_LOCK))
		    erts_lcnt_proc_lock_post_x(&proc->lock, lcnt_locks,
					       __FILE__, __LINE__);
#endif
	    }
	    else {
		if (flags & ERTS_P2P_FLG_TRY_LOCK)
		    proc = ERTS_PROC_LOCK_BUSY;
		else {
		    /* Slow path: fall back on a blocking, lock-order
		     * safe acquisition. */
		    if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
			erts_smp_proc_inc_refc(proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
		    erts_lcnt_proc_lock_unaquire(&proc->lock, lcnt_locks);
#endif

		    if (need_ptl) {
			/* Pin proc with a refc so the table lock can be
			 * dropped before possibly blocking below. */
			erts_smp_proc_inc_refc(proc);
			dec_refc_proc = proc;
			erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx);
			need_ptl = 0;
		    }

		    proc_safelock(!need_ptl,
				  c_p,
				  c_p_have_locks,
				  c_p_have_locks,
				  proc,
				  0,
				  need_locks);
		}
	    }
	}
    }

    if (need_ptl)
	erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx);

    /* Re-validate after locking: the process may have started exiting
     * (or, with ALLOW_OTHER_X, been removed from the table) while we
     * were acquiring its locks. */
    if (need_locks
	&& proc
	&& proc != ERTS_PROC_LOCK_BUSY
	&& (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
	    ? ERTS_PROC_IS_EXITING(proc)
	    : (proc
	       != (Process *) erts_smp_atomic_read_nob(&erts_proc.tab[pix])))) {

	erts_smp_proc_unlock(proc, need_locks);

	if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
	    dec_refc_proc = proc;
	proc = NULL;

    }

    if (dec_refc_proc)
	erts_smp_proc_dec_refc(dec_refc_proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_PROC_LOCK_DEBUG)
    ERTS_LC_ASSERT(!proc
		   || proc == ERTS_PROC_LOCK_BUSY
		   || (pid_need_locks ==
		       (ERTS_PROC_LOCK_FLGS_READ_(&proc->lock)
			& pid_need_locks)));
#endif

    return proc;
}