/*
 * Safely lock locks on two processes without risking deadlock.
 * Simply resolves the pix lock for each (possibly NULL) process and
 * delegates to proc_safelock(), which does the actual ordered locking.
 */
void
erts_proc_safelock(Process *a_proc,
		   ErtsProcLocks a_have_locks,
		   ErtsProcLocks a_need_locks,
		   Process *b_proc,
		   ErtsProcLocks b_have_locks,
		   ErtsProcLocks b_need_locks)
{
    erts_pix_lock_t *a_pix_lock = a_proc ? ERTS_PID2PIXLOCK(a_proc->id) : NULL;
    erts_pix_lock_t *b_pix_lock = b_proc ? ERTS_PID2PIXLOCK(b_proc->id) : NULL;

    proc_safelock(a_proc, a_pix_lock, a_have_locks, a_need_locks,
		  b_proc, b_pix_lock, b_have_locks, b_need_locks);
}
/*
 * Return non-zero iff process p has not (yet) started exiting.
 * The is_exiting field is sampled under p's pix lock so the snapshot
 * is consistent with concurrent exit handling.
 */
static ERTS_INLINE int
is_proc_alive(Process *p)
{
    int alive;
    erts_pix_lock_t *pix_lock = ERTS_PID2PIXLOCK(p->id);

    erts_pix_lock(pix_lock);
    alive = p->is_exiting ? 0 : 1;
    erts_pix_unlock(pix_lock);

    return alive;
}
/*
 * erts_proc_unlock_failed() is called when erts_smp_proc_unlock()
 * wasn't able to unlock all locks. We may need to transfer locks
 * to waiters.
 */
void
erts_proc_unlock_failed(Process *p,
			erts_pix_lock_t *pixlck,
			ErtsProcLocks wait_locks)
{
    erts_pix_lock_t *pix_lock = pixlck;

    if (!pix_lock)
	pix_lock = ERTS_PID2PIXLOCK(p->id);

#if ERTS_PROC_LOCK_ATOMIC_IMPL
    /* With the atomic implementation the caller does not hold the pix
     * lock; take it here since transfer_locks() requires it. */
    erts_pix_lock(pix_lock);
#endif

    /* transfer_locks() releases pix_lock before returning. */
    transfer_locks(p, wait_locks, pix_lock, 1);
}
/*
 * erts_pid2proc_safelock() is called from erts_pid2proc_opt() when
 * it wasn't possible to trylock all locks needed.
 *   c_p            - current process
 *   c_p_have_locks - locks held on c_p
 *   proc           - process struct of process we are looking
 *                    up (both in and out argument; set to NULL
 *                    if the process turns out to be gone)
 *   need_locks     - all locks we need (including have_locks)
 *   pix_lock       - pix lock for process we are looking up
 *   flags          - option flags (ERTS_P2P_FLG_*)
 *
 * On entry pix_lock is held; on return pix_lock is held again.
 */
void
erts_pid2proc_safelock(Process *c_p,
		       ErtsProcLocks c_p_have_locks,
		       Process **proc,
		       ErtsProcLocks need_locks,
		       erts_pix_lock_t *pix_lock,
		       int flags)
{
    Process *p = *proc;
    ERTS_LC_ASSERT(p->lock.refc > 0);
    ERTS_LC_ASSERT(process_tab[internal_pid_index(p->id)] == p);
    /* Bump the lock refc so p cannot be deallocated while we block
     * inside proc_safelock() below (pix lock is dropped there). */
    p->lock.refc++;
    erts_pix_unlock(pix_lock);
    /* Acquire need_locks on p (while keeping c_p_have_locks on c_p)
     * in a deadlock-free manner. */
    proc_safelock(c_p,
		  c_p ? ERTS_PID2PIXLOCK(c_p->id) : NULL,
		  c_p_have_locks,
		  c_p_have_locks,
		  p,
		  pix_lock,
		  0,
		  need_locks);
    erts_pix_lock(pix_lock);
    /* The process may have started exiting, or even have been replaced
     * in process_tab, while we were blocked. Accept an exiting process
     * only if the caller asked for that via ERTS_P2P_FLG_ALLOW_OTHER_X
     * and the table entry still refers to p. */
    if (!p->is_exiting
	|| ((flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
	    && process_tab[internal_pid_index(p->id)] == p)) {
	ERTS_LC_ASSERT(p->lock.refc > 1);
	p->lock.refc--;
    }
    else {
	/* No proc. Note, we need to keep refc until after process unlock */
	erts_pix_unlock(pix_lock);
	erts_smp_proc_unlock__(p, pix_lock, need_locks);
	*proc = NULL;
	erts_pix_lock(pix_lock);
	ERTS_LC_ASSERT(p->lock.refc > 0);
	if (--p->lock.refc == 0) {
	    /* We held the last reference; free the process struct.
	     * erts_free_proc() is called without the pix lock held. */
	    erts_pix_unlock(pix_lock);
	    erts_free_proc(p);
	    erts_pix_lock(pix_lock);
	}
    }
}
/*
 * Try to grab locks one at a time in lock order and wait on the lowest
 * lock we fail to grab, if any.
 *
 * On return all locks in 'need_locks' are held (either acquired here
 * or transferred to us by an unlocking thread while we waited).
 *
 * On entry, the pix lock is held iff !ERTS_PROC_LOCK_ATOMIC_IMPL.
 * On exit it is not held.
 */
static void
wait_for_locks(Process *p,
	       erts_pix_lock_t *pixlck,
	       ErtsProcLocks locks,
	       ErtsProcLocks need_locks,
	       ErtsProcLocks olflgs)
{
    erts_pix_lock_t *pix_lock = pixlck ? pixlck : ERTS_PID2PIXLOCK(p->id);
    erts_tse_t *wtr;
    erts_proc_lock_queues_t *qs;

    /* Acquire a waiter object on which this thread can wait. */
    wtr = tse_fetch(pix_lock);

    /* Record which locks this waiter needs. */
    wtr->uflgs = need_locks;
    ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);

#if ERTS_PROC_LOCK_ATOMIC_IMPL
    erts_pix_lock(pix_lock);
#endif
    ERTS_LC_ASSERT(erts_lc_pix_lock_is_locked(pix_lock));

    qs = wtr->udata;
    ASSERT(qs);
    /* Provide the process with waiter queues, if it doesn't have one. */
    if (!p->lock.queues) {
	qs->next = NULL;
	p->lock.queues = qs;
    }
    else {
	qs->next = p->lock.queues->next;
	p->lock.queues->next = qs;
    }

#ifdef ERTS_PROC_LOCK_HARD_DEBUG
    check_queue(&p->lock);
#endif

    /* Try to acquire locks one at a time in lock order and set wait flag
     * (via try_aquire()) for the first lock we fail to get. */
    try_aquire(&p->lock, wtr);

    ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);

#ifdef ERTS_PROC_LOCK_HARD_DEBUG
    check_queue(&p->lock);
#endif

    if (wtr->uflgs) {
	/* We didn't get them all; need to wait... */
	ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);
	erts_atomic32_set_nob(&wtr->uaflgs, 1);
	erts_pix_unlock(pix_lock);
	while (1) {
	    int res;
	    erts_tse_reset(wtr);
	    if (erts_atomic32_read_nob(&wtr->uaflgs) == 0)
		break;
	    /*
	     * Wait for needed locks. When we are woken all needed locks
	     * have been acquired by other threads and transferred to us.
	     * However, we need to be prepared for spurious wakeups.
	     */
	    do {
		res = erts_tse_wait(wtr); /* might return EINTR */
	    } while (res != 0);
	}
	erts_pix_lock(pix_lock);
	ASSERT(wtr->uflgs == 0);
    }

    /* Recover some queues to store in the waiter. */
    ERTS_LC_ASSERT(p->lock.queues);
    if (p->lock.queues->next) {
	qs = p->lock.queues->next;
	p->lock.queues->next = qs->next;
    }
    else {
	qs = p->lock.queues;
	p->lock.queues = NULL;
    }
    wtr->udata = qs;

    erts_pix_unlock(pix_lock);

    ERTS_LC_ASSERT(locks == (ERTS_PROC_LOCK_FLGS_READ_(&p->lock) & locks));

    tse_return(wtr, 0);
}
/*
 * Try to grab locks one at a time in lock order and wait on the lowest
 * lock we fail to grab, if any.
 *
 * On return all locks in 'need_locks' are held (either acquired here
 * or transferred to us by an unlocking thread while we waited).
 *
 * On entry, the pix lock is held iff !ERTS_PROC_LOCK_ATOMIC_IMPL.
 * On exit it is not held.
 */
static void
wait_for_locks(Process *p,
	       erts_pix_lock_t *pixlck,
	       ErtsProcLocks locks,
	       ErtsProcLocks need_locks,
	       ErtsProcLocks olflgs)
{
    erts_pix_lock_t *pix_lock = pixlck ? pixlck : ERTS_PID2PIXLOCK(p->common.id);
    erts_tse_t *wtr;

    /* Acquire a waiter object on which this thread can wait. */
    wtr = tse_fetch(pix_lock);

    /* Record which locks this waiter needs. */
    wtr->uflgs = need_locks;
    ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);

#if ERTS_PROC_LOCK_ATOMIC_IMPL
    erts_pix_lock(pix_lock);
#endif
    ERTS_LC_ASSERT(erts_lc_pix_lock_is_locked(pix_lock));

#ifdef ERTS_PROC_LOCK_HARD_DEBUG
    check_queue(&p->lock);
#endif

    /* Try to acquire locks one at a time in lock order and set wait flag
     * (via try_aquire()) for the first lock we fail to get. */
    try_aquire(&p->lock, wtr);

    ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);

#ifdef ERTS_PROC_LOCK_HARD_DEBUG
    check_queue(&p->lock);
#endif

    if (wtr->uflgs == 0)
	erts_pix_unlock(pix_lock);
    else {
	/* We didn't get them all; need to wait... */
	ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);

	erts_atomic32_set_nob(&wtr->uaflgs, 1);
	erts_pix_unlock(pix_lock);

	while (1) {
	    int res;
	    erts_tse_reset(wtr);

	    if (erts_atomic32_read_nob(&wtr->uaflgs) == 0)
		break;

	    /*
	     * Wait for needed locks. When we are woken all needed locks
	     * have been acquired by other threads and transferred to us.
	     * However, we need to be prepared for spurious wakeups.
	     */
	    do {
		res = erts_tse_wait(wtr); /* might return EINTR */
	    } while (res != 0);
	}

	ASSERT(wtr->uflgs == 0);
    }

    ERTS_LC_ASSERT(locks == (ERTS_PROC_LOCK_FLGS_READ_(&p->lock) & locks));

    tse_return(wtr);
}