/*
 * tse_fetch() - fetch the calling thread's thread-specific-event (tse)
 * object and make sure its 'udata' field carries a set of proc-lock
 * wait queues (an erts_proc_lock_queues_t), allocating or recycling
 * one from 'queue_free_list' on first use.
 *
 * pix_lock - the pix lock the caller may currently hold (may be NULL).
 *            On a spinlock-based (non-atomic) implementation it is
 *            temporarily released while qs_lock is taken / memory is
 *            allocated, and re-acquired before returning, so the
 *            caller's locking state is unchanged on return.
 *
 * Returns the tse object with 'uflgs' cleared.
 */
static ERTS_INLINE erts_tse_t *
tse_fetch(erts_pix_lock_t *pix_lock)
{
    erts_tse_t *tse = erts_tse_fetch();
    if (!tse->udata) {
	/* First use by this thread: attach a queues structure. */
	erts_proc_lock_queues_t *qs;
#if ERTS_PROC_LOCK_SPINLOCK_IMPL && !ERTS_PROC_LOCK_ATOMIC_IMPL
	/* Don't hold the pix spin lock while taking qs_lock or
	 * calling the allocator. */
	if (pix_lock)
	    erts_pix_unlock(pix_lock);
#endif
	erts_smp_spin_lock(&qs_lock);
	qs = queue_free_list;
	if (qs) {
	    /* Recycle a queues structure from the free list. */
	    queue_free_list = queue_free_list->next;
	    erts_smp_spin_unlock(&qs_lock);
	}
	else {
	    /* Free list empty; allocate and zero a fresh one
	     * (zeroqs is presumably an all-zero template — the
	     * definition is outside this chunk). */
	    erts_smp_spin_unlock(&qs_lock);
	    qs = erts_alloc(ERTS_ALC_T_PROC_LCK_QS,
			    sizeof(erts_proc_lock_queues_t));
	    sys_memcpy((void *) qs,
		       (void *) &zeroqs,
		       sizeof(erts_proc_lock_queues_t));
	}
	tse->udata = qs;
#if ERTS_PROC_LOCK_SPINLOCK_IMPL && !ERTS_PROC_LOCK_ATOMIC_IMPL
	/* Restore the caller's pix lock state. */
	if (pix_lock)
	    erts_pix_lock(pix_lock);
#endif
    }
    tse->uflgs = 0;
    return tse;
}
/*
 * erts_pid2proc_safelock() is called from erts_pid2proc_opt() when
 * it wasn't possible to trylock all locks needed.
 *   c_p              - current process
 *   c_p_have_locks   - locks held on c_p
 *   pid              - process id of process we are looking up
 *   proc             - process struct of process we are looking
 *                      up (both in and out argument)
 *   need_locks       - all locks we need (including have_locks)
 *   pix_lock         - pix lock for process we are looking up
 *   flags            - option flags
 *
 * On entry the pix lock is held; it is also held on return.  If the
 * looked-up process turns out to be exiting (and ERTS_P2P_FLG_ALLOW_OTHER_X
 * was not given, or the process is no longer in process_tab), *proc is
 * set to NULL and all acquired locks are released again.
 */
void
erts_pid2proc_safelock(Process *c_p,
		       ErtsProcLocks c_p_have_locks,
		       Process **proc,
		       ErtsProcLocks need_locks,
		       erts_pix_lock_t *pix_lock,
		       int flags)
{
    Process *p = *proc;
    ERTS_LC_ASSERT(p->lock.refc > 0);
    ERTS_LC_ASSERT(process_tab[internal_pid_index(p->id)] == p);
    /* Keep a reference so 'p' cannot be freed while we block on its
     * locks below. */
    p->lock.refc++;
    erts_pix_unlock(pix_lock);
    proc_safelock(c_p,
		  c_p ? ERTS_PID2PIXLOCK(c_p->id) : NULL,
		  c_p_have_locks,
		  c_p_have_locks,
		  p,
		  pix_lock,
		  0,
		  need_locks);
    erts_pix_lock(pix_lock);
    if (!p->is_exiting
	|| ((flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
	    && process_tab[internal_pid_index(p->id)] == p)) {
	/* Process is still usable; drop the extra reference taken above. */
	ERTS_LC_ASSERT(p->lock.refc > 1);
	p->lock.refc--;
    }
    else {
	/* No proc. Note, we need to keep refc until after process unlock */
	erts_pix_unlock(pix_lock);
	erts_smp_proc_unlock__(p, pix_lock, need_locks);
	*proc = NULL;
	erts_pix_lock(pix_lock);
	ERTS_LC_ASSERT(p->lock.refc > 0);
	if (--p->lock.refc == 0) {
	    /* Last reference gone; free the struct outside the pix lock. */
	    erts_pix_unlock(pix_lock);
	    erts_free_proc(p);
	    erts_pix_lock(pix_lock);
	}
    }
}
/*
 * is_proc_alive() - check, under the process's pix lock, whether 'p'
 * has not yet started exiting.
 *
 * Returns nonzero iff the process is still alive at the moment of the
 * check (the answer may of course be stale as soon as the pix lock is
 * released again).
 */
static ERTS_INLINE int
is_proc_alive(Process *p)
{
    erts_pix_lock_t *lck = ERTS_PID2PIXLOCK(p->id);
    int alive;

    erts_pix_lock(lck);
    alive = p->is_exiting ? 0 : 1;
    erts_pix_unlock(lck);

    return alive;
}
/*
 * erts_proc_lock_failed() is called when erts_smp_proc_lock()
 * wasn't able to lock all locks. We may need to transfer locks
 * to waiters and wait for our turn on locks.
 *
 * Iff !ERTS_PROC_LOCK_ATOMIC_IMPL, the pix lock is locked on entry.
 *
 * This always returns with the pix lock unlocked.
 */
void
erts_proc_lock_failed(Process *p,
		      erts_pix_lock_t *pixlck,
		      ErtsProcLocks locks,
		      ErtsProcLocks old_lflgs)
{
    /* Yield the thread periodically while spinning so we don't
     * monopolize the core. */
    int until_yield = ERTS_PROC_LOCK_SPIN_UNTIL_YIELD;
    int thr_spin_count;
    int spin_count;
    ErtsProcLocks need_locks = locks;
    ErtsProcLocks olflgs = old_lflgs;

    /* Scheduler (main) threads spin longer than aux threads. */
    if (erts_thr_get_main_status())
	thr_spin_count = proc_lock_spin_count;
    else
	thr_spin_count = aux_thr_proc_lock_spin_count;

    spin_count = thr_spin_count;

    while (need_locks != 0) {
	ErtsProcLocks can_grab;

	/* Locks we may take now without violating lock order. */
	can_grab = in_order_locks(olflgs, need_locks);

	if (can_grab == 0) {
	    /* Someone already has the lowest-numbered lock we want. */
	    if (spin_count-- <= 0) {
		/* Too many retries, give up and sleep for the lock. */
		wait_for_locks(p, pixlck, locks, need_locks, olflgs);
		return;
	    }
	    ERTS_SPIN_BODY;

	    if (--until_yield == 0) {
		until_yield = ERTS_PROC_LOCK_SPIN_UNTIL_YIELD;
		erts_thr_yield();
	    }

	    /* Re-read the lock flags and try again. */
	    olflgs = ERTS_PROC_LOCK_FLGS_READ_(&p->lock);
	}
	else {
	    /* Try to grab all of the grabbable locks at once with cmpxchg. */
	    ErtsProcLocks grabbed = olflgs | can_grab;
	    ErtsProcLocks nflgs =
		ERTS_PROC_LOCK_FLGS_CMPXCHG_ACQB_(&p->lock, grabbed, olflgs);

	    if (nflgs == olflgs) {
		/* Success! We grabbed the 'can_grab' locks. */
		olflgs = grabbed;
		need_locks &= ~can_grab;

		/* Since we made progress, reset the spin count. */
		spin_count = thr_spin_count;
	    }
	    else {
		/* Compare-and-exchange failed, try again. */
		olflgs = nflgs;
	    }
	}
    }

    /* Now we have all of the locks we wanted. */
#if !ERTS_PROC_LOCK_ATOMIC_IMPL
    erts_pix_unlock(pixlck);
#endif
}
/*
 * Try to grab locks one at a time in lock order and wait on the lowest
 * lock we fail to grab, if any.
 *
 * If successful, this returns 0 and all locks in 'need_locks' are held.
 *
 * On entry, the pix lock is held iff !ERTS_PROC_LOCK_ATOMIC_IMPL.
 * On exit it is not held.
 *
 * NOTE(review): despite the comment above, this function is declared
 * 'void' and returns no value — the "returns 0" wording appears to be
 * stale; confirm against the caller.
 */
static void
wait_for_locks(Process *p,
	       erts_pix_lock_t *pixlck,
	       ErtsProcLocks locks,
	       ErtsProcLocks need_locks,
	       ErtsProcLocks olflgs)
{
    erts_pix_lock_t *pix_lock = pixlck ? pixlck : ERTS_PID2PIXLOCK(p->id);
    erts_tse_t *wtr;
    erts_proc_lock_queues_t *qs;

    /* Acquire a waiter object on which this thread can wait. */
    wtr = tse_fetch(pix_lock);

    /* Record which locks this waiter needs. */
    wtr->uflgs = need_locks;

    ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);

#if ERTS_PROC_LOCK_ATOMIC_IMPL
    /* On the atomic implementation the pix lock is not yet held. */
    erts_pix_lock(pix_lock);
#endif
    ERTS_LC_ASSERT(erts_lc_pix_lock_is_locked(pix_lock));

    qs = wtr->udata;
    ASSERT(qs);

    /* Provide the process with waiter queues, if it doesn't have one.
     * Otherwise push our queues structure onto the process's spare list
     * (it is recovered again before we return below). */
    if (!p->lock.queues) {
	qs->next = NULL;
	p->lock.queues = qs;
    }
    else {
	qs->next = p->lock.queues->next;
	p->lock.queues->next = qs;
    }

#ifdef ERTS_PROC_LOCK_HARD_DEBUG
    check_queue(&p->lock);
#endif

    /* Try to aquire locks one at a time in lock order and set wait flag */
    try_aquire(&p->lock, wtr);

    ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);

#ifdef ERTS_PROC_LOCK_HARD_DEBUG
    check_queue(&p->lock);
#endif

    if (wtr->uflgs) {
	/* We didn't get them all; need to wait... */

	ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);

	/* uaflgs == 1 means "still waiting"; cleared to 0 by the lock
	 * transferer (see transfer_locks()) once all locks are ours. */
	erts_atomic32_set_nob(&wtr->uaflgs, 1);
	erts_pix_unlock(pix_lock);

	while (1) {
	    int res;
	    erts_tse_reset(wtr);

	    if (erts_atomic32_read_nob(&wtr->uaflgs) == 0)
		break;

	    /*
	     * Wait for needed locks. When we are woken all needed locks have
	     * have been acquired by other threads and transfered to us.
	     * However, we need to be prepared for spurious wakeups.
	     */
	    do {
		res = erts_tse_wait(wtr); /* might return EINTR */
	    } while (res != 0);
	}

	erts_pix_lock(pix_lock);

	ASSERT(wtr->uflgs == 0);
    }

    /* Recover some queues to store in the waiter. */
    ERTS_LC_ASSERT(p->lock.queues);
    if (p->lock.queues->next) {
	qs = p->lock.queues->next;
	p->lock.queues->next = qs->next;
    }
    else {
	qs = p->lock.queues;
	p->lock.queues = NULL;
    }
    wtr->udata = qs;

    erts_pix_unlock(pix_lock);

    ERTS_LC_ASSERT(locks == (ERTS_PROC_LOCK_FLGS_READ_(&p->lock) & locks));

    tse_return(wtr, 0);
}
/*
 * Transfer 'trnsfr_lcks' held by this executing thread to other
 * threads waiting for the locks. When a lock has been transferred
 * we also have to try to aquire as many lock as possible for the
 * other thread.
 *
 * p          - process whose locks are being released/transferred
 * trnsfr_lcks - locks to hand over to waiting threads
 * pix_lock   - the process's pix lock; held on entry
 * unlock     - nonzero: return with the pix lock released;
 *              zero: return with it held
 *
 * Returns the number of locks transferred.
 */
static int
transfer_locks(Process *p,
	       ErtsProcLocks trnsfr_lcks,
	       erts_pix_lock_t *pix_lock,
	       int unlock)
{
    int transferred = 0;
    erts_tse_t *wake = NULL;   /* list of waiters to wake, linked via ->next */
    erts_tse_t *wtr;
    ErtsProcLocks unset_waiter = 0;
    ErtsProcLocks tlocks = trnsfr_lcks;
    int lock_no;

    ERTS_LC_ASSERT(erts_lc_pix_lock_is_locked(pix_lock));

#ifdef ERTS_PROC_LOCK_HARD_DEBUG
    check_queue(&p->lock);
#endif

    /* Walk locks in lock order; each transferred lock goes to the head
     * waiter in that lock's queue. */
    for (lock_no = 0; tlocks && lock_no <= ERTS_PROC_LOCK_MAX_BIT; lock_no++) {
	ErtsProcLocks lock = ((ErtsProcLocks) 1) << lock_no;
	if (tlocks & lock) {
	    erts_proc_lock_queues_t *qs = p->lock.queues;
	    /* Transfer lock */
#ifdef ERTS_ENABLE_LOCK_CHECK
	    /* Only the lock checker needs tlocks fully cleared; without
	     * it the loop simply runs to ERTS_PROC_LOCK_MAX_BIT. */
	    tlocks &= ~lock;
#endif
	    ERTS_LC_ASSERT(ERTS_PROC_LOCK_FLGS_READ_(&p->lock)
			   & (lock << ERTS_PROC_LOCK_WAITER_SHIFT));
	    transferred++;
	    wtr = dequeue_waiter(qs, lock_no);
	    ERTS_LC_ASSERT(wtr);
	    /* Queue drained: remember to clear this lock's waiter flag. */
	    if (!qs->queue[lock_no])
		unset_waiter |= lock;
	    ERTS_LC_ASSERT(wtr->uflgs & lock);
	    wtr->uflgs &= ~lock;
	    /* Opportunistically grab the waiter's remaining locks too. */
	    if (wtr->uflgs)
		try_aquire(&p->lock, wtr);
	    if (!wtr->uflgs) {
		/*
		 * The other thread got all locks it needs;
		 * need to wake it up.
		 */
		wtr->next = wake;
		wake = wtr;
	    }
	}
    }

    if (unset_waiter) {
	unset_waiter <<= ERTS_PROC_LOCK_WAITER_SHIFT;
	(void) ERTS_PROC_LOCK_FLGS_BAND_(&p->lock, ~unset_waiter);
    }

#ifdef ERTS_PROC_LOCK_HARD_DEBUG
    check_queue(&p->lock);
#endif

    ERTS_LC_ASSERT(tlocks == 0); /* We should have transferred all of them */

    if (!wake) {
	if (unlock)
	    erts_pix_unlock(pix_lock);
    }
    else {
	/* Wake satisfied waiters outside the pix lock: clear uaflgs
	 * first so wait_for_locks() sees completion, then signal. */
	erts_pix_unlock(pix_lock);

	do {
	    erts_tse_t *tmp = wake;
	    wake = wake->next;
	    erts_atomic32_set_nob(&tmp->uaflgs, 0);
	    erts_tse_set(tmp);
	} while (wake);

	if (!unlock)
	    erts_pix_lock(pix_lock);
    }
    return transferred;
}
/*
 * Try to grab locks one at a time in lock order and wait on the lowest
 * lock we fail to grab, if any.
 *
 * If successful, this returns 0 and all locks in 'need_locks' are held.
 *
 * On entry, the pix lock is held iff !ERTS_PROC_LOCK_ATOMIC_IMPL.
 * On exit it is not held.
 *
 * NOTE(review): a second, queue-based definition of wait_for_locks()
 * appears earlier in this file (using p->id and a two-argument
 * tse_return()); the two look like variants from different versions
 * and cannot both be compiled in one translation unit — confirm which
 * one belongs here.  Also, the "returns 0" wording above is stale:
 * the function is 'void'.
 */
static void
wait_for_locks(Process *p,
	       erts_pix_lock_t *pixlck,
	       ErtsProcLocks locks,
	       ErtsProcLocks need_locks,
	       ErtsProcLocks olflgs)
{
    erts_pix_lock_t *pix_lock = pixlck ? pixlck : ERTS_PID2PIXLOCK(p->common.id);
    erts_tse_t *wtr;

    /* Acquire a waiter object on which this thread can wait. */
    wtr = tse_fetch(pix_lock);

    /* Record which locks this waiter needs. */
    wtr->uflgs = need_locks;

    ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);

#if ERTS_PROC_LOCK_ATOMIC_IMPL
    /* On the atomic implementation the pix lock is not yet held. */
    erts_pix_lock(pix_lock);
#endif
    ERTS_LC_ASSERT(erts_lc_pix_lock_is_locked(pix_lock));

#ifdef ERTS_PROC_LOCK_HARD_DEBUG
    check_queue(&p->lock);
#endif

    /* Try to aquire locks one at a time in lock order and set wait flag */
    try_aquire(&p->lock, wtr);

    ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);

#ifdef ERTS_PROC_LOCK_HARD_DEBUG
    check_queue(&p->lock);
#endif

    if (wtr->uflgs == 0)
	erts_pix_unlock(pix_lock);
    else {
	/* We didn't get them all; need to wait... */

	ASSERT((wtr->uflgs & ~ERTS_PROC_LOCKS_ALL) == 0);

	/* uaflgs == 1 means "still waiting"; cleared to 0 by the lock
	 * transferer once all needed locks have been handed to us. */
	erts_atomic32_set_nob(&wtr->uaflgs, 1);
	erts_pix_unlock(pix_lock);

	while (1) {
	    int res;
	    erts_tse_reset(wtr);

	    if (erts_atomic32_read_nob(&wtr->uaflgs) == 0)
		break;

	    /*
	     * Wait for needed locks. When we are woken all needed locks have
	     * have been acquired by other threads and transfered to us.
	     * However, we need to be prepared for spurious wakeups.
	     */
	    do {
		res = erts_tse_wait(wtr); /* might return EINTR */
	    } while (res != 0);
	}

	ASSERT(wtr->uflgs == 0);
    }

    ERTS_LC_ASSERT(locks == (ERTS_PROC_LOCK_FLGS_READ_(&p->lock) & locks));

    tse_return(wtr);
}