/*
 * Link thread 't' into wait queue 'wq'.
 *
 * The queue is kept sorted in descending dispatch priority.  Every
 * waiter is on the singly-linked t_link chain; in addition, all waiters
 * of equal priority form a circular doubly-linked ring through
 * t_priforw/t_priback.  The ring gives O(1) access to the last thread
 * of a priority group, so a new thread of an existing priority is
 * appended after its equals (FIFO within a priority) without scanning
 * the whole group.
 *
 * Caller must hold wq->wq_lock.
 */
static void
waitq_link(waitq_t *wq, kthread_t *t)
{
	kthread_t *next_tp;
	kthread_t *last_tp;
	kthread_t **tpp;
	pri_t tpri, next_pri, last_pri = -1;

	ASSERT(DISP_LOCK_HELD(&wq->wq_lock));

	tpri = DISP_PRIO(t);
	tpp = &wq->wq_first;
	/*
	 * Walk the queue one priority group at a time: next_tp is the
	 * head of a group, next_tp->t_priback its last member, and the
	 * last member's t_link points at the next (lower) group's head.
	 * Stop at the first group with priority below tpri.
	 */
	while ((next_tp = *tpp) != NULL) {
		next_pri = DISP_PRIO(next_tp);
		if (tpri > next_pri)
			break;
		last_tp = next_tp->t_priback;
		last_pri = next_pri;
		tpp = &last_tp->t_link;
	}
	/* Splice t into the t_link chain at the insertion point. */
	*tpp = t;
	t->t_link = next_tp;
	if (last_pri == tpri) {
		/*
		 * A group of t's priority already exists; last_tp points
		 * to the last thread of this priority.  Append t to the
		 * circular priority ring after last_tp.
		 */
		t->t_priback = last_tp;
		t->t_priforw = last_tp->t_priforw;
		last_tp->t_priforw->t_priback = t;
		last_tp->t_priforw = t;
	} else {
		/* First thread at this priority: ring of one. */
		t->t_priback = t->t_priforw = t;
	}
	wq->wq_count++;
	t->t_waitq = wq;
}
/*
 * Make 'inheritor' inherit priority from this turnstile.
 *
 * If the turnstile does not yet have an inheritor, record 'inheritor'
 * and link the turnstile onto the inheritor's t_prioinv chain (under
 * t_pi_lock, taken at high disp level because we already hold the
 * chain's tc_lock).  If it already has one, it must be the same thread;
 * only the recorded epri is raised.  Finally, boost the inheritor's
 * effective priority if epri exceeds its current dispatch priority.
 *
 * Caller must hold the inheritor's thread lock and the turnstile
 * chain's tc_lock.
 */
static void
turnstile_pi_inherit(turnstile_t *ts, kthread_t *inheritor, pri_t epri)
{
	ASSERT(THREAD_LOCK_HELD(inheritor));
	ASSERT(DISP_LOCK_HELD(&TURNSTILE_CHAIN(ts->ts_sobj).tc_lock));

	/* Nothing to do if the inheritor's base priority is already >= epri. */
	if (epri <= inheritor->t_pri)
		return;

	if (ts->ts_inheritor == NULL) {
		ts->ts_inheritor = inheritor;
		ts->ts_epri = epri;
		disp_lock_enter_high(&inheritor->t_pi_lock);
		ts->ts_prioinv = inheritor->t_prioinv;
		inheritor->t_prioinv = ts;
		disp_lock_exit_high(&inheritor->t_pi_lock);
	} else {
		/*
		 * 'inheritor' is already inheriting from this turnstile,
		 * so just adjust its priority.
		 */
		ASSERT(ts->ts_inheritor == inheritor);
		if (ts->ts_epri < epri)
			ts->ts_epri = epri;
	}

	if (epri > DISP_PRIO(inheritor))
		thread_change_epri(inheritor, epri);
}
/*
 * Remove thread from specified turnstile sleep queue; retrieve its
 * free turnstile; if it is the last waiter, delete the turnstile
 * from the turnstile chain and if there is an inheritor, delete it
 * from the inheritor's t_prioinv chain.
 *
 * On entry the thread must be sleeping on the turnstile with its
 * thread lock being the chain's tc_lock (asserted below).  On return
 * 't' again owns a turnstile via t_ts (either one from the active
 * turnstile's freelist, or the now-inactive turnstile itself) and has
 * been removed from the sleep queue with its wchan fields cleared.
 */
static void
turnstile_dequeue(kthread_t *t)
{
	turnstile_t *ts = t->t_ts;
	turnstile_chain_t *tc = &TURNSTILE_CHAIN(ts->ts_sobj);
	turnstile_t *tsfree, **tspp;

	ASSERT(DISP_LOCK_HELD(&tc->tc_lock));
	ASSERT(t->t_lockp == &tc->tc_lock);

	if ((tsfree = ts->ts_free) != NULL) {
		/* Other waiters remain: hand t a turnstile off the freelist. */
		ASSERT(ts->ts_waiters > 1);
		ASSERT(tsfree->ts_waiters == 0);
		t->t_ts = tsfree;
		ts->ts_free = tsfree->ts_free;
		tsfree->ts_free = NULL;
	} else {
		/*
		 * The active turnstile's freelist is empty, so this
		 * must be the last waiter.  Remove the turnstile
		 * from the hash chain and leave the now-inactive
		 * turnstile attached to the thread we're waking.
		 * Note that the ts_inheritor for the turnstile
		 * may be NULL. If one exists, its t_prioinv
		 * chain has to be updated.
		 */
		ASSERT(ts->ts_waiters == 1);
		if (ts->ts_inheritor != NULL) {
			(void) turnstile_pi_tsdelete(ts, ts->ts_inheritor);
			/*
			 * If we ever do a "disinherit" or "unboost", we need
			 * to do it only if "t" is a thread at the head of the
			 * sleep queue. Since the sleep queue is prioritized,
			 * the disinherit is necessary only if the interrupted
			 * thread is the highest priority thread.
			 * Otherwise, there is a higher priority thread blocked
			 * on the turnstile, whose inheritance cannot be
			 * disinherited. However, disinheriting is explicitly
			 * not done here, since it would require holding the
			 * inheritor's thread lock (see turnstile_unsleep()).
			 */
			ts->ts_inheritor = NULL;
		}
		/* Unlink ts from the chain's singly-linked hash list. */
		tspp = &tc->tc_first;
		while (*tspp != ts)
			tspp = &(*tspp)->ts_next;
		*tspp = ts->ts_next;
		ASSERT(t->t_ts == ts);
	}
	ts->ts_waiters--;
	sleepq_dequeue(t);
	t->t_sobj_ops = NULL;
	t->t_wchan = NULL;
	t->t_wchan0 = NULL;
	ASSERT(t->t_state == TS_SLEEP);
}
/*
 * Tear down a wait queue.  The queue must already be drained (no
 * waiters, count zero), blocked, and its dispatcher lock must not be
 * held by anyone; the lock itself is destroyed here.
 */
void
waitq_fini(waitq_t *wq)
{
	/* Verify the queue is completely quiesced before destroying it. */
	ASSERT(wq->wq_count == 0);
	ASSERT(wq->wq_first == NULL);
	ASSERT(wq->wq_blocked == B_TRUE);
	ASSERT(!DISP_LOCK_HELD(&wq->wq_lock));

	DISP_LOCK_DESTROY(&wq->wq_lock);
}
/*
 * Free a cpucap structure.
 *
 * The cap must be fully inactive at this point: unlinked from any cap
 * list, with a zero cap value and its usage lock free.  Its embedded
 * wait queue and usage lock are torn down before the memory is
 * returned.  A NULL argument is silently ignored.
 */
static void
cap_free(cpucap_t *cap)
{
	if (cap == NULL)
		return;

	/*
	 * This cap should not be active
	 */
	ASSERT(!list_link_active(&cap->cap_link));
	ASSERT(cap->cap_value == 0);
	ASSERT(!DISP_LOCK_HELD(&cap->cap_usagelock));

	/* Destroy embedded resources, then release the structure itself. */
	waitq_fini(&cap->cap_waitq);
	DISP_LOCK_DESTROY(&cap->cap_usagelock);

	kmem_free(cap, sizeof (cpucap_t));
}
/*
 * Unlink thread 't' from wait queue 'wq' (the inverse of waitq_link):
 * remove it from the t_link chain and from its circular equal-priority
 * ring, then clear all of its queue-related fields.
 *
 * Caller must hold both the thread lock of 't' and wq->wq_lock, and
 * 't' must currently be on 'wq'.
 */
static void
waitq_unlink(waitq_t *wq, kthread_t *t)
{
	kthread_t *nt;
	kthread_t **ptl;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(DISP_LOCK_HELD(&wq->wq_lock));
	ASSERT(t->t_waitq == wq);

	/*
	 * For a non-head member of a priority ring, the thread's t_link
	 * predecessor is exactly t->t_priback, so this is the common
	 * O(1) case.
	 */
	ptl = &t->t_priback->t_link;
	/*
	 * Is it the head of a priority sublist?  If so, need to walk
	 * the priorities to find the t_link pointer that points to it.
	 */
	if (*ptl != t) {
		/*
		 * Find the right priority level.
		 */
		ptl = &t->t_waitq->wq_first;
		while ((nt = *ptl) != t)
			ptl = &nt->t_priback->t_link;
	}
	/*
	 * Remove thread from the t_link list.
	 */
	*ptl = t->t_link;
	/*
	 * Take it off the priority sublist if there's more than one
	 * thread there.
	 */
	if (t->t_priforw != t) {
		t->t_priback->t_priforw = t->t_priforw;
		t->t_priforw->t_priback = t->t_priback;
	}
	t->t_link = NULL;

	wq->wq_count--;
	t->t_waitq = NULL;
	t->t_priforw = NULL;
	t->t_priback = NULL;
}
/*
 * Wake threads that are blocked in a turnstile.
 *
 * Wakes up to 'nthreads' threads from sleep queue 'qnum' of turnstile
 * 'ts'.  If 'owner' is non-NULL, the caller is doing direct handoff of
 * ownership: 'owner' must be among the woken threads (we panic if it is
 * not), and it inherits the priority of the highest remaining waiter.
 * Any priority this turnstile conferred on the current inheritor is
 * waived first.
 *
 * Caller must hold the chain's tc_lock; it is dropped before returning.
 * While each thread is being dequeued, its thread lock *is* tc_lock
 * (set when it went to sleep), which is why dequeue and class wakeup
 * can proceed without taking any additional lock.
 */
void
turnstile_wakeup(turnstile_t *ts, int qnum, int nthreads, kthread_t *owner)
{
	turnstile_chain_t *tc = &TURNSTILE_CHAIN(ts->ts_sobj);
	sleepq_t *sqp = &ts->ts_sleepq[qnum];

	ASSERT(DISP_LOCK_HELD(&tc->tc_lock));

	/*
	 * Waive any priority we may have inherited from this turnstile.
	 */
	if (ts->ts_inheritor != NULL) {
		turnstile_pi_waive(ts);
	}
	while (nthreads-- > 0) {
		kthread_t *t = sqp->sq_first;
		ASSERT(t->t_ts == ts);
		ASSERT(ts->ts_waiters > 1 || ts->ts_inheritor == NULL);
		DTRACE_SCHED1(wakeup, kthread_t *, t);
		turnstile_dequeue(t);
		CL_WAKEUP(t); /* previous thread lock, tc_lock, not dropped */
		/*
		 * If the caller did direct handoff of ownership,
		 * make the new owner inherit from this turnstile.
		 */
		if (t == owner) {
			kthread_t *wp = ts->ts_sleepq[TS_WRITER_Q].sq_first;
			kthread_t *rp = ts->ts_sleepq[TS_READER_Q].sq_first;
			pri_t wpri = wp ? DISP_PRIO(wp) : 0;
			pri_t rpri = rp ? DISP_PRIO(rp) : 0;
			turnstile_pi_inherit(ts, t, MAX(wpri, rpri));
			owner = NULL;
		}
		thread_unlock_high(t);		/* drop run queue lock */
	}
	if (owner != NULL)
		panic("turnstile_wakeup: owner %p not woken", (void *)owner);
	disp_lock_exit(&tc->tc_lock);
}
/*
 * Block the current thread on synchronization object 'sobj', queued on
 * sleep queue 'qnum' of turnstile 'ts' (NULL if no thread is blocked on
 * 'sobj' yet), applying priority inheritance along the entire blocking
 * chain before going to sleep.
 *
 * For user-level priority-inheriting objects (SOBJ_USER_PI), 'mp' is
 * the upimutex protecting the object and is dropped before sleeping;
 * the sleep is then signal- and timeout-interruptible ('lwptp'
 * describes any pending lwp timeout).  For kernel objects 'mp' must be
 * NULL (asserted: exactly one of the two modes holds).
 *
 * Returns 0 on normal wakeup; for SOBJ_USER_PI may also return EINTR
 * (signal/sysabort/forced return), ETIME (timeout), or EDEADLK (an
 * application-level cycle in the blocking chain).
 *
 * Caller must hold the chain's tc_lock; it is released via the sleep
 * transition (THREAD_SLEEP makes tc_lock the thread's lock).
 */
int
turnstile_block(turnstile_t *ts, int qnum, void *sobj, sobj_ops_t *sobj_ops,
    kmutex_t *mp, lwp_timer_t *lwptp)
{
	kthread_t *owner;
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	turnstile_chain_t *tc = &TURNSTILE_CHAIN(sobj);
	int error = 0;
	int loser = 0;

	ASSERT(DISP_LOCK_HELD(&tc->tc_lock));
	ASSERT(mp == NULL || IS_UPI(mp));
	ASSERT((SOBJ_TYPE(sobj_ops) == SOBJ_USER_PI) ^ (mp == NULL));

	thread_lock_high(t);

	if (ts == NULL) {
		/*
		 * This is the first thread to block on this sobj.
		 * Take its attached turnstile and add it to the hash chain.
		 */
		ts = t->t_ts;
		ts->ts_sobj = sobj;
		ts->ts_next = tc->tc_first;
		tc->tc_first = ts;
		ASSERT(ts->ts_waiters == 0);
	} else {
		/*
		 * Another thread has already donated its turnstile
		 * to block on this sobj, so ours isn't needed.
		 * Stash it on the active turnstile's freelist.
		 */
		turnstile_t *myts = t->t_ts;
		myts->ts_free = ts->ts_free;
		ts->ts_free = myts;
		t->t_ts = ts;
		ASSERT(ts->ts_sobj == sobj);
		ASSERT(ts->ts_waiters > 0);
	}

	/*
	 * Put the thread to sleep.
	 */
	ASSERT(t != CPU->cpu_idle_thread);
	ASSERT(CPU_ON_INTR(CPU) == 0);
	ASSERT(t->t_wchan0 == NULL && t->t_wchan == NULL);
	ASSERT(t->t_state == TS_ONPROC);

	if (SOBJ_TYPE(sobj_ops) == SOBJ_USER_PI) {
		/* User-PI sleeps must be interruptible by signals. */
		curthread->t_flag |= T_WAKEABLE;
	}
	CL_SLEEP(t);		/* assign kernel priority */
	THREAD_SLEEP(t, &tc->tc_lock);
	t->t_wchan = sobj;
	t->t_sobj_ops = sobj_ops;
	DTRACE_SCHED(sleep);

	if (lwp != NULL) {
		lwp->lwp_ru.nvcsw++;
		(void) new_mstate(t, LMS_SLEEP);
		if (SOBJ_TYPE(sobj_ops) == SOBJ_USER_PI) {
			lwp->lwp_asleep = 1;
			lwp->lwp_sysabort = 0;
			/*
			 * make wchan0 non-zero to conform to the rule that
			 * threads blocking for user-level objects have a
			 * non-zero wchan0: this prevents spurious wake-ups
			 * by, for example, /proc.
			 */
			t->t_wchan0 = (caddr_t)1;
		}
	}
	ts->ts_waiters++;
	sleepq_insert(&ts->ts_sleepq[qnum], t);

	if (SOBJ_TYPE(sobj_ops) == SOBJ_MUTEX &&
	    SOBJ_OWNER(sobj_ops, sobj) == NULL)
		panic("turnstile_block(%p): unowned mutex", (void *)ts);

	/*
	 * Follow the blocking chain to its end, willing our priority to
	 * everyone who's in our way.  Throughout the loop 't' is the
	 * thread whose lock we currently hold; it starts as curthread
	 * and advances to each successive owner in the chain.
	 */
	while (t->t_sobj_ops != NULL &&
	    (owner = SOBJ_OWNER(t->t_sobj_ops, t->t_wchan)) != NULL) {
		if (owner == curthread) {
			/* The chain has cycled back to us. */
			if (SOBJ_TYPE(sobj_ops) != SOBJ_USER_PI) {
				panic("Deadlock: cycle in blocking chain");
			}
			/*
			 * If the cycle we've encountered ends in mp,
			 * then we know it isn't a 'real' cycle because
			 * we're going to drop mp before we go to sleep.
			 * Moreover, since we've come full circle we know
			 * that we must have willed priority to everyone
			 * in our way.  Therefore, we can break out now.
			 */
			if (t->t_wchan == (void *)mp)
				break;

			if (loser)
				lock_clear(&turnstile_loser_lock);
			/*
			 * For SOBJ_USER_PI, a cycle is an application
			 * deadlock which needs to be communicated
			 * back to the application.
			 */
			thread_unlock_nopreempt(t);
			mutex_exit(mp);
			setrun(curthread);
			swtch(); /* necessary to transition state */
			curthread->t_flag &= ~T_WAKEABLE;
			if (lwptp->lwpt_id != 0)
				(void) lwp_timer_dequeue(lwptp);
			setallwatch();
			lwp->lwp_asleep = 0;
			lwp->lwp_sysabort = 0;
			return (EDEADLK);
		}
		if (!turnstile_interlock(t->t_lockp, &owner->t_lockp)) {
			/*
			 * If we failed to grab the owner's thread lock,
			 * turnstile_interlock() will have dropped t's
			 * thread lock, so at this point we don't even know
			 * that 't' exists anymore. The simplest solution
			 * is to restart the entire priority inheritance dance
			 * from the beginning of the blocking chain, since
			 * we *do* know that 'curthread' still exists.
			 * Application of priority inheritance is idempotent,
			 * so it's OK that we're doing it more than once.
			 * Note also that since we've dropped our thread lock,
			 * we may already have been woken up; if so, our
			 * t_sobj_ops will be NULL, the loop will terminate,
			 * and the call to swtch() will be a no-op.  Phew.
			 *
			 * There is one further complication: if two (or more)
			 * threads keep trying to grab the turnstile locks out
			 * of order and keep losing the race to another thread,
			 * these "dueling losers" can livelock the system.
			 * Therefore, once we get into this rare situation,
			 * we serialize all the losers.
			 */
			if (loser == 0) {
				loser = 1;
				lock_set(&turnstile_loser_lock);
			}
			t = curthread;
			thread_lock_high(t);
			continue;
		}

		/*
		 * We now have the owner's thread lock.  If we are traversing
		 * from non-SOBJ_USER_PI ops to SOBJ_USER_PI ops, then we know
		 * that we have caught the thread while in the TS_SLEEP state,
		 * but holding mp.  We know that this situation is transient
		 * (mp will be dropped before the holder actually sleeps on
		 * the SOBJ_USER_PI sobj), so we will spin waiting for mp to
		 * be dropped.  Then, as in the turnstile_interlock() failure
		 * case, we will restart the priority inheritance dance.
		 */
		if (SOBJ_TYPE(t->t_sobj_ops) != SOBJ_USER_PI &&
		    owner->t_sobj_ops != NULL &&
		    SOBJ_TYPE(owner->t_sobj_ops) == SOBJ_USER_PI) {
			kmutex_t *upi_lock = (kmutex_t *)t->t_wchan;
			ASSERT(IS_UPI(upi_lock));
			ASSERT(SOBJ_TYPE(t->t_sobj_ops) == SOBJ_MUTEX);
			if (t->t_lockp != owner->t_lockp)
				thread_unlock_high(owner);
			thread_unlock_high(t);
			if (loser)
				lock_clear(&turnstile_loser_lock);
			/* Spin until the transient holder drops the upimutex. */
			while (mutex_owner(upi_lock) == owner) {
				SMT_PAUSE();
				continue;
			}
			if (loser)
				lock_set(&turnstile_loser_lock);
			t = curthread;
			thread_lock_high(t);
			continue;
		}

		/* Will our priority to the owner and advance down the chain. */
		turnstile_pi_inherit(t->t_ts, owner, DISP_PRIO(t));
		if (t->t_lockp != owner->t_lockp)
			thread_unlock_high(t);
		t = owner;
	}
	if (loser)
		lock_clear(&turnstile_loser_lock);

	/*
	 * Note: 't' and 'curthread' were synonymous before the loop above,
	 * but now they may be different.  ('t' is now the last thread in
	 * the blocking chain.)
	 */
	if (SOBJ_TYPE(sobj_ops) == SOBJ_USER_PI) {
		ushort_t s = curthread->t_oldspl;
		int timedwait = 0;
		uint_t imm_timeout = 0;
		clock_t tim = -1;

		thread_unlock_high(t);
		if (lwptp->lwpt_id != 0) {
			/*
			 * We enqueued a timeout.  If it has already fired,
			 * lwptp->lwpt_imm_timeout has been set with cas,
			 * so fetch it with cas.
			 */
			timedwait = 1;
			imm_timeout =
			    atomic_cas_uint(&lwptp->lwpt_imm_timeout, 0, 0);
		}
		mutex_exit(mp);
		splx(s);

		/*
		 * Don't actually sleep if a signal, forced return, or an
		 * already-fired timeout is pending; set ourselves runnable
		 * so the swtch() below returns promptly.
		 */
		if (ISSIG(curthread, JUSTLOOKING) ||
		    MUSTRETURN(p, curthread) || imm_timeout)
			setrun(curthread);
		swtch();
		curthread->t_flag &= ~T_WAKEABLE;
		if (timedwait)
			tim = lwp_timer_dequeue(lwptp);
		setallwatch();
		if (ISSIG(curthread, FORREAL) || lwp->lwp_sysabort ||
		    MUSTRETURN(p, curthread))
			error = EINTR;
		else if (imm_timeout || (timedwait && tim == -1))
			error = ETIME;
		lwp->lwp_sysabort = 0;
		lwp->lwp_asleep = 0;
	} else {
		thread_unlock_nopreempt(t);
		swtch();
	}

	return (error);
}