/*
 * Like cv_wait_sig_swap but allows the caller to indicate (with a
 * non-NULL sigret) that they will take care of signalling the cv
 * after wakeup, if necessary.  This is a vile hack that should only
 * be used when no other option is available; almost all callers
 * should just use cv_wait_sig_swap (which takes care of the cv_signal
 * stuff automatically) instead.
 */
int
cv_wait_sig_swap_core(kcondvar_t *cvp, kmutex_t *mp, int *sigret)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	int rval = 1;
	int signalled = 0;

	if (panicstr)
		return (rval);

	/*
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr) {
		cv_wait(cvp, mp);
		return (rval);
	}

	lwp->lwp_asleep = 1;
	lwp->lwp_sysabort = 0;
	thread_lock(t);
	t->t_kpri_req = 0;	/* don't need kernel priority */
	cv_block_sig(t, (condvar_impl_t *)cvp);
	/* I can be swapped now */
	curthread->t_schedflag &= ~TS_DONT_SWAP;
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t))
		setrun(t);
	/* ASSERT(no locks are held) */
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	t->t_flag &= ~T_WAKEABLE;
	/* TS_DONT_SWAP set by disp() */
	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
	mutex_enter(mp);
	if (ISSIG_PENDING(t, lwp, p)) {
		mutex_exit(mp);
		if (issig(FORREAL))
			rval = 0;
		mutex_enter(mp);
	}
	if (lwp->lwp_sysabort || MUSTRETURN(p, t))
		rval = 0;
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	if (rval == 0) {
		if (sigret != NULL)
			*sigret = signalled;	/* just tell the caller */
		else if (signalled)
			cv_signal(cvp);	/* avoid consuming the cv_signal() */
	}
	return (rval);
}
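
/*
 * Hypothetical usage sketch (not from the original source): a caller that
 * cannot safely re-post the wakeup from inside cv_wait_sig_swap_core()
 * passes a non-NULL sigret and, on the interrupted path, calls cv_signal()
 * itself once the lock has been dropped so the absorbed wakeup is not lost.
 * example_swap_wait() and condp are illustrative names only; EINTR assumes
 * <sys/errno.h>.
 */
static int
example_swap_wait(kcondvar_t *cvp, kmutex_t *mp, volatile int *condp)
{
	int signalled = 0;

	mutex_enter(mp);
	while (*condp == 0) {
		if (cv_wait_sig_swap_core(cvp, mp, &signalled) == 0) {
			/* interrupted by a signal */
			mutex_exit(mp);
			/*
			 * Re-post the wakeup we may have consumed, now
			 * that 'mp' is no longer held.
			 */
			if (signalled)
				cv_signal(cvp);
			return (EINTR);
		}
	}
	mutex_exit(mp);
	return (0);
}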
int
cv_wait_sig(kcondvar_t *cvp, kmutex_t *mp)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	int rval = 1;
	int signalled = 0;

	if (panicstr)
		return (rval);

	/*
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr) {
		cv_wait(cvp, mp);
		return (rval);
	}

	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
	lwp->lwp_asleep = 1;
	lwp->lwp_sysabort = 0;
	thread_lock(t);
	cv_block_sig(t, (condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t))
		setrun(t);
	/* ASSERT(no locks are held) */
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	t->t_flag &= ~T_WAKEABLE;
	mutex_enter(mp);
	if (ISSIG_PENDING(t, lwp, p)) {
		mutex_exit(mp);
		if (issig(FORREAL))
			rval = 0;
		mutex_enter(mp);
	}
	if (lwp->lwp_sysabort || MUSTRETURN(p, t))
		rval = 0;
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	if (rval == 0 && signalled)	/* avoid consuming the cv_signal() */
		cv_signal(cvp);
	return (rval);
}
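
/*
 * Hypothetical usage sketch (not from the original source): the standard
 * consumer pattern for cv_wait_sig().  A return of 1 only means "woken up",
 * so the condition is re-checked in a loop; a return of 0 means a signal is
 * pending and the caller backs out with EINTR.  example_sig_wait(), xs_cv,
 * xs_lock and xs_ready are illustrative names only.
 */
struct example_state {
	kmutex_t	xs_lock;
	kcondvar_t	xs_cv;
	int		xs_ready;
};

static int
example_sig_wait(struct example_state *xsp)
{
	mutex_enter(&xsp->xs_lock);
	while (!xsp->xs_ready) {
		if (cv_wait_sig(&xsp->xs_cv, &xsp->xs_lock) == 0) {
			mutex_exit(&xsp->xs_lock);
			return (EINTR);		/* interrupted by a signal */
		}
	}
	mutex_exit(&xsp->xs_lock);
	return (0);
}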
/*
 * Mark the current thread as sleeping on a shuttle object, and
 * switch to a new thread.
 * No locks other than 'l' should be held at this point.
 */
void
shuttle_swtch(kmutex_t *l)
{
	klwp_t	*lwp = ttolwp(curthread);

	thread_lock(curthread);
	disp_lock_enter_high(&shuttle_lock);
	lwp->lwp_asleep = 1;			/* /proc */
	lwp->lwp_sysabort = 0;			/* /proc */
	lwp->lwp_ru.nvcsw++;
	curthread->t_flag |= T_WAKEABLE;
	curthread->t_sobj_ops = &shuttle_sobj_ops;
	curthread->t_wchan0 = (caddr_t)1;
	CL_INACTIVE(curthread);
	DTRACE_SCHED(sleep);
	THREAD_SLEEP(curthread, &shuttle_lock);
	(void) new_mstate(curthread, LMS_SLEEP);
	disp_lock_exit_high(&shuttle_lock);
	mutex_exit(l);
	if (ISSIG(curthread, JUSTLOOKING) || MUSTRETURN(curproc, curthread))
		setrun(curthread);
	swtch();
	/*
	 * Caller must check for ISSIG/lwp_sysabort conditions
	 * and clear lwp->lwp_asleep/lwp->lwp_sysabort
	 */
}
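
/*
 * Hypothetical sketch (not from the original source) of the caller-side
 * obligation noted at the end of shuttle_swtch(): after the return, the
 * caller detects a pending signal or forced return and clears the /proc
 * bookkeeping fields itself.  example_shuttle_wait() and example_lock are
 * illustrative names only.
 */
static int
example_shuttle_wait(kmutex_t *example_lock)
{
	klwp_t *lwp = ttolwp(curthread);
	int error = 0;

	mutex_enter(example_lock);
	shuttle_swtch(example_lock);	/* returns with example_lock dropped */

	if (ISSIG(curthread, FORREAL) || lwp->lwp_sysabort ||
	    MUSTRETURN(curproc, curthread))
		error = EINTR;
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	return (error);
}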
/*
 * Mark the specified thread as once again sleeping on a shuttle object.  This
 * routine is called to put a server thread -- one that was dequeued but for
 * which shuttle_resume() was _not_ called -- back to sleep on a shuttle
 * object.  Because we don't hit the sched:::wakeup DTrace probe until
 * shuttle_resume(), we do _not_ have a sched:::sleep probe here.
 */
void
shuttle_sleep(kthread_t *t)
{
	klwp_t	*lwp = ttolwp(t);
	proc_t	*p = ttoproc(t);

	thread_lock(t);
	disp_lock_enter_high(&shuttle_lock);
	if (lwp != NULL) {
		lwp->lwp_asleep = 1;		/* /proc */
		lwp->lwp_sysabort = 0;		/* /proc */
		lwp->lwp_ru.nvcsw++;
	}
	t->t_flag |= T_WAKEABLE;
	t->t_sobj_ops = &shuttle_sobj_ops;
	t->t_wchan0 = (caddr_t)1;
	CL_INACTIVE(t);
	ASSERT(t->t_mstate == LMS_SLEEP);
	THREAD_SLEEP(t, &shuttle_lock);
	disp_lock_exit_high(&shuttle_lock);
	if (lwp && (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t)))
		setrun(t);
}
int
turnstile_block(turnstile_t *ts, int qnum, void *sobj, sobj_ops_t *sobj_ops,
    kmutex_t *mp, lwp_timer_t *lwptp)
{
	kthread_t *owner;
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	turnstile_chain_t *tc = &TURNSTILE_CHAIN(sobj);
	int error = 0;
	int loser = 0;

	ASSERT(DISP_LOCK_HELD(&tc->tc_lock));
	ASSERT(mp == NULL || IS_UPI(mp));
	ASSERT((SOBJ_TYPE(sobj_ops) == SOBJ_USER_PI) ^ (mp == NULL));

	thread_lock_high(t);

	if (ts == NULL) {
		/*
		 * This is the first thread to block on this sobj.
		 * Take its attached turnstile and add it to the hash chain.
		 */
		ts = t->t_ts;
		ts->ts_sobj = sobj;
		ts->ts_next = tc->tc_first;
		tc->tc_first = ts;
		ASSERT(ts->ts_waiters == 0);
	} else {
		/*
		 * Another thread has already donated its turnstile
		 * to block on this sobj, so ours isn't needed.
		 * Stash it on the active turnstile's freelist.
		 */
		turnstile_t *myts = t->t_ts;
		myts->ts_free = ts->ts_free;
		ts->ts_free = myts;
		t->t_ts = ts;
		ASSERT(ts->ts_sobj == sobj);
		ASSERT(ts->ts_waiters > 0);
	}

	/*
	 * Put the thread to sleep.
	 */
	ASSERT(t != CPU->cpu_idle_thread);
	ASSERT(CPU_ON_INTR(CPU) == 0);
	ASSERT(t->t_wchan0 == NULL && t->t_wchan == NULL);
	ASSERT(t->t_state == TS_ONPROC);

	if (SOBJ_TYPE(sobj_ops) == SOBJ_USER_PI) {
		curthread->t_flag |= T_WAKEABLE;
	}
	CL_SLEEP(t);		/* assign kernel priority */
	THREAD_SLEEP(t, &tc->tc_lock);
	t->t_wchan = sobj;
	t->t_sobj_ops = sobj_ops;
	DTRACE_SCHED(sleep);

	if (lwp != NULL) {
		lwp->lwp_ru.nvcsw++;
		(void) new_mstate(t, LMS_SLEEP);
		if (SOBJ_TYPE(sobj_ops) == SOBJ_USER_PI) {
			lwp->lwp_asleep = 1;
			lwp->lwp_sysabort = 0;
			/*
			 * make wchan0 non-zero to conform to the rule that
			 * threads blocking for user-level objects have a
			 * non-zero wchan0: this prevents spurious wake-ups
			 * by, for example, /proc.
			 */
			t->t_wchan0 = (caddr_t)1;
		}
	}
	ts->ts_waiters++;
	sleepq_insert(&ts->ts_sleepq[qnum], t);

	if (SOBJ_TYPE(sobj_ops) == SOBJ_MUTEX &&
	    SOBJ_OWNER(sobj_ops, sobj) == NULL)
		panic("turnstile_block(%p): unowned mutex", (void *)ts);

	/*
	 * Follow the blocking chain to its end, willing our priority to
	 * everyone who's in our way.
	 */
	while (t->t_sobj_ops != NULL &&
	    (owner = SOBJ_OWNER(t->t_sobj_ops, t->t_wchan)) != NULL) {
		if (owner == curthread) {
			if (SOBJ_TYPE(sobj_ops) != SOBJ_USER_PI) {
				panic("Deadlock: cycle in blocking chain");
			}
			/*
			 * If the cycle we've encountered ends in mp,
			 * then we know it isn't a 'real' cycle because
			 * we're going to drop mp before we go to sleep.
			 * Moreover, since we've come full circle we know
			 * that we must have willed priority to everyone
			 * in our way.  Therefore, we can break out now.
			 */
			if (t->t_wchan == (void *)mp)
				break;

			if (loser)
				lock_clear(&turnstile_loser_lock);
			/*
			 * For SOBJ_USER_PI, a cycle is an application
			 * deadlock which needs to be communicated
			 * back to the application.
			 */
			thread_unlock_nopreempt(t);
			mutex_exit(mp);

			setrun(curthread);
			swtch(); /* necessary to transition state */
			curthread->t_flag &= ~T_WAKEABLE;
			if (lwptp->lwpt_id != 0)
				(void) lwp_timer_dequeue(lwptp);
			setallwatch();
			lwp->lwp_asleep = 0;
			lwp->lwp_sysabort = 0;
			return (EDEADLK);
		}
		if (!turnstile_interlock(t->t_lockp, &owner->t_lockp)) {
			/*
			 * If we failed to grab the owner's thread lock,
			 * turnstile_interlock() will have dropped t's
			 * thread lock, so at this point we don't even know
			 * that 't' exists anymore.  The simplest solution
			 * is to restart the entire priority inheritance dance
			 * from the beginning of the blocking chain, since
			 * we *do* know that 'curthread' still exists.
			 * Application of priority inheritance is idempotent,
			 * so it's OK that we're doing it more than once.
			 * Note also that since we've dropped our thread lock,
			 * we may already have been woken up; if so, our
			 * t_sobj_ops will be NULL, the loop will terminate,
			 * and the call to swtch() will be a no-op.  Phew.
			 *
			 * There is one further complication: if two (or more)
			 * threads keep trying to grab the turnstile locks out
			 * of order and keep losing the race to another thread,
			 * these "dueling losers" can livelock the system.
			 * Therefore, once we get into this rare situation,
			 * we serialize all the losers.
			 */
			if (loser == 0) {
				loser = 1;
				lock_set(&turnstile_loser_lock);
			}
			t = curthread;
			thread_lock_high(t);
			continue;
		}

		/*
		 * We now have the owner's thread lock.  If we are traversing
		 * from non-SOBJ_USER_PI ops to SOBJ_USER_PI ops, then we know
		 * that we have caught the thread while in the TS_SLEEP state,
		 * but holding mp.  We know that this situation is transient
		 * (mp will be dropped before the holder actually sleeps on
		 * the SOBJ_USER_PI sobj), so we will spin waiting for mp to
		 * be dropped.  Then, as in the turnstile_interlock() failure
		 * case, we will restart the priority inheritance dance.
		 */
		if (SOBJ_TYPE(t->t_sobj_ops) != SOBJ_USER_PI &&
		    owner->t_sobj_ops != NULL &&
		    SOBJ_TYPE(owner->t_sobj_ops) == SOBJ_USER_PI) {
			kmutex_t *upi_lock = (kmutex_t *)t->t_wchan;

			ASSERT(IS_UPI(upi_lock));
			ASSERT(SOBJ_TYPE(t->t_sobj_ops) == SOBJ_MUTEX);

			if (t->t_lockp != owner->t_lockp)
				thread_unlock_high(owner);
			thread_unlock_high(t);
			if (loser)
				lock_clear(&turnstile_loser_lock);

			while (mutex_owner(upi_lock) == owner) {
				SMT_PAUSE();
				continue;
			}

			if (loser)
				lock_set(&turnstile_loser_lock);
			t = curthread;
			thread_lock_high(t);
			continue;
		}

		turnstile_pi_inherit(t->t_ts, owner, DISP_PRIO(t));
		if (t->t_lockp != owner->t_lockp)
			thread_unlock_high(t);
		t = owner;
	}

	if (loser)
		lock_clear(&turnstile_loser_lock);

	/*
	 * Note: 't' and 'curthread' were synonymous before the loop above,
	 * but now they may be different.  ('t' is now the last thread in
	 * the blocking chain.)
	 */
	if (SOBJ_TYPE(sobj_ops) == SOBJ_USER_PI) {
		ushort_t s = curthread->t_oldspl;
		int timedwait = 0;
		uint_t imm_timeout = 0;
		clock_t tim = -1;

		thread_unlock_high(t);
		if (lwptp->lwpt_id != 0) {
			/*
			 * We enqueued a timeout.  If it has already fired,
			 * lwptp->lwpt_imm_timeout has been set with cas,
			 * so fetch it with cas.
			 */
			timedwait = 1;
			imm_timeout =
			    atomic_cas_uint(&lwptp->lwpt_imm_timeout, 0, 0);
		}
		mutex_exit(mp);
		splx(s);

		if (ISSIG(curthread, JUSTLOOKING) ||
		    MUSTRETURN(p, curthread) || imm_timeout)
			setrun(curthread);
		swtch();
		curthread->t_flag &= ~T_WAKEABLE;
		if (timedwait)
			tim = lwp_timer_dequeue(lwptp);
		setallwatch();
		if (ISSIG(curthread, FORREAL) || lwp->lwp_sysabort ||
		    MUSTRETURN(p, curthread))
			error = EINTR;
		else if (imm_timeout || (timedwait && tim == -1))
			error = ETIME;
		lwp->lwp_sysabort = 0;
		lwp->lwp_asleep = 0;
	} else {
		thread_unlock_nopreempt(t);
		swtch();
	}

	return (error);
}
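
/*
 * Hypothetical sketch (not from the original source) of how a kernel
 * synchronization object's slow path typically drives turnstile_block():
 * turnstile_lookup() returns the sobj's turnstile (NULL if nobody is blocked
 * yet) with the turnstile chain lock held, which is the state the ASSERT at
 * the top of turnstile_block() expects; turnstile_exit() is the bail-out
 * path that simply drops that lock, while turnstile_block() drops it itself
 * before sleeping.  example_sobj_enter(), example_try_enter() and
 * example_sobj_ops are illustrative names only.
 */
static int example_try_enter(void *);
static sobj_ops_t example_sobj_ops;

static void
example_sobj_enter(void *sobj)
{
	turnstile_t *ts;

	for (;;) {
		if (example_try_enter(sobj))	/* fast path succeeded */
			return;
		ts = turnstile_lookup(sobj);	/* chain lock now held */
		if (example_try_enter(sobj)) {
			turnstile_exit(sobj);	/* just drop the chain lock */
			return;
		}
		/* Sleep; turnstile_block() releases the chain lock itself. */
		(void) turnstile_block(ts, TS_WRITER_Q, sobj,
		    &example_sobj_ops, NULL, NULL);
	}
}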
/*
 * Mark the current thread as sleeping on a shuttle object, and
 * resume the specified thread.  The 't' thread must be marked as ONPROC.
 *
 * No locks other than 'l' should be held at this point.
 */
void
shuttle_resume(kthread_t *t, kmutex_t *l)
{
	klwp_t	*lwp = ttolwp(curthread);
	cpu_t	*cp;
	disp_lock_t *oldtlp;

	thread_lock(curthread);
	disp_lock_enter_high(&shuttle_lock);
	if (lwp != NULL) {
		lwp->lwp_asleep = 1;		/* /proc */
		lwp->lwp_sysabort = 0;		/* /proc */
		lwp->lwp_ru.nvcsw++;
	}
	curthread->t_flag |= T_WAKEABLE;
	curthread->t_sobj_ops = &shuttle_sobj_ops;
	/*
	 * setting cpu_dispthread before changing thread state
	 * so that kernel preemption will be deferred to after swtch_to()
	 */
	cp = CPU;
	cp->cpu_dispthread = t;
	cp->cpu_dispatch_pri = DISP_PRIO(t);
	/*
	 * Set the wchan0 field so that /proc won't just do a setrun
	 * on this thread when trying to stop a process.  Instead,
	 * /proc will mark the thread as VSTOPPED similar to threads
	 * that are blocked on user level condition variables.
	 */
	curthread->t_wchan0 = (caddr_t)1;
	CL_INACTIVE(curthread);
	DTRACE_SCHED1(wakeup, kthread_t *, t);
	DTRACE_SCHED(sleep);
	THREAD_SLEEP(curthread, &shuttle_lock);
	disp_lock_exit_high(&shuttle_lock);

	/*
	 * Update ustate records (there is no waitrq obviously)
	 */
	(void) new_mstate(curthread, LMS_SLEEP);

	thread_lock_high(t);
	oldtlp = t->t_lockp;

	restore_mstate(t);

	t->t_flag &= ~T_WAKEABLE;
	t->t_wchan0 = NULL;
	t->t_sobj_ops = NULL;

	/*
	 * Make sure we end up on the right CPU if we are dealing with bound
	 * CPU's or processor partitions.
	 */
	if (t->t_bound_cpu != NULL || t->t_cpupart != cp->cpu_part) {
		aston(t);
		cp->cpu_runrun = 1;
	}

	/*
	 * We re-assign t_disp_queue and t_lockp of 't' here because
	 * 't' could have been preempted.
	 */
	if (t->t_disp_queue != cp->cpu_disp) {
		t->t_disp_queue = cp->cpu_disp;
		thread_onproc(t, cp);
	}

	/*
	 * We can't call thread_unlock_high() here because t's thread lock
	 * could have changed by thread_onproc() call above to point to
	 * CPU->cpu_thread_lock.
	 */
	disp_lock_exit_high(oldtlp);

	mutex_exit(l);

	/*
	 * Make sure we didn't receive any important events while
	 * we weren't looking
	 */
	if (lwp && (ISSIG(curthread, JUSTLOOKING) ||
	    MUSTRETURN(curproc, curthread)))
		setrun(curthread);

	swtch_to(t);
	/*
	 * Caller must check for ISSIG/lwp_sysabort conditions
	 * and clear lwp->lwp_asleep/lwp->lwp_sysabort
	 */
}
/*
 * Similar to sema_p except that it blocks at an interruptible
 * priority.  If a signal is present, return 1; otherwise return 0.
 */
int
sema_p_sig(ksema_t *sp)
{
	kthread_t	*t = curthread;
	klwp_t		*lwp = ttolwp(t);
	sema_impl_t	*s;
	disp_lock_t	*sqlp;

	if (lwp == NULL) {
		sema_p(sp);
		return (0);
	}

	s = (sema_impl_t *)sp;
	sqlp = &SQHASH(s)->sq_lock;
	disp_lock_enter(sqlp);
	ASSERT(s->s_count >= 0);
	while (s->s_count == 0) {
		proc_t *p = ttoproc(t);
		thread_lock_high(t);
		t->t_flag |= T_WAKEABLE;
		SEMA_BLOCK(s, sqlp);
		lwp->lwp_asleep = 1;
		lwp->lwp_sysabort = 0;
		thread_unlock_nopreempt(t);
		if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t))
			setrun(t);
		swtch();
		t->t_flag &= ~T_WAKEABLE;
		if (ISSIG(t, FORREAL) || lwp->lwp_sysabort ||
		    MUSTRETURN(p, t)) {
			kthread_t *sq, *tp;

			lwp->lwp_asleep = 0;
			lwp->lwp_sysabort = 0;
			disp_lock_enter(sqlp);
			sq = s->s_slpq;
			/*
			 * in case sema_v and interrupt happen
			 * at the same time, we need to pass the
			 * sema_v to the next thread.
			 */
			if ((sq != NULL) && (s->s_count > 0)) {
				tp = sq;
				ASSERT(THREAD_LOCK_HELD(tp));
				sq = sq->t_link;
				tp->t_link = NULL;
				DTRACE_SCHED1(wakeup, kthread_t *, tp);
				tp->t_sobj_ops = NULL;
				tp->t_wchan = NULL;
				ASSERT(tp->t_state == TS_SLEEP);
				CL_WAKEUP(tp);
				s->s_slpq = sq;
				disp_lock_exit_high(sqlp);
				thread_unlock(tp);
			} else {
				disp_lock_exit(sqlp);
			}
			return (1);
		}
		lwp->lwp_asleep = 0;
		disp_lock_enter(sqlp);
	}
	s->s_count--;
	disp_lock_exit(sqlp);
	return (0);
}
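
/*
 * Hypothetical usage sketch (not from the original source): sema_p_sig()
 * returns 1 when the wait is interrupted by a signal and 0 once the
 * semaphore has actually been acquired, so the usual caller maps the
 * non-zero return to EINTR and only calls sema_v() on the success path.
 * example_sema_op() is an illustrative name only.
 */
static int
example_sema_op(ksema_t *sp)
{
	if (sema_p_sig(sp) != 0)
		return (EINTR);		/* interrupted; semaphore not held */

	/* ... guarded work goes here ... */

	sema_v(sp);			/* release only if we acquired it */
	return (0);
}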
/*
 * Returns:
 *	Function result in order of precedence:
 *		 0 if a signal was received
 *		-1 if timeout occurred
 *		>0 if awakened via cv_signal() or cv_broadcast()
 *		   (returns time remaining)
 *
 * cv_timedwait_sig() is now part of the DDI.
 */
clock_t
cv_timedwait_sig(kcondvar_t *cvp, kmutex_t *mp, clock_t tim)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	timeout_id_t id;
	clock_t rval = 1;
	clock_t timeleft;
	int signalled = 0;

	if (panicstr)
		return (rval);

	/*
	 * If there is no lwp, then we don't need to wait for a signal.
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr)
		return (cv_timedwait(cvp, mp, tim));

	/*
	 * If tim is less than or equal to lbolt, then the timeout
	 * has already occurred.  So just check to see if there is a signal
	 * pending.  If so return 0 indicating that there is a signal pending.
	 * Else return -1 indicating that the timeout occurred.  No need to
	 * wait on anything.
	 */
	timeleft = tim - lbolt;
	if (timeleft <= 0) {
		lwp->lwp_asleep = 1;
		lwp->lwp_sysabort = 0;
		rval = -1;
		goto out;
	}

	/*
	 * Set the timeout and wait.
	 */
	id = realtime_timeout((void (*)(void *))setrun, t, timeleft);
	lwp->lwp_asleep = 1;
	lwp->lwp_sysabort = 0;
	thread_lock(t);
	cv_block_sig(t, (condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t) || (tim - lbolt <= 0))
		setrun(t);
	/* ASSERT(no locks are held) */
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	t->t_flag &= ~T_WAKEABLE;
	mutex_enter(mp);

	/*
	 * Untimeout the thread.  untimeout() returns -1 if the timeout has
	 * occurred, or else the time remaining.  If the time remaining is
	 * zero, the timeout fired between when we were awoken and when we
	 * called untimeout().  We will treat this as if the timeout
	 * has occurred and set rval to -1.
	 */
	rval = untimeout(id);
	if (rval <= 0)
		rval = -1;

	/*
	 * Check to see if a signal is pending.  If so, regardless of whether
	 * or not we were awoken due to the signal, the signal is now pending
	 * and a return of 0 has the highest priority.
	 */
out:
	if (ISSIG_PENDING(t, lwp, p)) {
		mutex_exit(mp);
		if (issig(FORREAL))
			rval = 0;
		mutex_enter(mp);
	}
	if (lwp->lwp_sysabort || MUSTRETURN(p, t))
		rval = 0;
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	if (rval <= 0 && signalled)	/* avoid consuming the cv_signal() */
		cv_signal(cvp);
	return (rval);
}
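
/*
 * Hypothetical usage sketch (not from the original source): a caller that
 * waits for a condition with both a deadline and signal sensitivity.  The
 * 'tim' argument above is an absolute lbolt value, so the deadline is
 * computed once, outside the retry loop; the three return classes map to
 * EINTR, ETIME and "re-check the condition".  example_timed_wait(), es_cv,
 * es_lock and es_done are illustrative names only; ddi_get_lbolt() and
 * drv_usectohz() are the standard DDI helpers for lbolt arithmetic.
 */
struct example_sync {
	kmutex_t	es_lock;
	kcondvar_t	es_cv;
	int		es_done;
};

static int
example_timed_wait(struct example_sync *esp, clock_t timeout_usec)
{
	clock_t deadline = ddi_get_lbolt() + drv_usectohz(timeout_usec);
	clock_t rv;

	mutex_enter(&esp->es_lock);
	while (!esp->es_done) {
		rv = cv_timedwait_sig(&esp->es_cv, &esp->es_lock, deadline);
		if (rv == 0) {			/* signal pending */
			mutex_exit(&esp->es_lock);
			return (EINTR);
		}
		if (rv == -1) {			/* deadline passed */
			mutex_exit(&esp->es_lock);
			return (ETIME);
		}
		/* rv > 0: woken via cv_signal()/cv_broadcast(); re-check */
	}
	mutex_exit(&esp->es_lock);
	return (0);
}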