int
lwp_park(struct timespec *ts, const void *hint)
{
	struct timespec tsx;
	sleepq_t *sq;
	kmutex_t *mp;
	wchan_t wchan;
	int timo, error;
	lwp_t *l;

	/* Fix up the given timeout value. */
	if (ts != NULL) {
		getnanotime(&tsx);
		timespecsub(ts, &tsx, &tsx);
		if (tsx.tv_sec < 0 || (tsx.tv_sec == 0 && tsx.tv_nsec <= 0))
			return ETIMEDOUT;
		if ((error = itimespecfix(&tsx)) != 0)
			return error;
		timo = tstohz(&tsx);
		KASSERT(timo != 0);
	} else
		timo = 0;

	/* Find and lock the sleep queue. */
	l = curlwp;
	wchan = lwp_park_wchan(l->l_proc, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	/*
	 * Before going the full route and blocking, check to see if an
	 * unpark op is pending.
	 */
	lwp_lock(l);
	if ((l->l_flag & (LW_CANCELLED | LW_UNPARKED)) != 0) {
		l->l_flag &= ~(LW_CANCELLED | LW_UNPARKED);
		lwp_unlock(l);
		mutex_spin_exit(mp);
		return EALREADY;
	}
	lwp_unlock_to(l, mp);
	l->l_biglocks = 0;
	sleepq_enqueue(sq, wchan, "parked", &lwp_park_sobj);
	error = sleepq_block(timo, true);
	switch (error) {
	case EWOULDBLOCK:
		error = ETIMEDOUT;
		break;
	case ERESTART:
		error = EINTR;
		break;
	default:
		/* nothing */
		break;
	}
	return error;
}
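/*
 * Illustrative sketch, not part of the original source: this version of
 * lwp_park() expects an absolute deadline and subtracts the current time
 * itself, so a caller parking for a relative interval must add "now"
 * first.  The helper name demo_park_for() and its error policy are
 * hypothetical.
 */
static int
demo_park_for(time_t sec, long nsec, const void *hint)
{
	struct timespec now, deadline;
	int error;

	deadline.tv_sec = sec;
	deadline.tv_nsec = nsec;
	getnanotime(&now);
	timespecadd(&now, &deadline, &deadline);	/* deadline = now + delta */

	error = lwp_park(&deadline, hint);
	/* EALREADY means an unpark was already pending: not a failure. */
	return (error == EALREADY ? 0 : error);
}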
/*
 * callout_halt:
 *
 *	Cancel a pending callout.  If in-flight, block until it completes.
 *	May not be called from a hard interrupt handler.  If the callout
 *	can take locks, the caller of callout_halt() must not hold any of
 *	those locks, otherwise the two could deadlock.  If 'interlock' is
 *	non-NULL and we must wait for the callout to complete, it will be
 *	released and re-acquired before returning.
 */
bool
callout_halt(callout_t *cs, void *interlock)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	struct callout_cpu *cc;
	struct lwp *l;
	kmutex_t *lock, *relock;
	bool expired;

	KASSERT(c->c_magic == CALLOUT_MAGIC);
	KASSERT(!cpu_intr_p());

	lock = callout_lock(c);
	relock = NULL;

	expired = ((c->c_flags & CALLOUT_FIRED) != 0);
	if ((c->c_flags & CALLOUT_PENDING) != 0)
		CIRCQ_REMOVE(&c->c_list);
	c->c_flags &= ~(CALLOUT_PENDING|CALLOUT_FIRED);

	l = curlwp;
	for (;;) {
		cc = c->c_cpu;
		if (__predict_true(cc->cc_active != c || cc->cc_lwp == l))
			break;
		if (interlock != NULL) {
			/*
			 * Avoid potential scheduler lock order problems by
			 * dropping the interlock without the callout lock
			 * held.
			 */
			mutex_spin_exit(lock);
			mutex_exit(interlock);
			relock = interlock;
			interlock = NULL;
		} else {
			/* XXX Better to do priority inheritance. */
			KASSERT(l->l_wchan == NULL);
			cc->cc_nwait++;
			cc->cc_ev_block.ev_count++;
			l->l_kpriority = true;
			sleepq_enter(&cc->cc_sleepq, l, cc->cc_lock);
			sleepq_enqueue(&cc->cc_sleepq, cc, "callout",
			    &sleep_syncobj);
			sleepq_block(0, false);
		}
		lock = callout_lock(c);
	}

	mutex_spin_exit(lock);
	if (__predict_false(relock != NULL))
		mutex_enter(relock);

	return expired;
}
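/*
 * Illustrative sketch, not part of the original source: the interlock
 * argument exists for callers that hold a lock the callout handler also
 * takes.  Passing it lets callout_halt() drop and re-acquire that lock
 * while waiting for an in-flight handler, avoiding the deadlock described
 * in the comment above.  The softc layout and names here are hypothetical.
 */
struct demo_softc {
	kmutex_t	sc_lock;	/* also taken by the callout handler */
	callout_t	sc_tick;
	bool		sc_dying;
};

static void
demo_detach(struct demo_softc *sc)
{
	mutex_enter(&sc->sc_lock);
	sc->sc_dying = true;	/* handler re-checks this under sc_lock */
	/* sc_lock is released and re-acquired if the handler is running. */
	callout_halt(&sc->sc_tick, &sc->sc_lock);
	mutex_exit(&sc->sc_lock);
	callout_destroy(&sc->sc_tick);
}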
int
lwp_park(clockid_t clock_id, int flags, struct timespec *ts, const void *hint)
{
	sleepq_t *sq;
	kmutex_t *mp;
	wchan_t wchan;
	int timo, error;
	lwp_t *l;

	if (ts != NULL) {
		if ((error = ts2timo(clock_id, flags, ts, &timo, NULL)) != 0)
			return error;
		KASSERT(timo != 0);
	} else {
		timo = 0;
	}

	/* Find and lock the sleep queue. */
	l = curlwp;
	wchan = lwp_park_wchan(l->l_proc, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	/*
	 * Before going the full route and blocking, check to see if an
	 * unpark op is pending.
	 */
	lwp_lock(l);
	if ((l->l_flag & (LW_CANCELLED | LW_UNPARKED)) != 0) {
		l->l_flag &= ~(LW_CANCELLED | LW_UNPARKED);
		lwp_unlock(l);
		mutex_spin_exit(mp);
		return EALREADY;
	}
	lwp_unlock_to(l, mp);
	l->l_biglocks = 0;
	sleepq_enqueue(sq, wchan, "parked", &lwp_park_sobj);
	error = sleepq_block(timo, true);
	switch (error) {
	case EWOULDBLOCK:
		error = ETIMEDOUT;
		break;
	case ERESTART:
		error = EINTR;
		break;
	default:
		/* nothing */
		break;
	}
	return error;
}
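/*
 * Illustrative sketch, not part of the original source: with the new
 * signature the caller picks the clock and may pass a relative or an
 * absolute timeout; ts2timo() performs the validation and tick
 * conversion that the old version open-coded.  demo_park_until() is a
 * hypothetical wrapper parking until an absolute CLOCK_MONOTONIC
 * deadline.
 */
static int
demo_park_until(struct timespec *deadline, const void *hint)
{
	int error;

	/* TIMER_ABSTIME: 'deadline' is absolute, not an interval. */
	error = lwp_park(CLOCK_MONOTONIC, TIMER_ABSTIME, deadline, hint);
	return (error == EALREADY ? 0 : error);
}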