/*
 * sema_v: the semaphore's count is incremented by one.  If a thread
 * is blocked on the semaphore, the first waiter on the sleep queue is
 * awakened and re-tries to acquire the semaphore.
 */
void
sema_v(ksema_t *sp)
{
	sema_impl_t	*s;
	kthread_t	*sq, *tp;
	disp_lock_t	*sqlp;

	s = (sema_impl_t *)sp;
	sqlp = &SQHASH(s)->sq_lock;
	disp_lock_enter(sqlp);
	if (panicstr) {
		/* system is panicking: skip the wakeup machinery entirely */
		disp_lock_exit(sqlp);
		return;
	}
	s->s_count++;
	sq = s->s_slpq;
	if (sq != NULL) {
		/* Unlink the first waiter from the semaphore's sleep queue. */
		tp = sq;
		ASSERT(THREAD_LOCK_HELD(tp));
		sq = sq->t_link;
		tp->t_link = NULL;
		DTRACE_SCHED1(wakeup, kthread_t *, tp);
		/* Detach the thread from the synchronization object. */
		tp->t_sobj_ops = NULL;
		tp->t_wchan = NULL;
		ASSERT(tp->t_state == TS_SLEEP);
		/* Let the thread's scheduling class make it runnable. */
		CL_WAKEUP(tp);
		s->s_slpq = sq;
		/*
		 * Drop the sleep queue lock with the "high" variant, then
		 * release tp's thread lock via thread_unlock(), which also
		 * restores the priority level.  Order matters here: tp's
		 * thread lock must be the last one released.
		 */
		disp_lock_exit_high(sqlp);
		thread_unlock(tp);
	} else {
		disp_lock_exit(sqlp);
	}
}
/*
 * Wake up at most one thread sleeping on the condition variable.
 */
void
cv_signal(kcondvar_t *cvp)
{
	condvar_impl_t *cp = (condvar_impl_t *)cvp;

	/* make sure the cv_waiters field looks sane */
	ASSERT(cp->cv_waiters <= CV_MAX_WAITERS);
	if (cp->cv_waiters > 0) {
		sleepq_head_t *sqh = SQHASH(cp);

		disp_lock_enter(&sqh->sq_lock);
		ASSERT(CPU_ON_INTR(CPU) == 0);
		if (cp->cv_waiters & CV_WAITERS_MASK) {
			/* Exact waiter count is known: decrement and wake. */
			kthread_t *t;

			cp->cv_waiters--;
			t = sleepq_wakeone_chan(&sqh->sq_queue, cp);
			/*
			 * If cv_waiters is non-zero (and less than
			 * CV_MAX_WAITERS) there should be a thread
			 * in the queue.
			 */
			ASSERT(t != NULL);
		} else if (sleepq_wakeone_chan(&sqh->sq_queue, cp) == NULL) {
			/*
			 * The masked value is zero, so cv_waiters has
			 * presumably saturated and no longer tracks an
			 * exact count; only reset it to zero once the
			 * sleep queue has actually drained (wakeone
			 * found no thread for this channel).
			 */
			cp->cv_waiters = 0;
		}
		disp_lock_exit(&sqh->sq_lock);
	}
}
/*
 * Prevent any new threads from entering the wait queue and make all
 * threads currently on the wait queue runnable.  After waitq_block()
 * completes, no threads should ever appear on the wait queue until it
 * is unblocked.
 */
void
waitq_block(waitq_t *wq)
{
	ASSERT(!wq->wq_blocked);
	disp_lock_enter(&wq->wq_lock);
	wq->wq_blocked = B_TRUE;
	disp_lock_exit(&wq->wq_lock);
	/* Drain any threads that were already queued before the block. */
	waitq_runall(wq);
	ASSERT(waitq_isempty(wq));
}
/*
 * Re-open the wait queue so that threads may be placed on it again.
 * The queue must currently be blocked, and therefore empty.
 */
void
waitq_unblock(waitq_t *wq)
{
	disp_lock_enter(&wq->wq_lock);

	ASSERT(wq->wq_blocked);
	ASSERT(waitq_isempty(wq));

	wq->wq_blocked = B_FALSE;

	disp_lock_exit(&wq->wq_lock);
}
/*
 * The callback function called for every cap on capped_projects list.
 * Decay cap usage by CAP_DECAY_FACTOR.
 * Add this cap project usage to its zone usage.
 * Kick off a thread from the cap waitq if cap is not reached.
 */
static void
cap_project_usage_walker(cpucap_t *cap, int64_t gen)
{
	zone_t		*zone = cap->cap_zone;
	hrtime_t	cap_usage = cap->cap_usage;	/* snapshot before decay */

	ASSERT(MUTEX_HELD(&caps_lock));
	ASSERT(cap->cap_project->kpj_cpucap == cap);
	ASSERT(zone == cap->cap_project->kpj_zone);
	ASSERT(CAP_ENABLED(cap));

	/*
	 * Set or clear the CAP_REACHED flag based on the current usage.
	 * Only projects having their own caps are ever marked as CAP_REACHED.
	 */
	cap_poke_waitq(cap, 0);

	/*
	 * Add project's CPU usage to our zone's CPU usage.
	 */
	if (ZONE_IS_CAPPED(zone)) {
		cpucap_t *zcap = zone->zone_cpucap;

		ASSERT(zcap->cap_zone == zone);

		/*
		 * If we haven't reset this zone's usage during this clock tick
		 * yet, then do it now. The cap_gen field is used to check
		 * whether this is the first zone's project we see during this
		 * tick or a subsequent one.
		 */
		if (zcap->cap_gen != gen) {
			/* Track the high-water mark before zeroing. */
			if (zcap->cap_usage > zcap->cap_maxusage)
				zcap->cap_maxusage = zcap->cap_usage;
			zcap->cap_usage = 0;
			zcap->cap_gen = gen;
		}
		DTRACE_PROBE2(cpucaps__zusage, cpucap_t *, zcap,
		    hrtime_t, cap_usage);
		zcap->cap_usage += cap_usage;
		/* Check for overflows */
		if (zcap->cap_usage < 0)
			zcap->cap_usage = MAX_USAGE - 1;
	}

	/*
	 * Decay project usage.
	 */
	disp_lock_enter(&cap->cap_usagelock);
	cap->cap_usage -= ROUND_SCALE(cap_usage, CAP_DECAY_FACTOR);
	disp_lock_exit(&cap->cap_usagelock);
}
/* * Grab the lock protecting the hash chain for sobj * and return the active turnstile for sobj, if any. */ turnstile_t * turnstile_lookup(void *sobj) { turnstile_t *ts; turnstile_chain_t *tc = &TURNSTILE_CHAIN(sobj); disp_lock_enter(&tc->tc_lock); for (ts = tc->tc_first; ts != NULL; ts = ts->ts_next) if (ts->ts_sobj == sobj) break; return (ts); }
/*
 * Remove the first thread from the wait queue and return it, or
 * return NULL if the queue is empty.
 */
static kthread_t *
waitq_takeone(waitq_t *wq)
{
	kthread_t *t;

	disp_lock_enter(&wq->wq_lock);

	t = wq->wq_first;
	if (t == NULL) {
		disp_lock_exit(&wq->wq_lock);
		return (NULL);
	}

	/*
	 * waitq_dequeue drops wait queue lock but leaves the CPU at high PIL.
	 */
	waitq_dequeue(wq, t);
	return (t);
}
/*
 * sema_p: the semaphore is granted when the semaphore's count is
 * greater than zero, and the caller blocks while the count is equal
 * to zero.
 */
void
sema_p(ksema_t *sp)
{
	sema_impl_t	*s;
	disp_lock_t	*sqlp;

	s = (sema_impl_t *)sp;
	sqlp = &SQHASH(s)->sq_lock;
	disp_lock_enter(sqlp);
	ASSERT(s->s_count >= 0);
	while (s->s_count == 0) {
		if (panicstr) {
			/* don't block during a panic; just return */
			disp_lock_exit(sqlp);
			return;
		}
		/*
		 * Block on the semaphore's sleep queue: take our own
		 * thread lock at high priority, enqueue ourselves via
		 * SEMA_BLOCK, then switch away.  sqlp is re-acquired
		 * after swtch() below, so SEMA_BLOCK must release or
		 * hand off the sleep queue lock.
		 */
		thread_lock_high(curthread);
		SEMA_BLOCK(s, sqlp);
		thread_unlock_nopreempt(curthread);
		swtch();
		/* awakened: re-take the lock and re-check the count */
		disp_lock_enter(sqlp);
	}
	s->s_count--;
	disp_lock_exit(sqlp);
}
/*
 * Wake every thread currently sleeping on the condition variable.
 */
void
cv_broadcast(kcondvar_t *cvp)
{
	condvar_impl_t *cp = (condvar_impl_t *)cvp;
	sleepq_head_t *sqh;

	/* the cv_waiters field must never exceed its maximum */
	ASSERT(cp->cv_waiters <= CV_MAX_WAITERS);

	if (cp->cv_waiters <= 0)
		return;

	sqh = SQHASH(cp);
	disp_lock_enter(&sqh->sq_lock);
	ASSERT(CPU_ON_INTR(CPU) == 0);
	sleepq_wakeall_chan(&sqh->sq_queue, cp);
	cp->cv_waiters = 0;
	disp_lock_exit(&sqh->sq_lock);
}
/*
 * Attempt to acquire the semaphore without blocking.  If the count is
 * greater than zero it is decremented and 1 is returned; otherwise 0
 * is returned and the semaphore is unchanged.
 */
int
sema_tryp(ksema_t *sp)
{
	sema_impl_t *s = (sema_impl_t *)sp;
	sleepq_head_t *sqh = SQHASH(s);
	int acquired = 0;

	disp_lock_enter(&sqh->sq_lock);
	if (s->s_count > 0) {
		s->s_count--;
		acquired = 1;
	}
	disp_lock_exit(&sqh->sq_lock);

	return (acquired);
}
/*
 * Similar to sema_p() except that it blocks at an interruptible
 * priority.  Returns 1 if the wait was interrupted by a signal (or a
 * forced syscall return), 0 once the semaphore has been acquired.
 * Threads with no lwp cannot take signals and fall back to sema_p().
 */
int
sema_p_sig(ksema_t *sp)
{
	kthread_t	*t = curthread;
	klwp_t		*lwp = ttolwp(t);
	sema_impl_t	*s;
	disp_lock_t	*sqlp;

	if (lwp == NULL) {
		/* no lwp: this thread cannot be signalled */
		sema_p(sp);
		return (0);
	}

	s = (sema_impl_t *)sp;
	sqlp = &SQHASH(s)->sq_lock;
	disp_lock_enter(sqlp);
	ASSERT(s->s_count >= 0);
	while (s->s_count == 0) {
		proc_t *p = ttoproc(t);

		/*
		 * Mark ourselves wakeable by signals and block on the
		 * semaphore's sleep queue.
		 */
		thread_lock_high(t);
		t->t_flag |= T_WAKEABLE;
		SEMA_BLOCK(s, sqlp);
		lwp->lwp_asleep = 1;
		lwp->lwp_sysabort = 0;
		thread_unlock_nopreempt(t);
		/*
		 * If a signal is already pending (or the syscall must
		 * return), make ourselves runnable again before switching
		 * so we don't sleep through it.
		 */
		if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t))
			setrun(t);
		swtch();
		t->t_flag &= ~T_WAKEABLE;
		if (ISSIG(t, FORREAL) || lwp->lwp_sysabort || MUSTRETURN(p, t)) {
			kthread_t *sq, *tp;

			lwp->lwp_asleep = 0;
			lwp->lwp_sysabort = 0;
			disp_lock_enter(sqlp);
			sq = s->s_slpq;
			/*
			 * in case sema_v and interrupt happen
			 * at the same time, we need to pass the
			 * sema_v to the next thread.
			 */
			if ((sq != NULL) && (s->s_count > 0)) {
				/* wake the next waiter, as sema_v() would */
				tp = sq;
				ASSERT(THREAD_LOCK_HELD(tp));
				sq = sq->t_link;
				tp->t_link = NULL;
				DTRACE_SCHED1(wakeup, kthread_t *, tp);
				tp->t_sobj_ops = NULL;
				tp->t_wchan = NULL;
				ASSERT(tp->t_state == TS_SLEEP);
				CL_WAKEUP(tp);
				s->s_slpq = sq;
				/* tp's thread lock must be released last */
				disp_lock_exit_high(sqlp);
				thread_unlock(tp);
			} else {
				disp_lock_exit(sqlp);
			}
			/* interrupted: semaphore was NOT acquired */
			return (1);
		}
		lwp->lwp_asleep = 0;
		/* awakened normally: re-take the lock and re-check count */
		disp_lock_enter(sqlp);
	}
	s->s_count--;
	disp_lock_exit(sqlp);
	return (0);
}