/*
 * docvwait: common backend for the cv_wait*() family.
 *
 * Waits on cv with interlock mtx held; if ts is non-NULL the wait is
 * bounded by that (relative) timeout.  Returns:
 *   0           normal wakeup
 *   EWOULDBLOCK timeout expired before wakeup
 *   EINTR       the lwp has been marked for forced exit (LW_RUMP_QEXIT)
 *
 * mtx is held on entry and on return; it is released around the actual
 * rumpuser hypercall wait (the hypervisor drops/reacquires it), and the
 * LOCKED()/UNLOCKED() calls keep the lock-accounting in sync with that.
 */
static int
docvwait(kcondvar_t *cv, kmutex_t *mtx, struct timespec *ts)
{
	struct lwp *l = curlwp;
	int rv;

	/* Already marked for exit?  Don't start a wait we'd never finish. */
	if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
		/*
		 * yield() here, someone might want the cpu
		 * to set a condition.  otherwise we'll just
		 * loop forever.
		 */
		yield();
		return EINTR;
	}

	/* The hypervisor will drop/retake mtx; account for that here. */
	UNLOCKED(mtx, false);

	/* Advertise the cv we sleep on (read by the exit/wakeup path). */
	l->l_private = cv;
	rv = 0;
	if (ts) {
		if (rumpuser_cv_timedwait(RUMPCV(cv), RUMPMTX(mtx),
		    ts->tv_sec, ts->tv_nsec))
			rv = EWOULDBLOCK;
	} else {
		rumpuser_cv_wait(RUMPCV(cv), RUMPMTX(mtx));
	}

	/* Hypervisor handed mtx back to us; resume lock accounting. */
	LOCKED(mtx, false);

	/*
	 * Check for QEXIT.  if so, we need to wait here until we
	 * are allowed to exit.
	 */
	if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
		struct proc *p = l->l_proc;

		UNLOCKED(mtx, false);
		mutex_exit(mtx); /* drop and retake later */

		/* Park on p_waitcv until the proc allows lwps to exit. */
		mutex_enter(p->p_lock);
		while ((p->p_sflag & PS_RUMP_LWPEXIT) == 0) {
			/* avoid recursion: raw hypercall, not cv_wait() */
			rumpuser_cv_wait(RUMPCV(&p->p_waitcv),
			    RUMPMTX(p->p_lock));
		}
		KASSERT(p->p_sflag & PS_RUMP_LWPEXIT);
		mutex_exit(p->p_lock);

		/* ok, we can exit and remove "reference" to l->private */

		mutex_enter(mtx);
		LOCKED(mtx, false);
		rv = EINTR;
	}
	l->l_private = NULL;

	return rv;
}
/*
 * cv_wait_sig: interruptible wait on a condition variable.
 *
 * The previous body called rumpuser_cv_wait() directly and always
 * returned 0, which bypassed docvwait() in this file: the
 * LW_RUMP_QEXIT forced-exit protocol, the l_private bookkeeping and
 * the LOCKED()/UNLOCKED() lock accounting were all skipped, and the
 * function could never deliver the EINTR its "interruptible" contract
 * implies.  Route through docvwait() instead.
 *
 * => cv:  condition variable to wait on
 * => mtx: interlock, held on entry and on return
 * => returns 0 on normal wakeup, EINTR if the lwp must exit
 */
int
cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
{

	return docvwait(cv, mtx, NULL);
}
/*
 * Block on a condvar which uses the scheduler lock of the current
 * virtual cpu as its rumpuser interlock.  The scheduler mutex itself
 * is acquired and released by the cpu schedule/unschedule path, so it
 * is not touched explicitly here.
 */
void
rump_schedlock_cv_wait(struct rumpuser_cv *cv)
{
	struct rumpcpu *rc;

	rc = cpuinfo_to_rumpcpu(curlwp->l_cpu);
	rumpuser_cv_wait(cv, rc->rcpu_mtx);
}
/*
 * Block on a condvar which uses the scheduler lock of the current
 * virtual cpu as its rumpuser interlock.  The rumpcpu for this lwp is
 * found by computing the cpu's index into the rump_cpus array and
 * using it to index rcpu_storage.
 *
 * NOTE(review): this appears to be a second definition of
 * rump_schedlock_cv_wait, duplicating the cpuinfo_to_rumpcpu() variant
 * earlier in this file — only one of the two can be compiled in; TODO
 * confirm which revision is intended and drop the other.
 */
void
rump_schedlock_cv_wait(struct rumpuser_cv *cv)
{
	struct cpu_info *ci = curlwp->l_cpu;
	struct rumpcpu *rcpu = &rcpu_storage[ci - &rump_cpus[0]];

	/* mutex will be taken and released in cpu schedule/unschedule */
	rumpuser_cv_wait(cv, rcpu->rcpu_mtx);
}
/*
 * cv_wait: wait on a condition variable.
 *
 * The previous body called rumpuser_cv_wait() directly, bypassing
 * docvwait() in this file and therefore skipping the LW_RUMP_QEXIT
 * forced-exit protocol, the l_private bookkeeping and the
 * LOCKED()/UNLOCKED() lock accounting that every other waiter goes
 * through.  Route through docvwait() for consistency; the return
 * value (0 or EINTR) is deliberately discarded since cv_wait is the
 * non-interruptible interface.
 *
 * => cv:  condition variable to wait on
 * => mtx: interlock, held on entry and on return
 */
void
cv_wait(kcondvar_t *cv, kmutex_t *mtx)
{

	(void)docvwait(cv, mtx, NULL);
}