/*
 * docvwait: common backend for the cv_wait family.
 *
 * Wait on cv with mtx held.  If ts is non-NULL it is an absolute/relative
 * timeout handed straight to the rumpuser hypercall (semantics defined by
 * rumpuser_cv_timedwait — NOTE(review): confirm whether relative or
 * absolute); ts == NULL means wait without a timeout.
 *
 * Returns 0 on wakeup, EWOULDBLOCK if the timed wait expired, or EINTR
 * if this LWP has been requested to exit (LW_RUMP_QEXIT).
 */
static int
docvwait(kcondvar_t *cv, kmutex_t *mtx, struct timespec *ts)
{
	struct lwp *l = curlwp;
	int rv;

	/* Exit requested?  Don't block at all, just bail out. */
	if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
		/*
		 * yield() here, someone might want the cpu
		 * to set a condition. otherwise we'll just
		 * loop forever.
		 */
		yield();
		return EINTR;
	}

	/* Lock-tracking bookkeeping: mtx is about to be released inside
	 * the hypercall — presumably pairs with LOCKED() below; confirm. */
	UNLOCKED(mtx, false);

	/* Advertise which cv we sleep on so others can wake us for exit. */
	l->l_private = cv;
	rv = 0;
	if (ts) {
		if (rumpuser_cv_timedwait(RUMPCV(cv), RUMPMTX(mtx),
		    ts->tv_sec, ts->tv_nsec))
			rv = EWOULDBLOCK;
	} else {
		rumpuser_cv_wait(RUMPCV(cv), RUMPMTX(mtx));
	}

	/* The hypercall re-acquired mtx before returning. */
	LOCKED(mtx, false);

	/*
	 * Check for QEXIT. if so, we need to wait here until we
	 * are allowed to exit.
	 */
	if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
		struct proc *p = l->l_proc;

		UNLOCKED(mtx, false);
		mutex_exit(mtx); /* drop and retake later */

		/* Park until the process is flagged ready for LWP exit. */
		mutex_enter(p->p_lock);
		while ((p->p_sflag & PS_RUMP_LWPEXIT) == 0) {
			/* avoid recursion */
			rumpuser_cv_wait(RUMPCV(&p->p_waitcv),
			    RUMPMTX(p->p_lock));
		}
		KASSERT(p->p_sflag & PS_RUMP_LWPEXIT);
		mutex_exit(p->p_lock);

		/* ok, we can exit and remove "reference" to l->private */
		mutex_enter(mtx);
		LOCKED(mtx, false);
		rv = EINTR;
	}
	/* No longer sleeping on cv. */
	l->l_private = NULL;

	return rv;
}
/*
 * cv_broadcast: wake every waiter sleeping on cv.
 * The rump virtual CPU acts as the interlock, so no mutex is taken here.
 */
void
cv_broadcast(kcondvar_t *cv)
{

	/* CPU == interlock */
	rumpuser_cv_broadcast(RUMPCV(cv));
}
/*
 * cv_wait_sig: interruptible-wait entry point.
 *
 * In this implementation the wait is never interrupted: it always
 * returns 0, so callers will not see EINTR from here.
 */
int
cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
{

	rumpuser_cv_wait(RUMPCV(cv), mtx->kmtx_mtx);
	return 0;
}
/*
 * cv_signal: wake one waiter sleeping on cv.
 * The rump virtual CPU acts as the interlock, so no mutex is taken here.
 */
void
cv_signal(kcondvar_t *cv)
{

	/* CPU == interlock */
	rumpuser_cv_signal(RUMPCV(cv));
}
/*
 * cv_timedwait: wait on cv with mtx held for at most "ticks" hardclock
 * ticks.  ticks == 0 means wait with no timeout (standard NetBSD
 * cv_timedwait semantics).
 *
 * Returns 0 on wakeup or EWOULDBLOCK if the timeout expired.
 */
int
cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{
	struct timespec ts;

#ifdef DIAGNOSTIC
	/* UPMTX presumably declares "upm" for this mtx — caller must own it. */
	UPMTX(mtx);
	KASSERT(upm->upm_owner == curlwp);
#endif

	/* Convert ticks to a timespec using the configured hz. */
	ts.tv_sec = ticks / hz;
	ts.tv_nsec = (ticks % hz) * (1000000000/hz);

	if (ticks == 0) {
		cv_wait(cv, mtx);
		return 0;
	} else {
		int rv;
		/*
		 * Drop the kernel mutex, then sleep under the scheduler
		 * lock.  NOTE(review): nothing may preempt us off the
		 * virtual CPU between these two calls — keep the order.
		 */
		mutex_exit(mtx);
		rv = rump_schedlock_cv_timedwait(RUMPCV(cv), &ts);
		mutex_enter(mtx);
		if (rv)
			return EWOULDBLOCK;
		else
			return 0;
	}
}
/*
 * cv_has_waiters: report whether any thread is currently blocked on cv.
 */
bool
cv_has_waiters(kcondvar_t *cv)
{
	int nwaiters;

	rumpuser_cv_has_waiters(RUMPCV(cv), &nwaiters);

	return nwaiters != 0;
}
/*
 * cv_has_waiters: true iff the hypervisor reports a positive waiter count.
 */
bool
cv_has_waiters(kcondvar_t *cv)
{
	int cnt;

	rumpuser_cv_has_waiters(RUMPCV(cv), &cnt);
	return cnt > 0;
}
/*
 * cv_timedwait: wait on cv with mtx held for at most "ticks" ticks.
 * ticks == 0 means wait without a timeout.
 *
 * NOTE(review): "ticks" is passed unconverted to the hypercall, and the
 * KASSERT pins hz to 100 — so the hypercall presumably interprets the
 * value as 10ms units.  Confirm against the rumpuser_cv_timedwait
 * contract before changing hz.
 */
int
cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{
#ifdef DIAGNOSTIC
	/* hz only needed for the assertion below. */
	extern int hz;
#endif

	if (ticks == 0) {
		cv_wait(cv, mtx);
		return 0;
	} else {
		KASSERT(hz == 100);
		return rumpuser_cv_timedwait(RUMPCV(cv),
		    mtx->kmtx_mtx, ticks);
	}
}
/*
 * cv_wait: release mtx and sleep on cv; mtx is re-held on return.
 * Requires rump_threads != 0 — with no threads nobody could ever
 * signal us.
 */
void
cv_wait(kcondvar_t *cv, kmutex_t *mtx)
{
#ifdef DIAGNOSTIC
	/* UPMTX presumably declares "upm"; caller must own the mutex. */
	UPMTX(mtx);
	KASSERT(upm->upm_owner == curlwp);
	if (rump_threads == 0)
		panic("cv_wait without threads");
#endif

	/*
	 * NOTE: we must atomically release the *CPU* here, i.e.
	 * nothing between mutex_exit and entering rumpuser condwait
	 * may preempt us from the virtual CPU.
	 */
	mutex_exit(mtx);
	rump_schedlock_cv_wait(RUMPCV(cv));
	mutex_enter(mtx);
}
/*
 * cv_is_valid: best-effort sanity check — we can only verify that the
 * underlying rumpuser cv pointer is non-NULL.
 */
bool
cv_is_valid(kcondvar_t *cv)
{

	return RUMPCV(cv) != NULL;
}
/*
 * cv_broadcast: wake all threads sleeping on cv.
 */
void
cv_broadcast(kcondvar_t *cv)
{

	rumpuser_cv_broadcast(RUMPCV(cv));
}
/*
 * cv_signal: wake a single thread sleeping on cv.
 */
void
cv_signal(kcondvar_t *cv)
{

	rumpuser_cv_signal(RUMPCV(cv));
}
/*
 * cv_destroy: release the hypervisor resources backing cv.
 * The cv must have no waiters when this is called.
 */
void
cv_destroy(kcondvar_t *cv)
{

	rumpuser_cv_destroy(RUMPCV(cv));
}
/*
 * cv_has_waiters: pass the hypervisor's answer straight through.
 * (This variant of rumpuser_cv_has_waiters returns the result directly
 * rather than via an out-parameter.)
 */
bool
cv_has_waiters(kcondvar_t *cv)
{

	return rumpuser_cv_has_waiters(RUMPCV(cv));
}
/*
 * cv_wait: release mtx and block on cv; the hypercall re-acquires mtx
 * before returning.
 */
void
cv_wait(kcondvar_t *cv, kmutex_t *mtx)
{

	rumpuser_cv_wait(RUMPCV(cv), mtx->kmtx_mtx);
}