/*
 * lwp_unpark:
 *
 *	Unpark a single LWP in the current process that is (or is about to
 *	be) parked on the wait channel derived from "hint".  Returns 0 on
 *	success or ESRCH if no LWP with the given ID exists in the process.
 *
 *	Locking: sleeptab_lookup() returns with the sleep-queue spin mutex
 *	(*mp) held; every path below must release it exactly once before
 *	touching p->p_lock.
 */
int
lwp_unpark(lwpid_t target, const void *hint)
{
	sleepq_t *sq;
	wchan_t wchan;
	kmutex_t *mp;
	proc_t *p;
	lwp_t *t;

	/*
	 * Easy case: search for the LWP on the sleep queue.  If
	 * it's parked, remove it from the queue and set running.
	 */
	p = curproc;
	wchan = lwp_park_wchan(p, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	/* t is NULL after the loop iff no matching LWP was found. */
	TAILQ_FOREACH(t, sq, l_sleepchain)
		if (t->l_proc == p && t->l_lid == target)
			break;

	if (__predict_true(t != NULL)) {
		sleepq_remove(sq, t);
		mutex_spin_exit(mp);
		return 0;
	}

	/*
	 * The LWP hasn't parked yet.  Take the hit and mark the
	 * operation as pending.
	 */
	mutex_spin_exit(mp);
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	/*
	 * It may not have parked yet, we may have raced, or it
	 * is parked on a different user sync object.
	 */
	lwp_lock(t);
	if (t->l_syncobj == &lwp_park_sobj) {
		/* Releases the LWP lock. */
		lwp_unsleep(t, true);
	} else {
		/*
		 * Set the operation pending.  The next call to _lwp_park
		 * will return early.
		 */
		t->l_flag |= LW_UNPARKED;
		lwp_unlock(t);
	}

	mutex_exit(p->p_lock);
	return 0;
}
/*
 * sleepq_unsleep:
 *
 *	Remove an LWP from its sleep queue and set it runnable again.
 *	Called with the LWP's mutex held; the mutex is released before
 *	returning when "cleanup" is true.  Returns the swap-in request
 *	value produced by sleepq_remove().
 */
u_int
sleepq_unsleep(lwp_t *l, bool cleanup)
{
	kmutex_t *lock = l->l_mutex;
	int need_swapin;

#ifndef T2EX
	KASSERT(lwp_locked(l, lock));
#endif
	KASSERT(l->l_wchan != NULL);

	need_swapin = sleepq_remove(l->l_sleepq, l);

	if (cleanup)
		mutex_spin_exit(lock);

	return need_swapin;
}
/*
 * sys__lwp_unpark_all:
 *
 *	Unpark a batch of LWPs in the calling process.  With a NULL target
 *	array, reports the per-call batch limit (LWP_UNPARK_MAX) via retval
 *	instead of unparking anything.  Returns 0, EINVAL for a bad count,
 *	or a copyin() error; individual unknown LWP IDs are silently skipped.
 */
int
sys__lwp_unpark_all(struct lwp *l, const struct sys__lwp_unpark_all_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const lwpid_t *)	targets;
		syscallarg(size_t)		ntargets;
		syscallarg(const void *)	hint;
	} */
	struct proc *p;
	struct lwp *t;
	sleepq_t *sq;
	wchan_t wchan;
	lwpid_t targets[32], *tp, *tpp, *tmax, target;
	int error;
	kmutex_t *mp;
	u_int ntargets;
	size_t sz;

	p = l->l_proc;
	ntargets = SCARG(uap, ntargets);

	if (SCARG(uap, targets) == NULL) {
		/*
		 * Let the caller know how much we are willing to do, and
		 * let it unpark the LWPs in blocks.
		 */
		*retval = LWP_UNPARK_MAX;
		return 0;
	}
	if (ntargets > LWP_UNPARK_MAX || ntargets == 0)
		return EINVAL;

	/*
	 * Copy in the target array.  If it's a small number of LWPs, then
	 * place the numbers on the stack.
	 */
	sz = sizeof(target) * ntargets;
	if (sz <= sizeof(targets))
		tp = targets;
	else {
		tp = kmem_alloc(sz, KM_SLEEP);
		/*
		 * NOTE(review): with KM_SLEEP this allocation presumably
		 * cannot return NULL, so this check looks like dead code —
		 * harmless, kept as-is.
		 */
		if (tp == NULL)
			return ENOMEM;
	}
	error = copyin(SCARG(uap, targets), tp, sz);
	if (error != 0) {
		if (tp != targets) {
			kmem_free(tp, sz);
		}
		return error;
	}

	/*
	 * All targets share one wait channel (per-process, per-hint), so
	 * one sleep queue lookup covers the whole batch.  The sleep-queue
	 * spin mutex (mp) is held across loop iterations except where it
	 * must be dropped to take p->p_lock (lock ordering).
	 */
	wchan = lwp_park_wchan(p, SCARG(uap, hint));
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	for (tmax = tp + ntargets, tpp = tp; tpp < tmax; tpp++) {
		target = *tpp;

		/*
		 * Easy case: search for the LWP on the sleep queue.  If
		 * it's parked, remove it from the queue and set running.
		 */
		TAILQ_FOREACH(t, sq, l_sleepchain)
			if (t->l_proc == p && t->l_lid == target)
				break;

		if (t != NULL) {
			sleepq_remove(sq, t);
			continue;
		}

		/*
		 * The LWP hasn't parked yet.  Take the hit and
		 * mark the operation as pending.
		 */
		mutex_spin_exit(mp);
		mutex_enter(p->p_lock);
		if ((t = lwp_find(p, target)) == NULL) {
			mutex_exit(p->p_lock);
			/* Reacquire the queue lock before the next pass. */
			mutex_spin_enter(mp);
			continue;
		}
		lwp_lock(t);

		/*
		 * It may not have parked yet, we may have raced, or
		 * it is parked on a different user sync object.
		 */
		if (t->l_syncobj == &lwp_park_sobj) {
			/* Releases the LWP lock. */
			lwp_unsleep(t, true);
		} else {
			/*
			 * Set the operation pending.  The next call to
			 * _lwp_park will return early.
			 */
			t->l_flag |= LW_UNPARKED;
			lwp_unlock(t);
		}

		mutex_exit(p->p_lock);
		mutex_spin_enter(mp);
	}

	mutex_spin_exit(mp);
	if (tp != targets)
		kmem_free(tp, sz);

	return 0;
}
void sleepq_enqueue(sleepq_t *sq, wchan_t wchan, const char *wmesg, syncobj_t *sobj) { struct lwp *l = curlwp; #ifndef T2EX if (__predict_false(sobj != &sleep_syncobj || strcmp(wemsg, "callout"))) { #else if (__predict_false(sobj != &sleep_syncobj || (strcmp(wmesg, "callout") != 0 && strcmp(wmesg, "select") != 0 && strcmp(wmesg, "pollsock") != 0))) { #endif panic("sleepq: unsupported enqueue"); } /* * Remove an LWP from a sleep queue if the LWP was deleted while in * the waiting state. */ if ( l->l_sleepq != NULL && (l->l_stat & LSSLEEP) != 0 ) { sleepq_remove(l->l_sleepq, l); } #ifndef T2EX l->l_syncobj = sobj; #endif l->l_wchan = wchan; l->l_sleepq = sq; #ifndef T2EX l->l_wmesg = wmesg; l->l_slptime = 0; #endif l->l_stat = LSSLEEP; #ifndef T2EX l->l_sleeperr = 0; #endif TAILQ_INSERT_TAIL(sq, l, l_sleepchain); } int sleepq_block(int timo, bool hatch) { struct lwp *l = curlwp; int error = 0; //KASSERT(timo == 0 && !hatch); if (timo != 0) { callout_schedule(&l->l_timeout_ch, timo); } #ifdef T2EX if ( l->l_mutex != NULL ) { mutex_exit(l->l_mutex); } #endif mutex_enter(&sq_mtx); while (l->l_wchan) { if ( hatch ) { error = cv_timedwait_sig( &sq_cv, &sq_mtx, timo ); } else { error = cv_timedwait( &sq_cv, &sq_mtx, timo ); } if (error == EINTR) { if (l->l_wchan) { TAILQ_REMOVE(l->l_sleepq, l, l_sleepchain); l->l_wchan = NULL; l->l_sleepq = NULL; } } } mutex_exit(&sq_mtx); #ifdef T2EX l->l_mutex = &spc_lock; #endif if (timo != 0) { /* * Even if the callout appears to have fired, we need to * stop it in order to synchronise with other CPUs. 
*/ if (callout_halt(&l->l_timeout_ch, NULL)) { error = EWOULDBLOCK; } } return error; } #ifdef T2EX lwp_t * sleepq_wake(sleepq_t *sq, wchan_t wchan, u_int expected, kmutex_t *mp) { struct lwp *l; bool found = false; TAILQ_FOREACH(l, sq, l_sleepchain) { if (l->l_wchan == wchan) { found = true; l->l_wchan = NULL; } } if (found) cv_broadcast(&sq_cv); mutex_spin_exit(mp); return NULL; } #else /* * sleepq_wake: * * Wake zero or more LWPs blocked on a single wait channel. */ lwp_t * sleepq_wake(sleepq_t *sq, wchan_t wchan, u_int expected, kmutex_t *mp) { lwp_t *l, *next; int swapin = 0; KASSERT(mutex_owned(mp)); for (l = TAILQ_FIRST(sq); l != NULL; l = next) { KASSERT(l->l_sleepq == sq); KASSERT(l->l_mutex == mp); next = TAILQ_NEXT(l, l_sleepchain); if (l->l_wchan != wchan) continue; swapin |= sleepq_remove(sq, l); if (--expected == 0) break; } mutex_spin_exit(mp); #if 0 /* * If there are newly awakend threads that need to be swapped in, * then kick the swapper into action. */ if (swapin) uvm_kick_scheduler(); #endif return l; }
/*
 * poller_resume:
 *
 *	Switch from the scheduler context (main_uc) into the coroutine
 *	"ctx" via swapcontext(), then classify how the coroutine came
 *	back: still resumable (yielded), exited (dead), or an unknown
 *	state (fatal).  Updates the global "me" around the switch.
 *
 *	NOTE(review): "me" appears to be the scheduler's current-thread
 *	global; this function assumes single-threaded scheduling — confirm.
 */
int
poller_resume(mrkthr_ctx_t *ctx)
{
	int res;

	/*
	 * Can only be the result of yield or start, ie, the state cannot be
	 * dormant or resumed.
	 */
	if (!(ctx->co.state & CO_STATE_RESUMABLE)) {
		/* This is an error (currently no reason is known, though) */
		sleepq_remove(ctx);
		/* not sure if we can push it here ... */
		push_free_ctx(ctx);
		TRRET(RESUME + 1);
	}

	ctx->co.state = CO_STATE_RESUMED;
	me = ctx;

#ifdef TRACE_VERBOSE
	CTRACE("resuming >>>");
	//mrkthr_dump(ctx);
#endif

	PROFILE_STOP(mrkthr_sched0_p);
	PROFILE_START(mrkthr_swap_p);
	/* Control returns here when the coroutine yields or exits. */
	res = swapcontext(&main_uc, &me->co.uc);
	PROFILE_STOP(mrkthr_swap_p);
	PROFILE_START(mrkthr_sched0_p);

#ifdef TRACE_VERBOSE
	CTRACE("back from resume <<<");
	//mrkthr_dump(me);
#endif

	/*
	 * NOTE(review): errno is not reset before swapcontext(), so a
	 * stale EINTR from an earlier call could trigger this branch —
	 * confirm intended.
	 */
	if (errno == EINTR) {
		CTRACE("ignoring EINTR");
#ifdef TRACE_VERBOSE
		//mrkthr_dump(ctx);
#endif
		errno = 0;
		return 0;
	}
	/* no one in the thread context may touch me */
	assert(me == ctx);
	me = NULL;

	if (ctx->co.state & CO_STATE_RESUMABLE) {
		/* Coroutine yielded; report its resume code. */
		return ctx->co.rc;
	} else if (ctx->co.state == CO_STATE_RESUMED) {
		/*
		 * This is the case of the exited (dead) thread.
		 */
#ifdef TRACE_VERBOSE
		CTRACE("Assuming exited (dead) ...");
		//mrkthr_dump(ctx);
#endif
		sleepq_remove(ctx);
		push_free_ctx(ctx);
		//TRRET(RESUME + 2);
		//return MRKTHR_CO_RC_EXITED;
		return ctx->co.rc;
	} else {
		CTRACE("Unknown case:");
		mrkthr_dump(ctx);
		/* presumably FAIL() aborts, making the return below unreachable */
		FAIL("resume");
	}

	return res;
}