int
lwp_unpark(lwpid_t target, const void *hint)
{
	sleepq_t *sq;
	wchan_t wchan;
	kmutex_t *mp;
	proc_t *p;
	lwp_t *t;

	/*
	 * Easy case: search for the LWP on the sleep queue.  If
	 * it's parked, remove it from the queue and set running.
	 */
	p = curproc;
	wchan = lwp_park_wchan(p, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	TAILQ_FOREACH(t, sq, l_sleepchain)
		if (t->l_proc == p && t->l_lid == target)
			break;

	if (__predict_true(t != NULL)) {
		sleepq_remove(sq, t);
		mutex_spin_exit(mp);
		return 0;
	}

	/*
	 * The LWP hasn't parked yet.  Take the hit and mark the
	 * operation as pending.
	 */
	mutex_spin_exit(mp);

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	/*
	 * It may not have parked yet, we may have raced, or it
	 * is parked on a different user sync object.
	 */
	lwp_lock(t);
	if (t->l_syncobj == &lwp_park_sobj) {
		/* Releases the LWP lock. */
		lwp_unsleep(t, true);
	} else {
		/*
		 * Set the operation pending.  The next call to _lwp_park
		 * will return early.
		 */
		t->l_flag |= LW_UNPARKED;
		lwp_unlock(t);
	}

	mutex_exit(p->p_lock);
	return 0;
}
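/*
 * Sketch (not part of the source above): how a userspace primitive
 * might drive lwp_unpark() through the _lwp_unpark() libc stub.  The
 * wake_one() wrapper and the "obj" key are illustrative assumptions;
 * only _lwp_unpark() itself is the real interface.
 */
#include <lwp.h>
#include <errno.h>

static int
wake_one(lwpid_t who, const void *obj)
{

	/*
	 * The hint must match what the waiter passed to _lwp_park() so
	 * both sides hash to the same sleep queue.  If "who" has not
	 * parked yet, the kernel marks the unpark pending instead.
	 */
	if (_lwp_unpark(who, obj) == -1)
		return errno;	/* e.g. ESRCH: no such LWP in this process */
	return 0;
}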
int
lwp_park(struct timespec *ts, const void *hint)
{
	struct timespec tsx;
	sleepq_t *sq;
	kmutex_t *mp;
	wchan_t wchan;
	int timo, error;
	lwp_t *l;

	/* Fix up the given timeout value. */
	if (ts != NULL) {
		getnanotime(&tsx);
		timespecsub(ts, &tsx, &tsx);
		if (tsx.tv_sec < 0 || (tsx.tv_sec == 0 && tsx.tv_nsec <= 0))
			return ETIMEDOUT;
		if ((error = itimespecfix(&tsx)) != 0)
			return error;
		timo = tstohz(&tsx);
		KASSERT(timo != 0);
	} else
		timo = 0;

	/* Find and lock the sleep queue. */
	l = curlwp;
	wchan = lwp_park_wchan(l->l_proc, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	/*
	 * Before going the full route and blocking, check to see if an
	 * unpark op is pending.
	 */
	lwp_lock(l);
	if ((l->l_flag & (LW_CANCELLED | LW_UNPARKED)) != 0) {
		l->l_flag &= ~(LW_CANCELLED | LW_UNPARKED);
		lwp_unlock(l);
		mutex_spin_exit(mp);
		return EALREADY;
	}

	lwp_unlock_to(l, mp);
	l->l_biglocks = 0;
	sleepq_enqueue(sq, wchan, "parked", &lwp_park_sobj);
	error = sleepq_block(timo, true);
	switch (error) {
	case EWOULDBLOCK:
		error = ETIMEDOUT;
		break;
	case ERESTART:
		error = EINTR;
		break;
	default:
		/* nothing */
		break;
	}
	return error;
}
int
lwp_park(clockid_t clock_id, int flags, struct timespec *ts, const void *hint)
{
	sleepq_t *sq;
	kmutex_t *mp;
	wchan_t wchan;
	int timo, error;
	lwp_t *l;

	if (ts != NULL) {
		if ((error = ts2timo(clock_id, flags, ts, &timo, NULL)) != 0)
			return error;
		KASSERT(timo != 0);
	} else {
		timo = 0;
	}

	/* Find and lock the sleep queue. */
	l = curlwp;
	wchan = lwp_park_wchan(l->l_proc, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	/*
	 * Before going the full route and blocking, check to see if an
	 * unpark op is pending.
	 */
	lwp_lock(l);
	if ((l->l_flag & (LW_CANCELLED | LW_UNPARKED)) != 0) {
		l->l_flag &= ~(LW_CANCELLED | LW_UNPARKED);
		lwp_unlock(l);
		mutex_spin_exit(mp);
		return EALREADY;
	}

	lwp_unlock_to(l, mp);
	l->l_biglocks = 0;
	sleepq_enqueue(sq, wchan, "parked", &lwp_park_sobj);
	error = sleepq_block(timo, true);
	switch (error) {
	case EWOULDBLOCK:
		error = ETIMEDOUT;
		break;
	case ERESTART:
		error = EINTR;
		break;
	default:
		/* nothing */
		break;
	}
	return error;
}
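/*
 * Sketch (assumption-laden, not from the source): a relative timed
 * wait over the six-argument _lwp_park() stub that pairs with either
 * lwp_park() variant above.  The prototype used here matches NetBSD
 * releases that grew the clock_id/flags arguments; older libcs take
 * only (ts, unpark, hint, unparkhint).  The wait_on() name is an
 * illustrative assumption.
 */
#include <lwp.h>
#include <time.h>
#include <errno.h>
#include <stddef.h>

static int
wait_on(const void *obj, time_t sec)
{
	struct timespec ts;

	ts.tv_sec = sec;
	ts.tv_nsec = 0;
	/* flags == 0: ts is relative; TIMER_ABSTIME would make it absolute. */
	if (_lwp_park(CLOCK_MONOTONIC, 0, &ts, 0, obj, NULL) == 0)
		return 0;
	if (errno == EALREADY)
		return 0;	/* an unpark was already pending: consumed */
	return errno;		/* ETIMEDOUT or EINTR */
}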
int
sys__lwp_unpark_all(struct lwp *l, const struct sys__lwp_unpark_all_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const lwpid_t *)	targets;
		syscallarg(size_t)		ntargets;
		syscallarg(const void *)	hint;
	} */
	struct proc *p;
	struct lwp *t;
	sleepq_t *sq;
	wchan_t wchan;
	lwpid_t targets[32], *tp, *tpp, *tmax, target;
	int error;
	kmutex_t *mp;
	u_int ntargets;
	size_t sz;

	p = l->l_proc;
	ntargets = SCARG(uap, ntargets);

	if (SCARG(uap, targets) == NULL) {
		/*
		 * Let the caller know how much we are willing to do, and
		 * let it unpark the LWPs in blocks.
		 */
		*retval = LWP_UNPARK_MAX;
		return 0;
	}
	if (ntargets > LWP_UNPARK_MAX || ntargets == 0)
		return EINVAL;

	/*
	 * Copy in the target array.  If it's a small number of LWPs, then
	 * place the numbers on the stack.
	 */
	sz = sizeof(target) * ntargets;
	if (sz <= sizeof(targets))
		tp = targets;
	else {
		tp = kmem_alloc(sz, KM_SLEEP);
		if (tp == NULL)
			return ENOMEM;
	}
	error = copyin(SCARG(uap, targets), tp, sz);
	if (error != 0) {
		if (tp != targets)
			kmem_free(tp, sz);
		return error;
	}

	wchan = lwp_park_wchan(p, SCARG(uap, hint));
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	for (tmax = tp + ntargets, tpp = tp; tpp < tmax; tpp++) {
		target = *tpp;

		/*
		 * Easy case: search for the LWP on the sleep queue.  If
		 * it's parked, remove it from the queue and set running.
		 */
		TAILQ_FOREACH(t, sq, l_sleepchain)
			if (t->l_proc == p && t->l_lid == target)
				break;

		if (t != NULL) {
			sleepq_remove(sq, t);
			continue;
		}

		/*
		 * The LWP hasn't parked yet.  Take the hit and
		 * mark the operation as pending.
		 */
		mutex_spin_exit(mp);
		mutex_enter(p->p_lock);
		if ((t = lwp_find(p, target)) == NULL) {
			mutex_exit(p->p_lock);
			mutex_spin_enter(mp);
			continue;
		}
		lwp_lock(t);

		/*
		 * It may not have parked yet, we may have raced, or
		 * it is parked on a different user sync object.
		 */
		if (t->l_syncobj == &lwp_park_sobj) {
			/* Releases the LWP lock. */
			lwp_unsleep(t, true);
		} else {
			/*
			 * Set the operation pending.  The next call to
			 * _lwp_park will return early.
			 */
			t->l_flag |= LW_UNPARKED;
			lwp_unlock(t);
		}

		mutex_exit(p->p_lock);
		mutex_spin_enter(mp);
	}

	mutex_spin_exit(mp);
	if (tp != targets)
		kmem_free(tp, sz);

	return 0;
}
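/*
 * Sketch (illustrative, not from the source): batch wakeups through
 * the _lwp_unpark_all() libc stub.  Passing a NULL target array is
 * the documented way to ask for the batch limit (LWP_UNPARK_MAX in
 * the handler above), so larger sets are fed to the kernel in chunks
 * of that size.  The wake_all() wrapper is an assumption.
 */
#include <sys/types.h>
#include <lwp.h>
#include <stddef.h>

static void
wake_all(const lwpid_t *ids, size_t n, const void *obj)
{
	static ssize_t limit;
	size_t chunk;

	if (limit <= 0)
		limit = _lwp_unpark_all(NULL, 0, NULL);	/* query the limit */
	while (n > 0) {
		chunk = (n > (size_t)limit) ? (size_t)limit : n;
		(void)_lwp_unpark_all(ids, chunk, obj);
		ids += chunk;
		n -= chunk;
	}
}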