Example #1
void
cv_broadcast(kcondvar_t *cv)
{

	/*
	 * CPU == interlock: being scheduled on the rump virtual
	 * CPU already serializes access, so no explicit lock is
	 * taken here.
	 */
	rumpuser_cv_broadcast(RUMPCV(cv));
}
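
In the rump kernel, cv_broadcast() is a thin shim that forwards the native NetBSD condvar API to the rumpuser hypercall, so callers use it exactly like the in-kernel primitive. A minimal caller-side sketch follows; the names ex_mtx, ex_cv, ex_ready and the ex_* functions are hypothetical, not part of the example above:

#include <sys/param.h>
#include <sys/condvar.h>
#include <sys/mutex.h>

/*
 * Illustrative sketch only: standard NetBSD condvar usage that
 * ends in cv_broadcast().  All ex_* names are made up, not taken
 * from the example code.
 */
static kmutex_t ex_mtx;
static kcondvar_t ex_cv;
static bool ex_ready;

static void
ex_init(void)
{

	mutex_init(&ex_mtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&ex_cv, "excv");
}

static void
ex_wait(void)
{

	mutex_enter(&ex_mtx);
	while (!ex_ready)
		cv_wait(&ex_cv, &ex_mtx);	/* drops/retakes ex_mtx */
	mutex_exit(&ex_mtx);
}

static void
ex_wake(void)
{

	mutex_enter(&ex_mtx);
	ex_ready = true;
	cv_broadcast(&ex_cv);		/* wake all waiters on ex_cv */
	mutex_exit(&ex_mtx);
}

cv_wait() atomically releases the mutex while sleeping and re-acquires it before returning, which is why the predicate is re-checked in a loop: wakeups may be spurious, or the condition may have changed again by the time the waiter runs.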
Example #2
void
rump_unschedule_cpu1(struct lwp *l, void *interlock)
{
    struct rumpcpu *rcpu;
    struct cpu_info *ci;
    void *old;

    ci = l->l_cpu;
    ci->ci_curlwp = ci->ci_data.cpu_onproc = NULL;
    rcpu = cpuinfo_to_rumpcpu(ci);

    KASSERT(rcpu->rcpu_ci == ci);

    /*
     * Make sure all stores are seen before the CPU release.  This
     * is relevant only in the non-fastpath scheduling case, but
     * we don't know here if that's going to happen, so need to
     * expect the worst.
     *
     * If the scheduler interlock was requested by the caller, we
     * need to obtain it before we release the CPU.  Otherwise, we risk a
     * race condition where another thread is scheduled onto the
     * rump kernel CPU before our current thread can
     * grab the interlock.
     */
    if (interlock == rcpu->rcpu_mtx)
        rumpuser_mutex_enter_nowrap(rcpu->rcpu_mtx);
    else
        membar_exit();

    /* Release the CPU. */
    old = atomic_swap_ptr(&rcpu->rcpu_prevlwp, l);

    /* No waiters?  No problems.  We're outta here. */
    if (old == RCPULWP_BUSY) {
        return;
    }

    KASSERT(old == RCPULWP_WANTED);

    /*
     * Ok, things weren't so snappy.
     *
     * Snailpath: take lock and signal anyone waiting for this CPU.
     */

    if (interlock != rcpu->rcpu_mtx)
        rumpuser_mutex_enter_nowrap(rcpu->rcpu_mtx);
    if (rcpu->rcpu_wanted)
        rumpuser_cv_broadcast(rcpu->rcpu_cv);
    if (interlock != rcpu->rcpu_mtx)
        rumpuser_mutex_exit(rcpu->rcpu_mtx);
}
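
The function above implements a lock-free handoff: atomic_swap_ptr() stores the outgoing lwp into rcpu_prevlwp and returns the previous slot value in one atomic step, so the common no-waiter case (RCPULWP_BUSY) returns without ever taking a lock, and only a waiter that has already flagged RCPULWP_WANTED forces the snailpath through the mutex and condvar. A simplified sketch of that protocol in portable C11 follows; SLOT_BUSY, SLOT_WANTED, struct slot and slot_release() are illustrative stand-ins, not the rump code, and the caller-supplied-interlock case is omitted:

#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>

#define SLOT_BUSY	((void *)1)	/* held, nobody waiting */
#define SLOT_WANTED	((void *)2)	/* held, waiter(s) parked */

struct slot {
	_Atomic(void *) owner;		/* SLOT_* or last owner */
	pthread_mutex_t mtx;
	pthread_cond_t cv;
	int wanted;			/* waiters parked on cv */
};

static void
slot_release(struct slot *s, void *me)
{
	void *old;

	/*
	 * Publish our prior stores and release the slot in one
	 * atomic step; the seq_cst exchange subsumes the store
	 * barrier that membar_exit() provides on the rump fastpath.
	 */
	old = atomic_exchange(&s->owner, me);
	if (old == SLOT_BUSY)
		return;			/* fastpath: no waiters */

	assert(old == SLOT_WANTED);

	/* Snailpath: take the lock and wake whoever wants the slot. */
	pthread_mutex_lock(&s->mtx);
	if (s->wanted)
		pthread_cond_broadcast(&s->cv);
	pthread_mutex_unlock(&s->mtx);
}

The design choice mirrored here is that the waiter, not the releaser, pays for synchronization: a releaser only touches the mutex and condvar after a waiter has announced itself by swapping SLOT_WANTED into the slot.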