Example #1
static void
lwp0busy(void)
{

    /* busy lwp0 */
    KASSERT(curlwp == NULL || curlwp->l_stat != LSONPROC);
    rumpuser_mutex_enter_nowrap(lwp0mtx);
    while (lwp0isbusy)
        rumpuser_cv_wait_nowrap(lwp0cv, lwp0mtx);
    lwp0isbusy = true;
    rumpuser_mutex_exit(lwp0mtx);
}
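
The wait loop in lwp0busy() only works together with a release path that clears lwp0isbusy and signals lwp0cv under the same lwp0mtx. As a rough sketch of that counterpart (the function name lwp0rele and the exact assertion are assumptions, not taken from the listing above):

static void
lwp0rele(void)
{

    /* mark lwp0 free again and wake one thread blocked in lwp0busy() */
    rumpuser_mutex_enter_nowrap(lwp0mtx);
    KASSERT(lwp0isbusy == true);
    lwp0isbusy = false;
    rumpuser_cv_signal(lwp0cv);
    rumpuser_mutex_exit(lwp0mtx);
}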
Example #2
static void
biothread(void *arg)
{
	DEFINE_WAIT(w);
	int i, flags, did;

	/* for the bio callback */
	rumpuser__hyp.hyp_schedule();
	rumpuser__hyp.hyp_lwproc_newlwp(0);
	rumpuser__hyp.hyp_unschedule();

	for (;;) {
		rumpuser_mutex_enter_nowrap(bio_mtx);
		while (bio_outstanding_total == 0) {
			rumpuser_cv_wait_nowrap(bio_cv, bio_mtx);
		}
		rumpuser_mutex_exit(bio_mtx);

		/*
		 * if we made any progress, recheck.  could be batched,
		 * but since currently locks are free here ... meh
		 */
		local_irq_save(flags);
		for (did = 0;;) {
			for (i = 0; i < NBLKDEV; i++) {
				if (blkdev_outstanding[i])
					did += blkfront_aio_poll(blkdevs[i]);
			}
			if (did)
				break;
			add_waiter(w, blkfront_queue);
			local_irq_restore(flags);
			schedule();
			local_irq_save(flags);
		}
		local_irq_restore(flags);
	}
}
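
For biothread() to ever wake up, the submission side has to raise bio_outstanding_total (plus the per-device blkdev_outstanding counter) under bio_mtx and signal bio_cv. A minimal sketch of that producer, where the function name bio_submit_one is made up and the actual hand-off to the block frontend is left out:

static void
bio_submit_one(int num)
{

	/* account for one more outstanding request on device num ... */
	rumpuser_mutex_enter_nowrap(bio_mtx);
	bio_outstanding_total++;
	blkdev_outstanding[num]++;
	rumpuser_mutex_exit(bio_mtx);

	/* ... and wake biothread() so it starts polling the frontend */
	rumpuser_cv_signal(bio_cv);
}

Signalling after dropping bio_mtx is fine here: the waiter rechecks bio_outstanding_total under the mutex before going back to sleep.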
Example #3
/*
 * Schedule a CPU.  This optimizes for the case where we schedule
 * the same thread often, and we have nCPU >= nFrequently-Running-Thread
 * (where CPU is virtual rump cpu, not host CPU).
 */
void
rump_schedule_cpu_interlock(struct lwp *l, void *interlock)
{
    struct rumpcpu *rcpu;
    struct cpu_info *ci;
    void *old;
    bool domigrate;
    bool bound = l->l_pflag & LP_BOUND;

    l->l_stat = LSRUN;

    /*
     * First, try fastpath: if we were the previous user of the
     * CPU, everything is in order cachewise and we can just
     * proceed to use it.
     *
     * If we are a different thread (i.e. CAS fails), we must go
     * through a memory barrier to ensure we get a truthful
     * view of the world.
     */

    KASSERT(l->l_target_cpu != NULL);
    rcpu = cpuinfo_to_rumpcpu(l->l_target_cpu);
    if (atomic_cas_ptr(&rcpu->rcpu_prevlwp, l, RCPULWP_BUSY) == l) {
        if (interlock == rcpu->rcpu_mtx)
            rumpuser_mutex_exit(rcpu->rcpu_mtx);
        SCHED_FASTPATH(rcpu);
        /* jones, you're the man */
        goto fastlane;
    }

    /*
     * Else, it's the slowpath for us.  First, determine if we
     * can migrate.
     */
    if (ncpu == 1)
        domigrate = false;
    else
        domigrate = true;

    /* Take lock.  This acts as a load barrier too. */
    if (interlock != rcpu->rcpu_mtx)
        rumpuser_mutex_enter_nowrap(rcpu->rcpu_mtx);

    for (;;) {
        SCHED_SLOWPATH(rcpu);
        old = atomic_swap_ptr(&rcpu->rcpu_prevlwp, RCPULWP_WANTED);

        /* CPU is free? */
        if (old != RCPULWP_BUSY && old != RCPULWP_WANTED) {
            if (atomic_cas_ptr(&rcpu->rcpu_prevlwp,
                               RCPULWP_WANTED, RCPULWP_BUSY) == RCPULWP_WANTED) {
                break;
            }
        }

        /*
         * Do we want to migrate once?
         * This may need a slightly better algorithm, or we
         * might cache pingpong eternally for non-frequent
         * threads.
         */
        if (domigrate && !bound) {
            domigrate = false;
            SCHED_MIGRATED(rcpu);
            rumpuser_mutex_exit(rcpu->rcpu_mtx);
            rcpu = getnextcpu();
            rumpuser_mutex_enter_nowrap(rcpu->rcpu_mtx);
            continue;
        }

        /* Want CPU, wait until it's released and retry */
        rcpu->rcpu_wanted++;
        rumpuser_cv_wait_nowrap(rcpu->rcpu_cv, rcpu->rcpu_mtx);
        rcpu->rcpu_wanted--;
    }
    rumpuser_mutex_exit(rcpu->rcpu_mtx);

fastlane:
    ci = rcpu->rcpu_ci;
    l->l_cpu = l->l_target_cpu = ci;
    l->l_mutex = rcpu->rcpu_ci->ci_schedstate.spc_mutex;
    l->l_ncsw++;
    l->l_stat = LSONPROC;

    /*
     * No interrupts, so ci_curlwp === cpu_onproc.
     * Okay, we could make an attempt to not set cpu_onproc
     * in the case that an interrupt is scheduled immediately
     * after a user proc, but leave that for later.
     */
    ci->ci_curlwp = ci->ci_data.cpu_onproc = l;
}
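
The fastpath above relies on the release side swapping the departing lwp back into rcpu_prevlwp and, only when it finds RCPULWP_WANTED there, waking the threads counted in rcpu_wanted. A condensed sketch of that release path (the function name is made up, and the interlock and memory-barrier handling of the real code is omitted):

static void
rump_unschedule_cpu_sketch(struct lwp *l)
{
    struct rumpcpu *rcpu = cpuinfo_to_rumpcpu(l->l_cpu);
    void *old;

    /* release the CPU and remember this lwp for the fastpath */
    old = atomic_swap_ptr(&rcpu->rcpu_prevlwp, l);
    if (old == RCPULWP_BUSY)
        return;        /* nobody was waiting for this CPU */

    KASSERT(old == RCPULWP_WANTED);

    /* slowpath: wake everyone sleeping in the wait loop above */
    rumpuser_mutex_enter_nowrap(rcpu->rcpu_mtx);
    if (rcpu->rcpu_wanted)
        rumpuser_cv_broadcast(rcpu->rcpu_cv);
    rumpuser_mutex_exit(rcpu->rcpu_mtx);
}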