void
mutex_exit(kmutex_t *mtx)
{

	UNLOCKED(mtx, false);
	rumpuser_mutex_exit(RUMPMTX(mtx));
}
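
For orientation, the enter-side counterpart of this wrapper would look roughly like the sketch below; the LOCKED() bookkeeping call is an assumption mirroring the UNLOCKED() call above, not code taken from this page.

void
mutex_enter(kmutex_t *mtx)
{

	/* acquire the backing hypervisor mutex, then record ownership */
	rumpuser_mutex_enter(RUMPMTX(mtx));
	LOCKED(mtx, false);	/* assumed counterpart of UNLOCKED() above */
}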
Example #2
void
_kernel_unlock(int nlocks, int *countp)
{

	if (giantowner != curlwp) {
		KASSERT(nlocks == 0);
		if (countp)
			*countp = 0;
		return;
	}

	if (countp)
		*countp = giantcnt;
	if (nlocks == 0)
		nlocks = giantcnt;
	if (nlocks == -1) {
		KASSERT(giantcnt == 1);
		nlocks = 1;
	}
	KASSERT(nlocks <= giantcnt);
	while (nlocks--) {
		giantcnt--;
	}

	if (giantcnt == 0) {
		giantowner = NULL;
		rumpuser_mutex_exit(rump_giantlock);
	}
}
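
For comparison, here is a minimal acquire-side sketch reconstructed from the variables used above (giantowner, giantcnt, rump_giantlock, curlwp). The recursion handling is an assumption, not necessarily the project's actual _kernel_lock:

void
_kernel_lock(int nlocks)
{
	struct lwp *l = curlwp;

	while (nlocks) {
		if (giantowner == l) {
			/* already held by us: just bump the count */
			giantcnt += nlocks;
			nlocks = 0;
			continue;
		}
		rumpuser_mutex_enter_nowrap(rump_giantlock);
		giantowner = l;
		giantcnt = 1;
		nlocks--;
	}
}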
Example #3
static void
cv_unsched(struct rumpuser_mtx *mtx, int *nlocks)
{

	rumpkern_unsched(nlocks, mtx);
	rumpuser_mutex_exit(mtx);
}
Example #4
void
rumpuser_bio(int fd, int op, void *data, size_t dlen, int64_t off,
	rump_biodone_fn biodone, void *donearg)
{
	static int bio_inited;
	struct biocb *bio = memalloc(sizeof(*bio), 0);
	struct blkfront_aiocb *aiocb = &bio->bio_aiocb;
	int nlocks;
	int num = fd - BLKFDOFF;

	rumpkern_unsched(&nlocks, NULL);

	/*
	 * One-time creation of the "biopoll" thread: cheap unlocked
	 * check first, then re-check under bio_mtx.
	 */
	if (!bio_inited) {
		rumpuser_mutex_enter_nowrap(bio_mtx);
		if (!bio_inited) {
			bio_inited = 1;
			rumpuser_mutex_exit(bio_mtx);
			create_thread("biopoll", biothread, NULL);
		} else {
			rumpuser_mutex_exit(bio_mtx);
		}
	}

	bio->bio_done = biodone;
	bio->bio_arg = donearg;
	bio->bio_num = num;

	aiocb->aio_dev = blkdevs[num];
	aiocb->aio_buf = data;
	aiocb->aio_nbytes = dlen;
	aiocb->aio_offset = off;
	aiocb->aio_cb = biocomp;
	aiocb->data  = bio;

	if (op & RUMPUSER_BIO_READ)
		blkfront_aio_read(aiocb);
	else
		blkfront_aio_write(aiocb);

	rumpuser_mutex_enter(bio_mtx);
	bio_outstanding_total++;
	blkdev_outstanding[num]++;
	rumpuser_cv_signal(bio_cv);
	rumpuser_mutex_exit(bio_mtx);

	rumpkern_sched(nlocks, NULL);
}
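
The rumpkern_unsched()/rumpkern_sched() pair seen above is the usual bracket for a hypercall that may block on the host: release the rump-kernel CPU (remembering how many giant locks were held), do the blocking work, then reschedule with the saved count. A stripped-down sketch of that pattern, where do_blocking_host_work() is a hypothetical placeholder:

void
example_hypercall(void)
{
	int nlocks;

	/* give up the rump-kernel CPU and any giant locks ... */
	rumpkern_unsched(&nlocks, NULL);

	/* ... perform work that may block on the host ... */
	do_blocking_host_work();

	/* ... then reschedule with the same lock count */
	rumpkern_sched(nlocks, NULL);
}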
Example #5
void
rumpuser_cv_wait_nowrap(struct rumpuser_cv *cv, struct rumpuser_mtx *mtx)
{

	cv->nwaiters++;
	rumpuser_mutex_exit(mtx);
	wait(&cv->waiters, BMK_SCHED_BLOCK_INFTIME);
	rumpuser_mutex_enter_nowrap(mtx);
	cv->nwaiters--;
}
Example #6
void
rumpuser_cv_wait_nowrap(struct rumpuser_cv *cv, struct rumpuser_mtx *mtx)
{

	cv->nwaiters++;
	rumpuser_mutex_exit(mtx);
	wait(&cv->waiters, 0);
	rumpuser_mutex_enter_nowrap(mtx);
	cv->nwaiters--;
}
Example #7
static void
lwp0rele(void)
{

    rumpuser_mutex_enter_nowrap(lwp0mtx);
    KASSERT(lwp0isbusy == true);
    lwp0isbusy = false;
    rumpuser_cv_signal(lwp0cv);
    rumpuser_mutex_exit(lwp0mtx);
}
Example #8
void
rump_unschedule_cpu1(struct lwp *l, void *interlock)
{
    struct rumpcpu *rcpu;
    struct cpu_info *ci;
    void *old;

    ci = l->l_cpu;
    ci->ci_curlwp = ci->ci_data.cpu_onproc = NULL;
    rcpu = cpuinfo_to_rumpcpu(ci);

    KASSERT(rcpu->rcpu_ci == ci);

    /*
     * Make sure all stores are seen before the CPU release.  This
     * is relevant only in the non-fastpath scheduling case, but
     * we don't know here whether that will happen, so we need to
     * expect the worst.
     *
     * If the scheduler interlock was requested by the caller, we
     * need to obtain it before we release the CPU.  Otherwise, we risk a
     * race condition where another thread is scheduled onto the
     * rump kernel CPU before our current thread can
     * grab the interlock.
     */
    if (interlock == rcpu->rcpu_mtx)
        rumpuser_mutex_enter_nowrap(rcpu->rcpu_mtx);
    else
        membar_exit();

    /* Release the CPU. */
    old = atomic_swap_ptr(&rcpu->rcpu_prevlwp, l);

    /* No waiters?  No problems.  We're outta here. */
    if (old == RCPULWP_BUSY) {
        return;
    }

    KASSERT(old == RCPULWP_WANTED);

    /*
     * Ok, things weren't so snappy.
     *
     * Snailpath: take lock and signal anyone waiting for this CPU.
     */

    if (interlock != rcpu->rcpu_mtx)
        rumpuser_mutex_enter_nowrap(rcpu->rcpu_mtx);
    if (rcpu->rcpu_wanted)
        rumpuser_cv_broadcast(rcpu->rcpu_cv);
    if (interlock != rcpu->rcpu_mtx)
        rumpuser_mutex_exit(rcpu->rcpu_mtx);
}
Example #9
static void
lwp0busy(void)
{

    /* busy lwp0 */
    KASSERT(curlwp == NULL || curlwp->l_stat != LSONPROC);
    rumpuser_mutex_enter_nowrap(lwp0mtx);
    while (lwp0isbusy)
        rumpuser_cv_wait_nowrap(lwp0cv, lwp0mtx);
    lwp0isbusy = true;
    rumpuser_mutex_exit(lwp0mtx);
}
Example #10
static void
biocomp(struct blkfront_aiocb *aiocb, int ret)
{
	struct biocb *bio = aiocb->data;
	int dummy, num;

	rumpkern_sched(0, NULL);
	if (ret)
		bio->bio_done(bio->bio_arg, 0, EIO);
	else
		bio->bio_done(bio->bio_arg, bio->bio_aiocb.aio_nbytes, 0);
	rumpkern_unsched(&dummy, NULL);
	num = bio->bio_num;
	xfree(bio);

	rumpuser_mutex_enter_nowrap(bio_mtx);
	bio_outstanding_total--;
	blkdev_outstanding[num]--;
	rumpuser_mutex_exit(bio_mtx);
}
Example #11
static void
biothread(void *arg)
{
	DEFINE_WAIT(w);
	int i, flags, did;

	/* for the bio callback */
	rumpuser__hyp.hyp_schedule();
	rumpuser__hyp.hyp_lwproc_newlwp(0);
	rumpuser__hyp.hyp_unschedule();

	for (;;) {
		rumpuser_mutex_enter_nowrap(bio_mtx);
		while (bio_outstanding_total == 0) {
			rumpuser_cv_wait_nowrap(bio_cv, bio_mtx);
		}
		rumpuser_mutex_exit(bio_mtx);

		/*
		 * if we made any progress, recheck.  could be batched,
		 * but since currently locks are free here ... meh
		 */
		local_irq_save(flags);
		for (did = 0;;) {
			for (i = 0; i < NBLKDEV; i++) {
				if (blkdev_outstanding[i])
					did += blkfront_aio_poll(blkdevs[i]);
			}
			if (did)
				break;
			add_waiter(w, blkfront_queue);
			local_irq_restore(flags);
			schedule();
			local_irq_save(flags);
		}
		local_irq_restore(flags);
	}
}
Example #12
void
mutex_exit(kmutex_t *mtx)
{

	rumpuser_mutex_exit(mtx->kmtx_mtx);
}
Example #13
/*
 * Schedule a CPU.  This optimizes for the case where we schedule
 * the same thread often, and we have nCPU >= nFrequently-Running-Thread
 * (where CPU is virtual rump cpu, not host CPU).
 */
void
rump_schedule_cpu_interlock(struct lwp *l, void *interlock)
{
    struct rumpcpu *rcpu;
    struct cpu_info *ci;
    void *old;
    bool domigrate;
    bool bound = l->l_pflag & LP_BOUND;

    l->l_stat = LSRUN;

    /*
     * First, try fastpath: if we were the previous user of the
     * CPU, everything is in order cachewise and we can just
     * proceed to use it.
     *
     * If we are a different thread (i.e. CAS fails), we must go
     * through a memory barrier to ensure we get a truthful
     * view of the world.
     */

    KASSERT(l->l_target_cpu != NULL);
    rcpu = cpuinfo_to_rumpcpu(l->l_target_cpu);
    if (atomic_cas_ptr(&rcpu->rcpu_prevlwp, l, RCPULWP_BUSY) == l) {
        if (interlock == rcpu->rcpu_mtx)
            rumpuser_mutex_exit(rcpu->rcpu_mtx);
        SCHED_FASTPATH(rcpu);
        /* jones, you're the man */
        goto fastlane;
    }

    /*
     * Else, it's the slowpath for us.  First, determine if we
     * can migrate.
     */
    if (ncpu == 1)
        domigrate = false;
    else
        domigrate = true;

    /* Take lock.  This acts as a load barrier too. */
    if (interlock != rcpu->rcpu_mtx)
        rumpuser_mutex_enter_nowrap(rcpu->rcpu_mtx);

    for (;;) {
        SCHED_SLOWPATH(rcpu);
        old = atomic_swap_ptr(&rcpu->rcpu_prevlwp, RCPULWP_WANTED);

        /* CPU is free? */
        if (old != RCPULWP_BUSY && old != RCPULWP_WANTED) {
            if (atomic_cas_ptr(&rcpu->rcpu_prevlwp,
                               RCPULWP_WANTED, RCPULWP_BUSY) == RCPULWP_WANTED) {
                break;
            }
        }

        /*
         * Do we want to migrate once?
         * This may need a slightly better algorithm, or we
         * might cache pingpong eternally for non-frequent
         * threads.
         */
        if (domigrate && !bound) {
            domigrate = false;
            SCHED_MIGRATED(rcpu);
            rumpuser_mutex_exit(rcpu->rcpu_mtx);
            rcpu = getnextcpu();
            rumpuser_mutex_enter_nowrap(rcpu->rcpu_mtx);
            continue;
        }

        /* Want CPU, wait until it's released and retry. */
        rcpu->rcpu_wanted++;
        rumpuser_cv_wait_nowrap(rcpu->rcpu_cv, rcpu->rcpu_mtx);
        rcpu->rcpu_wanted--;
    }
    rumpuser_mutex_exit(rcpu->rcpu_mtx);

fastlane:
    ci = rcpu->rcpu_ci;
    l->l_cpu = l->l_target_cpu = ci;
    l->l_mutex = rcpu->rcpu_ci->ci_schedstate.spc_mutex;
    l->l_ncsw++;
    l->l_stat = LSONPROC;

    /*
     * No interrupts, so ci_curlwp === cpu_onproc.
     * Okay, we could make an attempt to not set cpu_onproc
     * in the case that an interrupt is scheduled immediately
     * after a user proc, but leave that for later.
     */
    ci->ci_curlwp = ci->ci_data.cpu_onproc = l;
}