Example #1
void
rump_scheduler_init(int numcpu)
{
    struct rumpcpu *rcpu;
    struct cpu_info *ci;
    int i;

    /* lwp0mtx and lwp0cv synchronize use of the shared lwp0 context. */
    rumpuser_mutex_init(&lwp0mtx, RUMPUSER_MTX_SPIN);
    rumpuser_cv_init(&lwp0cv);
    /*
     * Per-CPU setup: CPU 0 is the statically allocated boot CPU;
     * the rest are allocated on the fly.
     */
    for (i = 0; i < numcpu; i++) {
        if (i == 0) {
            ci = &rump_bootcpu;
        } else {
            ci = kmem_zalloc(sizeof(*ci), KM_SLEEP);
            ci->ci_index = i;
        }

        rcpu = &rcpu_storage[i];
        rcpu->rcpu_ci = ci;
        rcpu->rcpu_wanted = 0;
        rumpuser_cv_init(&rcpu->rcpu_cv);
        rumpuser_mutex_init(&rcpu->rcpu_mtx, RUMPUSER_MTX_SPIN);

        ci->ci_schedstate.spc_mutex =
            mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
        ci->ci_schedstate.spc_flags = SPCF_RUNNING;
    }

    mutex_init(&unruntime_lock, MUTEX_DEFAULT, IPL_SCHED);
}
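The field accesses above imply a small per-virtual-CPU bookkeeping structure. The sketch below is reconstructed from this example alone and is not the authoritative NetBSD definition (the real struct rumpcpu carries more members):

/* Hypothetical layout inferred from the accesses in Example #1. */
struct rumpcpu {
	struct cpu_info *rcpu_ci;	/* MI per-CPU state backing this vCPU */
	int rcpu_wanted;		/* threads waiting for this CPU */
	struct rumpuser_cv *rcpu_cv;	/* signalled when the CPU frees up */
	struct rumpuser_mtx *rcpu_mtx;	/* hypervisor lock for the above */
};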
Example #2
void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{
	int ruflags = RUMPUSER_MTX_KMUTEX;
	int isspin;

	CTASSERT(sizeof(kmutex_t) >= sizeof(void *));

	/*
	 * Try to figure out if the caller wanted a spin mutex or
	 * not with this easy set of conditionals.  The difference
	 * between a spin mutex and an adaptive mutex for a rump
	 * kernel is that the hypervisor does not relinquish the
	 * rump kernel CPU context for a spin mutex.  The
	 * hypervisor itself may block even when "spinning".
	 */
	if (type == MUTEX_SPIN) {
		isspin = 1;
	} else if (ipl == IPL_NONE || ipl == IPL_SOFTCLOCK ||
	    ipl == IPL_SOFTBIO || ipl == IPL_SOFTNET ||
	    ipl == IPL_SOFTSERIAL) {
		isspin = 0;
	} else {
		isspin = 1;
	}

	if (isspin)
		ruflags |= RUMPUSER_MTX_SPIN;
	rumpuser_mutex_init((struct rumpuser_mtx **)mtx, ruflags);
	ALLOCK(mtx, &mutex_lockops);
}
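To make the classification concrete, here is a hedged sketch of hypothetical call sites (the lock names and the function are invented; the constants are standard NetBSD ones) showing which branch each initialization takes:

#include <sys/mutex.h>
#include <sys/intr.h>

static kmutex_t soft_mtx, hard_mtx, spin_mtx;

void
example_lock_setup(void)
{

	/* Adaptive: MUTEX_DEFAULT at one of the soft-interrupt IPLs listed. */
	mutex_init(&soft_mtx, MUTEX_DEFAULT, IPL_SOFTNET);

	/* Spin: MUTEX_DEFAULT at a hard-interrupt IPL falls into the else. */
	mutex_init(&hard_mtx, MUTEX_DEFAULT, IPL_VM);

	/* Spin: explicitly requested, regardless of IPL. */
	mutex_init(&spin_mtx, MUTEX_SPIN, IPL_SCHED);
}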
Example #3
void
rump_scheduler_init(int numcpu)
{
	struct rumpcpu *rcpu;
	struct cpu_info *ci;
	int i;

	rumpuser_mutex_init(&lwp0mtx, RUMPUSER_MTX_SPIN);
	rumpuser_cv_init(&lwp0cv);
	/*
	 * Variant of Example #1: here every cpu_info comes from the
	 * static rump_cpus array instead of being allocated on demand.
	 */
	for (i = 0; i < numcpu; i++) {
		rcpu = &rcpu_storage[i];
		ci = &rump_cpus[i];
		rcpu->rcpu_ci = ci;
		ci->ci_schedstate.spc_mutex =
		    mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
		ci->ci_schedstate.spc_flags = SPCF_RUNNING;
		rcpu->rcpu_wanted = 0;
		rumpuser_cv_init(&rcpu->rcpu_cv);
		rumpuser_mutex_init(&rcpu->rcpu_mtx, RUMPUSER_MTX_SPIN);
	}

	mutex_init(&unruntime_lock, MUTEX_DEFAULT, IPL_SCHED);
}
Example #4
int
rumpuser_init(int version, const struct rumpuser_hyperup *hyp)
{

	if (version != RUMPHYPER_MYVERSION) {
		printk("Unsupported hypercall versions requested, %d vs %d\n",
		    version, RUMPHYPER_MYVERSION);
		return 1;
	}

	/* Save the rump kernel's upcall table for later hypercalls. */
	rumpuser__hyp = *hyp;

	rumpuser_mutex_init(&bio_mtx, RUMPUSER_MTX_SPIN);
	rumpuser_cv_init(&bio_cv);

	return 0;
}
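This init routine presumes the hypervisor already provides rumpuser_mutex_init() and rumpuser_cv_init(). On a POSIX host those two hypercalls could plausibly be backed by pthreads as below; this is a minimal sketch with a simplified struct layout and no error handling, not the real rumpuser implementation:

#include <pthread.h>
#include <stdlib.h>

struct rumpuser_mtx {
	pthread_mutex_t pt_mtx;
	int flags;
};
struct rumpuser_cv {
	pthread_cond_t pt_cv;
};

void
rumpuser_mutex_init(struct rumpuser_mtx **mtxp, int flags)
{
	struct rumpuser_mtx *mtx = calloc(1, sizeof(*mtx));

	/* Per Example #2's comment, RUMPUSER_MTX_SPIN means waiters keep
	 * the rump kernel CPU context instead of relinquishing it. */
	mtx->flags = flags;
	pthread_mutex_init(&mtx->pt_mtx, NULL);
	*mtxp = mtx;
}

void
rumpuser_cv_init(struct rumpuser_cv **cvp)
{
	struct rumpuser_cv *cv = calloc(1, sizeof(*cv));

	pthread_cond_init(&cv->pt_cv, NULL);
	*cvp = cv;
}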
Example #5
void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{

	/*
	 * Note: no flags argument here, unlike the other examples; this
	 * caller targets a different revision of the rumpuser interface.
	 */
	rumpuser_mutex_init(&mtx->kmtx_mtx);
}
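Unlike Example #2, which casts the kmutex_t pointer itself to a struct rumpuser_mtx **, this version keeps the hypervisor mutex pointer in a named field. That implies a wrapper along the following lines; only the kmtx_mtx name comes from the source, the rest is inferred:

/* Inferred kmutex_t wrapper; the layout is an assumption. */
struct kmutex {
	struct rumpuser_mtx *kmtx_mtx;	/* backing hypervisor mutex */
};
typedef struct kmutex kmutex_t;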