Example #1
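rump_scheduler_init() brings up the rump kernel's virtual CPUs: after creating the lwp0 mutex and condition variable, it gives each struct rumpcpu its own rumpuser condition variable and spin mutex, plus the scheduler-state mutex on the attached cpu_info. This appears to come from NetBSD's sys/rump/librump/rumpkern/scheduler.c.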
void
rump_scheduler_init(int numcpu)
{
	struct rumpcpu *rcpu;
	struct cpu_info *ci;
	int i;

	rumpuser_mutex_init(&lwp0mtx, RUMPUSER_MTX_SPIN);
	rumpuser_cv_init(&lwp0cv);
	for (i = 0; i < numcpu; i++) {
		if (i == 0) {
			ci = &rump_bootcpu;
		} else {
			ci = kmem_zalloc(sizeof(*ci), KM_SLEEP);
			ci->ci_index = i;
		}

		rcpu = &rcpu_storage[i];
		rcpu->rcpu_ci = ci;
		rcpu->rcpu_wanted = 0;
		rumpuser_cv_init(&rcpu->rcpu_cv);
		rumpuser_mutex_init(&rcpu->rcpu_mtx, RUMPUSER_MTX_SPIN);

		ci->ci_schedstate.spc_mutex =
		    mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
		ci->ci_schedstate.spc_flags = SPCF_RUNNING;
	}

	mutex_init(&unruntime_lock, MUTEX_DEFAULT, IPL_SCHED);
}
Example #2
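The uniprocessor rw_init() allocates a struct uprw with separate condition variables for waiting readers and writers, then stores the pointer inside the opaque krwlock_t; the CTASSERT guarantees the lock is large enough to hold it, and checkncpu() enforces the single-CPU configuration this variant assumes. This looks like NetBSD's sys/rump/librump/rumpkern/locks_up.c.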
void
rw_init(krwlock_t *rw)
{
	struct uprw *uprw;

	CTASSERT(sizeof(krwlock_t) >= sizeof(void *));
	checkncpu();

	uprw = rump_hypermalloc(sizeof(*uprw), 0, true, "rwinit");
	memset(uprw, 0, sizeof(*uprw));
	rumpuser_cv_init(&uprw->uprw_rucv_reader);
	rumpuser_cv_init(&uprw->uprw_rucv_writer);
	memcpy(rw, &uprw, sizeof(void *));
}
Example #3
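This cv_init() treats the entire kcondvar_t as storage for a single rumpuser_cv pointer and lets the hypercall fill it in directly; the msg argument is ignored.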
void
cv_init(kcondvar_t *cv, const char *msg)
{

	CTASSERT(sizeof(kcondvar_t) >= sizeof(void *));

	rumpuser_cv_init((struct rumpuser_cv **)cv);
}
Example #4
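An older revision of the rump_scheduler_init() shown in Example #1: every cpu_info comes from a static rump_cpus array instead of the boot CPU being special-cased, but each virtual CPU's condition variable and spin mutex are initialized the same way.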
void
rump_scheduler_init(int numcpu)
{
	struct rumpcpu *rcpu;
	struct cpu_info *ci;
	int i;

	rumpuser_mutex_init(&lwp0mtx, RUMPUSER_MTX_SPIN);
	rumpuser_cv_init(&lwp0cv);
	for (i = 0; i < numcpu; i++) {
		rcpu = &rcpu_storage[i];
		ci = &rump_cpus[i];
		rcpu->rcpu_ci = ci;
		ci->ci_schedstate.spc_mutex =
		    mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
		ci->ci_schedstate.spc_flags = SPCF_RUNNING;
		rcpu->rcpu_wanted = 0;
		rumpuser_cv_init(&rcpu->rcpu_cv);
		rumpuser_mutex_init(&rcpu->rcpu_mtx, RUMPUSER_MTX_SPIN);
	}

	mutex_init(&unruntime_lock, MUTEX_DEFAULT, IPL_SCHED);
}
Example #5
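rumpuser_init() is the hypercall layer's entry point; the printk() suggests a bare-metal or Xen-style rumprun platform. It rejects a mismatched hypercall interface version, records the upcall table, and creates the mutex/condition-variable pair used by the block I/O path.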
int
rumpuser_init(int version, const struct rumpuser_hyperup *hyp)
{

	if (version != RUMPHYPER_MYVERSION) {
		printk("Unsupported hypercall versions requested, %d vs %d\n",
		    version, RUMPHYPER_MYVERSION);
		return 1;
	}

	rumpuser__hyp = *hyp;

	rumpuser_mutex_init(&bio_mtx, RUMPUSER_MTX_SPIN);
	rumpuser_cv_init(&bio_cv);

	return 0;
}
Example #6
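The uniprocessor mutex_init() follows the same pattern as rw_init() in Example #2: allocate a struct upmtx carrying one condition variable for waiters, zero it, and store the pointer inside the opaque kmutex_t.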
void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{
	struct upmtx *upm;

	CTASSERT(sizeof(kmutex_t) >= sizeof(void *));
	checkncpu();

	/*
	 * In uniprocessor locking we don't need to differentiate
	 * between spin mutexes and adaptive ones.  We could
	 * replace mutex_enter() with a NOP for spin mutexes, but
	 * not bothering with that for now.
	 */

	/*
	 * XXX: pool_cache would be nice, but not easily possible,
	 * as pool cache init wants to call mutex_init() ...
	 */
	upm = rump_hypermalloc(sizeof(*upm), 0, true, "mutex_init");
	memset(upm, 0, sizeof(*upm));
	rumpuser_cv_init(&upm->upm_rucv);
	memcpy(mtx, &upm, sizeof(void *));
}
Example #7
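A variant of Example #3's cv_init() that parks the rumpuser_cv pointer in the kcondvar_t's cv_wmesg field, using __UNCONST() to cast away that field's constness before handing it to the hypercall.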
void
cv_init(kcondvar_t *cv, const char *msg)
{

	rumpuser_cv_init((struct rumpuser_cv **)__UNCONST(&cv->cv_wmesg));
}