Example #1
/*
 * rump_schedule: ensure that the calling host thread has a valid lwp context,
 * i.e. ensure that curlwp != NULL.  Also, ensure that there is a
 * 1:1 mapping between the lwp and the rump kernel cpu.
 */
void
rump_schedule(void)
{
    struct lwp *l;

    /*
     * If there is no dedicated lwp, allocate a temp one and
     * set it to be free'd upon unschedule().  Use lwp0 context
     * for reserving the necessary resources.  Don't optimize
     * for this case -- anyone who cares about performance will
     * start a real thread.
     */
    if (__predict_true((l = curlwp) != NULL)) {
        rump_schedule_cpu(l);
        LWP_CACHE_CREDS(l, l->l_proc);
    } else {
        lwp0busy();

        /* schedule cpu and use lwp0 */
        rump_schedule_cpu(&lwp0);
        rump_lwproc_curlwp_set(&lwp0);

        /* allocate thread, switch to it, and release lwp0 */
        l = rump__lwproc_alloclwp(initproc);
        rump_lwproc_switch(l);
        lwp0rele();

        /*
         * mark new thread dead-on-unschedule.  this
         * means that we'll be running with l_refcnt == 0.
         * relax, it's fine.
         */
        rump_lwproc_releaselwp();
    }
}
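
For orientation, here is a minimal sketch (not from the NetBSD tree; the caller name and the work placeholder are hypothetical) of how a host thread brackets a call into the rump kernel with rump_schedule() and its counterpart rump_unschedule() from example #4 below:

/*
 * Illustrative sketch only: pair rump_schedule() with rump_unschedule().
 * With no dedicated lwp bound to this host thread, rump_schedule()
 * takes the lwp0 slow path above and allocates a temporary lwp that
 * is marked dead-on-unschedule; rump_unschedule() (example #4) then
 * frees it again.
 */
static void
host_thread_call(void)
{
    rump_schedule();        /* curlwp != NULL, a rump kernel cpu is held */
    /* ... work that requires a valid lwp context goes here ... */
    rump_unschedule();      /* cpu released; temporary lwp freed */
}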
Example #2
void
_kernel_lock(int nlocks)
{
	struct lwp *l = curlwp;

	while (nlocks) {
		if (giantowner == l) {
			giantcnt += nlocks;
			nlocks = 0;
			ev_biglock_recurse.ev_count++;
		} else {
			if (rumpuser_mutex_tryenter(rump_giantlock) != 0) {
				rump_unschedule_cpu1(l, NULL);
				rumpuser_mutex_enter_nowrap(rump_giantlock);
				rump_schedule_cpu(l);
				ev_biglock_slow.ev_count++;
			} else {
				ev_biglock_fast.ev_count++;
			}
			giantowner = l;
			giantcnt = 1;
			nlocks--;
		}
	}
}
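
For symmetry, a hedged sketch of the release side; this is not the actual NetBSD _kernel_unlock(), only an illustration of how giantowner and giantcnt from the example above would be unwound, assuming rumpuser_mutex_exit() as the counterpart of the tryenter/enter calls:

/*
 * Sketch only (assumed counterpart, not the real _kernel_unlock):
 * drop nlocks levels of recursion and release the host mutex once
 * the count reaches zero.
 */
static void
kernel_unlock_sketch(int nlocks, int *countp)
{
	if (countp != NULL)
		*countp = giantcnt;	/* let the caller re-lock this deep later */
	if (nlocks == 0)
		return;
	KASSERT(giantowner == curlwp);
	KASSERT(giantcnt >= nlocks);
	giantcnt -= nlocks;
	if (giantcnt == 0) {
		giantowner = NULL;
		rumpuser_mutex_exit(rump_giantlock);
	}
}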
Example #3
/* Give up and retake CPU (perhaps a different one) */
void
yield(void)
{
    struct lwp *l = curlwp;
    int nlocks;

    KERNEL_UNLOCK_ALL(l, &nlocks);
    rump_unschedule_cpu(l);
    rump_schedule_cpu(l);
    KERNEL_LOCK(nlocks, l);
}
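
A short usage illustration (the loop and the predicate pointer are hypothetical): yield() is what a thread spinning on some condition calls so that other lwps get a turn on the virtual CPUs:

/*
 * Illustrative only: poll a flag without hogging the rump kernel cpu.
 * "ready" is a hypothetical predicate, not a real kernel interface.
 */
static void
wait_until_ready(volatile int *ready)
{
    while (!*ready)
        yield();    /* give up the cpu, retake one (maybe another), recheck */
}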
Example #4
void
rump_unschedule(void)
{
    struct lwp *l = curlwp;
#ifdef DIAGNOSTIC
    int nlock;

    KERNEL_UNLOCK_ALL(l, &nlock);
    KASSERT(nlock == 0);
#endif

    KASSERT(l->l_mutex == l->l_cpu->ci_schedstate.spc_mutex);
    rump_unschedule_cpu(l);
    l->l_mutex = &unruntime_lock;
    l->l_stat = LSSTOP;

    /*
     * Check special conditions:
     *  1) do we need to free the lwp which just unscheduled?
     *     (locking order: lwp0, cpu)
     *  2) do we want to clear curlwp for the current host thread?
     */
    if (__predict_false(l->l_flag & LW_WEXIT)) {
        lwp0busy();

        /* Now that we have lwp0, we can schedule a CPU again */
        rump_schedule_cpu(l);

        /* switch to lwp0.  this frees the old thread */
        KASSERT(l->l_flag & LW_WEXIT);
        rump_lwproc_switch(&lwp0);

        /* release lwp0 */
        rump_unschedule_cpu(&lwp0);
        lwp0.l_mutex = &unruntime_lock;
        lwp0.l_pflag &= ~LP_RUNNING;
        lwp0rele();
        rump_lwproc_curlwp_clear(&lwp0);

    } else if (__predict_false(l->l_flag & LW_RUMP_CLEAR)) {
        rump_lwproc_curlwp_clear(l);
        l->l_flag &= ~LW_RUMP_CLEAR;
    }
}