Example #1
/*
 * Process pending interrupts
 */
void
splz(void)
{
	struct mdglobaldata *gd = mdcpu;
	thread_t td = gd->mi.gd_curthread;
	int irq;

	while (gd->mi.gd_reqflags & (RQF_IPIQ|RQF_INTPEND)) {
		crit_enter_quick(td);
		if (gd->mi.gd_reqflags & RQF_IPIQ) {
			atomic_clear_int(&gd->mi.gd_reqflags, RQF_IPIQ);
			lwkt_process_ipiq();
		}
		if (gd->mi.gd_reqflags & RQF_INTPEND) {
			atomic_clear_int(&gd->mi.gd_reqflags, RQF_INTPEND);
			while ((irq = ffs(gd->gd_spending)) != 0) {
				--irq;
				atomic_clear_int(&gd->gd_spending, 1 << irq);
				irq += FIRST_SOFTINT;
				sched_ithd_soft(irq);
			}
			while ((irq = ffs(gd->gd_fpending)) != 0) {
				--irq;
				atomic_clear_int(&gd->gd_fpending, 1 << irq);
				sched_ithd_hard_virtual(irq);
			}
		}
		crit_exit_noyield(td);
	}
}
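
splz() drains the per-cpu pending masks by pulling one bit at a time with ffs(), clearing it atomically, and dispatching the corresponding handler. Below is a minimal userland sketch of that drain idiom, not DragonFly code: C11 atomics stand in for atomic_clear_int(), and pending, dispatch() and FIRST_SOFT are invented names.

/*
 * Hypothetical userland sketch of the ffs()-based drain loop in splz().
 * C11 atomics stand in for the kernel's atomic_clear_int()/atomic_set_int().
 */
#include <stdatomic.h>
#include <stdio.h>
#include <strings.h>			/* ffs() */

#define FIRST_SOFT	8		/* illustrative bias, like FIRST_SOFTINT */

static _Atomic unsigned int pending;	/* stands in for gd_spending */

static void
dispatch(int irq)
{
	printf("dispatching %d\n", irq);	/* stands in for sched_ithd_soft() */
}

static void
drain_pending(void)
{
	unsigned int snap;
	int irq;

	/* ffs() returns the 1-based index of the lowest set bit, 0 if none. */
	while ((snap = atomic_load(&pending)) != 0) {
		irq = ffs(snap) - 1;
		atomic_fetch_and(&pending, ~(1U << irq));
		dispatch(irq + FIRST_SOFT);
	}
}

int
main(void)
{
	atomic_fetch_or(&pending, (1U << 0) | (1U << 3));
	drain_pending();
	return 0;
}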
Example #2
/*
 * Add a (pmap, va) pair to the invalidation list and protect access
 * as appropriate.
 *
 * CPUMASK_LOCK is used to interlock thread switchins, otherwise another
 * cpu can switch in a pmap that we are unaware of and interfere with our
 * pte operation.
 */
void
pmap_inval_interlock(pmap_inval_info_t info, pmap_t pmap, vm_offset_t va)
{
    cpumask_t oactive;
#ifdef SMP
    cpumask_t nactive;

    DEBUG_PUSH_INFO("pmap_inval_interlock");
    for (;;) {
	oactive = pmap->pm_active;
	cpu_ccfence();
	nactive = oactive | CPUMASK_LOCK;
	if ((oactive & CPUMASK_LOCK) == 0 &&
	    atomic_cmpset_cpumask(&pmap->pm_active, oactive, nactive)) {
		break;
	}
	lwkt_process_ipiq();
	cpu_pause();
    }
    DEBUG_POP_INFO();
#else
    oactive = pmap->pm_active & ~CPUMASK_LOCK;
#endif
    KKASSERT((info->pir_flags & PIRF_CPUSYNC) == 0);
    info->pir_va = va;
    info->pir_flags = PIRF_CPUSYNC;
    lwkt_cpusync_init(&info->pir_cpusync, oactive, pmap_inval_callback, info);
    lwkt_cpusync_interlock(&info->pir_cpusync);
}
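
The for (;;) loop above is the recurring interlock idiom in these examples: try to set a lock bit with compare-and-swap, and while the attempt fails keep calling lwkt_process_ipiq() so the current holder (which may be waiting on this cpu) can make progress. A hypothetical userland sketch of that shape follows; active, LOCK_BIT and drain_pending_work() are illustrative stand-ins, not kernel APIs.

/*
 * Hypothetical sketch of the spin-with-drain interlock loop above,
 * using C11 atomics in place of atomic_cmpset_cpumask().
 */
#include <sched.h>
#include <stdatomic.h>

#define LOCK_BIT	0x80000000U	/* stands in for CPUMASK_LOCK */

static _Atomic unsigned int active;	/* stands in for pm_active */

static void
drain_pending_work(void)		/* stands in for lwkt_process_ipiq() */
{
	/* ... process whatever work other threads have queued for us ... */
}

static unsigned int
interlock_enter(void)
{
	unsigned int oactive, nactive;

	for (;;) {
		oactive = atomic_load(&active);
		nactive = oactive | LOCK_BIT;
		/* Only attempt the swap when the lock bit is currently clear. */
		if ((oactive & LOCK_BIT) == 0 &&
		    atomic_compare_exchange_weak(&active, &oactive, nactive))
			break;
		drain_pending_work();	/* keep the current holder unblocked */
		sched_yield();		/* stands in for cpu_pause() */
	}
	return (oactive);		/* snapshot of the mask, lock bit clear */
}

static void
interlock_exit(void)
{
	atomic_fetch_and(&active, ~LOCK_BIT);
}

int
main(void)
{
	(void)interlock_enter();
	interlock_exit();
	return 0;
}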
Example #3
/*
 * IPIs are 'fast' interrupts, so we deal with them directly from our
 * signal handler.
 *
 * WARNING: Signals are not physically disabled here so we have to enter
 * our critical section before bumping gd_intr_nesting_level or another
 * interrupt can come along and get really confused.
 */
static
void
ipisig(int nada, siginfo_t *info, void *ctxp)
{
	globaldata_t gd = mycpu;
	thread_t td = gd->gd_curthread;

	if (td->td_critcount == 0) {
		++td->td_critcount;
		++gd->gd_intr_nesting_level;
		atomic_swap_int(&gd->gd_npoll, 0);
		lwkt_process_ipiq();
		--gd->gd_intr_nesting_level;
		--td->td_critcount;
	} else {
		need_ipiq();
	}
}
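
The td_critcount test is the key part of this handler: IPIs are processed directly only when the thread is not already inside a critical section; otherwise the handler just records that work is pending (need_ipiq()) and lets the critical-section exit path pick it up. A hypothetical userland sketch of that guard, with crit_count, deferred and process_work() as invented stand-ins:

/*
 * Hypothetical sketch of the "defer if already critical" guard in ipisig().
 * crit_count plays the role of td_critcount, deferred of the need_ipiq()
 * state, and process_work() of lwkt_process_ipiq().
 */
#include <signal.h>
#include <string.h>
#include <unistd.h>

static volatile sig_atomic_t crit_count;	/* like td_critcount */
static volatile sig_atomic_t deferred;		/* like the need_ipiq() state */

static void
process_work(void)
{
	/* async-signal-safe stand-in for lwkt_process_ipiq() */
	(void)write(STDOUT_FILENO, "work\n", 5);
}

static void
handler(int sig, siginfo_t *info, void *ctx)
{
	(void)sig; (void)info; (void)ctx;

	if (crit_count == 0) {
		++crit_count;		/* enter a critical section */
		process_work();
		--crit_count;
	} else {
		deferred = 1;		/* let the critical-section exit drain it */
	}
}

int
main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGUSR1, &sa, NULL);

	raise(SIGUSR1);			/* delivered with crit_count == 0 */

	++crit_count;			/* simulate being in a critical section */
	raise(SIGUSR1);			/* this delivery is deferred */
	--crit_count;
	if (deferred)
		process_work();		/* drain the deferred work */
	return 0;
}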
Example #4
/*
 * IPIs are 'fast' interrupts, so we deal with them directly from our
 * signal handler.
 *
 * WARNING: Signals are not physically disabled here so we have to enter
 * our critical section before bumping gd_intr_nesting_level or another
 * interrupt can come along and get really confused.
 */
static
void
ipisig(int nada, siginfo_t *info, void *ctxp)
{
	globaldata_t gd = mycpu;
	thread_t td = gd->gd_curthread;
	int save;

	save = errno;
	if (td->td_critcount == 0) {
		crit_enter_raw(td);
		++gd->gd_cnt.v_ipi;
		++gd->gd_intr_nesting_level;
		atomic_swap_int(&gd->gd_npoll, 0);
		lwkt_process_ipiq();
		--gd->gd_intr_nesting_level;
		crit_exit_raw(td);
	} else {
		need_ipiq();
	}
	errno = save;
}
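
Compared with the previous version, this one also saves and restores errno, since anything called while draining the IPI queue may clobber the errno of the interrupted code. A tiny hypothetical sketch of that save/restore wrapper, using invented names:

/*
 * Hypothetical sketch: preserve errno across a signal handler that may
 * make calls which modify it, mirroring the save/restore in ipisig().
 */
#include <errno.h>
#include <signal.h>
#include <unistd.h>

static void
do_pending_work(void)			/* stand-in for lwkt_process_ipiq() */
{
	(void)write(STDOUT_FILENO, "work\n", 5);	/* may set errno */
}

static void
handler(int sig)
{
	int save = errno;		/* like 'save = errno' in ipisig() */

	(void)sig;
	do_pending_work();
	errno = save;			/* the interrupted code sees errno intact */
}

int
main(void)
{
	signal(SIGUSR1, handler);
	raise(SIGUSR1);
	return 0;
}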
Example #5
/*
 * Add a (pmap, va) pair to the invalidation list and protect access
 * as appropriate.
 *
 * CPULOCK_EXCL is used to interlock thread switchins
 */
void
pmap_inval_interlock(pmap_inval_info_t info, pmap_t pmap, vm_offset_t va)
{
    cpulock_t olock;
    cpulock_t nlock;

    DEBUG_PUSH_INFO("pmap_inval_interlock");
    for (;;) {
	olock = pmap->pm_active_lock & ~CPULOCK_EXCL;
	nlock = olock | CPULOCK_EXCL;
	if (atomic_cmpset_int(&pmap->pm_active_lock, olock, nlock))
		break;
	lwkt_process_ipiq();
	cpu_pause();
    }
    DEBUG_POP_INFO();
    KKASSERT((info->pir_flags & PIRF_CPUSYNC) == 0);
    info->pir_va = va;
    info->pir_flags = PIRF_CPUSYNC;
    lwkt_cpusync_init(&info->pir_cpusync, pmap->pm_active,
		      pmap_inval_callback, info);
    lwkt_cpusync_interlock(&info->pir_cpusync);
}
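
Note the trick in the loop above: olock is the observed lock word with CPULOCK_EXCL stripped and nlock is the same word with it set, so the compare-and-swap can only succeed while nobody holds the bit; no separate bit test is needed. A hypothetical userland sketch of that mask-then-CAS idiom, with invented names:

/*
 * Hypothetical sketch of the mask-then-CAS trick above: by using the
 * observed word with the EXCL bit stripped as the "expected" value, the
 * compare-and-swap can only succeed while nobody holds the bit.
 */
#include <sched.h>
#include <stdatomic.h>

#define EXCL		0x80000000U	/* stands in for CPULOCK_EXCL */

static _Atomic unsigned int lockword;	/* stands in for pm_active_lock */

static void
excl_acquire(void)
{
	unsigned int olock, nlock;

	for (;;) {
		olock = atomic_load(&lockword) & ~EXCL;	/* expected: bit clear */
		nlock = olock | EXCL;			/* desired: bit set */
		if (atomic_compare_exchange_strong(&lockword, &olock, nlock))
			break;
		sched_yield();		/* the kernel drains IPIs here instead */
	}
}

static void
excl_release(void)
{
	atomic_fetch_and(&lockword, ~EXCL);
}

int
main(void)
{
	excl_acquire();
	excl_release();
	return 0;
}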
Example #6
static void
pmap_inval_init(pmap_t pmap)
{
    cpulock_t olock;
    cpulock_t nlock;

    crit_enter_id("inval");

    if (pmap != &kernel_pmap) {
        for (;;) {
            olock = pmap->pm_active_lock;
            cpu_ccfence();
            nlock = olock | CPULOCK_EXCL;
            if (olock != nlock &&
                    atomic_cmpset_int(&pmap->pm_active_lock,
                                      olock, nlock)) {
                break;
            }
            lwkt_process_ipiq();
            cpu_pause();
        }
        atomic_add_acq_long(&pmap->pm_invgen, 1);
    }
}
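
Besides taking the exclusive bit, pmap_inval_init() bumps pm_invgen. The comment in the last example below suggests the counter is consumed by code re-entering later, which flushes stale state if the generation moved while it was away; the consumer side sketched here is an assumption for illustration, and all names are invented.

/*
 * Hypothetical sketch: a generation counter bumped by the invalidation
 * path (like pm_invgen) and checked by re-entering code, which flushes
 * if the generation moved while it was away.  Names are illustrative.
 */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long invgen;	/* bumped under the exclusive bit */

static void
inval_pass(void)
{
	/* ...acquire the exclusive bit as in pmap_inval_init()... */
	atomic_fetch_add(&invgen, 1);
	/* ...do the invalidation work, release the bit... */
}

static void
reenter(unsigned long *last_gen)
{
	unsigned long cur = atomic_load(&invgen);

	if (cur != *last_gen) {
		printf("flush stale state\n");	/* e.g. INVEPT in the vkernel case */
		*last_gen = cur;
	}
}

int
main(void)
{
	unsigned long seen = atomic_load(&invgen);

	inval_pass();
	reenter(&seen);
	return 0;
}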
Example #7
/*
 * Stop a running timer and ensure that any running callout completes before
 * returning.  If the timer is running on another cpu this function may block
 * to interlock against the callout.  If the callout is currently executing
 * or blocked in another thread this function may also block to interlock
 * against the callout.
 *
 * The caller must be careful to avoid deadlocks, either by using
 * callout_init_lk() (which uses the lockmgr lock cancelation feature),
 * by using tokens and dealing with breaks in the serialization, or by
 * using the lockmgr lock cancelation feature yourself in the callout
 * callback function.
 *
 * callout_stop() returns non-zero if the callout was pending.
 */
static int
_callout_stop(struct callout *c, int issync)
{
	globaldata_t gd = mycpu;
	globaldata_t tgd;
	softclock_pcpu_t sc;
	int flags;
	int nflags;
	int rc;
	int cpuid;

#ifdef INVARIANTS
	if ((c->c_flags & CALLOUT_DID_INIT) == 0) {
		callout_init(c);
		kprintf(
		    "callout_stop(%p) from %p: callout was not initialized\n",
		    c, ((int **)&c)[-1]);
		print_backtrace(-1);
	}
#endif
	crit_enter_gd(gd);

	/*
	 * Fast path operations:
	 *
	 * If ARMED and owned by our cpu, or not ARMED, and other simple
	 * conditions are met, we can just clear ACTIVE and EXECUTED
	 * and we are done.
	 */
	for (;;) {
		flags = c->c_flags;
		cpu_ccfence();

		cpuid = CALLOUT_FLAGS_TO_CPU(flags);

		/*
		 * Can't handle an armed callout in the fast path if it is
		 * not on the current cpu.  We must atomically increment the
		 * IPI count for the IPI we intend to send and break out of
		 * the fast path to enter the slow path.
		 */
		if (flags & CALLOUT_ARMED) {
			if (gd->gd_cpuid != cpuid) {
				nflags = flags + 1;
				if (atomic_cmpset_int(&c->c_flags,
						      flags, nflags)) {
					/* break to slow path */
					break;
				}
				continue;	/* retry */
			}
		} else {
			cpuid = gd->gd_cpuid;
			KKASSERT((flags & CALLOUT_IPI_MASK) == 0);
			KKASSERT((flags & CALLOUT_PENDING) == 0);
		}

		/*
		 * Process pending IPIs and retry (only if not called from
		 * an IPI).
		 */
		if (flags & CALLOUT_IPI_MASK) {
			lwkt_process_ipiq();
			continue;	/* retry */
		}

		/*
		 * Transition to the stopped state, recover the EXECUTED
		 * status.  If pending we cannot clear ARMED until after
		 * we have removed (c) from the callwheel.
		 *
		 * NOTE: The callout might already not be armed but in this
		 *	 case it should also not be pending.
		 */
		nflags = flags & ~(CALLOUT_ACTIVE |
				   CALLOUT_EXECUTED |
				   CALLOUT_WAITING |
				   CALLOUT_PENDING);

		/* NOTE: IPI_MASK already tested */
		if ((flags & CALLOUT_PENDING) == 0)
			nflags &= ~CALLOUT_ARMED;
		if (atomic_cmpset_int(&c->c_flags, flags, nflags)) {
			/*
			 * Can only remove from callwheel if currently
			 * pending.
			 */
			if (flags & CALLOUT_PENDING) {
				sc = &softclock_pcpu_ary[gd->gd_cpuid];
				if (sc->next == c)
					sc->next = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(
					&sc->callwheel[c->c_time & cwheelmask],
					c,
					c_links.tqe);
				c->c_func = NULL;

				/*
				 * NOTE: Can't clear ARMED until we have
				 *	 physically removed (c) from the
				 *	 callwheel.
				 *
				 * NOTE: WAITING bit race exists when doing
				 *	 unconditional bit clears.
				 */
				callout_maybe_clear_armed(c);
				if (c->c_flags & CALLOUT_WAITING)
					flags |= CALLOUT_WAITING;
			}

			/*
			 * ARMED has been cleared at this point and (c)
			 * might now be stale.  Only good for wakeup()s.
			 */
			if (flags & CALLOUT_WAITING)
				wakeup(c);

			goto skip_slow;
		}
		/* retry */
	}

	/*
	 * Slow path (and not called via an IPI).
	 *
	 * When ARMED to a different cpu the stop must be processed on that
	 * cpu.  Issue the IPI and wait for completion.  We have already
	 * incremented the IPI count.
	 */
	tgd = globaldata_find(cpuid);
	lwkt_send_ipiq3(tgd, callout_stop_ipi, c, issync);

	for (;;) {
		int flags;
		int nflags;

		flags = c->c_flags;
		cpu_ccfence();
		if ((flags & CALLOUT_IPI_MASK) == 0)	/* fast path */
			break;
		nflags = flags | CALLOUT_WAITING;
		tsleep_interlock(c, 0);
		if (atomic_cmpset_int(&c->c_flags, flags, nflags)) {
			tsleep(c, PINTERLOCKED, "cstp1", 0);
		}
	}

skip_slow:

	/*
	 * If (issync) we must also wait for any in-progress callbacks to
	 * complete, unless the stop is being executed from the callback
	 * itself.  The EXECUTED flag is set prior to the callback
	 * being made so our existing flags status already has it.
	 *
	 * If auto-lock mode is being used, this is where we cancel any
	 * blocked lock that is potentially preventing the target cpu
	 * from completing the callback.
	 */
	while (issync) {
		intptr_t *runp;
		intptr_t runco;

		sc = &softclock_pcpu_ary[cpuid];
		if (gd->gd_curthread == &sc->thread)	/* stop from cb */
			break;
		runp = &sc->running;
		runco = *runp;
		cpu_ccfence();
		if ((runco & ~(intptr_t)1) != (intptr_t)c)
			break;
		if (c->c_flags & CALLOUT_AUTOLOCK)
			lockmgr(c->c_lk, LK_CANCEL_BEG);
		tsleep_interlock(c, 0);
		if (atomic_cmpset_long(runp, runco, runco | 1))
			tsleep(c, PINTERLOCKED, "cstp3", 0);
		if (c->c_flags & CALLOUT_AUTOLOCK)
			lockmgr(c->c_lk, LK_CANCEL_END);
	}

	crit_exit_gd(gd);
	rc = (flags & CALLOUT_EXECUTED) != 0;

	return rc;
}
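
Every state transition in _callout_stop() goes through the same lock-free idiom: snapshot c_flags, compute the desired nflags, attempt atomic_cmpset_int(), and retry if another cpu raced in; the IPI count and the cpu owning the callout are packed into the same word. A stripped-down hypothetical userland sketch of that idiom follows; the bit names and layout are invented, not the real CALLOUT_* definitions.

/*
 * Hypothetical sketch of the lock-free flag-word technique _callout_stop()
 * uses: several state bits plus a small counter live in one atomic int,
 * and every transition is a read / compute / compare-and-swap loop that
 * retries if another cpu changed the word in between.  The bit layout and
 * names below are invented for illustration.
 */
#include <stdatomic.h>
#include <stdbool.h>

#define F_ACTIVE	0x80000000U
#define F_PENDING	0x40000000U
#define F_WAITING	0x20000000U
#define F_CNTMASK	0x000000FFU	/* like CALLOUT_IPI_MASK */

static _Atomic unsigned int cflags;

/*
 * Try the "fast path": if no remote work is outstanding (count == 0),
 * atomically clear the state bits in one step.  Otherwise bump the
 * counter to account for the message we would have to send and report
 * that the slow path is needed.
 */
static bool
stop_fast(void)
{
	unsigned int flags, nflags;

	for (;;) {
		flags = atomic_load(&cflags);
		if (flags & F_CNTMASK) {
			nflags = flags + 1;	/* reserve a slot, go slow */
			if (atomic_compare_exchange_strong(&cflags, &flags,
							   nflags))
				return false;
			continue;		/* lost a race, retry */
		}
		nflags = flags & ~(F_ACTIVE | F_PENDING | F_WAITING);
		if (atomic_compare_exchange_strong(&cflags, &flags, nflags))
			return true;		/* stopped on the fast path */
		/* lost a race, retry */
	}
}

int
main(void)
{
	atomic_store(&cflags, F_ACTIVE | F_PENDING);
	return stop_fast() ? 0 : 1;
}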
Example #8
int
sys_vmm_guest_sync_addr(struct vmm_guest_sync_addr_args *uap)
{
    int error = 0;
    cpulock_t olock;
    cpulock_t nlock;
    cpumask_t mask;
    long val;
    struct proc *p = curproc;

    if (p->p_vmm == NULL)
        return ENOSYS;

    crit_enter_id("vmm_inval");

    /*
     * Acquire CPULOCK_EXCL, spin while we wait.  This forces any other
     * cpu trying to use related VMMs to wait for us.
     */
    KKASSERT(CPUMASK_TESTMASK(p->p_vmm_cpumask, mycpu->gd_cpumask) == 0);
    for (;;) {
        olock = p->p_vmm_cpulock & ~CPULOCK_EXCL;
        cpu_ccfence();
        nlock = olock | CPULOCK_EXCL;
        if (atomic_cmpset_int(&p->p_vmm_cpulock, olock, nlock))
            break;
        lwkt_process_ipiq();
        cpu_pause();
    }

    /*
     * Wait for other cpus to exit VMM mode (for this vkernel).  No
     * new cpus will enter VMM mode while we hold the lock.  New waiters
     * may turn up, though, so the wakeup() later on has to be
     * unconditional.
     *
     * We must test on p_vmm_cpulock's counter, not the mask, because
     * VMM entries will set the mask bit unconditionally first
     * (interlocking our IPI below) and then conditionally bump the
     * counter.
     */
    if (olock & CPULOCK_CNTMASK) {
        mask = p->p_vmm_cpumask;
        CPUMASK_ANDMASK(mask, mycpu->gd_other_cpus);
        lwkt_send_ipiq_mask(mask, vmm_exit_vmm, NULL);
        while (p->p_vmm_cpulock & CPULOCK_CNTMASK) {
            lwkt_process_ipiq();
            cpu_pause();
        }
    }

#ifndef _KERNEL_VIRTUAL
    /*
     * Ensure that any new entries into VMM mode using
     * VMMs managed under this process will issue an
     * INVEPT before resuming.
     */
    atomic_add_acq_long(&p->p_vmspace->vm_pmap.pm_invgen, 1);
#endif

    /*
     * Make the requested modification, wakeup any waiters.
     */
    if (uap->srcaddr) {
        copyin(uap->srcaddr, &val, sizeof(long));
        copyout(&val, uap->dstaddr, sizeof(long));
    }

    /*
     * VMMs on remote cpus will not be re-entered until we
     * clear the lock.
     */
    atomic_clear_int(&p->p_vmm_cpulock, CPULOCK_EXCL);
#if 0
    wakeup(&p->p_vmm_cpulock);
#endif

    crit_exit_id("vmm_inval");

    return error;
}
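
The lock word here packs an exclusive bit (CPULOCK_EXCL) and a count of cpus currently inside VMM mode (CPULOCK_CNTMASK): entries bump the count only while the bit is clear, and the synchronizer takes the bit, waits for the count to drain, makes its update, then releases. The following is a hypothetical userland sketch of that layout using C11 atomics and pthreads, not the vkernel implementation; all names are invented.

/*
 * Hypothetical sketch of the lock-word layout used above: an exclusive
 * bit in the high bits and a count of active entries in the low bits
 * (like CPULOCK_EXCL / CPULOCK_CNTMASK).  The synchronizer takes the
 * bit, waits for the count to drain, does its update, then releases.
 */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>

#define EXCL		0x80000000U	/* like CPULOCK_EXCL */
#define CNTMASK		0x0FFFFFFFU	/* like CPULOCK_CNTMASK */

static _Atomic unsigned int vlock;	/* like p_vmm_cpulock */
static _Atomic long shared_val;		/* the datum being synchronized */

static void
vmm_enter(void)				/* what an entry into "VMM mode" does */
{
	unsigned int o;

	for (;;) {
		o = atomic_load(&vlock) & ~EXCL;	/* expect EXCL clear */
		if (atomic_compare_exchange_weak(&vlock, &o, o + 1))
			break;				/* count bumped */
		sched_yield();				/* EXCL held: wait */
	}
}

static void
vmm_exit(void)
{
	atomic_fetch_sub(&vlock, 1);
}

static void *
worker(void *arg)
{
	(void)arg;
	vmm_enter();
	atomic_fetch_add(&shared_val, 1);
	vmm_exit();
	return NULL;
}

static void
sync_update(long newval)		/* the synchronizer's side */
{
	unsigned int o;

	for (;;) {
		o = atomic_load(&vlock) & ~EXCL;
		if (atomic_compare_exchange_weak(&vlock, &o, o | EXCL))
			break;			/* EXCL acquired */
		sched_yield();
	}
	while (atomic_load(&vlock) & CNTMASK)	/* wait for entries to drain */
		sched_yield();
	atomic_store(&shared_val, newval);	/* the protected modification */
	atomic_fetch_and(&vlock, ~EXCL);	/* let entries resume */
}

int
main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	sync_update(42);
	pthread_join(t, NULL);
	return 0;
}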