Example #1
static int
acpi_timer_test(void)
{
    uint32_t	last, this;
    int		min, max, max2, n, delta;
    register_t	s;

    min = INT32_MAX;
    max = max2 = 0;

    /* Test the timer with interrupts disabled to get accurate results. */
#if defined(__i386__)
    s = read_eflags();
#elif defined(__x86_64__)
    s = read_rflags();
#else
#error "no read_eflags"
#endif
    cpu_disable_intr();
    AcpiGetTimer(&last);
    for (n = 0; n < 2000; n++) {
	AcpiGetTimer(&this);
	delta = acpi_TimerDelta(this, last);
	if (delta > max) {
	    max2 = max;
	    max = delta;
	} else if (delta > max2) {
	    max2 = delta;
	}
	if (delta < min)
	    min = delta;
	last = this;
    }
#if defined(__i386__)
    write_eflags(s);
#elif defined(__x86_64__)
    write_rflags(s);
#else
#error "no write_eflags"
#endif

    delta = max2 - min;
    if ((max - min > 8 || delta > 3) && vmm_guest == VMM_GUEST_NONE)
	n = 0;
    else if (min < 0 || max == 0 || max2 == 0)
	n = 0;
    else
	n = 1;
    if (bootverbose) {
	kprintf("ACPI timer looks %s min = %d, max = %d, width = %d\n",
		n ? "GOOD" : "BAD ",
		min, max, max - min);
    }

    return (n);
}
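
Every example on this page leans on the same interrupt-gating idiom: capture the CPU flags word, disable interrupts, do the timing-sensitive or hardware-programming work, then restore the saved flags so the interrupt-enable state returns to whatever it was beforehand. A minimal sketch of that pattern, assuming x86_64 and the DragonFly primitives used above:

static void
with_intr_disabled(void (*work)(void))
{
	register_t flags;

	flags = read_rflags();	/* capture RFLAGS, including IF */
	cpu_disable_intr();	/* cli: block interrupts on this cpu */

	work();			/* timing-sensitive or hardware work */

	write_rflags(flags);	/* restore IF to its prior state */
}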
Example #2
static void
ioapic_abi_intr_teardown(int intr)
{
	const struct ioapic_irqmap *map;
	int vector, select;
	uint32_t value;
	register_t ef;

	KASSERT(intr >= 0 && intr < IOAPIC_HWI_VECTORS,
	    ("ioapic teardown, invalid irq %d\n", intr));

	map = &ioapic_irqmaps[mycpuid][intr];
	KASSERT(IOAPIC_IMT_ISHWI(map),
	    ("ioapic teardown, not hwi irq %d, type %d, cpu%d",
	     intr, map->im_type, mycpuid));
	if (map->im_type != IOAPIC_IMT_LEGACY)
		return;

	KASSERT(ioapic_irqs[intr].io_addr != NULL,
	    ("ioapic teardown, no GSI information, irq %d\n", intr));

	ef = read_rflags();
	cpu_disable_intr();

	/*
	 * Tear down an interrupt vector.  The vector should already be
	 * installed in the cpu's IDT, but make sure.
	 */
	IOAPIC_INTRDIS(intr);

	vector = IDT_OFFSET + intr;

	/*
	 * In order to avoid losing an EOI for a level interrupt, which
	 * is vector based, make sure that the IO APIC is programmed for
	 * edge-triggering first, then reprogrammed with the new vector.
	 * This should clear the IRR bit.
	 */
	imen_lock();

	select = ioapic_irqs[intr].io_idx;
	value = ioapic_read(ioapic_irqs[intr].io_addr, select);

	ioapic_write(ioapic_irqs[intr].io_addr, select,
	    (value & ~APIC_TRIGMOD_MASK));
	ioapic_write(ioapic_irqs[intr].io_addr, select,
	    (value & ~IOART_INTVEC) | vector);

	imen_unlock();

	write_rflags(ef);
}
Example #3
static int
acpi_timer_test(void)
{
    uint32_t	last, this;
    int		min, max, n, delta;
    register_t	s;

    min = 10000000;
    max = 0;

    /* Test the timer with interrupts disabled to get accurate results. */
#if defined(__i386__)
    s = read_eflags();
#elif defined(__x86_64__)
    s = read_rflags();
#else
#error "no read_eflags"
#endif
    cpu_disable_intr();
    last = acpi_timer_read();
    for (n = 0; n < 2000; n++) {
	this = acpi_timer_read();
	delta = acpi_TimerDelta(this, last);
	if (delta > max)
	    max = delta;
	else if (delta < min)
	    min = delta;
	last = this;
    }
#if defined(__i386__)
    write_eflags(s);
#elif defined(__x86_64__)
    write_rflags(s);
#else
#error "no write_eflags"
#endif

    if (max - min > 2)
	n = 0;
    else if (min < 0 || max == 0)
	n = 0;
    else
	n = 1;
    if (bootverbose) {
	kprintf("ACPI timer looks %s min = %d, max = %d, width = %d\n",
		n ? "GOOD" : "BAD ",
		min, max, max - min);
    }

    return (n);
}
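
Both versions of acpi_timer_test() rely on acpi_TimerDelta() to cope with counter wraparound: the ACPI power-management timer is a free-running 24-bit counter (32-bit on some hardware), so a raw this - last would go wildly wrong across a wrap. A hypothetical sketch of a wrap-aware delta for the 24-bit case; the real acpi_TimerDelta() must also consult the FADT flag that advertises a 32-bit timer:

static uint32_t
timer_delta_24(uint32_t end, uint32_t start)
{
	if (end < start)
		end += 0x01000000;	/* counter wrapped at 2^24 */
	return (end - start);
}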
Example #4
static void
ioapic_abi_intr_setup(int intr, int flags)
{
	const struct ioapic_irqmap *map;
	int vector, select;
	uint32_t value;
	register_t ef;

	KASSERT(intr >= 0 && intr < IOAPIC_HWI_VECTORS,
	    ("ioapic setup, invalid irq %d", intr));

	map = &ioapic_irqmaps[mycpuid][intr];
	KASSERT(IOAPIC_IMT_ISHWI(map),
	    ("ioapic setup, not hwi irq %d, type %d, cpu%d",
	     intr, map->im_type, mycpuid));
	if (map->im_type != IOAPIC_IMT_LEGACY)
		return;

	KASSERT(ioapic_irqs[intr].io_addr != NULL,
	    ("ioapic setup, no GSI information, irq %d", intr));

	ef = read_rflags();
	cpu_disable_intr();

	vector = IDT_OFFSET + intr;

	/*
	 * Now reprogram the vector in the IO APIC.  In order to avoid
	 * losing an EOI for a level interrupt, which is vector based,
	 * make sure that the IO APIC is programmed for edge-triggering
	 * first, then reprogrammed with the new vector.  This should
	 * clear the IRR bit.
	 */
	imen_lock();

	select = ioapic_irqs[intr].io_idx;
	value = ioapic_read(ioapic_irqs[intr].io_addr, select);
	value |= IOART_INTMSET;

	ioapic_write(ioapic_irqs[intr].io_addr, select,
	    (value & ~APIC_TRIGMOD_MASK));
	ioapic_write(ioapic_irqs[intr].io_addr, select,
	    (value & ~IOART_INTVEC) | vector);

	imen_unlock();

	IOAPIC_INTREN(intr);

	write_rflags(ef);
}
Example #5
void
icu_definit(void)
{
	register_t ef;

	KKASSERT(MachIntrABI.type == MACHINTR_ICU);

	ef = read_rflags();
	cpu_disable_intr();

	/* Leave interrupts masked */
	outb(IO_ICU1 + ICU_IMR_OFFSET, 0xff);
	outb(IO_ICU2 + ICU_IMR_OFFSET, 0xff);

	MachIntrABI.setdefault();
	icu_init();

	write_rflags(ef);
}
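
Writing 0xff to each 8259's interrupt mask register leaves all sixteen legacy IRQ lines masked while the defaults are installed. Since reading an 8259A's data port returns the IMR, a hypothetical sanity check (not in the original) could confirm the masks took hold:

	KKASSERT(inb(IO_ICU1 + ICU_IMR_OFFSET) == 0xff);
	KKASSERT(inb(IO_ICU2 + ICU_IMR_OFFSET) == 0xff);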
Example #6
void
icu_reinit_noioapic(void)
{
	register_t ef;

	KKASSERT(MachIntrABI.type == MACHINTR_ICU);
	KKASSERT(ioapic_enable == 0);

	crit_enter();
	ef = read_rflags();
	cpu_disable_intr();

	/* Leave interrupts masked */
	outb(IO_ICU1 + ICU_IMR_OFFSET, 0xff);
	outb(IO_ICU2 + ICU_IMR_OFFSET, 0xff);

	icu_init();
	MachIntrABI.stabilize();

	write_rflags(ef);

	MachIntrABI.cleanup();
	crit_exit();
}
Example #7
/*
 * API function - invalidate the pte at (va) and replace *ptep with npte
 * atomically only if *ptep equals opte, across the pmap's active cpus.
 *
 * Returns 1 on success, 0 on failure (caller typically retries).
 */
int
pmap_inval_smp_cmpset(pmap_t pmap, vm_offset_t va, pt_entry_t *ptep,
                      pt_entry_t opte, pt_entry_t npte)
{
    globaldata_t gd = mycpu;
    pmap_inval_info_t *info;
    int success;
    int cpu = gd->gd_cpuid;
    cpumask_t tmpmask;
    unsigned long rflags;

    /*
     * Initialize invalidation for pmap and enter critical section.
     */
    if (pmap == NULL)
        pmap = &kernel_pmap;
    pmap_inval_init(pmap);

    /*
     * Shortcut single-cpu case if possible.
     */
    if (CPUMASK_CMPMASKEQ(pmap->pm_active, gd->gd_cpumask)) {
        if (atomic_cmpset_long(ptep, opte, npte)) {
            if (va == (vm_offset_t)-1)
                cpu_invltlb();
            else
                cpu_invlpg((void *)va);
            pmap_inval_done(pmap);
            return 1;
        } else {
            pmap_inval_done(pmap);
            return 0;
        }
    }

    /*
     * We need a critical section to prevent getting preempted while
     * we set up our command.  A preemption might execute its own
     * pmap_inval*() command and create confusion below.
     */
    info = &invinfo[cpu];
    info->tsc_target = rdtsc() + (tsc_frequency * LOOPRECOVER_TIMEOUT1);

    /*
     * We must wait for other cpus which may still be finishing
     * up a prior operation.
     */
    while (CPUMASK_TESTNZERO(info->done)) {
#ifdef LOOPRECOVER
        if (loopwdog(info)) {
            info->failed = 1;
            loopdebug("B", info);
            /* XXX recover from possible bug */
            CPUMASK_ASSZERO(info->done);
        }
#endif
        cpu_pause();
    }
    KKASSERT(info->mode == INVDONE);

    /*
     * Must set our cpu in the invalidation scan mask before
     * any possibility of [partial] execution (remember, XINVLTLB
     * can interrupt a critical section).
     */
    ATOMIC_CPUMASK_ORBIT(smp_invmask, cpu);

    info->va = va;
    info->npgs = 1;			/* unused */
    info->ptep = ptep;
    info->npte = npte;
    info->opte = opte;
#ifdef LOOPRECOVER
    info->failed = 0;
#endif
    info->mode = INVCMPSET;
    info->success = 0;

    tmpmask = pmap->pm_active;	/* volatile */
    cpu_ccfence();
    CPUMASK_ANDMASK(tmpmask, smp_active_mask);
    CPUMASK_ORBIT(tmpmask, cpu);
    info->mask = tmpmask;

    /*
     * The command may start executing the moment 'done' is initialized,
     * so disable interrupts on the current cpu to keep the 'done' field
     * from changing (other cpus can't clear their done bits until the
     * originating cpu clears its mask bit).
     */
#ifdef LOOPRECOVER
    info->sigmask = tmpmask;
    CHECKSIGMASK(info);
#endif
    cpu_sfence();
    rflags = read_rflags();
    cpu_disable_intr();

    ATOMIC_CPUMASK_COPY(info->done, tmpmask);

    /*
     * Pass our copy of the done bits (so they don't change out from
     * under us) to generate the Xinvltlb interrupt on the targets.
     */
    smp_invlpg(&tmpmask);
    success = info->success;
    KKASSERT(info->mode == INVDONE);

    ATOMIC_CPUMASK_NANDBIT(smp_invmask, cpu);
    write_rflags(rflags);
    pmap_inval_done(pmap);

    return success;
}
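
The header comment spells out the contract: 1 on success, 0 when *ptep no longer equals opte, with the caller expected to retry. A hypothetical caller sketch; the PTE bit being set is made up for illustration:

	pt_entry_t opte, npte;

	do {
		opte = *ptep;		/* re-sample the current PTE */
		npte = opte | 0x2;	/* hypothetical: set the x86 R/W bit */
	} while (pmap_inval_smp_cmpset(pmap, va, ptep, opte, npte) == 0);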
Example #8
/*
 * Invalidate the specified va across all cpus associated with the pmap.
 * If va == (vm_offset_t)-1, we invltlb() instead of invlpg().  The operation
 * will be done fully synchronously with storing npte into *ptep and returning
 * opte.
 *
 * If ptep is NULL the operation will execute semi-synchronously.
 * ptep must be NULL if npgs > 1.
 */
pt_entry_t
pmap_inval_smp(pmap_t pmap, vm_offset_t va, int npgs,
               pt_entry_t *ptep, pt_entry_t npte)
{
    globaldata_t gd = mycpu;
    pmap_inval_info_t *info;
    pt_entry_t opte = 0;
    int cpu = gd->gd_cpuid;
    cpumask_t tmpmask;
    unsigned long rflags;

    /*
     * Initialize invalidation for pmap and enter critical section.
     */
    if (pmap == NULL)
        pmap = &kernel_pmap;
    pmap_inval_init(pmap);

    /*
     * Shortcut single-cpu case if possible.
     */
    if (CPUMASK_CMPMASKEQ(pmap->pm_active, gd->gd_cpumask)) {
        /*
         * Convert to invltlb if there are too many pages to
         * invlpg on.
         */
        if (npgs > MAX_INVAL_PAGES) {
            npgs = 0;
            va = (vm_offset_t)-1;
        }

        /*
         * Invalidate the specified pages, handle invltlb if requested.
         */
        while (npgs) {
            --npgs;
            if (ptep) {
                opte = atomic_swap_long(ptep, npte);
                ++ptep;
            }
            if (va == (vm_offset_t)-1)
                break;
            cpu_invlpg((void *)va);
            va += PAGE_SIZE;
        }
        if (va == (vm_offset_t)-1)
            cpu_invltlb();
        pmap_inval_done(pmap);

        return opte;
    }

    /*
     * We need a critical section to prevent getting preempted while
     * we set up our command.  A preemption might execute its own
     * pmap_inval*() command and create confusion below.
     *
     * tsc_target is our watchdog timeout that will attempt to recover
     * from a lost IPI.  Set to 1/16 second for now.
     */
    info = &invinfo[cpu];
    info->tsc_target = rdtsc() + (tsc_frequency * LOOPRECOVER_TIMEOUT1);

    /*
     * We must wait for other cpus which may still be finishing up a
     * prior operation that we requested.
     *
     * We do not have to disable interrupts here.  An Xinvltlb can occur
     * at any time (even within a critical section), but it will not
     * act on our command until we set our done bits.
     */
    while (CPUMASK_TESTNZERO(info->done)) {
#ifdef LOOPRECOVER
        if (loopwdog(info)) {
            info->failed = 1;
            loopdebug("A", info);
            /* XXX recover from possible bug */
            CPUMASK_ASSZERO(info->done);
        }
#endif
        cpu_pause();
    }
    KKASSERT(info->mode == INVDONE);

    /*
     * Must set our cpu in the invalidation scan mask before
     * any possibility of [partial] execution (remember, XINVLTLB
     * can interrupt a critical section).
     */
    ATOMIC_CPUMASK_ORBIT(smp_invmask, cpu);

    info->va = va;
    info->npgs = npgs;
    info->ptep = ptep;
    info->npte = npte;
    info->opte = 0;
#ifdef LOOPRECOVER
    info->failed = 0;
#endif
    info->mode = INVSTORE;

    tmpmask = pmap->pm_active;	/* volatile (bits may be cleared) */
    cpu_ccfence();
    CPUMASK_ANDMASK(tmpmask, smp_active_mask);

    /*
     * If ptep is NULL the operation can be semi-synchronous, which means
     * we can improve performance by flagging and removing idle cpus
     * (see the idleinvlclr function in mp_machdep.c).
     *
     * Typically kernel page table operation is semi-synchronous.
     */
    if (ptep == NULL)
        smp_smurf_idleinvlclr(&tmpmask);
    CPUMASK_ORBIT(tmpmask, cpu);
    info->mask = tmpmask;

    /*
     * The command may start executing the moment 'done' is initialized,
     * so disable interrupts on the current cpu to keep the 'done' field
     * from changing (other cpus can't clear their done bits until the
     * originating cpu clears its mask bit, but they CAN start clearing
     * their mask bits).
     */
#ifdef LOOPRECOVER
    info->sigmask = tmpmask;
    CHECKSIGMASK(info);
#endif
    cpu_sfence();
    rflags = read_rflags();
    cpu_disable_intr();

    ATOMIC_CPUMASK_COPY(info->done, tmpmask);
    /* execution can begin here due to races */

    /*
     * Pass our copy of the done bits (so they don't change out from
     * under us) to generate the Xinvltlb interrupt on the targets.
     */
    smp_invlpg(&tmpmask);
    opte = info->opte;
    KKASSERT(info->mode == INVDONE);

    /*
     * Target cpus will be in their loop exiting concurrently with our
     * cleanup.  They will not lose the bitmask they obtained before so
     * we can safely clear this bit.
     */
    ATOMIC_CPUMASK_NANDBIT(smp_invmask, cpu);
    write_rflags(rflags);
    pmap_inval_done(pmap);

    return opte;
}
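
For contrast with the cmpset variant in Example #7, two hypothetical calls exercising the documented modes: a fully synchronous single-PTE replacement that returns the old entry, and a semi-synchronous shootdown with ptep == NULL:

	pt_entry_t opte;

	/* Synchronous: store 0 into one PTE, invalidate va on all active cpus. */
	opte = pmap_inval_smp(pmap, va, 1, ptep, 0);

	/* Semi-synchronous: no PTE store; invalidate the entire TLB. */
	pmap_inval_smp(pmap, (vm_offset_t)-1, 0, NULL, 0);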