Example #1
/*  Put the processor into a state where MTRRs can be safely set  */
void set_mtrr_prepare_save(struct set_mtrr_context *ctxt)
{
    unsigned int cr0;

    /*  Disable interrupts locally  */
    local_irq_save(ctxt->flags);

    if (use_intel() || is_cpu(CYRIX)) {

        /*  Save value of CR4 and clear Page Global Enable (bit 7)  */
        if ( cpu_has_pge ) {
            ctxt->cr4val = read_cr4();
            write_cr4(ctxt->cr4val & ~X86_CR4_PGE);
        }

        /*  Disable and flush caches. Note that wbinvd flushes the TLBs as
            a side-effect  */
        cr0 = read_cr0() | 0x40000000;
        wbinvd();
        write_cr0(cr0);
        wbinvd();

        if (use_intel())
            /*  Save MTRR state */
            rdmsr(MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
        else
            /* Cyrix ARRs - everything else were excluded at the top */
            ctxt->ccr3 = getCx86(CX86_CCR3);
    }
}
Example #2
int
k6_mrset(struct mem_range_softc *sc, struct mem_range_desc *desc, int *arg)
{
	u_int64_t reg;
	u_int32_t mtrr;
	int error, d;

	switch (*arg) {
	case MEMRANGE_SET_UPDATE:
		error = k6_mrmake(desc, &mtrr);
		if (error)
			return error;
		for (d = 0; d < sc->mr_ndesc; d++) {
			if (!sc->mr_desc[d].mr_len) {
				sc->mr_desc[d] = *desc;
				goto out;
			}
			if (sc->mr_desc[d].mr_base == desc->mr_base &&
			    sc->mr_desc[d].mr_len == desc->mr_len)
				return EEXIST;
		}

		return ENOSPC;
	case MEMRANGE_SET_REMOVE:
		mtrr = 0;
		for (d = 0; d < sc->mr_ndesc; d++)
			if (sc->mr_desc[d].mr_base == desc->mr_base &&
			    sc->mr_desc[d].mr_len == desc->mr_len) {
				bzero(&sc->mr_desc[d], sizeof(sc->mr_desc[d]));
				goto out;
			}

		return ENOENT;
	default:
		return EOPNOTSUPP;
	}

out:
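	/* Rewrite the selected 32-bit half of the UWCCR MSR with caches
	 * flushed and interrupts disabled. */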
	disable_intr();
	wbinvd();
	reg = rdmsr(UWCCR);
	reg &= ~((u_int64_t)0xffffffff << (32 * d));
	reg |= (u_int64_t)mtrr << (32 * d);
	wrmsr(UWCCR, reg);
	wbinvd();
	enable_intr();

	return 0;
}
Example #3
/*  Restore the processor after a set_mtrr_prepare  */
void set_mtrr_done(struct set_mtrr_context *ctxt)
{
    if (use_intel() || is_cpu(CYRIX)) {

        /*  Flush caches and TLBs  */
        wbinvd();

        /*  Restore MTRRdefType  */
        if (use_intel())
            /* Intel (P6) standard MTRRs */
            mtrr_wrmsr(MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
        else
            /* Cyrix ARRs - everything else was excluded at the top */
            setCx86(CX86_CCR3, ctxt->ccr3);

        /*  Enable caches  */
        write_cr0(read_cr0() & 0xbfffffff);

        /*  Restore value of CR4  */
        if ( cpu_has_pge )
            write_cr4(ctxt->cr4val);
    }
    /*  Re-enable interrupts locally (if enabled previously)  */
    local_irq_restore(ctxt->flags);
}
Example #4
/*
 * Initialize BBL_CR_CTL3 (Control register 3: used to configure the
 * L2 cache).
 */
static void
init_mendocino(void)
{
#ifdef CPU_PPRO2CELERON
	u_long	eflags;
	u_int64_t	bbl_cr_ctl3;

	eflags = read_eflags();
	disable_intr();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	bbl_cr_ctl3 = rdmsr(MSR_BBL_CR_CTL3);

	/* If the L2 cache is configured, do nothing. */
	if (!(bbl_cr_ctl3 & 1)) {
		bbl_cr_ctl3 = 0x134052bLL;

		/* Set L2 Cache Latency (Default: 5). */
#ifdef	CPU_CELERON_L2_LATENCY
#if CPU_L2_LATENCY > 15
#error invalid CPU_L2_LATENCY.
#endif
		bbl_cr_ctl3 |= CPU_L2_LATENCY << 1;
#else
		bbl_cr_ctl3 |= 5 << 1;
#endif
		wrmsr(MSR_BBL_CR_CTL3, bbl_cr_ctl3);
	}

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));
	write_eflags(eflags);
#endif /* CPU_PPRO2CELERON */
}
Example #5
static int smm_load_handlers(void)
{
	/* All range registers are aligned to 4KiB */
	const uint32_t rmask = ~((1 << 12) - 1);
	const struct pattrs *pattrs = pattrs_get();

	/* Initialize global tracking state. */
	relo_attrs.smbase = (uint32_t)smm_region_start();
	relo_attrs.smrr_base = relo_attrs.smbase | MTRR_TYPE_WRBACK;
	relo_attrs.smrr_mask = ~(smm_region_size() - 1) & rmask;
	relo_attrs.smrr_mask |= MTRR_PHYS_MASK_VALID;

	/* Install handlers. */
	if (install_relocation_handler(pattrs->num_cpus) < 0) {
		printk(BIOS_ERR, "Unable to install SMM relocation handler.\n");
		return -1;
	}

	if (install_permanent_handler(pattrs->num_cpus) < 0) {
		printk(BIOS_ERR, "Unable to install SMM permanent handler.\n");
		return -1;
	}

	/* Ensure the SMM handlers hit DRAM before performing first SMI. */
	wbinvd();

	return 0;
}
Example #6
/*
 * Initialize BBL_CR_CTL3 (Control register 3: used to configure the
 * L2 cache).
 */
static void
init_mendocino(void)
{
#ifdef CPU_PPRO2CELERON
	register_t	saveintr;
	u_int64_t	bbl_cr_ctl3;

	saveintr = intr_disable();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	bbl_cr_ctl3 = rdmsr(MSR_BBL_CR_CTL3);

	/* If the L2 cache is configured, do nothing. */
	if (!(bbl_cr_ctl3 & 1)) {
		bbl_cr_ctl3 = 0x134052bLL;

		/* Set L2 Cache Latency (Default: 5). */
#ifdef	CPU_CELERON_L2_LATENCY
#if CPU_L2_LATENCY > 15
#error invalid CPU_L2_LATENCY.
#endif
		bbl_cr_ctl3 |= CPU_L2_LATENCY << 1;
#else
		bbl_cr_ctl3 |= 5 << 1;
#endif
		wrmsr(MSR_BBL_CR_CTL3, bbl_cr_ctl3);
	}

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));
	intr_restore(saveintr);
#endif /* CPU_PPRO2CELERON */
}
Example #7
void
enable_K6_wt_alloc(void)
{
	quad_t	size;
	u_int64_t	whcr;
	u_long	eflags;

	eflags = read_eflags();
	disable_intr();
	wbinvd();

#ifdef CPU_DISABLE_CACHE
	/*
	 * Certain K6-2 boxes become unstable when write allocation is
	 * enabled.
	 */
	/*
	 * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
	 * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
	 * All other bits in TR12 have no effect on the processor's
	 * operation.  The I/O Trap Restart function (bit 9 of TR12) is
	 * always enabled on the AMD-K6.
	 */
	wrmsr(0x0000000e, (u_int64_t)0x0008);
#endif
	/* Don't assume that memory size is aligned with 4M. */
	if (Maxmem > 0)
	  size = ((Maxmem >> 8) + 3) >> 2;
	else
	  size = 0;
Example #8
static void
fbt_resume(void *arg, dtrace_id_t id, void *parg)
{
	fbt_probe_t *fbt = parg;
#if 0
	dtrace_modctl_t *ctl = fbt->fbtp_ctl;
#endif
	u_long psl;
	u_long cr0;

#if 0	/* XXX TBD */
	ASSERT(ctl->nenabled > 0);

	if ((ctl->loadcnt != fbt->fbtp_loadcnt))
		return;
#endif
	/* Disable interrupts. */
	psl = x86_read_psl();
	x86_disable_intr();

	/* Disable write protection in supervisor mode. */
	cr0 = rcr0();
	lcr0(cr0 & ~CR0_WP);

	for (; fbt != NULL; fbt = fbt->fbtp_next)
		*fbt->fbtp_patchpoint = fbt->fbtp_patchval;

	/* Write back and invalidate cache, flush pipelines. */
	wbinvd();
	x86_flush();
	x86_write_psl(psl);

	/* Re-enable write protection. */
	lcr0(cr0);
}
Example #9
static void powernow_k6_set_cpu_multiplier(unsigned int best_i)
{
	unsigned long outvalue, invalue;
	unsigned long msrval;
	unsigned long cr0;

	/* we now need to transform best_i to the BVC format, see AMD#23446 */

	/*
	 * The processor doesn't respond to inquiry cycles while changing the
	 * frequency, so we must disable cache.
	 */
	local_irq_disable();
	cr0 = read_cr0();
	write_cr0(cr0 | X86_CR0_CD);
	wbinvd();

	outvalue = (1<<12) | (1<<10) | (1<<9) | (index_to_register[best_i]<<5);

	msrval = POWERNOW_IOPORT + 0x1;
	wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
	invalue = inl(POWERNOW_IOPORT + 0x8);
	invalue = invalue & 0x1f;
	outvalue = outvalue | invalue;
	outl(outvalue, (POWERNOW_IOPORT + 0x8));
	msrval = POWERNOW_IOPORT + 0x0;
	wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */

	write_cr0(cr0);
	local_irq_enable();
}
Example #10
static void smm_install(void)
{
	/* Only the first CPU to run this copies the SMM handler; all
	 * later CPUs return immediately.
	 */
	if (smm_handler_copied)
		return;
	smm_handler_copied = 1;

	/* If we're resuming from S3, the SMM code is already in place,
	 * so don't copy it again; that keeps the current SMM state. */

	if (!acpi_is_wakeup_s3()) {
		/* enable the SMM memory window */
		pci_write_config8(dev_find_slot(0, PCI_DEVFN(0, 0)), SMRAM,
					D_OPEN | G_SMRAME | C_BASE_SEG);

		/* copy the real SMM handler */
		memcpy((void *)0xa0000, _binary_smm_start,
			_binary_smm_end - _binary_smm_start);
		wbinvd();
	}

	/* close the SMM memory window and enable normal SMM */
	pci_write_config8(dev_find_slot(0, PCI_DEVFN(0, 0)), SMRAM,
			G_SMRAME | C_BASE_SEG);
}
Example #11
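/*
 * SMP cross-call handler: write back and invalidate the local CPU's
 * caches, then bump the shared wait counter so the initiating CPU can
 * tell that this IPI has completed.
 */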
static void
iv_invlcache(uintptr_t a, uintptr_t b)
{

	wbinvd();
	atomic_add_int(&smp_tlb_wait, 1);
}
Example #12
static void smm_install(void)
{
	device_t dev = PCI_DEV(0, 0, 0);
	device_t qpdev = PCI_DEV(QUICKPATH_BUS, 0, 1);
	u32 smm_base = 0xa0000;
	struct ied_header ied = {
		.signature = "INTEL RSVD",
		.size = IED_SIZE,
		.reserved = {0},
	};

	/* Only the first CPU to run this copies the SMM handler; all
	 * later CPUs return immediately.
	 */
	if (smm_handler_copied)
		return;
	smm_handler_copied = 1;

	/* enable the SMM memory window */
	pci_write_config8(qpdev, QPD0F1_SMRAM, D_OPEN | G_SMRAME | C_BASE_SEG);

#if CONFIG_SMM_TSEG
	smm_base = pci_read_config32(dev, TSEG) & ~1;
#endif

	/* copy the real SMM handler */
	printk(BIOS_DEBUG, "Installing SMM handler to 0x%08x\n", smm_base);
	memcpy((void *)smm_base, &_binary_smm_start,
	       (size_t)(&_binary_smm_end - &_binary_smm_start));

	/* copy the IED header into place */
	if (CONFIG_SMM_TSEG_SIZE > IED_SIZE) {
		/* Top of TSEG region */
		smm_base += CONFIG_SMM_TSEG_SIZE - IED_SIZE;
		printk(BIOS_DEBUG, "Installing IED header to 0x%08x\n",
		       smm_base);
		memcpy((void *)smm_base, &ied, sizeof(ied));
	}
	wbinvd();

	/* close the SMM memory window and enable normal SMM */
	pci_write_config8(qpdev, QPD0F1_SMRAM, G_SMRAME | C_BASE_SEG);
}

void smm_init(void)
{
#if CONFIG_ELOG
	/* Log events from chipset before clearing */
	pch_log_state();
#endif

	/* Put SMM code to 0xa0000 */
	smm_install();

	/* Put relocation code to 0x38000 and relocate SMBASE */
	smm_relocate();

	/* We're done. Make sure SMIs can happen! */
	smi_set_eos();
}
Example #13
static void acpi_dead_idle(void)
{
    struct acpi_processor_power *power;
    struct acpi_processor_cx *cx;
    void *mwait_ptr;

    if ( (power = processor_powers[smp_processor_id()]) == NULL )
        goto default_halt;

    if ( (cx = &power->states[power->count-1]) == NULL )
        goto default_halt;

    mwait_ptr = (void *)&mwait_wakeup(smp_processor_id());

    if ( cx->entry_method == ACPI_CSTATE_EM_FFH )
    {
        /*
         * The cache must be flushed as the last operation before the CPU
         * goes dead; otherwise the CPU may die holding dirty data,
         * breaking cache coherency and leading to strange errors.
         */
        wbinvd();

        while ( 1 )
        {
            /*
             * 1. The CLFLUSH is a workaround for erratum AAI65 for
             * the Xeon 7400 series.
             * 2. The WBINVD is insufficient due to the spurious-wakeup
             * case where we return around the loop.
             * 3. Unlike wbinvd, clflush is a lightweight but non-serializing
             * instruction, hence a memory fence is necessary to make sure
             * all loads/stores are visible before the cache line is
             * flushed.
             */
            mb();
            clflush(mwait_ptr);
            __monitor(mwait_ptr, 0, 0);
            mb();
            __mwait(cx->address, 0);
        }
    }

default_halt:
    wbinvd();
    for ( ; ; )
        halt();
}
Example #14
/*
 * Cyrix 6x86MX (code-named M2)
 *
 * XXX - What should I do here?  Please let me know.
 */
static void
init_6x86MX(void)
{
	u_long	eflags;
	u_char	ccr3, ccr4;

	eflags = read_eflags();
	disable_intr();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	/* Initialize CCR0. */
	write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

	/* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
#else
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
#endif

	/* Initialize CCR2. */
#ifdef CPU_SUSP_HLT
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
#else
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
#endif

	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	/* Initialize CCR4. */
	ccr4 = read_cyrix_reg(CCR4);
	ccr4 &= ~CCR4_IOMASK;
#ifdef CPU_IORT
	write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
#else
	write_cyrix_reg(CCR4, ccr4 | 7);
#endif

	/* Initialize CCR5. */
#ifdef CPU_WT_ALLOC
	write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
#endif

	/* Restore CCR3. */
	write_cyrix_reg(CCR3, ccr3);

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */

	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	write_eflags(eflags);
}
Example #15
static inline void flush_tce(void* tceaddr)
{
	/* a single tce can't cross a cache line */
	if (cpu_has_clflush)
		clflush(tceaddr);
	else
		wbinvd();
}
Example #16
/* flush a tce at 'tceaddr' to main memory */
static inline void flush_tce(void* tceaddr)
{
	/* a single tce can't cross a cache line */
	if (cpu_has_clflush)
		clflush(tceaddr);
	else
		wbinvd();
}
Example #17
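/*
 * Handler for a halt IPI: save the FPU state, mask interrupts and the
 * local APIC, flush caches around clearing CPUF_RUNNING, and then spin
 * in hlt forever.
 */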
void
x86_64_ipi_halt(struct cpu_info *ci)
{
	SCHED_ASSERT_UNLOCKED();
	KASSERT(!__mp_lock_held(&kernel_lock));
	
	fpusave_cpu(ci, 1);
	disable_intr();
	lapic_disable();
	wbinvd();
	ci->ci_flags &= ~CPUF_RUNNING;
	wbinvd();

	for(;;) {
		__asm volatile("hlt");
	}
}
Example #18
static void flush_kernel_map(void *dummy) 
{ 
	/* Could use CLFLUSH here if the CPU supports it (Hammer,P4) */
	if (boot_cpu_data.x86_model >= 4) 
		wbinvd();
	/* Flush all to work around Errata in early athlons regarding 
	 * large page flushing. 
	 */
	__flush_tlb_all(); 	
}
Example #19
static int
fbt_enable(void *arg, dtrace_id_t id, void *parg)
{
	fbt_probe_t *fbt = parg;
#if 0
	dtrace_modctl_t *ctl = fbt->fbtp_ctl;
#endif
	u_long psl;
	u_long cr0;


#if 0	/* XXX TBD */
	ctl->nenabled++;

	/*
	 * Now check that our modctl has the expected load count.  If it
	 * doesn't, this module must have been unloaded and reloaded -- and
	 * we're not going to touch it.
	 */
	if (ctl->loadcnt != fbt->fbtp_loadcnt) {
		if (fbt_verbose) {
			printf("fbt is failing for probe %s "
			    "(module %s reloaded)",
			    fbt->fbtp_name, ctl->filename);
		}

		return;
	}
#endif

	/* Disable interrupts. */
	psl = x86_read_psl();
	x86_disable_intr();

	/* Disable write protection in supervisor mode. */
	cr0 = rcr0();
	lcr0(cr0 & ~CR0_WP);

	for (; fbt != NULL; fbt = fbt->fbtp_next) {
		*fbt->fbtp_patchpoint = fbt->fbtp_patchval;
	}

	/* Write back and invalidate cache, flush pipelines. */
	wbinvd();
	x86_flush();
	x86_write_psl(psl);

	/* Re-enable write protection. */
	lcr0(cr0);

	return 0;
}
Example #20
static void smm_install(void)
{
	/* enable the SMM memory window */
	pci_write_config8(dev_find_slot(0, PCI_DEVFN(0, 0)), SMRAM,
				D_OPEN | G_SMRAME | C_BASE_SEG);

	/* copy the real SMM handler */
	memcpy((void *)0xa0000, &_binary_smm_start, (size_t)&_binary_smm_size);
	wbinvd();

	/* close the SMM memory window and enable normal SMM */
	pci_write_config8(dev_find_slot(0, PCI_DEVFN(0, 0)), SMRAM,
			G_SMRAME | C_BASE_SEG);
}
Example #21
static void smm_install(void)
{
	/* enable the SMM memory window */
	pci_write_config8(pcidev_on_root(0, 0), SMRAM,
				D_OPEN | G_SMRAME | C_BASE_SEG);

	/* copy the real SMM handler */
	memcpy((void *)0xa0000, _binary_smm_start,
		_binary_smm_end - _binary_smm_start);
	wbinvd();

	/* close the SMM memory window and enable normal SMM */
	pci_write_config8(pcidev_on_root(0, 0), SMRAM,
			G_SMRAME | C_BASE_SEG);
}
Example #22
/* We don't actually take CPU down, just spin without interrupts. */
static inline void play_dead(void)
{
	/* This must be done before dead CPU ack */
	cpu_exit_clear();
	wbinvd();
	mb();
	/* Ack it */
	__get_cpu_var(cpu_state) = CPU_DEAD;

	/*
	 * With physical CPU hotplug, we should halt the cpu
	 */
	local_irq_disable();
	while (1)
		halt();
}
Example #23
int
kobj_machdep(kobj_t ko, void *base, size_t size, bool load)
{
	uint64_t where;

	if (load) {
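		/*
		 * Flush the newly loaded module text from the caches:
		 * directly while cold (only one CPU is running), otherwise
		 * on every CPU via a broadcast crosscall.
		 */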
		if (cold) {
			wbinvd();
		} else {
			where = xc_broadcast(0, (xcfunc_t)wbinvd, NULL, NULL);
			xc_wait(where);
		}
	}

	return 0;
}
Example #24
void smm_init(void)
{
	msr_t msr, syscfg_orig, mtrr_aseg_orig;

	/* Back up MSRs for later restore */
	syscfg_orig = rdmsr(SYSCFG_MSR);
	mtrr_aseg_orig = rdmsr(MTRRfix16K_A0000_MSR);

	/* MTRR changes don't like an enabled cache */
	disable_cache();

	msr = syscfg_orig;

	/* Allow changes to MTRR extended attributes */
	msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
	/* turn the extended attributes off until we fix
	 * them so A0000 is routed to memory
	 */
	msr.lo &= ~SYSCFG_MSR_MtrrFixDramEn;
	wrmsr(SYSCFG_MSR, msr);

	/* set DRAM access to 0xa0000 */
	msr.lo = 0x18181818;
	msr.hi = 0x18181818;
	wrmsr(MTRRfix16K_A0000_MSR, msr);

	/* enable the extended features */
	msr = syscfg_orig;
	msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
	msr.lo |= SYSCFG_MSR_MtrrFixDramEn;
	wrmsr(SYSCFG_MSR, msr);

	enable_cache();
	/* copy the real SMM handler */
	memcpy((void *)SMM_BASE, &_binary_smm_start, (size_t)&_binary_smm_size);
	wbinvd();
	disable_cache();

	/* Restore SYSCFG and MTRR */
	wrmsr(SYSCFG_MSR, syscfg_orig);
	wrmsr(MTRRfix16K_A0000_MSR, mtrr_aseg_orig);
	enable_cache();

	/* CPU MSRs are set in CPU init */
}
Example #25
void smp_startup_processors()
{

#define gdt_idx(x) ((x) >> 3)
    /* the kernel code - kernel_space_exec */
    smp_boot_gdt[gdt_idx(X86_KCS)].set_seg(0, ~0, 0, GDT_DESC_TYPE_CODE);
    /* kernel data - linear_kernel_space */
    smp_boot_gdt[gdt_idx(X86_KDS)].set_seg(0, ~0, 0, GDT_DESC_TYPE_DATA);

#if defined(CONFIG_DEBUG_TRACE_SMP)
    printf("smp_start_processors\n");
#endif
    memcpy((dword_t*)SMP_STARTUP_PAGE, (dword_t*)_start_smp, (dword_t)(_end_smp) - (dword_t)(_start_smp));
    //enter_kdebug("after smp-copy");
    wbinvd();

    for (dword_t cpu = 0; cpu < CONFIG_SMP_MAX_CPU; cpu++) 
    {
	// the boot cpu is not necessarily cpu 0
	if (cpu == get_boot_cpu())
	    continue;

	// start only processors which are in the processor map
	if (!(processor_map & (1 << cpu)))
	    continue;

	send_startup_ipi(cpu);

	// wait for the cpu to come online - approx. 100 * 100us = 10ms
	for (int i=0; i<100; i++) {
	    udelay(100);
	    if (is_cpu_online(cpu)) break;
	}
	
	if (!is_cpu_online(cpu)) {
	    printf("CPU %d failed to start\n", cpu);
	}
	else
	{
	    printf("CPU %d is online\n", cpu);
	}
    }
    //enter_kdebug("startup done");
}
Example #26
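/*
 * Check whether the A20 address line is enabled: write distinct values
 * at addresses 0 and 1MB, flush the cache so the stores reach memory,
 * and compare.  If A20 is masked the two addresses alias, so differing
 * values mean A20 is on.
 */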
static int
isa20on(void)
{
	int r;
	ulong o;
	ulong *zp, *mb1p;

	zp = 0;
	mb1p = (ulong *)MB;
	o = *zp;

	*zp = 0x1234;
	*mb1p = 0x8765;
	mb586();
	wbinvd();
	r = *zp != *mb1p;

	*zp = o;
	return r;
}
Example #27
static int
isa20on(void)
{
	int r;
	ulong o;
	ulong *zp, *mb1p;

	zp = (ulong *)KZERO;
	mb1p = (ulong *)(KZERO|MB);
	o = *zp;

	*zp = 0x1234;
	*mb1p = 0x8765;
	coherence();
	wbinvd();
	r = *zp != *mb1p;

	*zp = o;
	return r;
}
Example #28
void
smm_relocate_and_restore(void)
{
    /* init APM status port */
    outb(0x01, PORT_SMI_STATUS);

    /* raise an SMI interrupt */
    outb(0x00, PORT_SMI_CMD);

    /* wait until SMM code executed */
    while (inb(PORT_SMI_STATUS) != 0x00)
        ;

    /* restore original memory content */
    memcpy((void *)BUILD_SMM_INIT_ADDR, (void *)BUILD_SMM_ADDR, BUILD_SMM_SIZE);

    /* copy the SMM code */
    memcpy((void *)BUILD_SMM_ADDR, &smm_code_start
           , &smm_code_end - &smm_code_start);
    wbinvd();
}
Example #29
void
make_bios_readonly_intel(u16 bdf, u32 pam0)
{
    // Flush any pending writes before locking memory.
    wbinvd();

    // Write protect roms from 0xc0000-0xf0000
    int i;
    for (i=0; i<6; i++) {
        u32 mem = BUILD_ROM_START + i * 32*1024;
        u32 pam = pam0 + 1 + i;
        if (RomEnd <= mem + 16*1024) {
            if (RomEnd > mem)
                pci_config_writeb(bdf, pam, 0x31);
            break;
        }
        pci_config_writeb(bdf, pam, 0x11);
    }

    // Write protect 0xf0000-0x100000
    pci_config_writeb(bdf, pam0, 0x10);
}
Example #30
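/* Cross-call helper: flush and invalidate the caches on whichever CPU
 * runs it (e.g. when broadcast with an SMP call-function IPI). */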
static void wbinvd_ipi(void *info)
{
    wbinvd();
}