Example #1
/*
 * Initialize BBL_CR_CTL3 (Control register 3: used to configure the
 * L2 cache).
 */
static void
init_mendocino(void)
{
#ifdef CPU_PPRO2CELERON
	u_long	eflags;
	u_int64_t	bbl_cr_ctl3;

	eflags = read_eflags();
	disable_intr();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	bbl_cr_ctl3 = rdmsr(MSR_BBL_CR_CTL3);

	/* If the L2 cache is configured, do nothing. */
	if (!(bbl_cr_ctl3 & 1)) {
		bbl_cr_ctl3 = 0x134052bLL;

		/* Set L2 Cache Latency (Default: 5). */
#ifdef	CPU_CELERON_L2_LATENCY
#if CPU_L2_LATENCY > 15
#error invalid CPU_L2_LATENCY.
#endif
		bbl_cr_ctl3 |= CPU_L2_LATENCY << 1;
#else
		bbl_cr_ctl3 |= 5 << 1;
#endif
		wrmsr(MSR_BBL_CR_CTL3, bbl_cr_ctl3);
	}

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));
	write_eflags(eflags);
#endif /* CPU_PPRO2CELERON */
}
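Example #1 and its newer counterpart in Example #3 below are two generations of the same FreeBSD routine, and both follow the canonical recipe for reprogramming cache-control MSRs: interrupts off, cache disabled through CR0.CD/CR0.NW, a write-back-and-invalidate, the MSR write, then everything restored in reverse order. A minimal sketch of just that recipe, assuming the same read_eflags()/load_cr0()/wrmsr() helpers the examples use:

static void
write_msr_with_caches_off(u_int msr, u_int64_t value)
{
	u_long	eflags;

	eflags = read_eflags();			/* remember IF */
	disable_intr();
	load_cr0(rcr0() | CR0_CD | CR0_NW);	/* cache off */
	wbinvd();				/* flush dirty lines to memory */
	wrmsr(msr, value);
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* cache back on */
	write_eflags(eflags);			/* restores IF */
}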
Example #2
/*
 * Init the FPU.
 */
void
fpuinit(struct cpu_info *ci)
{
	lcr0(rcr0() & ~(CR0_EM|CR0_TS));
	fninit();
	lcr0(rcr0() | (CR0_TS));
}
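fninit() only works here because CR0.EM and CR0.TS were cleared first; setting TS again afterwards arms lazy FPU switching, where the next FPU instruction raises a device-not-available fault (#NM) and the kernel loads the right context on demand. A hedged sketch of that fault handler's core, with a hypothetical name and a placeholder for the context restore:

void
fpu_dna(struct cpu_info *ci)	/* hypothetical #NM handler sketch */
{
	lcr0(rcr0() & ~CR0_TS);	/* the clts idiom: stop trapping FPU use */
	/* ...restore the faulting process's saved FPU context here... */
}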
Example #3
/*
 * Initialize BBL_CR_CTL3 (Control register 3: used to configure the
 * L2 cache).
 */
static void
init_mendocino(void)
{
#ifdef CPU_PPRO2CELERON
	register_t	saveintr;
	u_int64_t	bbl_cr_ctl3;

	saveintr = intr_disable();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	bbl_cr_ctl3 = rdmsr(MSR_BBL_CR_CTL3);

	/* If the L2 cache is configured, do nothing. */
	if (!(bbl_cr_ctl3 & 1)) {
		bbl_cr_ctl3 = 0x134052bLL;

		/* Set L2 Cache Latency (Default: 5). */
#ifdef	CPU_CELERON_L2_LATENCY
#if CPU_L2_LATENCY > 15
#error invalid CPU_L2_LATENCY.
#endif
		bbl_cr_ctl3 |= CPU_L2_LATENCY << 1;
#else
		bbl_cr_ctl3 |= 5 << 1;
#endif
		wrmsr(MSR_BBL_CR_CTL3, bbl_cr_ctl3);
	}

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));
	intr_restore(saveintr);
#endif /* CPU_PPRO2CELERON */
}
Example #4
/*
 * IBM Blue Lightning
 */
static void
init_bluelightning(void)
{
	u_long	eflags;

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	need_post_dma_flush = 1;
#endif

	eflags = read_eflags();
	disable_intr();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	invd();

#ifdef CPU_BLUELIGHTNING_FPU_OP_CACHE
	wrmsr(0x1000, 0x9c92LL);	/* FP operand can be cacheable on Cyrix FPU */
#else
	wrmsr(0x1000, 0x1c92LL);	/* Intel FPU */
#endif
	/* Enables 13MB and 0-640KB cache. */
	wrmsr(0x1001, (0xd0LL << 32) | 0x3ff);
#ifdef CPU_BLUELIGHTNING_3X
	wrmsr(0x1002, 0x04000000LL);	/* Enables triple-clock mode. */
#else
	wrmsr(0x1002, 0x03000000LL);	/* Enables double-clock mode. */
#endif

	/* Enable caching in CR0. */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	invd();
	write_eflags(eflags);
}
Example #5
/*
 * Cyrix 6x86MX (code-named M2)
 *
 * XXX - What should I do here?  Please let me know.
 */
static void
init_6x86MX(void)
{
	u_long	eflags;
	u_char	ccr3, ccr4;

	eflags = read_eflags();
	disable_intr();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	/* Initialize CCR0. */
	write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

	/* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
#else
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
#endif

	/* Initialize CCR2. */
#ifdef CPU_SUSP_HLT
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
#else
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
#endif

	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	/* Initialize CCR4. */
	ccr4 = read_cyrix_reg(CCR4);
	ccr4 &= ~CCR4_IOMASK;
#ifdef CPU_IORT
	write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
#else
	write_cyrix_reg(CCR4, ccr4 | 7);
#endif

	/* Initialize CCR5. */
#ifdef CPU_WT_ALLOC
	write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
#endif

	/* Restore CCR3. */
	write_cyrix_reg(CCR3, ccr3);

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */

	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	write_eflags(eflags);
}
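CCR4 and CCR5 on the 6x86MX are only visible while CCR3's MAPEN0 bit is set, which is why the function saves CCR3, writes CCR3_MAPEN0, pokes CCR4/CCR5, and then restores the saved value. A hedged helper capturing that save/open/poke/restore dance (a sketch for illustration, not a routine from the source):

static u_char
read_mapen_cyrix_reg(u_char reg)
{
	u_char	ccr3, val;

	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);	/* expose CCR4/CCR5 */
	val = read_cyrix_reg(reg);
	write_cyrix_reg(CCR3, ccr3);		/* close the window */
	return (val);
}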
Example #6
// We could consider an API to allow these to change dynamically.
// MTRRs cover physical, static ranges; PAT is linear, more granular, and
// more dynamic.
void setup_default_mtrrs(barrier_t* smp_barrier)
{
	// disable interrupts
	int8_t state = 0;
	disable_irqsave(&state);
	// barrier - if we're meant to do this for all cores, we'll be 
	// passed a pointer to an initialized barrier
	if (smp_barrier)
		waiton_barrier(smp_barrier);
	
	// disable caching: in cr0, set CD and clear NW
	lcr0((rcr0() | CR0_CD) & ~CR0_NW);
	// flush caches
	cache_flush();
	// flush tlb
	tlb_flush_global();
	// disable MTRRs and set the default type to WB (06)
#ifndef CONFIG_NOMTRRS 
	write_msr(IA32_MTRR_DEF_TYPE, 0x00000006);

	// Now we can actually safely adjust the MTRRs
	// MTRR for IO Holes (note these are 64 bit values we are writing)
	// 0x000a0000 - 0x000c0000 : VGA - WC 0x01
	write_msr(IA32_MTRR_PHYSBASE0, PTE_ADDR(VGAPHYSMEM) | 0x01);
	// if we need to have a full 64bit val, use the UINT64 macro
	write_msr(IA32_MTRR_PHYSMASK0, 0x0000000ffffe0800);
	// 0x000c0000 - 0x00100000 : IO devices (and ROM BIOS) - UC 0x00
	write_msr(IA32_MTRR_PHYSBASE1, PTE_ADDR(DEVPHYSMEM) | 0x00);
	write_msr(IA32_MTRR_PHYSMASK1, 0x0000000ffffc0800);
	// APIC/IOAPIC holes
	/* Going to skip them, since we set their mode using PAT when we 
	 * map them in 
	 */
	// make sure all other MTRR ranges are disabled (should be unnecessary)
	write_msr(IA32_MTRR_PHYSMASK2, 0);
	write_msr(IA32_MTRR_PHYSMASK3, 0);
	write_msr(IA32_MTRR_PHYSMASK4, 0);
	write_msr(IA32_MTRR_PHYSMASK5, 0);
	write_msr(IA32_MTRR_PHYSMASK6, 0);
	write_msr(IA32_MTRR_PHYSMASK7, 0);

	// keep the default type WB (06), turn MTRRs on, and turn off fixed ranges
	write_msr(IA32_MTRR_DEF_TYPE, 0x00000806);
#endif	
	// reflush caches and TLB
	cache_flush();
	tlb_flush_global();
	// turn on caching
	lcr0(rcr0() & ~(CR0_CD | CR0_NW));
	// barrier
	if (smp_barrier)
		waiton_barrier(smp_barrier);
	// enable interrupts
	enable_irqsave(&state);
}
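Each IA32_MTRR_PHYSMASKn value packs a valid bit (bit 11, 0x800) together with an address mask over physical-address bits 35:12, for the 36-bit physical space these constants assume; an address belongs to the range when (addr & mask) == (physbase & mask). A sketch of how the magic masks above can be derived for a power-of-two-sized range:

#define MTRR_PHYSMASK_VALID	0x800ULL	/* bit 11 */

/* size must be a power of two; assumes 36 physical address bits */
static uint64_t
mtrr_physmask(uint64_t size)
{
	/* keep the bits in [35:12] that the range does not span */
	return (~(size - 1) & 0x0000000ffffff000ULL) | MTRR_PHYSMASK_VALID;
}

/* mtrr_physmask(0x20000) == 0x0000000ffffe0800, the VGA mask above */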
Example #7
/*
 * AP CPUs call this to sync up protected mode.
 *
 * WARNING!  We must ensure that the cpu is sufficiently initialized to
 * be able to use the FP for our optimized bzero/bcopy code before
 * we enter more mainstream C code.
 *
 * WARNING! %fs is not set up on entry.  This routine sets up %fs.
 */
void
init_secondary(void)
{
	int	gsel_tss;
	int	x, myid = bootAP;
	u_int	cr0;
	struct mdglobaldata *md;
	struct privatespace *ps;

	ps = &CPU_prvspace[myid];

	gdt_segs[GPRIV_SEL].ssd_base = (int)ps;
	gdt_segs[GPROC0_SEL].ssd_base =
		(int) &ps->mdglobaldata.gd_common_tss;
	ps->mdglobaldata.mi.gd_prvspace = ps;

	for (x = 0; x < NGDT; x++) {
		ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd);
	}

	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base = (int) &gdt[myid * NGDT];
	lgdt(&r_gdt);			/* does magic intra-segment return */

	lidt(&r_idt);

	lldt(_default_ldt);
	mdcpu->gd_currentldt = _default_ldt;

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;

	md = mdcpu;	/* loaded through %fs:0 (mdglobaldata.mi.gd_prvspace)*/

	md->gd_common_tss.tss_esp0 = 0;	/* not used until after switch */
	md->gd_common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	md->gd_common_tss.tss_ioopt = (sizeof md->gd_common_tss) << 16;
	md->gd_tss_gdt = &gdt[myid * NGDT + GPROC0_SEL].sd;
	md->gd_common_tssd = *md->gd_tss_gdt;
	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);
	pmap_set_opt();		/* PSE/4MB pages, etc */

	/* set up CPU registers and state */
	cpu_setregs();

	/* set up FPU state on the AP */
	npxinit(__INITIAL_NPXCW__);

	/* set up SSE registers */
	enable_sse();
}
Example #8
static void
fbt_resume(void *arg, dtrace_id_t id, void *parg)
{
	fbt_probe_t *fbt = parg;
#if 0
	dtrace_modctl_t *ctl = fbt->fbtp_ctl;
#endif
	u_long psl;
	u_long cr0;

#if 0	/* XXX TBD */
	ASSERT(ctl->nenabled > 0);

	if ((ctl->loadcnt != fbt->fbtp_loadcnt))
		return;
#endif
	/* Disable interrupts. */
	psl = x86_read_psl();
	x86_disable_intr();

	/* Disable write protection in supervisor mode. */
	cr0 = rcr0();
	lcr0(cr0 & ~CR0_WP);

	for (; fbt != NULL; fbt = fbt->fbtp_next)
		*fbt->fbtp_patchpoint = fbt->fbtp_patchval;

	/* Write back and invalidate cache, flush pipelines. */
	wbinvd();
	x86_flush();
	x86_write_psl(psl);

	/* Re-enable write protection. */
	lcr0(cr0);
}
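fbt_resume() and fbt_enable() (Example #16 below) share a text-patching idiom: kernel text stays read-only even in ring 0 while CR0.WP is set, so the stores are bracketed by an interrupts-off window with WP cleared. The idiom in isolation, as a minimal sketch using the same helpers:

static void
patch_kernel_byte(volatile uint8_t *p, uint8_t val)
{
	u_long	psl, cr0;

	psl = x86_read_psl();
	x86_disable_intr();	/* nothing else may run while WP is off */
	cr0 = rcr0();
	lcr0(cr0 & ~CR0_WP);	/* allow ring-0 writes to read-only text */
	*p = val;
	lcr0(cr0);		/* restore WP */
	x86_write_psl(psl);
}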
Example #9
/*
 * Cyrix 486S/DX series
 */
static void
init_cy486dx(void)
{
	u_long	eflags;
	u_char	ccr2;

	eflags = read_eflags();
	disable_intr();
	invd();

	ccr2 = read_cyrix_reg(CCR2);
#ifdef CPU_SUSP_HLT
	ccr2 |= CCR2_SUSP_HLT;
#endif

#ifdef PC98
	/* Enables the WB cache interface pin and locks the NW bit in CR0. */
	ccr2 |= CCR2_WB | CCR2_LOCK_NW;
	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, ccr2 & ~CCR2_LOCK_NW);
	load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0, NW = 1 */
#endif

	write_cyrix_reg(CCR2, ccr2);
	write_eflags(eflags);
}
Example #10
/*
 * Set up proc0's TSS and LDT.
 */
void
x86_64_proc0_tss_ldt_init(void)
{
	struct pcb *pcb;
	int x;

	gdt_init();

	cpu_info_primary.ci_curpcb = pcb = &proc0.p_addr->u_pcb;

	pcb->pcb_flags = 0;
	pcb->pcb_tss.tss_iobase =
	    (u_int16_t)((caddr_t)pcb->pcb_iomap - (caddr_t)&pcb->pcb_tss);
	for (x = 0; x < sizeof(pcb->pcb_iomap) / 4; x++)
		pcb->pcb_iomap[x] = 0xffffffff;

	pcb->pcb_ldt_sel = pmap_kernel()->pm_ldt_sel =
	    GSYSSEL(GLDT_SEL, SEL_KPL);
	pcb->pcb_cr0 = rcr0();
	pcb->pcb_tss.tss_rsp0 = (u_int64_t)proc0.p_addr + USPACE - 16;
	pcb->pcb_tss.tss_ist[0] = (u_int64_t)proc0.p_addr + PAGE_SIZE;
	proc0.p_md.md_regs = (struct trapframe *)pcb->pcb_tss.tss_rsp0 - 1;
	proc0.p_md.md_tss_sel = tss_alloc(pcb);

	ltr(proc0.p_md.md_tss_sel);
	lldt(pcb->pcb_ldt_sel);
}
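The iomap setup here reappears almost verbatim in Example #25: tss_iobase is the offset of the I/O permission bitmap from the TSS base, and because a 1 bit denies the corresponding port, filling the bitmap with 0xffffffff blocks all user-level I/O port access. The repeated pattern, factored into a hypothetical helper for illustration:

static void
pcb_deny_all_ioports(struct pcb *pcb)	/* hypothetical helper */
{
	int x;

	pcb->pcb_tss.tss_iobase =
	    (u_int16_t)((caddr_t)pcb->pcb_iomap - (caddr_t)&pcb->pcb_tss);
	for (x = 0; x < sizeof(pcb->pcb_iomap) / 4; x++)
		pcb->pcb_iomap[x] = 0xffffffff;	/* 1 = port denied */
}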
Example #11
/*
 * Cyrix 486S/DX series
 */
static void
init_cy486dx(void)
{
	register_t saveintr;
	u_char	ccr2;

	saveintr = intr_disable();
	invd();

	ccr2 = read_cyrix_reg(CCR2);
#ifdef CPU_SUSP_HLT
	ccr2 |= CCR2_SUSP_HLT;
#endif

#ifdef PC98
	/* Enables the WB cache interface pin and locks the NW bit in CR0. */
	ccr2 |= CCR2_WB | CCR2_LOCK_NW;
	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, ccr2 & ~CCR2_LOCK_NW);
	load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0, NW = 1 */
#endif

	write_cyrix_reg(CCR2, ccr2);
	intr_restore(saveintr);
}
Example #12
// Turn on paging.
void
vmenable(void)
{
  uint cr0;

  switchkvm(); // load kpgdir into cr3
  cr0 = rcr0();
  cr0 |= CR0_PG;
  lcr0(cr0);
}
Example #13
void
cpu_init(struct cpu_info *ci)
{
	u_int cr4 = 0;

	/* configure the CPU if needed */
	if (ci->cpu_setup != NULL)
		(*ci->cpu_setup)(ci);

	/*
	 * We do this here after identifycpu() because errata may affect
	 * what we do.
	 */
	patinit(ci);
 
	/*
	 * Enable ring 0 write protection (486 or above, but 386
	 * no longer supported).
	 */
	lcr0(rcr0() | CR0_WP);

	if (cpu_feature & CPUID_PGE)
		cr4 |= CR4_PGE;	/* enable global TLB caching */

	if (ci->ci_feature_sefflags_ebx & SEFF0EBX_SMEP)
		cr4 |= CR4_SMEP;
#ifndef SMALL_KERNEL
	if (ci->ci_feature_sefflags_ebx & SEFF0EBX_SMAP)
		cr4 |= CR4_SMAP;
	if (ci->ci_feature_sefflags_ecx & SEFF0ECX_UMIP)
		cr4 |= CR4_UMIP;
#endif

	/*
	 * If we have FXSAVE/FXRESTOR, use them.
	 */
	if (cpu_feature & CPUID_FXSR) {
		cr4 |= CR4_OSFXSR;

		/*
		 * If we have SSE/SSE2, enable XMM exceptions.
		 */
		if (cpu_feature & (CPUID_SSE|CPUID_SSE2))
			cr4 |= CR4_OSXMMEXCPT;
	}
	/* no cr4 on most 486s */
	if (cr4 != 0)
		lcr4(rcr4()|cr4);

#ifdef MULTIPROCESSOR
	ci->ci_flags |= CPUF_RUNNING;
	tlbflushg();
#endif
}
Example #14
/*
 * There are i486-based upgrade products for i386 machines.
 * In this case, the BIOS doesn't enable the CPU cache.
 */
void
init_i486_on_386(void)
{
	u_long	eflags;

	eflags = read_eflags();
	cpu_disable_intr();

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0, NW = 0 */

	write_eflags(eflags);
}
Example #15
static void enable_paging(void)
{
	lcr3(boot_cr3);

	// turn on paging
	uint32_t cr0 = rcr0();
	cr0 |=
	    CR0_PE | CR0_PG | CR0_AM | CR0_WP | CR0_NE | CR0_TS | CR0_EM |
	    CR0_MP;
	cr0 &= ~(CR0_TS | CR0_EM);
	lcr0(cr0);
}
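Note that this function (and its twin in Example #17 below) ORs CR0_TS and CR0_EM in and then immediately clears them again, so those two bits contribute nothing. A behavior-preserving restatement without the dead bits, as a sketch:

static void
enable_paging_minimal(void)
{
	lcr3(boot_cr3);		/* the page directory must be in cr3 first */
	lcr0((rcr0() | CR0_PE | CR0_PG | CR0_AM | CR0_WP | CR0_NE | CR0_MP)
	    & ~(CR0_TS | CR0_EM));	/* don't trap FPU instructions */
}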
Example #16
static int
fbt_enable(void *arg, dtrace_id_t id, void *parg)
{
	fbt_probe_t *fbt = parg;
#if 0
	dtrace_modctl_t *ctl = fbt->fbtp_ctl;
#endif
	u_long psl;
	u_long cr0;


#if 0	/* XXX TBD */
	ctl->nenabled++;

	/*
	 * Now check that our modctl has the expected load count.  If it
	 * doesn't, this module must have been unloaded and reloaded -- and
	 * we're not going to touch it.
	 */
	if (ctl->loadcnt != fbt->fbtp_loadcnt) {
		if (fbt_verbose) {
			printf("fbt is failing for probe %s "
			    "(module %s reloaded)",
			    fbt->fbtp_name, ctl->filename);
		}

		return;
	}
#endif

	/* Disable interrupts. */
	psl = x86_read_psl();
	x86_disable_intr();

	/* Disable write protection in supervisor mode. */
	cr0 = rcr0();
	lcr0(cr0 & ~CR0_WP);

	for (; fbt != NULL; fbt = fbt->fbtp_next) {
		*fbt->fbtp_patchpoint = fbt->fbtp_patchval;
	}

	/* Write back and invalidate cache, flush pipelines. */
	wbinvd();
	x86_flush();
	x86_write_psl(psl);

	/* Re-enable write protection. */
	lcr0(cr0);

	return 0;
}
Example #17
// Enable paging
// Load cr3 and set the PE and PG bits in the cr0 register
void
enable_paging(void)
{
	uint cr0;
	// install the page table
	lcr3(boot_cr3);

	// turn on paging
	cr0 = rcr0();
	cr0 |= CR0_PE | CR0_PG | CR0_AM | CR0_WP | CR0_NE | CR0_TS | CR0_EM | CR0_MP;
	cr0 &= ~(CR0_TS | CR0_EM);
	lcr0(cr0);
}
Example #18
void
cpu_setregs(void)
{
#if 0
    unsigned int cr0;

    cr0 = rcr0();
    cr0 |= CR0_NE;			/* Done by npxinit() */
    cr0 |= CR0_MP | CR0_TS;		/* Done at every execve() too. */
    cr0 |= CR0_WP | CR0_AM;
    load_cr0(cr0);
    load_gs(_udatasel);
#endif
}
Example #19
void
cpu_init(struct cpu_info *ci)
{
	/* configure the CPU if needed */
	if (ci->cpu_setup != NULL)
		(*ci->cpu_setup)(ci);

	lcr0(rcr0() | CR0_WP);
	lcr4(rcr4() | CR4_DEFAULT);

#ifdef MULTIPROCESSOR
	ci->ci_flags |= CPUF_RUNNING;
#endif
}
Example #20
/*
 * There are i486-based upgrade products for i386 machines.
 * In this case, the BIOS doesn't enable the CPU cache.
 */
static void
init_i486_on_386(void)
{
	register_t saveintr;

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	need_post_dma_flush = 1;
#endif

	saveintr = intr_disable();

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0, NW = 0 */

	intr_restore(saveintr);
}
Example #21
/*
 * There are i486-based upgrade products for i386 machines.
 * In this case, the BIOS doesn't enable the CPU cache.
 */
static void
init_i486_on_386(void)
{
	u_long	eflags;

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	need_post_dma_flush = 1;
#endif

	eflags = read_eflags();
	disable_intr();

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0, NW = 0 */

	write_eflags(eflags);
}
Example #22
static __inline void
via_padlock_cbc(void *cw, void *src, void *dst, void *key, int rep,
    void *iv)
{
	unsigned int creg0;

	creg0 = rcr0();		/* Permit access to SIMD/FPU path */
	lcr0(creg0 & ~(CR0_EM|CR0_TS));

	/* Do the deed */
	__asm __volatile("pushfl; popfl");	/* force key reload */
	__asm __volatile(".byte 0xf3, 0x0f, 0xa7, 0xd0" : /* rep xcrypt-cbc */
			: "a" (iv), "b" (key), "c" (rep), "d" (cw), "S" (src), "D" (dst)
			: "memory", "cc");

	lcr0(creg0);
}
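The CR0 bracketing here is the general rule for kernel-mode FPU/SIMD use: with CR0.EM or CR0.TS set, the xcrypt instruction (like any SSE or x87 instruction) would fault, so the routine saves CR0, clears both bits, does the work, and restores the old value so lazy-FPU accounting stays intact. The same idiom as a standalone, hypothetical wrapper:

static void
with_kernel_fpu(void (*fn)(void *), void *arg)	/* hypothetical wrapper */
{
	unsigned int cr0;

	cr0 = rcr0();
	lcr0(cr0 & ~(CR0_EM | CR0_TS));	/* permit FPU/SIMD in the kernel */
	fn(arg);
	lcr0(cr0);			/* re-arm lazy-FPU trapping */
}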
Example #23
void
npx_pnpbios_attach(struct device *parent, struct device *self, void *aux)
{
	struct npx_softc *sc = (void *)self;
	struct pnpbiosdev_attach_args *aa = aux;
	int irq, ist;

	if (pnpbios_io_map(aa->pbt, aa->resc, 0, &sc->sc_iot, &sc->sc_ioh)) { 	
		printf(": can't map i/o space\n");
		return;
	}

	printf("\n");
	pnpbios_print_devres(self, aa);

	if (pnpbios_getirqnum(aa->pbt, aa->resc, 0, &irq, &ist) != 0) {
		printf("%s: unable to get IRQ number or type\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	sc->sc_type = npxprobe1(sc->sc_iot, sc->sc_ioh, irq);

	switch (sc->sc_type) {
	case NPX_INTERRUPT:
		printf("%s: interrupting at irq %d\n", sc->sc_dev.dv_xname,
		    irq);
		lcr0(rcr0() & ~CR0_NE);
		sc->sc_ih = isa_intr_establish(0/*XXX*/, irq, ist, IPL_NONE,
		    npxintr, NULL);
		break;
	case NPX_EXCEPTION:
		printf("%s: using exception 16\n", sc->sc_dev.dv_xname);
		break;
	case NPX_BROKEN:
		printf("%s: error reporting broken; not using\n",
		    sc->sc_dev.dv_xname);
		sc->sc_type = NPX_NONE;
		return;
	case NPX_NONE:
		panic("npx_pnpbios_attach");
	}

	npxattach(sc);
}
Example #24
void
vmm_host_state_init(void)
{

	vmm_host_efer = rdmsr(MSR_EFER);
	vmm_host_pat = rdmsr(MSR_PAT);

	/*
	 * We always want CR0.TS to be set when the processor does a VM exit.
	 *
	 * With emulation turned on unconditionally after a VM exit, we are
	 * able to trap inadvertent use of the FPU until the guest FPU state
	 * has been safely squirreled away.
	 */
	vmm_host_cr0 = rcr0() | CR0_TS;

	vmm_host_cr4 = rcr4();
}
Example #25
void
x86_64_init_pcb_tss_ldt(struct cpu_info *ci)
{
	int x;
	struct pcb *pcb = ci->ci_idle_pcb;
 
	pcb->pcb_tss.tss_iobase =
	    (u_int16_t)((caddr_t)pcb->pcb_iomap - (caddr_t)&pcb->pcb_tss);
	for (x = 0; x < sizeof(pcb->pcb_iomap) / 4; x++)
		pcb->pcb_iomap[x] = 0xffffffff;

	/* XXXfvdl pmap_kernel not needed */ 
	pcb->pcb_ldt_sel = pmap_kernel()->pm_ldt_sel =
	    GSYSSEL(GLDT_SEL, SEL_KPL);
	pcb->pcb_cr0 = rcr0();

	ci->ci_idle_tss_sel = tss_alloc(pcb);
}
Example #26
static void
save_guest_fpustate(struct vcpu *vcpu)
{

	if ((rcr0() & CR0_TS) == 0)
		panic("fpu emulation not enabled in host!");

	/* save guest XCR0 and restore host XCR0 */
	if (rcr4() & CR4_XSAVE) {
		vcpu->guest_xcr0 = rxcr(0);
		load_xcr(0, vmm_get_host_xcr0());
	}

	/* save guest FPU state */
	fpu_stop_emulating();
	fpusave(vcpu->guestfpu);
	fpu_start_emulating();
}
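The fpu_stop_emulating()/fpu_start_emulating() pair brackets fpusave() so the guest's FPU registers can be touched without faulting, then re-arms the CR0.TS trap that Example #24 sets up for VM exits. Those helpers are conventionally just TS manipulation; a hedged sketch, assumed rather than taken from the source:

static void
fpu_stop_emulating(void)	/* likely just the clts instruction */
{
	load_cr0(rcr0() & ~CR0_TS);
}

static void
fpu_start_emulating(void)
{
	load_cr0(rcr0() | CR0_TS);	/* next FPU use traps again */
}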
Example #27
void
cpu_init(struct cpu_info *ci)
{
	/* configure the CPU if needed */
	if (ci->cpu_setup != NULL)
		(*ci->cpu_setup)(ci);
	/*
	 * We do this here after identifycpu() because errata may affect
	 * what we do.
	 */
	patinit(ci);

	lcr0(rcr0() | CR0_WP);
	lcr4(rcr4() | CR4_DEFAULT);

#ifdef MULTIPROCESSOR
	ci->ci_flags |= CPUF_RUNNING;
	tlbflushg();
#endif
}
Example #28
/*
 * Cyrix 486SLC/DLC/SR/DR series
 */
static void
init_486dlc(void)
{
	u_long	eflags;
	u_char	ccr0;

	eflags = read_eflags();
	disable_intr();
	invd();

	ccr0 = read_cyrix_reg(CCR0);
#ifndef CYRIX_CACHE_WORKS
	ccr0 |= CCR0_NC1 | CCR0_BARB;
	write_cyrix_reg(CCR0, ccr0);
	invd();
#else
	ccr0 &= ~CCR0_NC0;
#ifndef CYRIX_CACHE_REALLY_WORKS
	ccr0 |= CCR0_NC1 | CCR0_BARB;
#else
	ccr0 |= CCR0_NC1;
#endif
#ifdef CPU_DIRECT_MAPPED_CACHE
	ccr0 |= CCR0_CO;			/* Direct mapped mode. */
#endif
	write_cyrix_reg(CCR0, ccr0);

	/* Clear non-cacheable region. */
	write_cyrix_reg(NCR1+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR2+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR3+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR4+2, NCR_SIZE_0K);

	write_cyrix_reg(0, 0);	/* dummy write */

	/* Enable caching in CR0. */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	invd();
#endif /* !CYRIX_CACHE_WORKS */
	write_eflags(eflags);
}
Example #29
void
init_vmm(void) {
    extern char bstack[];

    // allocate a page directory for the kernel
    kpd = kpalloc();

    boot_map(kpd, (void*) KADDR, 0, npages * PG_SIZE, true, false);
    boot_map(kpd, (void*) (KADDR - BIT(15)), PADDR(bstack), BIT(15), true, false);

    lcr3(PADDR(kpd));

    uint32_t cr0 = rcr0();
    cr0 |= CR0_PE | CR0_PG | CR0_AM | CR0_WP | CR0_NE | CR0_MP;
    cr0 &= ~(CR0_TS | CR0_EM);
    lcr0(cr0);

    mbi = VADDR(mbi);

    isr_install_handler(ISR_PGFLT, isr_pgfault);
}
Example #30
void
npx_isa_attach(device_t parent, device_t self, void *aux)
{
	struct npx_softc *sc = device_private(self);
	struct isa_attach_args *ia = aux;

	aprint_naive("\n");
	aprint_normal("\n");

	sc->sc_dev = self;
	sc->sc_type = (u_long) ia->ia_aux;

	switch (sc->sc_type) {
	case NPX_INTERRUPT:
		sc->sc_iot = ia->ia_iot;
		if (bus_space_map(sc->sc_iot, 0xf0, 16, 0, &sc->sc_ioh))
			panic("%s: unable to map I/O space", __func__);
		lcr0(rcr0() & ~CR0_NE);
		sc->sc_ih = isa_intr_establish(ia->ia_ic, ia->ia_irq[0].ir_irq,
		    IST_EDGE, IPL_NONE, (int (*)(void *))npxintr, 0);
		break;
	case NPX_EXCEPTION:
		/*FALLTHROUGH*/
	case NPX_CPUID:
		aprint_verbose_dev(sc->sc_dev, "%s using exception 16\n",
		    sc->sc_type == NPX_CPUID ? "reported by CPUID;" : "");
		sc->sc_type = NPX_EXCEPTION;
		break;
	case NPX_BROKEN:
		aprint_error_dev(sc->sc_dev,
		    "error reporting broken; not using\n");
		sc->sc_type = NPX_NONE;
		return;
	case NPX_NONE:
		return;
	}

	npxattach(sc);
}