/*
 * Dump processor state for debugging: first the saved general registers
 * (via dumpregs2), then the control registers, and — when the CPU
 * advertises the P5-style machine-check facility — the machine check
 * address/type MSRs.  Uses iprint so output works from interrupt context.
 */
void
dumpregs(Ureg* ureg)
{
	vlong mca, mct;

	dumpregs2(ureg);

	/*
	 * Processor control registers.
	 * If machine check exception, time stamp counter, page size extensions
	 * or enhanced virtual 8086 mode extensions are supported, there is a
	 * CR4. If there is a CR4 and machine check extensions, read the machine
	 * check address and machine check type registers if RDMSR supported.
	 */
	iprint(" CR0 %8.8lux CR2 %8.8lux CR3 %8.8lux",
		getcr0(), getcr2(), getcr3());
	/*
	 * 0x9A = CPUID.1:EDX bits MCE(7)|TSC(4)|PSE(3)|VME(1); per the
	 * comment above, any of these features implies CR4 exists.
	 */
	if(m->cpuiddx & 0x9A){
		iprint(" CR4 %8.8lux", getcr4());
		/*
		 * 0xA0 = MCE(bit 7)|MSR(bit 5): machine-check extensions
		 * present AND RDMSR/WRMSR supported.
		 */
		if((m->cpuiddx & 0xA0) == 0xA0){
			/* MSRs 0 and 1 are the P5 machine check address/type */
			rdmsr(0x00, &mca);
			rdmsr(0x01, &mct);
			iprint("\n MCA %8.8llux MCT %8.8llux", mca, mct);
		}
	}
	/* finish with the trap frame pointer and current process */
	iprint("\n ur %#p up %#p\n", ureg, up);
}
/*
 * One-time bring-up of the base platform hardware.  The calls are
 * strictly ordered: caching and legacy DMA are settled before the
 * interrupt controller, coprocessor, and timers are initialized, and
 * platform-specific (QEMU / coreboot) hooks run last so they can
 * override the generic setup.  No return value; each *_setup routine
 * is assumed to handle its own failures.
 */
static void
platform_hardware_setup(void)
{
    // Enable CPU caching (clear CR0 cache-disable and not-write-through bits)
    setcr0(getcr0() & ~(CR0_CD|CR0_NW));

    // Make sure legacy DMA isn't running.
    dma_setup();

    // Init base pc hardware.
    pic_setup();
    mathcp_setup();
    timer_setup();
    clock_setup();

    // Platform specific setup
    qemu_platform_setup();
    coreboot_platform_setup();
}
/*
 * Dump processor state for debugging: the saved general registers
 * (via dumpregs2), then CR0/CR2/CR3, and CR4 when the CPU advertises
 * any feature that implies CR4 exists.  If this dump was triggered by
 * a machine-check exception, the machine-check registers are dumped too.
 */
void
dumpregs(Ureg* ureg)
{
	dumpregs2(ureg);

	/*
	 * Processor control registers.
	 * If machine check exception, time stamp counter, page size extensions
	 * or enhanced virtual 8086 mode extensions are supported, there is a
	 * CR4. If there is a CR4 and machine check extensions, read the machine
	 * check address and machine check type registers if RDMSR supported.
	 */
	iprint(" CR0 %8.8lux CR2 %8.8lux CR3 %8.8lux",
		getcr0(), getcr2(), getcr3());
	if(m->cpuiddx & (Mce|Tsc|Pse|Vmex)){
		iprint(" CR4 %8.8lux\n", getcr4());
		/* vector 18 is #MC, the x86 machine-check exception */
		if(ureg->trap == 18)
			dumpmcregs();
	}
	iprint("\n ur %#p up %#p\n", ureg, up);
}
// Entry point for Power On Self Test (POST) - the BIOS initilization // phase. This function makes the memory at 0xc0000-0xfffff // read/writable and then calls dopost(). void VISIBLE32FLAT handle_post(void) { debug_serial_setup(); dprintf(1, "Start bios (version %s)\n", VERSION); // Enable CPU caching setcr0(getcr0() & ~(CR0_CD|CR0_NW)); // Clear CMOS reboot flag. outb_cmos(0, CMOS_RESET_CODE); // Make sure legacy DMA isn't running. init_dma(); // Check if we are running under Xen. xen_probe(); // Allow writes to modify bios area (0xf0000) make_bios_writable(); // Now that memory is read/writable - start post process. dopost(); }
/*
 * Kernel entry point: initializes the CPU descriptor tables, then runs
 * an in-order self-test of the virtual memory manager (mapping,
 * translation, accessed/dirty bits, aliasing, unmapping).  The CHECK
 * sequence is order-sensitive: accessed/dirty assertions depend on the
 * exact reads/writes performed before them.  Ends by shutting down.
 */
void
kernelMain(void)
{
    putStr("\nstarting kernel\n");
    initGdt();
    initTss();
    initIdt();

    /* Identity-map the first 2MB so execution survives enabling paging. */
    putStr("mapping the first 2M with va = pa\n");
    for (uint32_t p = 0; p < 0x200000; p += 4096) {
        vmm_map(p,p);
    }

    /* 0x80000000 is CR0.PG: must be clear before vmm_on, set after. */
    CHECK((getcr0() & 0x80000000) == 0);
    putStr("about to enable paging\n");
    vmm_on();
    CHECK((getcr0() & 0x80000000) != 0);

    /* vmm_pa returns 0xffffffff as the "no mapping" sentinel. */
    CHECK (vmm_pa(0xf0000000) == 0xffffffff);
    uint32_t pa = vmm_frame();
    vmm_map(0xf0000000, pa);
    CHECK (vmm_pa(0xf0000000) != 0xffffffff);
    /* page offset must pass through translation untouched */
    CHECK ((vmm_pa(0xf0000123) & 0xfff) == 0x123);

    /* Fresh mapping: neither accessed nor dirty yet. */
    CHECK(vmm_dirty(0xf0000000) == 0);
    CHECK(vmm_accessed(0xf0000000) == 0);
    /* A read sets accessed but not dirty; new frame reads as zero
     * (assumes vmm_frame hands out zero-filled frames — TODO confirm). */
    CHECK(peek(0xf0000000) == 0);
    CHECK(vmm_dirty(0xf0000000) == 0);
    CHECK(vmm_accessed(0xf0000000) == 1);
    /* A write sets dirty as well. */
    poke(0xf0000000, 0x12345678);
    CHECK(peek(0xf0000000) == 0x12345678);
    CHECK(vmm_dirty(0xf0000000) == 1);
    CHECK(vmm_accessed(0xf0000000) == 1);

    /* Alias the same frame at a second VA: data visible, per-PTE
     * accessed/dirty bits independent of the first mapping. */
    CHECK(vmm_dirty(0xe0000000) == 0);
    CHECK(vmm_accessed(0xe0000000) == 0);
    CHECK(vmm_pa(0x40000000) == 0xffffffff);
    vmm_map(0xe0000000, pa);
    CHECK(peek(0xe0000000) == 0x12345678);
    CHECK(vmm_dirty(0xe0000000) == 0);
    CHECK(vmm_accessed(0xe0000000) == 1);

    /* NOTE(review): these touch unmapped addresses and expect a zero
     * read — appears to rely on demand paging in the page-fault
     * handler; confirm that is the intended contract. */
    CHECK(peek(0x44444444) == 0);
    CHECK(vmm_dirty(0x44444000) == 0);
    /* only the faulted-in page is marked accessed, not its neighbors */
    CHECK(vmm_accessed(0x44443000) == 0);
    CHECK(vmm_accessed(0x44444000) == 1);
    CHECK(vmm_accessed(0x44445000) == 0);

    /* Demand-mapped write, then alias its frame at 0xccccc000. */
    poke(0x88888888,0x88888888);
    CHECK(peek(0x88888888) == 0x88888888);
    vmm_map(0xccccc000, vmm_pa(0x88888000));
    CHECK(peek(0xccccc888) == 0x88888888);
    CHECK (vmm_pa(0xccccc666) == vmm_pa(0x88888666));

    /* Unmapping the alias must not disturb the original mapping;
     * re-reading the unmapped VA yields a fresh zero page
     * (again presuming demand-paging semantics — verify). */
    vmm_unmap(0xccccc000);
    CHECK(peek(0xccccc888) == 0);
    CHECK(peek(0x88888888) == 0x88888888);

    shutdown();
}
/*
 * Fill in the remaining CPU context and initialize it.
 *
 * Populates a Xen vcpu_guest_context_t for CPU `cp` — segment
 * registers, flags/IOPL, trap table, GDT frame, control registers,
 * and the Xen event/failsafe/syscall callbacks — then hands it to the
 * hypervisor via xen_vcpu_initialize().  Returns that call's result.
 */
static int
mp_set_cpu_context(vcpu_guest_context_t *vgc, cpu_t *cp)
{
	uint_t vec, iopl;

	vgc->flags = VGCF_IN_KERNEL;

	/*
	 * fpu_ctx we leave as zero; on first fault we'll store
	 * sse_initial into it anyway.
	 */

#if defined(__amd64)
	/* 64-bit Xen PV guest kernels run at user privilege level */
	vgc->user_regs.cs = KCS_SEL | SEL_KPL;	/* force to ring 3 */
#else
	vgc->user_regs.cs = KCS_SEL;
#endif
	vgc->user_regs.ds = KDS_SEL;
	vgc->user_regs.es = KDS_SEL;
	vgc->user_regs.ss = KDS_SEL;
	vgc->kernel_ss = KDS_SEL;

	/*
	 * Allow I/O privilege level for Dom0 kernel.
	 */
	if (DOMAIN_IS_INITDOMAIN(xen_info))
		iopl = (PS_IOPL & 0x1000);	/* ring 1 */
	else
		iopl = 0;

#if defined(__amd64)
	/* fs/gs are zeroed; the ASSERT below relies on gs == 0 */
	vgc->user_regs.fs = 0;
	vgc->user_regs.gs = 0;
	vgc->user_regs.rflags = F_OFF | iopl;
#elif defined(__i386)
	vgc->user_regs.fs = KFS_SEL;
	vgc->user_regs.gs = KGS_SEL;
	vgc->user_regs.eflags = F_OFF | iopl;
	vgc->event_callback_cs = vgc->user_regs.cs;
	vgc->failsafe_callback_cs = vgc->user_regs.cs;
#endif

	/*
	 * Initialize the trap_info_t from the IDT
	 */
#if !defined(__lint)
	ASSERT(NIDT == sizeof (vgc->trap_ctxt) / sizeof (vgc->trap_ctxt[0]));
#endif
	for (vec = 0; vec < NIDT; vec++) {
		trap_info_t *ti = &vgc->trap_ctxt[vec];

		/*
		 * xen_idt_to_trap_info returning 0 means the IDT entry
		 * was converted; fill in the selector and vector then.
		 */
		if (xen_idt_to_trap_info(vec,
		    &cp->cpu_m.mcpu_idt[vec], ti) == 0) {
			ti->cs = KCS_SEL;
			ti->vector = vec;
		}
	}

	/*
	 * No LDT
	 */

	/*
	 * (We assert in various places that the GDT is (a) aligned on a
	 * page boundary and (b) one page long, so this really should fit..)
	 */
#ifdef CRASH_XEN
	vgc->gdt_frames[0] = pa_to_ma(mmu_btop(cp->cpu_m.mcpu_gdtpa));
#else
	vgc->gdt_frames[0] = pfn_to_mfn(mmu_btop(cp->cpu_m.mcpu_gdtpa));
#endif
	vgc->gdt_ents = NGDT;

	/* CR0 with the FPU-enable flags applied to our current value */
	vgc->ctrlreg[0] = CR0_ENABLE_FPU_FLAGS(getcr0());
#if defined(__i386)
	/* PAE top-level table needs Xen's special cr3 encoding */
	if (mmu.pae_hat)
		vgc->ctrlreg[3] =
		    xen_pfn_to_cr3(pfn_to_mfn(kas.a_hat->hat_htable->ht_pfn));
	else
#endif
		vgc->ctrlreg[3] =
		    pa_to_ma(mmu_ptob(kas.a_hat->hat_htable->ht_pfn));
	vgc->ctrlreg[4] = getcr4();

	vgc->event_callback_eip = (uintptr_t)xen_callback;
	vgc->failsafe_callback_eip = (uintptr_t)xen_failsafe_callback;
	vgc->flags |= VGCF_failsafe_disables_events;

#if defined(__amd64)
	/*
	 * XXPV should this be moved to init_cpu_syscall?
	 */
	vgc->syscall_callback_eip = (uintptr_t)sys_syscall;
	vgc->flags |= VGCF_syscall_disables_events;
	ASSERT(vgc->user_regs.gs == 0);
	/* kernel %gs base points at this CPU's cpu_t */
	vgc->gs_base_kernel = (uintptr_t)cp;
#endif

	return (xen_vcpu_initialize(cp->cpu_id, vgc));
}