Example #1
0
File: trap.c  Project: npe9/harvey
/*
 * Dump the machine state for debugging: the general-purpose
 * registers from the trap frame, then the processor control
 * registers, then halt via die().
 */
void
dumpregs(Ureg* ureg)
{
	Mach *mach = machp();

	dumpgpr(ureg);

	/*
	 * Processor control registers.
	 * If machine check exception, time stamp counter, page size extensions
	 * or enhanced virtual 8086 mode extensions are supported, there is a
	 * CR4. If there is a CR4 and machine check extensions, read the machine
	 * check address and machine check type registers if RDMSR supported.
	 */
	iprint("cr0\t%#16.16llux\n", cr0get());
	/* CR2 (faulting address) comes from the per-mach saved copy. */
	iprint("cr2\t%#16.16llux\n", mach->cr2);
	iprint("cr3\t%#16.16llux\n", cr3get());
	die("dumpregs");
//	archdumpregs();
}
Example #2
0
File: mmu.c  Project: Shamar/harvey
/*
 * mmuinit - per-CPU MMU setup.
 * On application processors (machno != 0): give the CPU a private copy
 * of the boot pml4, enable NX, and load CR3.
 * On the boot CPU (machno == 0): adopt the page table installed by the
 * boot loader, enable NX, establish the kernel virtual-memory limits in
 * sys, and wire up the PDMAP self-reference used for page-table access.
 */
void
mmuinit(void)
{
    uint8_t *p;
    Page *page;
    uint64_t o, pa, r, sz;

    archmmu();
    DBG("mach%d: %#p pml4 %#p npgsz %d\n", machp()->machno, machp(), machp()->MMU.pml4, sys->npgsz);

    if(machp()->machno != 0) {
        /* NIX: KLUDGE: Has to go when each mach is using
         * its own page table
         */
        /* Place a private pml4 copy just past the top of this mach's
         * stack. NOTE(review): assumes the area at stack+MACHSTKSZ is
         * reserved for this — confirm against the stack allocation. */
        p = UINT2PTR(machp()->stack);
        p += MACHSTKSZ;

        /* Clone the boot CPU's pml4 and describe it with the
         * per-mach kludge Page. */
        memmove(p, UINT2PTR(mach0pml4.va), PTSZ);
        machp()->MMU.pml4 = &machp()->MMU.pml4kludge;
        machp()->MMU.pml4->va = PTR2UINT(p);
        machp()->MMU.pml4->pa = PADDR(p);
        machp()->MMU.pml4->daddr = mach0pml4.daddr;	/* # of user mappings in pml4 */

        /* Set EFER.NXE so no-execute page protection is honored. */
        r = rdmsr(Efer);
        r |= Nxe;
        wrmsr(Efer, r);
        /* Install this CPU's pml4; must happen after the copy above. */
        cr3put(machp()->MMU.pml4->pa);
        DBG("m %#p pml4 %#p\n", machp(), machp()->MMU.pml4);
        return;
    }

    /* Boot CPU: adopt the page table the loader left in CR3. */
    page = &mach0pml4;
    page->pa = cr3get();
    page->va = PTR2UINT(KADDR(page->pa));

    machp()->MMU.pml4 = page;

    /* Set EFER.NXE (no-execute enable), as on the APs above. */
    r = rdmsr(Efer);
    r |= Nxe;
    wrmsr(Efer, r);

    /*
     * Set up the various kernel memory allocator limits:
     * pmstart/pmend bound the unused physical memory;
     * vmstart/vmend bound the total possible virtual memory
     * used by the kernel;
     * vmunused is the highest virtual address currently mapped
     * and used by the kernel;
     * vmunmapped is the highest virtual address currently
     * mapped by the kernel.
     * Vmunused can be bumped up to vmunmapped before more
     * physical memory needs to be allocated and mapped.
     *
     * This is set up here so meminit can map appropriately.
     */
    o = sys->pmstart;
    /* Pad pmstart up to the next 4MiB boundary and claim the padding
     * from the asm allocator so it is not handed out again. */
    sz = ROUNDUP(o, 4*MiB) - o;
    pa = asmalloc(0, sz, 1, 0);
    if(pa != o)
        panic("mmuinit: pa %#llux memstart %#llux\n", pa, o);
    sys->pmstart += sz;

    sys->vmstart = KSEG0;
    /* Kernel image occupies [vmstart, vmstart+o); round to a page. */
    sys->vmunused = sys->vmstart + ROUNDUP(o, 4*KiB);
    sys->vmunmapped = sys->vmstart + o + sz;
    sys->vmend = sys->vmstart + TMFM;

    print("mmuinit: vmstart %#p vmunused %#p vmunmapped %#p vmend %#p\n",
          sys->vmstart, sys->vmunused, sys->vmunmapped, sys->vmend);

    /*
     * Set up the map for PD entry access by inserting
     * the relevant PDP entry into the PD. It's equivalent
     * to PADDR(sys->pd)|PteRW|PteP.
     *
     */
    sys->pd[PDX(PDMAP)] = sys->pdp[PDPX(PDMAP)] & ~(PteD|PteA);
    print("sys->pd %#p %#p\n", sys->pd[PDX(PDMAP)], sys->pdp[PDPX(PDMAP)]);
    /* Sanity-check the self-map: walking PDMAP must yield sys->pd. */
    assert((pdeget(PDMAP) & ~(PteD|PteA)) == (PADDR(sys->pd)|PteRW|PteP));


    /* Debug: dump the page-table walk for KZERO. */
    dumpmmuwalk(KZERO);

    /* NOTE(review): return value ignored — presumably called for its
     * diagnostic side effects; confirm against mmuphysaddr's definition. */
    mmuphysaddr(PTR2UINT(end));
}