static void
checkpte(uintmem ppn, void *a)
{
	Proc *up = externup();
	int l;
	PTE *pte, *pml4;
	uint64_t addr;
	char buf[240], *s;

	addr = PTR2UINT(a);
	pml4 = UINT2PTR(machp()->pml4->va);
	pte = 0;
	s = buf;
	*s = 0;
	if((l = mmuwalk(pml4, addr, 3, &pte, nil)) < 0 || (*pte&PteP) == 0)
		goto Panic;
	s = seprint(buf, buf+sizeof buf,
		"check3: l%d pte %#p = %llux\n", l, pte, pte?*pte:~0);
	if((l = mmuwalk(pml4, addr, 2, &pte, nil)) < 0 || (*pte&PteP) == 0)
		goto Panic;
	s = seprint(s, buf+sizeof buf,
		"check2: l%d pte %#p = %llux\n", l, pte, pte?*pte:~0);
	if(*pte&PtePS)
		return;
	if((l = mmuwalk(pml4, addr, 1, &pte, nil)) < 0 || (*pte&PteP) == 0)
		goto Panic;
	seprint(s, buf+sizeof buf,
		"check1: l%d pte %#p = %llux\n", l, pte, pte?*pte:~0);
	return;

Panic:
	seprint(s, buf+sizeof buf,
		"checkpte: l%d addr %#p ppn %#ullx kaddr %#p pte %#p = %llux",
		l, a, ppn, KADDR(ppn), pte, pte?*pte:~0);
	print("%s\n", buf);
	seprint(buf, buf+sizeof buf,
		"start %#ullx unused %#ullx unmap %#ullx end %#ullx\n",
		sys->vmstart, sys->vmunused, sys->vmunmapped, sys->vmend);
	panic("%s", buf);
}
static void
sanity(void)
{
	uintptr cr3;

	cr3 = (uintptr)KADDR(getcr3());
	if(cr3 == 0)
		panic("zero cr3");
	if((uintptr)m->pdb != cr3 || (uintptr)mach0pdb != cr3)
		panic("not all same: cr3 %#p m->pdb %#p mach0pdb %#p",
			cr3, m->pdb, mach0pdb);
	if(m != mach0m)
		panic("m %#p != mach0m %#p", m, mach0m);
	if(m->gdt != mach0gdt)
		panic("m->gdt %#p != mach0gdt %#p", m->gdt, mach0gdt);
	if(0)
		iprint("m->pdb %#p m %#p sp %#p m->gdt %#p\n",
			m->pdb, m, &cr3, m->gdt);
}
//
// Frees env e and all memory it uses.
//
void
env_free(struct Env *e)
{
	pte_t *pt;
	uint32_t pdeno, pteno;
	physaddr_t pa;

	// Note the environment's demise.
	cprintf("[%08x] free env %08x\n", curenv ? curenv->env_id : 0, e->env_id);

	// Flush all mapped pages in the user portion of the address space
	static_assert(UTOP % PTSIZE == 0);
	for (pdeno = 0; pdeno < PDX(UTOP); pdeno++) {

		// only look at mapped page tables
		if (!(e->env_pgdir[pdeno] & PTE_P))
			continue;

		// find the pa and va of the page table
		pa = PTE_ADDR(e->env_pgdir[pdeno]);
		pt = (pte_t*) KADDR(pa);

		// unmap all PTEs in this page table
		for (pteno = 0; pteno <= PTX(~0); pteno++) {
			if (pt[pteno] & PTE_P)
				page_remove(e->env_pgdir, PGADDR(pdeno, pteno, 0));
		}

		// free the page table itself
		e->env_pgdir[pdeno] = 0;
		page_decref(pa2page(pa));
	}

	// free the page directory
	pa = e->env_cr3;
	e->env_pgdir = 0;
	e->env_cr3 = 0;
	page_decref(pa2page(pa));

	// return the environment to the free list
	e->env_status = ENV_FREE;
	LIST_INSERT_HEAD(&env_free_list, e, env_link);
}
static physaddr_t
check_va2pa(pde_t *pgdir, uintptr_t va)
{
	pte_t *p;

	pgdir = &pgdir[PDX(va)];
	if (!(*pgdir & PTE_P)){
		//cprintf("!(*pgdir & PTE_P)\n");
		return ~0;
	}
	p = (pte_t*) KADDR(PTE_ADDR(*pgdir));
	if (!(p[PTX(va)] & PTE_P)){
		//cprintf("!(p[PTX(va)] & PTE_P)\n");
		return ~0;
	}
	return PTE_ADDR(p[PTX(va)]);
}
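/*
 * A hedged usage sketch, not part of the snippet above: in JOS-style
 * pmap checks, check_va2pa() is used to verify whole ranges at once.
 * The globals npages and KERNBASE are assumed from the usual JOS lab
 * environment.
 */
static void
check_kernbase_mapping(pde_t *pgdir)
{
	size_t i;

	/* every physical page should be mapped at KERNBASE + pa */
	for (i = 0; i < npages * PGSIZE; i += PGSIZE)
		assert(check_va2pa(pgdir, KERNBASE + i) == i);
}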
pte_t *
pml4e_walk(pml4e_t *pml4e, const void *va, int create)
{
	struct Page *newPage = NULL;
	//if(!create) cprintf("va = %0x, pml4e[PML4(va)] = %0x\n", va, pml4e[PML4(va)]);

	if (!pml4e[PML4(va)]) {
		if (!create)
			return NULL;
		newPage = page_alloc(0);
		if (newPage == 0)
			return NULL;
		newPage->pp_ref++;
		pml4e[PML4(va)] = page2pa(newPage) | PTE_U | PTE_W | PTE_P;
		memset(page2kva(newPage), 0x00, PGSIZE);
	}

	pdpe_t *pdpe = (pdpe_t*)KADDR(PTE_ADDR(pml4e[PML4(va)]));
	pte_t *result = pdpe_walk(pdpe, va, create);
	if (!result && newPage) {
		// pdpe_walk failed: roll back the PML4 entry we installed
		pml4e[PML4(va)] = 0;
		newPage->pp_ref = 0;
		page_free(newPage);
	}
	if (result)
		return result + PTX(va);
	return result;
}
void *
vmap(ulong phys, ulong length)
{
	ulong virt, off, *l2;

	off = phys % BY2PG;
	length = (ROUNDUP(phys + length, BY2PG) - ROUNDDN(phys, BY2PG)) / BY2PG;
	if(length == 0)
		return nil;
	phys = ROUNDDN(phys, BY2PG);
	virt = getiopages(length);
	l2 = KADDR(IOPT);
	l2 += virt;
	while(length--){
		*l2++ = phys | L2AP(Krw) | Small | PTEIO;
		phys += BY2PG;
	}
	flushtlb();
	return (void *) (IZERO + BY2PG * virt + off);
}
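/*
 * A hedged usage sketch: a driver mapping a device register window
 * with the vmap()/vunmap() pair in this file. Regbase and Regsize are
 * hypothetical placeholders, not values from any real board.
 */
enum {
	Regbase	= 0x48000000,	/* hypothetical MMIO base */
	Regsize	= 0x1000,
};

static ulong *devregs;

static void
devreset(void)
{
	devregs = vmap(Regbase, Regsize);
	if(devregs == nil)
		panic("devreset: vmap failed");
	/* ... program devregs[...]; call vunmap(devregs, Regsize) on shutdown */
}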
int
mon_si(int argc, char **argv, struct Trapframe *tf)
{
	if (tf == NULL ||
	    (tf->tf_trapno != T_BRKPT && tf->tf_trapno != T_DEBUG)) {
		cprintf("Cannot invoke si, no breakpoint exception or debug exception invoked\n");
		return 1;
	}

	uint32_t opcode;
	pte_t *entry;
	uint32_t address;

	// Get the page table entry for tf_eip: we are in kernel mode, so
	// the user address must be translated by hand.
	address = tf->tf_eip;
	entry = pgdir_walk(curenv->env_pgdir, (void *)address, 0);
	if (entry == NULL) {
		panic("Bad address in gdb");
	}

	// print the name of the instruction's first opcode byte
	address = (uint32_t)KADDR(PTE_ADDR(*entry)) | (address & 0xfff);
	opcode = *((uint32_t *)address);
	opcode &= 0xff;
	cprintf("Instruction: %s\n", opcnames[(int)opcode]);

	if (tf->tf_eflags & FL_TF) {
		cprintf("Trap Flag set in EFLAGS\n");
	}
	tf->tf_eflags |= FL_TF | FL_RF;
	return -1;	// negative return resumes the environment
}
void
mmuinit(void)
{
	ulong *pte, npgs, pa;

	if(paemode){
		int i;
		xenpdpt = (uvlong*)m->pdb;
		m->pdb = xspanalloc(32, 32, 0);
		/* clear "reserved" bits in initial page directory pointers -- Xen bug? */
		for(i = 0; i < 4; i++)
			((uvlong*)m->pdb)[i] = xenpdpt[i] & ~0x1E6LL;
	}

	/*
	 * So far only memory up to xentop is mapped, map the rest.
	 * We can't use large pages because our contiguous PA space
	 * is not necessarily contiguous in MA.
	 */
	npgs = conf.mem[0].npage;
	for(pa = conf.mem[0].base; npgs; npgs--, pa += BY2PG) {
		pte = mmuwalk(m->pdb, (ulong)KADDR(pa), 2, 1);
		if(!pte)
			panic("mmuinit");
		xenupdate(pte, pa|PTEVALID|PTEWRITE);
	}

	memglobal();

#ifdef we_may_eventually_want_this
	/* make kernel text unwritable */
	for(x = KTZERO; x < (ulong)etext; x += BY2PG){
		p = mmuwalk(m->pdb, x, 2, 0);
		if(p == nil)
			panic("mmuinit");
		*p &= ~PTEWRITE;
	}
#endif

	taskswitch(0, (ulong)m + BY2PG);
}
/**
 * Check that the boot page directory is set up correctly.
 * NOTE: we don't have an mm_struct at present. Since a write to a clean
 * page also raises SIGSEGV, we cannot handle that case yet, so every
 * page we insert is marked accessed and dirty up front.
 */
void
check_boot_pgdir(void)
{
	pte_t *ptep;
	int i;

	for (i = 0; i < npage; i += PGSIZE) {
		assert((ptep = get_pte(boot_pgdir, (uintptr_t)KADDR(i), 0)) != NULL);
		assert(PTE_ADDR(*ptep) == i);
	}

	//assert(PDE_ADDR(boot_pgdir[PDX(VPT)]) == PADDR(boot_pgdir));
	assert(boot_pgdir[PDX(TEST_PAGE)] == 0);

	struct Page *p;
	p = alloc_page();
	assert(page_insert(boot_pgdir, p, TEST_PAGE, PTE_W | PTE_D | PTE_A) == 0);
	assert(page_ref(p) == 1);
	assert(page_insert(boot_pgdir, p, TEST_PAGE + PGSIZE, PTE_W | PTE_D | PTE_A) == 0);
	assert(page_ref(p) == 2);

	const char *str = "ucore: Hello world!!";
	strcpy((void *)TEST_PAGE, str);
	assert(strcmp((void *)TEST_PAGE, (void *)(TEST_PAGE + PGSIZE)) == 0);

	*(char *)(page2kva(p)) = '\0';
	assert(strlen((const char *)TEST_PAGE) == 0);

	/*
	 * On the um architecture, clearing a page table entry does not by
	 * itself invalidate the linear address, so flush the mappings by hand.
	 */
	tlb_invalidate(boot_pgdir, TEST_PAGE);
	tlb_invalidate(boot_pgdir, TEST_PAGE + PGSIZE);

	free_page(p);
	free_page(pa2page(PDE_ADDR(boot_pgdir[PDX(TEST_PAGE)])));
	boot_pgdir[PDX(TEST_PAGE)] = 0;

	kprintf("check_boot_pgdir() succeeded.\n");
}
void
mpshutdown(void)
{
	/*
	 * To be done...
	 */
	if(!canlock(&mpshutdownlock)){
		/*
		 * If this processor received the CTRL-ALT-DEL from
		 * the keyboard, acknowledge it. Send an INIT to self.
		 */
#ifdef FIXTHIS
		if(lapicisr(VectorKBD))
			lapiceoi(VectorKBD);
#endif /* FIX THIS */
		idle();
	}

	print("mpshutdown: active = 0x%2.2uX\n", active.machs);
	delay(1000);
	splhi();

	/*
	 * INIT all excluding self.
	 */
	lapicicrw(0, 0x000C0000|ApicINIT);

#ifdef notdef
	/*
	 * Often the BIOS hangs during restart if a conventional 8042
	 * warm-boot sequence is tried. The following is Intel specific and
	 * seems to perform a cold-boot, but at least it comes back.
	 */
	*(ushort*)KADDR(0x472) = 0x1234;	/* BIOS warm-boot flag */
	outb(0xCF9, 0x02);
	outb(0xCF9, 0x06);
#else
	pcireset();
	i8042reset();
#endif /* notdef */
}
ZVMSTATUS
MmInitManager(uint32_t *pgdir, uint32_t *hostcr3)
{
	void *va;
	uint32_t pa, tmp;

	memcpy(hostcr3, pgdir, PGSIZE);
	for(uint32_t i = 0; i < 1024; i++) {
		if(hostcr3[i] != 0) {
			va = MmAllocPages(1, &pa);
			tmp = hostcr3[i];
			tmp = tmp & 0xfffff000;
			memcpy(va, KADDR(tmp), PGSIZE);	// find the virtual address for this physical one
			hostcr3[i] = hostcr3[i] & 0xfff;
			hostcr3[i] = hostcr3[i] | pa;
		}
	}
	return ZVMSUCCESS;
}
/*
 * called by the reset routine of any driver using the IIC
 */
void
i2csetup(int polling)
{
	I2Cregs *i2c;
	Ctlr *ctlr;

	ctlr = i2cctlr;
	ctlr->polling = polling;
	i2c = KADDR(PHYSI2C);
	ctlr->regs = i2c;
	if(!polling){
		if(ctlr->init == 0){
			initialise(i2c, 1);
			ctlr->init = 1;
			intrenable(IRQ, IRQi2c, interrupt, i2cctlr, "i2c");
			if(Chatty)
				i2cdump("init", i2c);
		}
	}else
		initialise(i2c, 0);
}
void
vunmap(void *virt, ulong length)
{
	ulong v, *l2;

	if((ulong)virt < IZERO || (ulong)virt >= IZERO + NIOPAGES * BY2PG)
		panic("vunmap: virt=%p", virt);
	v = (ROUNDDN((ulong) virt, BY2PG) - IZERO) / BY2PG;
	length = (ROUNDUP(((ulong) virt) + length, BY2PG) - ROUNDDN((ulong) virt, BY2PG)) / BY2PG;
	if(length == 0)
		return;
	l2 = KADDR(IOPT);
	l2 += v;
	lock(&iopagelock);
	while(length--){
		*l2++ = 0;
		freeio(v++);
	}
	unlock(&iopagelock);
	flushtlb();
}
static void
unmap_range_pud(pgd_t *pgdir, pud_t *pud, uintptr_t base, uintptr_t start, uintptr_t end)
{
#if PUXSHIFT == PGXSHIFT
	unmap_range_pmd(pgdir, pud, base, start, end);
#else
	assert(start >= 0 && start < end && end <= PUSIZE);
	size_t off, size;
	uintptr_t la = ROUNDDOWN(start, PMSIZE);
	do {
		off = start - la, size = PMSIZE - off;
		if (size > end - start) {
			size = end - start;
		}
		pud_t *pudp = &pud[PUX(la)];
		if (ptep_present(pudp)) {
			unmap_range_pmd(pgdir, KADDR(PUD_ADDR(*pudp)), base + la, off, off + size);
		}
		start += size, la += PMSIZE;
	} while (start != 0 && start < end);
#endif
}
static long
pcmwrite(int dev, int attr, void *a, long n, vlong off)
{
	int i, len;
	PCMmap *m;
	uchar *ac;
	PCMslot *pp;
	ulong offset = off;

	pp = slot + dev;
	if(pp->memlen < offset)
		return 0;
	if(pp->memlen < offset + n)
		n = pp->memlen - offset;

	m = 0;
	if(waserror()){
		if(m)
			pcmunmap(pp->slotno, m);
		nexterror();
	}

	ac = a;
	for(len = n; len > 0; len -= i){
		m = pcmmap(pp->slotno, offset, 0, attr);
		if(m == 0)
			error("cannot map PCMCIA card");
		if(offset + len > m->cea)
			i = m->cea - offset;
		else
			i = len;
		memmoveb(KADDR(m->isa + offset - m->ca), ac, i);
		pcmunmap(pp->slotno, m);
		offset += i;
		ac += i;
	}
	poperror();
	return n;
}
static int
fload(Ctlr *c)
{
	ulong data, io, r, adr;
	ushort sum;
	Flash f;
	Pcidev *p;

//	io = c->pcidev->mem[1].bar & ~0x0f;
//	f.reg = vmap(io, c->pcidev->mem[1].size);
//	if(f.reg == nil)
//		return -1;
	p = c->pcidev;
	io = upamalloc(p->mem[1].bar & ~0x0F, p->mem[1].size, 0);
	if(io == 0){
		print("igbepcie: can't map flash @ 0x%8.8lux\n", p->mem[1].bar);
		return -1;
	}
	f.reg = KADDR(io);
	f.reg32 = (ulong*)f.reg;
	f.sz = f.reg32[Bfpr];
	r = f.sz & 0x1fff;
	if(csr32r(c, Eec) & (1<<22))
		++r;
	r <<= 12;

	sum = 0;
	for (adr = 0; adr < 0x40; adr++) {
		data = fread(c, &f, r + adr*2);
		if(data == -1)
			break;
		c->eeprom[adr] = data;
		sum += data;
	}
//	vunmap(f.reg, c->pcidev->mem[1].size);
	return sum;
}
// Given 'pgdir', a pointer to a page directory, pgdir_walk returns
// a pointer to the page table entry (PTE) for linear address 'va'.
// This requires walking the two-level page table structure.
//
// bluesea
// What pgdir_walk actually returns: the address of the page-table
// entry that maps the page containing the virtual address va. That is,
// an entry of the second-level page table, not an entry of the page
// directory (see the related analysis in check_page() for the
// reasoning), and the returned address is itself a virtual address!
//
// The requirement below may look inconsistent with that idea: a PTE
// with PTE_P clear, i.e. a missing page, should normally be handled by
// the page-fault handler. That is a different story; here pgdir_walk
// is essentially only used to initialize the kernel's virtual-memory
// mappings, so allocating a new page table on a miss is fine.
//
// The relevant page table page might not exist yet.
// If this is true, and create == false, then pgdir_walk returns NULL.
// Otherwise, pgdir_walk allocates a new page table page with page_alloc.
//	- If the allocation fails, pgdir_walk returns NULL.
//	- Otherwise, the new page's reference count is incremented,
//	  the page is cleared,
//	  and pgdir_walk returns a pointer into the new page table page.
// (Note: in that case we also return the address of the page-table
// entry, not of the page-directory entry. The page-table entry's flags
// don't matter; we only need to set PTE_P in the corresponding
// page-directory entry.)
//
// Hint 1: you can turn a PageInfo * into the physical address of the
// page it refers to with page2pa() from kern/pmap.h.
//
// Hint 2: the x86 MMU checks permission bits in both the page directory
// and the page table, so it's safe to leave permissions in the page
// directory more permissive than strictly necessary.
//
// Hint 3: look at inc/mmu.h for useful macros that manipulate page
// table and page directory entries.
//
pte_t *
pgdir_walk(pde_t *pgdir, const void *va, int create)
{
	// Fill this function in
	// bluesea
	uint32_t pdx = PDX(va), ptx = PTX(va);
	pde_t *pt = 0;

	if (pgdir[pdx] & PTE_P){
		pt = KADDR(PTE_ADDR(pgdir[pdx]));
		return &pt[ptx];
	}
	if (!create)
		return NULL;

	struct PageInfo *page = page_alloc(ALLOC_ZERO);
	if (!page)
		return NULL;
	page->pp_ref = 1;
	pgdir[pdx] = page2pa(page) | PTE_P | PTE_U;
	pt = page2kva(page);
	//pt[ptx] = PTE_U;
	return &pt[ptx];
}
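/*
 * A hedged usage sketch: a typical JOS-style caller of pgdir_walk().
 * page_lookup() is part of the same lab; this body is one plausible
 * implementation, not necessarily the one from this tree.
 */
struct PageInfo *
page_lookup(pde_t *pgdir, void *va, pte_t **pte_store)
{
	pte_t *pte = pgdir_walk(pgdir, va, 0);	// look up only, never create

	if (!pte || !(*pte & PTE_P))
		return NULL;			// va is not mapped
	if (pte_store)
		*pte_store = pte;		// hand the PTE back to the caller
	return pa2page(PTE_ADDR(*pte));
}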
static Apic*
mkioapic(PCMPioapic* p)
{
	Apic *apic;

	if(!(p->flags & PcmpEN) || p->apicno > MaxAPICNO)
		return 0;

	/*
	 * Map the I/O APIC.
	 */
	if(mmukmap(p->addr, 0, 1024) == 0)
		return 0;

	apic = &mpapic[p->apicno];
	apic->type = PcmpIOAPIC;
	apic->apicno = p->apicno;
	apic->addr = KADDR(p->addr);
	apic->flags = p->flags;

	return apic;
}
KMap*
kmap(Page *page)
{
	uintptr *pte, pa, va;
	int x;

	pa = page->pa;
	if(cankaddr(pa) != 0)
		return (KMap*)KADDR(pa);

	x = splhi();
	va = KMAP + ((uintptr)up->kmapindex << PGSHIFT);
	pte = mmuwalk(m->pml4, va, 0, 1);
	if(pte == 0 || *pte & PTEVALID)
		panic("kmap: pa=%#p va=%#p", pa, va);
	*pte = pa | PTEWRITE|PTEVALID;
	up->kmapindex = (up->kmapindex + 1) % (1<<PTSHIFT);
	if(up->kmapindex == 0)
		mmuflushtlb();
	splx(x);
	return (KMap*)va;
}
uintptr*
mmuwalk(uintptr* table, uintptr va, int level, int create)
{
	uintptr pte;
	int i, x;

	x = PTLX(va, 3);
	for(i = 2; i >= level; i--){
		pte = table[x];
		if(pte & PTEVALID){
			if(pte & PTESIZE)
				return 0;
			table = KADDR(PPN(pte));
		} else {
			if(!create)
				return 0;
			table = mmucreate(table, va, i, x);
		}
		x = PTLX(va, i);
	}
	return &table[x];
}
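/*
 * A hedged sketch, not from the source tree: resolving a virtual
 * address to a physical one with mmuwalk(). It assumes the m->pml4,
 * PTEVALID, PPN and PGSHIFT names used by kmap() above; mmuwalk()
 * returns 0 for large pages (PTESIZE), so those are not handled here.
 */
uintptr
va2pa(uintptr va)
{
	uintptr *pte;

	pte = mmuwalk(m->pml4, va, 0, 0);	/* walk to the leaf, no create */
	if(pte == 0 || (*pte & PTEVALID) == 0)
		return 0;
	return PPN(*pte) | (va & ((1<<PGSHIFT)-1));
}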
static void
free_ept_level(epte_t* eptrt, int level)
{
	epte_t* dir = eptrt;
	int i;

	for(i = 0; i < NPTENTRIES; ++i) {
		if(level != 0) {
			if(epte_present(dir[i])) {
				physaddr_t pa = epte_addr(dir[i]);
				free_ept_level((epte_t*) KADDR(pa), level-1);
				// free the table.
				page_decref(pa2page(pa));
			}
		} else {
			// Last level, free the guest physical page.
			if(epte_present(dir[i])) {
				physaddr_t pa = epte_addr(dir[i]);
				page_decref(pa2page(pa));
			}
		}
	}
	return;
}
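/*
 * A hedged usage sketch: releasing a whole extended page table from
 * its root. The name free_guest_mem and the level arithmetic are
 * assumptions: with a four-level EPT the root has three levels of
 * tables beneath it, matching the recursion above, which treats
 * level 0 as the leaf.
 */
static void
free_guest_mem(epte_t *eptrt)
{
	free_ept_level(eptrt, 3);	/* assumed: 4-level EPT, root at level 3 */
}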
static Pte *
boot_pgdir_walk(Pde *pgdir, u_long va, int create)
{
	Pde *pgdir_entryp;
	Pte *pgtable, *pgtable_entry;

	pgdir_entryp = (Pde *)(&pgdir[PDX(va)]);
	/* valid only if the directory entry is present; replaced below otherwise */
	pgtable = (Pte *)KADDR(PTE_ADDR(*pgdir_entryp));
	if (*pgdir_entryp == 0) {
		if (create == 0) {
			return 0;
		} else {
			pgtable = alloc(BY2PG, BY2PG, 1);
			*pgdir_entryp = PADDR(pgtable) | PTE_V | PTE_R;
		}
	}

	pgtable_entry = (Pte *)(&pgtable[PTX(va)]);
	//printf("pgtable_entry = %x va = %d pgdir=%d\n",pgtable,va,pgdir);
	return pgtable_entry;
}
// Given 'pgdir', a pointer to a page directory, pgdir_walk returns
// a pointer to the page table entry (PTE) for linear address 'va'.
// This requires walking the two-level page table structure.
//
// The relevant page table page might not exist yet.
// If this is true, and create == false, then pgdir_walk returns NULL.
// Otherwise, pgdir_walk allocates a new page table page with page_alloc.
//	- If the allocation fails, pgdir_walk returns NULL.
//	- Otherwise, the new page's reference count is incremented,
//	  the page is cleared,
//	  and pgdir_walk returns a pointer into the new page table page.
//
// Hint 1: you can turn a Page * into the physical address of the
// page it refers to with page2pa() from kern/pmap.h.
//
// Hint 2: the x86 MMU checks permission bits in both the page directory
// and the page table, so it's safe to leave permissions in the page
// directory more permissive than strictly necessary.
//
// Hint 3: look at inc/mmu.h for useful macros that manipulate page
// table and page directory entries.
//
pte_t *
pgdir_walk(pde_t *pgdir, const void *va, int create)
{
	pde_t *pde;		// page directory entry that covers va
	pte_t *pgtable;		// kernel virtual address of va's page table page
	struct PageInfo *pp;

	pde = &pgdir[PDX(va)];
	if(*pde & PTE_P) {
		pgtable = KADDR(PTE_ADDR(*pde));
	} else {
		// the page table page does not exist yet
		if(!create || !(pp = page_alloc(ALLOC_ZERO)) || !(pgtable = (pte_t*)page2kva(pp)))
			return NULL;
		pp->pp_ref++;
		*pde = PADDR(pgtable) | PTE_P | PTE_W | PTE_U;
	}
	return &pgtable[PTX(va)];
}
int
page_alloc(struct Page **pp)
{
	// Fill this function in
	struct Page *ppage_temp;

	ppage_temp = LIST_FIRST(&page_free_list);
	//printf("%x\n",ppage_temp);
	//printf("pages__%x\n",ppage_temp);
	if (ppage_temp != NULL) {
		*pp = ppage_temp;
		LIST_REMOVE(ppage_temp, pp_link);
		page_initpp(*pp);
		// zero the page through its kernel virtual address
		bzero((void *)KADDR(page2pa(ppage_temp)), BY2PG);
		return 0;
	}
	return -E_NO_MEM;
}
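/*
 * A hedged usage sketch for this page_alloc() variant; the helper name
 * is made up. page_alloc() already zeroes the page, so the caller only
 * converts the physical address to a kernel virtual one.
 */
static void *
alloc_zeroed_kpage(void)
{
	struct Page *pp;

	if (page_alloc(&pp) < 0)
		return NULL;			/* out of memory */
	return (void *)KADDR(page2pa(pp));
}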
// Given 'pgdir', a pointer to a page directory, pgdir_walk returns
// a pointer to the page table entry (PTE) for linear address 'va'.
// This requires walking the two-level page table structure.
//
// The relevant page table page might not exist yet.
// If this is true, and create == false, then pgdir_walk returns NULL.
// Otherwise, pgdir_walk allocates a new page table page with page_alloc.
//	- If the allocation fails, pgdir_walk returns NULL.
//	- Otherwise, the new page's reference count is incremented,
//	  the page is cleared,
//	  and pgdir_walk returns a pointer into the new page table page.
//
// Hint 1: you can turn a Page * into the physical address of the
// page it refers to with page2pa() from kern/pmap.h.
//
// Hint 2: the x86 MMU checks permission bits in both the page directory
// and the page table, so it's safe to leave permissions in the page
// directory more permissive than strictly necessary.
//
// Hint 3: look at inc/mmu.h for useful macros that manipulate page
// table and page directory entries.
//
pte_t *
pgdir_walk(pde_t *pgdir, const void *va, int create)
{
	// need to handle permissions!!!
	uintptr_t pd_index = 0, pt_index = 0;
	physaddr_t pa_ptba, pde_perm;
	pte_t *va_ptba = NULL, *va_pte = NULL;
	struct PageInfo *req_page;

	pd_index = PDX(va);
	// check whether va_ptba + pt_index is correct pointer arithmetic or not.
	// permissions given for the directory entry are PTE_P and PTE_W
	//*(pgdir+pd_index) = *(pgdir+pd_index) | PTE_P | PTE_W;
	pa_ptba = *(pgdir + pd_index);
	if(!(pa_ptba & PTE_P)) {
		// set up a page table for the requested virtual address.
		if(create) {
			req_page = page_alloc(ALLOC_ZERO);
			if(req_page == NULL)
				return NULL;
			req_page->pp_ref++;
			pa_ptba = page2pa(req_page) | PTE_P | PTE_U | PTE_W;
			*(pgdir + pd_index) = pa_ptba;
		}
		else
			return NULL;
	}
	pde_perm = PGOFF(pa_ptba);	// (computed but unused)
	pa_ptba = PTE_ADDR(pa_ptba);
	va_ptba = KADDR(pa_ptba);
	pt_index = PTX(va);
	va_pte = va_ptba + pt_index;
	return va_pte;
}
static void
archreset(void)
{
	i8042reset();

	/*
	 * Often the BIOS hangs during restart if a conventional 8042
	 * warm-boot sequence is tried. The following is Intel specific and
	 * seems to perform a cold-boot, but at least it comes back.
	 * And sometimes there is no keyboard...
	 *
	 * The reset register (0xcf9) is usually in one of the bridge
	 * chips. The actual location and sequence could be extracted from
	 * ACPI but why bother, this is the end of the line anyway.
	 */
	print("Takes a licking and keeps on ticking...\n");
	*(ushort*)KADDR(0x472) = 0x1234;	/* BIOS warm-boot flag */
	outb(0xcf9, 0x02);
	outb(0xcf9, 0x06);

	for(;;)
		idle();
}
PageStruct*
allocate_page()
{
	/* This function pulls the first available page from the page free
	   list and returns a virtual address corresponding to that free page */
	PageStruct *pageToReturn = page_free_list;

	if(pageToReturn){
		// printf("Allocating page: %p\t", pageToPhysicalAddress(pageToReturn));
		free_pages--;
		page_free_list = page_free_list->next;
		pageToReturn->next = NULL;
		kmemset((uint64_t*)KADDR(pageToPhysicalAddress(pageToReturn)), 0, PGSIZE);
	}
	else{
		printf("ERROR!!! No pages to allocate in the free list\n");
		while(1);
	}
	return pageToReturn;
}
void
dumpptepg(int lvl, uintptr_t pa)
{
	PTE *pte;
	int tab, i;

	tab = 4 - lvl;
	pte = UINT2PTR(KADDR(pa));
	for(i = 0; i < PTSZ/sizeof(PTE); i++)
		if(pte[i] & PteP) {
			tabs(tab);
			print("l%d %#p[%#05x]: %#ullx\n", lvl, pa, i, pte[i]);

			/* skip kernel mappings */
			if((pte[i]&PteU) == 0) {
				tabs(tab+1);
				print("...kern...\n");
				continue;
			}
			if(lvl > 2)
				dumpptepg(lvl-1, PPN(pte[i]));
		}
}
void
mkmultiboot(void)
{
	MMap *lmmap;

	/* reuse the bios table memory */
	multibootheader = (Mbi *)KADDR(BIOSTABLES);
	memset(multibootheader, 0, sizeof *multibootheader);

	lmmap = (MMap *)(multibootheader + 1);
	memmove(lmmap, mmap, sizeof mmap);
	multibootheader->cmdline = PADDR(BOOTLINE);
	multibootheader->flags |= Fcmdline;
	if(nmmap != 0){
		multibootheader->mmapaddr = PADDR(lmmap);
		multibootheader->mmaplength = nmmap*sizeof(MMap);
		multibootheader->flags |= Fmmap;
	}
	multibootheader = (Mbi *)PADDR(multibootheader);
	if(v_flag)
		print("PADDR(&multibootheader) %#p\n", multibootheader);
}
// Given 'pgdir', a pointer to a page directory, pgdir_walk returns
// a pointer to the page table entry (PTE) for linear address 'va'.
// This requires walking the two-level page table structure.
//
// The relevant page table page might not exist yet.
// If this is true, and create == false, then pgdir_walk returns NULL.
// Otherwise, pgdir_walk allocates a new page table page with page_alloc.
//	- If the allocation fails, pgdir_walk returns NULL.
//	- Otherwise, the new page's reference count is incremented,
//	  the page is cleared,
//	  and pgdir_walk returns a pointer into the new page table page.
//
// Hint 1: you can turn a Page * into the physical address of the
// page it refers to with page2pa() from kern/pmap.h.
//
// Hint 2: the x86 MMU checks permission bits in both the page directory
// and the page table, so it's safe to leave permissions in the page
// directory more permissive than strictly necessary.
//
// Hint 3: look at inc/mmu.h for useful macros that manipulate page
// table and page directory entries.
//
pte_t *
pgdir_walk(pde_t *pgdir, const void *va, int create)
{
	// Fill this function in
	pde_t *pgdir_entry = &pgdir[PDX(va)];
	pte_t *pgtb_entry = NULL;
	struct PageInfo *pg = NULL;

	if (!(*pgdir_entry & PTE_P)){
		if(create){
			pg = page_alloc(1);
			if (!pg)
				return NULL;
			memset(page2kva(pg), 0, PGSIZE);
			pg->pp_ref += 1;
			*pgdir_entry = page2pa(pg)|PTE_P|PTE_U|PTE_W;
		}else{
			return NULL;
		}
	}
	pgtb_entry = KADDR(PTE_ADDR(*pgdir_entry));
	return &pgtb_entry[PTX(va)];
}