static void pt_release_pml3(pte_t *pml3, virt_t from, virt_t to)
{
	virt_t vaddr = from;

	for (int i = pml3_i(from); vaddr != to; ++i) {
		const pte_t pte = pml3[i];
		/* Bytes covered by this PML3 entry (the span of one whole
		 * PML2 table), clipped to [vaddr, to). */
		const virt_t bytes = MINU(PML2_SIZE - (vaddr & PML2_MASK),
					to - vaddr);
		const pfn_t pages = bytes >> PAGE_BITS;

		if (pte_present(pte)) {
			const phys_t paddr = pte_phys(pte);
			const pfn_t pfn = paddr >> PAGE_BITS;
			struct page *pt = pfn2page(pfn);

			pt_release_pml2(va(paddr), vaddr, vaddr + bytes);
			pt->u.refcount -= pages;
			/* Free the PML2 table once nothing maps through it. */
			if (pt->u.refcount == 0) {
				pml3[i] = 0;
				free_page_table(pt);
			}
		}
		vaddr += bytes;
	}
}
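The stride arithmetic above clips each iteration to the span of one PML3 entry. Below is a minimal standalone sketch of just that computation, assuming MINU is an unsigned minimum and PML2_SIZE names the 1 GiB span mapped through one whole PML2 table (which is how the loop uses it together with pml3_i); both definitions are guesses from context, not taken from the code.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t virt_t;

#define PAGE_BITS 12
#define PML2_SIZE (1ULL << 30)	/* assumed: span of one whole PML2 table */
#define PML2_MASK (PML2_SIZE - 1)
#define MINU(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	const virt_t from = 0x3ff00000ULL, to = 0x100000000ULL;

	for (virt_t vaddr = from; vaddr != to; ) {
		/* Same clipping expression as in pt_release_pml3. */
		const virt_t bytes = MINU(PML2_SIZE - (vaddr & PML2_MASK),
					to - vaddr);
		printf("entry covers [%#llx, %#llx), %llu pages\n",
		       (unsigned long long)vaddr,
		       (unsigned long long)(vaddr + bytes),
		       (unsigned long long)(bytes >> PAGE_BITS));
		vaddr += bytes;
	}
	return 0;
}

The first iteration is clipped to the end of the current entry's span; every following iteration covers either a full entry or the tail up to `to`, so `vaddr` always lands exactly on `to`.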
phys_t get_phys_adr(virt_t vad)
{
	phys_t pad;
	pte_t *pml4e = pml4 + pml4_i(vad);
	pte_t *pdpte = ((pte_t *) va(pte_phys(*pml4e) << 12)) + pml3_i(vad);
	pte_t *pde = ((pte_t *) va(pte_phys(*pdpte) << 12)) + pml2_i(vad);

	if (pte_large(*pde)) {
		/* 2 MiB page: bits 63..21 come from the PDE, bits 20..0
		 * from the virtual address. Note that upper flag bits
		 * (e.g. NX, bit 63) are not masked off here. */
		pad = (*pde & ~((1 << 21) - 1)) | (vad & ((1 << 21) - 1));
		return pad;
	}

	pte_t *pte = ((pte_t *) va(pte_phys(*pde) << 12)) + pml1_i(vad);
	/* 4 KiB page: bits 63..12 come from the PTE, bits 11..0 from
	 * the virtual address. */
	pad = (*pte & ~((1 << 12) - 1)) | (vad & ((1 << 12) - 1));
	return pad;
}
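get_phys_adr assumes every level of the walk is present and ends at a 4 KiB or 2 MiB page. A hedged sketch of a defensive variant follows; it reuses the same helpers, but the name try_get_phys_adr and the (phys_t)-1 sentinel for "not mapped" are assumptions, not part of the original code.

static phys_t try_get_phys_adr(virt_t vad)
{
	pte_t *pml4e = pml4 + pml4_i(vad);

	if (!pte_present(*pml4e))
		return (phys_t)-1;

	pte_t *pdpte = ((pte_t *) va(pte_phys(*pml4e) << 12)) + pml3_i(vad);
	if (!pte_present(*pdpte))
		return (phys_t)-1;

	pte_t *pde = ((pte_t *) va(pte_phys(*pdpte) << 12)) + pml2_i(vad);
	if (!pte_present(*pde))
		return (phys_t)-1;

	if (pte_large(*pde))
		return (*pde & ~((1 << 21) - 1)) | (vad & ((1 << 21) - 1));

	pte_t *pte = ((pte_t *) va(pte_phys(*pde) << 12)) + pml1_i(vad);
	if (!pte_present(*pte))
		return (phys_t)-1;

	return (*pte & ~((1 << 12) - 1)) | (vad & ((1 << 12) - 1));
}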
static int pt_populate_pml3(pte_t *pml3, virt_t from, virt_t to, pte_t flags)
{
	virt_t vaddr = from;

	for (int i = pml3_i(from); vaddr != to; ++i) {
		struct page *pt;
		phys_t paddr;
		const virt_t bytes = MINU(PML2_SIZE - (vaddr & PML2_MASK),
					to - vaddr);
		const pfn_t pages = bytes >> PAGE_BITS;

		if (!pte_present(pml3[i])) {
			/* No PML2 table here yet: allocate one and hook
			 * it into the PML3 entry. */
			pt = alloc_page_table(flags);
			if (!pt) {
				pt_release_pml3(pml3, from, vaddr);
				return -ENOMEM;
			}
			paddr = page_paddr(pt);
			pml3[i] = paddr | (flags & ~PTE_LARGE);
		} else {
			const pte_t pte = pml3[i];

			paddr = pte_phys(pte);
			pt = pfn2page(paddr >> PAGE_BITS);
		}
		/* Account for every 4 KiB page that will map through
		 * this PML2 table. */
		pt->u.refcount += pages;

		const int rc = pt_populate_pml2(va(paddr), vaddr,
					vaddr + bytes, flags);
		if (rc) {
			/* Roll back the entries populated so far, then
			 * undo this entry's accounting. */
			pt_release_pml3(pml3, from, vaddr);
			pt->u.refcount -= pages;
			if (pt->u.refcount == 0) {
				pml3[i] = 0;
				free_page_table(pt);
			}
			return rc;
		}
		vaddr += bytes;
	}
	return 0;
}
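Here is a hedged sketch of the contract pt_populate_pml3 appears to rely on: alloc_page_table() hands back one zeroed 4 KiB page with its refcount at zero, and free_page_table() returns it. The allocator calls alloc_pages()/free_pages() and their order argument are pure guesses; only page_paddr(), va() and u.refcount are taken from the code above.

static struct page *alloc_page_table(pte_t flags)
{
	struct page *pt = alloc_pages(0);	/* one 4 KiB page, assumed API */

	(void)flags;
	if (pt) {
		/* A fresh table must be zeroed so every entry starts
		 * out non-present. */
		memset(va(page_paddr(pt)), 0, PAGE_SIZE);
		pt->u.refcount = 0;
	}
	return pt;
}

static void free_page_table(struct page *pt)
{
	free_pages(pt, 0);			/* assumed counterpart */
}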
static int pt_index(virt_t vaddr, int level)
{
	switch (level) {
	case 4: return pml4_i(vaddr);
	case 3: return pml3_i(vaddr);
	case 2: return pml2_i(vaddr);
	case 1: return pml1_i(vaddr);
	}
	DBG_ASSERT(0 && "Unreachable");
	return 0;
}
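pt_index just dispatches to the per-level index helpers. The standalone check below assumes the standard x86-64 9-bits-per-level split for pml4_i..pml1_i; the shift amounts are assumptions matching the usual 4-level paging layout, not definitions taken from this code.

#include <assert.h>
#include <stdio.h>
#include <stdint.h>

typedef uint64_t virt_t;

/* Assumed index helpers: 9 bits per level, offset bits 0..11. */
#define pml4_i(v) ((int)(((v) >> 39) & 0x1ff))
#define pml3_i(v) ((int)(((v) >> 30) & 0x1ff))
#define pml2_i(v) ((int)(((v) >> 21) & 0x1ff))
#define pml1_i(v) ((int)(((v) >> 12) & 0x1ff))

static int pt_index(virt_t vaddr, int level)
{
	switch (level) {
	case 4: return pml4_i(vaddr);
	case 3: return pml3_i(vaddr);
	case 2: return pml2_i(vaddr);
	case 1: return pml1_i(vaddr);
	}
	assert(0 && "Unreachable");
	return 0;
}

int main(void)
{
	const virt_t v = 0x00007f1234567000ULL;

	for (int level = 4; level >= 1; --level)
		printf("level %d index = %d\n", level, pt_index(v, level));
	return 0;
}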
void map_adr(virt_t vad, phys_t pad, int flags)
{
	if (flags & USE_BIG_PAGE) {
		/* 2 MiB pages require 2 MiB alignment. */
		assert((vad & ((1 << (12 + 9)) - 1)) == 0);
		assert((pad & ((1 << (12 + 9)) - 1)) == 0);
	} else {
		/* 4 KiB pages require 4 KiB alignment. */
		assert((vad & ((1 << 12) - 1)) == 0);
		assert((pad & ((1 << 12) - 1)) == 0);
	}

	pte_t *pml4e = pml4 + pml4_i(vad);
	force_pte(pml4e, flags);

	pte_t *pdpte = ((pte_t *) va(pte_phys(*pml4e) << 12)) + pml3_i(vad);
	force_pte(pdpte, flags);

	pte_t *pde = ((pte_t *) va(pte_phys(*pdpte) << 12)) + pml2_i(vad);
	if (flags & USE_BIG_PAGE) {
		/* Map a 2 MiB page directly at the PML2 level. */
		assert(!pte_present(*pde));
		*pde = pad | PTE_PRESENT | PTE_WRITE | PTE_LARGE;
		flush_tlb_addr(vad);
		return;
	}
	force_pte(pde, flags);

	pte_t *pte = ((pte_t *) va(pte_phys(*pde) << 12)) + pml1_i(vad);
	assert(!pte_present(*pte));
	*pte = pad | PTE_PRESENT | PTE_WRITE;

	if (!(flags & NOT_FLUSH_TLB))
		flush_tlb_addr(vad);
}
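Some hypothetical call sites for map_adr, assuming USE_BIG_PAGE and NOT_FLUSH_TLB come from the same headers; the addresses are made up and only need to satisfy the alignment asserts. Note that the big-page path above always flushes, so NOT_FLUSH_TLB only affects 4 KiB mappings.

void example_mappings(void)
{
	/* One 4 KiB page; the TLB entry for vad is flushed immediately. */
	map_adr(0xffff880000201000ULL, 0x201000ULL, 0);

	/* One 2 MiB page; both addresses are 2 MiB aligned. */
	map_adr(0xffff880000400000ULL, 0x400000ULL, USE_BIG_PAGE);

	/* A batch of 4 KiB pages: skip the per-page flush and do a single
	 * global flush afterwards (e.g. by reloading CR3, not shown). */
	for (int i = 0; i < 16; ++i)
		map_adr(0xffff880001000000ULL + i * 0x1000ULL,
			0x1000000ULL + i * 0x1000ULL,
			NOT_FLUSH_TLB);
}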
static inline pte_t *__pdpte(pte_t *pml4, virt_t addr)
{
	return pte_level_addr(*__pml4e(pml4, addr)) + pml3_i(addr);
}
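__pdpte leans on __pml4e and pte_level_addr, neither of which is shown. A sketch of the missing pieces in the same one-liner style follows: __pml4e's body is inferred from how it is used, and the lower-level siblings __pde/__pte are assumptions, as is the behavior of pte_level_addr (taken here to return the kernel virtual address of the table an entry points at).

static inline pte_t *__pml4e(pte_t *pml4, virt_t addr)
{
	return pml4 + pml4_i(addr);
}

/* Hypothetical lower-level siblings in the same pattern. */
static inline pte_t *__pde(pte_t *pml4, virt_t addr)
{
	return pte_level_addr(*__pdpte(pml4, addr)) + pml2_i(addr);
}

static inline pte_t *__pte(pte_t *pml4, virt_t addr)
{
	return pte_level_addr(*__pde(pml4, addr)) + pml1_i(addr);
}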