/*
 * Populate the PML2 entries covering [from, to): allocate a PML1 table
 * for every slot that needs one and account references on each table.
 *
 * The refcount of a PML1 table counts the 4K pages mapped through it
 * (pages = bytes >> PAGE_BITS is added per slot).
 *
 * Returns 0 on success, -ENOMEM if a table allocation fails (already
 * populated entries in [from, vaddr) are rolled back via
 * pt_release_pml2 before returning).
 */
static int pt_populate_pml2(pte_t *pml2, virt_t from, virt_t to, pte_t flags) {
	virt_t vaddr = from;

	for (int i = pml2_i(from); vaddr != to; ++i) {
		/* Bytes this PML2 slot covers, clipped at the range end;
		 * vaddr += bytes therefore lands exactly on `to`. */
		const virt_t bytes = MINU(PML1_SIZE - (vaddr & PML1_MASK), to - vaddr);
		const pfn_t pages = bytes >> PAGE_BITS;

		if (!pte_present(pml2[i]) && !pte_large(flags)) {
			/* Empty slot and a 4K mapping: install a fresh PML1. */
			struct page *pt = alloc_page_table(flags);

			if (!pt) {
				/* Undo everything populated so far. */
				pt_release_pml2(pml2, from, vaddr);
				return -ENOMEM;
			}
			pt->u.refcount += pages;
			pml2[i] = page_paddr(pt) | flags;
		} else if (pte_present(pml2[i]) && !pte_large(pml2[i])) {
			/* Slot already points at a PML1 table: only take
			 * additional references for the new pages. */
			const pfn_t pfn = pte_phys(pml2[i]) >> PAGE_BITS;
			struct page *pt = pfn2page(pfn);

			pt->u.refcount += pages;
		}
		/* Remaining cases (large mapping requested, or slot already
		 * holds a large page) need no PML1, so nothing to do. */
		vaddr += bytes;
	}
	return 0;
}
static void buddy_smoke_test(void) { DBG_INFO("Start buddy test"); struct page *page[10]; int order[ARRAY_SIZE(page)]; for (int i = 0; i != ARRAY_SIZE(page); ++i) { page[i] = alloc_pages(i); if (page[i]) { const phys_t begin = page_paddr(page[i]); const phys_t end = begin + (PAGE_SIZE << i); DBG_INFO("allocated [%#llx-%#llx]", begin, end - 1); order[i] = i; } } for (int i = 0; i != ARRAY_SIZE(page) - 1; ++i) { if (!page[i]) break; for (int j = i + 1; j != ARRAY_SIZE(page); ++j) { if (!page[j]) break; const phys_t ibegin = page_paddr(page[i]); const phys_t iend = ibegin + (PAGE_SIZE << order[i]); const phys_t jbegin = page_paddr(page[j]); const phys_t jend = jbegin + (PAGE_SIZE << order[j]); DBG_ASSERT(!range_intersect(ibegin, iend, jbegin, jend)); } } for (int i = 0; i != ARRAY_SIZE(page); ++i) { if (!page[i]) continue; const phys_t begin = page_paddr(page[i]); const phys_t end = begin + (PAGE_SIZE << i); DBG_INFO("freed [%#llx-%#llx]", begin, end - 1); free_pages(page[i], order[i]); } DBG_INFO("Buddy test finished"); }
/*
 * Allocate a single page to be used as a page table and zero it.
 * PTE_LOW in flags selects the low-memory allocator (__alloc_pages
 * with NT_LOW); otherwise the regular allocator is used.
 *
 * Returns the page with refcount reset to 0, or NULL on failure.
 */
static struct page *alloc_page_table(pte_t flags) {
	struct page *page;

	if (flags & PTE_LOW)
		page = __alloc_pages(0, NT_LOW);
	else
		page = alloc_pages(0);

	if (!page)
		return 0;

	memset(va(page_paddr(page)), 0, PAGE_SIZE);
	page->u.refcount = 0;

	return page;
}
/*
 * Build the initial kernel PML4 (fixed, kernel and kmap mappings) and
 * load it into CR3 via store_pml4.
 *
 * Fix: the setup_* calls used to live inside DBG_ASSERT. If that macro
 * compiles to nothing in non-debug builds, the mappings would never be
 * created. Keep the side effects outside the assertion and only assert
 * on the captured return codes.
 */
void setup_paging(void) {
	struct page *page = alloc_page_table(PTE_LOW);

	DBG_ASSERT(page != 0);

	const phys_t paddr = page_paddr(page);
	pte_t *pml4 = va(paddr);

	const int rc_fixed = setup_fixed_mapping(pml4);
	DBG_ASSERT(rc_fixed == 0);

	const int rc_kernel = setup_kernel_mapping(pml4);
	DBG_ASSERT(rc_kernel == 0);

	const int rc_kmap = setup_kmap_mapping(pml4);
	DBG_ASSERT(rc_kmap == 0);

	store_pml4(paddr);
}
/*
 * Populate the PML4 entries covering [from, to): for every slot ensure
 * a PML3 table exists, take references on it for the pages mapped
 * beneath it, and recurse into pt_populate_pml3.
 *
 * A PML3 table's refcount counts the 4K pages mapped under it
 * (pages = bytes >> PAGE_BITS added per slot).
 *
 * Returns 0 on success or a negative errno; on failure all entries
 * populated by this call are rolled back.
 */
static int pt_populate_pml4(pte_t *pml4, virt_t from, virt_t to, pte_t flags) {
	virt_t vaddr = from;

	for (int i = pml4_i(from); vaddr < to; ++i) {
		struct page *pt;
		phys_t paddr;
		/* Bytes this PML4 slot covers, clipped at the range end. */
		const virt_t bytes = MINU(PML3_SIZE - (vaddr & PML3_MASK), to - vaddr);
		const pfn_t pages = bytes >> PAGE_BITS;

		if (!pte_present(pml4[i])) {
			pt = alloc_page_table(flags);
			if (!pt) {
				/* Undo slots populated in [from, vaddr). */
				pt_release_pml4(pml4, from, vaddr);
				return -ENOMEM;
			}
			paddr = page_paddr(pt);
			/* PTE_LARGE is meaningless on a PML4 entry. */
			pml4[i] = paddr | (flags & ~PTE_LARGE);
		} else {
			/* Reuse the PML3 table already installed here. */
			const pte_t pte = pml4[i];

			paddr = pte_phys(pte);
			pt = pfn2page(paddr >> PAGE_BITS);
		}
		pt->u.refcount += pages;

		const int rc = pt_populate_pml3(va(paddr), vaddr, vaddr + bytes, flags);

		if (rc) {
			/* Release earlier slots, then drop the references
			 * taken above for this slot; free the PML3 table if
			 * this call was its only user. */
			pt_release_pml4(pml4, from, vaddr);
			pt->u.refcount -= pages;
			if (pt->u.refcount == 0) {
				pml4[i] = 0;
				free_page_table(pt);
			}
			return rc;
		}
		vaddr += bytes;
	}
	return 0;
}
/*
 * Map `count` pages into a contiguous window of the kmap virtual
 * region and return the virtual address of the first page, or NULL if
 * no free range of that size is available.
 *
 * Each page is installed writable and present, and the TLB entry for
 * its address is flushed.
 */
void *kmap(struct page **pages, size_t count) {
	struct kmap_range *range = kmap_alloc_range(count);

	if (!range)
		return 0;

	const virt_t from = kmap2virt(range);
	const virt_t to = from + (count << PAGE_BITS);
	pte_t *pt = va(load_pml4());
	struct pt_iter iter;
	size_t i = 0;

	/* NOTE(review): this writes iter.pt[level][idx] for every slot the
	 * iterator yields — it assumes the kmap region's intermediate page
	 * tables are pre-populated (e.g. by setup_kmap_mapping) so each
	 * visited slot is a leaf PTE; confirm against the iterator's
	 * behavior on non-present intermediate entries. */
	for_each_slot_in_range(pt, from, to, iter) {
		const phys_t paddr = page_paddr(pages[i++]);
		const int level = iter.level;
		const int idx = iter.idx[level];

		iter.pt[level][idx] = paddr | PTE_WRITE | PTE_PRESENT;
		flush_tlb_addr(iter.addr);
	}
	return (void *)from;
}