/*
 * Map the virtual address vad to the physical address pad.
 *
 * flags may contain:
 *   USE_BIG_PAGE  - install one 2MiB mapping at the PD level instead of
 *                   walking down to a 4KiB PTE
 *   NOT_FLUSH_TLB - skip the per-address TLB flush after installing
 *
 * Both vad and pad must be aligned to the granularity being mapped
 * (2MiB with USE_BIG_PAGE, 4KiB otherwise); the target slot must not
 * already be present.  Missing intermediate tables are created by
 * force_pte().
 */
void map_adr(virt_t vad, phys_t pad, int flags)
{
	/* Required alignment: PAGE_BITS + 9 (2MiB) for big pages, PAGE_BITS (4KiB) otherwise. */
	const int shift = (flags & USE_BIG_PAGE) ? PAGE_BITS + 9 : PAGE_BITS;

	assert((vad & ((1ull << shift) - 1)) == 0);
	assert((pad & ((1ull << shift) - 1)) == 0);

	pte_t *pml4e = pml4 + pml4_i(vad);
	force_pte(pml4e, flags);

	pte_t *pdpte = ((pte_t *) va(pte_phys(*pml4e) << PAGE_BITS)) + pml3_i(vad);
	force_pte(pdpte, flags);

	pte_t *pde = ((pte_t *) va(pte_phys(*pdpte) << PAGE_BITS)) + pml2_i(vad);
	if (flags & USE_BIG_PAGE) {
		assert(pte_present(*pde) == false);
		*pde = pad | PTE_PRESENT | PTE_WRITE | PTE_LARGE;
		/* FIX: honor NOT_FLUSH_TLB here too — previously the big-page
		 * path always flushed while the 4KiB path respected the flag. */
		if (!(flags & NOT_FLUSH_TLB))
			flush_tlb_addr(vad);
		return;
	}

	force_pte(pde, flags);
	pte_t *pte = ((pte_t *) va(pte_phys(*pde) << PAGE_BITS)) + pml1_i(vad);
	assert(pte_present(*pte) == false);
	*pte = pad | PTE_PRESENT | PTE_WRITE;

	if (!(flags & NOT_FLUSH_TLB))
		flush_tlb_addr(vad);
}
void kunmap(void *vaddr) { struct kmap_range *range = virt2kmap((virt_t)vaddr); const pfn_t count = range->pages; const virt_t from = (virt_t)vaddr; const virt_t to = from + (count << PAGE_BITS); pte_t *pt = va(load_pml4()); virt_t virt = from; struct pt_iter iter; for_each_slot_in_range(pt, from, to, iter) { const int level = iter.level; const int idx = iter.idx[level]; iter.pt[level][idx] = 0; flush_tlb_addr(virt); virt += PAGE_SIZE; } kmap_free_range(range, range->pages); }
void *kmap(struct page **pages, size_t count) { struct kmap_range *range = kmap_alloc_range(count); if (!range) return 0; const virt_t from = kmap2virt(range); const virt_t to = from + (count << PAGE_BITS); pte_t *pt = va(load_pml4()); struct pt_iter iter; size_t i = 0; for_each_slot_in_range(pt, from, to, iter) { const phys_t paddr = page_paddr(pages[i++]); const int level = iter.level; const int idx = iter.idx[level]; iter.pt[level][idx] = paddr | PTE_WRITE | PTE_PRESENT; flush_tlb_addr(iter.addr); } return (void *)from; }