/*
** Look up the nested-paging PDP entry covering addr in the active
** paging structures. Returns NULL when the PML4 slot is not present;
** otherwise defers to __npg_get_pdpe_nocheck().
*/
static inline npg_pdpe_t* __npg_get_pdpe(offset_t addr)
{
   vm_pgmem_t  *paging = npg_get_active_paging();
   npg_pml4e_t *top    = &paging->pml4[pml4_idx(addr)];

   return npg_present(top) ? __npg_get_pdpe_nocheck(top, addr) : 0;
}
/*
** Resolve the nested-paging PDP entry for addr, allocating a fresh PDP
** table when the PML4 slot is empty. attr is applied to a newly created
** upper-level entry (combined with the default privilege level).
*/
static inline npg_pdpe_t* __npg_resolve_pdpe(offset_t addr, uint64_t attr)
{
   vm_pgmem_t  *paging = npg_get_active_paging();
   npg_pml4e_t *slot   = &paging->pml4[pml4_idx(addr)];
   npg_pdpe_t  *table;

   if(npg_present(slot))
      table = (npg_pdpe_t*)page_addr(slot->addr);
   else
   {
      table = __npg_new_pdp();
      /* upper-level entry has full pvl */
      npg_set_entry(slot, attr|npg_dft_pvl, page_nr(table));
   }

   return &table[pdp_idx(addr)];
}
/*
** lmode: walk the guest's 4-level long-mode page tables for vaddr.
**
** Handles 1GB, 2MB and 4KB page mappings. cr4.pse is ignored (it has
** no effect in long mode) and the 1GB-page cpuid feature must have
** been probed beforehand (consulted via info->vmm.cpu.skillz.pg_1G).
**
** cr3   : guest cr3 register providing the pml4 base frame
** vaddr : virtual address to translate
** paddr : out, resulting physical address
** psz   : out, size of the mapping page (PG_1G/2M/4K_SIZE)
** chk   : when non-zero, fail the walk if any paging structure or the
**         final paddr overlaps the vmm area
**
** returns 1 on success, 0 on failure (non-present entry or vmm overlap)
*/
static inline int pg_walk_lmode(cr3_reg_t *cr3, offset_t vaddr,
                                offset_t *paddr, size_t *psz, int chk)
{
   pml4e_t *pml4, *pml4e;
   pdpe_t  *pdp, *pdpe;
   pde64_t *pd, *pde;
   pte64_t *pt, *pte;

   /* level 4: pml4 table */
   pml4 = (pml4e_t*)page_addr(cr3->pml4.addr);
   if(chk && vmm_area(pml4))
   {
      debug(PG_W, "pml4 in vmm area\n");
      return 0;
   }

   pml4e = &pml4[pml4_idx(vaddr)];
   debug(PG_W, "pml4e @ 0x%X = %X\n", (offset_t)pml4e, pml4e->raw);
   if(!pg_present(pml4e))
   {
      debug(PG_W, "pml4e not present\n");
      return 0;
   }

   /* level 3: pdp table */
   pdp = (pdpe_t*)page_addr(pml4e->addr);
   if(chk && vmm_area(pdp))
   {
      debug(PG_W, "pdp in vmm area\n");
      return 0;
   }

   pdpe = &pdp[pdp_idx(vaddr)];
   debug(PG_W, "pdpe @ 0x%X = 0x%X\n", (offset_t)pdpe, pdpe->raw);
   if(!pg_present(pdpe))
   {
      debug(PG_W, "pdpe not present\n");
      return 0;
   }

   /* 1GB page: pdpe maps the page directly (needs cpuid 1GB support) */
   if(info->vmm.cpu.skillz.pg_1G && pg_large(pdpe))
   {
      *paddr = pg_1G_addr((offset_t)pdpe->page.addr) + pg_1G_offset(vaddr);
      *psz = PG_1G_SIZE;
      goto __prepare_addr;
   }

   /* level 2: page directory */
   pd = (pde64_t*)page_addr(pdpe->addr);
   if(chk && vmm_area(pd))
   {
      debug(PG_W, "pd in vmm area\n");
      return 0;
   }

   pde = &pd[pd64_idx(vaddr)];
   debug(PG_W, "pde @ 0x%X = 0x%X\n", (offset_t)pde, pde->raw);
   if(!pg_present(pde))
   {
      debug(PG_W, "pde not present\n");
      return 0;
   }

   /* 2MB page: pde maps the page directly */
   if(pg_large(pde))
   {
      *paddr = pg_2M_addr((offset_t)pde->page.addr) + pg_2M_offset(vaddr);
      *psz = PG_2M_SIZE;
      goto __prepare_addr;
   }

   /* level 1: page table */
   pt = (pte64_t*)page_addr(pde->addr);
   if(chk && vmm_area(pt))
   {
      debug(PG_W, "pt in vmm area\n");
      return 0;
   }

   pte = &pt[pt64_idx(vaddr)];
   debug(PG_W, "pte @ 0x%X = 0x%X\n", (offset_t)pte, pte->raw);
   if(!pg_present(pte))
   {
      debug(PG_W, "pte not present\n");
      return 0;
   }

   /* 4KB page */
   *paddr = pg_4K_addr((offset_t)pte->addr) + pg_4K_offset(vaddr);
   *psz = PG_4K_SIZE;

__prepare_addr:
   /* all page sizes funnel here for the final vmm-area check on paddr */
   if(chk && vmm_area(*paddr))
   {
      debug(PG_W, "paddr 0x%x in vmm area\n", *paddr);
      return 0;
   }

   debug(PG_W, "lmode vaddr 0x%X -> paddr 0x%X\n", vaddr, *paddr);
   return 1;
}
/*
** lmode: walk the guest's 4-level long-mode page tables for vaddr,
** filling a pg_wlk_t with the result.
**
** Handles 1GB, 2MB and 4KB page mappings. cr4.pse is ignored (no
** effect in long mode) and the 1GB-page cpuid feature must have been
** probed beforehand (consulted via info->vmm.cpu.skillz.pg_1G).
**
** Effective access rights (wlk->u/r/w/x) are accumulated by AND-ing
** the user, rw and executable bits of every level traversed.
**
** cr3   : guest cr3 register providing the pml4 base frame
** vaddr : virtual address to translate
** wlk   : out, walk result: addr/size of the mapping, faulting or
**         mapping entry pointer + its level (wlk->type), access bits
**
** returns VM_DONE on success, VM_FAULT when an entry is not present
** (wlk->type/entry identify the faulting level), VM_FAIL when a
** paging structure overlaps the vmm area.
*/
static int __pg_walk_lmode(cr3_reg_t *cr3, offset_t vaddr, pg_wlk_t *wlk)
{
   pml4e_t *pml4, *pml4e;
   pdpe_t  *pdp, *pdpe;
   pde64_t *pd, *pde;
   pte64_t *pt, *pte;

   wlk->attr = 0;

   /* level 4: pml4 table */
   pml4 = (pml4e_t*)page_addr(cr3->pml4.addr);
   if(vmm_area_range(pml4, PG_4K_SIZE))
   {
      debug(PG_WLK, "pml4 in vmm area\n");
      return VM_FAIL;
   }

   pml4e = &pml4[pml4_idx(vaddr)];
   debug(PG_WLK, "pml4e @ 0x%X = %X\n", (offset_t)pml4e, pml4e->raw);
   if(!pg_present(pml4e))
   {
      debug(PG_WLK, "pml4e not present\n");
      wlk->type = PG_WALK_TYPE_PML4E;
      wlk->entry = (void*)pml4e;
      return VM_FAULT;
   }

   /* seed the accumulated access rights from the top-level entry */
   wlk->u = pml4e->lvl;
   wlk->r = 1;
   wlk->w = pml4e->rw;
   wlk->x = pg64_executable(pml4e);

   /* level 3: pdp table */
   pdp = (pdpe_t*)page_addr(pml4e->addr);
   if(vmm_area_range(pdp, PG_4K_SIZE))
   {
      debug(PG_WLK, "pdp in vmm area\n");
      return VM_FAIL;
   }

   pdpe = &pdp[pdp_idx(vaddr)];
   debug(PG_WLK, "pdpe @ 0x%X = 0x%X\n", (offset_t)pdpe, pdpe->raw);
   if(!pg_present(pdpe))
   {
      debug(PG_WLK, "pdpe not present\n");
      wlk->type = PG_WALK_TYPE_PDPE;
      wlk->entry = (void*)pdpe;
      return VM_FAULT;
   }

   wlk->u &= pdpe->lvl;
   wlk->w &= pdpe->rw;
   wlk->x &= pg64_executable(pdpe);

   /* 1GB page: pdpe maps the page directly (needs cpuid 1GB support) */
   if(info->vmm.cpu.skillz.pg_1G && pg_large(pdpe))
   {
      wlk->addr = pg_1G_addr((offset_t)pdpe->page.addr) + pg_1G_offset(vaddr);
      wlk->type = PG_WALK_TYPE_PDPE;
      wlk->size = PG_1G_SIZE;
      wlk->entry = (void*)pdpe;
      goto __success;
   }

   /* level 2: page directory */
   pd = (pde64_t*)page_addr(pdpe->addr);
   if(vmm_area_range(pd, PG_4K_SIZE))
   {
      debug(PG_WLK, "pd64 in vmm area\n");
      return VM_FAIL;
   }

   pde = &pd[pd64_idx(vaddr)];
   debug(PG_WLK, "pde64 @ 0x%X = 0x%X\n", (offset_t)pde, pde->raw);
   if(!pg_present(pde))
   {
      debug(PG_WLK, "pde not present\n");
      wlk->type = PG_WALK_TYPE_PDE64;
      wlk->entry = (void*)pde;
      return VM_FAULT;
   }

   wlk->u &= pde->lvl;
   wlk->w &= pde->rw;
   wlk->x &= pg64_executable(pde);

   /* 2MB page: pde maps the page directly */
   if(pg_large(pde))
   {
      wlk->addr = pg_2M_addr((offset_t)pde->page.addr) + pg_2M_offset(vaddr);
      wlk->type = PG_WALK_TYPE_PDE64;
      wlk->size = PG_2M_SIZE;
      wlk->entry = (void*)pde;
      goto __success;
   }

   /* level 1: page table */
   pt = (pte64_t*)page_addr(pde->addr);
   if(vmm_area_range(pt, PG_4K_SIZE))
   {
      debug(PG_WLK, "pt64 in vmm area\n");
      return VM_FAIL;
   }

   pte = &pt[pt64_idx(vaddr)];
   debug(PG_WLK, "pte64 @ 0x%X = 0x%X\n", (offset_t)pte, pte->raw);
   if(!pg_present(pte))
   {
      debug(PG_WLK, "pte not present\n");
      wlk->type = PG_WALK_TYPE_PTE64;
      wlk->entry = (void*)pte;
      return VM_FAULT;
   }

   /* 4KB page */
   wlk->addr = pg_4K_addr((offset_t)pte->addr) + pg_4K_offset(vaddr);
   wlk->type = PG_WALK_TYPE_PTE64;
   wlk->size = PG_4K_SIZE;
   wlk->entry = (void*)pte;
   wlk->u &= pte->lvl;
   wlk->w &= pte->rw;
   wlk->x &= pg64_executable(pte);

__success:
   debug(PG_WLK, "lmode vaddr 0x%X -> guest paddr 0x%X\n", vaddr, wlk->addr);
   return VM_DONE;
}