/*
** vmm uses 1GB/2MB pages if possible
** vm uses fine-grain paging
*/
static inline void pmem_pg_predict(pg_cnt_t *vmm, pg_cnt_t *vm)
{
   /* one pdp per pml4 entry */
   vmm->pdp = pdp_nr(info->hrd.mem.top - 1) + 1;
   vm->pdp  = vmm->pdp;

   /* with 1GB pages, pdp entries map memory directly: no pd needed */
   if(info->vmm.cpu.skillz.pg_1G)
      vmm->pd = 0;
   else
      vmm->pd = pd64_nr(info->hrd.mem.top - 1) + 1;

   if(info->vm.cpu.skillz.pg_1G)
      vm->pd = 2;
   else
      /* computed independently: vmm->pd is 0 when the vmm uses 1GB pages */
      vm->pd = pd64_nr(info->hrd.mem.top - 1) + 1;

   /* the vmm never uses 4KB pages */
   vmm->pt = 0;

   if(info->vm.cpu.skillz.pg_2M)
      vm->pt = 2;
   else
      vm->pt = pt64_nr(info->hrd.mem.top - 1) + 1;

   debug(PMEM, "vmm needs %d pd and %d pt\n"
               "vm needs %d pd and %d pt\n"
         , vmm->pd, vmm->pt
         , vm->pd, vm->pt);
}
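/*
** For reference, a minimal sketch of the index helpers the
** prediction relies on, assuming the standard x86-64 4-level
** layout (512GB per pdp, 1GB per pd, 2MB per pt). The real
** definitions live in the project's paging headers; the shift
** values below are an assumption and are kept compiled-out.
*/
#if 0
#define pdp_nr(addr)   ((offset_t)(addr) >> 39) /* pml4 index: which pdp */
#define pd64_nr(addr)  ((offset_t)(addr) >> 30) /* pdp  index: which pd  */
#define pt64_nr(addr)  ((offset_t)(addr) >> 21) /* pd   index: which pt  */
#endif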
/*
** we map more than available
** since we only use 1GB or 2MB pages
*/
static void vmm_pagemem_init()
{
   pml4e_t   *pml4;
   pdpe_t    *pdp;
   pde64_t   *pd;
   cr3_reg_t  cr3;
   offset_t   pfn;
   uint32_t   i, j, k;
   offset_t   limit;

   /* locals shadow the function-like macros of the same name,
      which is safe since bare identifiers are not macro-expanded */
   size_t     pdp_nr, pd_nr, pt_nr;
   size_t     pml4e_max, pdpe_max, pde_max;

   pml4  = info->vmm.cpu.pg.pml4;
   limit = info->hrd.mem.top - 1;

   pdp_nr = pdp_nr(limit) + 1;
   pd_nr  = pd64_nr(limit) + 1;
   pt_nr  = pt64_nr(limit) + 1;

   pfn = 0;
   pml4e_max = pdp_nr; /* only one pml4 */

   for(i=0 ; i<pml4e_max ; i++)
   {
      pdp = info->vmm.cpu.pg.pdp[i];
      pg_set_entry(&pml4[i], PG_KRN|PG_RW, page_nr(pdp));

      /* number of pdp entries backed by this pdp */
      pdpe_max = min(pd_nr, PDPE_PER_PDP);
      pd_nr   -= pdpe_max;

      for(j=0 ; j<pdpe_max ; j++)
      {
         if(info->vmm.cpu.skillz.pg_1G)
            /* identity map 1GB per pdp entry */
            pg_set_large_entry(&pdp[j], PG_KRN|PG_RW, pfn++);
         else
         {
            pd = info->vmm.cpu.pg.pd[j];
            pg_set_entry(&pdp[j], PG_KRN|PG_RW, page_nr(pd));

            pde_max = min(pt_nr, PDE64_PER_PD);
            pt_nr  -= pde_max;

            /* identity map 2MB per pd entry */
            for(k=0 ; k<pde_max ; k++)
               pg_set_large_entry(&pd[k], PG_KRN|PG_RW, pfn++);
         }
      }
   }

   cr3.raw = 0UL;
   cr3.pml4.addr = page_nr(pml4);
   set_cr3(cr3.raw);
}
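/*
** Illustration of the header comment above: because only 1GB/2MB
** pages are used, the identity map is rounded up to a large-page
** boundary and may exceed available memory. E.g. with mem.top at
** 3.5GB and 1GB pages, pd64_nr(top - 1) + 1 = 4 pdp entries get
** installed, mapping 4GB. The helper below recomputes the mapped
** size; it is a sketch for this file's context, not project code.
*/
static inline size_t vmm_pagemem_mapped_size()
{
   offset_t limit = info->hrd.mem.top - 1;

   if(info->vmm.cpu.skillz.pg_1G)
      /* one 1GB frame per pdp entry */
      return (size_t)(pd64_nr(limit) + 1) << 30;

   /* one 2MB frame per pd entry */
   return (size_t)(pt64_nr(limit) + 1) << 21;
}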