/*
 * Tear down one level-2 EPT entry: release every guest frame reachable
 * through the PMD it references (including 2MB large-page mappings),
 * free the intermediate table pages, and clear the entry itself.
 *
 * Returns 1 if the entry was populated and has been cleared, 0 if it
 * was already __EPTE_NONE.
 */
static int ept_clear_l2_epte(epte_t *epte)
{
	epte_t *pmd = (epte_t *) epte_page_vaddr(*epte);
	int pmd_idx, pte_idx;

	if (*epte == __EPTE_NONE)
		return 0;

	for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd_idx++) {
		epte_t pmd_entry = pmd[pmd_idx];
		epte_t *pte;

		if (!epte_present(pmd_entry))
			continue;

		/* A 2MB large-page mapping has no PTE level beneath it. */
		if (epte_flags(pmd_entry) & __EPTE_SZ) {
			free_ept_page(pmd_entry);
			continue;
		}

		pte = (epte_t *) epte_page_vaddr(pmd_entry);
		for (pte_idx = 0; pte_idx < PTRS_PER_PTE; pte_idx++) {
			if (epte_present(pte[pte_idx]))
				free_ept_page(pte[pte_idx]);
		}
		free_page((uintptr_t) pte);
	}

	free_page((uintptr_t) pmd);
	*epte = __EPTE_NONE;
	return 1;
}
static void vmx_free_ept(unsigned long ept_root) { epte_t *pgd = (epte_t *) __va(ept_root); int i, j, k, l; for (i = 0; i < PTRS_PER_PGD; i++) { epte_t *pud = (epte_t *) epte_page_vaddr(pgd[i]); if (!epte_present(pgd[i])) continue; for (j = 0; j < PTRS_PER_PUD; j++) { epte_t *pmd = (epte_t *) epte_page_vaddr(pud[j]); if (!epte_present(pud[j])) continue; if (epte_flags(pud[j]) & __EPTE_SZ) { free_ept_page(pud[j]); continue; } for (k = 0; k < PTRS_PER_PMD; k++) { epte_t *pte = (epte_t *) epte_page_vaddr(pmd[k]); if (!epte_present(pmd[k])) continue; if (epte_flags(pmd[k]) & __EPTE_SZ) { free_ept_page(pmd[k]); continue; } for (l = 0; l < PTRS_PER_PTE; l++) { if (!epte_present(pte[l])) continue; free_ept_page(pte[l]); } free_page((unsigned long) pte); } free_page((unsigned long) pmd); } free_page((unsigned long) pud); } free_page((unsigned long) pgd); }
/*
 * Tear down one level-1 EPT entry: free every present guest frame in
 * the PTE table it references, free the table page, and clear the
 * entry itself.
 *
 * Returns 1 if the entry was populated and has been cleared, 0 if it
 * was already __EPTE_NONE.
 */
static int ept_clear_l1_epte(epte_t *epte)
{
	epte_t *pte = (epte_t *) epte_page_vaddr(*epte);
	int idx;

	if (*epte == __EPTE_NONE)
		return 0;

	for (idx = 0; idx < PTRS_PER_PTE; idx++) {
		if (epte_present(pte[idx]))
			free_ept_page(pte[idx]);
	}

	free_page((uintptr_t) pte);
	*epte = __EPTE_NONE;
	return 1;
}
/*
 * Walk the vcpu's EPT hierarchy to locate the entry mapping the
 * guest-physical address gpa at the requested level.
 *
 * If create is non-zero, missing intermediate tables are allocated
 * (zeroed, GFP_ATOMIC) and installed with full permissions; otherwise
 * the walk fails at the first absent entry.
 *
 * If a large-page (big) entry is met above the requested level, the
 * walk stops and that entry is returned instead; only level-1 large
 * pages are supported here, anything else yields -EINVAL.
 *
 * On success *epte_out points at the located entry and 0 is returned.
 * Errors: -ENOENT (absent entry with create==0), -ENOMEM (allocation
 * failure), -EINVAL (large page at an unsupported level).
 */
static int ept_lookup_gpa(struct vmx_vcpu *vcpu, void *gpa, int level,
			  int create, epte_t **epte_out)
{
	epte_t *dir = (epte_t *) __va(vcpu->ept_root);
	int cur;

	for (cur = EPT_LEVELS - 1; cur > level; cur--) {
		int idx = ADDR_TO_IDX(gpa, cur);

		if (!epte_present(dir[idx])) {
			void *page;

			if (!create)
				return -ENOENT;

			page = (void *) __get_free_page(GFP_ATOMIC);
			if (!page)
				return -ENOMEM;

			memset(page, 0, PAGE_SIZE);
			dir[idx] = epte_addr(virt_to_phys(page)) | __EPTE_FULL;
		}

		if (epte_big(dir[idx])) {
			/* Large mapping found before reaching the target level. */
			if (cur != 1)
				return -EINVAL;
			level = cur;
			break;
		}

		dir = (epte_t *) epte_page_vaddr(dir[idx]);
	}

	*epte_out = &dir[ADDR_TO_IDX(gpa, level)];
	return 0;
}
/*
 * Self-test for sys_ept_map and the EPT helpers.
 *
 * Builds a source env with one page mapped at UTEMP and a guest dest
 * env, then drives _export_sys_ept_map through its argument-validation
 * paths (bad target env, srcva above UTOP, unaligned srcva, guest_pa
 * out of bounds / unaligned, empty or write-only perm), the success
 * path (checking pp_ref accounting), the remap-failure path, and
 * finally verifies the installed mapping via ept_lookup_gpa,
 * ept_map_hva2gpa, and a manual walk of the intermediate levels.
 *
 * Always ends in panic() — this is a test-only run, never returns.
 *
 * NOTE(review): ept_lookup_gpa is called here with 4 arguments, but
 * the 5-argument (vcpu, gpa, level, create, epte_out) definition in
 * this file does not match — presumably a different lab variant taking
 * (eptrt, gpa, create, epte_out) is in scope; confirm which prototype
 * this file actually builds against.
 */
int test_ept_map(void)
{
	struct Env *srcenv, *dstenv;
	struct PageInfo *pp;
	epte_t *epte;
	int r;
	int pp_ref;
	int i;
	epte_t* dir;

	/* Initialize source env */
	if ((r = env_alloc(&srcenv, 0)) < 0)
		panic("Failed to allocate env (%d)\n", r);
	/* NOTE(review): r here is stale (from env_alloc); page_alloc
	 * returns NULL on failure, not an error code. */
	if (!(pp = page_alloc(ALLOC_ZERO)))
		panic("Failed to allocate page (%d)\n", r);
	if ((r = page_insert(srcenv->env_pml4e, pp, UTEMP, 0)) < 0)
		panic("Failed to insert page (%d)\n", r);
	curenv = srcenv;

	/* Check if sys_ept_map correctly verify the target env */
	if ((r = _export_sys_ept_map(srcenv->env_id, UTEMP, dstenv->env_id, UTEMP, __EPTE_READ)) < 0)
		cprintf("EPT map to non-guest env failed as expected (%d).\n", r);
	else
		panic("sys_ept_map success on non-guest env.\n");
	/*env_destroy(dstenv);*/

	if ((r = env_guest_alloc(&dstenv, srcenv->env_id)) < 0)
		panic("Failed to allocate guest env (%d)\n", r);
	/* Guest physical memory ends just past UTEMP: one valid page. */
	dstenv->env_vmxinfo.phys_sz = (uint64_t)UTEMP + PGSIZE;

	/* Check if sys_ept_map can verify srcva correctly */
	if ((r = _export_sys_ept_map(srcenv->env_id, (void *)UTOP, dstenv->env_id, UTEMP, __EPTE_READ)) < 0)
		cprintf("EPT map from above UTOP area failed as expected (%d).\n", r);
	else
		panic("sys_ept_map from above UTOP area success\n");
	if ((r = _export_sys_ept_map(srcenv->env_id, UTEMP+1, dstenv->env_id, UTEMP, __EPTE_READ)) < 0)
		cprintf("EPT map from unaligned srcva failed as expected (%d).\n", r);
	else
		panic("sys_ept_map from unaligned srcva success\n");

	/* Check if sys_ept_map can verify guest_pa correctly */
	if ((r = _export_sys_ept_map(srcenv->env_id, UTEMP, dstenv->env_id, UTEMP + PGSIZE, __EPTE_READ)) < 0)
		cprintf("EPT map to out-of-boundary area failed as expected (%d).\n", r);
	else
		panic("sys_ept_map success on out-of-boundary area\n");
	if ((r = _export_sys_ept_map(srcenv->env_id, UTEMP, dstenv->env_id, UTEMP-1, __EPTE_READ)) < 0)
		cprintf("EPT map to unaligned guest_pa failed as expected (%d).\n", r);
	else
		panic("sys_ept_map success on unaligned guest_pa\n");

	/* Check if the sys_ept_map can verify the permission correctly */
	if ((r = _export_sys_ept_map(srcenv->env_id, UTEMP, dstenv->env_id, UTEMP, 0)) < 0)
		cprintf("EPT map with empty perm parameter failed as expected (%d).\n", r);
	else
		panic("sys_ept_map success on empty perm\n");
	if ((r = _export_sys_ept_map(srcenv->env_id, UTEMP, dstenv->env_id, UTEMP, __EPTE_WRITE)) < 0)
		cprintf("EPT map with write perm parameter failed as expected (%d).\n", r);
	else
		panic("sys_ept_map success on write perm\n");

	pp_ref = pp->pp_ref;
	/* Check if the sys_ept_map can succeed on correct setup */
	if ((r = _export_sys_ept_map(srcenv->env_id, UTEMP, dstenv->env_id, UTEMP, __EPTE_READ)) < 0)
		panic("Failed to do sys_ept_map (%d)\n", r);
	else
		cprintf("sys_ept_map finished normally.\n");
	/* Successful map must take exactly one extra reference on pp. */
	if (pp->pp_ref != pp_ref + 1)
		panic("Failed on checking pp_ref[%d]\n",pp_ref);
	else
		cprintf("pp_ref incremented correctly\n");

	/* Check if the sys_ept_map can handle remapping correctly */
	pp_ref = pp->pp_ref;
	if ((r = _export_sys_ept_map(srcenv->env_id, UTEMP, dstenv->env_id, UTEMP, __EPTE_READ)) < 0)
		cprintf("sys_ept_map finished normally.\n");
	else
		panic("sys_ept_map success on remapping the same page\n");
	/* Check if the sys_ept_map reset the pp_ref after failed on remapping the same page */
	if (pp->pp_ref == pp_ref)
		cprintf("sys_ept_map handled pp_ref correctly.\n");
	else
		panic("sys_ept_map failed to handle pp_ref.\n");

	/* Check if ept_lookup_gpa can handle empty eptrt correctly */
	if ((r = ept_lookup_gpa(NULL, UTEMP, 0, &epte)) < 0)
		cprintf("EPT lookup with a null eptrt failed as expected\n");
	else
		panic ("ept_lookup_gpa success on null eptrt\n");
	/* Check if the mapping is valid */
	if ((r = ept_lookup_gpa(dstenv->env_pml4e, UTEMP, 0, &epte)) < 0)
		panic("Failed on ept_lookup_gpa (%d)\n", r);
	if (page2pa(pp) != (epte_addr(*epte)))
		panic("EPT mapping address mismatching (%x vs %x).\n", page2pa(pp), epte_addr(*epte));
	else
		cprintf("EPT mapping address looks good: %x vs %x.\n", page2pa(pp), epte_addr(*epte));

	/* Check if the map_hva2gpa handle the overwrite correctly */
	if ((r = ept_map_hva2gpa(dstenv->env_pml4e, page2kva(pp), UTEMP, __EPTE_READ, 0)) < 0)
		cprintf("map_hva2gpa handle not overwriting correctly\n");
	else
		panic("map_hva2gpa success on overwriting with non-overwrite parameter\n");
	/* Check if the map_hva2gpa can map a page */
	if ((r = ept_map_hva2gpa(dstenv->env_pml4e, page2kva(pp), UTEMP, __EPTE_READ, 1)) < 0)
		panic ("Failed on mapping a page from kva to gpa\n");
	else
		cprintf("map_hva2gpa success on mapping a page\n");
	/* Check if the map_hva2gpa set permission correctly: the non-address
	 * bits of the leaf entry must be exactly READ | WB memtype | IPAT. */
	if ((r = ept_lookup_gpa(dstenv->env_pml4e, UTEMP, 0, &epte)) < 0)
		panic("Failed on ept_lookup_gpa (%d)\n", r);
	if (((uint64_t)*epte & (~EPTE_ADDR)) == (__EPTE_READ | __EPTE_TYPE( EPTE_TYPE_WB ) | __EPTE_IPAT))
		cprintf("map_hva2gpa success on perm check\n");
	else
		panic("map_hva2gpa didn't set permission correctly\n");

	/* Go through the extended page table to check if the immediate mappings are correct */
	dir = dstenv->env_pml4e;
	for ( i = EPT_LEVELS - 1; i > 0; --i ) {
		int idx = ADDR_TO_IDX(UTEMP, i);
		if (!epte_present(dir[idx])) {
			panic("Failed to find page table item at the immediate level %d.", i);
		}
		if (!(dir[idx] & __EPTE_FULL)) {
			panic("Permission check failed at immediate level %d.", i);
		}
		dir = (epte_t *) epte_page_vaddr(dir[idx]);
	}
	cprintf("EPT immediate mapping check passed\n");

	/* stop running after test, as this is just a test run. */
	panic("Cheers! sys_ept_map seems to work correctly.\n");

	return 0;
}