static void ept_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long start, unsigned long end) { struct vmx_vcpu *vcpu = mmu_notifier_to_vmx(mn); int ret; epte_t *epte; unsigned long pos = start; bool sync_needed = false; pr_debug("ept: invalidate_range_start start %lx end %lx\n", start, end); spin_lock(&vcpu->ept_lock); while (pos < end) { ret = ept_lookup(vcpu, mm, (void *) pos, 0, 0, &epte); if (!ret) { pos += epte_big(*epte) ? HUGE_PAGE_SIZE : PAGE_SIZE; ept_clear_epte(epte); sync_needed = true; } else pos += PAGE_SIZE; } spin_unlock(&vcpu->ept_lock); if (sync_needed) vmx_ept_sync_vcpu(vcpu); }
static int ept_set_epte(struct vmx_vcpu *vcpu, int make_write, unsigned long gpa, unsigned long hva) { int ret; epte_t *epte, flags; struct page *page; ret = get_user_pages_fast(hva, 1, make_write, &page); if (ret != 1) { printk(KERN_ERR "ept: failed to get user page %lx\n", hva); return ret; } spin_lock(&vcpu->ept_lock); ret = ept_lookup_gpa(vcpu, (void *) gpa, PageHuge(page) ? 1 : 0, 1, &epte); if (ret) { spin_unlock(&vcpu->ept_lock); printk(KERN_ERR "ept: failed to lookup EPT entry\n"); return ret; } if (epte_present(*epte)) { if (!epte_big(*epte) && PageHuge(page)) ept_clear_l1_epte(epte); else ept_clear_epte(epte); } flags = __EPTE_READ | __EPTE_EXEC | __EPTE_TYPE(EPTE_TYPE_WB) | __EPTE_IPAT; if (make_write) flags |= __EPTE_WRITE; if (vcpu->ept_ad_enabled) { /* premark A/D to avoid extra memory references */ flags |= __EPTE_A; if (make_write) flags |= __EPTE_D; } if (PageHuge(page)) { flags |= __EPTE_SZ; *epte = epte_addr(page_to_phys(page) & ~((1 << 21) - 1)) | flags; } else *epte = epte_addr(page_to_phys(page)) | flags; spin_unlock(&vcpu->ept_lock); return 0; }
/*
 * Walk the EPT paging structure for gpa down to the requested level and
 * return a pointer to the entry there via *epte_out.
 *
 * @level:  target level of the entry wanted (0 = 4KB PTE level).
 * @create: when non-zero, allocate and link missing intermediate tables;
 *          otherwise a hole terminates the walk with -ENOENT.
 *
 * Returns 0 on success, -ENOENT on a hole with !create, -ENOMEM on
 * allocation failure, -EINVAL if a big (huge) entry is found above
 * level 1.
 *
 * Allocations use GFP_ATOMIC because callers invoke this with
 * vcpu->ept_lock held (see ept_set_epte).
 */
static int ept_lookup_gpa(struct vmx_vcpu *vcpu, void *gpa, int level,
			  int create, epte_t **epte_out)
{
	int i;
	epte_t *dir = (epte_t *) __va(vcpu->ept_root);

	for (i = EPT_LEVELS - 1; i > level; i--) {
		int idx = ADDR_TO_IDX(gpa, i);

		if (!epte_present(dir[idx])) {
			void *page;

			if (!create)
				return -ENOENT;

			/* GFP_ATOMIC: we may be under ept_lock here. */
			page = (void *) __get_free_page(GFP_ATOMIC);
			if (!page)
				return -ENOMEM;

			memset(page, 0, PAGE_SIZE);

			dir[idx] = epte_addr(virt_to_phys(page)) |
				__EPTE_FULL;
		}

		if (epte_big(dir[idx])) {
			/*
			 * A big mapping ends the walk early. Only 2MB
			 * (level 1) big entries are accepted here;
			 * NOTE(review): the 1GB-capable ept_set_epte
			 * variant elsewhere in this file passes level 2 —
			 * confirm whether level-2 big entries should also
			 * be allowed.
			 */
			if (i != 1)
				return -EINVAL;
			/* Retarget the lookup at the big entry's level. */
			level = i;
			break;
		}

		dir = (epte_t *) epte_page_vaddr(dir[idx]);
	}

	*epte_out = &dir[ADDR_TO_IDX(gpa, level)];
	return 0;
}
/*
 * Install an EPT mapping gpa -> page backing hva (huge-page aware
 * variant: supports 4KB, 2MB, and 1GB mappings).
 *
 * Pins the host page with get_user_pages_fast(); if that fails, falls
 * back to ept_set_pfnmap_epte() for VM_PFNMAP-style regions.
 *
 * Returns 0 on success, a negative error otherwise.
 */
static int ept_set_epte(struct vmx_vcpu *vcpu, int make_write,
			unsigned long gpa, unsigned long hva)
{
	int ret;
	epte_t *epte, flags;
	struct page *page;
	unsigned huge_shift;
	int level;

	ret = get_user_pages_fast(hva, 1, make_write, &page);
	if (ret != 1) {
		/* Not a regular page — try the PFN-map path instead. */
		ret = ept_set_pfnmap_epte(vcpu, make_write, gpa, hva);
		if (ret)
			printk(KERN_ERR "ept: failed to get user page %lx\n",
			       hva);
		return ret;
	}

	spin_lock(&vcpu->ept_lock);

	/*
	 * Derive the EPT level from the compound page's order:
	 * 2^30 = 1GB -> level 2, 2^21 = 2MB -> level 1, else 4KB level 0.
	 */
	huge_shift = compound_order(compound_head(page)) + PAGE_SHIFT;
	level = 0;
	if (huge_shift == 30)
		level = 2;
	else if (huge_shift == 21)
		level = 1;

	ret = ept_lookup_gpa(vcpu, (void *) gpa,
			     level, 1, &epte);
	if (ret) {
		spin_unlock(&vcpu->ept_lock);
		/* Drop the gup reference before bailing out. */
		put_page(page);
		printk(KERN_ERR "ept: failed to lookup EPT entry\n");
		return ret;
	}

	if (epte_present(*epte)) {
		/*
		 * Replacing a smaller-grained sub-tree with a big
		 * mapping requires tearing down the whole lower table.
		 */
		if (!epte_big(*epte) && level == 2)
			ept_clear_l2_epte(epte);
		else if (!epte_big(*epte) && level == 1)
			ept_clear_l1_epte(epte);
		else
			ept_clear_epte(epte);
	}

	flags = __EPTE_READ | __EPTE_EXEC |
		__EPTE_TYPE(EPTE_TYPE_WB) | __EPTE_IPAT;
	if (make_write)
		flags |= __EPTE_WRITE;
	if (vcpu->ept_ad_enabled) {
		/* premark A/D to avoid extra memory references */
		flags |= __EPTE_A;
		if (make_write)
			flags |= __EPTE_D;
	}

	if (level) {
		struct page *tmp = page;
		/*
		 * For a big mapping, hold the reference on the compound
		 * head (whose physical address is naturally aligned) and
		 * release the reference gup took on the tail page.
		 */
		page = compound_head(page);
		get_page(page);
		put_page(tmp);

		flags |= __EPTE_SZ;
	}

	*epte = epte_addr(page_to_phys(page)) | flags;

	spin_unlock(&vcpu->ept_lock);

	return 0;
}