/*
 * MMU notifier hook: a single host page is being invalidated, so drop the
 * corresponding EPT mapping. The mapping is rebuilt lazily on the next
 * EPT violation fault.
 */
static void ept_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct vmx_vcpu *v = mmu_notifier_to_vmx(mn);

	pr_debug("ept: invalidate_page addr %lx\n", address);

	ept_invalidate_page(v, mm, address);
}
/*
 * MMU notifier hook: report (and clear) the "young"/accessed state of the
 * page at @address. With EPT A/D bits available we query the hardware
 * accessed bit directly; otherwise we approximate by invalidating the
 * mapping, so a subsequent access re-faults and proves the page is live.
 *
 * NOTE(review): a range-based clear_flush_young with the same name appears
 * later in this file — presumably one of the two is compiled in via
 * kernel-version #ifdefs outside this view; confirm before building.
 */
static int ept_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long address)
{
	struct vmx_vcpu *v = mmu_notifier_to_vmx(mn);

	pr_debug("ept: clear_flush_young addr %lx\n", address);

	if (v->ept_ad_enabled)
		return ept_check_page_accessed(v, mm, address, true);

	return ept_invalidate_page(v, mm, address);
}
/*
 * MMU notifier hook: the PTE backing @address changed. Rather than
 * remapping in place, we just tear down the EPT entry and let the next
 * fault rebuild it.
 */
static void ept_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address, pte_t pte)
{
	struct vmx_vcpu *v = mmu_notifier_to_vmx(mn);

	pr_debug("ept: change_pte addr %lx flags %lx\n", address,
		 pte_flags(pte));

	/*
	 * Recent kernels (seen on 3.7 at least) hold a lock while calling
	 * this notifier, making it impossible to call get_user_pages_fast().
	 * As a result, we just invalidate the page so that the mapping can
	 * be recreated later during a fault.
	 */
	ept_invalidate_page(v, mm, address);
}
/*
 * MMU notifier hook (range form): report and clear the accessed state for
 * every page in [@start, @end). With EPT A/D bits we test the hardware
 * accessed bit per page; without them we invalidate each page so a later
 * access re-faults. Returns non-zero if any page in the range was young.
 *
 * NOTE(review): a single-address clear_flush_young with the same name
 * appears earlier in this file — presumably one of the two is compiled in
 * via kernel-version #ifdefs outside this view; confirm before building.
 */
static int ept_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long start,
					      unsigned long end)
{
	struct vmx_vcpu *vcpu = mmu_notifier_to_vmx(mn);
	unsigned long addr;
	int young = 0;

	pr_debug("ept: clear_flush_young start %lx end %lx\n", start, end);

	if (vcpu->ept_ad_enabled) {
		for (addr = start; addr < end; addr += PAGE_SIZE)
			young |= ept_check_page_accessed(vcpu, mm, addr, true);
	} else {
		for (addr = start; addr < end; addr += PAGE_SIZE)
			young |= ept_invalidate_page(vcpu, mm, addr);
	}

	return young;
}