Example #1
static void ept_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct vmx_vcpu *vcpu = mmu_notifier_to_vmx(mn);
	int ret;
	epte_t *epte;
	unsigned long pos = start;
	bool sync_needed = false;

	pr_debug("ept: invalidate_range_start start %lx end %lx\n", start, end);

	spin_lock(&vcpu->ept_lock);
	while (pos < end) {
		ret = ept_lookup(vcpu, mm, (void *) pos, 0, 0, &epte);
		if (!ret) {
			/* Read the page size (huge or regular) before
			 * clearing the entry, then remember to flush
			 * once the lock is dropped. */
			pos += epte_big(*epte) ? HUGE_PAGE_SIZE : PAGE_SIZE;
			ept_clear_epte(epte);
			sync_needed = true;
		} else {
			pos += PAGE_SIZE;
		}
	}
	spin_unlock(&vcpu->ept_lock);

	if (sync_needed)
		vmx_ept_sync_vcpu(vcpu);
}
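
Each of these callbacks recovers the owning vcpu with mmu_notifier_to_vmx(), whose definition is not shown. A minimal sketch of how such a helper is typically written, assuming the notifier is embedded in struct vmx_vcpu under the (hypothetical) field name mmu_notifier:

#include <linux/mmu_notifier.h>

static inline struct vmx_vcpu *mmu_notifier_to_vmx(struct mmu_notifier *mn)
{
	/* container_of() recovers the enclosing structure from a pointer
	 * to its embedded member; the field name is an assumption. */
	return container_of(mn, struct vmx_vcpu, mmu_notifier);
}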
Example #2
static void ept_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct vmx_vcpu *vcpu = mmu_notifier_to_vmx(mn);

	pr_debug("ept: invalidate_page addr %lx\n", address);

	ept_invalidate_page(vcpu, mm, address);
}
Example #3
static int ept_mmu_notifier_test_young(struct mmu_notifier *mn,
				       struct mm_struct *mm,
				       unsigned long address)
{
	struct vmx_vcpu *vcpu = mmu_notifier_to_vmx(mn);

	pr_debug("ept: test_young addr %lx\n", address);

	/*
	 * Without EPT A/D support we can only report whether the page
	 * is mapped; with it, test the hardware accessed flag without
	 * clearing it.
	 */
	if (!vcpu->ept_ad_enabled)
		return ept_check_page_mapped(vcpu, mm, address);
	else
		return ept_check_page_accessed(vcpu, mm, address, false);
}
Example #4
static int ept_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long address)
{
	struct vmx_vcpu *vcpu = mmu_notifier_to_vmx(mn);

	pr_debug("ept: clear_flush_young addr %lx\n", address);

	/*
	 * Without EPT A/D support, treat a present mapping as "young"
	 * and invalidate it so the next access re-faults; with it, test
	 * and clear the hardware accessed flag.
	 */
	if (!vcpu->ept_ad_enabled)
		return ept_invalidate_page(vcpu, mm, address);
	else
		return ept_check_page_accessed(vcpu, mm, address, true);
}
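
Examples #3 and #4 both delegate to ept_check_page_accessed(), whose body is not shown. Below is a hedged sketch of what such a helper might look like, assuming the EPT accessed flag sits in bit 8 of the entry (per the Intel SDM) and reusing ept_lookup(), ept_lock, and vmx_ept_sync_vcpu() from Example #1; the real implementation may differ:

#define EPTE_ACCESSED	(1ULL << 8)	/* EPT accessed flag (Intel SDM) */

static int ept_check_page_accessed(struct vmx_vcpu *vcpu,
				   struct mm_struct *mm,
				   unsigned long address, bool flush)
{
	int accessed;
	epte_t *epte;

	spin_lock(&vcpu->ept_lock);
	if (ept_lookup(vcpu, mm, (void *) address, 0, 0, &epte)) {
		spin_unlock(&vcpu->ept_lock);
		return 0;	/* no EPT mapping for this address */
	}

	accessed = !!(*epte & EPTE_ACCESSED);
	if (accessed && flush)
		*epte &= ~EPTE_ACCESSED;
	spin_unlock(&vcpu->ept_lock);

	/* After clearing the accessed bit, flush the stale EPT mapping. */
	if (accessed && flush)
		vmx_ept_sync_vcpu(vcpu);

	return accessed;
}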
Example #5
static void ept_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct vmx_vcpu *vcpu = mmu_notifier_to_vmx(mn);

	pr_debug("ept: change_pte addr %lx flags %lx\n", address, pte_flags(pte));

	/*
	 * NOTE: Recent Linux kernels (seen on 3.7 at least) hold a lock
	 * while calling this notifier, making it impossible to call
	 * get_user_pages_fast(). As a result, we just invalidate the
	 * page so that the mapping can be recreated later during a fault.
	 */
	ept_invalidate_page(vcpu, mm, address);
}
Example #6
static int ept_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long start,
					      unsigned long end)
{
	int ret = 0;
	struct vmx_vcpu *vcpu = mmu_notifier_to_vmx(mn);

	pr_debug("ept: clear_flush_young start %lx end %lx\n", start, end);

	/* Same per-page policy as Example #4, applied across [start, end). */
	if (!vcpu->ept_ad_enabled) {
		for (; start < end; start += PAGE_SIZE)
			ret |= ept_invalidate_page(vcpu, mm, start);
	} else {
		for (; start < end; start += PAGE_SIZE)
			ret |= ept_check_page_accessed(vcpu, mm, start, true);
	}

	return ret;
}
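
Taken together, these handlers would be wired into the kernel through a struct mmu_notifier_ops table. The following is a minimal sketch, not taken from the original source: the ops structure and mmu_notifier_register() are the standard Linux API, while the vcpu->mmu_notifier field is an assumption. Note that Examples #4 and #6 are alternative versions of the same callback for different kernel releases (newer kernels pass a [start, end) range to clear_flush_young), so only one of them would be compiled in:

static const struct mmu_notifier_ops ept_mmu_notifier_ops = {
	.invalidate_range_start	= ept_mmu_notifier_invalidate_range_start,
	.invalidate_page	= ept_mmu_notifier_invalidate_page,
	.test_young		= ept_mmu_notifier_test_young,
	.clear_flush_young	= ept_mmu_notifier_clear_flush_young,
	.change_pte		= ept_mmu_notifier_change_pte,
};

static int ept_register_mmu_notifier(struct vmx_vcpu *vcpu,
				     struct mm_struct *mm)
{
	/* Assumes struct mmu_notifier is embedded in struct vmx_vcpu. */
	vcpu->mmu_notifier.ops = &ept_mmu_notifier_ops;
	return mmu_notifier_register(&vcpu->mmu_notifier, mm);
}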