Example #1
void
vt_paging_clear_all (void)
{
	/* Flush all cached EPT translations if EPT is in use. */
	if (current->u.vt.ept)
		vt_ept_clear_all ();
	/* Shadow page tables may be in use whenever "unrestricted
	 * guest" is unavailable; flush them as well. */
	if (!current->u.vt.unrestricted_guest)
		cpu_mmu_spt_clear_all ();
}
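
All three examples test per-vCPU state through current->u.vt. As an orientation aid, here is a hypothetical reconstruction of just the members these examples touch (the names come from the code above and below; the types are guesses, and the real structure certainly has more fields):

/* Hypothetical reconstruction of the current->u.vt members used
 * in Examples #1-#3; names are taken from usage, types are
 * guessed. */
struct vt_members_used_in_examples {
	void *ept;			/* non-NULL when EPT state exists */
	bool unrestricted_guest;	/* "unrestricted guest" in use */
	bool pcid_available;		/* guest PCID support */
	bool enable_invpcid_available;	/* "enable INVPCID" control */
	bool cr3exit_controllable;	/* CR3 exiting can be toggled */
	bool cr3exit_off;		/* CR3 exiting currently off */
	bool handle_pagefault;		/* intercept guest #PF (SPT) */
	ulong spt_cr3;			/* CR3 pointing at the SPT */
	struct {
		bool pg;		/* guest's paging bit (CR0.PG) */
	} vr;
};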
Example #2
void
vt_ept_map_1mb (void)
{
	ulong gphys;

	/* Drop every existing EPT entry, then pre-map the first
	 * 1 MiB of guest physical memory page by page. */
	vt_ept_clear_all ();
	for (gphys = 0; gphys < 0x100000; gphys += PAGESIZE) {
		mmio_lock ();
		/* Map the page only if it is not registered as an
		 * MMIO region; MMIO pages must keep causing EPT
		 * violations so their handlers run. */
		if (!mmio_access_page (gphys, false))
			vt_ept_map_page (false, gphys);
		mmio_unlock ();
	}
}
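
By analogy with vt_paging_clear_all in Example #1, one would expect a dispatching wrapper that chooses between EPT and the shadow page tables. A hypothetical sketch (cpu_mmu_spt_map_1mb is assumed as the SPT counterpart and is not shown in this excerpt):

/* Hypothetical wrapper, mirroring vt_paging_clear_all: pre-map
 * the first 1 MiB through whichever translation is in use. */
void
vt_paging_map_1mb (void)
{
	if (current->u.vt.ept)
		vt_ept_map_1mb ();
	if (!current->u.vt.unrestricted_guest)
		cpu_mmu_spt_map_1mb ();	/* assumed SPT counterpart */
}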
Example #3
void
vt_paging_pg_change (void)
{
	ulong tmp;
	u64 tmp64;
	bool ept_enable, use_spt;
	ulong cr3;

	/* Decide whether EPT or the shadow page tables (SPT)
	 * should be active from now on. */
	ept_enable = ept_enabled ();
	use_spt = !ept_enable;
#ifdef CPU_MMU_SPT_DISABLE
	if (current->u.vt.vr.pg) {
		ulong rflags;
		ulong acr;

		/* If both EPT and "unrestricted guest" were enabled,
		 * CS could be a data segment.  But
		 * CPU_MMU_SPT_DISABLE turns EPT off while guest
		 * paging is enabled, so if CS is a data segment
		 * here, make it a code segment. */
		if (!ept_enable || !current->u.vt.unrestricted_guest)
			goto cs_is_ok;
		asm_vmread (VMCS_GUEST_CS_ACCESS_RIGHTS, &acr);
		if ((acr & 0xF) != SEGDESC_TYPE_RDWR_DATA_A)
			goto cs_is_ok;
		/* The CS can be a data segment in virtual 8086
		 * mode. */
		asm_vmread (VMCS_GUEST_RFLAGS, &rflags);
		if (rflags & RFLAGS_VM_BIT)
			goto cs_is_ok;
		asm_vmwrite (VMCS_GUEST_CS_ACCESS_RIGHTS,
			     (acr & ~0xF) | SEGDESC_TYPE_EXECREAD_CODE_A);
	cs_is_ok:
		/* With CPU_MMU_SPT_DISABLE, neither EPT nor shadow
		 * paging is used while guest paging is enabled. */
		ept_enable = false;
		use_spt = false;
	}
#endif
	/* Reprogram the EPT-related VM-execution controls. */
	if (current->u.vt.ept) {
		asm_vmread (VMCS_PROC_BASED_VMEXEC_CTL2, &tmp);
		tmp &= ~(VMCS_PROC_BASED_VMEXEC_CTL2_ENABLE_EPT_BIT |
			 VMCS_PROC_BASED_VMEXEC_CTL2_UNRESTRICTED_GUEST_BIT);
		tmp |= ept_enable ?
			VMCS_PROC_BASED_VMEXEC_CTL2_ENABLE_EPT_BIT |
			(current->u.vt.unrestricted_guest ?
			 VMCS_PROC_BASED_VMEXEC_CTL2_UNRESTRICTED_GUEST_BIT :
			 0) : 0;
		tmp |= current->u.vt.unrestricted_guest &&
			current->u.vt.pcid_available &&
			current->u.vt.enable_invpcid_available ?
			VMCS_PROC_BASED_VMEXEC_CTL2_ENABLE_INVPCID_BIT : 0;
		asm_vmwrite (VMCS_PROC_BASED_VMEXEC_CTL2, tmp);
		/* The CPU saves/loads the PAT across VM exits and
		 * entries only while EPT is enabled. */
		asm_vmread (VMCS_VMEXIT_CTL, &tmp);
		if (ept_enable)
			tmp |= (VMCS_VMEXIT_CTL_SAVE_IA32_PAT_BIT |
				VMCS_VMEXIT_CTL_LOAD_IA32_PAT_BIT);
		else
			tmp &= ~(VMCS_VMEXIT_CTL_SAVE_IA32_PAT_BIT |
				 VMCS_VMEXIT_CTL_LOAD_IA32_PAT_BIT);
		asm_vmwrite (VMCS_VMEXIT_CTL, tmp);
		asm_vmread (VMCS_VMENTRY_CTL, &tmp);
		if (ept_enable)
			tmp |= VMCS_VMENTRY_CTL_LOAD_IA32_PAT_BIT;
		else
			tmp &= ~VMCS_VMENTRY_CTL_LOAD_IA32_PAT_BIT;
		asm_vmwrite (VMCS_VMENTRY_CTL, tmp);
		if (ept_enable) {
			/* Host PAT comes from the real MSR; guest PAT
			 * comes from the value cached for the guest. */
			asm_rdmsr64 (MSR_IA32_PAT, &tmp64);
			asm_vmwrite64 (VMCS_HOST_IA32_PAT, tmp64);
			cache_get_gpat (&tmp64);
			asm_vmwrite64 (VMCS_GUEST_IA32_PAT, tmp64);
		}
	}
	/* INVLPG needs to cause VM exits only when shadow paging
	 * is active. */
	asm_vmread (VMCS_PROC_BASED_VMEXEC_CTL, &tmp);
	if (use_spt)
		tmp |= VMCS_PROC_BASED_VMEXEC_CTL_INVLPGEXIT_BIT;
	else
		tmp &= ~VMCS_PROC_BASED_VMEXEC_CTL_INVLPGEXIT_BIT;
	if (current->u.vt.cr3exit_controllable) {
		/* CR3 load/store exiting is required only for shadow
		 * paging.  Read CR3 while the old setting is in
		 * effect and write it back after flipping
		 * cr3exit_off, so the guest CR3 value ends up in the
		 * right place for the new setting. */
		if (use_spt && current->u.vt.cr3exit_off) {
			cr3 = vt_read_cr3 ();
			tmp |= VMCS_PROC_BASED_VMEXEC_CTL_CR3LOADEXIT_BIT;
			tmp |= VMCS_PROC_BASED_VMEXEC_CTL_CR3STOREEXIT_BIT;
			current->u.vt.cr3exit_off = false;
			vt_write_cr3 (cr3);
		} else if (!use_spt && !current->u.vt.cr3exit_off) {
			cr3 = vt_read_cr3 ();
			tmp &= ~VMCS_PROC_BASED_VMEXEC_CTL_CR3LOADEXIT_BIT;
			tmp &= ~VMCS_PROC_BASED_VMEXEC_CTL_CR3STOREEXIT_BIT;
			current->u.vt.cr3exit_off = true;
			vt_write_cr3 (cr3);
		}
	}
	asm_vmwrite (VMCS_PROC_BASED_VMEXEC_CTL, tmp);
	/* Re-apply the fixed CR0/CR4 bits and reload the guest CR3
	 * appropriate for the selected mode. */
	tmp = vt_read_cr0 ();
	asm_vmwrite (VMCS_GUEST_CR0, vt_paging_apply_fixed_cr0 (tmp));
	if (use_spt)
		asm_vmwrite (VMCS_GUEST_CR3, current->u.vt.spt_cr3);
	else
		vt_update_vmcs_guest_cr3 ();
	tmp = vt_read_cr4 ();
	asm_vmwrite (VMCS_GUEST_CR4, vt_paging_apply_fixed_cr4 (tmp));
	/* Guest page faults are intercepted only for shadow paging. */
	current->u.vt.handle_pagefault = use_spt;
	vt_update_exception_bmp ();
	/* Flush cached EPT translations after the mode change. */
	if (ept_enable)
		vt_ept_clear_all ();
}
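
Example #3 toggles individual bits in several VMCS control fields with the same read-modify-write sequence. A minimal sketch of that sequence as a standalone helper (vmcs_update_bit is hypothetical and not part of this source; asm_vmread and asm_vmwrite are the accessors already used above):

/* Hypothetical helper: set or clear one bit in a VMCS field
 * using the read-modify-write pattern from Example #3. */
static void
vmcs_update_bit (ulong field, ulong bit, bool set)
{
	ulong tmp;

	asm_vmread (field, &tmp);
	if (set)
		tmp |= bit;
	else
		tmp &= ~bit;
	asm_vmwrite (field, tmp);
}

Note that Example #3 deliberately batches several bit updates into a single asm_vmwrite per field (for instance, the INVLPG and CR3 exit bits of VMCS_PROC_BASED_VMEXEC_CTL), so a per-bit helper like this would trade one VMWRITE for several.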