/* Reload the guest PDPTE fields in the VMCS.  When EPT is enabled
 * and the guest uses PAE paging outside long mode, the processor
 * takes the PDPTEs from the VMCS rather than from guest memory, so
 * they must be refreshed whenever the guest CR3 changes. */
void
vt_ept_updatecr3 (void)
{
	ulong cr3, cr4;
	u32 tmpl, tmph;
	u64 tmp64;

	vt_paging_flush_guest_tlb ();
	if (!current->u.vt.lma && current->u.vt.vr.pg) {
		asm_vmread (VMCS_CR4_READ_SHADOW, &cr4);
		if (cr4 & CR4_PAE_BIT) {
			asm_vmread (VMCS_GUEST_CR3, &cr3);
			cr3 &= 0xFFFFFFE0; /* PDPT is 32-byte aligned */
			read_gphys_q (cr3 + 0x0, &tmp64, 0);
			conv64to32 (tmp64, &tmpl, &tmph);
			asm_vmwrite (VMCS_GUEST_PDPTE0, tmpl);
			asm_vmwrite (VMCS_GUEST_PDPTE0_HIGH, tmph);
			read_gphys_q (cr3 + 0x8, &tmp64, 0);
			conv64to32 (tmp64, &tmpl, &tmph);
			asm_vmwrite (VMCS_GUEST_PDPTE1, tmpl);
			asm_vmwrite (VMCS_GUEST_PDPTE1_HIGH, tmph);
			read_gphys_q (cr3 + 0x10, &tmp64, 0);
			conv64to32 (tmp64, &tmpl, &tmph);
			asm_vmwrite (VMCS_GUEST_PDPTE2, tmpl);
			asm_vmwrite (VMCS_GUEST_PDPTE2_HIGH, tmph);
			read_gphys_q (cr3 + 0x18, &tmp64, 0);
			conv64to32 (tmp64, &tmpl, &tmph);
			asm_vmwrite (VMCS_GUEST_PDPTE3, tmpl);
			asm_vmwrite (VMCS_GUEST_PDPTE3_HIGH, tmph);
		}
	}
}
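/* A possible loop form of the PDPTE reload above, assuming (as the
 * Intel SDM encodes them) that the four guest PDPTE fields occupy
 * consecutive 64-bit VMCS encodings, i.e. VMCS_GUEST_PDPTE0 + 2 * i
 * with the _HIGH half at the next odd encoding.  A hedged sketch for
 * illustration, not the code actually used above; the function name
 * is hypothetical. */
static void
vt_ept_load_pdptes_sketch (ulong cr3)
{
	u32 tmpl, tmph;
	u64 tmp64;
	int i;

	for (i = 0; i < 4; i++) {
		/* Each PDPTE is 8 bytes within the 32-byte PDPT. */
		read_gphys_q ((cr3 & 0xFFFFFFE0) + 8 * i, &tmp64, 0);
		conv64to32 (tmp64, &tmpl, &tmph);
		asm_vmwrite (VMCS_GUEST_PDPTE0 + 2 * i, tmpl);
		asm_vmwrite (VMCS_GUEST_PDPTE0_HIGH + 2 * i, tmph);
	}
}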
/* Allocate the EPT paging structures for the current vcpu and set
 * the EPT pointer: physical address of the top-level table,
 * write-back memory type, and a 4-level page walk. */
void
vt_ept_init (void)
{
	struct vt_ept *ept;
	int i;

	ept = alloc (sizeof *ept);
	alloc_page (&ept->ncr3tbl, &ept->ncr3tbl_phys);
	memset (ept->ncr3tbl, 0, PAGESIZE);
	for (i = 0; i < NUM_OF_EPTBL; i++)
		alloc_page (&ept->tbl[i], &ept->tbl_phys[i]);
	ept->cnt = 0;
	current->u.vt.ept = ept;
	asm_vmwrite (VMCS_EPT_POINTER, ept->ncr3tbl_phys |
		     VMCS_EPT_POINTER_EPT_WB | VMCS_EPT_PAGEWALK_LENGTH_4);
	asm_vmwrite (VMCS_EPT_POINTER_HIGH, 0);
}
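/* For reference, the EPT pointer written above packs the fields the
 * SDM defines: bits 2:0 the memory type (6 = write-back), bits 5:3
 * the page-walk length minus one (3 for a 4-level walk), and the
 * upper bits the physical address of the top-level table.  A
 * hypothetical helper making the composition explicit; the constants
 * VMCS_EPT_POINTER_EPT_WB and VMCS_EPT_PAGEWALK_LENGTH_4 used above
 * are assumed to expand to these values. */
static u64
make_eptp_sketch (u64 ncr3tbl_phys)
{
	return ncr3tbl_phys | (3 << 3) | 6;
}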
/* Copy the shadowed guest CR3 into the VMCS.  When CR3 load/store
 * exiting is disabled, the hardware keeps VMCS_GUEST_CR3 up to date
 * by itself, so the (possibly stale) shadow value must not be
 * written back. */
static void
vt_update_vmcs_guest_cr3 (void)
{
	struct vt *p = &current->u.vt;

	if (!p->cr3exit_off)
		asm_vmwrite (VMCS_GUEST_CR3, p->vr.cr3);
}
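/* The cr3exit_off flag decides where the authoritative guest CR3
 * lives: in the vr.cr3 shadow when CR3 accesses cause VM exits, or
 * in VMCS_GUEST_CR3 when the hardware handles them directly.  A
 * hypothetical reader illustrating that split; the real vt_read_cr3
 * used by vt_paging_pg_change below may differ in detail. */
static ulong
vt_read_cr3_sketch (void)
{
	ulong cr3;

	if (current->u.vt.cr3exit_off)
		asm_vmread (VMCS_GUEST_CR3, &cr3);
	else
		cr3 = current->u.vt.vr.cr3;
	return cr3;
}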
/* Advance the guest RIP past the instruction that caused the
 * VM exit. */
static void
add_ip (void)
{
	ulong ip, len;

	asm_vmread (VMCS_GUEST_RIP, &ip);
	asm_vmread (VMCS_VMEXIT_INSTRUCTION_LEN, &len);
	ip += len;
	asm_vmwrite (VMCS_GUEST_RIP, ip);
}
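/* Typical use of add_ip: an exit handler finishes emulating the
 * exiting instruction, then advances RIP past it so the guest
 * resumes at the next instruction instead of re-executing the one
 * that trapped.  A minimal hedged sketch for an exit that needs no
 * emulation work beyond the skip; the handler name is
 * hypothetical. */
static void
do_pause_exit_sketch (void)
{
	/* Nothing to emulate for PAUSE; just move past it. */
	add_ip ();
}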
void
vt_paging_spt_setcr3 (ulong cr3)
{
	current->u.vt.spt_cr3 = cr3;
#ifdef CPU_MMU_SPT_DISABLE
	if (current->u.vt.vr.pg)
		return;
#endif
	if (!ept_enabled ())
		asm_vmwrite (VMCS_GUEST_CR3, cr3);
}
void
vt_paging_pg_change (void)
{
	ulong tmp;
	u64 tmp64;
	bool ept_enable, use_spt;
	ulong cr3;

	ept_enable = ept_enabled ();
	use_spt = !ept_enable;
#ifdef CPU_MMU_SPT_DISABLE
	if (current->u.vt.vr.pg) {
		ulong rflags;
		ulong acr;

		/* If both EPT and "unrestricted guest" were enabled,
		 * the CS could be a data segment.  But
		 * CPU_MMU_SPT_DISABLE disables EPT while the guest
		 * enables paging.  So if the CS is a data segment
		 * here, make it a code segment. */
		if (!ept_enable || !current->u.vt.unrestricted_guest)
			goto cs_is_ok;
		asm_vmread (VMCS_GUEST_CS_ACCESS_RIGHTS, &acr);
		if ((acr & 0xF) != SEGDESC_TYPE_RDWR_DATA_A)
			goto cs_is_ok;
		/* The CS can be a data segment in virtual 8086
		 * mode. */
		asm_vmread (VMCS_GUEST_RFLAGS, &rflags);
		if (rflags & RFLAGS_VM_BIT)
			goto cs_is_ok;
		asm_vmwrite (VMCS_GUEST_CS_ACCESS_RIGHTS,
			     (acr & ~0xF) | SEGDESC_TYPE_EXECREAD_CODE_A);
	cs_is_ok:
		ept_enable = false;
		use_spt = false;
	}
#endif
	if (current->u.vt.ept) {
		/* Toggle EPT and "unrestricted guest" in the
		 * secondary processor-based controls. */
		asm_vmread (VMCS_PROC_BASED_VMEXEC_CTL2, &tmp);
		tmp &= ~(VMCS_PROC_BASED_VMEXEC_CTL2_ENABLE_EPT_BIT |
			 VMCS_PROC_BASED_VMEXEC_CTL2_UNRESTRICTED_GUEST_BIT);
		tmp |= ept_enable ?
			VMCS_PROC_BASED_VMEXEC_CTL2_ENABLE_EPT_BIT |
			(current->u.vt.unrestricted_guest ?
			 VMCS_PROC_BASED_VMEXEC_CTL2_UNRESTRICTED_GUEST_BIT :
			 0) : 0;
		tmp |= current->u.vt.unrestricted_guest &&
			current->u.vt.pcid_available &&
			current->u.vt.enable_invpcid_available ?
			VMCS_PROC_BASED_VMEXEC_CTL2_ENABLE_INVPCID_BIT : 0;
		asm_vmwrite (VMCS_PROC_BASED_VMEXEC_CTL2, tmp);
		/* With EPT, the PAT must be saved and loaded across
		 * VM exits and entries. */
		asm_vmread (VMCS_VMEXIT_CTL, &tmp);
		if (ept_enable)
			tmp |= (VMCS_VMEXIT_CTL_SAVE_IA32_PAT_BIT |
				VMCS_VMEXIT_CTL_LOAD_IA32_PAT_BIT);
		else
			tmp &= ~(VMCS_VMEXIT_CTL_SAVE_IA32_PAT_BIT |
				 VMCS_VMEXIT_CTL_LOAD_IA32_PAT_BIT);
		asm_vmwrite (VMCS_VMEXIT_CTL, tmp);
		asm_vmread (VMCS_VMENTRY_CTL, &tmp);
		if (ept_enable)
			tmp |= VMCS_VMENTRY_CTL_LOAD_IA32_PAT_BIT;
		else
			tmp &= ~VMCS_VMENTRY_CTL_LOAD_IA32_PAT_BIT;
		asm_vmwrite (VMCS_VMENTRY_CTL, tmp);
		if (ept_enable) {
			asm_rdmsr64 (MSR_IA32_PAT, &tmp64);
			asm_vmwrite64 (VMCS_HOST_IA32_PAT, tmp64);
			cache_get_gpat (&tmp64);
			asm_vmwrite64 (VMCS_GUEST_IA32_PAT, tmp64);
		}
	}
	/* Shadow paging needs INVLPG exits; EPT does not. */
	asm_vmread (VMCS_PROC_BASED_VMEXEC_CTL, &tmp);
	if (use_spt)
		tmp |= VMCS_PROC_BASED_VMEXEC_CTL_INVLPGEXIT_BIT;
	else
		tmp &= ~VMCS_PROC_BASED_VMEXEC_CTL_INVLPGEXIT_BIT;
	if (current->u.vt.cr3exit_controllable) {
		if (use_spt && current->u.vt.cr3exit_off) {
			/* Turn CR3 load/store exiting back on for
			 * shadow paging.  Read CR3 before flipping
			 * cr3exit_off and write it back after,
			 * because the read and write paths depend on
			 * the flag. */
			cr3 = vt_read_cr3 ();
			tmp |= VMCS_PROC_BASED_VMEXEC_CTL_CR3LOADEXIT_BIT;
			tmp |= VMCS_PROC_BASED_VMEXEC_CTL_CR3STOREEXIT_BIT;
			current->u.vt.cr3exit_off = false;
			vt_write_cr3 (cr3);
		} else if (!use_spt && !current->u.vt.cr3exit_off) {
			cr3 = vt_read_cr3 ();
			tmp &= ~VMCS_PROC_BASED_VMEXEC_CTL_CR3LOADEXIT_BIT;
			tmp &= ~VMCS_PROC_BASED_VMEXEC_CTL_CR3STOREEXIT_BIT;
			current->u.vt.cr3exit_off = true;
			vt_write_cr3 (cr3);
		}
	}
	asm_vmwrite (VMCS_PROC_BASED_VMEXEC_CTL, tmp);
	tmp = vt_read_cr0 ();
	asm_vmwrite (VMCS_GUEST_CR0, vt_paging_apply_fixed_cr0 (tmp));
	if (use_spt)
		asm_vmwrite (VMCS_GUEST_CR3, current->u.vt.spt_cr3);
	else
		vt_update_vmcs_guest_cr3 ();
	tmp = vt_read_cr4 ();
	asm_vmwrite (VMCS_GUEST_CR4, vt_paging_apply_fixed_cr4 (tmp));
	current->u.vt.handle_pagefault = use_spt;
	vt_update_exception_bmp ();
	if (ept_enable)
		vt_ept_clear_all ();
}
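/* vt_paging_pg_change repeatedly reads a VMCS control field, sets or
 * clears some bits, and writes the result back.  A hypothetical
 * helper capturing that read-modify-write pattern; a sketch for
 * illustration, not used by the code above. */
static void
vmcs_ctl_change_sketch (ulong field, ulong bits, bool set)
{
	ulong tmp;

	asm_vmread (field, &tmp);
	if (set)
		tmp |= bits;
	else
		tmp &= ~bits;
	asm_vmwrite (field, tmp);
}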