// Creates a breakpoint object and fills out basic fields
_Use_decl_annotations_ static std::unique_ptr<PatchInformation>
SbppCreateBreakpoint(void* address) {
  auto info = std::make_unique<PatchInformation>();
  auto reusable_info = SbppFindPatchByPage(address);
  if (reusable_info) {
    // Found an existing breakpoint object targeting the same page as this
    // one. Re-use its shadow pages.
    info->shadow_page_base_for_rw = reusable_info->shadow_page_base_for_rw;
    info->shadow_page_base_for_exec = reusable_info->shadow_page_base_for_exec;
  } else {
    // This breakpoint is for a page that currently has no breakpoint set
    // (ie, not shadowed). Create shadow pages.
    info->shadow_page_base_for_rw = std::make_shared<Page>();
    info->shadow_page_base_for_exec = std::make_shared<Page>();
    auto page_base = PAGE_ALIGN(address);
    RtlCopyMemory(info->shadow_page_base_for_rw.get()->page, page_base,
                  PAGE_SIZE);
    RtlCopyMemory(info->shadow_page_base_for_exec.get()->page, page_base,
                  PAGE_SIZE);
  }
  info->patch_address = address;
  info->pa_base_for_rw =
      UtilPaFromVa(info->shadow_page_base_for_rw.get()->page);
  info->pa_base_for_exec =
      UtilPaFromVa(info->shadow_page_base_for_exec.get()->page);

  // Set an actual breakpoint (0xcc) onto the shadow page for EXEC
  SbppEmbedBreakpoint(info->shadow_page_base_for_exec.get()->page +
                      BYTE_OFFSET(address));
  return info;
}
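// A minimal sketch of how SbppFindPatchByPage could work: scan the recorded
// patches for one whose target falls on the same page. The g_sbpp_patches
// container is hypothetical and stands in for however the patches are
// actually stored.
static std::vector<std::unique_ptr<PatchInformation>> g_sbpp_patches;

static PatchInformation* SbppFindPatchByPage(void* address) {
  for (auto& info : g_sbpp_patches) {
    if (PAGE_ALIGN(info->patch_address) == PAGE_ALIGN(address)) {
      return info.get();  // Same page; its shadow pages can be re-used
    }
  }
  return nullptr;
}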
// Execute a non-image region as a test
_Use_decl_annotations_ void MmonExecuteDoggyRegion() {
  PAGED_CODE();

#pragma prefast(suppress : 30030, "Allocating executable POOL_TYPE memory")
  auto code = reinterpret_cast<UCHAR *>(ExAllocatePoolWithTag(
      NonPagedPoolExecute, PAGE_SIZE, kHyperPlatformCommonPoolTag));
  if (!code) {
    return;
  }
  RtlZeroMemory(code, PAGE_SIZE);
  HYPERPLATFORM_LOG_DEBUG("PoolCode = %p, Pa = %016llx", code,
                          UtilPaFromVa(code));
  code[0] = 0x90;  // nop
  code[1] = 0x90;  // nop
  if (IsX64()) {
    code[2] = 0xc3;  // ret
  } else {
    code[2] = 0xc2;
    code[3] = 0x04;  // retn 4
  }
  KeInvalidateAllCaches();

  // Runs code on all processors at once
  auto function = reinterpret_cast<PKIPI_BROADCAST_WORKER>(code);
  KeIpiGenericCall(function, 0);
  ExFreePoolWithTag(code, kHyperPlatformCommonPoolTag);
}
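// Why "retn 4" on x86: KeIpiGenericCall invokes the stub as a
// KIPI_BROADCAST_WORKER, ie ULONG_PTR (NTAPI *)(ULONG_PTR). NTAPI is stdcall
// on x86, so the callee must pop its 4-byte argument on return:
//
//   90        nop
//   90        nop
//   c2 04 00  retn 4    ; x86: pop the ULONG_PTR argument
//   c3        ret       ; x64: arguments are passed in registers
//
// The trailing 00 byte of "retn 4" is already present thanks to
// RtlZeroMemory above.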
// See: VMM SETUP & TEAR DOWN
_Use_decl_annotations_ static bool VmpEnterVmxMode(
    ProcessorData *processor_data) {
  // Apply FIXED bits
  const Cr0 cr0_fixed0 = {UtilReadMsr(Msr::kIa32VmxCr0Fixed0)};
  const Cr0 cr0_fixed1 = {UtilReadMsr(Msr::kIa32VmxCr0Fixed1)};
  Cr0 cr0 = {__readcr0()};
  cr0.all &= cr0_fixed1.all;
  cr0.all |= cr0_fixed0.all;
  __writecr0(cr0.all);

  const Cr4 cr4_fixed0 = {UtilReadMsr(Msr::kIa32VmxCr4Fixed0)};
  const Cr4 cr4_fixed1 = {UtilReadMsr(Msr::kIa32VmxCr4Fixed1)};
  Cr4 cr4 = {__readcr4()};
  cr4.all &= cr4_fixed1.all;
  cr4.all |= cr4_fixed0.all;
  __writecr4(cr4.all);

  // Write a VMCS revision identifier
  const Ia32VmxBasicMsr vmx_basic_msr = {UtilReadMsr64(Msr::kIa32VmxBasic)};
  processor_data->vmxon_region->revision_identifier =
      vmx_basic_msr.fields.revision_identifier;

  auto vmxon_region_pa = UtilPaFromVa(processor_data->vmxon_region);
  if (__vmx_on(&vmxon_region_pa)) {
    return false;
  }

  UtilInveptAll();
  return true;
}
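// The FIXED-bit adjustment above follows a well-known pattern: bits set in
// IA32_VMX_CRx_FIXED0 must be 1, and bits clear in IA32_VMX_CRx_FIXED1 must
// be 0, while the processor is in VMX operation. A generic helper
// (illustrative; not part of the original code) makes the rule explicit:
static ULONG_PTR VmpApplyFixedBits(ULONG_PTR value, ULONG_PTR fixed0,
                                   ULONG_PTR fixed1) {
  value &= fixed1;  // Clear any bit that must be 0
  value |= fixed0;  // Set any bit that must be 1
  return value;
}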
// Stop showing a shadow page
_Use_decl_annotations_ static void SbppDisablePageShadowing(
    const PatchInformation& info, EptData* ept_data) {
  // Point the EPT entry back at the original page frame and allow all
  // accesses again, so that the guest no longer sees either shadow copy
  const auto pa_base = UtilPaFromVa(PAGE_ALIGN(info.patch_address));
  const auto ept_pt_entry = EptGetEptPtEntry(ept_data, pa_base);
  ept_pt_entry->fields.execute_access = true;
  ept_pt_entry->fields.write_access = true;
  ept_pt_entry->fields.read_access = true;
  ept_pt_entry->fields.physial_address = UtilPfnFromPa(pa_base);

  UtilInveptAll();
}
// See: VMM SETUP & TEAR DOWN
_Use_decl_annotations_ static bool VmpEnterVmxMode(
    ProcessorData *processor_data) {
  PAGED_CODE();

  // Apply FIXED bits
  // See: VMX-FIXED BITS IN CR0
  //
  //        IA32_VMX_CRx_FIXED0 IA32_VMX_CRx_FIXED1 Meaning
  // Values 1                   1                   bit of CRx is fixed to 1
  // Values 0                   1                   bit of CRx is flexible
  // Values 0                   0                   bit of CRx is fixed to 0
  const Cr0 cr0_fixed0 = {UtilReadMsr(Msr::kIa32VmxCr0Fixed0)};
  const Cr0 cr0_fixed1 = {UtilReadMsr(Msr::kIa32VmxCr0Fixed1)};
  Cr0 cr0 = {__readcr0()};
  Cr0 cr0_original = cr0;
  cr0.all &= cr0_fixed1.all;
  cr0.all |= cr0_fixed0.all;
  __writecr0(cr0.all);

  HYPERPLATFORM_LOG_DEBUG("IA32_VMX_CR0_FIXED0 = %08x", cr0_fixed0.all);
  HYPERPLATFORM_LOG_DEBUG("IA32_VMX_CR0_FIXED1 = %08x", cr0_fixed1.all);
  HYPERPLATFORM_LOG_DEBUG("Original CR0        = %08x", cr0_original.all);
  HYPERPLATFORM_LOG_DEBUG("Fixed CR0           = %08x", cr0.all);

  // See: VMX-FIXED BITS IN CR4
  const Cr4 cr4_fixed0 = {UtilReadMsr(Msr::kIa32VmxCr4Fixed0)};
  const Cr4 cr4_fixed1 = {UtilReadMsr(Msr::kIa32VmxCr4Fixed1)};
  Cr4 cr4 = {__readcr4()};
  Cr4 cr4_original = cr4;
  cr4.all &= cr4_fixed1.all;
  cr4.all |= cr4_fixed0.all;
  __writecr4(cr4.all);

  HYPERPLATFORM_LOG_DEBUG("IA32_VMX_CR4_FIXED0 = %08x", cr4_fixed0.all);
  HYPERPLATFORM_LOG_DEBUG("IA32_VMX_CR4_FIXED1 = %08x", cr4_fixed1.all);
  HYPERPLATFORM_LOG_DEBUG("Original CR4        = %08x", cr4_original.all);
  HYPERPLATFORM_LOG_DEBUG("Fixed CR4           = %08x", cr4.all);

  // Write a VMCS revision identifier
  const Ia32VmxBasicMsr vmx_basic_msr = {UtilReadMsr64(Msr::kIa32VmxBasic)};
  processor_data->vmxon_region->revision_identifier =
      vmx_basic_msr.fields.revision_identifier;

  auto vmxon_region_pa = UtilPaFromVa(processor_data->vmxon_region);
  if (__vmx_on(&vmxon_region_pa)) {
    return false;
  }

  // See: Guidelines for Use of the INVVPID Instruction, and Guidelines for
  // Use of the INVEPT Instruction
  UtilInveptGlobal();
  UtilInvvpidAllContext();
  return true;
}
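// After VMXON, the SDM guidelines call for invalidating stale EPT- and
// VPID-tagged TLB entries, which is what UtilInveptGlobal() and
// UtilInvvpidAllContext() do. A minimal sketch of a global INVEPT, assuming
// an AsmInvept assembly stub that executes the INVEPT instruction (the
// descriptor layout is from the SDM; the stub's signature is illustrative):
struct InvEptDescriptor {
  ULONG64 ept_pointer;  // Ignored by the global-invalidation type
  ULONG64 reserved;     // Must be zero
};
extern "C" unsigned char AsmInvept(ULONG invept_type,
                                   const InvEptDescriptor *descriptor);

static void UtilInveptGlobalSketch() {
  InvEptDescriptor desc = {};
  AsmInvept(2, &desc);  // Type 2 = invalidate mappings for all EPTPs
}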
// Show a shadowed page for read and write
_Use_decl_annotations_ static void SbppEnablePageShadowingForRW(
    const PatchInformation& info, EptData* ept_data) {
  const auto ept_pt_entry =
      EptGetEptPtEntry(ept_data, UtilPaFromVa(info.patch_address));

  // Allow a guest to read and write as well as execute the address. Show the
  // page copied for read/write, which does not have a breakpoint but reflects
  // any modifications made by the guest.
  ept_pt_entry->fields.write_access = true;
  ept_pt_entry->fields.read_access = true;
  ept_pt_entry->fields.physial_address = UtilPfnFromPa(info.pa_base_for_rw);

  UtilInveptAll();
}
// Show a shadowed page for execution
_Use_decl_annotations_ static void SbppEnablePageShadowingForExec(
    const PatchInformation& info, EptData* ept_data) {
  const auto ept_pt_entry =
      EptGetEptPtEntry(ept_data, UtilPaFromVa(info.patch_address));

  // Allow the VMM to redirect read and write access to the address by denying
  // those accesses and handling them on EPT violation
  ept_pt_entry->fields.write_access = false;
  ept_pt_entry->fields.read_access = false;

  // Only execute access to the address is allowed now. Show the guest the
  // page copied for exec, which has an actual breakpoint.
  ept_pt_entry->fields.physial_address = UtilPfnFromPa(info.pa_base_for_exec);

  UtilInveptAll();
}
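// The two functions above form a pair: execution shows the breakpoint page,
// while any read or write triggers an EPT violation that temporarily swaps in
// the clean read/write copy. A minimal sketch of that handler logic, assuming
// the faulting guest virtual address has already been extracted from the exit
// qualification (the handler name is illustrative):
static void SbppHandleEptViolationSketch(EptData* ept_data, void* fault_va) {
  const auto info = SbppFindPatchByPage(fault_va);
  if (!info) {
    return;  // Not a shadowed page; let the normal EPT handler deal with it
  }
  // A read or write reached the exec-only page. Show the RW copy instead;
  // single-stepping (eg, via MTF) can later re-enable the exec copy.
  SbppEnablePageShadowingForRW(*info, ept_data);
}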
// See: VMM SETUP & TEAR DOWN
_Use_decl_annotations_ static bool VmpInitializeVMCS(
    ProcessorData *processor_data) {
  // Write a VMCS revision identifier
  const Ia32VmxBasicMsr vmx_basic_msr = {UtilReadMsr64(Msr::kIa32VmxBasic)};
  processor_data->vmcs_region->revision_identifier =
      vmx_basic_msr.fields.revision_identifier;

  auto vmcs_region_pa = UtilPaFromVa(processor_data->vmcs_region);
  if (__vmx_vmclear(&vmcs_region_pa)) {
    return false;
  }
  if (__vmx_vmptrld(&vmcs_region_pa)) {
    return false;
  }

  // The launch state of current VMCS is "clear"
  return true;
}
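// For context: a VMCS moves from the "clear" state to "launched" on
// VMLAUNCH, and subsequent VM-entries on that VMCS use VMRESUME. An
// illustrative call sequence (error handling elided; VmpSetupVMCS appears
// later in this section):
//
//   VmpEnterVmxMode(processor_data);    // VMXON
//   VmpInitializeVMCS(processor_data);  // VMCLEAR + VMPTRLD -> "clear"
//   VmpSetupVMCS(processor_data, ...);  // Fill guest/host/control fields
//   __vmx_vmlaunch();                   // "clear" -> "launched"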
// Execute a non-image region as a test
_Use_decl_annotations_ void MmonExecuteDoggyRegion() {
  PAGED_CODE();

#pragma warning(push)
#pragma warning(disable : 30030)
  auto code = reinterpret_cast<UCHAR *>(ExAllocatePoolWithTag(
      NonPagedPoolExecute, PAGE_SIZE, kHyperPlatformCommonPoolTag));
#pragma warning(pop)
  if (!code) {
    return;
  }
  RtlZeroMemory(code, PAGE_SIZE);
  HYPERPLATFORM_LOG_DEBUG("PoolCode = %p, Pa = %016llx", code,
                          UtilPaFromVa(code));
  code[0] = 0x90;  // nop
  code[1] = 0x90;  // nop
  code[2] = 0xc3;  // ret
  MmonpInvalidateInstructionCache(code, PAGE_SIZE);

  auto function = reinterpret_cast<void (*)(void)>(code);
  function();
  ExFreePoolWithTag(code, kHyperPlatformCommonPoolTag);
}
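// Unlike the IPI variant shown earlier, this version calls the stub directly
// as void (*)(void). With no argument for the callee to pop, a plain "ret"
// (0xc3) is correct on both x86 and x64, which is why no IsX64() branch is
// needed here.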
// Allocate and initialize all EPT entries associated with the physical_address
_Use_decl_annotations_ EptCommonEntry *EptpConstructTablesEx(
    EptCommonEntry *table, ULONG table_level, ULONG64 physical_address,
    EptData *ept_data, EptCommonEntry* reserved) {
  switch (table_level) {
    case 4: {
      // table == PML4 (512 GB)
      const auto pxe_index = EptpAddressToPxeIndex(physical_address);
      const auto ept_pml4_entry = &table[pxe_index];
      if (!ept_pml4_entry->all) {
        if (!reserved) {
          const auto ept_pdpt = EptpAllocateEptEntry(ept_data);
          if (!ept_pdpt) {
            return nullptr;
          }
          EptpInitTableEntry(ept_pml4_entry, table_level,
                             UtilPaFromVa(ept_pdpt));
        } else {
          const auto ept01_pml4_entry = &reserved[pxe_index];
          ept_pml4_entry->all = ept01_pml4_entry->all;
          reserved = reinterpret_cast<EptCommonEntry *>(
              UtilVaFromPfn(ept01_pml4_entry->fields.physial_address));
        }
      }
      return EptpConstructTablesEx(
          reinterpret_cast<EptCommonEntry *>(
              UtilVaFromPfn(ept_pml4_entry->fields.physial_address)),
          table_level - 1, physical_address, ept_data, reserved);
    }
    case 3: {
      // table == PDPT (1 GB)
      const auto ppe_index = EptpAddressToPpeIndex(physical_address);
      const auto ept_pdpt_entry = &table[ppe_index];
      if (!ept_pdpt_entry->all) {
        if (!reserved) {
          const auto ept_pdt = EptpAllocateEptEntry(ept_data);
          if (!ept_pdt) {
            return nullptr;
          }
          EptpInitTableEntry(ept_pdpt_entry, table_level,
                             UtilPaFromVa(ept_pdt));
        } else {
          const auto ept01_pdpt_entry = &reserved[ppe_index];
          ept_pdpt_entry->all = ept01_pdpt_entry->all;
          reserved = reinterpret_cast<EptCommonEntry *>(
              UtilVaFromPfn(ept01_pdpt_entry->fields.physial_address));
        }
      }
      return EptpConstructTablesEx(
          reinterpret_cast<EptCommonEntry *>(
              UtilVaFromPfn(ept_pdpt_entry->fields.physial_address)),
          table_level - 1, physical_address, ept_data, reserved);
    }
    case 2: {
      // table == PDT (2 MB)
      const auto pde_index = EptpAddressToPdeIndex(physical_address);
      const auto ept_pdt_entry = &table[pde_index];
      if (!ept_pdt_entry->all) {
        if (!reserved) {
          const auto ept_pt = EptpAllocateEptEntry(ept_data);
          if (!ept_pt) {
            return nullptr;
          }
          EptpInitTableEntry(ept_pdt_entry, table_level, UtilPaFromVa(ept_pt));
        } else {
          const auto ept01_pdt_entry = &reserved[pde_index];
          ept_pdt_entry->all = ept01_pdt_entry->all;
          reserved = reinterpret_cast<EptCommonEntry *>(
              UtilVaFromPfn(ept01_pdt_entry->fields.physial_address));
        }
      }
      return EptpConstructTablesEx(
          reinterpret_cast<EptCommonEntry *>(
              UtilVaFromPfn(ept_pdt_entry->fields.physial_address)),
          table_level - 1, physical_address, ept_data, reserved);
    }
    case 1: {
      // table == PT (4 KB)
      const auto pte_index = EptpAddressToPteIndex(physical_address);
      const auto ept_pt_entry = &table[pte_index];
      // NT_ASSERT(!ept_pt_entry->all);
      if (!ept_pt_entry->all) {
        if (!reserved) {
          EptpInitTableEntry(ept_pt_entry, table_level, physical_address);
        } else {
          const auto ept01_pt_entry = &reserved[pte_index];
          ept_pt_entry->all = ept01_pt_entry->all;
        }
      }
      return ept_pt_entry;
    }
    default:
      HYPERPLATFORM_COMMON_DBG_BREAK();
      return nullptr;
  }
}
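// The EptpAddressTo*Index helpers used above extract the usual 9-bit table
// indices from a guest-physical address. A sketch of what they compute (the
// real helpers may name their shift constants differently):
static ULONG64 EptpAddressToPxeIndexSketch(ULONG64 physical_address) {
  return (physical_address >> 39) & 0x1ff;  // PML4 index: bits 47:39
}
static ULONG64 EptpAddressToPpeIndexSketch(ULONG64 physical_address) {
  return (physical_address >> 30) & 0x1ff;  // PDPT index: bits 38:30
}
static ULONG64 EptpAddressToPdeIndexSketch(ULONG64 physical_address) {
  return (physical_address >> 21) & 0x1ff;  // PD index:   bits 29:21
}
static ULONG64 EptpAddressToPteIndexSketch(ULONG64 physical_address) {
  return (physical_address >> 12) & 0x1ff;  // PT index:   bits 20:12
}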
// Builds EPT, allocates pre-allocated entries, initializes and returns EptData
_Use_decl_annotations_ EptData *EptInitialization() {
  PAGED_CODE();

  static const auto kEptPageWalkLevel = 4ul;

  // Allocate ept_data
  const auto ept_data = reinterpret_cast<EptData *>(ExAllocatePoolWithTag(
      NonPagedPool, sizeof(EptData), kHyperPlatformCommonPoolTag));
  if (!ept_data) {
    return nullptr;
  }
  RtlZeroMemory(ept_data, sizeof(EptData));

  // Allocate EptPointer
  const auto ept_poiner = reinterpret_cast<EptPointer *>(ExAllocatePoolWithTag(
      NonPagedPool, PAGE_SIZE, kHyperPlatformCommonPoolTag));
  if (!ept_poiner) {
    ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
    return nullptr;
  }
  RtlZeroMemory(ept_poiner, PAGE_SIZE);

  // Allocate EPT_PML4 and initialize EptPointer
  const auto ept_pml4 =
      reinterpret_cast<EptCommonEntry *>(ExAllocatePoolWithTag(
          NonPagedPool, PAGE_SIZE, kHyperPlatformCommonPoolTag));
  if (!ept_pml4) {
    ExFreePoolWithTag(ept_poiner, kHyperPlatformCommonPoolTag);
    ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
    return nullptr;
  }
  RtlZeroMemory(ept_pml4, PAGE_SIZE);
  ept_poiner->fields.memory_type =
      static_cast<ULONG64>(EptpGetMemoryType(UtilPaFromVa(ept_pml4)));
  ept_poiner->fields.page_walk_length = kEptPageWalkLevel - 1;
  ept_poiner->fields.pml4_address = UtilPfnFromPa(UtilPaFromVa(ept_pml4));

  // Initialize all EPT entries for all physical memory pages
  const auto pm_ranges = UtilGetPhysicalMemoryRanges();
  for (auto run_index = 0ul; run_index < pm_ranges->number_of_runs;
       ++run_index) {
    const auto run = &pm_ranges->run[run_index];
    const auto base_addr = run->base_page * PAGE_SIZE;
    for (auto page_index = 0ull; page_index < run->page_count; ++page_index) {
      const auto indexed_addr = base_addr + page_index * PAGE_SIZE;
      const auto ept_pt_entry =
          EptpConstructTables(ept_pml4, 4, indexed_addr, nullptr);
      if (!ept_pt_entry) {
        EptpDestructTables(ept_pml4, 4);
        ExFreePoolWithTag(ept_poiner, kHyperPlatformCommonPoolTag);
        ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
        return nullptr;
      }
    }
  }

  // Initialize an EPT entry for APIC_BASE. It has to be allocated here, or
  // else the system hangs.
  const Ia32ApicBaseMsr apic_msr = {UtilReadMsr64(Msr::kIa32ApicBase)};
  if (!EptpConstructTables(ept_pml4, 4, apic_msr.fields.apic_base * PAGE_SIZE,
                           nullptr)) {
    EptpDestructTables(ept_pml4, 4);
    ExFreePoolWithTag(ept_poiner, kHyperPlatformCommonPoolTag);
    ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
    return nullptr;
  }

  // Allocate preallocated_entries
  const auto preallocated_entries_size =
      sizeof(EptCommonEntry *) * kEptpNumberOfPreallocatedEntries;
  const auto preallocated_entries = reinterpret_cast<EptCommonEntry **>(
      ExAllocatePoolWithTag(NonPagedPool, preallocated_entries_size,
                            kHyperPlatformCommonPoolTag));
  if (!preallocated_entries) {
    EptpDestructTables(ept_pml4, 4);
    ExFreePoolWithTag(ept_poiner, kHyperPlatformCommonPoolTag);
    ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
    return nullptr;
  }
  RtlZeroMemory(preallocated_entries, preallocated_entries_size);

  // And fill preallocated_entries with newly created entries
  for (auto i = 0ul; i < kEptpNumberOfPreallocatedEntries; ++i) {
    const auto ept_entry = EptpAllocateEptEntry(nullptr);
    if (!ept_entry) {
      EptpFreeUnusedPreAllocatedEntries(preallocated_entries, 0);
      EptpDestructTables(ept_pml4, 4);
      ExFreePoolWithTag(ept_poiner, kHyperPlatformCommonPoolTag);
      ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
      return nullptr;
    }
    preallocated_entries[i] = ept_entry;
  }

  // Initialization completed
  ept_data->ept_pointer = ept_poiner;
  ept_data->ept_pml4 = ept_pml4;
  ept_data->preallocated_entries = preallocated_entries;
  ept_data->preallocated_entries_count = 0;
  return ept_data;
}
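// The EptPointer built above is what later lands in the VMCS kEptPointer
// field (see VmpSetupVMCS below). For reference, the layout assumed by the
// fields used here, per the SDM:
//
//   bits 2:0    memory type for the EPT paging structures (6 = write-back)
//   bits 5:3    page-walk length minus 1 (a 4-level walk stores 3)
//   bit  6      enable accessed/dirty flags for EPT
//   bits 51:12  PFN of the EPT PML4 table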
NTSTATUS EptpBuildNestedEpt(ULONG_PTR vmcs12_va, EptData* ept_data12,
                            EptData* ept_data02) {
  NTSTATUS status = STATUS_UNSUCCESSFUL;
  do {
    EptCommonEntry* Pml4Entry = NULL;
    EptPointer* Ept02Ptr = NULL;
    EptPointer* Ept12Ptr = NULL;
    const ULONG64 ept12_ptr_value = vmcs12_va;

    if (!vmcs12_va || !ept_data12 || !ept_data02) {
      status = STATUS_INVALID_PARAMETER;
      break;
    }

    // NonPagedPoolMustSucceed is deprecated and bugchecks on failure, which
    // would make the NULL checks below dead code; plain NonPagedPool is used
    // instead so the failure paths are meaningful.
    Ept12Ptr =
        (EptPointer*)ExAllocatePoolWithTag(NonPagedPool, PAGE_SIZE, 'eptp');
    if (!Ept12Ptr) {
      status = STATUS_INSUFFICIENT_RESOURCES;
      break;
    }
    RtlZeroMemory(Ept12Ptr, PAGE_SIZE);

    Ept02Ptr =
        (EptPointer*)ExAllocatePoolWithTag(NonPagedPool, PAGE_SIZE, 'eptp');
    if (!Ept02Ptr) {
      ExFreePool(Ept12Ptr);
      status = STATUS_INSUFFICIENT_RESOURCES;
      break;
    }
    RtlZeroMemory(Ept02Ptr, PAGE_SIZE);

    Pml4Entry = (EptCommonEntry*)ExAllocatePoolWithTag(NonPagedPool, PAGE_SIZE,
                                                       'pml4');
    if (!Pml4Entry) {
      ExFreePool(Ept12Ptr);
      ExFreePool(Ept02Ptr);
      status = STATUS_INSUFFICIENT_RESOURCES;
      break;
    }
    RtlZeroMemory(Pml4Entry, PAGE_SIZE);

    Ept12Ptr->all = ept12_ptr_value;

    Pml4Entry->fields.read_access = false;
    Pml4Entry->fields.execute_access = false;
    Pml4Entry->fields.memory_type = 0;
    Pml4Entry->fields.write_access = false;

    Ept02Ptr->fields.memory_type = static_cast<ULONG>(memory_type::kWriteBack);
    Ept02Ptr->fields.pml4_address = UtilPfnFromPa(UtilPaFromVa(Pml4Entry));
    Ept02Ptr->fields.page_walk_length = 4 - 1;
    Ept02Ptr->fields.enable_accessed_and_dirty_flags = false;

    ept_data02->ept_pointer = Ept02Ptr;
    ept_data02->ept_pml4 = Pml4Entry;
    ept_data12->ept_pointer = Ept12Ptr;
    ept_data12->ept_pml4 = (EptCommonEntry*)UtilVaFromPfn(
        Ept12Ptr->fields.pml4_address);  // vmcs0-2 with ept0-2

    status = STATUS_SUCCESS;
  } while (FALSE);
  return status;
}
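// For illustration: ept_data12 wraps the guest hypervisor's own EPT pointer
// (EPT1-2, taken verbatim from the value passed in), while ept_data02 gets a
// freshly built, initially empty PML4 (EPT0-2) that can be filled lazily on
// EPT violations, which is what the "reserved" parameter of
// EptpConstructTablesEx above supports. One sanity check worth doing first,
// sketched here under the EptPointer layout used elsewhere in this section
// (the helper name is hypothetical):
static bool EptpIsValidGuestEptp(ULONG64 guest_eptp) {
  EptPointer eptp = {guest_eptp};
  return eptp.fields.page_walk_length == 3;  // Only 4-level walks supported
}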
// See: PREPARATION AND LAUNCHING A VIRTUAL MACHINE
_Use_decl_annotations_ static bool VmpSetupVMCS(
    const ProcessorData *processor_data, ULONG_PTR guest_stack_pointer,
    ULONG_PTR guest_instruction_pointer, ULONG_PTR vmm_stack_pointer) {
  Gdtr gdtr = {};
  __sgdt(&gdtr);

  Idtr idtr = {};
  __sidt(&idtr);

  // See: Algorithms for Determining VMX Capabilities
  const auto use_true_msrs = Ia32VmxBasicMsr{
      UtilReadMsr64(Msr::kIa32VmxBasic)}.fields.vmx_capability_hint;

  VmxVmEntryControls vm_entryctl_requested = {};
  vm_entryctl_requested.fields.ia32e_mode_guest = IsX64();
  VmxVmEntryControls vm_entryctl = {VmpAdjustControlValue(
      (use_true_msrs) ? Msr::kIa32VmxTrueEntryCtls : Msr::kIa32VmxEntryCtls,
      vm_entryctl_requested.all)};

  VmxVmExitControls vm_exitctl_requested = {};
  vm_exitctl_requested.fields.acknowledge_interrupt_on_exit = true;
  vm_exitctl_requested.fields.host_address_space_size = IsX64();
  VmxVmExitControls vm_exitctl = {VmpAdjustControlValue(
      (use_true_msrs) ? Msr::kIa32VmxTrueExitCtls : Msr::kIa32VmxExitCtls,
      vm_exitctl_requested.all)};

  VmxPinBasedControls vm_pinctl_requested = {};
  VmxPinBasedControls vm_pinctl = {
      VmpAdjustControlValue((use_true_msrs) ? Msr::kIa32VmxTruePinbasedCtls
                                            : Msr::kIa32VmxPinbasedCtls,
                            vm_pinctl_requested.all)};

  VmxProcessorBasedControls vm_procctl_requested = {};
  vm_procctl_requested.fields.invlpg_exiting = false;
  vm_procctl_requested.fields.rdtsc_exiting = false;
  vm_procctl_requested.fields.cr3_load_exiting = true;
  vm_procctl_requested.fields.cr8_load_exiting = false;  // NB: very frequent
  vm_procctl_requested.fields.mov_dr_exiting = true;
  vm_procctl_requested.fields.use_msr_bitmaps = true;
  vm_procctl_requested.fields.activate_secondary_control = true;
  VmxProcessorBasedControls vm_procctl = {
      VmpAdjustControlValue((use_true_msrs) ? Msr::kIa32VmxTrueProcBasedCtls
                                            : Msr::kIa32VmxProcBasedCtls,
                            vm_procctl_requested.all)};

  VmxSecondaryProcessorBasedControls vm_procctl2_requested = {};
  vm_procctl2_requested.fields.enable_ept = true;
  vm_procctl2_requested.fields.enable_rdtscp = true;  // required for Win10
  vm_procctl2_requested.fields.descriptor_table_exiting =
      true;  // required for Win10
  vm_procctl2_requested.fields.enable_xsaves_xstors = true;
  VmxSecondaryProcessorBasedControls vm_procctl2 = {VmpAdjustControlValue(
      Msr::kIa32VmxProcBasedCtls2, vm_procctl2_requested.all)};

  // Set up CR0 and CR4 bitmaps
  // - Where a bit is masked, the shadow bit appears
  // - Where a bit is not masked, the actual bit appears
  // VM-exit occurs when a guest modifies any of those fields
  Cr0 cr0_mask = {};
  Cr4 cr4_mask = {};

  // See: PDPTE Registers
  // If PAE paging would be in use following an execution of MOV to CR0 or
  // MOV to CR4 (see Section 4.1.1) and the instruction is modifying any of
  // CR0.CD, CR0.NW, CR0.PG, CR4.PAE, CR4.PGE, CR4.PSE, or CR4.SMEP; then the
  // PDPTEs are loaded from the address in CR3.
  if (UtilIsX86Pae()) {
    cr0_mask.fields.pg = true;
    cr0_mask.fields.cd = true;
    cr0_mask.fields.nw = true;
    cr4_mask.fields.pae = true;
    cr4_mask.fields.pge = true;
    cr4_mask.fields.pse = true;
    cr4_mask.fields.smep = true;
  }

  const auto exception_bitmap =
      // 1 << InterruptionVector::kBreakpointException |
      // 1 << InterruptionVector::kGeneralProtectionException |
      // 1 << InterruptionVector::kPageFaultException |
      0;

  // clang-format off
  /* 16-Bit Control Field */

  /* 16-Bit Guest-State Fields */
  auto error = VmxStatus::kOk;
  error |= UtilVmWrite(VmcsField::kGuestEsSelector, AsmReadES());
  error |= UtilVmWrite(VmcsField::kGuestCsSelector, AsmReadCS());
  error |= UtilVmWrite(VmcsField::kGuestSsSelector, AsmReadSS());
  error |= UtilVmWrite(VmcsField::kGuestDsSelector, AsmReadDS());
  error |= UtilVmWrite(VmcsField::kGuestFsSelector, AsmReadFS());
  error |= UtilVmWrite(VmcsField::kGuestGsSelector, AsmReadGS());
  error |= UtilVmWrite(VmcsField::kGuestLdtrSelector, AsmReadLDTR());
  error |= UtilVmWrite(VmcsField::kGuestTrSelector, AsmReadTR());

  /* 16-Bit Host-State Fields */
  // RPL and TI have to be 0
  error |= UtilVmWrite(VmcsField::kHostEsSelector, AsmReadES() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostCsSelector, AsmReadCS() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostSsSelector, AsmReadSS() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostDsSelector, AsmReadDS() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostFsSelector, AsmReadFS() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostGsSelector, AsmReadGS() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostTrSelector, AsmReadTR() & 0xf8);

  /* 64-Bit Control Fields */
  error |= UtilVmWrite64(VmcsField::kIoBitmapA, 0);
  error |= UtilVmWrite64(VmcsField::kIoBitmapB, 0);
  error |= UtilVmWrite64(VmcsField::kMsrBitmap,
                         UtilPaFromVa(processor_data->shared_data->msr_bitmap));
  error |= UtilVmWrite64(VmcsField::kEptPointer,
                         EptGetEptPointer(processor_data->ept_data));

  /* 64-Bit Guest-State Fields */
  error |= UtilVmWrite64(VmcsField::kVmcsLinkPointer, MAXULONG64);
  error |= UtilVmWrite64(VmcsField::kGuestIa32Debugctl,
                         UtilReadMsr64(Msr::kIa32Debugctl));
  if (UtilIsX86Pae()) {
    UtilLoadPdptes(__readcr3());
  }

  /* 32-Bit Control Fields */
  error |= UtilVmWrite(VmcsField::kPinBasedVmExecControl, vm_pinctl.all);
  error |= UtilVmWrite(VmcsField::kCpuBasedVmExecControl, vm_procctl.all);
  error |= UtilVmWrite(VmcsField::kExceptionBitmap, exception_bitmap);
  error |= UtilVmWrite(VmcsField::kPageFaultErrorCodeMask, 0);
  error |= UtilVmWrite(VmcsField::kPageFaultErrorCodeMatch, 0);
  error |= UtilVmWrite(VmcsField::kCr3TargetCount, 0);
  error |= UtilVmWrite(VmcsField::kVmExitControls, vm_exitctl.all);
  error |= UtilVmWrite(VmcsField::kVmExitMsrStoreCount, 0);
  error |= UtilVmWrite(VmcsField::kVmExitMsrLoadCount, 0);
  error |= UtilVmWrite(VmcsField::kVmEntryControls, vm_entryctl.all);
  error |= UtilVmWrite(VmcsField::kVmEntryMsrLoadCount, 0);
  error |= UtilVmWrite(VmcsField::kVmEntryIntrInfoField, 0);
  error |= UtilVmWrite(VmcsField::kSecondaryVmExecControl, vm_procctl2.all);

  /* 32-Bit Guest-State Fields */
  error |= UtilVmWrite(VmcsField::kGuestEsLimit, GetSegmentLimit(AsmReadES()));
  error |= UtilVmWrite(VmcsField::kGuestCsLimit, GetSegmentLimit(AsmReadCS()));
  error |= UtilVmWrite(VmcsField::kGuestSsLimit, GetSegmentLimit(AsmReadSS()));
  error |= UtilVmWrite(VmcsField::kGuestDsLimit, GetSegmentLimit(AsmReadDS()));
  error |= UtilVmWrite(VmcsField::kGuestFsLimit, GetSegmentLimit(AsmReadFS()));
  error |= UtilVmWrite(VmcsField::kGuestGsLimit, GetSegmentLimit(AsmReadGS()));
  error |= UtilVmWrite(VmcsField::kGuestLdtrLimit,
                       GetSegmentLimit(AsmReadLDTR()));
  error |= UtilVmWrite(VmcsField::kGuestTrLimit, GetSegmentLimit(AsmReadTR()));
  error |= UtilVmWrite(VmcsField::kGuestGdtrLimit, gdtr.limit);
  error |= UtilVmWrite(VmcsField::kGuestIdtrLimit, idtr.limit);
  error |= UtilVmWrite(VmcsField::kGuestEsArBytes,
                       VmpGetSegmentAccessRight(AsmReadES()));
  error |= UtilVmWrite(VmcsField::kGuestCsArBytes,
                       VmpGetSegmentAccessRight(AsmReadCS()));
  error |= UtilVmWrite(VmcsField::kGuestSsArBytes,
                       VmpGetSegmentAccessRight(AsmReadSS()));
  error |= UtilVmWrite(VmcsField::kGuestDsArBytes,
                       VmpGetSegmentAccessRight(AsmReadDS()));
  error |= UtilVmWrite(VmcsField::kGuestFsArBytes,
                       VmpGetSegmentAccessRight(AsmReadFS()));
  error |= UtilVmWrite(VmcsField::kGuestGsArBytes,
                       VmpGetSegmentAccessRight(AsmReadGS()));
  error |= UtilVmWrite(VmcsField::kGuestLdtrArBytes,
                       VmpGetSegmentAccessRight(AsmReadLDTR()));
  error |= UtilVmWrite(VmcsField::kGuestTrArBytes,
                       VmpGetSegmentAccessRight(AsmReadTR()));
  error |= UtilVmWrite(VmcsField::kGuestInterruptibilityInfo, 0);
  error |= UtilVmWrite(VmcsField::kGuestActivityState, 0);
  error |= UtilVmWrite(VmcsField::kGuestSysenterCs,
                       UtilReadMsr(Msr::kIa32SysenterCs));

  /* 32-Bit Host-State Field */
  error |= UtilVmWrite(VmcsField::kHostIa32SysenterCs,
                       UtilReadMsr(Msr::kIa32SysenterCs));

  /* Natural-Width Control Fields */
  error |= UtilVmWrite(VmcsField::kCr0GuestHostMask, cr0_mask.all);
  error |= UtilVmWrite(VmcsField::kCr4GuestHostMask, cr4_mask.all);
  error |= UtilVmWrite(VmcsField::kCr0ReadShadow, __readcr0());
  error |= UtilVmWrite(VmcsField::kCr4ReadShadow, __readcr4());

  /* Natural-Width Guest-State Fields */
  error |= UtilVmWrite(VmcsField::kGuestCr0, __readcr0());
  error |= UtilVmWrite(VmcsField::kGuestCr3, __readcr3());
  error |= UtilVmWrite(VmcsField::kGuestCr4, __readcr4());
#if defined(_AMD64_)
  error |= UtilVmWrite(VmcsField::kGuestEsBase, 0);
  error |= UtilVmWrite(VmcsField::kGuestCsBase, 0);
  error |= UtilVmWrite(VmcsField::kGuestSsBase, 0);
  error |= UtilVmWrite(VmcsField::kGuestDsBase, 0);
  error |= UtilVmWrite(VmcsField::kGuestFsBase, UtilReadMsr(Msr::kIa32FsBase));
  error |= UtilVmWrite(VmcsField::kGuestGsBase, UtilReadMsr(Msr::kIa32GsBase));
#else
  error |= UtilVmWrite(VmcsField::kGuestEsBase,
                       VmpGetSegmentBase(gdtr.base, AsmReadES()));
  error |= UtilVmWrite(VmcsField::kGuestCsBase,
                       VmpGetSegmentBase(gdtr.base, AsmReadCS()));
  error |= UtilVmWrite(VmcsField::kGuestSsBase,
                       VmpGetSegmentBase(gdtr.base, AsmReadSS()));
  error |= UtilVmWrite(VmcsField::kGuestDsBase,
                       VmpGetSegmentBase(gdtr.base, AsmReadDS()));
  error |= UtilVmWrite(VmcsField::kGuestFsBase,
                       VmpGetSegmentBase(gdtr.base, AsmReadFS()));
  error |= UtilVmWrite(VmcsField::kGuestGsBase,
                       VmpGetSegmentBase(gdtr.base, AsmReadGS()));
#endif
  error |= UtilVmWrite(VmcsField::kGuestLdtrBase,
                       VmpGetSegmentBase(gdtr.base, AsmReadLDTR()));
  error |= UtilVmWrite(VmcsField::kGuestTrBase,
                       VmpGetSegmentBase(gdtr.base, AsmReadTR()));
  error |= UtilVmWrite(VmcsField::kGuestGdtrBase, gdtr.base);
  error |= UtilVmWrite(VmcsField::kGuestIdtrBase, idtr.base);
  error |= UtilVmWrite(VmcsField::kGuestDr7, __readdr(7));
  error |= UtilVmWrite(VmcsField::kGuestRsp, guest_stack_pointer);
  error |= UtilVmWrite(VmcsField::kGuestRip, guest_instruction_pointer);
  error |= UtilVmWrite(VmcsField::kGuestRflags, __readeflags());
  error |= UtilVmWrite(VmcsField::kGuestSysenterEsp,
                       UtilReadMsr(Msr::kIa32SysenterEsp));
  error |= UtilVmWrite(VmcsField::kGuestSysenterEip,
                       UtilReadMsr(Msr::kIa32SysenterEip));

  /* Natural-Width Host-State Fields */
  error |= UtilVmWrite(VmcsField::kHostCr0, __readcr0());
  error |= UtilVmWrite(VmcsField::kHostCr3, __readcr3());
  error |= UtilVmWrite(VmcsField::kHostCr4, __readcr4());
#if defined(_AMD64_)
  error |= UtilVmWrite(VmcsField::kHostFsBase, UtilReadMsr(Msr::kIa32FsBase));
  error |= UtilVmWrite(VmcsField::kHostGsBase, UtilReadMsr(Msr::kIa32GsBase));
#else
  error |= UtilVmWrite(VmcsField::kHostFsBase,
                       VmpGetSegmentBase(gdtr.base, AsmReadFS()));
  error |= UtilVmWrite(VmcsField::kHostGsBase,
                       VmpGetSegmentBase(gdtr.base, AsmReadGS()));
#endif
  error |= UtilVmWrite(VmcsField::kHostTrBase,
                       VmpGetSegmentBase(gdtr.base, AsmReadTR()));
  error |= UtilVmWrite(VmcsField::kHostGdtrBase, gdtr.base);
  error |= UtilVmWrite(VmcsField::kHostIdtrBase, idtr.base);
  error |= UtilVmWrite(VmcsField::kHostIa32SysenterEsp,
                       UtilReadMsr(Msr::kIa32SysenterEsp));
  error |= UtilVmWrite(VmcsField::kHostIa32SysenterEip,
                       UtilReadMsr(Msr::kIa32SysenterEip));
  error |= UtilVmWrite(VmcsField::kHostRsp, vmm_stack_pointer);
  error |= UtilVmWrite(VmcsField::kHostRip,
                       reinterpret_cast<ULONG_PTR>(AsmVmmEntryPoint));
  // clang-format on

  const auto vmx_status = static_cast<VmxStatus>(error);
  return vmx_status == VmxStatus::kOk;
}
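// The VmpAdjustControlValue calls above apply the SDM's capability-MSR rule:
// the low 32 bits of each VMX capability MSR give the allowed-0 settings
// (bits that must be 1) and the high 32 bits give the allowed-1 settings
// (bits that must be 0 where clear). A sketch matching that rule (named
// *Sketch to avoid claiming it is the project's exact implementation):
static ULONG VmpAdjustControlValueSketch(Msr msr, ULONG requested_value) {
  LARGE_INTEGER msr_value = {};
  msr_value.QuadPart = UtilReadMsr64(msr);
  auto adjusted_value = requested_value;
  adjusted_value &= msr_value.HighPart;  // Force 0 where the allowed-1 bit is 0
  adjusted_value |= msr_value.LowPart;   // Force 1 where the allowed-0 bit is 1
  return adjusted_value;
}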