// Interrupt
_Use_decl_annotations_ static void VmmpHandleException(
    GuestContext *guest_context) {
  HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
  const VmExitInterruptionInformationField exception = {
      static_cast<ULONG32>(UtilVmRead(VmcsField::kVmExitIntrInfo))};

  if (static_cast<interruption_type>(exception.fields.interruption_type) ==
      interruption_type::kHardwareException) {
    // Hardware exception
    if (static_cast<InterruptionVector>(exception.fields.vector) ==
        InterruptionVector::kPageFaultException) {
      // #PF
      const PageFaultErrorCode fault_code = {
          static_cast<ULONG32>(UtilVmRead(VmcsField::kVmExitIntrErrorCode))};
      const auto fault_address = UtilVmRead(VmcsField::kExitQualification);

      VmEntryInterruptionInformationField inject = {};
      inject.fields.interruption_type = exception.fields.interruption_type;
      inject.fields.vector = exception.fields.vector;
      inject.fields.deliver_error_code = true;
      inject.fields.valid = true;
      AsmWriteCR2(fault_address);
      UtilVmWrite(VmcsField::kVmEntryExceptionErrorCode, fault_code.all);
      UtilVmWrite(VmcsField::kVmEntryIntrInfoField, inject.all);
      HYPERPLATFORM_LOG_INFO_SAFE("GuestIp= %p, #PF Fault= %p Code= 0x%2x",
                                  guest_context->ip, fault_address,
                                  fault_code.all);

    } else if (static_cast<InterruptionVector>(exception.fields.vector) ==
               InterruptionVector::kGeneralProtectionException) {
      // #GP
      const auto error_code =
          static_cast<ULONG32>(UtilVmRead(VmcsField::kVmExitIntrErrorCode));

      VmEntryInterruptionInformationField inject = {};
      inject.fields.interruption_type = exception.fields.interruption_type;
      inject.fields.vector = exception.fields.vector;
      inject.fields.deliver_error_code = true;
      inject.fields.valid = true;
      UtilVmWrite(VmcsField::kVmEntryExceptionErrorCode, error_code);
      UtilVmWrite(VmcsField::kVmEntryIntrInfoField, inject.all);
      HYPERPLATFORM_LOG_INFO_SAFE("GuestIp= %p, #GP Code= 0x%2x",
                                  guest_context->ip, error_code);

    } else {
      HYPERPLATFORM_COMMON_BUG_CHECK(HyperPlatformBugCheck::kUnspecified, 0, 0,
                                     0);
    }

  } else if (static_cast<interruption_type>(
                 exception.fields.interruption_type) ==
             interruption_type::kSoftwareException) {
    // Software exception
    if (static_cast<InterruptionVector>(exception.fields.vector) ==
        InterruptionVector::kBreakpointException) {
      // #BP
      VmEntryInterruptionInformationField inject = {};
      inject.fields.interruption_type = exception.fields.interruption_type;
      inject.fields.vector = exception.fields.vector;
      inject.fields.deliver_error_code = false;
      inject.fields.valid = true;
      UtilVmWrite(VmcsField::kVmEntryIntrInfoField, inject.all);
      UtilVmWrite(VmcsField::kVmEntryInstructionLen, 1);
      HYPERPLATFORM_LOG_INFO_SAFE("GuestIp= %p, #BP ", guest_context->ip);

    } else {
      HYPERPLATFORM_COMMON_BUG_CHECK(HyperPlatformBugCheck::kUnspecified, 0, 0,
                                     0);
    }
  } else {
    HYPERPLATFORM_COMMON_BUG_CHECK(HyperPlatformBugCheck::kUnspecified, 0, 0,
                                   0);
  }
}
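// The three injection sequences above share the same shape: fill a
// VmEntryInterruptionInformationField and write it (plus an optional error
// code) into the VMCS. A hypothetical helper consolidating that pattern could
// look like the sketch below; it is not part of this file, and the name
// VmmpInjectInterruption is an assumption.
static void VmmpInjectInterruption(interruption_type type,
                                   InterruptionVector vector,
                                   bool deliver_error_code,
                                   ULONG32 error_code) {
  VmEntryInterruptionInformationField inject = {};
  inject.fields.valid = true;
  inject.fields.interruption_type = static_cast<ULONG32>(type);
  inject.fields.vector = static_cast<ULONG32>(vector);
  inject.fields.deliver_error_code = deliver_error_code;
  UtilVmWrite(VmcsField::kVmEntryIntrInfoField, inject.all);
  if (deliver_error_code) {
    UtilVmWrite(VmcsField::kVmEntryExceptionErrorCode, error_code);
  }
}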
// Sets rip to the next instruction
_Use_decl_annotations_ static void VmmpAdjustGuestInstructionPointer(
    ULONG_PTR guest_ip) {
  const auto exit_instruction_length =
      UtilVmRead(VmcsField::kVmExitInstructionLen);
  UtilVmWrite(VmcsField::kGuestRip, guest_ip + exit_instruction_length);
}
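// Note: advancing RIP this way silently discards a pending single-step trap.
// If the guest executed the intercepted instruction with RFLAGS.TF set, a more
// faithful handler would also inject #DB here (e.g. via a helper like the
// hypothetical VmmpInjectInterruption() sketched above) so that a debugger
// stepping over an intercepted instruction keeps working.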
// See: PREPARATION AND LAUNCHING A VIRTUAL MACHINE
_Use_decl_annotations_ static bool VmpSetupVmcs(
    const ProcessorData *processor_data, ULONG_PTR guest_stack_pointer,
    ULONG_PTR guest_instruction_pointer, ULONG_PTR vmm_stack_pointer) {
  PAGED_CODE();

  Gdtr gdtr = {};
  __sgdt(&gdtr);

  Idtr idtr = {};
  __sidt(&idtr);

  // See: Algorithms for Determining VMX Capabilities
  const auto use_true_msrs =
      Ia32VmxBasicMsr{UtilReadMsr64(Msr::kIa32VmxBasic)}
          .fields.vmx_capability_hint;

  VmxVmEntryControls vm_entryctl_requested = {};
  vm_entryctl_requested.fields.load_debug_controls = true;
  vm_entryctl_requested.fields.ia32e_mode_guest = IsX64();
  VmxVmEntryControls vm_entryctl = {VmpAdjustControlValue(
      (use_true_msrs) ? Msr::kIa32VmxTrueEntryCtls : Msr::kIa32VmxEntryCtls,
      vm_entryctl_requested.all)};

  VmxVmExitControls vm_exitctl_requested = {};
  vm_exitctl_requested.fields.host_address_space_size = IsX64();
  vm_exitctl_requested.fields.acknowledge_interrupt_on_exit = true;
  VmxVmExitControls vm_exitctl = {VmpAdjustControlValue(
      (use_true_msrs) ? Msr::kIa32VmxTrueExitCtls : Msr::kIa32VmxExitCtls,
      vm_exitctl_requested.all)};

  VmxPinBasedControls vm_pinctl_requested = {};
  VmxPinBasedControls vm_pinctl = {
      VmpAdjustControlValue((use_true_msrs) ? Msr::kIa32VmxTruePinbasedCtls
                                            : Msr::kIa32VmxPinbasedCtls,
                            vm_pinctl_requested.all)};

  VmxProcessorBasedControls vm_procctl_requested = {};
  vm_procctl_requested.fields.invlpg_exiting = false;
  vm_procctl_requested.fields.rdtsc_exiting = false;
  vm_procctl_requested.fields.cr3_load_exiting = true;
  vm_procctl_requested.fields.cr8_load_exiting = false;  // NB: very frequent
  vm_procctl_requested.fields.mov_dr_exiting = true;
  vm_procctl_requested.fields.use_io_bitmaps = true;
  vm_procctl_requested.fields.use_msr_bitmaps = true;
  vm_procctl_requested.fields.activate_secondary_control = true;
  VmxProcessorBasedControls vm_procctl = {
      VmpAdjustControlValue((use_true_msrs) ? Msr::kIa32VmxTrueProcBasedCtls
                                            : Msr::kIa32VmxProcBasedCtls,
                            vm_procctl_requested.all)};

  VmxSecondaryProcessorBasedControls vm_procctl2_requested = {};
  vm_procctl2_requested.fields.enable_ept = true;
  vm_procctl2_requested.fields.descriptor_table_exiting = true;
  vm_procctl2_requested.fields.enable_rdtscp = true;  // for Win10
  vm_procctl2_requested.fields.enable_vpid = true;
  vm_procctl2_requested.fields.enable_xsaves_xstors = true;  // for Win10
  VmxSecondaryProcessorBasedControls vm_procctl2 = {VmpAdjustControlValue(
      Msr::kIa32VmxProcBasedCtls2, vm_procctl2_requested.all)};

  // NOTE: Comment in any of those as needed
  const auto exception_bitmap =
      // 1 << InterruptionVector::kBreakpointException |
      // 1 << InterruptionVector::kGeneralProtectionException |
      // 1 << InterruptionVector::kPageFaultException |
      0;

  // Set up CR0 and CR4 bitmaps
  // - Where a bit is masked, the shadow bit appears
  // - Where a bit is not masked, the actual bit appears
  // VM-exit occurs when a guest modifies any of those fields
  Cr0 cr0_mask = {};
  Cr0 cr0_shadow = {__readcr0()};
  Cr4 cr4_mask = {};
  Cr4 cr4_shadow = {__readcr4()};
  // For example, when we want to hide CR4.VMXE from the guest, comment in below
  // cr4_mask.fields.vmxe = true;
  // cr4_shadow.fields.vmxe = false;

  // See: PDPTE Registers
  // If PAE paging would be in use following an execution of MOV to CR0 or MOV
  // to CR4 (see Section 4.1.1) and the instruction is modifying any of CR0.CD,
  // CR0.NW, CR0.PG, CR4.PAE, CR4.PGE, CR4.PSE, or CR4.SMEP; then the PDPTEs are
  // loaded from the address in CR3.
  if (UtilIsX86Pae()) {
    cr0_mask.fields.pg = true;
    cr0_mask.fields.cd = true;
    cr0_mask.fields.nw = true;
    cr4_mask.fields.pae = true;
    cr4_mask.fields.pge = true;
    cr4_mask.fields.pse = true;
    cr4_mask.fields.smep = true;
  }

  // clang-format off
  auto error = VmxStatus::kOk;

  /* 16-Bit Control Field */
  error |= UtilVmWrite(VmcsField::kVirtualProcessorId,
                       KeGetCurrentProcessorNumberEx(nullptr) + 1);

  /* 16-Bit Guest-State Fields */
  error |= UtilVmWrite(VmcsField::kGuestEsSelector, AsmReadES());
  error |= UtilVmWrite(VmcsField::kGuestCsSelector, AsmReadCS());
  error |= UtilVmWrite(VmcsField::kGuestSsSelector, AsmReadSS());
  error |= UtilVmWrite(VmcsField::kGuestDsSelector, AsmReadDS());
  error |= UtilVmWrite(VmcsField::kGuestFsSelector, AsmReadFS());
  error |= UtilVmWrite(VmcsField::kGuestGsSelector, AsmReadGS());
  error |= UtilVmWrite(VmcsField::kGuestLdtrSelector, AsmReadLDTR());
  error |= UtilVmWrite(VmcsField::kGuestTrSelector, AsmReadTR());

  /* 16-Bit Host-State Fields */
  // RPL and TI have to be 0
  error |= UtilVmWrite(VmcsField::kHostEsSelector, AsmReadES() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostCsSelector, AsmReadCS() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostSsSelector, AsmReadSS() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostDsSelector, AsmReadDS() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostFsSelector, AsmReadFS() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostGsSelector, AsmReadGS() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostTrSelector, AsmReadTR() & 0xf8);

  /* 64-Bit Control Fields */
  error |= UtilVmWrite64(VmcsField::kIoBitmapA, UtilPaFromVa(processor_data->shared_data->io_bitmap_a));
  error |= UtilVmWrite64(VmcsField::kIoBitmapB, UtilPaFromVa(processor_data->shared_data->io_bitmap_b));
  error |= UtilVmWrite64(VmcsField::kMsrBitmap, UtilPaFromVa(processor_data->shared_data->msr_bitmap));
  error |= UtilVmWrite64(VmcsField::kEptPointer, EptGetEptPointer(processor_data->ept_data));

  /* 64-Bit Guest-State Fields */
  error |= UtilVmWrite64(VmcsField::kVmcsLinkPointer, MAXULONG64);
  error |= UtilVmWrite64(VmcsField::kGuestIa32Debugctl, UtilReadMsr64(Msr::kIa32Debugctl));
  if (UtilIsX86Pae()) {
    UtilLoadPdptes(__readcr3());
  }

  /* 32-Bit Control Fields */
  error |= UtilVmWrite(VmcsField::kPinBasedVmExecControl, vm_pinctl.all);
  error |= UtilVmWrite(VmcsField::kCpuBasedVmExecControl, vm_procctl.all);
  error |= UtilVmWrite(VmcsField::kExceptionBitmap, exception_bitmap);
  error |= UtilVmWrite(VmcsField::kVmExitControls, vm_exitctl.all);
  error |= UtilVmWrite(VmcsField::kVmEntryControls, vm_entryctl.all);
  error |= UtilVmWrite(VmcsField::kSecondaryVmExecControl, vm_procctl2.all);

  /* 32-Bit Guest-State Fields */
  error |= UtilVmWrite(VmcsField::kGuestEsLimit, GetSegmentLimit(AsmReadES()));
  error |= UtilVmWrite(VmcsField::kGuestCsLimit, GetSegmentLimit(AsmReadCS()));
  error |= UtilVmWrite(VmcsField::kGuestSsLimit, GetSegmentLimit(AsmReadSS()));
  error |= UtilVmWrite(VmcsField::kGuestDsLimit, GetSegmentLimit(AsmReadDS()));
  error |= UtilVmWrite(VmcsField::kGuestFsLimit, GetSegmentLimit(AsmReadFS()));
  error |= UtilVmWrite(VmcsField::kGuestGsLimit, GetSegmentLimit(AsmReadGS()));
  error |= UtilVmWrite(VmcsField::kGuestLdtrLimit, GetSegmentLimit(AsmReadLDTR()));
  error |= UtilVmWrite(VmcsField::kGuestTrLimit, GetSegmentLimit(AsmReadTR()));
  error |= UtilVmWrite(VmcsField::kGuestGdtrLimit, gdtr.limit);
  error |= UtilVmWrite(VmcsField::kGuestIdtrLimit, idtr.limit);
  error |= UtilVmWrite(VmcsField::kGuestEsArBytes, VmpGetSegmentAccessRight(AsmReadES()));
  error |= UtilVmWrite(VmcsField::kGuestCsArBytes, VmpGetSegmentAccessRight(AsmReadCS()));
  error |= UtilVmWrite(VmcsField::kGuestSsArBytes, VmpGetSegmentAccessRight(AsmReadSS()));
  error |= UtilVmWrite(VmcsField::kGuestDsArBytes, VmpGetSegmentAccessRight(AsmReadDS()));
  error |= UtilVmWrite(VmcsField::kGuestFsArBytes, VmpGetSegmentAccessRight(AsmReadFS()));
  error |= UtilVmWrite(VmcsField::kGuestGsArBytes, VmpGetSegmentAccessRight(AsmReadGS()));
  error |= UtilVmWrite(VmcsField::kGuestLdtrArBytes, VmpGetSegmentAccessRight(AsmReadLDTR()));
  error |= UtilVmWrite(VmcsField::kGuestTrArBytes, VmpGetSegmentAccessRight(AsmReadTR()));
  error |= UtilVmWrite(VmcsField::kGuestSysenterCs, UtilReadMsr(Msr::kIa32SysenterCs));

  /* 32-Bit Host-State Field */
  error |= UtilVmWrite(VmcsField::kHostIa32SysenterCs, UtilReadMsr(Msr::kIa32SysenterCs));

  /* Natural-Width Control Fields */
  error |= UtilVmWrite(VmcsField::kCr0GuestHostMask, cr0_mask.all);
  error |= UtilVmWrite(VmcsField::kCr4GuestHostMask, cr4_mask.all);
  error |= UtilVmWrite(VmcsField::kCr0ReadShadow, cr0_shadow.all);
  error |= UtilVmWrite(VmcsField::kCr4ReadShadow, cr4_shadow.all);

  /* Natural-Width Guest-State Fields */
  error |= UtilVmWrite(VmcsField::kGuestCr0, __readcr0());
  error |= UtilVmWrite(VmcsField::kGuestCr3, __readcr3());
  error |= UtilVmWrite(VmcsField::kGuestCr4, __readcr4());
#if defined(_AMD64_)
  error |= UtilVmWrite(VmcsField::kGuestEsBase, 0);
  error |= UtilVmWrite(VmcsField::kGuestCsBase, 0);
  error |= UtilVmWrite(VmcsField::kGuestSsBase, 0);
  error |= UtilVmWrite(VmcsField::kGuestDsBase, 0);
  error |= UtilVmWrite(VmcsField::kGuestFsBase, UtilReadMsr(Msr::kIa32FsBase));
  error |= UtilVmWrite(VmcsField::kGuestGsBase, UtilReadMsr(Msr::kIa32GsBase));
#else
  error |= UtilVmWrite(VmcsField::kGuestEsBase, VmpGetSegmentBase(gdtr.base, AsmReadES()));
  error |= UtilVmWrite(VmcsField::kGuestCsBase, VmpGetSegmentBase(gdtr.base, AsmReadCS()));
  error |= UtilVmWrite(VmcsField::kGuestSsBase, VmpGetSegmentBase(gdtr.base, AsmReadSS()));
  error |= UtilVmWrite(VmcsField::kGuestDsBase, VmpGetSegmentBase(gdtr.base, AsmReadDS()));
  error |= UtilVmWrite(VmcsField::kGuestFsBase, VmpGetSegmentBase(gdtr.base, AsmReadFS()));
  error |= UtilVmWrite(VmcsField::kGuestGsBase, VmpGetSegmentBase(gdtr.base, AsmReadGS()));
#endif
  error |= UtilVmWrite(VmcsField::kGuestLdtrBase, VmpGetSegmentBase(gdtr.base, AsmReadLDTR()));
  error |= UtilVmWrite(VmcsField::kGuestTrBase, VmpGetSegmentBase(gdtr.base, AsmReadTR()));
  error |= UtilVmWrite(VmcsField::kGuestGdtrBase, gdtr.base);
  error |= UtilVmWrite(VmcsField::kGuestIdtrBase, idtr.base);
  error |= UtilVmWrite(VmcsField::kGuestDr7, __readdr(7));
  error |= UtilVmWrite(VmcsField::kGuestRsp, guest_stack_pointer);
  error |= UtilVmWrite(VmcsField::kGuestRip, guest_instruction_pointer);
  error |= UtilVmWrite(VmcsField::kGuestRflags, __readeflags());
  error |= UtilVmWrite(VmcsField::kGuestSysenterEsp, UtilReadMsr(Msr::kIa32SysenterEsp));
  error |= UtilVmWrite(VmcsField::kGuestSysenterEip, UtilReadMsr(Msr::kIa32SysenterEip));

  /* Natural-Width Host-State Fields */
  error |= UtilVmWrite(VmcsField::kHostCr0, __readcr0());
  error |= UtilVmWrite(VmcsField::kHostCr3, __readcr3());
  error |= UtilVmWrite(VmcsField::kHostCr4, __readcr4());
#if defined(_AMD64_)
  error |= UtilVmWrite(VmcsField::kHostFsBase, UtilReadMsr(Msr::kIa32FsBase));
  error |= UtilVmWrite(VmcsField::kHostGsBase, UtilReadMsr(Msr::kIa32GsBase));
#else
  error |= UtilVmWrite(VmcsField::kHostFsBase, VmpGetSegmentBase(gdtr.base, AsmReadFS()));
  error |= UtilVmWrite(VmcsField::kHostGsBase, VmpGetSegmentBase(gdtr.base, AsmReadGS()));
#endif
  error |= UtilVmWrite(VmcsField::kHostTrBase, VmpGetSegmentBase(gdtr.base, AsmReadTR()));
  error |= UtilVmWrite(VmcsField::kHostGdtrBase, gdtr.base);
  error |= UtilVmWrite(VmcsField::kHostIdtrBase, idtr.base);
  error |= UtilVmWrite(VmcsField::kHostIa32SysenterEsp, UtilReadMsr(Msr::kIa32SysenterEsp));
  error |= UtilVmWrite(VmcsField::kHostIa32SysenterEip, UtilReadMsr(Msr::kIa32SysenterEip));
  error |= UtilVmWrite(VmcsField::kHostRsp, vmm_stack_pointer);
  error |= UtilVmWrite(VmcsField::kHostRip, reinterpret_cast<ULONG_PTR>(AsmVmmEntryPoint));
  // clang-format on

  const auto vmx_status = static_cast<VmxStatus>(error);
  return vmx_status == VmxStatus::kOk;
}
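// VmpAdjustControlValue() used throughout VmpSetupVmcs() is not shown in this
// section. For reference, a minimal sketch consistent with "Algorithms for
// Determining VMX Capabilities": in each capability MSR, the low 32 bits give
// the control bits that must be 1, and the high 32 bits give the control bits
// that are allowed to be 1.
//
//   _Use_decl_annotations_ static ULONG VmpAdjustControlValue(
//       Msr msr, ULONG requested_value) {
//     LARGE_INTEGER msr_value = {};
//     msr_value.QuadPart = UtilReadMsr64(msr);
//     auto adjusted_value = requested_value;
//     adjusted_value &= msr_value.HighPart;  // bit == 0 ==> must be 0
//     adjusted_value |= msr_value.LowPart;   // bit == 1 ==> must be 1
//     return adjusted_value;
//   }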
// MOV to / from CRx
_Use_decl_annotations_ static void VmmpHandleCrAccess(
    GuestContext *guest_context) {
  HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
  const MovCrQualification exit_qualification = {
      UtilVmRead(VmcsField::kExitQualification)};

  const auto register_used =
      VmmpSelectRegister(exit_qualification.fields.gp_register, guest_context);

  switch (static_cast<MovCrAccessType>(exit_qualification.fields.access_type)) {
    case MovCrAccessType::kMoveToCr:
      switch (exit_qualification.fields.control_register) {
        // CR0 <- Reg
        case 0: {
          HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
          if (UtilIsX86Pae()) {
            UtilLoadPdptes(UtilVmRead(VmcsField::kGuestCr3));
          }
          UtilVmWrite(VmcsField::kGuestCr0, *register_used);
          UtilVmWrite(VmcsField::kCr0ReadShadow, *register_used);
          break;
        }

        // CR3 <- Reg
        case 3: {
          HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
          if (UtilIsX86Pae()) {
            UtilLoadPdptes(*register_used);
          }
          UtilVmWrite(VmcsField::kGuestCr3, *register_used);
          break;
        }

        // CR4 <- Reg
        case 4: {
          HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
          if (UtilIsX86Pae()) {
            UtilLoadPdptes(UtilVmRead(VmcsField::kGuestCr3));
          }
          UtilVmWrite(VmcsField::kGuestCr4, *register_used);
          UtilVmWrite(VmcsField::kCr4ReadShadow, *register_used);
          break;
        }

        // CR8 <- Reg
        case 8: {
          HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
          guest_context->cr8 = *register_used;
          break;
        }

        default:
          HYPERPLATFORM_COMMON_BUG_CHECK(HyperPlatformBugCheck::kUnspecified, 0,
                                         0, 0);
          break;
      }
      break;

    case MovCrAccessType::kMoveFromCr:
      switch (exit_qualification.fields.control_register) {
        // Reg <- CR3
        case 3: {
          HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
          *register_used = UtilVmRead(VmcsField::kGuestCr3);
          break;
        }

        // Reg <- CR8
        case 8: {
          HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
          *register_used = guest_context->cr8;
          break;
        }

        default:
          HYPERPLATFORM_COMMON_BUG_CHECK(HyperPlatformBugCheck::kUnspecified, 0,
                                         0, 0);
          break;
      }
      break;

    // Unimplemented
    case MovCrAccessType::kClts:
    case MovCrAccessType::kLmsw:
    default:
      HYPERPLATFORM_COMMON_DBG_BREAK();
      break;
  }

  VmmpAdjustGuestInstructionPointer(guest_context->ip);
}
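// Note on the MOV-to-CR0/CR4 cases above: the guest-supplied value is written
// to kGuestCr0/kGuestCr4 unmodified, while VMX operation requires certain bits
// of those registers to stay fixed. A stricter handler could fold in the
// fixed-bit requirements reported by the IA32_VMX_CR0_FIXED0/1 MSRs before the
// write, still exposing the raw value through the read shadow. A sketch for
// the CR0 case, assuming the corresponding Msr enumerators are available:
//
//   const Cr0 cr0_fixed0 = {UtilReadMsr(Msr::kIa32VmxCr0Fixed0)};
//   const Cr0 cr0_fixed1 = {UtilReadMsr(Msr::kIa32VmxCr0Fixed1)};
//   Cr0 cr0 = {*register_used};
//   cr0.all &= cr0_fixed1.all;  // clear bits that must be 0
//   cr0.all |= cr0_fixed0.all;  // set bits that must be 1
//   UtilVmWrite(VmcsField::kGuestCr0, cr0.all);
//   UtilVmWrite(VmcsField::kCr0ReadShadow, *register_used);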
// LLDT, LTR, SLDT, and STR
_Use_decl_annotations_ static void VmmpHandleLdtrOrTrAccess(
    GuestContext *guest_context) {
  HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
  const LdtrOrTrInstInformation exit_qualification = {
      static_cast<ULONG32>(UtilVmRead(VmcsField::kVmxInstructionInfo))};

  // Calculate an address or a register to be used for the instruction
  const auto displacement = UtilVmRead(VmcsField::kExitQualification);
  ULONG_PTR operation_address = 0;
  if (exit_qualification.fields.register_access) {
    // Register
    const auto register_used =
        VmmpSelectRegister(exit_qualification.fields.register1, guest_context);
    operation_address = reinterpret_cast<ULONG_PTR>(register_used);
  } else {
    // Base
    ULONG_PTR base_value = 0;
    if (!exit_qualification.fields.base_register_invalid) {
      const auto register_used = VmmpSelectRegister(
          exit_qualification.fields.base_register, guest_context);
      base_value = *register_used;
    }

    // Index
    ULONG_PTR index_value = 0;
    if (!exit_qualification.fields.index_register_invalid) {
      const auto register_used = VmmpSelectRegister(
          exit_qualification.fields.index_register, guest_context);
      index_value = *register_used;
      switch (static_cast<Scaling>(exit_qualification.fields.scalling)) {
        case Scaling::kNoScaling:
          break;
        case Scaling::kScaleBy2:
          index_value = index_value * 2;
          break;
        case Scaling::kScaleBy4:
          index_value = index_value * 4;
          break;
        case Scaling::kScaleBy8:
          index_value = index_value * 8;
          break;
        default:
          break;
      }
    }

    operation_address = base_value + index_value + displacement;
    if (static_cast<AddressSize>(exit_qualification.fields.address_size) ==
        AddressSize::k32bit) {
      operation_address &= MAXULONG;
    }
  }

  // Update CR3 with that of the guest since below code is going to access
  // memory.
  const auto guest_cr3 = UtilVmRead(VmcsField::kGuestCr3);
  const auto vmm_cr3 = __readcr3();
  __writecr3(guest_cr3);

  // Emulate the instruction
  auto selector = reinterpret_cast<USHORT *>(operation_address);
  switch (static_cast<LdtrOrTrInstructionIdentity>(
      exit_qualification.fields.instruction_identity)) {
    case LdtrOrTrInstructionIdentity::kSldt:
      *selector =
          static_cast<USHORT>(UtilVmRead(VmcsField::kGuestLdtrSelector));
      break;
    case LdtrOrTrInstructionIdentity::kStr:
      *selector = static_cast<USHORT>(UtilVmRead(VmcsField::kGuestTrSelector));
      break;
    case LdtrOrTrInstructionIdentity::kLldt:
      UtilVmWrite(VmcsField::kGuestLdtrSelector, *selector);
      break;
    case LdtrOrTrInstructionIdentity::kLtr:
      UtilVmWrite(VmcsField::kGuestTrSelector, *selector);
      break;
  }

  __writecr3(vmm_cr3);
  VmmpAdjustGuestInstructionPointer(guest_context->ip);
}
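// The guest-CR3 swap pattern above is repeated in VmmpHandleGdtrOrIdtrAccess()
// below. A hypothetical RAII guard (a sketch, not used by this file) would
// restore the VMM CR3 on every exit path automatically:
class ScopedGuestCr3 {
 public:
  ScopedGuestCr3() : vmm_cr3_(__readcr3()) {
    __writecr3(UtilVmRead(VmcsField::kGuestCr3));
  }
  ~ScopedGuestCr3() { __writecr3(vmm_cr3_); }
  ScopedGuestCr3(const ScopedGuestCr3 &) = delete;
  ScopedGuestCr3 &operator=(const ScopedGuestCr3 &) = delete;

 private:
  const ULONG_PTR vmm_cr3_;
};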
// LIDT, SIDT, LGDT and SGDT
_Use_decl_annotations_ static void VmmpHandleGdtrOrIdtrAccess(
    GuestContext *guest_context) {
  HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
  const GdtrOrIdtrInstInformation exit_qualification = {
      static_cast<ULONG32>(UtilVmRead(VmcsField::kVmxInstructionInfo))};

  // Calculate an address to be used for the instruction
  const auto displacement = UtilVmRead(VmcsField::kExitQualification);

  // Base
  ULONG_PTR base_value = 0;
  if (!exit_qualification.fields.base_register_invalid) {
    const auto register_used = VmmpSelectRegister(
        exit_qualification.fields.base_register, guest_context);
    base_value = *register_used;
  }

  // Index
  ULONG_PTR index_value = 0;
  if (!exit_qualification.fields.index_register_invalid) {
    const auto register_used = VmmpSelectRegister(
        exit_qualification.fields.index_register, guest_context);
    index_value = *register_used;
    switch (static_cast<Scaling>(exit_qualification.fields.scalling)) {
      case Scaling::kNoScaling:
        break;
      case Scaling::kScaleBy2:
        index_value = index_value * 2;
        break;
      case Scaling::kScaleBy4:
        index_value = index_value * 4;
        break;
      case Scaling::kScaleBy8:
        index_value = index_value * 8;
        break;
      default:
        break;
    }
  }

  auto operation_address = base_value + index_value + displacement;
  if (static_cast<AddressSize>(exit_qualification.fields.address_size) ==
      AddressSize::k32bit) {
    operation_address &= MAXULONG;
  }

  // Update CR3 with that of the guest since below code is going to access
  // memory.
  const auto guest_cr3 = UtilVmRead(VmcsField::kGuestCr3);
  const auto vmm_cr3 = __readcr3();
  __writecr3(guest_cr3);

  // Emulate the instruction
  auto descriptor_table_reg = reinterpret_cast<Idtr *>(operation_address);
  switch (static_cast<GdtrOrIdtrInstructionIdentity>(
      exit_qualification.fields.instruction_identity)) {
    case GdtrOrIdtrInstructionIdentity::kSgdt:
      descriptor_table_reg->base = UtilVmRead(VmcsField::kGuestGdtrBase);
      descriptor_table_reg->limit =
          static_cast<unsigned short>(UtilVmRead(VmcsField::kGuestGdtrLimit));
      break;
    case GdtrOrIdtrInstructionIdentity::kSidt:
      descriptor_table_reg->base = UtilVmRead(VmcsField::kGuestIdtrBase);
      descriptor_table_reg->limit =
          static_cast<unsigned short>(UtilVmRead(VmcsField::kGuestIdtrLimit));
      break;
    case GdtrOrIdtrInstructionIdentity::kLgdt:
      UtilVmWrite(VmcsField::kGuestGdtrBase, descriptor_table_reg->base);
      UtilVmWrite(VmcsField::kGuestGdtrLimit, descriptor_table_reg->limit);
      break;
    case GdtrOrIdtrInstructionIdentity::kLidt:
      UtilVmWrite(VmcsField::kGuestIdtrBase, descriptor_table_reg->base);
      UtilVmWrite(VmcsField::kGuestIdtrLimit, descriptor_table_reg->limit);
      break;
  }

  __writecr3(vmm_cr3);
  VmmpAdjustGuestInstructionPointer(guest_context->ip);
}
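// The emulation above reinterprets operation_address as an Idtr, i.e. the
// memory-operand format that SGDT/SIDT store and LGDT/LIDT load: a 16-bit
// limit immediately followed by the base. For reference, the packed layout
// assumed here is along these lines (cf. the Idtr definition in the project's
// headers):
//
//   #include <pshpack1.h>
//   struct Idtr {
//     unsigned short limit;
//     ULONG_PTR base;
//   };
//   #include <poppack.h>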