// Dumps guest's selectors /*_Use_decl_annotations_*/ static void VmmpDumpGuestSelectors() { HYPERPLATFORM_LOG_DEBUG_SAFE( "es %04x %p %08x %08x", UtilVmRead(VmcsField::kGuestEsSelector), UtilVmRead(VmcsField::kGuestEsBase), UtilVmRead(VmcsField::kGuestEsLimit), UtilVmRead(VmcsField::kGuestEsArBytes)); HYPERPLATFORM_LOG_DEBUG_SAFE( "cs %04x %p %08x %08x", UtilVmRead(VmcsField::kGuestCsSelector), UtilVmRead(VmcsField::kGuestCsBase), UtilVmRead(VmcsField::kGuestCsLimit), UtilVmRead(VmcsField::kGuestCsArBytes)); HYPERPLATFORM_LOG_DEBUG_SAFE( "ss %04x %p %08x %08x", UtilVmRead(VmcsField::kGuestSsSelector), UtilVmRead(VmcsField::kGuestSsBase), UtilVmRead(VmcsField::kGuestSsLimit), UtilVmRead(VmcsField::kGuestSsArBytes)); HYPERPLATFORM_LOG_DEBUG_SAFE( "ds %04x %p %08x %08x", UtilVmRead(VmcsField::kGuestDsSelector), UtilVmRead(VmcsField::kGuestDsBase), UtilVmRead(VmcsField::kGuestDsLimit), UtilVmRead(VmcsField::kGuestDsArBytes)); HYPERPLATFORM_LOG_DEBUG_SAFE( "fs %04x %p %08x %08x", UtilVmRead(VmcsField::kGuestFsSelector), UtilVmRead(VmcsField::kGuestFsBase), UtilVmRead(VmcsField::kGuestFsLimit), UtilVmRead(VmcsField::kGuestFsArBytes)); HYPERPLATFORM_LOG_DEBUG_SAFE( "gs %04x %p %08x %08x", UtilVmRead(VmcsField::kGuestGsSelector), UtilVmRead(VmcsField::kGuestGsBase), UtilVmRead(VmcsField::kGuestGsLimit), UtilVmRead(VmcsField::kGuestGsArBytes)); HYPERPLATFORM_LOG_DEBUG_SAFE("ld %04x %p %08x %08x", UtilVmRead(VmcsField::kGuestLdtrSelector), UtilVmRead(VmcsField::kGuestLdtrBase), UtilVmRead(VmcsField::kGuestLdtrLimit), UtilVmRead(VmcsField::kGuestLdtrArBytes)); HYPERPLATFORM_LOG_DEBUG_SAFE( "tr %04x %p %08x %08x", UtilVmRead(VmcsField::kGuestTrSelector), UtilVmRead(VmcsField::kGuestTrBase), UtilVmRead(VmcsField::kGuestTrLimit), UtilVmRead(VmcsField::kGuestTrArBytes)); }
// Sets rip to the next instruction _Use_decl_annotations_ static void VmmpAdjustGuestInstructionPointer( ULONG_PTR guest_ip) { const auto exit_instruction_length = UtilVmRead(VmcsField::kVmExitInstructionLen); UtilVmWrite(VmcsField::kGuestRip, guest_ip + exit_instruction_length); }
// LLDT, LTR, SLDT, and STR _Use_decl_annotations_ static void VmmpHandleLdtrOrTrAccess( GuestContext *guest_context) { HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE(); const LdtrOrTrAccessQualification exit_qualification = { static_cast<ULONG32>(UtilVmRead(VmcsField::kVmxInstructionInfo))}; // Calculate an address or a register to be used for the instruction const auto displacement = UtilVmRead(VmcsField::kExitQualification); ULONG_PTR operation_address = 0; if (exit_qualification.fields.register_access) { // Register const auto register_used = VmmpSelectRegister(exit_qualification.fields.register1, guest_context); operation_address = reinterpret_cast<ULONG_PTR>(register_used); } else { // Base ULONG_PTR base_value = 0; if (!exit_qualification.fields.base_register_invalid) { const auto register_used = VmmpSelectRegister( exit_qualification.fields.base_register, guest_context); base_value = *register_used; } // Index ULONG_PTR index_value = 0; if (!exit_qualification.fields.index_register_invalid) { const auto register_used = VmmpSelectRegister( exit_qualification.fields.index_register, guest_context); index_value = *register_used; switch ( static_cast<GdtrOrIdtrScaling>(exit_qualification.fields.scalling)) { case GdtrOrIdtrScaling::kNoScaling: index_value = index_value; break; case GdtrOrIdtrScaling::kScaleBy2: index_value = index_value * 2; break; case GdtrOrIdtrScaling::kScaleBy4: index_value = index_value * 4; break; case GdtrOrIdtrScaling::kScaleBy8: index_value = index_value * 8; break; default: break; } } operation_address = base_value + index_value + displacement; if (static_cast<GdtrOrIdtrAaddressSize>( exit_qualification.fields.address_size) == GdtrOrIdtrAaddressSize::k32bit) { operation_address &= MAXULONG; } } // Update CR3 with that of the guest since below code is going to access // memory. 
const auto guest_cr3 = UtilVmRead(VmcsField::kGuestCr3); const auto vmm_cr3 = __readcr3(); __writecr3(guest_cr3); // Emulate the instruction auto selector = reinterpret_cast<USHORT *>(operation_address); switch (static_cast<LdtrOrTrInstructionIdentity>( exit_qualification.fields.instruction_identity)) { case LdtrOrTrInstructionIdentity::kSldt: *selector = static_cast<USHORT>(UtilVmRead(VmcsField::kGuestLdtrSelector)); break; case LdtrOrTrInstructionIdentity::kStr: *selector = static_cast<USHORT>(UtilVmRead(VmcsField::kGuestTrSelector)); break; case LdtrOrTrInstructionIdentity::kLldt: UtilVmWrite(VmcsField::kGuestLdtrSelector, *selector); break; case LdtrOrTrInstructionIdentity::kLtr: UtilVmWrite(VmcsField::kGuestTrSelector, *selector); break; } __writecr3(vmm_cr3); VmmpAdjustGuestInstructionPointer(guest_context->ip); }
// MOV to / from CRx _Use_decl_annotations_ static void VmmpHandleCrAccess( GuestContext *guest_context) { HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE(); const MovCrQualification exit_qualification = { UtilVmRead(VmcsField::kExitQualification)}; const auto register_used = VmmpSelectRegister(exit_qualification.fields.gp_register, guest_context); switch (static_cast<MovCrAccessType>(exit_qualification.fields.access_type)) { case MovCrAccessType::kMoveToCr: { switch (exit_qualification.fields.control_register) { // CR0 <- Reg case 0: if (UtilIsX86Pae()) { UtilLoadPdptes(UtilVmRead(VmcsField::kGuestCr3)); } UtilVmWrite(VmcsField::kGuestCr0, *register_used); UtilVmWrite(VmcsField::kCr0ReadShadow, *register_used); break; // CR3 <- Reg case 3: if (UtilIsX86Pae()) { UtilLoadPdptes(*register_used); } UtilVmWrite(VmcsField::kGuestCr3, *register_used); break; // CR4 <- Reg case 4: if (UtilIsX86Pae()) { UtilLoadPdptes(UtilVmRead(VmcsField::kGuestCr3)); } UtilVmWrite(VmcsField::kGuestCr4, *register_used); UtilVmWrite(VmcsField::kCr4ReadShadow, *register_used); break; // CR8 <- Reg case 8: guest_context->cr8 = *register_used; break; default: HYPERPLATFORM_COMMON_BUG_CHECK(HyperPlatformBugCheck::kUnknown, 0, 0, 0); break; } } break; // Note that MOV from CRx should never cause VM-exit with the current // settings. This is just for case when you enable it. case MovCrAccessType::kMoveFromCr: { switch (exit_qualification.fields.control_register) { // Reg <- CR3 case 3: *register_used = UtilVmRead(VmcsField::kGuestCr3); break; // Reg <- CR8 case 8: *register_used = guest_context->cr8; break; default: HYPERPLATFORM_COMMON_BUG_CHECK(HyperPlatformBugCheck::kUnknown, 0, 0, 0); break; } } break; // Unimplemented case MovCrAccessType::kClts: case MovCrAccessType::kLmsw: default: HYPERPLATFORM_COMMON_DBG_BREAK(); break; } VmmpAdjustGuestInstructionPointer(guest_context->ip); }
// LIDT, SIDT, LGDT and SGDT _Use_decl_annotations_ static void VmmpHandleGdtrOrIdtrAccess( GuestContext *guest_context) { HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE(); const GdtrOrIdtrAccessQualification exit_qualification = { static_cast<ULONG32>(UtilVmRead(VmcsField::kVmxInstructionInfo))}; // Calculate an address to be used for the instruction const auto displacement = UtilVmRead(VmcsField::kExitQualification); // Base ULONG_PTR base_value = 0; if (!exit_qualification.fields.base_register_invalid) { const auto register_used = VmmpSelectRegister( exit_qualification.fields.base_register, guest_context); base_value = *register_used; } // Index ULONG_PTR index_value = 0; if (!exit_qualification.fields.index_register_invalid) { const auto register_used = VmmpSelectRegister( exit_qualification.fields.index_register, guest_context); index_value = *register_used; switch ( static_cast<GdtrOrIdtrScaling>(exit_qualification.fields.scalling)) { case GdtrOrIdtrScaling::kNoScaling: index_value = index_value; break; case GdtrOrIdtrScaling::kScaleBy2: index_value = index_value * 2; break; case GdtrOrIdtrScaling::kScaleBy4: index_value = index_value * 4; break; case GdtrOrIdtrScaling::kScaleBy8: index_value = index_value * 8; break; default: break; } } auto operation_address = base_value + index_value + displacement; if (static_cast<GdtrOrIdtrAaddressSize>( exit_qualification.fields.address_size) == GdtrOrIdtrAaddressSize::k32bit) { operation_address &= MAXULONG; } // Update CR3 with that of the guest since below code is going to access // memory. 
const auto guest_cr3 = UtilVmRead(VmcsField::kGuestCr3); const auto vmm_cr3 = __readcr3(); __writecr3(guest_cr3); // Emulate the instruction auto descriptor_table_reg = reinterpret_cast<Idtr *>(operation_address); switch (static_cast<GdtrOrIdtrInstructionIdentity>( exit_qualification.fields.instruction_identity)) { case GdtrOrIdtrInstructionIdentity::kSgdt: descriptor_table_reg->base = UtilVmRead(VmcsField::kGuestGdtrBase); descriptor_table_reg->limit = static_cast<unsigned short>(UtilVmRead(VmcsField::kGuestGdtrLimit)); break; case GdtrOrIdtrInstructionIdentity::kSidt: descriptor_table_reg->base = UtilVmRead(VmcsField::kGuestIdtrBase); descriptor_table_reg->limit = static_cast<unsigned short>(UtilVmRead(VmcsField::kGuestIdtrLimit)); break; case GdtrOrIdtrInstructionIdentity::kLgdt: UtilVmWrite(VmcsField::kGuestGdtrBase, descriptor_table_reg->base); UtilVmWrite(VmcsField::kGuestGdtrLimit, descriptor_table_reg->limit); break; case GdtrOrIdtrInstructionIdentity::kLidt: UtilVmWrite(VmcsField::kGuestIdtrBase, descriptor_table_reg->base); UtilVmWrite(VmcsField::kGuestIdtrLimit, descriptor_table_reg->limit); break; } __writecr3(vmm_cr3); VmmpAdjustGuestInstructionPointer(guest_context->ip); }
// Interrupt _Use_decl_annotations_ static void VmmpHandleException( GuestContext *guest_context) { HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE(); const VmExitInterruptionInformationField exception = { static_cast<ULONG32>(UtilVmRead(VmcsField::kVmExitIntrInfo))}; if (static_cast<interruption_type>(exception.fields.interruption_type) == interruption_type::kHardwareException) { // Hardware exception if (static_cast<InterruptionVector>(exception.fields.vector) == InterruptionVector::kPageFaultException) { // #PF const PageFaultErrorCode fault_code = { static_cast<ULONG32>(UtilVmRead(VmcsField::kVmExitIntrErrorCode))}; const auto fault_address = UtilVmRead(VmcsField::kExitQualification); VmEntryInterruptionInformationField inject = {}; inject.fields.interruption_type = exception.fields.interruption_type; inject.fields.vector = exception.fields.vector; inject.fields.deliver_error_code = true; inject.fields.valid = true; AsmWriteCR2(fault_address); UtilVmWrite(VmcsField::kVmEntryExceptionErrorCode, fault_code.all); UtilVmWrite(VmcsField::kVmEntryIntrInfoField, inject.all); HYPERPLATFORM_LOG_INFO_SAFE("GuestIp= %p, #PF Fault= %p Code= 0x%2x", guest_context->ip, fault_address, fault_code); } else if (static_cast<InterruptionVector>(exception.fields.vector) == InterruptionVector::kGeneralProtectionException) { // # GP const auto error_code = static_cast<ULONG32>(UtilVmRead(VmcsField::kVmExitIntrErrorCode)); VmEntryInterruptionInformationField inject = {}; inject.fields.interruption_type = exception.fields.interruption_type; inject.fields.vector = exception.fields.vector; inject.fields.deliver_error_code = true; inject.fields.valid = true; UtilVmWrite(VmcsField::kVmEntryExceptionErrorCode, error_code); UtilVmWrite(VmcsField::kVmEntryIntrInfoField, inject.all); HYPERPLATFORM_LOG_INFO_SAFE("GuestIp= %p, #GP Code= 0x%2x", guest_context->ip, error_code); } else { HYPERPLATFORM_COMMON_BUG_CHECK(HyperPlatformBugCheck::kUnknown, 0, 0, 0); } } else if 
(static_cast<interruption_type>( exception.fields.interruption_type) == interruption_type::kSoftwareException) { // Software exception if (static_cast<InterruptionVector>(exception.fields.vector) == InterruptionVector::kBreakpointException) { // #BP VmEntryInterruptionInformationField inject = {}; inject.fields.interruption_type = exception.fields.interruption_type; inject.fields.vector = exception.fields.vector; inject.fields.deliver_error_code = false; inject.fields.valid = true; UtilVmWrite(VmcsField::kVmEntryIntrInfoField, inject.all); UtilVmWrite(VmcsField::kVmEntryInstructionLen, 1); HYPERPLATFORM_LOG_INFO_SAFE("GuestIp= %p, #BP ", guest_context->ip); } else { HYPERPLATFORM_COMMON_BUG_CHECK(HyperPlatformBugCheck::kUnknown, 0, 0, 0); } } else { HYPERPLATFORM_COMMON_BUG_CHECK(HyperPlatformBugCheck::kUnknown, 0, 0, 0); } }
// Dispatches VM-exit to a corresponding handler _Use_decl_annotations_ static void VmmpHandleVmExit( GuestContext *guest_context) { HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE(); const VmExitInformation exit_reason = { static_cast<ULONG32>(UtilVmRead(VmcsField::kVmExitReason))}; if (kVmmpEnableRecordVmExit) { // Save them for ease of trouble shooting const auto processor = KeGetCurrentProcessorNumber(); auto &index = g_vmmp_next_history_index[processor]; auto &history = g_vmmp_vm_exit_history[processor][index]; history.gp_regs = *guest_context->gp_regs; history.ip = guest_context->ip; history.exit_reason = exit_reason; history.exit_qualification = UtilVmRead(VmcsField::kExitQualification); history.instruction_info = UtilVmRead(VmcsField::kVmxInstructionInfo); if (++index == kVmmpNumberOfRecords) { index = 0; } } switch (exit_reason.fields.reason) { case VmxExitReason::kExceptionOrNmi: VmmpHandleException(guest_context); break; case VmxExitReason::kTripleFault: VmmpHandleTripleFault(guest_context); break; case VmxExitReason::kCpuid: VmmpHandleCpuid(guest_context); break; case VmxExitReason::kInvd: VmmpHandleInvalidateInternalCaches(guest_context); break; case VmxExitReason::kInvlpg: VmmpHandleInvalidateTLBEntry(guest_context); break; case VmxExitReason::kRdtsc: VmmpHandleRdtsc(guest_context); break; case VmxExitReason::kCrAccess: VmmpHandleCrAccess(guest_context); break; case VmxExitReason::kDrAccess: VmmpHandleDrAccess(guest_context); break; case VmxExitReason::kMsrRead: VmmpHandleMsrReadAccess(guest_context); break; case VmxExitReason::kMsrWrite: VmmpHandleMsrWriteAccess(guest_context); break; case VmxExitReason::kGdtrOrIdtrAccess: VmmpHandleGdtrOrIdtrAccess(guest_context); break; case VmxExitReason::kLdtrOrTrAccess: VmmpHandleLdtrOrTrAccess(guest_context); break; case VmxExitReason::kEptViolation: VmmpHandleEptViolation(guest_context); break; case VmxExitReason::kEptMisconfig: VmmpHandleEptMisconfig(guest_context); break; case VmxExitReason::kVmcall: 
VmmpHandleVmCall(guest_context); break; case VmxExitReason::kVmclear: case VmxExitReason::kVmlaunch: case VmxExitReason::kVmptrld: case VmxExitReason::kVmptrst: case VmxExitReason::kVmread: case VmxExitReason::kVmresume: case VmxExitReason::kVmwrite: case VmxExitReason::kVmoff: case VmxExitReason::kVmon: VmmpHandleVmx(guest_context); break; case VmxExitReason::kRdtscp: VmmpHandleRdtscp(guest_context); break; case VmxExitReason::kXsetbv: VmmpHandleXsetbv(guest_context); break; default: VmmpHandleUnexpectedExit(guest_context); break; } }
// Handles #BP. Determines if the #BP was caused by a shadow breakpoint, and
// if so, runs its handler, switches the page view to the read/write shadow
// page and sets the monitor trap flag to execute only the one instruction
// that is located on the read/write shadow page. Then saves the breakpoint
// info as the last event. Returns true when the #BP was ours (and must not
// be delivered to the guest), false when it should be passed through.
_Use_decl_annotations_ bool SbpHandleBreakpoint(EptData* ept_data,
                                                void* guest_ip,
                                                GpRegisters* gp_regs) {
  // Ignore the #BP entirely unless shadow breakpointing is active and the
  // faulting address matches one of our registered patches.
  if (!SbppIsSbpActive()) {
    return false;
  }

  const auto info = SbppFindPatchInfoByAddress(guest_ip);
  if (!info) {
    return false;
  }

  if (!SbppIsShadowBreakpoint(*info)) {
    return false;
  }

  // DdiMon is unable to handle it at elevated IRQL; fail fast.
  if (KeGetCurrentIrql() > DISPATCH_LEVEL) {
    HYPERPLATFORM_COMMON_BUG_CHECK(HyperPlatformBugCheck::kUnspecified, 0, 0,
                                   0);
  }

  // VMM has to change the current CR3 to a guest's CR3 in order to access
  // memory address because VMM runs with System's CR3 saved in and restored
  // from VmcsField::kHostCr3, while a guest's CR3 depends on thread contexts.
  // Without using guest's CR3, it is likely that any user-address space is
  // inaccessible from a VMM, ending up with a bug check.
  const auto guest_cr3 = UtilVmRead(VmcsField::kGuestCr3);
  const auto vmm_cr3 = __readcr3();

  if (info->type == BreakpointType::kPre) {
    // Pre breakpoint: run the handler under the guest's CR3, then expose the
    // clean (read/write) page and single-step it via the monitor trap flag.
    __writecr3(guest_cr3);
    info->handler(*info, ept_data, gp_regs, UtilVmRead(VmcsField::kGuestRsp));
    __writecr3(vmm_cr3);
    SbppEnablePageShadowingForRW(*info, ept_data);
    SbppSetMonitorTrapFlag(true);
    SbppSaveLastPatchInfo(*info);

  } else {
    // Post breakpoint
    if (info->target_tid == PsGetCurrentThreadId()) {
      // It is a target thread. Execute the post handler and let it continue
      // subsequent instructions.
      __writecr3(guest_cr3);
      info->handler(*info, ept_data, gp_regs, UtilVmRead(VmcsField::kGuestRsp));
      __writecr3(vmm_cr3);
      SbppDisablePageShadowing(*info, ept_data);
      SbppDeleteBreakpointFromList(*info);
    } else {
      // It is not. Let it run one instruction without the breakpoint.
      SbppEnablePageShadowingForRW(*info, ept_data);
      SbppSetMonitorTrapFlag(true);
      SbppSaveLastPatchInfo(*info);
    }
  }

  // Yes, it was caused by shadow breakpoint. Do not deliver the #BP to a
  // guest.
  return true;
}
// MOV to / from CRx
// Emulates control-register moves for the guest: writes are reflected into
// the VMCS guest state (and the CR0/CR4 read shadows), CR8 is kept in
// GuestContext, and reads are served back from those locations. NOTE(review):
// this appears to be a second variant of VmmpHandleCrAccess (per-case
// performance scopes, kUnspecified bug-check code) -- presumably from a
// different translation unit than the earlier variant; confirm.
_Use_decl_annotations_ static void VmmpHandleCrAccess(
    GuestContext *guest_context) {
  HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
  const MovCrQualification exit_qualification = {
      UtilVmRead(VmcsField::kExitQualification)};

  // The GP register named by the qualification, resolved to its saved copy.
  const auto register_used =
      VmmpSelectRegister(exit_qualification.fields.gp_register, guest_context);

  switch (static_cast<MovCrAccessType>(exit_qualification.fields.access_type)) {
    case MovCrAccessType::kMoveToCr:
      switch (exit_qualification.fields.control_register) {
        // CR0 <- Reg
        case 0: {
          HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
          // On x86 PAE, the PDPTE registers must be reloaded whenever paging
          // control state changes.
          if (UtilIsX86Pae()) {
            UtilLoadPdptes(UtilVmRead(VmcsField::kGuestCr3));
          }
          UtilVmWrite(VmcsField::kGuestCr0, *register_used);
          UtilVmWrite(VmcsField::kCr0ReadShadow, *register_used);
          break;
        }

        // CR3 <- Reg
        case 3: {
          HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
          if (UtilIsX86Pae()) {
            UtilLoadPdptes(*register_used);
          }
          UtilVmWrite(VmcsField::kGuestCr3, *register_used);
          break;
        }

        // CR4 <- Reg
        case 4: {
          HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
          if (UtilIsX86Pae()) {
            UtilLoadPdptes(UtilVmRead(VmcsField::kGuestCr3));
          }
          UtilVmWrite(VmcsField::kGuestCr4, *register_used);
          UtilVmWrite(VmcsField::kCr4ReadShadow, *register_used);
          break;
        }

        // CR8 <- Reg
        case 8: {
          HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
          // CR8 (TPR) is tracked in GuestContext rather than the VMCS.
          guest_context->cr8 = *register_used;
          break;
        }

        default:
          HYPERPLATFORM_COMMON_BUG_CHECK(HyperPlatformBugCheck::kUnspecified,
                                         0, 0, 0);
          break;
      }
      break;

    case MovCrAccessType::kMoveFromCr:
      switch (exit_qualification.fields.control_register) {
        // Reg <- CR3
        case 3: {
          HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
          *register_used = UtilVmRead(VmcsField::kGuestCr3);
          break;
        }

        // Reg <- CR8
        case 8: {
          HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
          *register_used = guest_context->cr8;
          break;
        }

        default:
          HYPERPLATFORM_COMMON_BUG_CHECK(HyperPlatformBugCheck::kUnspecified,
                                         0, 0, 0);
          break;
      }
      break;

    // Unimplemented
    case MovCrAccessType::kClts:
    case MovCrAccessType::kLmsw:
    default:
      HYPERPLATFORM_COMMON_DBG_BREAK();
      break;
  }

  VmmpAdjustGuestInstructionPointer(guest_context->ip);
}