void GetGuestState()
{
    PHYSICAL_ADDRESS HighestAcceptableAddress;
    HighestAcceptableAddress.QuadPart = 0xFFFFFFFF00000000;

    g_GuestState.CR0 = __readcr0();
    g_GuestState.CR3 = __readcr3();
    g_GuestState.CR4 = __readcr4() | CR4_VMXE;
    g_GuestState.RFLAGS = __readeflags();

    g_GuestState.Cs = __readcs();
    g_GuestState.Ds = __readds();
    g_GuestState.Es = __reades();
    g_GuestState.Ss = __readss();
    g_GuestState.Fs = __readfs();
    g_GuestState.Gs = __readgs();
    g_GuestState.Ldtr = __sldt();
    g_GuestState.Tr = __str();

    __sgdt(&(g_GuestState.Gdtr));
    __sidt(&(g_GuestState.Idtr));

    g_GuestState.S_CS = __readmsr(IA32_SYSENTER_CS);
    g_GuestState.SEIP = __readmsr(IA32_SYSENTER_EIP);
    g_GuestState.SESP = __readmsr(IA32_SYSENTER_ESP);

    g_GuestState.VMXON = MmAllocateNonCachedMemory(PAGE_SIZE);
    RtlZeroMemory(g_GuestState.VMXON, PAGE_SIZE);

    g_GuestState.VMCS = MmAllocateNonCachedMemory(PAGE_SIZE);
    RtlZeroMemory(g_GuestState.VMCS, PAGE_SIZE);

    // Allocates non-paged memory that is guaranteed to be physically
    // contiguous; release it with MmFreeContiguousMemory.
    g_GuestState.hvStack =
        MmAllocateContiguousMemory(PAGE_SIZE * 2, HighestAcceptableAddress);
    RtlZeroMemory(g_GuestState.hvStack, PAGE_SIZE * 2);
}
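// The regions allocated above are not yet usable as-is: VMXON and VMPTRLD
// fail unless the first 32 bits of each region hold the VMCS revision
// identifier from IA32_VMX_BASIC. A minimal sketch of that companion step --
// the MSR index and bit mask follow the Intel SDM; the helper name is
// illustrative, not part of the original code:
#define IA32_VMX_BASIC 0x480

void StampVmcsRevisionId()
{
    // Bits 30:0 of IA32_VMX_BASIC are the VMCS revision identifier.
    ULONG revisionId = (ULONG)(__readmsr(IA32_VMX_BASIC) & 0x7FFFFFFF);
    *(ULONG *)g_GuestState.VMXON = revisionId;
    *(ULONG *)g_GuestState.VMCS = revisionId;
}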
NTSTATUS ksm_init(void)
{
	NTSTATUS status;

#ifndef DBG
	/* This prevents loading in a nested environment.  */
	int info[4];
	__cpuid(info, 1);
	if (!(info[2] & (1 << 16)))
		return STATUS_HV_CPUID_FEATURE_VALIDATION_ERROR;

	if (__readcr4() & X86_CR4_VMXE)
		return STATUS_HV_FEATURE_UNAVAILABLE;
#endif

	if (!ept_check_capabilitiy())
		return STATUS_HV_FEATURE_UNAVAILABLE;

	if (!(__readmsr(MSR_IA32_FEATURE_CONTROL) & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX))
		return STATUS_HV_FEATURE_UNAVAILABLE;

	ksm.hotplug_cpu = KeRegisterProcessorChangeCallback(ksm_hotplug_cpu, &status, 0);
	if (!ksm.hotplug_cpu)
		return status;

	status = init_msr_bitmap(&ksm);
	if (!NT_SUCCESS(status))
		return status;

	/* Caller cr3 (could be user)  */
	ksm.origin_cr3 = __readcr3();
	ksm_init_phi_list();

	STATIC_CALL_DPC(__call_init, &ksm);
	return STATIC_DPC_RET();
}
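/* For reference, the architectural VMX-support probe is CPUID.1:ECX[5];
 * a minimal sketch, independent of the nested-environment heuristics used
 * above (the function name is illustrative): */
static bool cpu_has_vmx(void)
{
	int info[4];
	__cpuid(info, 1);
	return (info[2] & (1 << 5)) != 0;
}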
// __asm__ blocks are only checked for inline functions that end up being
// emitted, so call functions with __asm__ blocks to make sure their inline
// assembly parses.
void f() {
  __movsb(0, 0, 0);
  __movsd(0, 0, 0);
  __movsw(0, 0, 0);
  __stosd(0, 0, 0);
  __stosw(0, 0, 0);
#ifdef _M_X64
  __movsq(0, 0, 0);
  __stosq(0, 0, 0);
#endif

  int info[4];
  __cpuid(info, 0);
  __cpuidex(info, 0, 0);
  _xgetbv(0);
  __halt();
  __nop();
  __readmsr(0);

  // FIXME: Call these in 64-bit too once the intrinsics have been fixed to
  // work there, PR19301
#ifndef _M_X64
  __readcr3();
  __writecr3(0);
#endif

#ifdef _M_ARM
  __dmb(_ARM_BARRIER_ISHST);
#endif
}
static NTSTATUS __ksm_init_cpu(struct ksm *k)
{
	NTSTATUS status = set_lock_bit();
	if (!NT_SUCCESS(status))
		return status;

	k->kernel_cr3 = __readcr3();
	return __vmx_vminit(vcpu_init, k) ? STATUS_SUCCESS : STATUS_UNSUCCESSFUL;
}
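/* set_lock_bit() is defined elsewhere in ksm; this is only a plausible
 * sketch of it, following the usual IA32_FEATURE_CONTROL protocol (bit 0 =
 * lock, bit 2 = VMXON outside SMX; if the MSR is still unlocked, the driver
 * may enable and lock the bits itself). FEATURE_CONTROL_LOCKED is assumed
 * to be defined as bit 0. */
static NTSTATUS set_lock_bit(void)
{
	u64 feat = __readmsr(MSR_IA32_FEATURE_CONTROL);

	if (feat & FEATURE_CONTROL_LOCKED)
		return (feat & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX) ?
			STATUS_SUCCESS : STATUS_HV_FEATURE_UNAVAILABLE;

	__writemsr(MSR_IA32_FEATURE_CONTROL,
		   feat | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX |
			  FEATURE_CONTROL_LOCKED);
	return STATUS_SUCCESS;
}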
// Performs an I/O instruction according to the parameters
_Use_decl_annotations_ static void VmmpIoWrapper(bool to_memory, bool is_string,
                                                 SIZE_T size_of_access,
                                                 unsigned short port,
                                                 void *address,
                                                 unsigned long count) {
  NT_ASSERT(size_of_access == 1 || size_of_access == 2 || size_of_access == 4);

  // Update CR3 with that of the guest since below code is going to access
  // memory.
  const auto guest_cr3 = UtilVmRead(VmcsField::kGuestCr3);
  const auto vmm_cr3 = __readcr3();
  __writecr3(guest_cr3);

  // NOTE: the string variants must pair with is_string == true; the flattened
  // source had the branch bodies swapped against their comments.
  // clang-format off
  if (to_memory) {
    if (is_string) {
      // INS
      switch (size_of_access) {
        case 1: __inbytestring(port, reinterpret_cast<UCHAR*>(address), count); break;
        case 2: __inwordstring(port, reinterpret_cast<USHORT*>(address), count); break;
        case 4: __indwordstring(port, reinterpret_cast<ULONG*>(address), count); break;
      }
    } else {
      // IN
      switch (size_of_access) {
        case 1: *reinterpret_cast<UCHAR*>(address) = __inbyte(port); break;
        case 2: *reinterpret_cast<USHORT*>(address) = __inword(port); break;
        case 4: *reinterpret_cast<ULONG*>(address) = __indword(port); break;
      }
    }
  } else {
    if (is_string) {
      // OUTS
      switch (size_of_access) {
        case 1: __outbytestring(port, reinterpret_cast<UCHAR*>(address), count); break;
        case 2: __outwordstring(port, reinterpret_cast<USHORT*>(address), count); break;
        case 4: __outdwordstring(port, reinterpret_cast<ULONG*>(address), count); break;
      }
    } else {
      // OUT
      switch (size_of_access) {
        case 1: __outbyte(port, *reinterpret_cast<UCHAR*>(address)); break;
        case 2: __outword(port, *reinterpret_cast<USHORT*>(address)); break;
        case 4: __outdword(port, *reinterpret_cast<ULONG*>(address)); break;
      }
    }
  }
  // clang-format on

  __writecr3(vmm_cr3);
}
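// Caller's-side sketch: the arguments passed to VmmpIoWrapper come from the
// I/O-instruction exit qualification, whose layout the Intel SDM defines as
// below. The union name is illustrative (HyperPlatform defines its own type).
union IoInstQualification {
  ULONG_PTR all;
  struct {
    ULONG_PTR size_of_access : 3;    // 0: 1-byte, 1: 2-byte, 3: 4-byte
    ULONG_PTR direction : 1;         // 0: OUT, 1: IN
    ULONG_PTR string_instruction : 1;
    ULONG_PTR rep_prefixed : 1;
    ULONG_PTR operand_encoding : 1;  // 0: DX, 1: immediate
    ULONG_PTR reserved : 9;
    ULONG_PTR port_number : 16;
  } fields;
};
// Usage: VmmpIoWrapper(qual.fields.direction, qual.fields.string_instruction,
//                      qual.fields.size_of_access + 1, ...), since the size
//                      encoding (0, 1, 3) maps to access widths (1, 2, 4).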
void save_cpu_state(mon_guest_cpu_startup_state_t *s)
{
	ia32_gdtr_t gdtr;
	ia32_idtr_t idtr;
	ia32_selector_t sel;

	s->size_of_this_struct = sizeof(mon_guest_cpu_startup_state_t);
	s->version_of_this_struct = MON_GUEST_CPU_STARTUP_STATE_VERSION;

	__readgdtr(&gdtr);
	__sidt(&idtr);
	s->control.gdtr.base = (uint64_t)gdtr.base;
	s->control.gdtr.limit = (uint32_t)gdtr.limit;
	s->control.idtr.base = (uint64_t)idtr.base;
	s->control.idtr.limit = (uint32_t)idtr.limit;

	s->control.cr[IA32_CTRL_CR0] = __readcr0();
	s->control.cr[IA32_CTRL_CR2] = __readcr2();
	s->control.cr[IA32_CTRL_CR3] = __readcr3();
	s->control.cr[IA32_CTRL_CR4] = __readcr4();

	s->msr.msr_sysenter_cs = (uint32_t)__readmsr(IA32_MSR_SYSENTER_CS);
	s->msr.msr_sysenter_eip = __readmsr(IA32_MSR_SYSENTER_EIP);
	s->msr.msr_sysenter_esp = __readmsr(IA32_MSR_SYSENTER_ESP);
	s->msr.msr_efer = __readmsr(IA32_MSR_EFER);
	s->msr.msr_pat = __readmsr(IA32_MSR_PAT);
	s->msr.msr_debugctl = __readmsr(IA32_MSR_DEBUGCTL);
	s->msr.pending_exceptions = 0;
	s->msr.interruptibility_state = 0;
	s->msr.activity_state = 0;
	s->msr.smbase = 0;

	/* Only a null LDT (selector index 0) is supported; bail out otherwise. */
	sel.sel16 = __readldtr();
	if (sel.bits.index != 0) {
		return;
	}

	/* Mark LDTR unusable; give TR a fixed busy 32-bit TSS layout. */
	s->seg.segment[IA32_SEG_LDTR].attributes = 0x00010000;
	s->seg.segment[IA32_SEG_TR].attributes = 0x0000808b;
	s->seg.segment[IA32_SEG_TR].limit = 0xffffffff;

	save_segment_data((uint16_t)__readcs(), &s->seg.segment[IA32_SEG_CS]);
	save_segment_data((uint16_t)__readds(), &s->seg.segment[IA32_SEG_DS]);
	save_segment_data((uint16_t)__reades(), &s->seg.segment[IA32_SEG_ES]);
	save_segment_data((uint16_t)__readfs(), &s->seg.segment[IA32_SEG_FS]);
	save_segment_data((uint16_t)__readgs(), &s->seg.segment[IA32_SEG_GS]);
	save_segment_data((uint16_t)__readss(), &s->seg.segment[IA32_SEG_SS]);
}
void TSS::Init(GDT *gdt)
{
    m_back_link = 0;
    //m_esp0 = KERNEL_STACK_TOP;
    m_ss0 = SEL_KERNEL_CODE;
    m_cr3 = __readcr3();   // record CR3 (the PDBR)
    m_eip = 0;             // (uint32)UserProcess;
    m_eflags = 0x00000202;
    m_ldt = 0;             // no LDT; the processor permits tasks without one
    m_trap = 0;
    m_iobase = 103;        // no I/O bitmap; ring 0 does not actually need one

    // Build the TSS descriptor for the program manager task and install it
    // into the GDT.
    printf("TSS Init() OK\n");
    Register(gdt);
}
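// Register(gdt) is implemented elsewhere; what it has to produce is an
// "available 32-bit TSS" system descriptor for this TSS. A sketch of the
// descriptor packing per the IA-32 manual -- the function name is
// illustrative, and the uint32/uint64 typedefs are assumed from the codebase:
uint64 MakeTssDescriptor(uint32 base, uint32 limit)
{
    uint64 d = 0;
    d |= limit & 0xFFFF;                     // limit 15:0
    d |= (uint64)(base & 0xFFFFFF) << 16;    // base 23:0
    d |= (uint64)0x89 << 40;                 // P=1, DPL=0, type=9 (32-bit TSS)
    d |= (uint64)(limit & 0xF0000) << 32;    // limit 19:16
    d |= (uint64)(base & 0xFF000000) << 32;  // base 31:24
    return d;
}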
INT32
ShvLoad (
    VOID
    )
{
    SHV_CALLBACK_CONTEXT callbackContext;

    //
    // Attempt to enter VMX root mode on all logical processors. This will
    // broadcast a DPC interrupt which will execute the callback routine in
    // parallel on the LPs. Send the callback routine the physical address of
    // the PML4 of the system process, which is what this driver entrypoint
    // should be executing in.
    //
    callbackContext.Cr3 = __readcr3();
    callbackContext.FailureStatus = SHV_STATUS_SUCCESS;
    callbackContext.FailedCpu = -1;
    callbackContext.InitCount = 0;
    ShvOsRunCallbackOnProcessors(ShvVpLoadCallback, &callbackContext);

    //
    // Check if all LPs are now hypervised. Return the failure code of at
    // least one of them.
    //
    // Note that each VP is responsible for freeing its VP data on failure.
    //
    if (callbackContext.InitCount != ShvOsGetActiveProcessorCount())
    {
        ShvOsDebugPrint("The SHV failed to initialize (0x%lX) Failed CPU: %d\n",
                        callbackContext.FailureStatus,
                        callbackContext.FailedCpu);
        return callbackContext.FailureStatus;
    }

    //
    // Indicate success.
    //
    ShvOsDebugPrint("The SHV has been installed.\n");
    return SHV_STATUS_SUCCESS;
}
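//
// ShvOsRunCallbackOnProcessors is OS glue. On Windows it can be implemented
// with KeGenericCallDpc, which queues a DPC on every logical processor and
// provides a built-in barrier. A sketch of that pattern -- the wrapper names
// are illustrative, while the Ke* APIs are the real kernel exports:
//
VOID
ShvpGenericDpcRoutine (
    PKDPC Dpc,
    PVOID Context,
    PVOID SystemArgument1,
    PVOID SystemArgument2
    )
{
    UNREFERENCED_PARAMETER(Dpc);

    //
    // Run the per-processor work, then rendezvous with the other LPs and
    // signal completion so KeGenericCallDpc can return.
    //
    ShvVpLoadCallback((PSHV_CALLBACK_CONTEXT)Context);
    KeSignalCallDpcSynchronize(SystemArgument2);
    KeSignalCallDpcDone(SystemArgument1);
}

VOID
ShvOsRunCallbackOnProcessors (
    PSHV_CPU_CALLBACK Callback,
    PVOID Context
    )
{
    UNREFERENCED_PARAMETER(Callback);  // fixed to ShvVpLoadCallback here
    KeGenericCallDpc(ShvpGenericDpcRoutine, Context);
}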
_Use_decl_annotations_ EXTERN_C static bool VminitpSetupVMCS(
    const PER_PROCESSOR_DATA *ProcessorData, ULONG_PTR GuestStackPointer,
    ULONG_PTR GuestInstructionPointer, ULONG_PTR VmmStackPointer) {
  unsigned char error = 0;

  GDTR gdtr = {};
  __sgdt(&gdtr);

  IDTR idtr = {};
  __sidt(&idtr);

  VMX_VM_ENTER_CONTROLS vmEnterCtlRequested = {};
  vmEnterCtlRequested.Fields.IA32eModeGuest = true;
  VMX_VM_ENTER_CONTROLS vmEnterCtl = {
      VminitpAdjustControlValue(IA32_VMX_ENTRY_CTLS, vmEnterCtlRequested.All)};

  VMX_VM_EXIT_CONTROLS vmExitCtlRequested = {};
  vmExitCtlRequested.Fields.AcknowledgeInterruptOnExit = true;
  vmExitCtlRequested.Fields.HostAddressSpaceSize = true;
  VMX_VM_EXIT_CONTROLS vmExitCtl = {
      VminitpAdjustControlValue(IA32_VMX_EXIT_CTLS, vmExitCtlRequested.All)};

  VMX_PIN_BASED_CONTROLS vmPinCtlRequested = {};
  VMX_PIN_BASED_CONTROLS vmPinCtl = {
      VminitpAdjustControlValue(IA32_VMX_PINBASED_CTLS, vmPinCtlRequested.All)};

  VMX_CPU_BASED_CONTROLS vmCpuCtlRequested = {};
  vmCpuCtlRequested.Fields.RDTSCExiting = true;
  vmCpuCtlRequested.Fields.CR3LoadExiting = true;  // MOV to CR3
  vmCpuCtlRequested.Fields.CR8LoadExiting = true;  // MOV to CR8
  vmCpuCtlRequested.Fields.MovDRExiting = true;
  vmCpuCtlRequested.Fields.UseMSRBitmaps = true;
  vmCpuCtlRequested.Fields.ActivateSecondaryControl = true;
  VMX_CPU_BASED_CONTROLS vmCpuCtl = {VminitpAdjustControlValue(
      IA32_VMX_PROCBASED_CTLS, vmCpuCtlRequested.All)};

  VMX_SECONDARY_CPU_BASED_CONTROLS vmCpuCtl2Requested = {};
  vmCpuCtl2Requested.Fields.EnableRDTSCP = true;
  vmCpuCtl2Requested.Fields.DescriptorTableExiting = true;
  VMX_SECONDARY_CPU_BASED_CONTROLS vmCpuCtl2 = {VminitpAdjustControlValue(
      IA32_VMX_PROCBASED_CTLS2, vmCpuCtl2Requested.All)};

  // Set up the MSR bitmap

  // Activate VM-exit for RDMSR against all MSRs
  const auto bitMapReadLow =
      reinterpret_cast<UCHAR *>(ProcessorData->MsrBitmap);
  const auto bitMapReadHigh = bitMapReadLow + 1024;
  RtlFillMemory(bitMapReadLow, 1024, 0xff);   // read        0 -     1fff
  RtlFillMemory(bitMapReadHigh, 1024, 0xff);  // read c0000000 - c0001fff

  // But ignore IA32_MPERF (000000e7) and IA32_APERF (000000e8)
  RTL_BITMAP bitMapReadLowHeader = {};
  RtlInitializeBitMap(&bitMapReadLowHeader,
                      reinterpret_cast<PULONG>(bitMapReadLow), 1024 * 8);
  RtlClearBits(&bitMapReadLowHeader, 0xe7, 2);

  // But ignore IA32_GS_BASE (c0000101) and IA32_KERNEL_GS_BASE (c0000102)
  RTL_BITMAP bitMapReadHighHeader = {};
  RtlInitializeBitMap(&bitMapReadHighHeader,
                      reinterpret_cast<PULONG>(bitMapReadHigh), 1024 * 8);
  RtlClearBits(&bitMapReadHighHeader, 0x101, 2);

  const auto msrBitmapPA = MmGetPhysicalAddress(ProcessorData->MsrBitmap);

  // Set up CR0 and CR4 bitmaps
  // Where a bit is masked, the shadow bit appears
  // Where a bit is not masked, the actual bit appears
  CR0_REG cr0mask = {};
  cr0mask.Fields.WP = true;
  CR4_REG cr4mask = {};
  cr4mask.Fields.PGE = true;

  // clang-format off
  /* 16-Bit Control Field */

  /* 16-Bit Guest-State Fields */
  error |= __vmx_vmwrite(GUEST_ES_SELECTOR, AsmReadES());
  error |= __vmx_vmwrite(GUEST_CS_SELECTOR, AsmReadCS());
  error |= __vmx_vmwrite(GUEST_SS_SELECTOR, AsmReadSS());
  error |= __vmx_vmwrite(GUEST_DS_SELECTOR, AsmReadDS());
  error |= __vmx_vmwrite(GUEST_FS_SELECTOR, AsmReadFS());
  error |= __vmx_vmwrite(GUEST_GS_SELECTOR, AsmReadGS());
  error |= __vmx_vmwrite(GUEST_LDTR_SELECTOR, AsmReadLDTR());
  error |= __vmx_vmwrite(GUEST_TR_SELECTOR, AsmReadTR());

  /* 16-Bit Host-State Fields */
  error |= __vmx_vmwrite(HOST_ES_SELECTOR, AsmReadES() & 0xf8);  // RPL and TI
  error |= __vmx_vmwrite(HOST_CS_SELECTOR, AsmReadCS() & 0xf8);  // have to be 0
  error |= __vmx_vmwrite(HOST_SS_SELECTOR, AsmReadSS() & 0xf8);
  error |= __vmx_vmwrite(HOST_DS_SELECTOR, AsmReadDS() & 0xf8);
  error |= __vmx_vmwrite(HOST_FS_SELECTOR, AsmReadFS() & 0xf8);
  error |= __vmx_vmwrite(HOST_GS_SELECTOR, AsmReadGS() & 0xf8);
  error |= __vmx_vmwrite(HOST_TR_SELECTOR, AsmReadTR() & 0xf8);

  /* 64-Bit Control Fields */
  error |= __vmx_vmwrite(IO_BITMAP_A, 0);
  error |= __vmx_vmwrite(IO_BITMAP_B, 0);
  error |= __vmx_vmwrite(MSR_BITMAP, msrBitmapPA.QuadPart);
  error |= __vmx_vmwrite(TSC_OFFSET, 0);

  /* 64-Bit Guest-State Fields */
  error |= __vmx_vmwrite(VMCS_LINK_POINTER, 0xffffffffffffffff);
  error |= __vmx_vmwrite(GUEST_IA32_DEBUGCTL, __readmsr(IA32_DEBUGCTL));

  /* 32-Bit Control Fields */
  error |= __vmx_vmwrite(PIN_BASED_VM_EXEC_CONTROL, vmPinCtl.All);
  error |= __vmx_vmwrite(CPU_BASED_VM_EXEC_CONTROL, vmCpuCtl.All);
  error |= __vmx_vmwrite(SECONDARY_VM_EXEC_CONTROL, vmCpuCtl2.All);
  error |= __vmx_vmwrite(EXCEPTION_BITMAP, 0);
  error |= __vmx_vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
  error |= __vmx_vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, 0);
  error |= __vmx_vmwrite(CR3_TARGET_COUNT, 0);
  error |= __vmx_vmwrite(VM_EXIT_CONTROLS, vmExitCtl.All);
  error |= __vmx_vmwrite(VM_EXIT_MSR_STORE_COUNT, 0);
  error |= __vmx_vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
  error |= __vmx_vmwrite(VM_ENTRY_CONTROLS, vmEnterCtl.All);
  error |= __vmx_vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0);
  error |= __vmx_vmwrite(VM_ENTRY_INTR_INFO_FIELD, 0);

  /* 32-Bit Guest-State Fields */
  error |= __vmx_vmwrite(GUEST_ES_LIMIT, GetSegmentLimit(AsmReadES()));
  error |= __vmx_vmwrite(GUEST_CS_LIMIT, GetSegmentLimit(AsmReadCS()));
  error |= __vmx_vmwrite(GUEST_SS_LIMIT, GetSegmentLimit(AsmReadSS()));
  error |= __vmx_vmwrite(GUEST_DS_LIMIT, GetSegmentLimit(AsmReadDS()));
  error |= __vmx_vmwrite(GUEST_FS_LIMIT, GetSegmentLimit(AsmReadFS()));
  error |= __vmx_vmwrite(GUEST_GS_LIMIT, GetSegmentLimit(AsmReadGS()));
  error |= __vmx_vmwrite(GUEST_LDTR_LIMIT, GetSegmentLimit(AsmReadLDTR()));
  error |= __vmx_vmwrite(GUEST_TR_LIMIT, GetSegmentLimit(AsmReadTR()));
  error |= __vmx_vmwrite(GUEST_GDTR_LIMIT, gdtr.Limit);
  error |= __vmx_vmwrite(GUEST_IDTR_LIMIT, idtr.Limit);
  error |= __vmx_vmwrite(GUEST_ES_AR_BYTES, VminitpGetSegmentAccessRight(AsmReadES()));
  error |= __vmx_vmwrite(GUEST_CS_AR_BYTES, VminitpGetSegmentAccessRight(AsmReadCS()));
  error |= __vmx_vmwrite(GUEST_SS_AR_BYTES, VminitpGetSegmentAccessRight(AsmReadSS()));
  error |= __vmx_vmwrite(GUEST_DS_AR_BYTES, VminitpGetSegmentAccessRight(AsmReadDS()));
  error |= __vmx_vmwrite(GUEST_FS_AR_BYTES, VminitpGetSegmentAccessRight(AsmReadFS()));
  error |= __vmx_vmwrite(GUEST_GS_AR_BYTES, VminitpGetSegmentAccessRight(AsmReadGS()));
  error |= __vmx_vmwrite(GUEST_LDTR_AR_BYTES, VminitpGetSegmentAccessRight(AsmReadLDTR()));
  error |= __vmx_vmwrite(GUEST_TR_AR_BYTES, VminitpGetSegmentAccessRight(AsmReadTR()));
  error |= __vmx_vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
  error |= __vmx_vmwrite(GUEST_ACTIVITY_STATE, 0);
  error |= __vmx_vmwrite(GUEST_SYSENTER_CS, __readmsr(IA32_SYSENTER_CS));

  /* 32-Bit Host-State Field */
  error |= __vmx_vmwrite(HOST_IA32_SYSENTER_CS, __readmsr(IA32_SYSENTER_CS));

  /* Natural-Width Control Fields */
  error |= __vmx_vmwrite(CR0_GUEST_HOST_MASK, cr0mask.All);
  error |= __vmx_vmwrite(CR4_GUEST_HOST_MASK, cr4mask.All);
  error |= __vmx_vmwrite(CR0_READ_SHADOW, __readcr0());
  error |= __vmx_vmwrite(CR4_READ_SHADOW, __readcr4());
  error |= __vmx_vmwrite(CR3_TARGET_VALUE0, 0);
  error |= __vmx_vmwrite(CR3_TARGET_VALUE1, 0);
  error |= __vmx_vmwrite(CR3_TARGET_VALUE2, 0);
  error |= __vmx_vmwrite(CR3_TARGET_VALUE3, 0);

  /* Natural-Width Guest-State Fields */
  error |= __vmx_vmwrite(GUEST_CR0, __readcr0());
  error |= __vmx_vmwrite(GUEST_CR3, __readcr3());
  error |= __vmx_vmwrite(GUEST_CR4, __readcr4());
  error |= __vmx_vmwrite(GUEST_ES_BASE, 0);
  error |= __vmx_vmwrite(GUEST_CS_BASE, 0);
  error |= __vmx_vmwrite(GUEST_SS_BASE, 0);
  error |= __vmx_vmwrite(GUEST_DS_BASE, 0);
  error |= __vmx_vmwrite(GUEST_FS_BASE, __readmsr(IA32_FS_BASE));
  error |= __vmx_vmwrite(GUEST_GS_BASE, __readmsr(IA32_GS_BASE));
  error |= __vmx_vmwrite(GUEST_LDTR_BASE, VminitpGetSegmentBase(gdtr.Address, AsmReadLDTR()));
  error |= __vmx_vmwrite(GUEST_TR_BASE, VminitpGetSegmentBase(gdtr.Address, AsmReadTR()));
  error |= __vmx_vmwrite(GUEST_GDTR_BASE, gdtr.Address);
  error |= __vmx_vmwrite(GUEST_IDTR_BASE, idtr.Address);
  error |= __vmx_vmwrite(GUEST_DR7, __readdr(7));
  error |= __vmx_vmwrite(GUEST_RSP, GuestStackPointer);
  error |= __vmx_vmwrite(GUEST_RIP, GuestInstructionPointer);
  error |= __vmx_vmwrite(GUEST_RFLAGS, __readeflags());
  error |= __vmx_vmwrite(GUEST_SYSENTER_ESP, __readmsr(IA32_SYSENTER_ESP));
  error |= __vmx_vmwrite(GUEST_SYSENTER_EIP, __readmsr(IA32_SYSENTER_EIP));

  /* Natural-Width Host-State Fields */
  error |= __vmx_vmwrite(HOST_CR0, __readcr0());
  error |= __vmx_vmwrite(HOST_CR3, __readcr3());
  error |= __vmx_vmwrite(HOST_CR4, __readcr4());
  error |= __vmx_vmwrite(HOST_FS_BASE, __readmsr(IA32_FS_BASE));
  error |= __vmx_vmwrite(HOST_GS_BASE, __readmsr(IA32_GS_BASE));
  error |= __vmx_vmwrite(HOST_TR_BASE, VminitpGetSegmentBase(gdtr.Address, AsmReadTR()));
  error |= __vmx_vmwrite(HOST_GDTR_BASE, gdtr.Address);
  error |= __vmx_vmwrite(HOST_IDTR_BASE, idtr.Address);
  error |= __vmx_vmwrite(HOST_IA32_SYSENTER_ESP, __readmsr(IA32_SYSENTER_ESP));
  error |= __vmx_vmwrite(HOST_IA32_SYSENTER_EIP, __readmsr(IA32_SYSENTER_EIP));
  error |= __vmx_vmwrite(HOST_RSP, VmmStackPointer);
  error |= __vmx_vmwrite(HOST_RIP, reinterpret_cast<size_t>(AsmVmmEntryPoint));
  // clang-format on

  const auto vmxStatus = static_cast<VMX_STATUS>(error);
  return vmxStatus == VMX_OK;
}
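// VminitpAdjustControlValue, used throughout the function above, follows the
// standard VMX capability-MSR adjustment: the MSR's low 32 bits are the
// allowed-0 settings (bits that must be 1) and the high 32 bits are the
// allowed-1 settings (bits that may be 1). A sketch under that assumption:
static ULONG VminitpAdjustControlValue(ULONG msr, ULONG requestedValue) {
  LARGE_INTEGER msrValue = {};
  msrValue.QuadPart = __readmsr(msr);

  auto adjustedValue = requestedValue;
  adjustedValue &= msrValue.HighPart;  // clear bits that may not be 1
  adjustedValue |= msrValue.LowPart;   // set bits that must be 1
  return adjustedValue;
}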
// LLDT, LTR, SLDT, and STR
_Use_decl_annotations_ static void VmmpHandleLdtrOrTrAccess(
    GuestContext *guest_context) {
  HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
  const LdtrOrTrAccessQualification exit_qualification = {
      static_cast<ULONG32>(UtilVmRead(VmcsField::kVmxInstructionInfo))};

  // Calculate an address or a register to be used for the instruction
  const auto displacement = UtilVmRead(VmcsField::kExitQualification);
  ULONG_PTR operation_address = 0;
  if (exit_qualification.fields.register_access) {
    // Register
    const auto register_used = VmmpSelectRegister(
        exit_qualification.fields.register1, guest_context);
    operation_address = reinterpret_cast<ULONG_PTR>(register_used);
  } else {
    // Base
    ULONG_PTR base_value = 0;
    if (!exit_qualification.fields.base_register_invalid) {
      const auto register_used = VmmpSelectRegister(
          exit_qualification.fields.base_register, guest_context);
      base_value = *register_used;
    }

    // Index
    ULONG_PTR index_value = 0;
    if (!exit_qualification.fields.index_register_invalid) {
      const auto register_used = VmmpSelectRegister(
          exit_qualification.fields.index_register, guest_context);
      index_value = *register_used;
      switch (static_cast<GdtrOrIdtrScaling>(
          exit_qualification.fields.scalling)) {
        case GdtrOrIdtrScaling::kNoScaling:
          index_value = index_value;
          break;
        case GdtrOrIdtrScaling::kScaleBy2:
          index_value = index_value * 2;
          break;
        case GdtrOrIdtrScaling::kScaleBy4:
          index_value = index_value * 4;
          break;
        case GdtrOrIdtrScaling::kScaleBy8:
          index_value = index_value * 8;
          break;
        default:
          break;
      }
    }

    operation_address = base_value + index_value + displacement;
    if (static_cast<GdtrOrIdtrAaddressSize>(
            exit_qualification.fields.address_size) ==
        GdtrOrIdtrAaddressSize::k32bit) {
      operation_address &= MAXULONG;
    }
  }

  // Update CR3 with that of the guest since below code is going to access
  // memory.
  const auto guest_cr3 = UtilVmRead(VmcsField::kGuestCr3);
  const auto vmm_cr3 = __readcr3();
  __writecr3(guest_cr3);

  // Emulate the instruction
  auto selector = reinterpret_cast<USHORT *>(operation_address);
  switch (static_cast<LdtrOrTrInstructionIdentity>(
      exit_qualification.fields.instruction_identity)) {
    case LdtrOrTrInstructionIdentity::kSldt:
      *selector =
          static_cast<USHORT>(UtilVmRead(VmcsField::kGuestLdtrSelector));
      break;
    case LdtrOrTrInstructionIdentity::kStr:
      *selector = static_cast<USHORT>(UtilVmRead(VmcsField::kGuestTrSelector));
      break;
    case LdtrOrTrInstructionIdentity::kLldt:
      UtilVmWrite(VmcsField::kGuestLdtrSelector, *selector);
      break;
    case LdtrOrTrInstructionIdentity::kLtr:
      UtilVmWrite(VmcsField::kGuestTrSelector, *selector);
      break;
  }

  __writecr3(vmm_cr3);

  VmmpAdjustGuestInstructionPointer(guest_context->ip);
}
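// VmmpSelectRegister maps the 4-bit register encoding from the
// instruction-information field onto the saved guest GP registers. A sketch
// assuming an x64 build and a HyperPlatform-style GpRegisters layout (field
// names ax..r15 are assumptions):
static ULONG_PTR *VmmpSelectRegister(ULONG index, GuestContext *guest_context) {
  ULONG_PTR *registers[] = {
      &guest_context->gp_regs->ax,  &guest_context->gp_regs->cx,
      &guest_context->gp_regs->dx,  &guest_context->gp_regs->bx,
      &guest_context->gp_regs->sp,  &guest_context->gp_regs->bp,
      &guest_context->gp_regs->si,  &guest_context->gp_regs->di,
      &guest_context->gp_regs->r8,  &guest_context->gp_regs->r9,
      &guest_context->gp_regs->r10, &guest_context->gp_regs->r11,
      &guest_context->gp_regs->r12, &guest_context->gp_regs->r13,
      &guest_context->gp_regs->r14, &guest_context->gp_regs->r15,
  };
  NT_ASSERT(index < _countof(registers));
  return registers[index];
}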
// LIDT, SIDT, LGDT and SGDT
_Use_decl_annotations_ static void VmmpHandleGdtrOrIdtrAccess(
    GuestContext *guest_context) {
  HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
  const GdtrOrIdtrAccessQualification exit_qualification = {
      static_cast<ULONG32>(UtilVmRead(VmcsField::kVmxInstructionInfo))};

  // Calculate an address to be used for the instruction
  const auto displacement = UtilVmRead(VmcsField::kExitQualification);

  // Base
  ULONG_PTR base_value = 0;
  if (!exit_qualification.fields.base_register_invalid) {
    const auto register_used = VmmpSelectRegister(
        exit_qualification.fields.base_register, guest_context);
    base_value = *register_used;
  }

  // Index
  ULONG_PTR index_value = 0;
  if (!exit_qualification.fields.index_register_invalid) {
    const auto register_used = VmmpSelectRegister(
        exit_qualification.fields.index_register, guest_context);
    index_value = *register_used;
    switch (static_cast<GdtrOrIdtrScaling>(
        exit_qualification.fields.scalling)) {
      case GdtrOrIdtrScaling::kNoScaling:
        index_value = index_value;
        break;
      case GdtrOrIdtrScaling::kScaleBy2:
        index_value = index_value * 2;
        break;
      case GdtrOrIdtrScaling::kScaleBy4:
        index_value = index_value * 4;
        break;
      case GdtrOrIdtrScaling::kScaleBy8:
        index_value = index_value * 8;
        break;
      default:
        break;
    }
  }

  auto operation_address = base_value + index_value + displacement;
  if (static_cast<GdtrOrIdtrAaddressSize>(
          exit_qualification.fields.address_size) ==
      GdtrOrIdtrAaddressSize::k32bit) {
    operation_address &= MAXULONG;
  }

  // Update CR3 with that of the guest since below code is going to access
  // memory.
  const auto guest_cr3 = UtilVmRead(VmcsField::kGuestCr3);
  const auto vmm_cr3 = __readcr3();
  __writecr3(guest_cr3);

  // Emulate the instruction
  auto descriptor_table_reg = reinterpret_cast<Idtr *>(operation_address);
  switch (static_cast<GdtrOrIdtrInstructionIdentity>(
      exit_qualification.fields.instruction_identity)) {
    case GdtrOrIdtrInstructionIdentity::kSgdt:
      descriptor_table_reg->base = UtilVmRead(VmcsField::kGuestGdtrBase);
      descriptor_table_reg->limit =
          static_cast<unsigned short>(UtilVmRead(VmcsField::kGuestGdtrLimit));
      break;
    case GdtrOrIdtrInstructionIdentity::kSidt:
      descriptor_table_reg->base = UtilVmRead(VmcsField::kGuestIdtrBase);
      descriptor_table_reg->limit =
          static_cast<unsigned short>(UtilVmRead(VmcsField::kGuestIdtrLimit));
      break;
    case GdtrOrIdtrInstructionIdentity::kLgdt:
      UtilVmWrite(VmcsField::kGuestGdtrBase, descriptor_table_reg->base);
      UtilVmWrite(VmcsField::kGuestGdtrLimit, descriptor_table_reg->limit);
      break;
    case GdtrOrIdtrInstructionIdentity::kLidt:
      UtilVmWrite(VmcsField::kGuestIdtrBase, descriptor_table_reg->base);
      UtilVmWrite(VmcsField::kGuestIdtrLimit, descriptor_table_reg->limit);
      break;
  }

  __writecr3(vmm_cr3);

  VmmpAdjustGuestInstructionPointer(guest_context->ip);
}
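// Both handlers above finish by skipping the emulated instruction. A minimal
// sketch of VmmpAdjustGuestInstructionPointer: advance RIP by the VM-exit
// instruction length recorded in the VMCS (the real implementation may do
// more, e.g. clear interrupt-blocking state):
static void VmmpAdjustGuestInstructionPointer(ULONG_PTR guest_ip) {
  const auto exit_instruction_length =
      UtilVmRead(VmcsField::kVmExitInstructionLen);
  UtilVmWrite(VmcsField::kGuestRip, guest_ip + exit_instruction_length);
}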
NTSTATUS DriverEntry(IN PDRIVER_OBJECT DriverObject,
                     IN PUNICODE_STRING RegistryPath)
{
  UNICODE_STRING DeviceName, DeviceLink;
  NTSTATUS NtStatus;
  PDEVICE_OBJECT DeviceObject = NULL;
  PDEVICE_EXTENSION extension;

  WinDbgPrint("WinPMEM - " PMEM_VERSION " - Physical memory acquisition\n");

#if PMEM_WRITE_ENABLED == 1
  WinDbgPrint("WinPMEM write support available!");
#endif

  WinDbgPrint("Copyright (c) 2017, Michael Cohen <*****@*****.**>\n");

  // Initialize import tables:
  if (PmemGetProcAddresses() != STATUS_SUCCESS) {
    WinDbgPrint("Failed to initialize import table. Aborting.\n");
    goto error;
  };

  RtlInitUnicodeString(&DeviceName, L"\\Device\\" PMEM_DEVICE_NAME);

  // We create our secure device.
  // http://msdn.microsoft.com/en-us/library/aa490540.aspx
  NtStatus = IoCreateDeviceSecure(DriverObject,
                                  sizeof(DEVICE_EXTENSION),
                                  &DeviceName,
                                  FILE_DEVICE_UNKNOWN,
                                  FILE_DEVICE_SECURE_OPEN,
                                  FALSE,
                                  &SDDL_DEVOBJ_SYS_ALL_ADM_ALL,
                                  &GUID_DEVCLASS_PMEM_DUMPER,
                                  &DeviceObject);
  if (!NT_SUCCESS(NtStatus)) {
    WinDbgPrint("IoCreateDevice failed. => %08X\n", NtStatus);
    return NtStatus;
  }

  DriverObject->MajorFunction[IRP_MJ_CREATE] = wddCreate;
  DriverObject->MajorFunction[IRP_MJ_CLOSE] = wddClose;
  DriverObject->MajorFunction[IRP_MJ_DEVICE_CONTROL] = wddDispatchDeviceControl;
  DriverObject->MajorFunction[IRP_MJ_READ] = PmemRead;

#if PMEM_WRITE_ENABLED == 1
  {
    // Make sure that the drivers with write support are clearly marked as
    // such.
    static char TAG[] = "Write Supported";
  }

  // Support writing.
  DriverObject->MajorFunction[IRP_MJ_WRITE] = PmemWrite;
#endif

  DriverObject->DriverUnload = IoUnload;

  // Use buffered IO - a bit slower but simpler to implement, and more
  // efficient for small reads.
  SetFlag(DeviceObject->Flags, DO_BUFFERED_IO);
  ClearFlag(DeviceObject->Flags, DO_DIRECT_IO);
  ClearFlag(DeviceObject->Flags, DO_DEVICE_INITIALIZING);

  RtlInitUnicodeString(&DeviceLink, L"\\DosDevices\\" PMEM_DEVICE_NAME);
  NtStatus = IoCreateSymbolicLink(&DeviceLink, &DeviceName);
  if (!NT_SUCCESS(NtStatus)) {
    WinDbgPrint("IoCreateSymbolicLink failed. => %08X\n", NtStatus);
    IoDeleteDevice(DeviceObject);
  }

  // Populate globals in kernel context.
  CR3.QuadPart = __readcr3();

  // Initialize the device extension with safe defaults.
  extension = DeviceObject->DeviceExtension;
  extension->mode = ACQUISITION_MODE_PHYSICAL_MEMORY;
  extension->MemoryHandle = 0;

#if _WIN64
  extension->pte_mmapper = pte_mmap_windows_new();
  extension->pte_mmapper->loglevel = PTE_ERR;
  extension->mode = ACQUISITION_MODE_PTE_MMAP;
#else
  // Disable PTE mapping for 32-bit systems.
  extension->pte_mmapper = NULL;
#endif

  WinDbgPrint("Driver initialization completed.\n");
  return NtStatus;

error:
  return STATUS_UNSUCCESSFUL;
}
// See: PREPARATION AND LAUNCHING A VIRTUAL MACHINE
_Use_decl_annotations_ static bool VmpSetupVMCS(
    const ProcessorData *processor_data, ULONG_PTR guest_stack_pointer,
    ULONG_PTR guest_instruction_pointer, ULONG_PTR vmm_stack_pointer) {
  Gdtr gdtr = {};
  __sgdt(&gdtr);

  Idtr idtr = {};
  __sidt(&idtr);

  // See: Algorithms for Determining VMX Capabilities
  const auto use_true_msrs = Ia32VmxBasicMsr{
      UtilReadMsr64(Msr::kIa32VmxBasic)}.fields.vmx_capability_hint;

  VmxVmEntryControls vm_entryctl_requested = {};
  vm_entryctl_requested.fields.ia32e_mode_guest = IsX64();
  VmxVmEntryControls vm_entryctl = {VmpAdjustControlValue(
      (use_true_msrs) ? Msr::kIa32VmxTrueEntryCtls : Msr::kIa32VmxEntryCtls,
      vm_entryctl_requested.all)};

  VmxVmExitControls vm_exitctl_requested = {};
  vm_exitctl_requested.fields.acknowledge_interrupt_on_exit = true;
  vm_exitctl_requested.fields.host_address_space_size = IsX64();
  VmxVmExitControls vm_exitctl = {VmpAdjustControlValue(
      (use_true_msrs) ? Msr::kIa32VmxTrueExitCtls : Msr::kIa32VmxExitCtls,
      vm_exitctl_requested.all)};

  VmxPinBasedControls vm_pinctl_requested = {};
  VmxPinBasedControls vm_pinctl = {
      VmpAdjustControlValue((use_true_msrs) ? Msr::kIa32VmxTruePinbasedCtls
                                            : Msr::kIa32VmxPinbasedCtls,
                            vm_pinctl_requested.all)};

  VmxProcessorBasedControls vm_procctl_requested = {};
  vm_procctl_requested.fields.invlpg_exiting = false;
  vm_procctl_requested.fields.rdtsc_exiting = false;
  vm_procctl_requested.fields.cr3_load_exiting = true;
  vm_procctl_requested.fields.cr8_load_exiting = false;  // NB: very frequent
  vm_procctl_requested.fields.mov_dr_exiting = true;
  vm_procctl_requested.fields.use_msr_bitmaps = true;
  vm_procctl_requested.fields.activate_secondary_control = true;
  VmxProcessorBasedControls vm_procctl = {
      VmpAdjustControlValue((use_true_msrs) ? Msr::kIa32VmxTrueProcBasedCtls
                                            : Msr::kIa32VmxProcBasedCtls,
                            vm_procctl_requested.all)};

  VmxSecondaryProcessorBasedControls vm_procctl2_requested = {};
  vm_procctl2_requested.fields.enable_ept = true;
  vm_procctl2_requested.fields.enable_rdtscp = true;  // required for Win10
  vm_procctl2_requested.fields.descriptor_table_exiting = true;  // required for Win10
  vm_procctl2_requested.fields.enable_xsaves_xstors = true;
  VmxSecondaryProcessorBasedControls vm_procctl2 = {VmpAdjustControlValue(
      Msr::kIa32VmxProcBasedCtls2, vm_procctl2_requested.all)};

  // Set up CR0 and CR4 bitmaps
  // - Where a bit is masked, the shadow bit appears
  // - Where a bit is not masked, the actual bit appears
  // VM-exit occurs when a guest modifies any of those fields
  Cr0 cr0_mask = {};
  Cr4 cr4_mask = {};

  // See: PDPTE Registers
  // If PAE paging would be in use following an execution of MOV to CR0 or MOV
  // to CR4 (see Section 4.1.1) and the instruction is modifying any of CR0.CD,
  // CR0.NW, CR0.PG, CR4.PAE, CR4.PGE, CR4.PSE, or CR4.SMEP; then the PDPTEs are
  // loaded from the address in CR3.
  if (UtilIsX86Pae()) {
    cr0_mask.fields.pg = true;
    cr0_mask.fields.cd = true;
    cr0_mask.fields.nw = true;
    cr4_mask.fields.pae = true;
    cr4_mask.fields.pge = true;
    cr4_mask.fields.pse = true;
    cr4_mask.fields.smep = true;
  }

  const auto exception_bitmap =
      // 1 << InterruptionVector::kBreakpointException |
      // 1 << InterruptionVector::kGeneralProtectionException |
      // 1 << InterruptionVector::kPageFaultException |
      0;

  // clang-format off
  /* 16-Bit Control Field */

  /* 16-Bit Guest-State Fields */
  auto error = VmxStatus::kOk;
  error |= UtilVmWrite(VmcsField::kGuestEsSelector, AsmReadES());
  error |= UtilVmWrite(VmcsField::kGuestCsSelector, AsmReadCS());
  error |= UtilVmWrite(VmcsField::kGuestSsSelector, AsmReadSS());
  error |= UtilVmWrite(VmcsField::kGuestDsSelector, AsmReadDS());
  error |= UtilVmWrite(VmcsField::kGuestFsSelector, AsmReadFS());
  error |= UtilVmWrite(VmcsField::kGuestGsSelector, AsmReadGS());
  error |= UtilVmWrite(VmcsField::kGuestLdtrSelector, AsmReadLDTR());
  error |= UtilVmWrite(VmcsField::kGuestTrSelector, AsmReadTR());

  /* 16-Bit Host-State Fields */
  // RPL and TI have to be 0
  error |= UtilVmWrite(VmcsField::kHostEsSelector, AsmReadES() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostCsSelector, AsmReadCS() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostSsSelector, AsmReadSS() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostDsSelector, AsmReadDS() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostFsSelector, AsmReadFS() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostGsSelector, AsmReadGS() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostTrSelector, AsmReadTR() & 0xf8);

  /* 64-Bit Control Fields */
  error |= UtilVmWrite64(VmcsField::kIoBitmapA, 0);
  error |= UtilVmWrite64(VmcsField::kIoBitmapB, 0);
  error |= UtilVmWrite64(VmcsField::kMsrBitmap, UtilPaFromVa(processor_data->shared_data->msr_bitmap));
  error |= UtilVmWrite64(VmcsField::kEptPointer, EptGetEptPointer(processor_data->ept_data));

  /* 64-Bit Guest-State Fields */
  error |= UtilVmWrite64(VmcsField::kVmcsLinkPointer, MAXULONG64);
  error |= UtilVmWrite64(VmcsField::kGuestIa32Debugctl, UtilReadMsr64(Msr::kIa32Debugctl));
  if (UtilIsX86Pae()) {
    UtilLoadPdptes(__readcr3());
  }

  /* 32-Bit Control Fields */
  error |= UtilVmWrite(VmcsField::kPinBasedVmExecControl, vm_pinctl.all);
  error |= UtilVmWrite(VmcsField::kCpuBasedVmExecControl, vm_procctl.all);
  error |= UtilVmWrite(VmcsField::kExceptionBitmap, exception_bitmap);
  error |= UtilVmWrite(VmcsField::kPageFaultErrorCodeMask, 0);
  error |= UtilVmWrite(VmcsField::kPageFaultErrorCodeMatch, 0);
  error |= UtilVmWrite(VmcsField::kCr3TargetCount, 0);
  error |= UtilVmWrite(VmcsField::kVmExitControls, vm_exitctl.all);
  error |= UtilVmWrite(VmcsField::kVmExitMsrStoreCount, 0);
  error |= UtilVmWrite(VmcsField::kVmExitMsrLoadCount, 0);
  error |= UtilVmWrite(VmcsField::kVmEntryControls, vm_entryctl.all);
  error |= UtilVmWrite(VmcsField::kVmEntryMsrLoadCount, 0);
  error |= UtilVmWrite(VmcsField::kVmEntryIntrInfoField, 0);
  error |= UtilVmWrite(VmcsField::kSecondaryVmExecControl, vm_procctl2.all);

  /* 32-Bit Guest-State Fields */
  error |= UtilVmWrite(VmcsField::kGuestEsLimit, GetSegmentLimit(AsmReadES()));
  error |= UtilVmWrite(VmcsField::kGuestCsLimit, GetSegmentLimit(AsmReadCS()));
  error |= UtilVmWrite(VmcsField::kGuestSsLimit, GetSegmentLimit(AsmReadSS()));
  error |= UtilVmWrite(VmcsField::kGuestDsLimit, GetSegmentLimit(AsmReadDS()));
  error |= UtilVmWrite(VmcsField::kGuestFsLimit, GetSegmentLimit(AsmReadFS()));
  error |= UtilVmWrite(VmcsField::kGuestGsLimit, GetSegmentLimit(AsmReadGS()));
  error |= UtilVmWrite(VmcsField::kGuestLdtrLimit, GetSegmentLimit(AsmReadLDTR()));
  error |= UtilVmWrite(VmcsField::kGuestTrLimit, GetSegmentLimit(AsmReadTR()));
  error |= UtilVmWrite(VmcsField::kGuestGdtrLimit, gdtr.limit);
  error |= UtilVmWrite(VmcsField::kGuestIdtrLimit, idtr.limit);
  error |= UtilVmWrite(VmcsField::kGuestEsArBytes, VmpGetSegmentAccessRight(AsmReadES()));
  error |= UtilVmWrite(VmcsField::kGuestCsArBytes, VmpGetSegmentAccessRight(AsmReadCS()));
  error |= UtilVmWrite(VmcsField::kGuestSsArBytes, VmpGetSegmentAccessRight(AsmReadSS()));
  error |= UtilVmWrite(VmcsField::kGuestDsArBytes, VmpGetSegmentAccessRight(AsmReadDS()));
  error |= UtilVmWrite(VmcsField::kGuestFsArBytes, VmpGetSegmentAccessRight(AsmReadFS()));
  error |= UtilVmWrite(VmcsField::kGuestGsArBytes, VmpGetSegmentAccessRight(AsmReadGS()));
  error |= UtilVmWrite(VmcsField::kGuestLdtrArBytes, VmpGetSegmentAccessRight(AsmReadLDTR()));
  error |= UtilVmWrite(VmcsField::kGuestTrArBytes, VmpGetSegmentAccessRight(AsmReadTR()));
  error |= UtilVmWrite(VmcsField::kGuestInterruptibilityInfo, 0);
  error |= UtilVmWrite(VmcsField::kGuestActivityState, 0);
  error |= UtilVmWrite(VmcsField::kGuestSysenterCs, UtilReadMsr(Msr::kIa32SysenterCs));

  /* 32-Bit Host-State Field */
  error |= UtilVmWrite(VmcsField::kHostIa32SysenterCs, UtilReadMsr(Msr::kIa32SysenterCs));

  /* Natural-Width Control Fields */
  error |= UtilVmWrite(VmcsField::kCr0GuestHostMask, cr0_mask.all);
  error |= UtilVmWrite(VmcsField::kCr4GuestHostMask, cr4_mask.all);
  error |= UtilVmWrite(VmcsField::kCr0ReadShadow, __readcr0());
  error |= UtilVmWrite(VmcsField::kCr4ReadShadow, __readcr4());

  /* Natural-Width Guest-State Fields */
  error |= UtilVmWrite(VmcsField::kGuestCr0, __readcr0());
  error |= UtilVmWrite(VmcsField::kGuestCr3, __readcr3());
  error |= UtilVmWrite(VmcsField::kGuestCr4, __readcr4());
#if defined(_AMD64_)
  error |= UtilVmWrite(VmcsField::kGuestEsBase, 0);
  error |= UtilVmWrite(VmcsField::kGuestCsBase, 0);
  error |= UtilVmWrite(VmcsField::kGuestSsBase, 0);
  error |= UtilVmWrite(VmcsField::kGuestDsBase, 0);
  error |= UtilVmWrite(VmcsField::kGuestFsBase, UtilReadMsr(Msr::kIa32FsBase));
  error |= UtilVmWrite(VmcsField::kGuestGsBase, UtilReadMsr(Msr::kIa32GsBase));
#else
  error |= UtilVmWrite(VmcsField::kGuestEsBase, VmpGetSegmentBase(gdtr.base, AsmReadES()));
  error |= UtilVmWrite(VmcsField::kGuestCsBase, VmpGetSegmentBase(gdtr.base, AsmReadCS()));
  error |= UtilVmWrite(VmcsField::kGuestSsBase, VmpGetSegmentBase(gdtr.base, AsmReadSS()));
  error |= UtilVmWrite(VmcsField::kGuestDsBase, VmpGetSegmentBase(gdtr.base, AsmReadDS()));
  error |= UtilVmWrite(VmcsField::kGuestFsBase, VmpGetSegmentBase(gdtr.base, AsmReadFS()));
  error |= UtilVmWrite(VmcsField::kGuestGsBase, VmpGetSegmentBase(gdtr.base, AsmReadGS()));
#endif
  error |= UtilVmWrite(VmcsField::kGuestLdtrBase, VmpGetSegmentBase(gdtr.base, AsmReadLDTR()));
  error |= UtilVmWrite(VmcsField::kGuestTrBase, VmpGetSegmentBase(gdtr.base, AsmReadTR()));
  error |= UtilVmWrite(VmcsField::kGuestGdtrBase, gdtr.base);
  error |= UtilVmWrite(VmcsField::kGuestIdtrBase, idtr.base);
  error |= UtilVmWrite(VmcsField::kGuestDr7, __readdr(7));
  error |= UtilVmWrite(VmcsField::kGuestRsp, guest_stack_pointer);
  error |= UtilVmWrite(VmcsField::kGuestRip, guest_instruction_pointer);
  error |= UtilVmWrite(VmcsField::kGuestRflags, __readeflags());
  error |= UtilVmWrite(VmcsField::kGuestSysenterEsp, UtilReadMsr(Msr::kIa32SysenterEsp));
  error |= UtilVmWrite(VmcsField::kGuestSysenterEip, UtilReadMsr(Msr::kIa32SysenterEip));

  /* Natural-Width Host-State Fields */
  error |= UtilVmWrite(VmcsField::kHostCr0, __readcr0());
  error |= UtilVmWrite(VmcsField::kHostCr3, __readcr3());
  error |= UtilVmWrite(VmcsField::kHostCr4, __readcr4());
#if defined(_AMD64_)
  error |= UtilVmWrite(VmcsField::kHostFsBase, UtilReadMsr(Msr::kIa32FsBase));
  error |= UtilVmWrite(VmcsField::kHostGsBase, UtilReadMsr(Msr::kIa32GsBase));
#else
  error |= UtilVmWrite(VmcsField::kHostFsBase, VmpGetSegmentBase(gdtr.base, AsmReadFS()));
  error |= UtilVmWrite(VmcsField::kHostGsBase, VmpGetSegmentBase(gdtr.base, AsmReadGS()));
#endif
  error |= UtilVmWrite(VmcsField::kHostTrBase, VmpGetSegmentBase(gdtr.base, AsmReadTR()));
  error |= UtilVmWrite(VmcsField::kHostGdtrBase, gdtr.base);
  error |= UtilVmWrite(VmcsField::kHostIdtrBase, idtr.base);
  error |= UtilVmWrite(VmcsField::kHostIa32SysenterEsp, UtilReadMsr(Msr::kIa32SysenterEsp));
  error |= UtilVmWrite(VmcsField::kHostIa32SysenterEip, UtilReadMsr(Msr::kIa32SysenterEip));
  error |= UtilVmWrite(VmcsField::kHostRsp, vmm_stack_pointer);
  error |= UtilVmWrite(VmcsField::kHostRip, reinterpret_cast<ULONG_PTR>(AsmVmmEntryPoint));
  // clang-format on

  const auto vmx_status = static_cast<VmxStatus>(error);
  return vmx_status == VmxStatus::kOk;
}
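// VmpGetSegmentAccessRight, used above for the *_AR_BYTES fields, is
// conventionally derived from the LAR instruction. A sketch assuming an
// AsmLoadAccessRightsByte wrapper around LAR and HyperPlatform-style types
// (both names are assumptions here):
_Use_decl_annotations_ static ULONG VmpGetSegmentAccessRight(
    USHORT segment_selector) {
  VmxRegmentDescriptorAccessRight access_right = {};
  if (segment_selector) {
    // LAR returns the descriptor's attribute bits starting at bit 8.
    auto native_access_right = AsmLoadAccessRightsByte(segment_selector);
    native_access_right >>= 8;
    access_right.all = static_cast<ULONG>(native_access_right);
    access_right.fields.reserved1 = 0;
    access_right.fields.reserved2 = 0;
    access_right.fields.unusable = false;
  } else {
    // A null selector must be marked "unusable" instead.
    access_right.fields.unusable = true;
  }
  return access_right.all;
}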
// Handles #BP. Determines if the #BP is caused by a shadow breakpoint, and if
// so, runs its handler, switches the page view to the read/write shadow page,
// and sets the monitor trap flag to execute only the single instruction that
// is located on the read/write shadow page. Then saves the breakpoint info as
// the last event.
_Use_decl_annotations_ bool SbpHandleBreakpoint(EptData* ept_data,
                                                void* guest_ip,
                                                GpRegisters* gp_regs) {
  if (!SbppIsSbpActive()) {
    return false;
  }

  const auto info = SbppFindPatchInfoByAddress(guest_ip);
  if (!info) {
    return false;
  }

  if (!SbppIsShadowBreakpoint(*info)) {
    return false;
  }

  // DdiMon is unable to handle it
  if (KeGetCurrentIrql() > DISPATCH_LEVEL) {
    HYPERPLATFORM_COMMON_BUG_CHECK(HyperPlatformBugCheck::kUnspecified, 0, 0,
                                   0);
  }

  // The VMM has to switch the current CR3 to the guest's CR3 in order to
  // access memory, because the VMM runs with the System CR3 saved in and
  // restored from VmcsField::kHostCr3, while the guest's CR3 depends on the
  // thread context. Without the guest's CR3, user address space is likely
  // inaccessible from the VMM, ending up with a bug check.
  const auto guest_cr3 = UtilVmRead(VmcsField::kGuestCr3);
  const auto vmm_cr3 = __readcr3();

  if (info->type == BreakpointType::kPre) {
    // Pre breakpoint
    __writecr3(guest_cr3);
    info->handler(*info, ept_data, gp_regs, UtilVmRead(VmcsField::kGuestRsp));
    __writecr3(vmm_cr3);
    SbppEnablePageShadowingForRW(*info, ept_data);
    SbppSetMonitorTrapFlag(true);
    SbppSaveLastPatchInfo(*info);

  } else {
    // Post breakpoint
    if (info->target_tid == PsGetCurrentThreadId()) {
      // It is the target thread; execute the post handler and let it continue
      // with the subsequent instructions.
      __writecr3(guest_cr3);
      info->handler(*info, ept_data, gp_regs, UtilVmRead(VmcsField::kGuestRsp));
      __writecr3(vmm_cr3);
      SbppDisablePageShadowing(*info, ept_data);
      SbppDeleteBreakpointFromList(*info);
    } else {
      // It is not; allow it to run one instruction without the breakpoint.
      SbppEnablePageShadowingForRW(*info, ept_data);
      SbppSetMonitorTrapFlag(true);
      SbppSaveLastPatchInfo(*info);
    }
  }

  // Yes, it was caused by a shadow breakpoint. Do not deliver the #BP to the
  // guest.
  return true;
}
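// SbppSetMonitorTrapFlag, used above, amounts to a read-modify-write of the
// primary processor-based controls in the current VMCS. A minimal sketch
// (field and helper names follow the HyperPlatform conventions used above):
static void SbppSetMonitorTrapFlag(bool enable) {
  VmxProcessorBasedControls vm_procctl = {static_cast<unsigned int>(
      UtilVmRead(VmcsField::kCpuBasedVmExecControl))};
  vm_procctl.fields.monitor_trap_flag = enable;
  UtilVmWrite(VmcsField::kCpuBasedVmExecControl, vm_procctl.all);
}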