void GetGuestState()
{
    PHYSICAL_ADDRESS HighestAcceptableAddress;
    HighestAcceptableAddress.QuadPart = 0xFFFFFFFF00000000;

    g_GuestState.CR0 = __readcr0();
    g_GuestState.CR3 = __readcr3();
    g_GuestState.CR4 = __readcr4() | CR4_VMXE;
    g_GuestState.RFLAGS = __readeflags();

    g_GuestState.Cs = __readcs();
    g_GuestState.Ds = __readds();
    g_GuestState.Es = __reades();
    g_GuestState.Ss = __readss();
    g_GuestState.Fs = __readfs();
    g_GuestState.Gs = __readgs();
    g_GuestState.Ldtr = __sldt();
    g_GuestState.Tr = __str();

    __sgdt(&(g_GuestState.Gdtr));
    __sidt(&(g_GuestState.Idtr));

    g_GuestState.S_CS = __readmsr(IA32_SYSENTER_CS);
    g_GuestState.SEIP = __readmsr(IA32_SYSENTER_EIP);
    g_GuestState.SESP = __readmsr(IA32_SYSENTER_ESP);

    g_GuestState.VMXON = MmAllocateNonCachedMemory(PAGE_SIZE);
    RtlZeroMemory(g_GuestState.VMXON, PAGE_SIZE);

    g_GuestState.VMCS = MmAllocateNonCachedMemory(PAGE_SIZE);
    RtlZeroMemory(g_GuestState.VMCS, PAGE_SIZE);

    // Allocates non-paged memory that is guaranteed to be physically
    // contiguous; release it with MmFreeContiguousMemory.
    g_GuestState.hvStack =
        MmAllocateContiguousMemory(PAGE_SIZE * 2, HighestAcceptableAddress);
    RtlZeroMemory(g_GuestState.hvStack, PAGE_SIZE * 2);
}
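For reference, `__sgdt` and `__sidt` above store a 10-byte register image on x64: a 16-bit limit followed by a 64-bit linear base, with no padding. A minimal sketch of the layout the Gdtr/Idtr fields must match (the type name here is hypothetical; the real code uses its own definitions):

#pragma pack(push, 1)
typedef struct _DESC_TABLE_REG {
    unsigned short Limit;     /* size of the table in bytes, minus one */
    unsigned long long Base;  /* linear address of the first entry */
} DESC_TABLE_REG;
#pragma pack(pop)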
// Modifies IDTL so that PatchGuard fires soon.
_Use_decl_annotations_ NTSTATUS GMonInstallPatchCallback(void *context) {
  UNREFERENCED_PARAMETER(context);

  Idtr idt = {};
  __sidt(&idt);

  const auto old_limit = idt.limit;
  idt.limit = 0xffff;
  __lidt(&idt);
  __sidt(&idt);

  HYPERPLATFORM_LOG_INFO("Patched IDTL %04hx => %04hx", old_limit, idt.limit);
  return STATUS_SUCCESS;
}
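A caller can confirm the patch took effect on the current processor by simply re-reading IDTR. A minimal follow-up sketch (the function name is hypothetical; the Idtr type is the one from the snippet above):

static bool GMonpIsIdtlPatched() {
  Idtr idt = {};
  __sidt(&idt);
  return idt.limit == 0xffff;  // the value written by the patch callback
}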
void vcpu_subverted(void)
{
	/* Post-virtualization  */
	struct gdtr idt;
	__sidt(&idt);
	VCPU_DEBUG("Subverted, IDT: %p 0x%X\n", idt.base, idt.limit);
}
VOID LoadIDT(OUT PIDT pIdt)
{
    /* Former 32-bit inline-assembly version, kept for reference; the __sidt
       intrinsic below is equivalent and also builds with the x64 compiler,
       which does not support inline assembly:
    __asm {
        MOV EAX, [pIdt]
        SIDT [EAX]
    }
    */
    __sidt(pIdt);
}
void save_cpu_state(mon_guest_cpu_startup_state_t *s)
{
	ia32_gdtr_t gdtr;
	ia32_idtr_t idtr;
	ia32_selector_t sel;

	s->size_of_this_struct = sizeof(mon_guest_cpu_startup_state_t);
	s->version_of_this_struct = MON_GUEST_CPU_STARTUP_STATE_VERSION;

	__readgdtr(&gdtr);
	__sidt(&idtr);
	s->control.gdtr.base = (uint64_t)gdtr.base;
	s->control.gdtr.limit = (uint32_t)gdtr.limit;
	s->control.idtr.base = (uint64_t)idtr.base;
	s->control.idtr.limit = (uint32_t)idtr.limit;

	s->control.cr[IA32_CTRL_CR0] = __readcr0();
	s->control.cr[IA32_CTRL_CR2] = __readcr2();
	s->control.cr[IA32_CTRL_CR3] = __readcr3();
	s->control.cr[IA32_CTRL_CR4] = __readcr4();

	s->msr.msr_sysenter_cs = (uint32_t)__readmsr(IA32_MSR_SYSENTER_CS);
	s->msr.msr_sysenter_eip = __readmsr(IA32_MSR_SYSENTER_EIP);
	s->msr.msr_sysenter_esp = __readmsr(IA32_MSR_SYSENTER_ESP);
	s->msr.msr_efer = __readmsr(IA32_MSR_EFER);
	s->msr.msr_pat = __readmsr(IA32_MSR_PAT);
	s->msr.msr_debugctl = __readmsr(IA32_MSR_DEBUGCTL);
	s->msr.pending_exceptions = 0;
	s->msr.interruptibility_state = 0;
	s->msr.activity_state = 0;
	s->msr.smbase = 0;

	/* A non-null LDT is not supported here; leave the segment state
	 * unfilled in that case. */
	sel.sel16 = __readldtr();
	if (sel.bits.index != 0) {
		return;
	}

	s->seg.segment[IA32_SEG_LDTR].attributes = 0x00010000; /* unusable */
	s->seg.segment[IA32_SEG_TR].attributes = 0x0000808b;   /* present, busy TSS, 4 KB granularity */
	s->seg.segment[IA32_SEG_TR].limit = 0xffffffff;

	save_segment_data((uint16_t)__readcs(), &s->seg.segment[IA32_SEG_CS]);
	save_segment_data((uint16_t)__readds(), &s->seg.segment[IA32_SEG_DS]);
	save_segment_data((uint16_t)__reades(), &s->seg.segment[IA32_SEG_ES]);
	save_segment_data((uint16_t)__readfs(), &s->seg.segment[IA32_SEG_FS]);
	save_segment_data((uint16_t)__readgs(), &s->seg.segment[IA32_SEG_GS]);
	save_segment_data((uint16_t)__readss(), &s->seg.segment[IA32_SEG_SS]);
}
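save_segment_data() itself is not shown above. A minimal sketch of what such a helper typically does: look the selector up in the GDT and unpack the descriptor's base, limit, and attributes. The record type and field names below are assumptions for illustration, not the real mon_guest_cpu_startup_state_t API:

struct seg_data {
	uint64_t base;
	uint32_t limit;
	uint32_t attributes;	/* VMX access-rights format */
	uint16_t selector;
};

static void save_segment_data_sketch(uint16_t sel16, struct seg_data *seg)
{
	ia32_gdtr_t gdtr;
	uint8_t *desc;

	__readgdtr(&gdtr);
	desc = (uint8_t *)(gdtr.base + (sel16 & ~0x7)); /* 8-byte descriptor */

	seg->selector = sel16;
	/* base: bytes 2..4 and 7; limit: bytes 0..1 plus low nibble of 6 */
	seg->base = desc[2] | (desc[3] << 8) | (desc[4] << 16) | (desc[7] << 24);
	seg->limit = desc[0] | (desc[1] << 8) | ((desc[6] & 0x0f) << 16);
	if (desc[6] & 0x80)	/* G bit: limit counts 4 KB pages */
		seg->limit = (seg->limit << 12) | 0xfff;
	/* access byte in bits 7:0, flag nibble in bits 15:12 */
	seg->attributes = desc[5] | ((desc[6] & 0xf0) << 8);
}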
BOOL APIENTRY DllMain(HMODULE hModule, DWORD ul_reason_for_call, LPVOID lpReserved)
{
    /* Listing lines 2-17: SIDT probe; bail out when the IDT base falls in
       this window. */
    IDTR idtr;
    __sidt(&idtr);
    if (idtr.base > 0x8003F400 && idtr.base < 0x80047400) {
        return FALSE;
    }

    /* Listing lines 19-34: snapshot the process list. */
    PROCESSENTRY32 procentry;
    memset(&procentry, 0, sizeof(PROCESSENTRY32));
    procentry.dwSize = sizeof(procentry); /* 0x128 */
    HANDLE h = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0);
    if (h == INVALID_HANDLE_VALUE)
        return FALSE;

    /* Listing lines 36-65 (44-51 and 59-65 omitted): look for explorer.exe.
       The wide-string compare assumes a Unicode build, where PROCESSENTRY32
       is PROCESSENTRY32W. */
    int ret = Process32First(h, &procentry);
    while (ret) {
        if (!wcscmp(procentry.szExeFile, L"explorer.exe")) {
            break;
        }
        ret = Process32Next(h, &procentry);
    }

    /* Listing lines 66-81. Note the dangling else: it binds to the inner
       if, as braced here. */
    if (ret) {
        if (procentry.th32ParentProcessID == procentry.th32ProcessID)
            return FALSE;
        else if (ul_reason_for_call == DLL_PROCESS_DETACH)
            return FALSE;
    }

    /* Listing line 82: start the payload thread at a hard-coded address. */
    if (ul_reason_for_call == DLL_PROCESS_ATTACH)
        CreateThread(0, 0, (LPTHREAD_START_ROUTINE)0x100032D0, 0, 0, 0);
    return TRUE;
}
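The IDTR probe at the top works because SIDT is not a privileged instruction on 32-bit x86, so even user-mode code can observe where the kernel's IDT lives; the 0x8003F400..0x80047400 window comes straight from the snippet and matches typical IDT placement on 32-bit Windows XP. Pulled out as a standalone helper (name hypothetical), the check is just:

static BOOL IsIdtBaseInXpKernelRange(void)
{
    IDTR idtr;
    __sidt(&idtr);
    return idtr.base > 0x8003F400 && idtr.base < 0x80047400;
}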
VOID
FASTCALL
INIT_FUNCTION
KiGetMachineBootPointers(IN PKGDTENTRY *Gdt,
                         IN PKIDTENTRY *Idt,
                         IN PKIPCR *Pcr,
                         IN PKTSS *Tss)
{
    KDESCRIPTOR GdtDescriptor, IdtDescriptor;
    KGDTENTRY TssSelector, PcrSelector;
    USHORT Tr, Fs;

    /* Get GDT and IDT descriptors */
    Ke386GetGlobalDescriptorTable(&GdtDescriptor.Limit);
    __sidt(&IdtDescriptor.Limit);

    /* Save IDT and GDT */
    *Gdt = (PKGDTENTRY)GdtDescriptor.Base;
    *Idt = (PKIDTENTRY)IdtDescriptor.Base;

    /* Get TSS and FS Selectors */
    Tr = Ke386GetTr();
    Fs = Ke386GetFs();

    /* Get PCR Selector, mask it and get its GDT Entry */
    PcrSelector = *(PKGDTENTRY)((ULONG_PTR)*Gdt + (Fs & ~RPL_MASK));

    /* Get the KPCR itself */
    *Pcr = (PKIPCR)(ULONG_PTR)(PcrSelector.BaseLow |
                               PcrSelector.HighWord.Bytes.BaseMid << 16 |
                               PcrSelector.HighWord.Bytes.BaseHi << 24);

    /* Get TSS Selector, mask it and get its GDT Entry */
    TssSelector = *(PKGDTENTRY)((ULONG_PTR)*Gdt + (Tr & ~RPL_MASK));

    /* Get the KTSS itself */
    *Tss = (PKTSS)(ULONG_PTR)(TssSelector.BaseLow |
                              TssSelector.HighWord.Bytes.BaseMid << 16 |
                              TssSelector.HighWord.Bytes.BaseHi << 24);
}
_Use_decl_annotations_ EXTERN_C static bool VminitpSetupVMCS(
    const PER_PROCESSOR_DATA *ProcessorData, ULONG_PTR GuestStackPointer,
    ULONG_PTR GuestInstructionPointer, ULONG_PTR VmmStackPointer) {
  unsigned char error = 0;

  GDTR gdtr = {};
  __sgdt(&gdtr);
  IDTR idtr = {};
  __sidt(&idtr);

  VMX_VM_ENTER_CONTROLS vmEnterCtlRequested = {};
  vmEnterCtlRequested.Fields.IA32eModeGuest = true;
  VMX_VM_ENTER_CONTROLS vmEnterCtl = {
      VminitpAdjustControlValue(IA32_VMX_ENTRY_CTLS, vmEnterCtlRequested.All)};

  VMX_VM_EXIT_CONTROLS vmExitCtlRequested = {};
  vmExitCtlRequested.Fields.AcknowledgeInterruptOnExit = true;
  vmExitCtlRequested.Fields.HostAddressSpaceSize = true;
  VMX_VM_EXIT_CONTROLS vmExitCtl = {
      VminitpAdjustControlValue(IA32_VMX_EXIT_CTLS, vmExitCtlRequested.All)};

  VMX_PIN_BASED_CONTROLS vmPinCtlRequested = {};
  VMX_PIN_BASED_CONTROLS vmPinCtl = {
      VminitpAdjustControlValue(IA32_VMX_PINBASED_CTLS, vmPinCtlRequested.All)};

  VMX_CPU_BASED_CONTROLS vmCpuCtlRequested = {};
  vmCpuCtlRequested.Fields.RDTSCExiting = true;
  vmCpuCtlRequested.Fields.CR3LoadExiting = true;  // MOV to CR3
  vmCpuCtlRequested.Fields.CR8LoadExiting = true;  // MOV to CR8
  vmCpuCtlRequested.Fields.MovDRExiting = true;
  vmCpuCtlRequested.Fields.UseMSRBitmaps = true;
  vmCpuCtlRequested.Fields.ActivateSecondaryControl = true;
  VMX_CPU_BASED_CONTROLS vmCpuCtl = {VminitpAdjustControlValue(
      IA32_VMX_PROCBASED_CTLS, vmCpuCtlRequested.All)};

  VMX_SECONDARY_CPU_BASED_CONTROLS vmCpuCtl2Requested = {};
  vmCpuCtl2Requested.Fields.EnableRDTSCP = true;
  vmCpuCtl2Requested.Fields.DescriptorTableExiting = true;
  VMX_SECONDARY_CPU_BASED_CONTROLS vmCpuCtl2 = {VminitpAdjustControlValue(
      IA32_VMX_PROCBASED_CTLS2, vmCpuCtl2Requested.All)};

  // Set up the MSR bitmap

  // Activate VM-exit for RDMSR against all MSRs
  const auto bitMapReadLow =
      reinterpret_cast<UCHAR *>(ProcessorData->MsrBitmap);
  const auto bitMapReadHigh = bitMapReadLow + 1024;
  RtlFillMemory(bitMapReadLow, 1024, 0xff);   // read 00000000 - 00001fff
  RtlFillMemory(bitMapReadHigh, 1024, 0xff);  // read c0000000 - c0001fff

  // But ignore IA32_MPERF (000000e7) and IA32_APERF (000000e8)
  RTL_BITMAP bitMapReadLowHeader = {};
  RtlInitializeBitMap(&bitMapReadLowHeader,
                      reinterpret_cast<PULONG>(bitMapReadLow), 1024 * 8);
  RtlClearBits(&bitMapReadLowHeader, 0xe7, 2);

  // But ignore IA32_GS_BASE (c0000101) and IA32_KERNEL_GS_BASE (c0000102)
  RTL_BITMAP bitMapReadHighHeader = {};
  RtlInitializeBitMap(&bitMapReadHighHeader,
                      reinterpret_cast<PULONG>(bitMapReadHigh), 1024 * 8);
  RtlClearBits(&bitMapReadHighHeader, 0x101, 2);

  const auto msrBitmapPA = MmGetPhysicalAddress(ProcessorData->MsrBitmap);

  // Set up CR0 and CR4 bitmaps
  // Where a bit is masked, the shadow bit appears
  // Where a bit is not masked, the actual bit appears
  CR0_REG cr0mask = {};
  cr0mask.Fields.WP = true;
  CR4_REG cr4mask = {};
  cr4mask.Fields.PGE = true;

  // clang-format off
  /* 16-Bit Control Field */

  /* 16-Bit Guest-State Fields */
  error |= __vmx_vmwrite(GUEST_ES_SELECTOR, AsmReadES());
  error |= __vmx_vmwrite(GUEST_CS_SELECTOR, AsmReadCS());
  error |= __vmx_vmwrite(GUEST_SS_SELECTOR, AsmReadSS());
  error |= __vmx_vmwrite(GUEST_DS_SELECTOR, AsmReadDS());
  error |= __vmx_vmwrite(GUEST_FS_SELECTOR, AsmReadFS());
  error |= __vmx_vmwrite(GUEST_GS_SELECTOR, AsmReadGS());
  error |= __vmx_vmwrite(GUEST_LDTR_SELECTOR, AsmReadLDTR());
  error |= __vmx_vmwrite(GUEST_TR_SELECTOR, AsmReadTR());

  /* 16-Bit Host-State Fields */
  error |= __vmx_vmwrite(HOST_ES_SELECTOR, AsmReadES() & 0xf8);  // RPL and TI
  error |= __vmx_vmwrite(HOST_CS_SELECTOR, AsmReadCS() & 0xf8);  // have to be 0
  error |= __vmx_vmwrite(HOST_SS_SELECTOR, AsmReadSS() & 0xf8);
  error |= __vmx_vmwrite(HOST_DS_SELECTOR, AsmReadDS() & 0xf8);
  error |= __vmx_vmwrite(HOST_FS_SELECTOR, AsmReadFS() & 0xf8);
  error |= __vmx_vmwrite(HOST_GS_SELECTOR, AsmReadGS() & 0xf8);
  error |= __vmx_vmwrite(HOST_TR_SELECTOR, AsmReadTR() & 0xf8);

  /* 64-Bit Control Fields */
  error |= __vmx_vmwrite(IO_BITMAP_A, 0);
  error |= __vmx_vmwrite(IO_BITMAP_B, 0);
  error |= __vmx_vmwrite(MSR_BITMAP, msrBitmapPA.QuadPart);
  error |= __vmx_vmwrite(TSC_OFFSET, 0);

  /* 64-Bit Guest-State Fields */
  error |= __vmx_vmwrite(VMCS_LINK_POINTER, 0xffffffffffffffff);
  error |= __vmx_vmwrite(GUEST_IA32_DEBUGCTL, __readmsr(IA32_DEBUGCTL));

  /* 32-Bit Control Fields */
  error |= __vmx_vmwrite(PIN_BASED_VM_EXEC_CONTROL, vmPinCtl.All);
  error |= __vmx_vmwrite(CPU_BASED_VM_EXEC_CONTROL, vmCpuCtl.All);
  error |= __vmx_vmwrite(SECONDARY_VM_EXEC_CONTROL, vmCpuCtl2.All);
  error |= __vmx_vmwrite(EXCEPTION_BITMAP, 0);
  error |= __vmx_vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
  error |= __vmx_vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, 0);
  error |= __vmx_vmwrite(CR3_TARGET_COUNT, 0);
  error |= __vmx_vmwrite(VM_EXIT_CONTROLS, vmExitCtl.All);
  error |= __vmx_vmwrite(VM_EXIT_MSR_STORE_COUNT, 0);
  error |= __vmx_vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
  error |= __vmx_vmwrite(VM_ENTRY_CONTROLS, vmEnterCtl.All);
  error |= __vmx_vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0);
  error |= __vmx_vmwrite(VM_ENTRY_INTR_INFO_FIELD, 0);

  /* 32-Bit Guest-State Fields */
  error |= __vmx_vmwrite(GUEST_ES_LIMIT, GetSegmentLimit(AsmReadES()));
  error |= __vmx_vmwrite(GUEST_CS_LIMIT, GetSegmentLimit(AsmReadCS()));
  error |= __vmx_vmwrite(GUEST_SS_LIMIT, GetSegmentLimit(AsmReadSS()));
  error |= __vmx_vmwrite(GUEST_DS_LIMIT, GetSegmentLimit(AsmReadDS()));
  error |= __vmx_vmwrite(GUEST_FS_LIMIT, GetSegmentLimit(AsmReadFS()));
  error |= __vmx_vmwrite(GUEST_GS_LIMIT, GetSegmentLimit(AsmReadGS()));
  error |= __vmx_vmwrite(GUEST_LDTR_LIMIT, GetSegmentLimit(AsmReadLDTR()));
  error |= __vmx_vmwrite(GUEST_TR_LIMIT, GetSegmentLimit(AsmReadTR()));
  error |= __vmx_vmwrite(GUEST_GDTR_LIMIT, gdtr.Limit);
  error |= __vmx_vmwrite(GUEST_IDTR_LIMIT, idtr.Limit);
  error |= __vmx_vmwrite(GUEST_ES_AR_BYTES, VminitpGetSegmentAccessRight(AsmReadES()));
  error |= __vmx_vmwrite(GUEST_CS_AR_BYTES, VminitpGetSegmentAccessRight(AsmReadCS()));
  error |= __vmx_vmwrite(GUEST_SS_AR_BYTES, VminitpGetSegmentAccessRight(AsmReadSS()));
  error |= __vmx_vmwrite(GUEST_DS_AR_BYTES, VminitpGetSegmentAccessRight(AsmReadDS()));
  error |= __vmx_vmwrite(GUEST_FS_AR_BYTES, VminitpGetSegmentAccessRight(AsmReadFS()));
  error |= __vmx_vmwrite(GUEST_GS_AR_BYTES, VminitpGetSegmentAccessRight(AsmReadGS()));
  error |= __vmx_vmwrite(GUEST_LDTR_AR_BYTES, VminitpGetSegmentAccessRight(AsmReadLDTR()));
  error |= __vmx_vmwrite(GUEST_TR_AR_BYTES, VminitpGetSegmentAccessRight(AsmReadTR()));
  error |= __vmx_vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
  error |= __vmx_vmwrite(GUEST_ACTIVITY_STATE, 0);
  error |= __vmx_vmwrite(GUEST_SYSENTER_CS, __readmsr(IA32_SYSENTER_CS));

  /* 32-Bit Host-State Field */
  error |= __vmx_vmwrite(HOST_IA32_SYSENTER_CS, __readmsr(IA32_SYSENTER_CS));

  /* Natural-Width Control Fields */
  error |= __vmx_vmwrite(CR0_GUEST_HOST_MASK, cr0mask.All);
  error |= __vmx_vmwrite(CR4_GUEST_HOST_MASK, cr4mask.All);
  error |= __vmx_vmwrite(CR0_READ_SHADOW, __readcr0());
  error |= __vmx_vmwrite(CR4_READ_SHADOW, __readcr4());
  error |= __vmx_vmwrite(CR3_TARGET_VALUE0, 0);
  error |= __vmx_vmwrite(CR3_TARGET_VALUE1, 0);
  error |= __vmx_vmwrite(CR3_TARGET_VALUE2, 0);
  error |= __vmx_vmwrite(CR3_TARGET_VALUE3, 0);

  /* Natural-Width Guest-State Fields */
  error |= __vmx_vmwrite(GUEST_CR0, __readcr0());
  error |= __vmx_vmwrite(GUEST_CR3, __readcr3());
  error |= __vmx_vmwrite(GUEST_CR4, __readcr4());
  error |= __vmx_vmwrite(GUEST_ES_BASE, 0);
  error |= __vmx_vmwrite(GUEST_CS_BASE, 0);
  error |= __vmx_vmwrite(GUEST_SS_BASE, 0);
  error |= __vmx_vmwrite(GUEST_DS_BASE, 0);
  error |= __vmx_vmwrite(GUEST_FS_BASE, __readmsr(IA32_FS_BASE));
  error |= __vmx_vmwrite(GUEST_GS_BASE, __readmsr(IA32_GS_BASE));
  error |= __vmx_vmwrite(GUEST_LDTR_BASE, VminitpGetSegmentBase(gdtr.Address, AsmReadLDTR()));
  error |= __vmx_vmwrite(GUEST_TR_BASE, VminitpGetSegmentBase(gdtr.Address, AsmReadTR()));
  error |= __vmx_vmwrite(GUEST_GDTR_BASE, gdtr.Address);
  error |= __vmx_vmwrite(GUEST_IDTR_BASE, idtr.Address);
  error |= __vmx_vmwrite(GUEST_DR7, __readdr(7));
  error |= __vmx_vmwrite(GUEST_RSP, GuestStackPointer);
  error |= __vmx_vmwrite(GUEST_RIP, GuestInstructionPointer);
  error |= __vmx_vmwrite(GUEST_RFLAGS, __readeflags());
  error |= __vmx_vmwrite(GUEST_SYSENTER_ESP, __readmsr(IA32_SYSENTER_ESP));
  error |= __vmx_vmwrite(GUEST_SYSENTER_EIP, __readmsr(IA32_SYSENTER_EIP));

  /* Natural-Width Host-State Fields */
  error |= __vmx_vmwrite(HOST_CR0, __readcr0());
  error |= __vmx_vmwrite(HOST_CR3, __readcr3());
  error |= __vmx_vmwrite(HOST_CR4, __readcr4());
  error |= __vmx_vmwrite(HOST_FS_BASE, __readmsr(IA32_FS_BASE));
  error |= __vmx_vmwrite(HOST_GS_BASE, __readmsr(IA32_GS_BASE));
  error |= __vmx_vmwrite(HOST_TR_BASE, VminitpGetSegmentBase(gdtr.Address, AsmReadTR()));
  error |= __vmx_vmwrite(HOST_GDTR_BASE, gdtr.Address);
  error |= __vmx_vmwrite(HOST_IDTR_BASE, idtr.Address);
  error |= __vmx_vmwrite(HOST_IA32_SYSENTER_ESP, __readmsr(IA32_SYSENTER_ESP));
  error |= __vmx_vmwrite(HOST_IA32_SYSENTER_EIP, __readmsr(IA32_SYSENTER_EIP));
  error |= __vmx_vmwrite(HOST_RSP, VmmStackPointer);
  error |= __vmx_vmwrite(HOST_RIP, reinterpret_cast<size_t>(AsmVmmEntryPoint));
  // clang-format on

  const auto vmxStatus = static_cast<VMX_STATUS>(error);
  return vmxStatus == VMX_OK;
}
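The error |= pattern above only tells the caller that some VMWRITE failed, not which one. A stricter variant, sketched here, reports the first failing field, which makes VMCS bring-up bugs far easier to localize (the function name is hypothetical; the logging macro is borrowed from the HyperPlatform snippet earlier, so swap in whatever logger the project provides):

static unsigned char VminitpVmWriteChecked(size_t field, ULONG_PTR value) {
  const unsigned char error = __vmx_vmwrite(field, value);
  if (error) {
    HYPERPLATFORM_LOG_INFO("VMWRITE(%08Ix) failed with %d", field, error);
  }
  return error;
}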
// See: PREPARATION AND LAUNCHING A VIRTUAL MACHINE
_Use_decl_annotations_ static bool VmpSetupVMCS(
    const ProcessorData *processor_data, ULONG_PTR guest_stack_pointer,
    ULONG_PTR guest_instruction_pointer, ULONG_PTR vmm_stack_pointer) {
  Gdtr gdtr = {};
  __sgdt(&gdtr);
  Idtr idtr = {};
  __sidt(&idtr);

  // See: Algorithms for Determining VMX Capabilities
  const auto use_true_msrs =
      Ia32VmxBasicMsr{UtilReadMsr64(Msr::kIa32VmxBasic)}
          .fields.vmx_capability_hint;

  VmxVmEntryControls vm_entryctl_requested = {};
  vm_entryctl_requested.fields.ia32e_mode_guest = IsX64();
  VmxVmEntryControls vm_entryctl = {VmpAdjustControlValue(
      (use_true_msrs) ? Msr::kIa32VmxTrueEntryCtls : Msr::kIa32VmxEntryCtls,
      vm_entryctl_requested.all)};

  VmxVmExitControls vm_exitctl_requested = {};
  vm_exitctl_requested.fields.acknowledge_interrupt_on_exit = true;
  vm_exitctl_requested.fields.host_address_space_size = IsX64();
  VmxVmExitControls vm_exitctl = {VmpAdjustControlValue(
      (use_true_msrs) ? Msr::kIa32VmxTrueExitCtls : Msr::kIa32VmxExitCtls,
      vm_exitctl_requested.all)};

  VmxPinBasedControls vm_pinctl_requested = {};
  VmxPinBasedControls vm_pinctl = {
      VmpAdjustControlValue((use_true_msrs) ? Msr::kIa32VmxTruePinbasedCtls
                                            : Msr::kIa32VmxPinbasedCtls,
                            vm_pinctl_requested.all)};

  VmxProcessorBasedControls vm_procctl_requested = {};
  vm_procctl_requested.fields.invlpg_exiting = false;
  vm_procctl_requested.fields.rdtsc_exiting = false;
  vm_procctl_requested.fields.cr3_load_exiting = true;
  vm_procctl_requested.fields.cr8_load_exiting = false;  // NB: very frequent
  vm_procctl_requested.fields.mov_dr_exiting = true;
  vm_procctl_requested.fields.use_msr_bitmaps = true;
  vm_procctl_requested.fields.activate_secondary_control = true;
  VmxProcessorBasedControls vm_procctl = {
      VmpAdjustControlValue((use_true_msrs) ? Msr::kIa32VmxTrueProcBasedCtls
                                            : Msr::kIa32VmxProcBasedCtls,
                            vm_procctl_requested.all)};

  VmxSecondaryProcessorBasedControls vm_procctl2_requested = {};
  vm_procctl2_requested.fields.enable_ept = true;
  vm_procctl2_requested.fields.enable_rdtscp = true;  // required for Win10
  vm_procctl2_requested.fields.descriptor_table_exiting = true;
  // required for Win10
  vm_procctl2_requested.fields.enable_xsaves_xstors = true;
  VmxSecondaryProcessorBasedControls vm_procctl2 = {VmpAdjustControlValue(
      Msr::kIa32VmxProcBasedCtls2, vm_procctl2_requested.all)};

  // Set up CR0 and CR4 bitmaps
  // - Where a bit is masked, the shadow bit appears
  // - Where a bit is not masked, the actual bit appears
  // VM-exit occurs when a guest modifies any of those fields
  Cr0 cr0_mask = {};
  Cr4 cr4_mask = {};

  // See: PDPTE Registers
  // If PAE paging would be in use following an execution of MOV to CR0 or MOV
  // to CR4 (see Section 4.1.1) and the instruction is modifying any of CR0.CD,
  // CR0.NW, CR0.PG, CR4.PAE, CR4.PGE, CR4.PSE, or CR4.SMEP; then the PDPTEs
  // are loaded from the address in CR3.
  if (UtilIsX86Pae()) {
    cr0_mask.fields.pg = true;
    cr0_mask.fields.cd = true;
    cr0_mask.fields.nw = true;
    cr4_mask.fields.pae = true;
    cr4_mask.fields.pge = true;
    cr4_mask.fields.pse = true;
    cr4_mask.fields.smep = true;
  }

  const auto exception_bitmap =
      // 1 << InterruptionVector::kBreakpointException |
      // 1 << InterruptionVector::kGeneralProtectionException |
      // 1 << InterruptionVector::kPageFaultException |
      0;

  // clang-format off
  /* 16-Bit Control Field */

  /* 16-Bit Guest-State Fields */
  auto error = VmxStatus::kOk;
  error |= UtilVmWrite(VmcsField::kGuestEsSelector, AsmReadES());
  error |= UtilVmWrite(VmcsField::kGuestCsSelector, AsmReadCS());
  error |= UtilVmWrite(VmcsField::kGuestSsSelector, AsmReadSS());
  error |= UtilVmWrite(VmcsField::kGuestDsSelector, AsmReadDS());
  error |= UtilVmWrite(VmcsField::kGuestFsSelector, AsmReadFS());
  error |= UtilVmWrite(VmcsField::kGuestGsSelector, AsmReadGS());
  error |= UtilVmWrite(VmcsField::kGuestLdtrSelector, AsmReadLDTR());
  error |= UtilVmWrite(VmcsField::kGuestTrSelector, AsmReadTR());

  /* 16-Bit Host-State Fields */
  // RPL and TI have to be 0
  error |= UtilVmWrite(VmcsField::kHostEsSelector, AsmReadES() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostCsSelector, AsmReadCS() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostSsSelector, AsmReadSS() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostDsSelector, AsmReadDS() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostFsSelector, AsmReadFS() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostGsSelector, AsmReadGS() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostTrSelector, AsmReadTR() & 0xf8);

  /* 64-Bit Control Fields */
  error |= UtilVmWrite64(VmcsField::kIoBitmapA, 0);
  error |= UtilVmWrite64(VmcsField::kIoBitmapB, 0);
  error |= UtilVmWrite64(VmcsField::kMsrBitmap, UtilPaFromVa(processor_data->shared_data->msr_bitmap));
  error |= UtilVmWrite64(VmcsField::kEptPointer, EptGetEptPointer(processor_data->ept_data));

  /* 64-Bit Guest-State Fields */
  error |= UtilVmWrite64(VmcsField::kVmcsLinkPointer, MAXULONG64);
  error |= UtilVmWrite64(VmcsField::kGuestIa32Debugctl, UtilReadMsr64(Msr::kIa32Debugctl));
  if (UtilIsX86Pae()) {
    UtilLoadPdptes(__readcr3());
  }

  /* 32-Bit Control Fields */
  error |= UtilVmWrite(VmcsField::kPinBasedVmExecControl, vm_pinctl.all);
  error |= UtilVmWrite(VmcsField::kCpuBasedVmExecControl, vm_procctl.all);
  error |= UtilVmWrite(VmcsField::kExceptionBitmap, exception_bitmap);
  error |= UtilVmWrite(VmcsField::kPageFaultErrorCodeMask, 0);
  error |= UtilVmWrite(VmcsField::kPageFaultErrorCodeMatch, 0);
  error |= UtilVmWrite(VmcsField::kCr3TargetCount, 0);
  error |= UtilVmWrite(VmcsField::kVmExitControls, vm_exitctl.all);
  error |= UtilVmWrite(VmcsField::kVmExitMsrStoreCount, 0);
  error |= UtilVmWrite(VmcsField::kVmExitMsrLoadCount, 0);
  error |= UtilVmWrite(VmcsField::kVmEntryControls, vm_entryctl.all);
  error |= UtilVmWrite(VmcsField::kVmEntryMsrLoadCount, 0);
  error |= UtilVmWrite(VmcsField::kVmEntryIntrInfoField, 0);
  error |= UtilVmWrite(VmcsField::kSecondaryVmExecControl, vm_procctl2.all);

  /* 32-Bit Guest-State Fields */
  error |= UtilVmWrite(VmcsField::kGuestEsLimit, GetSegmentLimit(AsmReadES()));
  error |= UtilVmWrite(VmcsField::kGuestCsLimit, GetSegmentLimit(AsmReadCS()));
  error |= UtilVmWrite(VmcsField::kGuestSsLimit, GetSegmentLimit(AsmReadSS()));
  error |= UtilVmWrite(VmcsField::kGuestDsLimit, GetSegmentLimit(AsmReadDS()));
  error |= UtilVmWrite(VmcsField::kGuestFsLimit, GetSegmentLimit(AsmReadFS()));
  error |= UtilVmWrite(VmcsField::kGuestGsLimit, GetSegmentLimit(AsmReadGS()));
  error |= UtilVmWrite(VmcsField::kGuestLdtrLimit, GetSegmentLimit(AsmReadLDTR()));
  error |= UtilVmWrite(VmcsField::kGuestTrLimit, GetSegmentLimit(AsmReadTR()));
  error |= UtilVmWrite(VmcsField::kGuestGdtrLimit, gdtr.limit);
  error |= UtilVmWrite(VmcsField::kGuestIdtrLimit, idtr.limit);
  error |= UtilVmWrite(VmcsField::kGuestEsArBytes, VmpGetSegmentAccessRight(AsmReadES()));
  error |= UtilVmWrite(VmcsField::kGuestCsArBytes, VmpGetSegmentAccessRight(AsmReadCS()));
  error |= UtilVmWrite(VmcsField::kGuestSsArBytes, VmpGetSegmentAccessRight(AsmReadSS()));
  error |= UtilVmWrite(VmcsField::kGuestDsArBytes, VmpGetSegmentAccessRight(AsmReadDS()));
  error |= UtilVmWrite(VmcsField::kGuestFsArBytes, VmpGetSegmentAccessRight(AsmReadFS()));
  error |= UtilVmWrite(VmcsField::kGuestGsArBytes, VmpGetSegmentAccessRight(AsmReadGS()));
  error |= UtilVmWrite(VmcsField::kGuestLdtrArBytes, VmpGetSegmentAccessRight(AsmReadLDTR()));
  error |= UtilVmWrite(VmcsField::kGuestTrArBytes, VmpGetSegmentAccessRight(AsmReadTR()));
  error |= UtilVmWrite(VmcsField::kGuestInterruptibilityInfo, 0);
  error |= UtilVmWrite(VmcsField::kGuestActivityState, 0);
  error |= UtilVmWrite(VmcsField::kGuestSysenterCs, UtilReadMsr(Msr::kIa32SysenterCs));

  /* 32-Bit Host-State Field */
  error |= UtilVmWrite(VmcsField::kHostIa32SysenterCs, UtilReadMsr(Msr::kIa32SysenterCs));

  /* Natural-Width Control Fields */
  error |= UtilVmWrite(VmcsField::kCr0GuestHostMask, cr0_mask.all);
  error |= UtilVmWrite(VmcsField::kCr4GuestHostMask, cr4_mask.all);
  error |= UtilVmWrite(VmcsField::kCr0ReadShadow, __readcr0());
  error |= UtilVmWrite(VmcsField::kCr4ReadShadow, __readcr4());

  /* Natural-Width Guest-State Fields */
  error |= UtilVmWrite(VmcsField::kGuestCr0, __readcr0());
  error |= UtilVmWrite(VmcsField::kGuestCr3, __readcr3());
  error |= UtilVmWrite(VmcsField::kGuestCr4, __readcr4());
#if defined(_AMD64_)
  error |= UtilVmWrite(VmcsField::kGuestEsBase, 0);
  error |= UtilVmWrite(VmcsField::kGuestCsBase, 0);
  error |= UtilVmWrite(VmcsField::kGuestSsBase, 0);
  error |= UtilVmWrite(VmcsField::kGuestDsBase, 0);
  error |= UtilVmWrite(VmcsField::kGuestFsBase, UtilReadMsr(Msr::kIa32FsBase));
  error |= UtilVmWrite(VmcsField::kGuestGsBase, UtilReadMsr(Msr::kIa32GsBase));
#else
  error |= UtilVmWrite(VmcsField::kGuestEsBase, VmpGetSegmentBase(gdtr.base, AsmReadES()));
  error |= UtilVmWrite(VmcsField::kGuestCsBase, VmpGetSegmentBase(gdtr.base, AsmReadCS()));
  error |= UtilVmWrite(VmcsField::kGuestSsBase, VmpGetSegmentBase(gdtr.base, AsmReadSS()));
  error |= UtilVmWrite(VmcsField::kGuestDsBase, VmpGetSegmentBase(gdtr.base, AsmReadDS()));
  error |= UtilVmWrite(VmcsField::kGuestFsBase, VmpGetSegmentBase(gdtr.base, AsmReadFS()));
  error |= UtilVmWrite(VmcsField::kGuestGsBase, VmpGetSegmentBase(gdtr.base, AsmReadGS()));
#endif
  error |= UtilVmWrite(VmcsField::kGuestLdtrBase, VmpGetSegmentBase(gdtr.base, AsmReadLDTR()));
  error |= UtilVmWrite(VmcsField::kGuestTrBase, VmpGetSegmentBase(gdtr.base, AsmReadTR()));
  error |= UtilVmWrite(VmcsField::kGuestGdtrBase, gdtr.base);
  error |= UtilVmWrite(VmcsField::kGuestIdtrBase, idtr.base);
  error |= UtilVmWrite(VmcsField::kGuestDr7, __readdr(7));
  error |= UtilVmWrite(VmcsField::kGuestRsp, guest_stack_pointer);
  error |= UtilVmWrite(VmcsField::kGuestRip, guest_instruction_pointer);
  error |= UtilVmWrite(VmcsField::kGuestRflags, __readeflags());
  error |= UtilVmWrite(VmcsField::kGuestSysenterEsp, UtilReadMsr(Msr::kIa32SysenterEsp));
  error |= UtilVmWrite(VmcsField::kGuestSysenterEip, UtilReadMsr(Msr::kIa32SysenterEip));

  /* Natural-Width Host-State Fields */
  error |= UtilVmWrite(VmcsField::kHostCr0, __readcr0());
  error |= UtilVmWrite(VmcsField::kHostCr3, __readcr3());
  error |= UtilVmWrite(VmcsField::kHostCr4, __readcr4());
#if defined(_AMD64_)
  error |= UtilVmWrite(VmcsField::kHostFsBase, UtilReadMsr(Msr::kIa32FsBase));
  error |= UtilVmWrite(VmcsField::kHostGsBase, UtilReadMsr(Msr::kIa32GsBase));
#else
  error |= UtilVmWrite(VmcsField::kHostFsBase, VmpGetSegmentBase(gdtr.base, AsmReadFS()));
  error |= UtilVmWrite(VmcsField::kHostGsBase, VmpGetSegmentBase(gdtr.base, AsmReadGS()));
#endif
  error |= UtilVmWrite(VmcsField::kHostTrBase, VmpGetSegmentBase(gdtr.base, AsmReadTR()));
  error |= UtilVmWrite(VmcsField::kHostGdtrBase, gdtr.base);
  error |= UtilVmWrite(VmcsField::kHostIdtrBase, idtr.base);
  error |= UtilVmWrite(VmcsField::kHostIa32SysenterEsp, UtilReadMsr(Msr::kIa32SysenterEsp));
  error |= UtilVmWrite(VmcsField::kHostIa32SysenterEip, UtilReadMsr(Msr::kIa32SysenterEip));
  error |= UtilVmWrite(VmcsField::kHostRsp, vmm_stack_pointer);
  error |= UtilVmWrite(VmcsField::kHostRip, reinterpret_cast<ULONG_PTR>(AsmVmmEntryPoint));
  // clang-format on

  const auto vmx_status = static_cast<VmxStatus>(error);
  return vmx_status == VmxStatus::kOk;
}
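Both VmpAdjustControlValue() here and VminitpAdjustControlValue() in the previous snippet implement the same rule from the VMX capability MSRs: the low 32 bits of the MSR give the control bits that must be 1, and the high 32 bits give the bits that are allowed to be 1. A minimal sketch of that step (the name and signature are assumed, not the project's real helper):

static ULONG AdjustControlValueSketch(ULONG msr, ULONG requested_value) {
  LARGE_INTEGER msr_value = {};
  msr_value.QuadPart = __readmsr(msr);

  ULONG adjusted_value = requested_value;
  adjusted_value &= msr_value.HighPart;  // drop bits that may not be 1
  adjusted_value |= msr_value.LowPart;   // force bits that must be 1
  return adjusted_value;
}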
VOID
NTAPI
KiInitializePcr(IN PKIPCR Pcr,
                IN ULONG ProcessorNumber,
                IN PKTHREAD IdleThread,
                IN PVOID DpcStack)
{
    KDESCRIPTOR GdtDescriptor = {{0}, 0, 0}, IdtDescriptor = {{0}, 0, 0};
    PKGDTENTRY64 TssEntry;
    USHORT Tr = 0;

    /* Zero out the PCR */
    RtlZeroMemory(Pcr, sizeof(KIPCR));

    /* Set pointers to ourselves */
    Pcr->Self = (PKPCR)Pcr;
    Pcr->CurrentPrcb = &Pcr->Prcb;

    /* Set the PCR Version */
    Pcr->MajorVersion = PCR_MAJOR_VERSION;
    Pcr->MinorVersion = PCR_MINOR_VERSION;

    /* Set the PRCB Version */
    Pcr->Prcb.MajorVersion = 1;
    Pcr->Prcb.MinorVersion = 1;

    /* Set the Build Type */
    Pcr->Prcb.BuildType = 0;
#ifndef CONFIG_SMP
    Pcr->Prcb.BuildType |= PRCB_BUILD_UNIPROCESSOR;
#endif
#if DBG
    Pcr->Prcb.BuildType |= PRCB_BUILD_DEBUG;
#endif

    /* Set the Processor Number and current Processor Mask */
    Pcr->Prcb.Number = (UCHAR)ProcessorNumber;
    Pcr->Prcb.SetMember = 1ULL << ProcessorNumber;

    /* Get GDT and IDT descriptors */
    __sgdt(&GdtDescriptor.Limit);
    __sidt(&IdtDescriptor.Limit);
    Pcr->GdtBase = (PVOID)GdtDescriptor.Base;
    Pcr->IdtBase = (PKIDTENTRY)IdtDescriptor.Base;

    /* Get TSS Selector */
    __str(&Tr);
    ASSERT(Tr == KGDT64_SYS_TSS);

    /* Get TSS Entry */
    TssEntry = KiGetGdtEntry(Pcr->GdtBase, Tr);

    /* Get the KTSS itself */
    Pcr->TssBase = KiGetGdtDescriptorBase(TssEntry);
    Pcr->Prcb.RspBase = Pcr->TssBase->Rsp0; // FIXME

    /* Set DPC Stack */
    Pcr->Prcb.DpcStack = DpcStack;

    /* Setup the processor set */
    Pcr->Prcb.MultiThreadProcessorSet = Pcr->Prcb.SetMember;

    /* Clear DR6/7 to cleanup bootloader debugging */
    Pcr->Prcb.ProcessorState.SpecialRegisters.KernelDr6 = 0;
    Pcr->Prcb.ProcessorState.SpecialRegisters.KernelDr7 = 0;

    /* Set the Current Thread */
    Pcr->Prcb.CurrentThread = IdleThread;

    /* Start us out at PASSIVE_LEVEL */
    Pcr->Irql = PASSIVE_LEVEL;
    KeSetCurrentIrql(PASSIVE_LEVEL);
}
static bool setup_vmcs(struct vcpu *vcpu, uintptr_t sp, uintptr_t ip,
		       uintptr_t stack_base)
{
	struct gdtr gdtr;
	__sgdt(&gdtr);

	struct gdtr idtr;
	__sidt(&idtr);

	/* Get this CPU's EPT  */
	struct ept *ept = &vcpu->ept;

	u64 cr0 = __readcr0();
	u64 cr4 = __readcr4();
	u64 err = 0;

	u16 es = __reades();
	u16 cs = __readcs();
	u16 ss = __readss();
	u16 ds = __readds();
	u16 fs = __readfs();
	u16 gs = __readgs();
	u16 ldt = __sldt();
	u16 tr = __str();

	vcpu->g_idt.base = idtr.base;
	vcpu->g_idt.limit = idtr.limit;

	/* Copy the real IDT into the vcpu's shadow IDT, then install the
	 * #VE handler there. */
	struct kidt_entry64 *current = (struct kidt_entry64 *)idtr.base;
	struct kidt_entry64 *shadow = (struct kidt_entry64 *)vcpu->idt.base;
	unsigned count = idtr.limit / sizeof(*shadow);
	for (unsigned n = 0; n < count; ++n)
		memcpy(&shadow[n], &current[n], sizeof(*shadow));
	vcpu_put_idt(vcpu, cs, X86_TRAP_VE, __ept_violation);

	u8 msr_off = 0;
	if (__readmsr(MSR_IA32_VMX_BASIC) & VMX_BASIC_TRUE_CTLS)
		msr_off = 0xC;

	u64 vm_entry = VM_ENTRY_IA32E_MODE;	/* | VM_ENTRY_LOAD_IA32_PAT */
	adjust_ctl_val(MSR_IA32_VMX_ENTRY_CTLS + msr_off, &vm_entry);

	u64 vm_exit = VM_EXIT_ACK_INTR_ON_EXIT | VM_EXIT_HOST_ADDR_SPACE_SIZE;
	adjust_ctl_val(MSR_IA32_VMX_EXIT_CTLS + msr_off, &vm_exit);

	u64 vm_pinctl = 0;
	adjust_ctl_val(MSR_IA32_VMX_PINBASED_CTLS + msr_off, &vm_pinctl);

	u64 vm_cpuctl = CPU_BASED_ACTIVATE_SECONDARY_CONTROLS |
			CPU_BASED_USE_MSR_BITMAPS |
			CPU_BASED_MOV_DR_EXITING |
			CPU_BASED_USE_TSC_OFFSETING;
	adjust_ctl_val(MSR_IA32_VMX_PROCBASED_CTLS + msr_off, &vm_cpuctl);

	u64 vm_2ndctl = SECONDARY_EXEC_ENABLE_EPT |
			SECONDARY_EXEC_TSC_SCALING |
			SECONDARY_EXEC_DESC_TABLE_EXITING |
			SECONDARY_EXEC_XSAVES |
			SECONDARY_EXEC_RDTSCP |
			SECONDARY_EXEC_ENABLE_VMFUNC |
			SECONDARY_EXEC_ENABLE_VE;
	adjust_ctl_val(MSR_IA32_VMX_PROCBASED_CTLS2, &vm_2ndctl);

	/* Processor control fields  */
	err |= __vmx_vmwrite(PIN_BASED_VM_EXEC_CONTROL, vm_pinctl);
	err |= __vmx_vmwrite(CPU_BASED_VM_EXEC_CONTROL, vm_cpuctl);
	err |= __vmx_vmwrite(EXCEPTION_BITMAP, __EXCEPTION_BITMAP);
	err |= __vmx_vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
	err |= __vmx_vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, 0);
	err |= __vmx_vmwrite(CR3_TARGET_COUNT, 0);
	err |= __vmx_vmwrite(VM_EXIT_CONTROLS, vm_exit);
	err |= __vmx_vmwrite(VM_EXIT_MSR_STORE_COUNT, 0);
	err |= __vmx_vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
	err |= __vmx_vmwrite(VM_ENTRY_CONTROLS, vm_entry);
	err |= __vmx_vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0);
	err |= __vmx_vmwrite(VM_ENTRY_INTR_INFO_FIELD, 0);
	err |= __vmx_vmwrite(SECONDARY_VM_EXEC_CONTROL, vm_2ndctl);

	/* Control Fields  */
	err |= __vmx_vmwrite(IO_BITMAP_A, 0);
	err |= __vmx_vmwrite(IO_BITMAP_B, 0);
	err |= __vmx_vmwrite(MSR_BITMAP, __pa(ksm.msr_bitmap));
	err |= __vmx_vmwrite(EPT_POINTER, EPTP(ept, EPTP_DEFAULT));
	err |= __vmx_vmwrite(VM_FUNCTION_CTRL, VM_FUNCTION_CTL_EPTP_SWITCHING);
	err |= __vmx_vmwrite(EPTP_INDEX, EPTP_DEFAULT);
	err |= __vmx_vmwrite(EPTP_LIST_ADDRESS, __pa(ept->ptr_list));
	err |= __vmx_vmwrite(VE_INFO_ADDRESS, __pa(vcpu->ve));
	err |= __vmx_vmwrite(CR0_GUEST_HOST_MASK, __CR0_GUEST_HOST_MASK);
	err |= __vmx_vmwrite(CR4_GUEST_HOST_MASK, __CR4_GUEST_HOST_MASK);
	err |= __vmx_vmwrite(CR0_READ_SHADOW, cr0);
	err |= __vmx_vmwrite(CR4_READ_SHADOW, cr4);
	err |= __vmx_vmwrite(VMCS_LINK_POINTER, -1ULL);

	/* Guest  */
	err |= __vmx_vmwrite(GUEST_ES_SELECTOR, es);
	err |= __vmx_vmwrite(GUEST_CS_SELECTOR, cs);
	err |= __vmx_vmwrite(GUEST_SS_SELECTOR, ss);
	err |= __vmx_vmwrite(GUEST_DS_SELECTOR, ds);
	err |= __vmx_vmwrite(GUEST_FS_SELECTOR, fs);
	err |= __vmx_vmwrite(GUEST_GS_SELECTOR, gs);
	err |= __vmx_vmwrite(GUEST_LDTR_SELECTOR, ldt);
	err |= __vmx_vmwrite(GUEST_TR_SELECTOR, tr);
	err |= __vmx_vmwrite(GUEST_ES_LIMIT, __segmentlimit(es));
	err |= __vmx_vmwrite(GUEST_CS_LIMIT, __segmentlimit(cs));
	err |= __vmx_vmwrite(GUEST_SS_LIMIT, __segmentlimit(ss));
	err |= __vmx_vmwrite(GUEST_DS_LIMIT, __segmentlimit(ds));
	err |= __vmx_vmwrite(GUEST_FS_LIMIT, __segmentlimit(fs));
	err |= __vmx_vmwrite(GUEST_GS_LIMIT, __segmentlimit(gs));
	err |= __vmx_vmwrite(GUEST_LDTR_LIMIT, __segmentlimit(ldt));
	err |= __vmx_vmwrite(GUEST_TR_LIMIT, __segmentlimit(tr));
	err |= __vmx_vmwrite(GUEST_GDTR_LIMIT, gdtr.limit);
	err |= __vmx_vmwrite(GUEST_IDTR_LIMIT, idtr.limit);
	err |= __vmx_vmwrite(GUEST_ES_AR_BYTES, __accessright(es));
	err |= __vmx_vmwrite(GUEST_CS_AR_BYTES, __accessright(cs));
	err |= __vmx_vmwrite(GUEST_SS_AR_BYTES, __accessright(ss));
	err |= __vmx_vmwrite(GUEST_DS_AR_BYTES, __accessright(ds));
	err |= __vmx_vmwrite(GUEST_FS_AR_BYTES, __accessright(fs));
	err |= __vmx_vmwrite(GUEST_GS_AR_BYTES, __accessright(gs));
	err |= __vmx_vmwrite(GUEST_LDTR_AR_BYTES, __accessright(ldt));
	err |= __vmx_vmwrite(GUEST_TR_AR_BYTES, __accessright(tr));
	err |= __vmx_vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
	err |= __vmx_vmwrite(GUEST_ACTIVITY_STATE, 0);
	err |= __vmx_vmwrite(GUEST_IA32_DEBUGCTL, __readmsr(MSR_IA32_DEBUGCTLMSR));
	err |= __vmx_vmwrite(GUEST_SYSENTER_CS, __readmsr(MSR_IA32_SYSENTER_CS));
	err |= __vmx_vmwrite(GUEST_CR0, cr0);
	err |= __vmx_vmwrite(GUEST_CR3, ksm.origin_cr3);
	err |= __vmx_vmwrite(GUEST_CR4, cr4);
	err |= __vmx_vmwrite(GUEST_ES_BASE, 0);
	err |= __vmx_vmwrite(GUEST_CS_BASE, 0);
	err |= __vmx_vmwrite(GUEST_SS_BASE, 0);
	err |= __vmx_vmwrite(GUEST_DS_BASE, 0);
	err |= __vmx_vmwrite(GUEST_FS_BASE, __readmsr(MSR_IA32_FS_BASE));
	err |= __vmx_vmwrite(GUEST_GS_BASE, __readmsr(MSR_IA32_GS_BASE));
	err |= __vmx_vmwrite(GUEST_LDTR_BASE, __segmentbase(gdtr.base, ldt));
	err |= __vmx_vmwrite(GUEST_TR_BASE, __segmentbase(gdtr.base, tr));
	err |= __vmx_vmwrite(GUEST_GDTR_BASE, gdtr.base);
	err |= __vmx_vmwrite(GUEST_IDTR_BASE, vcpu->idt.base);
	err |= __vmx_vmwrite(GUEST_DR7, __readdr(7));
	err |= __vmx_vmwrite(GUEST_RSP, sp);
	err |= __vmx_vmwrite(GUEST_RIP, ip);
	err |= __vmx_vmwrite(GUEST_RFLAGS, __readeflags());
	err |= __vmx_vmwrite(GUEST_SYSENTER_ESP, __readmsr(MSR_IA32_SYSENTER_ESP));
	err |= __vmx_vmwrite(GUEST_SYSENTER_EIP, __readmsr(MSR_IA32_SYSENTER_EIP));

	/* Host  */
	err |= __vmx_vmwrite(HOST_ES_SELECTOR, es & 0xf8);
	err |= __vmx_vmwrite(HOST_CS_SELECTOR, cs & 0xf8);
	err |= __vmx_vmwrite(HOST_SS_SELECTOR, ss & 0xf8);
	err |= __vmx_vmwrite(HOST_DS_SELECTOR, ds & 0xf8);
	err |= __vmx_vmwrite(HOST_FS_SELECTOR, fs & 0xf8);
	err |= __vmx_vmwrite(HOST_GS_SELECTOR, gs & 0xf8);
	err |= __vmx_vmwrite(HOST_TR_SELECTOR, tr & 0xf8);
	err |= __vmx_vmwrite(HOST_CR0, cr0);
	err |= __vmx_vmwrite(HOST_CR3, ksm.kernel_cr3);
	err |= __vmx_vmwrite(HOST_CR4, cr4);
	err |= __vmx_vmwrite(HOST_FS_BASE, __readmsr(MSR_IA32_FS_BASE));
	err |= __vmx_vmwrite(HOST_GS_BASE, __readmsr(MSR_IA32_GS_BASE));
	err |= __vmx_vmwrite(HOST_TR_BASE, __segmentbase(gdtr.base, tr));
	err |= __vmx_vmwrite(HOST_GDTR_BASE, gdtr.base);
	err |= __vmx_vmwrite(HOST_IDTR_BASE, idtr.base);
	err |= __vmx_vmwrite(HOST_IA32_SYSENTER_CS, __readmsr(MSR_IA32_SYSENTER_CS));
	err |= __vmx_vmwrite(HOST_IA32_SYSENTER_ESP, __readmsr(MSR_IA32_SYSENTER_ESP));
	err |= __vmx_vmwrite(HOST_IA32_SYSENTER_EIP, __readmsr(MSR_IA32_SYSENTER_EIP));
	err |= __vmx_vmwrite(HOST_RSP, stack_base);
	err |= __vmx_vmwrite(HOST_RIP, (uintptr_t)__vmx_entrypoint);

	return err == 0;
}
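The __accessright() helper used above converts a selector into the 32-bit VMCS access-rights format. A minimal sketch of how such a helper is commonly written, assuming a __lar() wrapper around the LAR instruction: LAR returns the descriptor's access bytes shifted left by 8, and a null selector must be reported as unusable (bit 16):

static inline u32 accessright_sketch(u16 selector)
{
	if (selector)
		return (__lar(selector) >> 8) & 0xf0ff;

	return 0x10000;	/* null selector: unusable segment */
}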