_IRQL_requires_same_
static DECLSPEC_NOINLINE
BOOLEAN
EvtchnInterruptHandler(
    __in      PKINTERRUPT Interrupt,
    __in_opt  PVOID Argument
    )
{
    PXENIFACE_EVTCHN_CONTEXT Context = Argument;
    PROCESSOR_NUMBER ProcNumber;
    ULONG ProcIndex;

    UNREFERENCED_PARAMETER(Interrupt);
    ASSERT(Context != NULL);

    KeGetCurrentProcessorNumberEx(&ProcNumber);
    ProcIndex = KeGetProcessorIndexFromNumber(&ProcNumber);

    if (!KeInsertQueueDpc(&Context->Dpc, NULL, NULL)) {
        XenIfaceDebugPrint(TRACE,
                           "NOT INSERTED: Context %p, Port %lu, FO %p, Cpu %lu\n",
                           Context, Context->LocalPort, Context->FileObject, ProcIndex);
    }

    return TRUE;
}
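//
// Illustrative sketch (not from the XenIface source): one plausible way the
// Dpc member used above could be initialized and bound to a particular CPU
// before the interrupt is connected. EvtchnDpc and this helper are names made
// up for the example.
//
static KDEFERRED_ROUTINE EvtchnDpc;   // hypothetical deferred routine

static VOID
EvtchnPrepareDpc(
    __in PXENIFACE_EVTCHN_CONTEXT Context,
    __in PPROCESSOR_NUMBER ProcNumber
    )
{
    // Associate the DPC with its deferred routine and context block.
    KeInitializeDpc(&Context->Dpc, EvtchnDpc, Context);

    // Queue the DPC on the same processor that will service the interrupt
    // (KeSetTargetProcessorDpcEx is the processor-group-aware variant).
    (VOID) KeSetTargetProcessorDpcEx(&Context->Dpc, ProcNumber);
}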
static __inline VOID
GetAffinityForCurrentCpu(PGROUP_AFFINITY pAffinity)
{
    PROCESSOR_NUMBER ProcNum;

    KeGetCurrentProcessorNumberEx(&ProcNum);
    pAffinity->Group = ProcNum.Group;
    pAffinity->Mask = 1;
    pAffinity->Mask <<= ProcNum.Number;
}
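//
// Illustrative usage sketch (not from the original driver): pin the calling
// thread to the processor it currently runs on, then restore the previous
// affinity. Both routines below are documented kernel APIs and must be called
// at IRQL <= APC_LEVEL.
//
static VOID
ExampleRunOnCurrentCpuOnly(VOID)
{
    GROUP_AFFINITY affinity;
    GROUP_AFFINITY previousAffinity;

    GetAffinityForCurrentCpu(&affinity);

    // Restrict the thread to the single CPU described by 'affinity'.
    KeSetSystemGroupAffinityThread(&affinity, &previousAffinity);

    // ... per-CPU work goes here ...

    // Restore whatever group affinity the thread had before.
    KeRevertToUserGroupAffinityThread(&previousAffinity);
}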
// Virtualize the current processor
_Use_decl_annotations_ static NTSTATUS VmpStartVM(void *context) {
  HYPERPLATFORM_LOG_INFO("Initializing VMX for the processor %d.",
                         KeGetCurrentProcessorNumberEx(nullptr));
  const auto ok = AsmInitializeVm(VmpInitializeVm, context);
  NT_ASSERT(VmpIsVmmInstalled() == ok);
  if (!ok) {
    return STATUS_UNSUCCESSFUL;
  }
  HYPERPLATFORM_LOG_INFO("Initialized successfully.");
  return STATUS_SUCCESS;
}
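// Illustrative sketch only (not HyperPlatform's own code): VmpStartVM has to
// run once on every logical processor. One way to do that from PASSIVE_LEVEL
// is to walk the active processors and temporarily move the calling thread
// onto each of them. RunOnEachProcessorExample is a name made up for this
// sketch; the project drives this through its own for-each-processor utility.
static NTSTATUS RunOnEachProcessorExample(NTSTATUS (*callback)(void *),
                                          void *context) {
  PAGED_CODE();
  const ULONG count = KeQueryActiveProcessorCountEx(ALL_PROCESSOR_GROUPS);
  for (ULONG index = 0; index < count; ++index) {
    PROCESSOR_NUMBER processor_number = {};
    NTSTATUS status = KeGetProcessorNumberFromIndex(index, &processor_number);
    if (!NT_SUCCESS(status)) {
      return status;
    }
    // Pin this thread to the target processor while the callback runs.
    GROUP_AFFINITY affinity = {};
    affinity.Group = processor_number.Group;
    affinity.Mask = KAFFINITY(1) << processor_number.Number;
    GROUP_AFFINITY previous_affinity = {};
    KeSetSystemGroupAffinityThread(&affinity, &previous_affinity);

    status = callback(context);

    KeRevertToUserGroupAffinityThread(&previous_affinity);
    if (!NT_SUCCESS(status)) {
      return status;
    }
  }
  return STATUS_SUCCESS;
}
// Usage sketch: RunOnEachProcessorExample(VmpStartVM, context);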
KAFFINITY
restrictCurrentThreadToSecondaryCores() throw ()
{
    //
    // Set the thread affinity mask so that the current thread can be
    // scheduled on any processor except CPU0.
    //
    KAFFINITY callerAffinity;

    NT_ASSERTMSG("IRQL unexpected", KeGetCurrentIrql() < DISPATCH_LEVEL);

    ULONG numCpus = KeQueryActiveProcessorCountEx(ALL_PROCESSOR_GROUPS);
    ULONG noCpu0AffinityMask = (~(ULONG(~0x0) << numCpus) & ULONG(~0x1));

    callerAffinity = KeSetSystemAffinityThreadEx(KAFFINITY(noCpu0AffinityMask));

    NT_ASSERTMSG("Thread affinity not set as requested",
                 KeGetCurrentProcessorNumberEx(NULL) != 0);

    return callerAffinity;
}
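//
// Illustrative usage (assumed, not part of the original source): the affinity
// returned above is typically restored once the CPU0-sensitive work is done.
//
VOID
exampleDoWorkOffCpu0() throw ()
{
    KAFFINITY previousAffinity = restrictCurrentThreadToSecondaryCores();

    // ... work that must not run on CPU0 ...

    //
    // Put the calling thread back on the affinity it had before.
    //
    KeRevertToUserAffinityThreadEx(previousAffinity);
}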
static CCHAR FindReceiveQueueForCurrentCpu(PPARANDIS_SCALING_SETTINGS RSSScalingSettings)
{
    PROCESSOR_NUMBER CurrProcNum;
    ULONG CurrProcIdx;

    KeGetCurrentProcessorNumberEx(&CurrProcNum);
    CurrProcIdx = KeGetProcessorIndexFromNumber(&CurrProcNum);

    ASSERT(CurrProcIdx != INVALID_PROCESSOR_INDEX);

    if (CurrProcIdx >= RSSScalingSettings->CPUIndexMappingSize)
        return PARANDIS_RECEIVE_QUEUE_UNCLASSIFIED;

    return RSSScalingSettings->CPUIndexMapping[CurrProcIdx];
}
/*
 * --------------------------------------------------------------------------
 * OvsDeferredActionsLevelDec --
 *     The function decrements the deferred action execution level
 *     corresponding to the current processor.
 * --------------------------------------------------------------------------
 */
VOID
OvsDeferredActionsLevelDec()
{
    ULONG index = 0;
    KIRQL oldIrql = KeGetCurrentIrql();

    if (oldIrql < DISPATCH_LEVEL) {
        KeRaiseIrqlToDpcLevel();
    }

    index = KeGetCurrentProcessorNumberEx(NULL);
    deferredData[index].level--;

    if (oldIrql < DISPATCH_LEVEL) {
        KeLowerIrql(oldIrql);
    }
}
VOID
ShvVpCallbackDpc (
    _In_ PRKDPC Dpc,
    _In_opt_ PVOID Context,
    _In_opt_ PVOID SystemArgument1,
    _In_opt_ PVOID SystemArgument2
    )
{
    PSHV_VP_DATA vpData;
    UNREFERENCED_PARAMETER(Dpc);

    //
    // Get the per-VP data for this logical processor
    //
    vpData = &ShvGlobalData->VpData[KeGetCurrentProcessorNumberEx(NULL)];

    //
    // Check if we are loading, or unloading
    //
    if (ARGUMENT_PRESENT(Context))
    {
        //
        // Initialize the virtual processor
        //
        ShvVpInitialize(vpData, (ULONG64)Context);
    }
    else
    {
        //
        // Tear down the virtual processor
        //
        ShvVpUninitialize(vpData);
    }

    //
    // Wait for all DPCs to synchronize at this point
    //
    KeSignalCallDpcSynchronize(SystemArgument2);

    //
    // Mark the DPC as being complete
    //
    KeSignalCallDpcDone(SystemArgument1);
}
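//
// Illustrative sketch (names other than the Ke* routines are made up for this
// example): this DPC is designed to be broadcast to every logical processor
// with KeGenericCallDpc, which is also what supplies the SystemArgument1 and
// SystemArgument2 values consumed by KeSignalCallDpcDone and
// KeSignalCallDpcSynchronize above. Depending on the WDK headers in use,
// these three routines may need to be declared manually.
//
VOID
ShvExampleLoadOnAllProcessors (
    _In_ ULONG64 SystemDirectoryTableBase
    )
{
    //
    // Passing a non-NULL context asks each processor to initialize itself;
    // passing NULL would request a teardown instead.
    //
    KeGenericCallDpc(ShvVpCallbackDpc, (PVOID)SystemDirectoryTableBase);
}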
/*
 * --------------------------------------------------------------------------
 * OvsDeferredActionsQueueGet --
 *     The function returns the deferred action queue corresponding to the
 *     current processor.
 * --------------------------------------------------------------------------
 */
POVS_DEFERRED_ACTION_QUEUE
OvsDeferredActionsQueueGet()
{
    POVS_DEFERRED_ACTION_QUEUE queue = NULL;
    ULONG index = 0;
    KIRQL oldIrql = KeGetCurrentIrql();

    if (oldIrql < DISPATCH_LEVEL) {
        KeRaiseIrqlToDpcLevel();
    }

    index = KeGetCurrentProcessorNumberEx(NULL);
    queue = &deferredData[index].queue;

    if (oldIrql < DISPATCH_LEVEL) {
        KeLowerIrql(oldIrql);
    }

    return queue;
}
/*
 * --------------------------------------------------------------------------
 * OvsDeferredActionsLevelGet --
 *     The function returns the deferred action execution level corresponding
 *     to the current processor.
 * --------------------------------------------------------------------------
 */
UINT32
OvsDeferredActionsLevelGet()
{
    UINT32 level = 0;
    ULONG index = 0;
    KIRQL oldIrql = KeGetCurrentIrql();

    if (oldIrql < DISPATCH_LEVEL) {
        KeRaiseIrqlToDpcLevel();
    }

    index = KeGetCurrentProcessorNumberEx(NULL);
    level = deferredData[index].level;

    if (oldIrql < DISPATCH_LEVEL) {
        KeLowerIrql(oldIrql);
    }

    return level;
}
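/*
 * --------------------------------------------------------------------------
 * OvsDeferredActionsLevelInc --
 *     Illustrative counterpart (a sketch, not copied from the OVS tree): the
 *     matching increment presumably follows the same pattern -- raise to
 *     DISPATCH_LEVEL so the thread cannot migrate between reading the
 *     processor number and touching its per-CPU slot, then restore the IRQL.
 * --------------------------------------------------------------------------
 */
VOID
OvsDeferredActionsLevelInc()
{
    ULONG index = 0;
    KIRQL oldIrql = KeGetCurrentIrql();

    if (oldIrql < DISPATCH_LEVEL) {
        KeRaiseIrqlToDpcLevel();
    }

    index = KeGetCurrentProcessorNumberEx(NULL);
    deferredData[index].level++;

    if (oldIrql < DISPATCH_LEVEL) {
        KeLowerIrql(oldIrql);
    }
}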
VOID
ShvVpInitialize (
    _In_ PSHV_VP_DATA Data,
    _In_ ULONG64 SystemDirectoryTableBase
    )
{
    //
    // Store the hibernation state of the processor, which contains all the
    // special registers and MSRs which are what the VMCS will need as part
    // of its setup. This avoids using assembly sequences and manually reading
    // this data.
    //
    KeSaveStateForHibernate(&Data->HostState);

    //
    // Then, capture the entire register state. We will need this, as once we
    // launch the VM, it will begin execution at the defined guest instruction
    // pointer, which is being captured as part of this call. In other words,
    // we will return right where we were, but with all our registers corrupted
    // by the VMCS/VMX initialization code (as guest state does not include
    // register state). By saving the context here, which includes all general
    // purpose registers, we guarantee that we return with all of our starting
    // register values as well!
    //
    RtlCaptureContext(&Data->HostState.ContextFrame);

    //
    // As per the above, we might be here because the VM has actually launched.
    // We can check this by verifying the value of the VmxEnabled field, which
    // is set to 1 right before VMXLAUNCH is performed. We do not use the Data
    // parameter or any other local register in this function, and in fact have
    // defined VmxEnabled as volatile, because as per the above, our register
    // state is currently dirty due to the VMCALL itself. By using the global
    // variable combined with an API call, we also make sure that the compiler
    // will not optimize this access in any way, even on LTCG/Ox builds.
    //
    if (ShvGlobalData->VpData[KeGetCurrentProcessorNumberEx(NULL)].VmxEnabled == 1)
    {
        //
        // We now indicate that the VM has launched, and that we are about to
        // restore the GPRs back to their original values. This will have the
        // effect of putting us yet *AGAIN* at the previous line of code, but
        // this time the value of VmxEnabled will be two, bypassing the if and
        // else if checks.
        //
        ShvGlobalData->VpData[KeGetCurrentProcessorNumberEx(NULL)].VmxEnabled = 2;

        //
        // And finally, restore the context, so that all register and stack
        // state is finally restored. Note that by continuing to reference the
        // per-VP data this way, the compiler will continue to generate non-
        // optimized accesses, guaranteeing that no previous register state
        // will be used.
        //
        RtlRestoreContext(
            &ShvGlobalData->VpData[KeGetCurrentProcessorNumberEx(NULL)].HostState.ContextFrame,
            NULL);
    }

    //
    // If we are in this branch comparison, it means that we have not yet
    // attempted to launch the VM, nor that we have launched it. In other
    // words, this is the first time in ShvVpInitialize. Because of this,
    // we are free to use all register state, as it is ours to use.
    //
    else if (Data->VmxEnabled == 0)
    {
        //
        // First, capture the value of the PML4 for the SYSTEM process, so that
        // all virtual processors, regardless of which process the current LP
        // has interrupted, can share the correct kernel address space.
        //
        Data->SystemDirectoryTableBase = SystemDirectoryTableBase;

        //
        // Then, attempt to initialize VMX on this processor
        //
        ShvVmxLaunchOnVp(Data);
    }
}
DECLSPEC_NORETURN
EXTERN_C
VOID
ShvVmxEntryHandler (
    _In_ PCONTEXT Context
    )
{
    SHV_VP_STATE guestContext;
    PSHV_VP_DATA vpData;

    //
    // Because we run with interrupts disabled during the entire hypervisor's
    // exit handling, raise the IRQL to HIGH_LEVEL which matches the reality of
    // the situation. This will block IPIs and the clock interrupt timer, which
    // means that it's critical to spend as little time here as possible. You
    // can expect CLOCK_WATCHDOG_TIMEOUT bugchecks to happen otherwise. If you
    // choose to enable interrupts, note that this will result in further
    // crashes as we are not on a correct OS stack, and you will be hitting
    // crashes if RtlpCheckStackLimits is ever called, or if PatchGuard
    // validates the RSP value.
    //
    KeRaiseIrql(HIGH_LEVEL, &guestContext.GuestIrql);

    //
    // Because we had to use RCX when calling RtlCaptureContext, its true value
    // was actually pushed on the stack right before the call. Go dig into the
    // stack to find it, and overwrite the bogus value that's there now.
    //
    Context->Rcx = *(PULONG64)((ULONG_PTR)Context - sizeof(Context->Rcx));

    //
    // Get the per-VP data for this processor.
    //
    vpData = &ShvGlobalData->VpData[KeGetCurrentProcessorNumberEx(NULL)];

    //
    // Build a little stack context to make it easier to keep track of certain
    // guest state, such as the RIP/RSP/RFLAGS, and the exit reason. The rest
    // of the general purpose registers come from the context structure that we
    // captured on our own with RtlCaptureContext in the assembly entrypoint.
    //
    guestContext.GuestEFlags = ShvVmxRead(GUEST_RFLAGS);
    guestContext.GuestRip = ShvVmxRead(GUEST_RIP);
    guestContext.GuestRsp = ShvVmxRead(GUEST_RSP);
    guestContext.ExitReason = ShvVmxRead(VM_EXIT_REASON) & 0xFFFF;
    guestContext.VpRegs = Context;
    guestContext.ExitVm = FALSE;

    //
    // Call the generic handler
    //
    ShvVmxHandleExit(&guestContext);

    //
    // Did we hit the magic exit sequence, or should we resume back to the VM
    // context?
    //
    if (guestContext.ExitVm)
    {
        //
        // When running in VMX root mode, the processor will set limits of the
        // GDT and IDT to 0xFFFF (notice that there are no Host VMCS fields to
        // set these values). This causes problems with PatchGuard, which will
        // believe that the GDTR and IDTR have been modified by malware, and
        // eventually crash the system. Since we know what the original state
        // of the GDTR and IDTR was, simply restore it now.
        //
        __lgdt(&vpData->HostState.SpecialRegisters.Gdtr.Limit);
        __lidt(&vpData->HostState.SpecialRegisters.Idtr.Limit);

        //
        // Our DPC routine may have interrupted an arbitrary user process, and
        // not an idle or system thread as usually happens on an idle system.
        // Therefore if we return back to the original caller after turning off
        // VMX, it will keep our current "host" CR3 value which we set on entry
        // to the PML4 of the SYSTEM process. We want to return back with the
        // correct value of the "guest" CR3, so that the currently executing
        // process continues to run with its expected address space mappings.
        //
        __writecr3(ShvVmxRead(GUEST_CR3));

        //
        // Finally, set the stack and instruction pointer to whatever location
        // had the instruction causing our VM-Exit, such as ShvVpUninitialize.
        // This will effectively act as a longjmp back to that location.
        //
        Context->Rsp = guestContext.GuestRsp;
        Context->Rip = (ULONG64)guestContext.GuestRip;

        //
        // Turn off VMX root mode on this logical processor. We're done here.
        //
        __vmx_off();
    }
    else
    {
        //
        // Because we won't be returning back into assembly code, nothing will
        // ever know about the "pop rcx" that must technically be done (or more
        // accurately "add rsp, 8", as rcx will already be correct thanks to
        // the fixup earlier). In order to keep the stack sane, do that
        // adjustment here.
        //
        Context->Rsp += sizeof(Context->Rcx);

        //
        // Return into a VMXRESUME intrinsic, which we broke out as its own
        // function, in order to allow this to work. No assembly code will be
        // needed as RtlRestoreContext will fix all the GPRs, and what we just
        // did to RSP will take care of the rest.
        //
        Context->Rip = (ULONG64)ShvVmxResume;
    }

    //
    // Restore the IRQL back to the original level
    //
    KeLowerIrql(guestContext.GuestIrql);

    //
    // Restore the context to either ShvVmxResume, in which case the CPU's VMX
    // facility will do the "true" return back to the VM (but without restoring
    // GPRs, which is why we must do it here), or to the original guest's RIP,
    // which we use in case an exit was requested. In this case VMX must now be
    // off, and this will look like a longjmp to the original stack and RIP.
    //
    RtlRestoreContext(Context, NULL);
}
// GetCurrentCoreId: Returns the core id of the current execution context
uint32
GetCurrentCoreId(void)
{
    return KeGetCurrentProcessorNumberEx(NULL);
}
// Dispatches VM-exit to a corresponding handler
_Use_decl_annotations_ static void VmmpHandleVmExit(
    GuestContext *guest_context) {
  HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();

  const VmExitInformation exit_reason = {
      static_cast<ULONG32>(UtilVmRead(VmcsField::kVmExitReason))};

  if (kVmmpEnableRecordVmExit) {
    // Save them for ease of troubleshooting
    const auto processor = KeGetCurrentProcessorNumberEx(nullptr);
    auto &index = g_vmmp_next_history_index[processor];
    auto &history = g_vmmp_vm_exit_history[processor][index];

    history.gp_regs = *guest_context->gp_regs;
    history.ip = guest_context->ip;
    history.exit_reason = exit_reason;
    history.exit_qualification = UtilVmRead(VmcsField::kExitQualification);
    history.instruction_info = UtilVmRead(VmcsField::kVmxInstructionInfo);
    if (++index == kVmmpNumberOfRecords) {
      index = 0;
    }
  }

  switch (exit_reason.fields.reason) {
    case VmxExitReason::kExceptionOrNmi:
      VmmpHandleException(guest_context);
      break;
    case VmxExitReason::kTripleFault:
      VmmpHandleTripleFault(guest_context);
      break;
    case VmxExitReason::kCpuid:
      VmmpHandleCpuid(guest_context);
      break;
    case VmxExitReason::kInvd:
      VmmpHandleInvalidateInternalCaches(guest_context);
      break;
    case VmxExitReason::kInvlpg:
      VmmpHandleInvalidateTLBEntry(guest_context);
      break;
    case VmxExitReason::kRdtsc:
      VmmpHandleRdtsc(guest_context);
      break;
    case VmxExitReason::kCrAccess:
      VmmpHandleCrAccess(guest_context);
      break;
    case VmxExitReason::kDrAccess:
      VmmpHandleDrAccess(guest_context);
      break;
    case VmxExitReason::kMsrRead:
      VmmpHandleMsrReadAccess(guest_context);
      break;
    case VmxExitReason::kMsrWrite:
      VmmpHandleMsrWriteAccess(guest_context);
      break;
    case VmxExitReason::kMonitorTrapFlag:
      VmmpHandleMonitorTrap(guest_context);
      break;
    case VmxExitReason::kGdtrOrIdtrAccess:
      VmmpHandleGdtrOrIdtrAccess(guest_context);
      break;
    case VmxExitReason::kLdtrOrTrAccess:
      VmmpHandleLdtrOrTrAccess(guest_context);
      break;
    case VmxExitReason::kEptViolation:
      VmmpHandleEptViolation(guest_context);
      break;
    case VmxExitReason::kEptMisconfig:
      VmmpHandleEptMisconfig(guest_context);
      break;
    case VmxExitReason::kVmcall:
      VmmpHandleVmCall(guest_context);
      break;
    case VmxExitReason::kVmclear:
    case VmxExitReason::kVmlaunch:
    case VmxExitReason::kVmptrld:
    case VmxExitReason::kVmptrst:
    case VmxExitReason::kVmread:
    case VmxExitReason::kVmresume:
    case VmxExitReason::kVmwrite:
    case VmxExitReason::kVmoff:
    case VmxExitReason::kVmon:
      VmmpHandleVmx(guest_context);
      break;
    case VmxExitReason::kRdtscp:
      VmmpHandleRdtscp(guest_context);
      break;
    case VmxExitReason::kXsetbv:
      VmmpHandleXsetbv(guest_context);
      break;
    default:
      VmmpHandleUnexpectedExit(guest_context);
      break;
  }
}
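// Illustrative declarations (assumed, not copied from HyperPlatform): the
// recording above indexes a fixed per-processor ring buffer. The types and
// sizes below are placeholders inferred only from the fields written in
// VmmpHandleVmExit; the real project defines its own equivalents.
struct VmExitHistory {
  GpRegisters gp_regs;
  ULONG_PTR ip;
  VmExitInformation exit_reason;
  ULONG_PTR exit_qualification;
  ULONG_PTR instruction_info;
};

static const bool kVmmpEnableRecordVmExit = false;  // placeholder toggle
static const ULONG kVmmpNumberOfRecords = 100;      // placeholder ring depth
static const ULONG kVmmpNumberOfProcessors = 64;    // placeholder CPU limit

static ULONG g_vmmp_next_history_index[kVmmpNumberOfProcessors];
static VmExitHistory g_vmmp_vm_exit_history[kVmmpNumberOfProcessors]
                                           [kVmmpNumberOfRecords];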
// See: PREPARATION AND LAUNCHING A VIRTUAL MACHINE
_Use_decl_annotations_ static bool VmpSetupVmcs(
    const ProcessorData *processor_data, ULONG_PTR guest_stack_pointer,
    ULONG_PTR guest_instruction_pointer, ULONG_PTR vmm_stack_pointer) {
  PAGED_CODE();

  Gdtr gdtr = {};
  __sgdt(&gdtr);

  Idtr idtr = {};
  __sidt(&idtr);

  // See: Algorithms for Determining VMX Capabilities
  const auto use_true_msrs = Ia32VmxBasicMsr{
      UtilReadMsr64(Msr::kIa32VmxBasic)}.fields.vmx_capability_hint;

  VmxVmEntryControls vm_entryctl_requested = {};
  vm_entryctl_requested.fields.load_debug_controls = true;
  vm_entryctl_requested.fields.ia32e_mode_guest = IsX64();
  VmxVmEntryControls vm_entryctl = {VmpAdjustControlValue(
      (use_true_msrs) ? Msr::kIa32VmxTrueEntryCtls : Msr::kIa32VmxEntryCtls,
      vm_entryctl_requested.all)};

  VmxVmExitControls vm_exitctl_requested = {};
  vm_exitctl_requested.fields.host_address_space_size = IsX64();
  vm_exitctl_requested.fields.acknowledge_interrupt_on_exit = true;
  VmxVmExitControls vm_exitctl = {VmpAdjustControlValue(
      (use_true_msrs) ? Msr::kIa32VmxTrueExitCtls : Msr::kIa32VmxExitCtls,
      vm_exitctl_requested.all)};

  VmxPinBasedControls vm_pinctl_requested = {};
  VmxPinBasedControls vm_pinctl = {
      VmpAdjustControlValue((use_true_msrs) ? Msr::kIa32VmxTruePinbasedCtls
                                            : Msr::kIa32VmxPinbasedCtls,
                            vm_pinctl_requested.all)};

  VmxProcessorBasedControls vm_procctl_requested = {};
  vm_procctl_requested.fields.invlpg_exiting = false;
  vm_procctl_requested.fields.rdtsc_exiting = false;
  vm_procctl_requested.fields.cr3_load_exiting = true;
  vm_procctl_requested.fields.cr8_load_exiting = false;  // NB: very frequent
  vm_procctl_requested.fields.mov_dr_exiting = true;
  vm_procctl_requested.fields.use_io_bitmaps = true;
  vm_procctl_requested.fields.use_msr_bitmaps = true;
  vm_procctl_requested.fields.activate_secondary_control = true;
  VmxProcessorBasedControls vm_procctl = {
      VmpAdjustControlValue((use_true_msrs) ? Msr::kIa32VmxTrueProcBasedCtls
                                            : Msr::kIa32VmxProcBasedCtls,
                            vm_procctl_requested.all)};

  VmxSecondaryProcessorBasedControls vm_procctl2_requested = {};
  vm_procctl2_requested.fields.enable_ept = true;
  vm_procctl2_requested.fields.descriptor_table_exiting = true;
  vm_procctl2_requested.fields.enable_rdtscp = true;  // for Win10
  vm_procctl2_requested.fields.enable_vpid = true;
  vm_procctl2_requested.fields.enable_xsaves_xstors = true;  // for Win10
  VmxSecondaryProcessorBasedControls vm_procctl2 = {VmpAdjustControlValue(
      Msr::kIa32VmxProcBasedCtls2, vm_procctl2_requested.all)};

  // NOTE: Comment in any of those as needed
  const auto exception_bitmap =
      // 1 << InterruptionVector::kBreakpointException |
      // 1 << InterruptionVector::kGeneralProtectionException |
      // 1 << InterruptionVector::kPageFaultException |
      0;

  // Set up CR0 and CR4 bitmaps
  // - Where a bit is masked, the shadow bit appears
  // - Where a bit is not masked, the actual bit appears
  // VM-exit occurs when a guest modifies any of those fields
  Cr0 cr0_mask = {};
  Cr0 cr0_shadow = {__readcr0()};
  Cr4 cr4_mask = {};
  Cr4 cr4_shadow = {__readcr4()};
  // For example, when we want to hide CR4.VMXE from the guest, comment in below
  // cr4_mask.fields.vmxe = true;
  // cr4_shadow.fields.vmxe = false;

  // See: PDPTE Registers
  // If PAE paging would be in use following an execution of MOV to CR0 or MOV
  // to CR4 (see Section 4.1.1) and the instruction is modifying any of CR0.CD,
  // CR0.NW, CR0.PG, CR4.PAE, CR4.PGE, CR4.PSE, or CR4.SMEP; then the PDPTEs are
  // loaded from the address in CR3.
  if (UtilIsX86Pae()) {
    cr0_mask.fields.pg = true;
    cr0_mask.fields.cd = true;
    cr0_mask.fields.nw = true;
    cr4_mask.fields.pae = true;
    cr4_mask.fields.pge = true;
    cr4_mask.fields.pse = true;
    cr4_mask.fields.smep = true;
  }

  // clang-format off
  auto error = VmxStatus::kOk;

  /* 16-Bit Control Field */
  error |= UtilVmWrite(VmcsField::kVirtualProcessorId,
                       KeGetCurrentProcessorNumberEx(nullptr) + 1);

  /* 16-Bit Guest-State Fields */
  error |= UtilVmWrite(VmcsField::kGuestEsSelector, AsmReadES());
  error |= UtilVmWrite(VmcsField::kGuestCsSelector, AsmReadCS());
  error |= UtilVmWrite(VmcsField::kGuestSsSelector, AsmReadSS());
  error |= UtilVmWrite(VmcsField::kGuestDsSelector, AsmReadDS());
  error |= UtilVmWrite(VmcsField::kGuestFsSelector, AsmReadFS());
  error |= UtilVmWrite(VmcsField::kGuestGsSelector, AsmReadGS());
  error |= UtilVmWrite(VmcsField::kGuestLdtrSelector, AsmReadLDTR());
  error |= UtilVmWrite(VmcsField::kGuestTrSelector, AsmReadTR());

  /* 16-Bit Host-State Fields */
  // RPL and TI have to be 0
  error |= UtilVmWrite(VmcsField::kHostEsSelector, AsmReadES() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostCsSelector, AsmReadCS() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostSsSelector, AsmReadSS() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostDsSelector, AsmReadDS() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostFsSelector, AsmReadFS() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostGsSelector, AsmReadGS() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostTrSelector, AsmReadTR() & 0xf8);

  /* 64-Bit Control Fields */
  error |= UtilVmWrite64(VmcsField::kIoBitmapA,
                         UtilPaFromVa(processor_data->shared_data->io_bitmap_a));
  error |= UtilVmWrite64(VmcsField::kIoBitmapB,
                         UtilPaFromVa(processor_data->shared_data->io_bitmap_b));
  error |= UtilVmWrite64(VmcsField::kMsrBitmap,
                         UtilPaFromVa(processor_data->shared_data->msr_bitmap));
  error |= UtilVmWrite64(VmcsField::kEptPointer,
                         EptGetEptPointer(processor_data->ept_data));

  /* 64-Bit Guest-State Fields */
  error |= UtilVmWrite64(VmcsField::kVmcsLinkPointer, MAXULONG64);
  error |= UtilVmWrite64(VmcsField::kGuestIa32Debugctl,
                         UtilReadMsr64(Msr::kIa32Debugctl));
  if (UtilIsX86Pae()) {
    UtilLoadPdptes(__readcr3());
  }

  /* 32-Bit Control Fields */
  error |= UtilVmWrite(VmcsField::kPinBasedVmExecControl, vm_pinctl.all);
  error |= UtilVmWrite(VmcsField::kCpuBasedVmExecControl, vm_procctl.all);
  error |= UtilVmWrite(VmcsField::kExceptionBitmap, exception_bitmap);
  error |= UtilVmWrite(VmcsField::kVmExitControls, vm_exitctl.all);
  error |= UtilVmWrite(VmcsField::kVmEntryControls, vm_entryctl.all);
  error |= UtilVmWrite(VmcsField::kSecondaryVmExecControl, vm_procctl2.all);

  /* 32-Bit Guest-State Fields */
  error |= UtilVmWrite(VmcsField::kGuestEsLimit, GetSegmentLimit(AsmReadES()));
  error |= UtilVmWrite(VmcsField::kGuestCsLimit, GetSegmentLimit(AsmReadCS()));
  error |= UtilVmWrite(VmcsField::kGuestSsLimit, GetSegmentLimit(AsmReadSS()));
  error |= UtilVmWrite(VmcsField::kGuestDsLimit, GetSegmentLimit(AsmReadDS()));
  error |= UtilVmWrite(VmcsField::kGuestFsLimit, GetSegmentLimit(AsmReadFS()));
  error |= UtilVmWrite(VmcsField::kGuestGsLimit, GetSegmentLimit(AsmReadGS()));
  error |= UtilVmWrite(VmcsField::kGuestLdtrLimit, GetSegmentLimit(AsmReadLDTR()));
  error |= UtilVmWrite(VmcsField::kGuestTrLimit, GetSegmentLimit(AsmReadTR()));
  error |= UtilVmWrite(VmcsField::kGuestGdtrLimit, gdtr.limit);
  error |= UtilVmWrite(VmcsField::kGuestIdtrLimit, idtr.limit);
  error |= UtilVmWrite(VmcsField::kGuestEsArBytes,
                       VmpGetSegmentAccessRight(AsmReadES()));
  error |= UtilVmWrite(VmcsField::kGuestCsArBytes,
                       VmpGetSegmentAccessRight(AsmReadCS()));
  error |= UtilVmWrite(VmcsField::kGuestSsArBytes,
                       VmpGetSegmentAccessRight(AsmReadSS()));
  error |= UtilVmWrite(VmcsField::kGuestDsArBytes,
                       VmpGetSegmentAccessRight(AsmReadDS()));
  error |= UtilVmWrite(VmcsField::kGuestFsArBytes,
                       VmpGetSegmentAccessRight(AsmReadFS()));
  error |= UtilVmWrite(VmcsField::kGuestGsArBytes,
                       VmpGetSegmentAccessRight(AsmReadGS()));
  error |= UtilVmWrite(VmcsField::kGuestLdtrArBytes,
                       VmpGetSegmentAccessRight(AsmReadLDTR()));
  error |= UtilVmWrite(VmcsField::kGuestTrArBytes,
                       VmpGetSegmentAccessRight(AsmReadTR()));
  error |= UtilVmWrite(VmcsField::kGuestSysenterCs, UtilReadMsr(Msr::kIa32SysenterCs));

  /* 32-Bit Host-State Field */
  error |= UtilVmWrite(VmcsField::kHostIa32SysenterCs, UtilReadMsr(Msr::kIa32SysenterCs));

  /* Natural-Width Control Fields */
  error |= UtilVmWrite(VmcsField::kCr0GuestHostMask, cr0_mask.all);
  error |= UtilVmWrite(VmcsField::kCr4GuestHostMask, cr4_mask.all);
  error |= UtilVmWrite(VmcsField::kCr0ReadShadow, cr0_shadow.all);
  error |= UtilVmWrite(VmcsField::kCr4ReadShadow, cr4_shadow.all);

  /* Natural-Width Guest-State Fields */
  error |= UtilVmWrite(VmcsField::kGuestCr0, __readcr0());
  error |= UtilVmWrite(VmcsField::kGuestCr3, __readcr3());
  error |= UtilVmWrite(VmcsField::kGuestCr4, __readcr4());
#if defined(_AMD64_)
  error |= UtilVmWrite(VmcsField::kGuestEsBase, 0);
  error |= UtilVmWrite(VmcsField::kGuestCsBase, 0);
  error |= UtilVmWrite(VmcsField::kGuestSsBase, 0);
  error |= UtilVmWrite(VmcsField::kGuestDsBase, 0);
  error |= UtilVmWrite(VmcsField::kGuestFsBase, UtilReadMsr(Msr::kIa32FsBase));
  error |= UtilVmWrite(VmcsField::kGuestGsBase, UtilReadMsr(Msr::kIa32GsBase));
#else
  error |= UtilVmWrite(VmcsField::kGuestEsBase, VmpGetSegmentBase(gdtr.base, AsmReadES()));
  error |= UtilVmWrite(VmcsField::kGuestCsBase, VmpGetSegmentBase(gdtr.base, AsmReadCS()));
  error |= UtilVmWrite(VmcsField::kGuestSsBase, VmpGetSegmentBase(gdtr.base, AsmReadSS()));
  error |= UtilVmWrite(VmcsField::kGuestDsBase, VmpGetSegmentBase(gdtr.base, AsmReadDS()));
  error |= UtilVmWrite(VmcsField::kGuestFsBase, VmpGetSegmentBase(gdtr.base, AsmReadFS()));
  error |= UtilVmWrite(VmcsField::kGuestGsBase, VmpGetSegmentBase(gdtr.base, AsmReadGS()));
#endif
  error |= UtilVmWrite(VmcsField::kGuestLdtrBase, VmpGetSegmentBase(gdtr.base, AsmReadLDTR()));
  error |= UtilVmWrite(VmcsField::kGuestTrBase, VmpGetSegmentBase(gdtr.base, AsmReadTR()));
  error |= UtilVmWrite(VmcsField::kGuestGdtrBase, gdtr.base);
  error |= UtilVmWrite(VmcsField::kGuestIdtrBase, idtr.base);
  error |= UtilVmWrite(VmcsField::kGuestDr7, __readdr(7));
  error |= UtilVmWrite(VmcsField::kGuestRsp, guest_stack_pointer);
  error |= UtilVmWrite(VmcsField::kGuestRip, guest_instruction_pointer);
  error |= UtilVmWrite(VmcsField::kGuestRflags, __readeflags());
  error |= UtilVmWrite(VmcsField::kGuestSysenterEsp, UtilReadMsr(Msr::kIa32SysenterEsp));
  error |= UtilVmWrite(VmcsField::kGuestSysenterEip, UtilReadMsr(Msr::kIa32SysenterEip));

  /* Natural-Width Host-State Fields */
  error |= UtilVmWrite(VmcsField::kHostCr0, __readcr0());
  error |= UtilVmWrite(VmcsField::kHostCr3, __readcr3());
  error |= UtilVmWrite(VmcsField::kHostCr4, __readcr4());
#if defined(_AMD64_)
  error |= UtilVmWrite(VmcsField::kHostFsBase, UtilReadMsr(Msr::kIa32FsBase));
  error |= UtilVmWrite(VmcsField::kHostGsBase, UtilReadMsr(Msr::kIa32GsBase));
#else
  error |= UtilVmWrite(VmcsField::kHostFsBase, VmpGetSegmentBase(gdtr.base, AsmReadFS()));
  error |= UtilVmWrite(VmcsField::kHostGsBase, VmpGetSegmentBase(gdtr.base, AsmReadGS()));
#endif
  error |= UtilVmWrite(VmcsField::kHostTrBase, VmpGetSegmentBase(gdtr.base, AsmReadTR()));
  error |= UtilVmWrite(VmcsField::kHostGdtrBase, gdtr.base);
  error |= UtilVmWrite(VmcsField::kHostIdtrBase, idtr.base);
  error |= UtilVmWrite(VmcsField::kHostIa32SysenterEsp, UtilReadMsr(Msr::kIa32SysenterEsp));
  error |= UtilVmWrite(VmcsField::kHostIa32SysenterEip, UtilReadMsr(Msr::kIa32SysenterEip));
  error |= UtilVmWrite(VmcsField::kHostRsp, vmm_stack_pointer);
  error |= UtilVmWrite(VmcsField::kHostRip, reinterpret_cast<ULONG_PTR>(AsmVmmEntryPoint));
  // clang-format on

  const auto vmx_status = static_cast<VmxStatus>(error);
  return vmx_status == VmxStatus::kOk;
}
// Concatenates meta information such as the current time and a process ID to
// user given log message.
_Use_decl_annotations_ static NTSTATUS LogpMakePrefix(
    ULONG level, const char *function_name, const char *log_message,
    char *log_buffer, SIZE_T log_buffer_length) {
  char const *level_string = nullptr;
  switch (level) {
    case kLogpLevelDebug:
      level_string = "DBG\t";
      break;
    case kLogpLevelInfo:
      level_string = "INF\t";
      break;
    case kLogpLevelWarn:
      level_string = "WRN\t";
      break;
    case kLogpLevelError:
      level_string = "ERR\t";
      break;
    default:
      return STATUS_INVALID_PARAMETER;
  }

  auto status = STATUS_SUCCESS;

  char time_buffer[20] = {};
  if ((g_logp_debug_flag & kLogOptDisableTime) == 0) {
    // Want the current time.
    TIME_FIELDS time_fields;
    LARGE_INTEGER system_time, local_time;
    KeQuerySystemTime(&system_time);
    ExSystemTimeToLocalTime(&system_time, &local_time);
    RtlTimeToTimeFields(&local_time, &time_fields);

    status = RtlStringCchPrintfA(time_buffer, RTL_NUMBER_OF(time_buffer),
                                 "%02u:%02u:%02u.%03u\t", time_fields.Hour,
                                 time_fields.Minute, time_fields.Second,
                                 time_fields.Milliseconds);
    if (!NT_SUCCESS(status)) {
      return status;
    }
  }

  // Want the function name
  char function_name_buffer[50] = {};
  if ((g_logp_debug_flag & kLogOptDisableFunctionName) == 0) {
    const auto base_function_name = LogpFindBaseFunctionName(function_name);
    status = RtlStringCchPrintfA(function_name_buffer,
                                 RTL_NUMBER_OF(function_name_buffer), "%-40s\t",
                                 base_function_name);
    if (!NT_SUCCESS(status)) {
      return status;
    }
  }

  // Want the processor number
  char processor_number[10] = {};
  if ((g_logp_debug_flag & kLogOptDisableProcessorNumber) == 0) {
    status =
        RtlStringCchPrintfA(processor_number, RTL_NUMBER_OF(processor_number),
                            "#%lu\t", KeGetCurrentProcessorNumberEx(nullptr));
    if (!NT_SUCCESS(status)) {
      return status;
    }
  }

  // It uses PsGetProcessId(PsGetCurrentProcess()) instead of
  // PsGetCurrentThreadProcessId() because the latter sometimes returns an
  // unwanted value, for example:
  //   PID == 4 but its image name != ntoskrnl.exe
  // The author is guessing that it is related to attaching processes but is
  // not quite sure. The former way works as expected.
  status = RtlStringCchPrintfA(
      log_buffer, log_buffer_length, "%s%s%s%5Iu\t%5Iu\t%-15s\t%s%s\r\n",
      time_buffer, level_string, processor_number,
      reinterpret_cast<ULONG_PTR>(PsGetProcessId(PsGetCurrentProcess())),
      reinterpret_cast<ULONG_PTR>(PsGetCurrentThreadId()),
      PsGetProcessImageFileName(PsGetCurrentProcess()), function_name_buffer,
      log_message);
  return status;
}