_Use_decl_annotations_ EXTERN_C static bool VminitpEnterVmxMode(
    PER_PROCESSOR_DATA *ProcessorData) {
  // Apply FIXED bits
  const CR0_REG cr0Fixed0 = {__readmsr(IA32_VMX_CR0_FIXED0)};
  const CR0_REG cr0Fixed1 = {__readmsr(IA32_VMX_CR0_FIXED1)};
  CR0_REG cr0 = {__readcr0()};
  cr0.All &= cr0Fixed1.All;
  cr0.All |= cr0Fixed0.All;
  __writecr0(cr0.All);

  const CR4_REG cr4Fixed0 = {__readmsr(IA32_VMX_CR4_FIXED0)};
  const CR4_REG cr4Fixed1 = {__readmsr(IA32_VMX_CR4_FIXED1)};
  CR4_REG cr4 = {__readcr4()};
  cr4.All &= cr4Fixed1.All;
  cr4.All |= cr4Fixed0.All;
  __writecr4(cr4.All);

  // Write a VMCS revision identifier
  IA32_VMX_BASIC_MSR vmxBasicMsr = {__readmsr(IA32_VMX_BASIC)};
  ProcessorData->VmxonRegion->RevisionIdentifier =
      vmxBasicMsr.Fields.RevisionIdentifier;

  auto vmxonRegionPA = MmGetPhysicalAddress(ProcessorData->VmxonRegion);
  if (__vmx_on(
          reinterpret_cast<unsigned long long *>(&vmxonRegionPA.QuadPart))) {
    return false;
  }

  return true;
}
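// A minimal allocation sketch (not from the original project) for the VMXON
// region consumed above. Assumptions: the VMXON_REGION layout is as implied
// by the snippet and the region occupies one page. VMXON requires a zeroed,
// 4KB-aligned, physically contiguous region; MmAllocateContiguousMemory
// returns page-aligned memory for page-sized requests.
static VMXON_REGION *VminitpAllocateVmxonRegion() {
  PHYSICAL_ADDRESS highestAcceptable = {};
  highestAcceptable.QuadPart = -1;  // no upper bound on the physical address

  auto vmxonRegion = reinterpret_cast<VMXON_REGION *>(
      MmAllocateContiguousMemory(PAGE_SIZE, highestAcceptable));
  if (!vmxonRegion) {
    return nullptr;
  }

  // Zero the region before the revision identifier is written into it
  RtlZeroMemory(vmxonRegion, PAGE_SIZE);
  return vmxonRegion;
}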
// See: VMM SETUP & TEAR DOWN
_Use_decl_annotations_ static bool VmpEnterVmxMode(
    ProcessorData *processor_data) {
  // Apply FIXED bits
  const Cr0 cr0_fixed0 = {UtilReadMsr(Msr::kIa32VmxCr0Fixed0)};
  const Cr0 cr0_fixed1 = {UtilReadMsr(Msr::kIa32VmxCr0Fixed1)};
  Cr0 cr0 = {__readcr0()};
  cr0.all &= cr0_fixed1.all;
  cr0.all |= cr0_fixed0.all;
  __writecr0(cr0.all);

  const Cr4 cr4_fixed0 = {UtilReadMsr(Msr::kIa32VmxCr4Fixed0)};
  const Cr4 cr4_fixed1 = {UtilReadMsr(Msr::kIa32VmxCr4Fixed1)};
  Cr4 cr4 = {__readcr4()};
  cr4.all &= cr4_fixed1.all;
  cr4.all |= cr4_fixed0.all;
  __writecr4(cr4.all);

  // Write a VMCS revision identifier
  const Ia32VmxBasicMsr vmx_basic_msr = {UtilReadMsr64(Msr::kIa32VmxBasic)};
  processor_data->vmxon_region->revision_identifier =
      vmx_basic_msr.fields.revision_identifier;

  auto vmxon_region_pa = UtilPaFromVa(processor_data->vmxon_region);
  if (__vmx_on(&vmxon_region_pa)) {
    return false;
  }

  UtilInveptAll();
  return true;
}
static inline bool enter_vmx(struct vmcs *vmxon)
{
	/* If we're running nested on a hypervisor that does not
	 * support VT-x, this will cause #GP.  */
	u64 cr0 = __readcr0();
	cr0 &= __readmsr(MSR_IA32_VMX_CR0_FIXED1);
	cr0 |= __readmsr(MSR_IA32_VMX_CR0_FIXED0);
	__writecr0(cr0);

	u64 cr4 = __readcr4();
	cr4 &= __readmsr(MSR_IA32_VMX_CR4_FIXED1);
	cr4 |= __readmsr(MSR_IA32_VMX_CR4_FIXED0);
	__writecr4(cr4);

	u64 vmx = __readmsr(MSR_IA32_VMX_BASIC);
	vmxon->revision_id = (u32)vmx;

	/* Enter VMX root operation  */
	uintptr_t pa = __pa(vmxon);
	if (__vmx_on(&pa))
		return false;

	/* This is necessary here or just before we exit the VM,
	 * we do it here as it's easier.  */
	__invept_all();
	return true;
}
// ----------------------------------------------------------------------------
void SetupCR0()
{
    dword cr0 = __readcr0();

    SetBit(cr0, 31);    // Set PG 1 (Enable Paging)
    ClearBit(cr0, 30);  // Set CD 0 (Enable Cache)
    ClearBit(cr0, 29);  // Set NW 0 (Enable Write-through)
    SetBit(cr0, 16);    // Set WP 1 (Enable Write Protect)
    SetBit(cr0, 5);     // Set NE 1 (Enable Native FP Error Reporting)
    __writecr0(cr0);
}
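// The SetBit/ClearBit helpers and the 'dword' type used above are not shown
// in the snippet; a plausible minimal definition (an assumption, not the
// original project's code) would be the following. Note that a 32-bit
// 'dword' only suffices for 32-bit builds; __readcr0 returns a 64-bit value
// on x64.
typedef unsigned long dword;

#define SetBit(value, bit)   ((value) |=  (1UL << (bit)))
#define ClearBit(value, bit) ((value) &= ~(1UL << (bit)))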
// See: VMM SETUP & TEAR DOWN
_Use_decl_annotations_ static bool VmpEnterVmxMode(
    ProcessorData *processor_data) {
  PAGED_CODE();

  // Apply FIXED bits
  // See: VMX-FIXED BITS IN CR0
  //
  //        IA32_VMX_CRx_FIXED0 IA32_VMX_CRx_FIXED1 Meaning
  // Values 1                   1                   bit of CRx is fixed to 1
  // Values 0                   1                   bit of CRx is flexible
  // Values 0                   0                   bit of CRx is fixed to 0
  const Cr0 cr0_fixed0 = {UtilReadMsr(Msr::kIa32VmxCr0Fixed0)};
  const Cr0 cr0_fixed1 = {UtilReadMsr(Msr::kIa32VmxCr0Fixed1)};
  Cr0 cr0 = {__readcr0()};
  Cr0 cr0_original = cr0;
  cr0.all &= cr0_fixed1.all;
  cr0.all |= cr0_fixed0.all;
  __writecr0(cr0.all);

  HYPERPLATFORM_LOG_DEBUG("IA32_VMX_CR0_FIXED0 = %08x", cr0_fixed0.all);
  HYPERPLATFORM_LOG_DEBUG("IA32_VMX_CR0_FIXED1 = %08x", cr0_fixed1.all);
  HYPERPLATFORM_LOG_DEBUG("Original CR0        = %08x", cr0_original.all);
  HYPERPLATFORM_LOG_DEBUG("Fixed CR0           = %08x", cr0.all);

  // See: VMX-FIXED BITS IN CR4
  const Cr4 cr4_fixed0 = {UtilReadMsr(Msr::kIa32VmxCr4Fixed0)};
  const Cr4 cr4_fixed1 = {UtilReadMsr(Msr::kIa32VmxCr4Fixed1)};
  Cr4 cr4 = {__readcr4()};
  Cr4 cr4_original = cr4;
  cr4.all &= cr4_fixed1.all;
  cr4.all |= cr4_fixed0.all;
  __writecr4(cr4.all);

  HYPERPLATFORM_LOG_DEBUG("IA32_VMX_CR4_FIXED0 = %08x", cr4_fixed0.all);
  HYPERPLATFORM_LOG_DEBUG("IA32_VMX_CR4_FIXED1 = %08x", cr4_fixed1.all);
  HYPERPLATFORM_LOG_DEBUG("Original CR4        = %08x", cr4_original.all);
  HYPERPLATFORM_LOG_DEBUG("Fixed CR4           = %08x", cr4.all);

  // Write a VMCS revision identifier
  const Ia32VmxBasicMsr vmx_basic_msr = {UtilReadMsr64(Msr::kIa32VmxBasic)};
  processor_data->vmxon_region->revision_identifier =
      vmx_basic_msr.fields.revision_identifier;

  auto vmxon_region_pa = UtilPaFromVa(processor_data->vmxon_region);
  if (__vmx_on(&vmxon_region_pa)) {
    return false;
  }

  // See: Guidelines for Use of the INVVPID Instruction, and Guidelines for
  // Use of the INVEPT Instruction
  UtilInveptGlobal();
  UtilInvvpidAllContext();
  return true;
}
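// A small companion sketch (not part of HyperPlatform) that decomposes the
// FIXED0/FIXED1 pair according to the comment table above: a CRx bit is
// fixed to 1 when set in both MSRs, flexible when set only in FIXED1, and
// fixed to 0 when clear in both.
struct FixedBitClasses {
  ULONG_PTR fixed_to_1;  // must be 1 while VMX operation is enabled
  ULONG_PTR fixed_to_0;  // must be 0 while VMX operation is enabled
  ULONG_PTR flexible;    // may hold either value
};

static FixedBitClasses VmpClassifyFixedBits(ULONG_PTR fixed0,
                                            ULONG_PTR fixed1) {
  FixedBitClasses classes = {};
  classes.fixed_to_1 = fixed0 & fixed1;    // 1 in FIXED0, 1 in FIXED1
  classes.fixed_to_0 = ~fixed0 & ~fixed1;  // 0 in FIXED0, 0 in FIXED1
  classes.flexible = ~fixed0 & fixed1;     // 0 in FIXED0, 1 in FIXED1
  return classes;
}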
void WPOFF()
{
#ifndef _WIN64
    _asm
    {
        cli
        mov eax, cr0
        and eax, not 10000h
        mov cr0, eax
    };
#else
    _disable();                 // cli first, matching the 32-bit path
    UINT64 cr0 = __readcr0();
    cr0 &= 0xfffffffffffeffff;  // clear CR0.WP (bit 16)
    __writecr0(cr0);
#endif //_WIN64
}
VOID WPON()
{
#ifndef _WIN64
    _asm
    {
        mov eax, cr0
        or eax, 10000h
        mov cr0, eax
        sti
    };
#else
    // The original 64-bit branch repeated WPOFF's body (clearing WP and
    // disabling interrupts); it must instead set WP again and re-enable
    // interrupts, mirroring the 32-bit path.
    UINT64 cr0 = __readcr0();
    cr0 |= 0x10000;             // set CR0.WP (bit 16) again
    __writecr0(cr0);
    _enable();                  // sti, matching the 32-bit path
#endif //_WIN64
}
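// Hedged usage sketch for the WPOFF/WPON pair above (the names 'Target' and
// 'Value' are illustrative): briefly lift CR0.WP to write through a
// read-only mapping, then restore it. CR0 is per-processor, so on SMP
// systems the caller must keep execution pinned to one CPU for the
// duration, e.g. via KeSetSystemAffinityThread.
VOID PatchReadOnlyByte(PUCHAR Target, UCHAR Value)
{
    WPOFF();           // interrupts off, CR0.WP cleared
    *Target = Value;   // this write would otherwise raise a page fault
    WPON();            // CR0.WP restored, interrupts back on
}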
VOID
NTAPI
HalpBiosCall()
{
    /* Must be volatile so it doesn't get optimized away! */
    volatile KTRAP_FRAME V86TrapFrame;
    ULONG_PTR StackOffset, CodeOffset;

    /* Save the context, check for return */
    if (_setjmp(HalpSavedContext))
    {
        /* Returned from v86 */
        return;
    }

    /* Kill alignment faults */
    __writecr0(__readcr0() & ~CR0_AM);

    /* Set new stack address */
    KeGetPcr()->TSS->Esp0 = (ULONG)&V86TrapFrame - 0x20 - sizeof(FX_SAVE_AREA);

    /* Compute segmented IP and SP offsets */
    StackOffset = (ULONG_PTR)&HalpRealModeEnd - 4 - (ULONG_PTR)HalpRealModeStart;
    CodeOffset = (ULONG_PTR)HalpRealModeStart & 0xFFF;

    /* Now build the V86 trap frame */
    V86TrapFrame.V86Es = 0;
    V86TrapFrame.V86Ds = 0;
    V86TrapFrame.V86Gs = 0;
    V86TrapFrame.V86Fs = 0;
    V86TrapFrame.HardwareSegSs = 0x2000;
    V86TrapFrame.HardwareEsp = StackOffset + CodeOffset;
    V86TrapFrame.EFlags = __readeflags() | EFLAGS_V86_MASK | EFLAGS_IOPL;
    V86TrapFrame.SegCs = 0x2000;
    V86TrapFrame.Eip = CodeOffset;

    /* Exit to V86 mode */
    HalpExitToV86((PKTRAP_FRAME)&V86TrapFrame);
}
DECLSPEC_NORETURN
VOID
FASTCALL
KiTrap07Handler(IN PKTRAP_FRAME TrapFrame)
{
    PKTHREAD Thread, NpxThread;
    PFX_SAVE_AREA SaveArea, NpxSaveArea;
    ULONG Cr0;

    /* Save trap frame */
    KiEnterTrap(TrapFrame);

    /* Try to handle NPX delay load */
    while (TRUE)
    {
        /* Get the current thread */
        Thread = KeGetCurrentThread();

        /* Get the NPX frame */
        SaveArea = KiGetThreadNpxArea(Thread);

        /* Check if emulation is enabled */
        if (SaveArea->Cr0NpxState & CR0_EM)
        {
            /* Not implemented */
            UNIMPLEMENTED;
            while (TRUE);
        }

        /* Save CR0 and check NPX state */
        Cr0 = __readcr0();
        if (Thread->NpxState != NPX_STATE_LOADED)
        {
            /* Update CR0 */
            Cr0 &= ~(CR0_MP | CR0_EM | CR0_TS);
            __writecr0(Cr0);

            /* Get the NPX thread */
            NpxThread = KeGetCurrentPrcb()->NpxThread;
            if (NpxThread)
            {
                /* Get the NPX frame */
                NpxSaveArea = KiGetThreadNpxArea(NpxThread);

                /* Save FPU state */
                DPRINT("FIXME: Save FPU state: %p\n", NpxSaveArea);
                //Ke386SaveFpuState(NpxSaveArea);

                /* Update NPX state */
                Thread->NpxState = NPX_STATE_NOT_LOADED;
            }

            /* Load FPU state */
            //Ke386LoadFpuState(SaveArea);

            /* Update NPX state */
            Thread->NpxState = NPX_STATE_LOADED;
            KeGetCurrentPrcb()->NpxThread = Thread;

            /* Enable interrupts */
            _enable();

            /* Check if CR0 needs to be reloaded due to context switch */
            if (!SaveArea->Cr0NpxState) KiEoiHelper(TrapFrame);

            /* Otherwise, we need to reload CR0, disable interrupts */
            _disable();

            /* Reload CR0 */
            Cr0 = __readcr0();
            Cr0 |= SaveArea->Cr0NpxState;
            __writecr0(Cr0);

            /* Now restore interrupts and check for TS */
            _enable();
            if (Cr0 & CR0_TS) KiEoiHelper(TrapFrame);

            /* We're still here -- clear TS and try again */
            __writecr0(__readcr0() & ~CR0_TS);
            _disable();
        }
        else
        {
            /* This is an actual fault, not a lack of FPU state */
            break;
        }
    }

    /* TS should not be set */
    if (Cr0 & CR0_TS)
    {
        /*
         * If it's incorrectly set, then maybe the state is actually still
         * valid but we could've lost track of that due to a BIOS call.
         * Make sure MP is still set, which should verify the theory.
         */
        if (Cr0 & CR0_MP)
        {
            /* Indeed, the state is actually still valid, so clear TS */
            __writecr0(__readcr0() & ~CR0_TS);
            KiEoiHelper(TrapFrame);
        }

        /* Otherwise, something strange is going on */
        KeBugCheckWithTf(TRAP_CAUSE_UNKNOWN, 2, Cr0, 0, 0, TrapFrame);
    }

    /* It's not a delayed load, so process this trap as an NPX fault */
    KiNpxHandler(TrapFrame, Thread, SaveArea);
}
DECLSPEC_NORETURN
VOID
FASTCALL
KiNpxHandler(IN PKTRAP_FRAME TrapFrame,
             IN PKTHREAD Thread,
             IN PFX_SAVE_AREA SaveArea)
{
    ULONG Cr0, Mask, Error, ErrorOffset, DataOffset;

    /* Check for VDM trap */
    ASSERT((KiVdmTrap(TrapFrame)) == FALSE);

    /* Check for kernel trap */
    if (!KiUserTrap(TrapFrame))
    {
        /* Kernel might've tripped a delayed error */
        SaveArea->Cr0NpxState |= CR0_TS;

        /* Only valid if it happened during a restore */
        //if ((PVOID)TrapFrame->Eip == FrRestore)
        {
            /* It did, so just skip the instruction */
            //TrapFrame->Eip += 3; /* sizeof(FRSTOR) */
            //KiEoiHelper(TrapFrame);
        }
    }

    /* User or kernel trap -- get ready to issue an exception */
    //if (Thread->NpxState == NPX_STATE_NOT_LOADED)
    {
        /* Update CR0 */
        Cr0 = __readcr0();
        Cr0 &= ~(CR0_MP | CR0_EM | CR0_TS);
        __writecr0(Cr0);

        /* Save FPU state */
        Ke386SaveFpuState(SaveArea);

        /* Mark CR0 state dirty */
        Cr0 |= NPX_STATE_NOT_LOADED;
        Cr0 |= SaveArea->Cr0NpxState;
        __writecr0(Cr0);

        /* Update NPX state */
        Thread->NpxState = NPX_STATE_NOT_LOADED;
        KeGetCurrentPrcb()->NpxThread = NULL;
    }

    /* Clear the TS bit and re-enable interrupts */
    SaveArea->Cr0NpxState &= ~CR0_TS;
    _enable();

    /* Check if we should get the FN or FX error */
    if (KeI386FxsrPresent)
    {
        /* Get it from FX */
        Mask = SaveArea->U.FxArea.ControlWord;
        Error = SaveArea->U.FxArea.StatusWord;

        /* Get the FPU exception address too */
        ErrorOffset = SaveArea->U.FxArea.ErrorOffset;
        DataOffset = SaveArea->U.FxArea.DataOffset;
    }
    else
    {
        /* Get it from FN */
        Mask = SaveArea->U.FnArea.ControlWord;
        Error = SaveArea->U.FnArea.StatusWord;

        /* Get the FPU exception address too */
        ErrorOffset = SaveArea->U.FnArea.ErrorOffset;
        DataOffset = SaveArea->U.FnArea.DataOffset;
    }

    /* Get legal exceptions that software should handle */
    Error &= (FSW_INVALID_OPERATION |
              FSW_DENORMAL |
              FSW_ZERO_DIVIDE |
              FSW_OVERFLOW |
              FSW_UNDERFLOW |
              FSW_PRECISION);
    Error &= ~Mask;

    if (Error & FSW_STACK_FAULT)
    {
        /* Issue stack check fault */
        KiDispatchException2Args(STATUS_FLOAT_STACK_CHECK,
                                 ErrorOffset,
                                 0,
                                 DataOffset,
                                 TrapFrame);
    }

    /* Check for invalid operation */
    if (Error & FSW_INVALID_OPERATION)
    {
        /* Issue fault */
        KiDispatchException1Args(STATUS_FLOAT_INVALID_OPERATION,
                                 ErrorOffset,
                                 0,
                                 TrapFrame);
    }

    /* Check for divide by zero */
    if (Error & FSW_ZERO_DIVIDE)
    {
        /* Issue fault */
        KiDispatchException1Args(STATUS_FLOAT_DIVIDE_BY_ZERO,
                                 ErrorOffset,
                                 0,
                                 TrapFrame);
    }

    /* Check for denormal */
    if (Error & FSW_DENORMAL)
    {
        /* Issue fault */
        KiDispatchException1Args(STATUS_FLOAT_INVALID_OPERATION,
                                 ErrorOffset,
                                 0,
                                 TrapFrame);
    }

    /* Check for overflow */
    if (Error & FSW_OVERFLOW)
    {
        /* Issue fault */
        KiDispatchException1Args(STATUS_FLOAT_OVERFLOW,
                                 ErrorOffset,
                                 0,
                                 TrapFrame);
    }

    /* Check for underflow */
    if (Error & FSW_UNDERFLOW)
    {
        /* Issue fault */
        KiDispatchException1Args(STATUS_FLOAT_UNDERFLOW,
                                 ErrorOffset,
                                 0,
                                 TrapFrame);
    }

    /* Check for precision fault */
    if (Error & FSW_PRECISION)
    {
        /* Issue fault */
        KiDispatchException1Args(STATUS_FLOAT_INEXACT_RESULT,
                                 ErrorOffset,
                                 0,
                                 TrapFrame);
    }

    /* Unknown FPU fault */
    KeBugCheckWithTf(TRAP_CAUSE_UNKNOWN, 1, Error, 0, 0, TrapFrame);
}
/// <summary>
/// Switch CPU to root mode
/// </summary>
/// <param name="Vcpu">Virtual CPU data</param>
/// <returns>TRUE on success</returns>
BOOLEAN VmxEnterRoot( IN PVCPU Vcpu )
{
    PKSPECIAL_REGISTERS Registers = &Vcpu->HostState.SpecialRegisters;
    PIA32_VMX_BASIC_MSR pBasic = (PIA32_VMX_BASIC_MSR)&Vcpu->MsrData[VMX_MSR( MSR_IA32_VMX_BASIC )];

    // Ensure that the VMCS can fit into a single page
    if (pBasic->Fields.RegionSize > PAGE_SIZE)
    {
        DPRINT( "HyperBone: CPU %d: %s: VMCS region doesn't fit into one page\n", CPU_IDX, __FUNCTION__ );
        return FALSE;
    }

    // Ensure that the VMCS is supported in writeback memory
    if (pBasic->Fields.MemoryType != VMX_MEM_TYPE_WRITEBACK)
    {
        DPRINT( "HyperBone: CPU %d: %s: Unsupported memory type\n", CPU_IDX, __FUNCTION__ );
        return FALSE;
    }

    // Ensure that true MSRs can be used for capabilities
    if (pBasic->Fields.VmxCapabilityHint == 0)
    {
        DPRINT( "HyperBone: CPU %d: %s: No true MSR support\n", CPU_IDX, __FUNCTION__ );
        return FALSE;
    }

    // Capture the revision ID for the VMXON and VMCS regions
    Vcpu->VMXON->RevisionId = pBasic->Fields.RevisionIdentifier;
    Vcpu->VMCS->RevisionId  = pBasic->Fields.RevisionIdentifier;

    // Update CR0 with the must-be-zero and must-be-one requirements
    Registers->Cr0 &= Vcpu->MsrData[VMX_MSR( MSR_IA32_VMX_CR0_FIXED1 )].LowPart;
    Registers->Cr0 |= Vcpu->MsrData[VMX_MSR( MSR_IA32_VMX_CR0_FIXED0 )].LowPart;

    // Do the same for CR4
    Registers->Cr4 &= Vcpu->MsrData[VMX_MSR( MSR_IA32_VMX_CR4_FIXED1 )].LowPart;
    Registers->Cr4 |= Vcpu->MsrData[VMX_MSR( MSR_IA32_VMX_CR4_FIXED0 )].LowPart;

    // Update host CR0 and CR4 based on the requirements above
    __writecr0( Registers->Cr0 );
    __writecr4( Registers->Cr4 );

    // Enable VMX Root Mode
    PHYSICAL_ADDRESS phys = MmGetPhysicalAddress( Vcpu->VMXON );
    int res = __vmx_on( (PULONG64)&phys );
    if (res)
    {
        DPRINT( "HyperBone: CPU %d: %s: __vmx_on failed with status %d\n", CPU_IDX, __FUNCTION__, res );
        return FALSE;
    }

    // Clear the state of the VMCS, setting it to Inactive
    phys = MmGetPhysicalAddress( Vcpu->VMCS );
    if (__vmx_vmclear( (PULONG64)&phys ))
    {
        DPRINT( "HyperBone: CPU %d: %s: __vmx_vmclear failed\n", CPU_IDX, __FUNCTION__ );
        return FALSE;
    }

    // Load the VMCS, setting its state to Active
    if (__vmx_vmptrld( (PULONG64)&phys ))
    {
        DPRINT( "HyperBone: CPU %d: %s: __vmx_vmptrld failed\n", CPU_IDX, __FUNCTION__ );
        return FALSE;
    }

    // VMX Root Mode is enabled, with an active VMCS.
    return TRUE;
}
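// A minimal teardown sketch mirroring VmxEnterRoot (an assumption based on
// the same VCPU layout, not HyperBone's actual code): VMCLEAR the active
// VMCS so its cached state is flushed to memory, leave VMX root operation
// with VMXOFF, then clear CR4.VMXE.
VOID VmxExitRoot( IN PVCPU Vcpu )
{
    // Make the current VMCS inactive and flush it to memory
    PHYSICAL_ADDRESS phys = MmGetPhysicalAddress( Vcpu->VMCS );
    __vmx_vmclear( (PULONG64)&phys );

    // Leave VMX root operation
    __vmx_off();

    // CR4.VMXE (bit 13) may only be cleared outside of VMX operation
    __writecr4( __readcr4() & ~(1ULL << 13) );
}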
VOID
NTAPI
KiInitializeCpu(PKIPCR Pcr)
{
    ULONG64 Pat;
    ULONG FeatureBits;

    /* Initialize gs */
    KiInitializeSegments();

    /* Set GS base */
    __writemsr(MSR_GS_BASE, (ULONG64)Pcr);
    __writemsr(MSR_GS_SWAP, (ULONG64)Pcr);

    /* Detect and set the CPU Type */
    KiSetProcessorType();

    /* Get the processor features for this CPU */
    FeatureBits = KiGetFeatureBits();

    /* Check if we support all needed features */
    if ((FeatureBits & REQUIRED_FEATURE_BITS) != REQUIRED_FEATURE_BITS)
    {
        /* If not, bugcheck system */
        FrLdrDbgPrint("CPU doesn't have needed features! Has: 0x%x, required: 0x%x\n",
                      FeatureBits, REQUIRED_FEATURE_BITS);
        KeBugCheck(0);
    }

    /* Set DEP to always on */
    SharedUserData->NXSupportPolicy = NX_SUPPORT_POLICY_ALWAYSON;
    FeatureBits |= KF_NX_ENABLED;

    /* Save feature bits */
    Pcr->Prcb.FeatureBits = FeatureBits;

    /* Enable fx save restore support */
    __writecr4(__readcr4() | CR4_FXSR);

    /* Enable XMMI exceptions */
    __writecr4(__readcr4() | CR4_XMMEXCPT);

    /* Enable Write-Protection */
    __writecr0(__readcr0() | CR0_WP);

    /* Disable fpu monitoring */
    __writecr0(__readcr0() & ~CR0_MP);

    /* Disable x87 fpu exceptions */
    __writecr0(__readcr0() & ~CR0_NE);

    /* LDT is unused */
    __lldt(0);

    /* Set the systemcall entry points */
    __writemsr(MSR_LSTAR, (ULONG64)KiSystemCallEntry64);
    __writemsr(MSR_CSTAR, (ULONG64)KiSystemCallEntry32);

    __writemsr(MSR_STAR, ((ULONG64)KGDT64_R0_CODE << 32) |
                         ((ULONG64)(KGDT64_R3_CMCODE | RPL_MASK) << 48));

    /* Set the flags to be cleared when doing a syscall */
    __writemsr(MSR_SYSCALL_MASK, EFLAGS_IF_MASK | EFLAGS_TF | EFLAGS_DF);

    /* Enable syscall instruction and no-execute support */
    __writemsr(MSR_EFER, __readmsr(MSR_EFER) | MSR_SCE | MSR_NXE);

    /* Initialize the PAT */
    Pat = (PAT_WB << 0) | (PAT_WC << 8) | (PAT_UCM << 16) | (PAT_UC << 24) |
          (PAT_WB << 32) | (PAT_WC << 40) | (PAT_UCM << 48) | (PAT_UC << 56);
    __writemsr(MSR_PAT, Pat);
}
NTSTATUS VTxSoftwareStatus()
{
    //
    // Check the feature control bit MSR
    //
    IA32_FEATURE_CONTROL_MSR msr;
    TO_ULL(msr) = __readmsr(MSR_IA32_FEATURE_CONTROL);

    if (msr.Lock == 1)
    {
        // If the MSR is locked, it can't be modified
        // If 'EnableVmxon' is unset, virtualization is not possible
        if (msr.EnableVmxon == 0)
        {
            DbgLog("VMX is disabled in bios: MSR_IA32_FEATURE_CONTROL is 0x%llx\n", TO_ULL(msr));
            return STATUS_NOT_SUPPORTED;
        }
    }
    else
    {
        // Force the lock to be on and enable VMXON
        msr.Lock = 1;
        msr.VmxonInSmx = 1;
        msr.EnableVmxon = 1;
        __writemsr(MSR_IA32_FEATURE_CONTROL, TO_ULL(msr));
    }

    //
    // Setup CR0 correctly (Protected mode and paging must be enabled)
    //
    CR0_REG cr0;
    TO_ULL(cr0) = __readcr0();

    if (cr0.PE == 0 || cr0.PG == 0)
    {
        DbgLog("Error: Protected mode or paging is not set in CR0\n");
        return STATUS_NOT_SUPPORTED;
    }
    else
    {
        // Required by the first processors that supported VMX
        cr0.NE = 1;
    }

    __writecr0(TO_ULL(cr0));

    //
    // Virtual Machine eXtensions Enable in CR4
    // BIT #13 VMXE
    //
    __try
    {
        __writecr4(__readcr4() | (1 << 13));
    }
    __except (EXCEPTION_EXECUTE_HANDLER)
    {
        // Possible 'Privileged Instruction Exception' with CR4 bits
        return GetExceptionCode();
    }

    return STATUS_SUCCESS;
}
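// A hedged companion check for the routine above (the name is illustrative):
// before reading IA32_FEATURE_CONTROL, confirm the processor advertises VMX
// at all. CPUID leaf 1 reports VMX support in ECX bit 5.
BOOLEAN VTxIsVmxSupported()
{
    int regs[4];       // EAX, EBX, ECX, EDX
    __cpuid(regs, 1);  // basic processor feature information

    return (regs[2] & (1 << 5)) != 0;  // CPUID.1:ECX.VMX[bit 5]
}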