// Puts the current processor into VMX root operation using the caller's
// pre-allocated VMXON region. Returns false if VMXON fails.
_Use_decl_annotations_ EXTERN_C static bool VminitpEnterVmxMode(
    PER_PROCESSOR_DATA *ProcessorData) {
  // Force CR0 into a VMX-compatible state: a bit may only be 1 where
  // FIXED1 has a 1, and must be 1 wherever FIXED0 has a 1.
  const CR0_REG fixed0Cr0 = {__readmsr(IA32_VMX_CR0_FIXED0)};
  const CR0_REG fixed1Cr0 = {__readmsr(IA32_VMX_CR0_FIXED1)};
  CR0_REG currentCr0 = {__readcr0()};
  currentCr0.All = (currentCr0.All & fixed1Cr0.All) | fixed0Cr0.All;
  __writecr0(currentCr0.All);

  // Apply the same fixed-bit adjustment to CR4.
  const CR4_REG fixed0Cr4 = {__readmsr(IA32_VMX_CR4_FIXED0)};
  const CR4_REG fixed1Cr4 = {__readmsr(IA32_VMX_CR4_FIXED1)};
  CR4_REG currentCr4 = {__readcr4()};
  currentCr4.All = (currentCr4.All & fixed1Cr4.All) | fixed0Cr4.All;
  __writecr4(currentCr4.All);

  // Stamp the VMXON region with the VMCS revision identifier reported by
  // IA32_VMX_BASIC, then execute VMXON with its physical address.
  IA32_VMX_BASIC_MSR basicMsr = {__readmsr(IA32_VMX_BASIC)};
  ProcessorData->VmxonRegion->RevisionIdentifier =
      basicMsr.Fields.RevisionIdentifier;

  auto vmxonPa = MmGetPhysicalAddress(ProcessorData->VmxonRegion);
  return __vmx_on(reinterpret_cast<unsigned long long *>(&vmxonPa.QuadPart)) ==
         0;
}
// See: VMM SETUP & TEAR DOWN _Use_decl_annotations_ static bool VmpEnterVmxMode( ProcessorData *processor_data) { // Apply FIXED bits const Cr0 cr0_fixed0 = {UtilReadMsr(Msr::kIa32VmxCr0Fixed0)}; const Cr0 cr0_fixed1 = {UtilReadMsr(Msr::kIa32VmxCr0Fixed1)}; Cr0 cr0 = {__readcr0()}; cr0.all &= cr0_fixed1.all; cr0.all |= cr0_fixed0.all; __writecr0(cr0.all); const Cr4 cr4_fixed0 = {UtilReadMsr(Msr::kIa32VmxCr4Fixed0)}; const Cr4 cr4_fixed1 = {UtilReadMsr(Msr::kIa32VmxCr4Fixed1)}; Cr4 cr4 = {__readcr4()}; cr4.all &= cr4_fixed1.all; cr4.all |= cr4_fixed0.all; __writecr4(cr4.all); // Write a VMCS revision identifier const Ia32VmxBasicMsr vmx_basic_msr = {UtilReadMsr64(Msr::kIa32VmxBasic)}; processor_data->vmxon_region->revision_identifier = vmx_basic_msr.fields.revision_identifier; auto vmxon_region_pa = UtilPaFromVa(processor_data->vmxon_region); if (__vmx_on(&vmxon_region_pa)) { return false; } UtilInveptAll(); return true; }
static inline bool enter_vmx(struct vmcs *vmxon) { /* If we're running nested on a hypervisor that does not * support VT-x, this will cause #GP. */ u64 cr0 = __readcr0(); cr0 &= __readmsr(MSR_IA32_VMX_CR0_FIXED1); cr0 |= __readmsr(MSR_IA32_VMX_CR0_FIXED0); __writecr0(cr0); u64 cr4 = __readcr4(); cr4 &= __readmsr(MSR_IA32_VMX_CR4_FIXED1); cr4 |= __readmsr(MSR_IA32_VMX_CR4_FIXED0); __writecr4(cr4); u64 vmx = __readmsr(MSR_IA32_VMX_BASIC); vmxon->revision_id = (u32)vmx; /* Enter VMX root operation */ uintptr_t pa = __pa(vmxon); if (__vmx_on(&pa)) return false; /* This is necessary here or just before we exit the VM, * we do it here as it's easier. */ __invept_all(); return true; }
// See: VMM SETUP & TEAR DOWN _Use_decl_annotations_ static bool VmpEnterVmxMode( ProcessorData *processor_data) { PAGED_CODE(); // Apply FIXED bits // See: VMX-FIXED BITS IN CR0 // IA32_VMX_CRx_FIXED0 IA32_VMX_CRx_FIXED1 Meaning // Values 1 1 bit of CRx is fixed to 1 // Values 0 1 bit of CRx is flexible // Values 0 0 bit of CRx is fixed to 0 const Cr0 cr0_fixed0 = {UtilReadMsr(Msr::kIa32VmxCr0Fixed0)}; const Cr0 cr0_fixed1 = {UtilReadMsr(Msr::kIa32VmxCr0Fixed1)}; Cr0 cr0 = {__readcr0()}; Cr0 cr0_original = cr0; cr0.all &= cr0_fixed1.all; cr0.all |= cr0_fixed0.all; __writecr0(cr0.all); HYPERPLATFORM_LOG_DEBUG("IA32_VMX_CR0_FIXED0 = %08x", cr0_fixed0.all); HYPERPLATFORM_LOG_DEBUG("IA32_VMX_CR0_FIXED1 = %08x", cr0_fixed1.all); HYPERPLATFORM_LOG_DEBUG("Original CR0 = %08x", cr0_original.all); HYPERPLATFORM_LOG_DEBUG("Fixed CR0 = %08x", cr0.all); // See: VMX-FIXED BITS IN CR4 const Cr4 cr4_fixed0 = {UtilReadMsr(Msr::kIa32VmxCr4Fixed0)}; const Cr4 cr4_fixed1 = {UtilReadMsr(Msr::kIa32VmxCr4Fixed1)}; Cr4 cr4 = {__readcr4()}; Cr4 cr4_original = cr4; cr4.all &= cr4_fixed1.all; cr4.all |= cr4_fixed0.all; __writecr4(cr4.all); HYPERPLATFORM_LOG_DEBUG("IA32_VMX_CR4_FIXED0 = %08x", cr4_fixed0.all); HYPERPLATFORM_LOG_DEBUG("IA32_VMX_CR4_FIXED1 = %08x", cr4_fixed1.all); HYPERPLATFORM_LOG_DEBUG("Original CR4 = %08x", cr4_original.all); HYPERPLATFORM_LOG_DEBUG("Fixed CR4 = %08x", cr4.all); // Write a VMCS revision identifier const Ia32VmxBasicMsr vmx_basic_msr = {UtilReadMsr64(Msr::kIa32VmxBasic)}; processor_data->vmxon_region->revision_identifier = vmx_basic_msr.fields.revision_identifier; auto vmxon_region_pa = UtilPaFromVa(processor_data->vmxon_region); if (__vmx_on(&vmxon_region_pa)) { return false; } // See: Guidelines for Use of the INVVPID Instruction, and Guidelines for Use // of the INVEPT Instruction UtilInveptGlobal(); UtilInvvpidAllContext(); return true; }
/*
 * Turns the CR4.VME (Virtual-8086 Mode Extensions) flag on or off,
 * with interrupts disabled around the CR4 update.
 */
VOID
NTAPI
INIT_FUNCTION
Ki386VdmEnablePentiumExtentions(IN BOOLEAN Enable)
{
    ULONG SavedFlags, NewCr4;

    /* Remember EFLAGS, then mask interrupts while CR4 is patched */
    SavedFlags = __readeflags();
    _disable();

    /* Set or clear the VME bit as the caller requested */
    NewCr4 = __readcr4();
    if (Enable)
    {
        NewCr4 |= CR4_VME;
    }
    else
    {
        NewCr4 &= ~CR4_VME;
    }
    __writecr4(NewCr4);

    /* Restore EFLAGS (and with it the original interrupt state) */
    __writeeflags(SavedFlags);
}
/// <summary>
/// Switch CPU to root mode: validate the VMX capabilities cached in the
/// VCPU's MSR snapshot, stamp the VMXON and VMCS regions with the VMCS
/// revision identifier, apply the CR0/CR4 fixed-bit requirements, execute
/// VMXON, then VMCLEAR and VMPTRLD the VMCS so it is active and current.
/// </summary>
/// <param name="Vcpu">Virtual CPU data</param>
/// <returns>TRUE on success</returns>
BOOLEAN VmxEnterRoot( IN PVCPU Vcpu )
{
    PKSPECIAL_REGISTERS Registers = &Vcpu->HostState.SpecialRegisters;
    // IA32_VMX_BASIC was captured earlier into the per-VCPU MSR array
    PIA32_VMX_BASIC_MSR pBasic = (PIA32_VMX_BASIC_MSR)&Vcpu->MsrData[VMX_MSR( MSR_IA32_VMX_BASIC )];

    // Ensure the VMCS can fit into a single page
    if (pBasic->Fields.RegionSize > PAGE_SIZE)
    {
        DPRINT( "HyperBone: CPU %d: %s: VMCS region doesn't fit into one page\n", CPU_IDX, __FUNCTION__ );
        return FALSE;
    }

    // Ensure that the VMCS is supported in writeback memory
    if (pBasic->Fields.MemoryType != VMX_MEM_TYPE_WRITEBACK)
    {
        DPRINT( "HyperBone: CPU %d: %s: Unsupported memory type\n", CPU_IDX, __FUNCTION__ );
        return FALSE;
    }

    // Ensure that true MSRs can be used for capabilities
    if (pBasic->Fields.VmxCapabilityHint == 0)
    {
        DPRINT( "HyperBone: CPU %d: %s: No true MSR support\n", CPU_IDX, __FUNCTION__ );
        return FALSE;
    }

    // Capture the revision ID for the VMXON and VMCS region
    Vcpu->VMXON->RevisionId = pBasic->Fields.RevisionIdentifier;
    Vcpu->VMCS->RevisionId = pBasic->Fields.RevisionIdentifier;

    // Update CR0 with the must-be-zero and must-be-one requirements
    // (AND with FIXED1 clears forbidden bits, OR with FIXED0 sets required bits)
    Registers->Cr0 &= Vcpu->MsrData[VMX_MSR( MSR_IA32_VMX_CR0_FIXED1 )].LowPart;
    Registers->Cr0 |= Vcpu->MsrData[VMX_MSR( MSR_IA32_VMX_CR0_FIXED0 )].LowPart;

    // Do the same for CR4
    Registers->Cr4 &= Vcpu->MsrData[VMX_MSR( MSR_IA32_VMX_CR4_FIXED1 )].LowPart;
    Registers->Cr4 |= Vcpu->MsrData[VMX_MSR( MSR_IA32_VMX_CR4_FIXED0 )].LowPart;

    // Update host CR0 and CR4 based on the requirements above
    __writecr0( Registers->Cr0 );
    __writecr4( Registers->Cr4 );

    // Enable VMX Root Mode
    PHYSICAL_ADDRESS phys = MmGetPhysicalAddress( Vcpu->VMXON );
    int res = __vmx_on( (PULONG64)&phys );
    if (res)
    {
        DPRINT( "HyperBone: CPU %d: %s: __vmx_on failed with status %d\n", CPU_IDX, __FUNCTION__, res );
        return FALSE;
    }

    // Clear the state of the VMCS, setting it to Inactive
    phys = MmGetPhysicalAddress( Vcpu->VMCS );
    if (__vmx_vmclear( (PULONG64)&phys ))
    {
        DPRINT( "HyperBone: CPU %d: %s: __vmx_vmclear failed\n", CPU_IDX, __FUNCTION__ );
        return FALSE;
    }

    // Load the VMCS, setting its state to Active
    if (__vmx_vmptrld( (PULONG64)&phys ))
    {
        DPRINT( "HyperBone: CPU %d: %s: __vmx_vmptrld failed\n", CPU_IDX, __FUNCTION__ );
        return FALSE;
    }

    // VMX Root Mode is enabled, with an active VMCS.
    return TRUE;
}
/*
 * Per-processor early CPU initialization: sets up GS to point at the PCR,
 * verifies required CPU features (bugchecking if absent), configures CR0/CR4
 * control bits, programs the SYSCALL MSRs, enables NX, and initializes the
 * PAT. Runs once per processor during kernel startup.
 */
VOID
NTAPI
KiInitializeCpu(PKIPCR Pcr)
{
    ULONG64 Pat;
    ULONG FeatureBits;

    /* Initialize gs */
    KiInitializeSegments();

    /* Set GS base (kernel) and GS swap (user) to the processor's PCR */
    __writemsr(MSR_GS_BASE, (ULONG64)Pcr);
    __writemsr(MSR_GS_SWAP, (ULONG64)Pcr);

    /* Detect and set the CPU Type */
    KiSetProcessorType();

    /* Get the processor features for this CPU */
    FeatureBits = KiGetFeatureBits();

    /* Check if we support all needed features */
    if ((FeatureBits & REQUIRED_FEATURE_BITS) != REQUIRED_FEATURE_BITS)
    {
        /* If not, bugcheck system */
        FrLdrDbgPrint("CPU doesn't have needed features! Has: 0x%x, required: 0x%x\n",
                      FeatureBits, REQUIRED_FEATURE_BITS);
        KeBugCheck(0);
    }

    /* Set DEP to always on */
    SharedUserData->NXSupportPolicy = NX_SUPPORT_POLICY_ALWAYSON;
    FeatureBits |= KF_NX_ENABLED;

    /* Save feature bits */
    Pcr->Prcb.FeatureBits = FeatureBits;

    /* Enable fx save restore support */
    __writecr4(__readcr4() | CR4_FXSR);

    /* Enable XMMI exceptions */
    __writecr4(__readcr4() | CR4_XMMEXCPT);

    /* Enable Write-Protection */
    __writecr0(__readcr0() | CR0_WP);

    /* Disable fpu monitoring */
    __writecr0(__readcr0() & ~CR0_MP);

    /* Disable x87 fpu exceptions */
    __writecr0(__readcr0() & ~CR0_NE);

    /* LDT is unused */
    __lldt(0);

    /* Set the systemcall entry points (64-bit and 32-bit compat) */
    __writemsr(MSR_LSTAR, (ULONG64)KiSystemCallEntry64);
    __writemsr(MSR_CSTAR, (ULONG64)KiSystemCallEntry32);

    /* STAR holds the kernel CS (bits 32-47) and user CS|RPL (bits 48-63)
       selectors used by syscall/sysret */
    __writemsr(MSR_STAR, ((ULONG64)KGDT64_R0_CODE << 32) |
                         ((ULONG64)(KGDT64_R3_CMCODE|RPL_MASK) << 48));

    /* Set the flags to be cleared when doing a syscall */
    __writemsr(MSR_SYSCALL_MASK, EFLAGS_IF_MASK | EFLAGS_TF | EFLAGS_DF);

    /* Enable syscall instruction and no-execute support */
    __writemsr(MSR_EFER, __readmsr(MSR_EFER) | MSR_SCE | MSR_NXE);

    /* Initialize the PAT: eight 8-bit memory-type entries */
    Pat = (PAT_WB << 0)  | (PAT_WC << 8)  | (PAT_UCM << 16) | (PAT_UC << 24) |
          (PAT_WB << 32) | (PAT_WC << 40) | (PAT_UCM << 48) | (PAT_UC << 56);
    __writemsr(MSR_PAT, Pat);
}
NTSTATUS VTxSoftwareStatus() { // // Check the feature control bit MSR // IA32_FEATURE_CONTROL_MSR msr; TO_ULL(msr) = __readmsr(MSR_IA32_FEATURE_CONTROL); if (msr.Lock == 1) { // If the MSR is locked, it can't be modified // If 'EnableVmxon' is unset, virtualization is not possible if (msr.EnableVmxon == 0) { DbgLog("VMX is disabled in bios: MSR_IA32_FEATURE_CONTROL is 0x%llx\n", msr); return STATUS_NOT_SUPPORTED; } } else { // Force the lock to be on and enable VMXON msr.Lock = 1; msr.VmxonInSmx = 1; msr.EnableVmxon = 1; __writemsr(MSR_IA32_FEATURE_CONTROL, TO_ULL(msr)); } // // Setup CR0 correctly (Protected mode and paging must be enabled) // CR0_REG cr0; TO_ULL(cr0) = __readcr0(); if (cr0.PE == 0 || cr0.PG == 0) { DbgLog("Error: Protected mode or paging is not set in CR0\n"); return STATUS_NOT_SUPPORTED; } else { // Required by first processors that supported VMX cr0.NE = 1; } __writecr0(TO_ULL(cr0)); // // Virtual Machine eXtensions Enable in CR4 // BIT #13 VMXE // __try { __writecr4(__readcr4() | (1 << 13)); } __except (EXCEPTION_EXECUTE_HANDLER) { // Possible 'Privileged Instruction Exception' with CR4 bits return GetExceptionCode(); } return STATUS_SUCCESS; }