// Puts the current processor into VMX operation: adjusts CR0/CR4 to satisfy
// the VMX fixed-bit constraints, stamps the VMXON region with the VMCS
// revision identifier, and executes VMXON.
// Returns true on success; false if VMXON failed.
_Use_decl_annotations_ EXTERN_C static bool VminitpEnterVmxMode(
    PER_PROCESSOR_DATA *ProcessorData) {
  // Force CR0 into a VMX-compatible shape. Bits clear in FIXED1 must be 0;
  // bits set in FIXED0 must be 1 (see Intel SDM, "VMX-Fixed Bits in CR0").
  const CR0_REG cr0RequiredOne = {__readmsr(IA32_VMX_CR0_FIXED0)};
  const CR0_REG cr0AllowedOne = {__readmsr(IA32_VMX_CR0_FIXED1)};
  CR0_REG adjustedCr0 = {__readcr0()};
  adjustedCr0.All = (adjustedCr0.All & cr0AllowedOne.All) | cr0RequiredOne.All;
  __writecr0(adjustedCr0.All);

  // Same treatment for CR4 (this also sets CR4.VMXE, which is fixed-1).
  const CR4_REG cr4RequiredOne = {__readmsr(IA32_VMX_CR4_FIXED0)};
  const CR4_REG cr4AllowedOne = {__readmsr(IA32_VMX_CR4_FIXED1)};
  CR4_REG adjustedCr4 = {__readcr4()};
  adjustedCr4.All = (adjustedCr4.All & cr4AllowedOne.All) | cr4RequiredOne.All;
  __writecr4(adjustedCr4.All);

  // The VMXON region must begin with the VMCS revision identifier reported
  // by IA32_VMX_BASIC before VMXON is executed.
  IA32_VMX_BASIC_MSR vmxBasicMsr = {__readmsr(IA32_VMX_BASIC)};
  ProcessorData->VmxonRegion->RevisionIdentifier =
      vmxBasicMsr.Fields.RevisionIdentifier;

  // VMXON takes the *physical* address of the region, passed by pointer.
  auto vmxonRegionPA = MmGetPhysicalAddress(ProcessorData->VmxonRegion);
  return !__vmx_on(
      reinterpret_cast<unsigned long long *>(&vmxonRegionPA.QuadPart));
}
// See: VMM SETUP & TEAR DOWN _Use_decl_annotations_ static bool VmpEnterVmxMode( ProcessorData *processor_data) { // Apply FIXED bits const Cr0 cr0_fixed0 = {UtilReadMsr(Msr::kIa32VmxCr0Fixed0)}; const Cr0 cr0_fixed1 = {UtilReadMsr(Msr::kIa32VmxCr0Fixed1)}; Cr0 cr0 = {__readcr0()}; cr0.all &= cr0_fixed1.all; cr0.all |= cr0_fixed0.all; __writecr0(cr0.all); const Cr4 cr4_fixed0 = {UtilReadMsr(Msr::kIa32VmxCr4Fixed0)}; const Cr4 cr4_fixed1 = {UtilReadMsr(Msr::kIa32VmxCr4Fixed1)}; Cr4 cr4 = {__readcr4()}; cr4.all &= cr4_fixed1.all; cr4.all |= cr4_fixed0.all; __writecr4(cr4.all); // Write a VMCS revision identifier const Ia32VmxBasicMsr vmx_basic_msr = {UtilReadMsr64(Msr::kIa32VmxBasic)}; processor_data->vmxon_region->revision_identifier = vmx_basic_msr.fields.revision_identifier; auto vmxon_region_pa = UtilPaFromVa(processor_data->vmxon_region); if (__vmx_on(&vmxon_region_pa)) { return false; } UtilInveptAll(); return true; }
static inline bool enter_vmx(struct vmcs *vmxon) { /* If we're running nested on a hypervisor that does not * support VT-x, this will cause #GP. */ u64 cr0 = __readcr0(); cr0 &= __readmsr(MSR_IA32_VMX_CR0_FIXED1); cr0 |= __readmsr(MSR_IA32_VMX_CR0_FIXED0); __writecr0(cr0); u64 cr4 = __readcr4(); cr4 &= __readmsr(MSR_IA32_VMX_CR4_FIXED1); cr4 |= __readmsr(MSR_IA32_VMX_CR4_FIXED0); __writecr4(cr4); u64 vmx = __readmsr(MSR_IA32_VMX_BASIC); vmxon->revision_id = (u32)vmx; /* Enter VMX root operation */ uintptr_t pa = __pa(vmxon); if (__vmx_on(&pa)) return false; /* This is necessary here or just before we exit the VM, * we do it here as it's easier. */ __invept_all(); return true; }
//
// Initializes the per-processor virtualization control area: allocates the
// host stack and the VMXON/VMCS/MSR-bitmap regions, then enters VMX root
// mode and loads the VMCS.
//
// ProcessorNumber - zero-based index of the processor being initialized.
//
// Returns STATUS_SUCCESS, STATUS_NO_MEMORY on allocation failure,
// STATUS_UNSUCCESSFUL if a VMX instruction reports failure, or the
// exception code if a VMX instruction faults (e.g. VMX disabled in BIOS).
//
// Fix vs. original: every failure path leaked the host stack allocation and
// left a dangling pointer in CpuControlArea[]; a VMCLEAR/VMPTRLD failure
// after a successful VMXON also left the CPU stuck in VMX root mode.
//
NTSTATUS ControlAreaInitializeProcessor(LONG ProcessorNumber)
{
    NTSTATUS status;
    BOOLEAN vmxOn = FALSE;

    //
    // Allocate host stack region
    // 16 pages available for use
    //
    SIZE_T stackSize = 16 * PAGE_SIZE;
    PUCHAR stackBase = ExAllocatePoolWithTag(NonPagedPool, stackSize, 'KSTK');
    if (!stackBase)
        return STATUS_NO_MEMORY;

    RtlSecureZeroMemory((PVOID)stackBase, stackSize);

    //
    // Set up CPU control structure, carved out of the top of the stack
    // region (8 bytes of slack below it for the initial RSP).
    //
    PVIRT_CPU cpu = (PVIRT_CPU)(stackBase + stackSize - 8 - sizeof(VIRT_CPU));
    cpu->HostStackBase = stackBase;
    cpu->Self = cpu;
    CpuControlArea[ProcessorNumber] = cpu;

    //
    // Allocate all VMX regions.
    //
    // NOTE(review): regions already allocated by AllocateVmxProcessorData
    // are not released on a later failure — TODO confirm the matching free
    // routine and release them in the cleanup path as well.
    //
    if (!NT_SUCCESS(AllocateVmxProcessorData(&cpu->VmxonVa, &cpu->VmxonPa, &cpu->VmxonSize)) ||
        !NT_SUCCESS(AllocateVmxProcessorData(&cpu->VmcsVa, &cpu->VmcsPa, &cpu->VmcsSize)) ||
        !NT_SUCCESS(AllocateVmxProcessorData(&cpu->MSRBitmapVa, &cpu->MSRBitmapPa, &cpu->MSRBitmapSize)))
    {
        status = STATUS_NO_MEMORY;
        goto Fail;
    }

    // Bitmap needs to be zeroed (all-zero = no MSR accesses intercepted)
    RtlSecureZeroMemory(cpu->MSRBitmapVa, cpu->MSRBitmapSize);

    __try
    {
        if (__vmx_on(PA_PTR_INT64(cpu->VmxonPa)) > 0)
        {
            status = STATUS_UNSUCCESSFUL;
            goto Fail;
        }
        vmxOn = TRUE;

        if (__vmx_vmclear(PA_PTR_INT64(cpu->VmcsPa)) > 0 ||
            __vmx_vmptrld(PA_PTR_INT64(cpu->VmcsPa)) > 0)
        {
            status = STATUS_UNSUCCESSFUL;
            goto Fail;
        }
    }
    __except (EXCEPTION_EXECUTE_HANDLER)
    {
        // Rare case (or if physical address is invalid)
        status = GetExceptionCode();
        goto Fail;
    }

    return STATUS_SUCCESS;

Fail:
    // Leave VMX root mode if we managed to enter it before failing.
    if (vmxOn)
        __vmx_off();

    // Drop the dangling pointer before freeing the memory that backs it.
    CpuControlArea[ProcessorNumber] = NULL;
    ExFreePoolWithTag(stackBase, 'KSTK');
    return status;
}
// See: VMM SETUP & TEAR DOWN _Use_decl_annotations_ static bool VmpEnterVmxMode( ProcessorData *processor_data) { PAGED_CODE(); // Apply FIXED bits // See: VMX-FIXED BITS IN CR0 // IA32_VMX_CRx_FIXED0 IA32_VMX_CRx_FIXED1 Meaning // Values 1 1 bit of CRx is fixed to 1 // Values 0 1 bit of CRx is flexible // Values 0 0 bit of CRx is fixed to 0 const Cr0 cr0_fixed0 = {UtilReadMsr(Msr::kIa32VmxCr0Fixed0)}; const Cr0 cr0_fixed1 = {UtilReadMsr(Msr::kIa32VmxCr0Fixed1)}; Cr0 cr0 = {__readcr0()}; Cr0 cr0_original = cr0; cr0.all &= cr0_fixed1.all; cr0.all |= cr0_fixed0.all; __writecr0(cr0.all); HYPERPLATFORM_LOG_DEBUG("IA32_VMX_CR0_FIXED0 = %08x", cr0_fixed0.all); HYPERPLATFORM_LOG_DEBUG("IA32_VMX_CR0_FIXED1 = %08x", cr0_fixed1.all); HYPERPLATFORM_LOG_DEBUG("Original CR0 = %08x", cr0_original.all); HYPERPLATFORM_LOG_DEBUG("Fixed CR0 = %08x", cr0.all); // See: VMX-FIXED BITS IN CR4 const Cr4 cr4_fixed0 = {UtilReadMsr(Msr::kIa32VmxCr4Fixed0)}; const Cr4 cr4_fixed1 = {UtilReadMsr(Msr::kIa32VmxCr4Fixed1)}; Cr4 cr4 = {__readcr4()}; Cr4 cr4_original = cr4; cr4.all &= cr4_fixed1.all; cr4.all |= cr4_fixed0.all; __writecr4(cr4.all); HYPERPLATFORM_LOG_DEBUG("IA32_VMX_CR4_FIXED0 = %08x", cr4_fixed0.all); HYPERPLATFORM_LOG_DEBUG("IA32_VMX_CR4_FIXED1 = %08x", cr4_fixed1.all); HYPERPLATFORM_LOG_DEBUG("Original CR4 = %08x", cr4_original.all); HYPERPLATFORM_LOG_DEBUG("Fixed CR4 = %08x", cr4.all); // Write a VMCS revision identifier const Ia32VmxBasicMsr vmx_basic_msr = {UtilReadMsr64(Msr::kIa32VmxBasic)}; processor_data->vmxon_region->revision_identifier = vmx_basic_msr.fields.revision_identifier; auto vmxon_region_pa = UtilPaFromVa(processor_data->vmxon_region); if (__vmx_on(&vmxon_region_pa)) { return false; } // See: Guidelines for Use of the INVVPID Instruction, and Guidelines for Use // of the INVEPT Instruction UtilInveptGlobal(); UtilInvvpidAllContext(); return true; }
/// <summary>
/// Switch CPU to root mode: validate IA32_VMX_BASIC capabilities, stamp the
/// VMXON/VMCS regions with the revision identifier, bring CR0/CR4 into the
/// VMX fixed-bit shape, then execute VMXON, VMCLEAR and VMPTRLD.
/// </summary>
/// <param name="Vcpu">Virtual CPU data</param>
/// <returns>TRUE on success</returns>
BOOLEAN VmxEnterRoot( IN PVCPU Vcpu )
{
    // Host register snapshot that will be patched with the adjusted CR0/CR4.
    PKSPECIAL_REGISTERS Registers = &Vcpu->HostState.SpecialRegisters;
    // Previously captured IA32_VMX_BASIC value, reinterpreted as a bitfield.
    PIA32_VMX_BASIC_MSR pBasic = (PIA32_VMX_BASIC_MSR)&Vcpu->MsrData[VMX_MSR( MSR_IA32_VMX_BASIC )];

    // Ensure the VMCS can fit into a single page
    if (pBasic->Fields.RegionSize > PAGE_SIZE)
    {
        DPRINT( "HyperBone: CPU %d: %s: VMCS region doesn't fit into one page\n", CPU_IDX, __FUNCTION__ );
        return FALSE;
    }

    // Ensure that the VMCS is supported in writeback memory
    if (pBasic->Fields.MemoryType != VMX_MEM_TYPE_WRITEBACK)
    {
        DPRINT( "HyperBone: CPU %d: %s: Unsupported memory type\n", CPU_IDX, __FUNCTION__ );
        return FALSE;
    }

    // Ensure that true MSRs can be used for capabilities
    if (pBasic->Fields.VmxCapabilityHint == 0)
    {
        DPRINT( "HyperBone: CPU %d: %s: No true MSR support\n", CPU_IDX, __FUNCTION__ );
        return FALSE;
    }

    // Capture the revision ID for the VMXON and VMCS region
    Vcpu->VMXON->RevisionId = pBasic->Fields.RevisionIdentifier;
    Vcpu->VMCS->RevisionId = pBasic->Fields.RevisionIdentifier;

    // Update CR0 with the must-be-zero and must-be-one requirements.
    // NOTE(review): only .LowPart of the fixed MSRs is applied, so the upper
    // 32 bits of CR0/CR4 are cleared by the AND — presumably fine since those
    // bits are reserved-zero today, but TODO confirm against the full MSRs.
    Registers->Cr0 &= Vcpu->MsrData[VMX_MSR( MSR_IA32_VMX_CR0_FIXED1 )].LowPart;
    Registers->Cr0 |= Vcpu->MsrData[VMX_MSR( MSR_IA32_VMX_CR0_FIXED0 )].LowPart;

    // Do the same for CR4
    Registers->Cr4 &= Vcpu->MsrData[VMX_MSR( MSR_IA32_VMX_CR4_FIXED1 )].LowPart;
    Registers->Cr4 |= Vcpu->MsrData[VMX_MSR( MSR_IA32_VMX_CR4_FIXED0 )].LowPart;

    // Update host CR0 and CR4 based on the requirements above
    __writecr0( Registers->Cr0 );
    __writecr4( Registers->Cr4 );

    // Enable VMX Root Mode; VMXON takes the region's physical address.
    PHYSICAL_ADDRESS phys = MmGetPhysicalAddress( Vcpu->VMXON );
    int res = __vmx_on( (PULONG64)&phys );
    if (res)
    {
        DPRINT( "HyperBone: CPU %d: %s: __vmx_on failed with status %d\n", CPU_IDX, __FUNCTION__, res );
        return FALSE;
    }

    // Clear the state of the VMCS, setting it to Inactive
    phys = MmGetPhysicalAddress( Vcpu->VMCS );
    if (__vmx_vmclear( (PULONG64)&phys ))
    {
        DPRINT( "HyperBone: CPU %d: %s: __vmx_vmclear failed\n", CPU_IDX, __FUNCTION__ );
        return FALSE;
    }

    // Load the VMCS, setting its state to Active
    if (__vmx_vmptrld( (PULONG64)&phys ))
    {
        DPRINT( "HyperBone: CPU %d: %s: __vmx_vmptrld failed\n", CPU_IDX, __FUNCTION__ );
        return FALSE;
    }

    // VMX Root Mode is enabled, with an active VMCS.
    return TRUE;
}