Example #1
// See: VMM SETUP & TEAR DOWN
_Use_decl_annotations_ static bool VmpEnterVmxMode(
    ProcessorData *processor_data) {
  // Apply FIXED bits
  const Cr0 cr0_fixed0 = {UtilReadMsr(Msr::kIa32VmxCr0Fixed0)};
  const Cr0 cr0_fixed1 = {UtilReadMsr(Msr::kIa32VmxCr0Fixed1)};
  Cr0 cr0 = {__readcr0()};
  cr0.all &= cr0_fixed1.all;
  cr0.all |= cr0_fixed0.all;
  __writecr0(cr0.all);

  const Cr4 cr4_fixed0 = {UtilReadMsr(Msr::kIa32VmxCr4Fixed0)};
  const Cr4 cr4_fixed1 = {UtilReadMsr(Msr::kIa32VmxCr4Fixed1)};
  Cr4 cr4 = {__readcr4()};
  cr4.all &= cr4_fixed1.all;
  cr4.all |= cr4_fixed0.all;
  __writecr4(cr4.all);

  // Write a VMCS revision identifier
  const Ia32VmxBasicMsr vmx_basic_msr = {UtilReadMsr64(Msr::kIa32VmxBasic)};
  processor_data->vmxon_region->revision_identifier =
      vmx_basic_msr.fields.revision_identifier;

  auto vmxon_region_pa = UtilPaFromVa(processor_data->vmxon_region);
  if (__vmx_on(&vmxon_region_pa)) {
    return false;
  }

  UtilInveptAll();
  return true;
}
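The write to revision_identifier above relies on the VMXON region starting with the VMCS revision identifier reported by IA32_VMX_BASIC. A minimal sketch of that one-page layout, matching what the code assumes (the project's actual definition may carry annotations this sketch omits):

// Sketch of the 4KB VMXON/VMCS region layout assumed above. Bits 30:0 of
// the first dword hold the revision identifier; the rest of the page is
// owned by the processor and must not be touched by software.
struct VmControlStructure {
  unsigned long revision_identifier;
  unsigned char data[PAGE_SIZE - sizeof(unsigned long)];
};
static_assert(sizeof(VmControlStructure) == PAGE_SIZE, "must be one page");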
Example #2
// See: VMM SETUP & TEAR DOWN
_Use_decl_annotations_ static bool VmpEnterVmxMode(
    ProcessorData *processor_data) {
  PAGED_CODE();

  // Apply FIXED bits
  // See: VMX-FIXED BITS IN CR0

  //        IA32_VMX_CRx_FIXED0 IA32_VMX_CRx_FIXED1 Meaning
  // Values 1                   1                   bit of CRx is fixed to 1
  // Values 0                   1                   bit of CRx is flexible
  // Values 0                   0                   bit of CRx is fixed to 0
  const Cr0 cr0_fixed0 = {UtilReadMsr(Msr::kIa32VmxCr0Fixed0)};
  const Cr0 cr0_fixed1 = {UtilReadMsr(Msr::kIa32VmxCr0Fixed1)};
  Cr0 cr0 = {__readcr0()};
  Cr0 cr0_original = cr0;
  cr0.all &= cr0_fixed1.all;
  cr0.all |= cr0_fixed0.all;
  __writecr0(cr0.all);

  HYPERPLATFORM_LOG_DEBUG("IA32_VMX_CR0_FIXED0   = %08x", cr0_fixed0.all);
  HYPERPLATFORM_LOG_DEBUG("IA32_VMX_CR0_FIXED1   = %08x", cr0_fixed1.all);
  HYPERPLATFORM_LOG_DEBUG("Original CR0          = %08x", cr0_original.all);
  HYPERPLATFORM_LOG_DEBUG("Fixed CR0             = %08x", cr0.all);

  // See: VMX-FIXED BITS IN CR4
  const Cr4 cr4_fixed0 = {UtilReadMsr(Msr::kIa32VmxCr4Fixed0)};
  const Cr4 cr4_fixed1 = {UtilReadMsr(Msr::kIa32VmxCr4Fixed1)};
  Cr4 cr4 = {__readcr4()};
  Cr4 cr4_original = cr4;
  cr4.all &= cr4_fixed1.all;
  cr4.all |= cr4_fixed0.all;
  __writecr4(cr4.all);

  HYPERPLATFORM_LOG_DEBUG("IA32_VMX_CR4_FIXED0   = %08x", cr4_fixed0.all);
  HYPERPLATFORM_LOG_DEBUG("IA32_VMX_CR4_FIXED1   = %08x", cr4_fixed1.all);
  HYPERPLATFORM_LOG_DEBUG("Original CR4          = %08x", cr4_original.all);
  HYPERPLATFORM_LOG_DEBUG("Fixed CR4             = %08x", cr4.all);

  // Write a VMCS revision identifier
  const Ia32VmxBasicMsr vmx_basic_msr = {UtilReadMsr64(Msr::kIa32VmxBasic)};
  processor_data->vmxon_region->revision_identifier =
      vmx_basic_msr.fields.revision_identifier;

  auto vmxon_region_pa = UtilPaFromVa(processor_data->vmxon_region);
  if (__vmx_on(&vmxon_region_pa)) {
    return false;
  }

  // See: Guidelines for Use of the INVVPID Instruction, and Guidelines for Use
  // of the INVEPT Instruction
  UtilInveptGlobal();
  UtilInvvpidAllContext();
  return true;
}
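To make the truth table above concrete, here is a worked sketch with hypothetical MSR values; 0x80000021 is a common CR0 FIXED0 reading (PE, NE, and PG forced to 1) and 0xffffffff a common CR0 FIXED1 reading (no bit forced to 0), but real values must come from the MSRs:

// Worked example of the (cr0 & FIXED1) | FIXED0 adjustment with assumed
// constants; the actual values are processor-specific.
constexpr unsigned long kCr0Fixed0 = 0x80000021;  // PE, NE, PG fixed to 1
constexpr unsigned long kCr0Fixed1 = 0xffffffff;  // no bit fixed to 0
// Mandatory bits get OR-ed in even when the original CR0 lacked them.
static_assert(((0x00000000 & kCr0Fixed1) | kCr0Fixed0) == 0x80000021, "");
// A typical Windows CR0 (PE|MP|ET|NE|WP|AM|PG) passes through unchanged.
static_assert(((0x80050033 & kCr0Fixed1) | kCr0Fixed0) == 0x80050033, "");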
Example #3
// Initialize shared processor data
_Use_decl_annotations_ static SharedProcessorData *VmpInitializeSharedData() {
  PAGED_CODE();

  const auto shared_data = reinterpret_cast<SharedProcessorData *>(
      ExAllocatePoolWithTag(NonPagedPoolNx, sizeof(SharedProcessorData),
                            kHyperPlatformCommonPoolTag));
  if (!shared_data) {
    return nullptr;
  }
  RtlZeroMemory(shared_data, sizeof(SharedProcessorData));
  HYPERPLATFORM_LOG_DEBUG("SharedData=        %p", shared_data);

  // Set up the MSR bitmap
  const auto msr_bitmap = ExAllocatePoolWithTag(NonPagedPoolNx, PAGE_SIZE,
                                                kHyperPlatformCommonPoolTag);
  if (!msr_bitmap) {
    ExFreePoolWithTag(shared_data, kHyperPlatformCommonPoolTag);
    return nullptr;
  }
  RtlZeroMemory(msr_bitmap, PAGE_SIZE);
  shared_data->msr_bitmap = msr_bitmap;

  // Check MSRs from 0 to 0xfff for ones that raise #GP on read; those should
  // not cause VM-exit.
  bool unsafe_msr_map[0x1000] = {};
  for (auto msr = 0ul; msr < RTL_NUMBER_OF(unsafe_msr_map); ++msr) {
    __try {
      UtilReadMsr(static_cast<Msr>(msr));
    } __except (EXCEPTION_EXECUTE_HANDLER) {
      unsafe_msr_map[msr] = true;
    }
  }

  // Activate VM-exit for RDMSR against all MSRs
  const auto bitmap_read_low = reinterpret_cast<UCHAR *>(msr_bitmap);
  const auto bitmap_read_high = bitmap_read_low + 1024;
  RtlFillMemory(bitmap_read_low, 1024, 0xff);   // read        0 -     1fff
  RtlFillMemory(bitmap_read_high, 1024, 0xff);  // read c0000000 - c0001fff

  // But ignore IA32_MPERF (000000e7) and IA32_APERF (000000e8)
  RTL_BITMAP bitmap_read_low_header = {};
  RtlInitializeBitMap(&bitmap_read_low_header,
                      reinterpret_cast<PULONG>(bitmap_read_low), 1024 * 8);
  RtlClearBits(&bitmap_read_low_header, 0xe7, 2);

  // Also ignore the unsafe MSRs
  for (auto msr = 0ul; msr < RTL_NUMBER_OF(unsafe_msr_map); ++msr) {
    const auto ignore = unsafe_msr_map[msr];
    if (ignore) {
      RtlClearBits(&bitmap_read_low_header, msr, 1);
    }
  }

  // But ignore IA32_GS_BASE (c0000101) and IA32_KERNEL_GS_BASE (c0000102)
  RTL_BITMAP bitmap_read_high_header = {};
  RtlInitializeBitMap(&bitmap_read_high_header,
                      reinterpret_cast<PULONG>(bitmap_read_high), 1024 * 8);
  RtlClearBits(&bitmap_read_high_header, 0x101, 2);

  return shared_data;
}
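For reference, the layout filled in above is the one the processor consults on RDMSR: the first 1024 bytes cover reads of MSRs 0x0-0x1fff and the next 1024 bytes cover reads of MSRs 0xc0000000-0xc0001fff (write bitmaps occupy the second half of the page). A minimal sketch of the lookup, using a hypothetical helper that is not part of HyperPlatform:

// Hypothetical helper showing how a read-intercept bit is located in the
// MSR bitmap built above.
static bool MsrpIsReadIntercepted(const UCHAR *msr_bitmap, ULONG msr) {
  ULONG base = 0;   // "read low" region: MSRs 0x0 - 0x1fff
  ULONG index = msr;
  if (msr >= 0xc0000000) {
    base = 1024;    // "read high" region: MSRs 0xc0000000 - 0xc0001fff
    index = msr - 0xc0000000;
  }
  return (msr_bitmap[base + index / 8] >> (index % 8)) & 1;
}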
Example #4
// Build MSR bitmap
_Use_decl_annotations_ static void *VmpBuildMsrBitmap() {
  PAGED_CODE();

  const auto msr_bitmap = ExAllocatePoolWithTag(NonPagedPool, PAGE_SIZE,
                                                kHyperPlatformCommonPoolTag);
  if (!msr_bitmap) {
    return nullptr;
  }
  RtlZeroMemory(msr_bitmap, PAGE_SIZE);

  // Activate VM-exit for RDMSR against all MSRs
  const auto bitmap_read_low = reinterpret_cast<UCHAR *>(msr_bitmap);
  const auto bitmap_read_high = bitmap_read_low + 1024;
  RtlFillMemory(bitmap_read_low, 1024, 0xff);   // read        0 -     1fff
  RtlFillMemory(bitmap_read_high, 1024, 0xff);  // read c0000000 - c0001fff

  // Ignore IA32_MPERF (000000e7) and IA32_APERF (000000e8)
  RTL_BITMAP bitmap_read_low_header = {};
  RtlInitializeBitMap(&bitmap_read_low_header,
                      reinterpret_cast<PULONG>(bitmap_read_low), 1024 * 8);
  RtlClearBits(&bitmap_read_low_header, 0xe7, 2);

  // Check MSRs from 0 to 0xfff for ones that cause #GP, and ignore all of them
  for (auto msr = 0ul; msr < 0x1000; ++msr) {
    __try {
      UtilReadMsr(static_cast<Msr>(msr));
    } __except (EXCEPTION_EXECUTE_HANDLER) {
      RtlClearBits(&bitmap_read_low_header, msr, 1);
    }
  }

  // Ignore IA32_GS_BASE (c0000101) and IA32_KERNEL_GS_BASE (c0000102)
  RTL_BITMAP bitmap_read_high_header = {};
  RtlInitializeBitMap(&bitmap_read_high_header,
                      reinterpret_cast<PULONG>(bitmap_read_high),
                      1024 * CHAR_BIT);
  RtlClearBits(&bitmap_read_high_header, 0x101, 2);

  return msr_bitmap;
}
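A hedged usage sketch: the returned page is referenced by its physical address from the VMCS (see the kMsrBitmap write in VmpSetupVMCS below) and is freed with the same pool tag on teardown:

// Usage sketch; error handling and ownership are simplified for illustration.
const auto msr_bitmap = VmpBuildMsrBitmap();
if (msr_bitmap) {
  // UtilPaFromVa(msr_bitmap) is the value written to VmcsField::kMsrBitmap.
  ExFreePoolWithTag(msr_bitmap, kHyperPlatformCommonPoolTag);
}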
Example #5
// See: PREPARATION AND LAUNCHING A VIRTUAL MACHINE
_Use_decl_annotations_ static bool VmpSetupVMCS(
    const ProcessorData *processor_data, ULONG_PTR guest_stack_pointer,
    ULONG_PTR guest_instruction_pointer, ULONG_PTR vmm_stack_pointer) {
  Gdtr gdtr = {};
  __sgdt(&gdtr);

  Idtr idtr = {};
  __sidt(&idtr);

  // See: Algorithms for Determining VMX Capabilities
  const auto use_true_msrs = Ia32VmxBasicMsr{UtilReadMsr64(
      Msr::kIa32VmxBasic)}.fields.vmx_capability_hint;

  VmxVmEntryControls vm_entryctl_requested = {};
  vm_entryctl_requested.fields.ia32e_mode_guest = IsX64();
  VmxVmEntryControls vm_entryctl = {VmpAdjustControlValue(
      (use_true_msrs) ? Msr::kIa32VmxTrueEntryCtls : Msr::kIa32VmxEntryCtls,
      vm_entryctl_requested.all)};

  VmxVmExitControls vm_exitctl_requested = {};
  vm_exitctl_requested.fields.acknowledge_interrupt_on_exit = true;
  vm_exitctl_requested.fields.host_address_space_size = IsX64();
  VmxVmExitControls vm_exitctl = {VmpAdjustControlValue(
      (use_true_msrs) ? Msr::kIa32VmxTrueExitCtls : Msr::kIa32VmxExitCtls,
      vm_exitctl_requested.all)};

  VmxPinBasedControls vm_pinctl_requested = {};
  VmxPinBasedControls vm_pinctl = {
      VmpAdjustControlValue((use_true_msrs) ? Msr::kIa32VmxTruePinbasedCtls
                                            : Msr::kIa32VmxPinbasedCtls,
                            vm_pinctl_requested.all)};

  VmxProcessorBasedControls vm_procctl_requested = {};
  vm_procctl_requested.fields.invlpg_exiting = false;
  vm_procctl_requested.fields.rdtsc_exiting = false;
  vm_procctl_requested.fields.cr3_load_exiting = true;
  vm_procctl_requested.fields.cr8_load_exiting = false;  // NB: very frequent
  vm_procctl_requested.fields.mov_dr_exiting = true;
  vm_procctl_requested.fields.use_msr_bitmaps = true;
  vm_procctl_requested.fields.activate_secondary_control = true;
  VmxProcessorBasedControls vm_procctl = {
      VmpAdjustControlValue((use_true_msrs) ? Msr::kIa32VmxTrueProcBasedCtls
                                            : Msr::kIa32VmxProcBasedCtls,
                            vm_procctl_requested.all)};

  VmxSecondaryProcessorBasedControls vm_procctl2_requested = {};
  vm_procctl2_requested.fields.enable_ept = true;
  vm_procctl2_requested.fields.enable_rdtscp = true;  // required for Win10
  vm_procctl2_requested.fields.descriptor_table_exiting = true;
  // required for Win10
  vm_procctl2_requested.fields.enable_xsaves_xstors = true;
  VmxSecondaryProcessorBasedControls vm_procctl2 = {VmpAdjustControlValue(
      Msr::kIa32VmxProcBasedCtls2, vm_procctl2_requested.all)};

  // Set up CR0 and CR4 bitmaps
  // - Where a bit is     masked, the shadow bit appears
  // - Where a bit is not masked, the actual bit appears
  // VM-exit occurs when a guest modifies any of those fields
  Cr0 cr0_mask = {};
  Cr4 cr4_mask = {};

  // See: PDPTE Registers
  // If PAE paging would be in use following an execution of MOV to CR0 or MOV
  // to CR4 (see Section 4.1.1) and the instruction is modifying any of CR0.CD,
  // CR0.NW, CR0.PG, CR4.PAE, CR4.PGE, CR4.PSE, or CR4.SMEP; then the PDPTEs are
  // loaded from the address in CR3.
  if (UtilIsX86Pae()) {
    cr0_mask.fields.pg = true;
    cr0_mask.fields.cd = true;
    cr0_mask.fields.nw = true;
    cr4_mask.fields.pae = true;
    cr4_mask.fields.pge = true;
    cr4_mask.fields.pse = true;
    cr4_mask.fields.smep = true;
  }

  const auto exception_bitmap =
      // 1 << InterruptionVector::kBreakpointException |
      // 1 << InterruptionVector::kGeneralProtectionException |
      // 1 << InterruptionVector::kPageFaultException |
      0;

  // clang-format off
  /* 16-Bit Control Field */

  /* 16-Bit Guest-State Fields */
  auto error = VmxStatus::kOk;
  error |= UtilVmWrite(VmcsField::kGuestEsSelector, AsmReadES());
  error |= UtilVmWrite(VmcsField::kGuestCsSelector, AsmReadCS());
  error |= UtilVmWrite(VmcsField::kGuestSsSelector, AsmReadSS());
  error |= UtilVmWrite(VmcsField::kGuestDsSelector, AsmReadDS());
  error |= UtilVmWrite(VmcsField::kGuestFsSelector, AsmReadFS());
  error |= UtilVmWrite(VmcsField::kGuestGsSelector, AsmReadGS());
  error |= UtilVmWrite(VmcsField::kGuestLdtrSelector, AsmReadLDTR());
  error |= UtilVmWrite(VmcsField::kGuestTrSelector, AsmReadTR());

  /* 16-Bit Host-State Fields */
  // RPL and TI have to be 0
  error |= UtilVmWrite(VmcsField::kHostEsSelector, AsmReadES() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostCsSelector, AsmReadCS() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostSsSelector, AsmReadSS() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostDsSelector, AsmReadDS() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostFsSelector, AsmReadFS() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostGsSelector, AsmReadGS() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostTrSelector, AsmReadTR() & 0xf8);

  /* 64-Bit Control Fields */
  error |= UtilVmWrite64(VmcsField::kIoBitmapA, 0);
  error |= UtilVmWrite64(VmcsField::kIoBitmapB, 0);
  error |= UtilVmWrite64(VmcsField::kMsrBitmap, UtilPaFromVa(processor_data->shared_data->msr_bitmap));
  error |= UtilVmWrite64(VmcsField::kEptPointer, EptGetEptPointer(processor_data->ept_data));

  /* 64-Bit Guest-State Fields */
  error |= UtilVmWrite64(VmcsField::kVmcsLinkPointer, MAXULONG64);
  error |= UtilVmWrite64(VmcsField::kGuestIa32Debugctl, UtilReadMsr64(Msr::kIa32Debugctl));
  if (UtilIsX86Pae()) {
    UtilLoadPdptes(__readcr3());
  }

  /* 32-Bit Control Fields */
  error |= UtilVmWrite(VmcsField::kPinBasedVmExecControl, vm_pinctl.all);
  error |= UtilVmWrite(VmcsField::kCpuBasedVmExecControl, vm_procctl.all);
  error |= UtilVmWrite(VmcsField::kExceptionBitmap, exception_bitmap);
  error |= UtilVmWrite(VmcsField::kPageFaultErrorCodeMask, 0);
  error |= UtilVmWrite(VmcsField::kPageFaultErrorCodeMatch, 0);
  error |= UtilVmWrite(VmcsField::kCr3TargetCount, 0);
  error |= UtilVmWrite(VmcsField::kVmExitControls, vm_exitctl.all);
  error |= UtilVmWrite(VmcsField::kVmExitMsrStoreCount, 0);
  error |= UtilVmWrite(VmcsField::kVmExitMsrLoadCount, 0);
  error |= UtilVmWrite(VmcsField::kVmEntryControls, vm_entryctl.all);
  error |= UtilVmWrite(VmcsField::kVmEntryMsrLoadCount, 0);
  error |= UtilVmWrite(VmcsField::kVmEntryIntrInfoField, 0);
  error |= UtilVmWrite(VmcsField::kSecondaryVmExecControl, vm_procctl2.all);

  /* 32-Bit Guest-State Fields */
  error |= UtilVmWrite(VmcsField::kGuestEsLimit, GetSegmentLimit(AsmReadES()));
  error |= UtilVmWrite(VmcsField::kGuestCsLimit, GetSegmentLimit(AsmReadCS()));
  error |= UtilVmWrite(VmcsField::kGuestSsLimit, GetSegmentLimit(AsmReadSS()));
  error |= UtilVmWrite(VmcsField::kGuestDsLimit, GetSegmentLimit(AsmReadDS()));
  error |= UtilVmWrite(VmcsField::kGuestFsLimit, GetSegmentLimit(AsmReadFS()));
  error |= UtilVmWrite(VmcsField::kGuestGsLimit, GetSegmentLimit(AsmReadGS()));
  error |= UtilVmWrite(VmcsField::kGuestLdtrLimit, GetSegmentLimit(AsmReadLDTR()));
  error |= UtilVmWrite(VmcsField::kGuestTrLimit, GetSegmentLimit(AsmReadTR()));
  error |= UtilVmWrite(VmcsField::kGuestGdtrLimit, gdtr.limit);
  error |= UtilVmWrite(VmcsField::kGuestIdtrLimit, idtr.limit);
  error |= UtilVmWrite(VmcsField::kGuestEsArBytes, VmpGetSegmentAccessRight(AsmReadES()));
  error |= UtilVmWrite(VmcsField::kGuestCsArBytes, VmpGetSegmentAccessRight(AsmReadCS()));
  error |= UtilVmWrite(VmcsField::kGuestSsArBytes, VmpGetSegmentAccessRight(AsmReadSS()));
  error |= UtilVmWrite(VmcsField::kGuestDsArBytes, VmpGetSegmentAccessRight(AsmReadDS()));
  error |= UtilVmWrite(VmcsField::kGuestFsArBytes, VmpGetSegmentAccessRight(AsmReadFS()));
  error |= UtilVmWrite(VmcsField::kGuestGsArBytes, VmpGetSegmentAccessRight(AsmReadGS()));
  error |= UtilVmWrite(VmcsField::kGuestLdtrArBytes, VmpGetSegmentAccessRight(AsmReadLDTR()));
  error |= UtilVmWrite(VmcsField::kGuestTrArBytes, VmpGetSegmentAccessRight(AsmReadTR()));
  error |= UtilVmWrite(VmcsField::kGuestInterruptibilityInfo, 0);
  error |= UtilVmWrite(VmcsField::kGuestActivityState, 0);
  error |= UtilVmWrite(VmcsField::kGuestSysenterCs, UtilReadMsr(Msr::kIa32SysenterCs));

  /* 32-Bit Host-State Field */
  error |= UtilVmWrite(VmcsField::kHostIa32SysenterCs, UtilReadMsr(Msr::kIa32SysenterCs));

  /* Natural-Width Control Fields */
  error |= UtilVmWrite(VmcsField::kCr0GuestHostMask, cr0_mask.all);
  error |= UtilVmWrite(VmcsField::kCr4GuestHostMask, cr4_mask.all);
  error |= UtilVmWrite(VmcsField::kCr0ReadShadow, __readcr0());
  error |= UtilVmWrite(VmcsField::kCr4ReadShadow, __readcr4());

  /* Natural-Width Guest-State Fields */
  error |= UtilVmWrite(VmcsField::kGuestCr0, __readcr0());
  error |= UtilVmWrite(VmcsField::kGuestCr3, __readcr3());
  error |= UtilVmWrite(VmcsField::kGuestCr4, __readcr4());
#if defined(_AMD64_)
  error |= UtilVmWrite(VmcsField::kGuestEsBase, 0);
  error |= UtilVmWrite(VmcsField::kGuestCsBase, 0);
  error |= UtilVmWrite(VmcsField::kGuestSsBase, 0);
  error |= UtilVmWrite(VmcsField::kGuestDsBase, 0);
  error |= UtilVmWrite(VmcsField::kGuestFsBase, UtilReadMsr(Msr::kIa32FsBase));
  error |= UtilVmWrite(VmcsField::kGuestGsBase, UtilReadMsr(Msr::kIa32GsBase));
#else
  error |= UtilVmWrite(VmcsField::kGuestEsBase, VmpGetSegmentBase(gdtr.base, AsmReadES()));
  error |= UtilVmWrite(VmcsField::kGuestCsBase, VmpGetSegmentBase(gdtr.base, AsmReadCS()));
  error |= UtilVmWrite(VmcsField::kGuestSsBase, VmpGetSegmentBase(gdtr.base, AsmReadSS()));
  error |= UtilVmWrite(VmcsField::kGuestDsBase, VmpGetSegmentBase(gdtr.base, AsmReadDS()));
  error |= UtilVmWrite(VmcsField::kGuestFsBase, VmpGetSegmentBase(gdtr.base, AsmReadFS()));
  error |= UtilVmWrite(VmcsField::kGuestGsBase, VmpGetSegmentBase(gdtr.base, AsmReadGS()));
#endif
  error |= UtilVmWrite(VmcsField::kGuestLdtrBase, VmpGetSegmentBase(gdtr.base, AsmReadLDTR()));
  error |= UtilVmWrite(VmcsField::kGuestTrBase, VmpGetSegmentBase(gdtr.base, AsmReadTR()));
  error |= UtilVmWrite(VmcsField::kGuestGdtrBase, gdtr.base);
  error |= UtilVmWrite(VmcsField::kGuestIdtrBase, idtr.base);
  error |= UtilVmWrite(VmcsField::kGuestDr7, __readdr(7));
  error |= UtilVmWrite(VmcsField::kGuestRsp, guest_stack_pointer);
  error |= UtilVmWrite(VmcsField::kGuestRip, guest_instruction_pointer);
  error |= UtilVmWrite(VmcsField::kGuestRflags, __readeflags());
  error |= UtilVmWrite(VmcsField::kGuestSysenterEsp, UtilReadMsr(Msr::kIa32SysenterEsp));
  error |= UtilVmWrite(VmcsField::kGuestSysenterEip, UtilReadMsr(Msr::kIa32SysenterEip));

  /* Natural-Width Host-State Fields */
  error |= UtilVmWrite(VmcsField::kHostCr0, __readcr0());
  error |= UtilVmWrite(VmcsField::kHostCr3, __readcr3());
  error |= UtilVmWrite(VmcsField::kHostCr4, __readcr4());
#if defined(_AMD64_)
  error |= UtilVmWrite(VmcsField::kHostFsBase, UtilReadMsr(Msr::kIa32FsBase));
  error |= UtilVmWrite(VmcsField::kHostGsBase, UtilReadMsr(Msr::kIa32GsBase));
#else
  error |= UtilVmWrite(VmcsField::kHostFsBase, VmpGetSegmentBase(gdtr.base, AsmReadFS()));
  error |= UtilVmWrite(VmcsField::kHostGsBase, VmpGetSegmentBase(gdtr.base, AsmReadGS()));
#endif
  error |= UtilVmWrite(VmcsField::kHostTrBase, VmpGetSegmentBase(gdtr.base, AsmReadTR()));
  error |= UtilVmWrite(VmcsField::kHostGdtrBase, gdtr.base);
  error |= UtilVmWrite(VmcsField::kHostIdtrBase, idtr.base);
  error |= UtilVmWrite(VmcsField::kHostIa32SysenterEsp, UtilReadMsr(Msr::kIa32SysenterEsp));
  error |= UtilVmWrite(VmcsField::kHostIa32SysenterEip, UtilReadMsr(Msr::kIa32SysenterEip));
  error |= UtilVmWrite(VmcsField::kHostRsp, vmm_stack_pointer);
  error |= UtilVmWrite(VmcsField::kHostRip, reinterpret_cast<ULONG_PTR>(AsmVmmEntryPoint));
  // clang-format on

  const auto vmx_status = static_cast<VmxStatus>(error);
  return vmx_status == VmxStatus::kOk;
}
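VmpAdjustControlValue, used for every control field above, does not appear in these examples. Below is a sketch consistent with the Intel SDM's capability-MSR encoding (allowed-0 settings in the low 32 bits, allowed-1 settings in the high 32 bits); the project's actual helper may differ in details:

// Adjust a requested control value against a VMX capability MSR: a bit may
// remain 1 only if the high dword allows it, and is forced to 1 if the low
// dword requires it.
static ULONG VmpAdjustControlValue(Msr msr, ULONG requested_value) {
  LARGE_INTEGER msr_value = {};
  msr_value.QuadPart = UtilReadMsr64(msr);
  auto adjusted_value = requested_value;
  adjusted_value &= msr_value.HighPart;  // clear bits that cannot be 1
  adjusted_value |= msr_value.LowPart;   // set bits that must be 1
  return adjusted_value;
}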