Code Example #1
File: vcpu.c Project: HideSand/ksm
void vcpu_init(uintptr_t sp, uintptr_t ip, struct ksm *k)
{
	struct vcpu *vcpu = ExAllocatePool(NonPagedPoolNx, sizeof(*vcpu));
	if (!vcpu)
		return;

	RtlZeroMemory(vcpu, sizeof(*vcpu));
	if (!ept_init(&vcpu->ept)) {
		ExFreePool(vcpu);
		return;
	}

	PHYSICAL_ADDRESS highest;
	highest.QuadPart = -1;

	vcpu->stack = MmAllocateContiguousMemory(KERNEL_STACK_SIZE, highest);
	if (!vcpu->stack)
		goto out;
	RtlZeroMemory(vcpu->stack, KERNEL_STACK_SIZE);

	vcpu->vmcs = ExAllocatePool(NonPagedPoolNx, PAGE_SIZE);
	if (!vcpu->vmcs)
		goto out;
	RtlZeroMemory(vcpu->vmcs, PAGE_SIZE);

	vcpu->vmxon = ExAllocatePool(NonPagedPoolNx, PAGE_SIZE);
	if (!vcpu->vmxon)
		goto out;
	RtlZeroMemory(vcpu->vmxon, PAGE_SIZE);

	vcpu->ve = ExAllocatePool(NonPagedPoolNx, PAGE_SIZE);
	if (!vcpu->ve)
		goto out;
	RtlZeroMemory(vcpu->ve, PAGE_SIZE);

	vcpu->idt.limit = PAGE_SIZE - 1;
	vcpu->idt.base = (uintptr_t)ExAllocatePool(NonPagedPoolNx, PAGE_SIZE);
	if (!vcpu->idt.base)
		goto out;

	for (int i = 0; i < 0x100; ++i)
		vcpu->shadow_idt[i] = (struct kidt_entry64) { .e32 = (kidt_entry_t) { .p = 0 } };

	vcpu->nr = cpu_nr();
	k->vcpu_list[vcpu->nr] = vcpu;

	if (!enter_vmx(vcpu->vmxon))
		goto out;

	if (!init_vmcs(vcpu->vmcs))
		goto out_off;

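	/* vcpu_launch() does not return on success; execution resumes in the
	 * guest at `ip`, so reaching the labels below means failure.  */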
	if (setup_vmcs(vcpu, sp, ip, (uintptr_t)vcpu->stack + KERNEL_STACK_SIZE))
		vcpu_launch();

out_off:
	__vmx_off();
out:
	vcpu_free(vcpu);
}
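
vcpu_init expects to run once per logical processor, with sp/ip describing where the guest resumes after a successful vmlaunch. Below is a minimal sketch of such a per-CPU driver loop; ksm_capture_sp_ip is a hypothetical stand-in for the project's assembly stub that records the stack pointer and resume address, and the loop ignores processor groups for brevity.

#include <ntddk.h>
#include <stdint.h>

/* Hypothetical helper standing in for ksm's assembly stub: records the
 * caller's stack pointer and the address the guest resumes at. */
extern void ksm_capture_sp_ip(uintptr_t *sp, uintptr_t *ip);

static void virtualize_all_cpus(struct ksm *k)
{
	ULONG count = KeQueryActiveProcessorCount(NULL);
	for (ULONG i = 0; i < count; ++i) {
		/* Pin the current thread to CPU i so vcpu_init runs there. */
		KAFFINITY prev = KeSetSystemAffinityThreadEx((KAFFINITY)1 << i);

		uintptr_t sp, ip;
		ksm_capture_sp_ip(&sp, &ip);

		/* On a successful launch the guest resumes right here, so
		 * only initialize a CPU whose vcpu slot is still empty. */
		if (!k->vcpu_list[i])
			vcpu_init(sp, ip, k);

		KeRevertToUserAffinityThreadEx(prev);
	}
}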
Code Example #2
File: vminit.cpp Project: Rootkitsmm/Sushi
_Use_decl_annotations_ EXTERN_C static void VminitpInitializeVM(
    ULONG_PTR GuestStackPointer, ULONG_PTR GuestInstructionPointer) {
  // Allocate related structures
  auto ProcessorData =
      reinterpret_cast<PER_PROCESSOR_DATA *>(ExAllocatePoolWithTag(
          NonPagedPoolNx, sizeof(PER_PROCESSOR_DATA), SUSHI_POOL_TAG_NAME));
  auto VmmStackTop = MiscAllocateContiguousMemory(KERNEL_STACK_SIZE);
  auto VmcsRegion =
      reinterpret_cast<VMCS *>(MiscAllocateContiguousMemory(MAXIMUM_VMCS_SIZE));
  auto VmxonRegion =
      reinterpret_cast<VMCS *>(MiscAllocateContiguousMemory(MAXIMUM_VMCS_SIZE));
  auto msrBitmap = MiscAllocateContiguousMemory(PAGE_SIZE);
  if (!ProcessorData || !VmmStackTop || !VmcsRegion || !VmxonRegion ||
      !msrBitmap) {
    goto ReturnFalse;
  }
  RtlZeroMemory(ProcessorData, sizeof(PER_PROCESSOR_DATA));
  RtlZeroMemory(VmmStackTop, KERNEL_STACK_SIZE);
  RtlZeroMemory(VmcsRegion, MAXIMUM_VMCS_SIZE);
  RtlZeroMemory(VmxonRegion, MAXIMUM_VMCS_SIZE);
  RtlZeroMemory(msrBitmap, PAGE_SIZE);

  // Initialize stack memory for VMM like this:
  /*
  (High)
  +------------------+  <- VmmStackBottom (= VmmStackTop + KERNEL_STACK_SIZE)
  | ProcessorData    |  <- VmmStackData
  +------------------+
  | ffffffffffffffff |  <- VmmStackBase (initial VMM stack pointer)
  +------------------+    v
  |                  |    v
  |   VmmStack       |    v (grows down)
  |                  |    v
  +------------------+  <- VmmStackTop
  (Low)
  */
  const auto VmmStackBottom =
      reinterpret_cast<ULONG_PTR>(VmmStackTop) + KERNEL_STACK_SIZE;
  const auto VmmStackData = VmmStackBottom - sizeof(void *);
  const auto VmmStackBase = VmmStackData - sizeof(void *);
  LOG_DEBUG("VmmStackTop=       %p", VmmStackTop);
  LOG_DEBUG("VmmStackBottom=    %p", VmmStackBottom);
  LOG_DEBUG("VmmStackData=      %p", VmmStackData);
  LOG_DEBUG("ProcessorData=     %p stored at %p", ProcessorData, VmmStackData);
  LOG_DEBUG("VmmStackBase=      %p", VmmStackBase);
  LOG_DEBUG("GuestStackPointer= %p", GuestStackPointer);
  *reinterpret_cast<ULONG_PTR *>(VmmStackBase) = 0xffffffffffffffff;
  *reinterpret_cast<PER_PROCESSOR_DATA **>(VmmStackData) = ProcessorData;

  // Initialize the management structure
  ProcessorData->VmmStackTop = VmmStackTop;
  ProcessorData->VmcsRegion = VmcsRegion;
  ProcessorData->VmxonRegion = VmxonRegion;
  ProcessorData->MsrBitmap = msrBitmap;

  // Set up VMCS
  if (!VminitpEnterVmxMode(ProcessorData)) {
    goto ReturnFalse;
  }
  if (!VminitpInitializeVMCS(ProcessorData)) {
    goto ReturnFalseWithVmxOff;
  }
  if (!VminitpSetupVMCS(ProcessorData, GuestStackPointer,
                        GuestInstructionPointer, VmmStackBase)) {
    goto ReturnFalseWithVmxOff;
  }

  // Do virtualize the processor
  VminitpLaunchVM();

// This point is not reached when vmlaunch succeeds. Instead, execution
// resumes at the address specified by GuestInstructionPointer.

ReturnFalseWithVmxOff:;
  __vmx_off();

ReturnFalse:;
  if (ProcessorData) {
    ExFreePoolWithTag(ProcessorData, SUSHI_POOL_TAG_NAME);
  }
  if (VmmStackTop) {
    MiscFreeContiguousMemory(VmmStackTop);
  }
  if (VmcsRegion) {
    MiscFreeContiguousMemory(VmcsRegion);
  }
  if (VmxonRegion) {
    MiscFreeContiguousMemory(VmxonRegion);
  }
  if (msrBitmap) {
    MiscFreeContiguousMemory(msrBitmap);
  }
}
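
The two writes just below VmmStackBottom are what let the VM-exit handler find its per-processor state again. Assuming VminitpSetupVMCS stores VmmStackBase into the HOST_RSP VMCS field (its argument list suggests so), the CPU reloads RSP with VmmStackBase on every VM-exit, and the ProcessorData pointer then sits exactly one pointer-slot above RSP. A minimal sketch of the recovery, with a hypothetical helper name:

/* Sketch: recover the per-processor data at VM-exit time.  With
 * HostRsp == VmmStackBase, the layout built above gives:
 *
 *   HostRsp + sizeof(void *)  ->  PER_PROCESSOR_DATA *  (VmmStackData)
 *   HostRsp                   ->  0xffffffffffffffff    (sentinel)
 */
static PER_PROCESSOR_DATA *VmexitRecoverProcessorData(ULONG_PTR HostRsp)
{
  return *(PER_PROCESSOR_DATA **)(HostRsp + sizeof(void *));
}

The all-ones value stored at VmmStackBase doubles as a deliberately invalid return address marking the top of the VMM stack.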
Code Example #3
File: VMX.c Project: 453483289/HyperBone
/// <summary>
/// Execute VMLAUNCH
/// </summary>
/// <param name="Vcpu">Virtual CPU data</param>
VOID VmxSubvertCPU( IN PVCPU Vcpu )
{
    PHYSICAL_ADDRESS phys = { 0 };
    phys.QuadPart = MAXULONG64;

    //
    // Initialize all the VMX-related MSRs by reading their value
    //
    for (ULONG i = 0; i <= VMX_MSR( MSR_IA32_VMX_VMCS_ENUM ); i++)
        Vcpu->MsrData[i].QuadPart = __readmsr( MSR_IA32_VMX_BASIC + i );

    // Secondary controls, if present
    if (g_Data->Features.SecondaryControls)
        Vcpu->MsrData[VMX_MSR( MSR_IA32_VMX_PROCBASED_CTLS2 )].QuadPart = __readmsr( MSR_IA32_VMX_PROCBASED_CTLS2 );

    // True MSRs, if present
    if (g_Data->Features.TrueMSRs)
        for (ULONG i = VMX_MSR( MSR_IA32_VMX_TRUE_PINBASED_CTLS ); i <= VMX_MSR( MSR_IA32_VMX_TRUE_ENTRY_CTLS ); i++)
            Vcpu->MsrData[i].QuadPart = __readmsr( MSR_IA32_VMX_BASIC + i );

    // VMFUNC, if present
    if(g_Data->Features.VMFUNC)
        Vcpu->MsrData[VMX_MSR( MSR_IA32_VMX_VMFUNC )].QuadPart = __readmsr( MSR_IA32_VMX_VMFUNC );

    Vcpu->VMXON    = MmAllocateContiguousMemory( sizeof( VMX_VMCS ), phys );
    Vcpu->VMCS     = MmAllocateContiguousMemory( sizeof( VMX_VMCS ), phys );
    Vcpu->VMMStack = MmAllocateContiguousMemory( KERNEL_STACK_SIZE,  phys );

    if (!Vcpu->VMXON || !Vcpu->VMCS || !Vcpu->VMMStack)
    {
        DPRINT( "HyperBone: CPU %d: %s: Failed to allocate memory\n", CPU_IDX, __FUNCTION__ );
        goto failed;
    }

    UtilProtectNonpagedMemory( Vcpu->VMXON,    sizeof( VMX_VMCS ), PAGE_READWRITE );
    UtilProtectNonpagedMemory( Vcpu->VMCS,     sizeof( VMX_VMCS ), PAGE_READWRITE );
    UtilProtectNonpagedMemory( Vcpu->VMMStack, KERNEL_STACK_SIZE,  PAGE_READWRITE );

    RtlZeroMemory( Vcpu->VMXON,    sizeof( VMX_VMCS ) );
    RtlZeroMemory( Vcpu->VMCS,     sizeof( VMX_VMCS ) );
    RtlZeroMemory( Vcpu->VMMStack, KERNEL_STACK_SIZE );

    // Attempt to enter VMX root mode on this processor.
    if (VmxEnterRoot( Vcpu ))
    {
        // Initialize the VMCS, both guest and host state.
        VmxSetupVMCS( Vcpu );

        // Setup EPT
        if(g_Data->Features.EPT)
        {
            if (!NT_SUCCESS( EptBuildIdentityMap( &Vcpu->EPT ) ))
            {
                DPRINT( "HyperBone: CPU %d: %s: Failed to build EPT identity map\n", CPU_IDX, __FUNCTION__ );
                goto failedvmxoff;
            }

            EptEnable( Vcpu->EPT.PML4Ptr );
        }

        // Record that VMX is now enabled
        Vcpu->VmxState = VMX_STATE_TRANSITION;

        // VmxSetupVMCS configured the guest state so that a successful
        // __vmx_vmlaunch() resumes at the return address of RtlCaptureContext
        // in VmxInitializeCPU, which called us.
        InterlockedIncrement( &g_Data->vcpus );
        int res = __vmx_vmlaunch();
        InterlockedDecrement( &g_Data->vcpus );

        // If we got here, either VMCS setup failed in some way, or the launch
        // did not proceed as planned. Because VmxState is reset to
        // VMX_STATE_OFF below, this will correctly register as a failure.
        Vcpu->VmxState = VMX_STATE_OFF;

        DPRINT( "HyperBone: CPU %d: %s: __vmx_vmlaunch failed with result %d\n", CPU_IDX, __FUNCTION__, res );

failedvmxoff:
        __vmx_off();
    }

failed:;
    if (Vcpu->VMXON)
        MmFreeContiguousMemory( Vcpu->VMXON );
    if (Vcpu->VMCS)
        MmFreeContiguousMemory( Vcpu->VMCS );
    if (Vcpu->VMMStack)
        MmFreeContiguousMemory( Vcpu->VMMStack );

    Vcpu->VMXON    = NULL;
    Vcpu->VMCS     = NULL;
    Vcpu->VMMStack = NULL;
}
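
The MsrData array is indexed by each MSR's offset from MSR_IA32_VMX_BASIC, which is why __readmsr(MSR_IA32_VMX_BASIC + i) recovers the MSR number from an index in the loops above. A plausible definition of the VMX_MSR helper consistent with that usage (hypothetical; HyperBone's own header may differ):

/* The VMX capability MSRs occupy a contiguous range starting at
 * IA32_VMX_BASIC (0x480), so an MSR's array index is its offset
 * from that base. */
#define MSR_IA32_VMX_BASIC  0x00000480
#define VMX_MSR(msr)        ((msr) - MSR_IA32_VMX_BASIC)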
Code Example #4
File: vm.cpp Project: Rootkitsmm/HyperPlatform
// Allocates structures for virtualization, initializes VMCS and virtualizes
// the current processor
_Use_decl_annotations_ static void VmpInitializeVm(
    ULONG_PTR guest_stack_pointer, ULONG_PTR guest_instruction_pointer,
    void *context) {
  const auto shared_data = reinterpret_cast<SharedProcessorData *>(context);
  if (!shared_data) {
    return;
  }

  // Allocate related structures
  const auto processor_data =
      reinterpret_cast<ProcessorData *>(ExAllocatePoolWithTag(
          NonPagedPoolNx, sizeof(ProcessorData), kHyperPlatformCommonPoolTag));
  if (!processor_data) {
    return;
  }
  RtlZeroMemory(processor_data, sizeof(ProcessorData));

  // Set up EPT
  processor_data->ept_data = EptInitialization();
  if (!processor_data->ept_data) {
    goto ReturnFalse;
  }

  const auto vmm_stack_limit = UtilAllocateContiguousMemory(KERNEL_STACK_SIZE);
  const auto vmcs_region =
      reinterpret_cast<VmControlStructure *>(ExAllocatePoolWithTag(
          NonPagedPoolNx, kVmxMaxVmcsSize, kHyperPlatformCommonPoolTag));
  const auto vmxon_region =
      reinterpret_cast<VmControlStructure *>(ExAllocatePoolWithTag(
          NonPagedPoolNx, kVmxMaxVmcsSize, kHyperPlatformCommonPoolTag));

  // Initialize the management structure
  processor_data->vmm_stack_limit = vmm_stack_limit;
  processor_data->vmcs_region = vmcs_region;
  processor_data->vmxon_region = vmxon_region;

  if (!vmm_stack_limit || !vmcs_region || !vmxon_region) {
    goto ReturnFalse;
  }
  RtlZeroMemory(vmm_stack_limit, KERNEL_STACK_SIZE);
  RtlZeroMemory(vmcs_region, kVmxMaxVmcsSize);
  RtlZeroMemory(vmxon_region, kVmxMaxVmcsSize);

  // Initialize stack memory for VMM like this:
  //
  // (High)
  // +------------------+  <- vmm_stack_region_base      (eg, AED37000)
  // | processor_data   |
  // +------------------+  <- vmm_stack_data             (eg, AED36FFC)
  // | MAXULONG_PTR     |
  // +------------------+  <- vmm_stack_base (initial SP)(eg, AED36FF8)
  // |                  |    v
  // | (VMM Stack)      |    v (grow)
  // |                  |    v
  // +------------------+  <- vmm_stack_limit            (eg, AED34000)
  // (Low)
  const auto vmm_stack_region_base =
      reinterpret_cast<ULONG_PTR>(vmm_stack_limit) + KERNEL_STACK_SIZE;
  const auto vmm_stack_data = vmm_stack_region_base - sizeof(void *);
  const auto vmm_stack_base = vmm_stack_data - sizeof(void *);
  HYPERPLATFORM_LOG_DEBUG("VmmStackTop=       %p", vmm_stack_limit);
  HYPERPLATFORM_LOG_DEBUG("VmmStackBottom=    %p", vmm_stack_region_base);
  HYPERPLATFORM_LOG_DEBUG("VmmStackData=      %p", vmm_stack_data);
  HYPERPLATFORM_LOG_DEBUG("ProcessorData=     %p stored at %p", processor_data,
                          vmm_stack_data);
  HYPERPLATFORM_LOG_DEBUG("VmmStackBase=      %p", vmm_stack_base);
  HYPERPLATFORM_LOG_DEBUG("GuestStackPointer= %p", guest_stack_pointer);
  HYPERPLATFORM_LOG_DEBUG("GuestInstPointer=  %p", guest_instruction_pointer);
  *reinterpret_cast<ULONG_PTR *>(vmm_stack_base) = MAXULONG_PTR;
  *reinterpret_cast<ProcessorData **>(vmm_stack_data) = processor_data;

  processor_data->shared_data = shared_data;
  InterlockedIncrement(&processor_data->shared_data->reference_count);

  // Set up VMCS
  if (!VmpEnterVmxMode(processor_data)) {
    goto ReturnFalse;
  }
  if (!VmpInitializeVMCS(processor_data)) {
    goto ReturnFalseWithVmxOff;
  }
  if (!VmpSetupVMCS(processor_data, guest_stack_pointer,
                    guest_instruction_pointer, vmm_stack_base)) {
    goto ReturnFalseWithVmxOff;
  }

  // Do virtualize the processor
  VmpLaunchVM();

// This point is not reached when vmlaunch succeeds. Instead, execution
// resumes at the address specified by guest_instruction_pointer.

ReturnFalseWithVmxOff:;
  __vmx_off();

ReturnFalse:;
  VmpFreeProcessorData(processor_data);
}
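
VmpEnterVmxMode is where the processor actually switches into VMX root operation. A minimal sketch of the canonical VMXON sequence per the Intel SDM (hypothetical helper; HyperPlatform's real routine also applies the CR0/CR4 fixed-bit MSRs):

#include <intrin.h>

/* Sketch: enter VMX root operation.  vmxon_region must be a zeroed,
 * page-aligned region and vmxon_pa its physical address.  Returns 0
 * on success. */
static int EnterVmxOperationSketch(void *vmxon_region,
                                   unsigned __int64 vmxon_pa) {
  // IA32_FEATURE_CONTROL (MSR 0x3a) must be locked (bit 0) with
  // VMXON-outside-SMX enabled (bit 2).
  if ((__readmsr(0x3a) & 0x5) != 0x5) {
    return -1;
  }

  // CR4.VMXE (bit 13) must be set before VMXON.
  __writecr4(__readcr4() | (1ULL << 13));

  // The VMXON region begins with the VMCS revision identifier,
  // reported in the low 31 bits of IA32_VMX_BASIC (MSR 0x480).
  *(unsigned int *)vmxon_region =
      (unsigned int)__readmsr(0x480) & 0x7fffffff;

  return __vmx_on(&vmxon_pa) ? -1 : 0;
}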
Code Example #5
File: shvvmxhv.c Project: AmesianX/SimpleVisor
DECLSPEC_NORETURN
EXTERN_C
VOID
ShvVmxEntryHandler (
    _In_ PCONTEXT Context
    )
{
    SHV_VP_STATE guestContext;
    PSHV_VP_DATA vpData;

    //
    // Because we run with interrupts disabled during the entire hypervisor's
    // exit handling, raise the IRQL to HIGH_LEVEL which matches the reality of
    // the situation. This will block IPIs and the clock interrupt timer, which
    // means that it's critical to spend as little time here as possible. You
    // can expect CLOCK_WATCHDOG_TIMEOUT bugchecks to happen otherwise. If you
    // choose to enable interrupts, note that this will result in further crashes
    // as we are not on a correct OS stack, and you will be hitting crashes if
    // RtlpCheckStackLimits is ever called, or if PatchGuard validates the RSP
    // value.
    //
    KeRaiseIrql(HIGH_LEVEL, &guestContext.GuestIrql);

    //
    // Because we had to use RCX when calling RtlCaptureContext, its true value
    // was actually pushed on the stack right before the call. Go dig into the
    // stack to find it, and overwrite the bogus value that's there now.
    //
    Context->Rcx = *(PULONG64)((ULONG_PTR)Context - sizeof(Context->Rcx));

    //
    // Get the per-VP data for this processor.
    //
    vpData = &ShvGlobalData->VpData[KeGetCurrentProcessorNumberEx(NULL)];

    //
    // Build a little stack context to make it easier to keep track of certain
    // guest state, such as the RIP/RSP/RFLAGS, and the exit reason. The rest
    // of the general purpose registers come from the context structure that we
    // captured on our own with RtlCaptureContext in the assembly entrypoint.
    //
    guestContext.GuestEFlags = ShvVmxRead(GUEST_RFLAGS);
    guestContext.GuestRip = ShvVmxRead(GUEST_RIP);
    guestContext.GuestRsp = ShvVmxRead(GUEST_RSP);
    guestContext.ExitReason = ShvVmxRead(VM_EXIT_REASON) & 0xFFFF;
    guestContext.VpRegs = Context;
    guestContext.ExitVm = FALSE;

    //
    // Call the generic handler
    //
    ShvVmxHandleExit(&guestContext);

    //
    // Did we hit the magic exit sequence, or should we resume back to the VM
    // context?
    //
    if (guestContext.ExitVm)
    {
        //
        // When running in VMX root mode, the processor will set limits of the
        // GDT and IDT to 0xFFFF (notice that there are no Host VMCS fields to
        // set these values). This causes problems with PatchGuard, which will
        // believe that the GDTR and IDTR have been modified by malware, and
        // eventually crash the system. Since we know what the original state
        // of the GDTR and IDTR was, simply restore it now.
        //
        __lgdt(&vpData->HostState.SpecialRegisters.Gdtr.Limit);
        __lidt(&vpData->HostState.SpecialRegisters.Idtr.Limit);

        //
        // Our DPC routine may have interrupted an arbitrary user process, and
        // not an idle or system thread as usually happens on an idle system.
        // Therefore if we return back to the original caller after turning off
        // VMX, it will keep our current "host" CR3 value which we set on entry
        // to the PML4 of the SYSTEM process. We want to return back with the
        // correct value of the "guest" CR3, so that the currently executing
        // process continues to run with its expected address space mappings.
        //
        __writecr3(ShvVmxRead(GUEST_CR3));

        //
        // Finally, set the stack and instruction pointer to whatever location
        // had the instruction causing our VM-Exit, such as ShvVpUninitialize.
        // This will effectively act as a longjmp back to that location.
        //
        Context->Rsp = guestContext.GuestRsp;
        Context->Rip = (ULONG64)guestContext.GuestRip;

        //
        // Turn off VMX root mode on this logical processor. We're done here.
        //
        __vmx_off();
    }
    else
    {
        //
        // Because we won't be returning back into assembly code, nothing will
        // ever know about the "pop rcx" that must technically be done (or more
        // accurately "add rsp, 8", as rcx is already correct thanks to the
        // fixup earlier). In order to keep the stack sane, do that adjustment
        // here.
        //
        Context->Rsp += sizeof(Context->Rcx);

        //
        // Return into a VMXRESUME intrinsic, which we broke out as its own
        // function, in order to allow this to work. No assembly code will be
        // needed as RtlRestoreContext will fix all the GPRs, and what we just
        // did to RSP will take care of the rest.
        //
        Context->Rip = (ULONG64)ShvVmxResume;
    }

    //
    // Restore the IRQL back to the original level
    //
    KeLowerIrql(guestContext.GuestIrql);

    //
    // Restore the context to either ShvVmxResume, in which case the CPU's VMX
    // facility will do the "true" return back to the VM (but without restoring
    // GPRs, which is why we must do it here), or to the original guest's RIP,
    // which we use in case an exit was requested. In this case VMX must now be
    // off, and this will look like a longjmp to the original stack and RIP.
    //
    RtlRestoreContext(Context, NULL);
}
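
When no exit was requested, Context->Rip is pointed at ShvVmxResume, so RtlRestoreContext lands in a routine whose only job is to re-enter the guest. Its body is essentially one intrinsic; a sketch consistent with that role (named with a -Sketch suffix since SimpleVisor's actual routine may differ in decoration):

#include <intrin.h>

/* Sketch: re-enter the guest.  VMRESUME does not return on success;
 * falling through the intrinsic means the resume failed and the VMCS
 * state is suspect, so just break in. */
__declspec(noreturn) void ShvVmxResumeSketch(void)
{
    __vmx_vmresume();

    /* Unreachable on success. */
    for (;;)
        __debugbreak();
}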
Code Example #6
File: vm.cpp Project: N3mes1s/HyperPlatform
// Allocates structures for virtualization, initializes VMCS and virtualizes
// the current processor
_Use_decl_annotations_ static void VmpInitializeVm(
    ULONG_PTR guest_stack_pointer, ULONG_PTR guest_instruction_pointer,
    void *context) {
  PAGED_CODE();

  const auto shared_data = reinterpret_cast<SharedProcessorData *>(context);
  if (!shared_data) {
    return;
  }

  // Allocate related structures
  const auto processor_data =
      reinterpret_cast<ProcessorData *>(ExAllocatePoolWithTag(
          NonPagedPool, sizeof(ProcessorData), kHyperPlatformCommonPoolTag));
  if (!processor_data) {
    return;
  }
  RtlZeroMemory(processor_data, sizeof(ProcessorData));
  processor_data->shared_data = shared_data;
  InterlockedIncrement(&processor_data->shared_data->reference_count);

  // Set up EPT
  processor_data->ept_data = EptInitialization();
  if (!processor_data->ept_data) {
    goto ReturnFalse;
  }

  // Check if XSAVE/XRSTOR are available and save an instruction mask for all
  // supported user state components
  processor_data->xsave_inst_mask =
      RtlGetEnabledExtendedFeatures(static_cast<ULONG64>(-1));
  HYPERPLATFORM_LOG_DEBUG("xsave_inst_mask       = %p",
                          processor_data->xsave_inst_mask);
  if (processor_data->xsave_inst_mask) {
    // Allocate a large enough XSAVE area to store all supported user state
    // components. The size is rounded up to a multiple of the page size so
    // that the allocation is page aligned, which more than satisfies the
    // XSAVE area's 64-byte alignment requirement.
    //
    // See: ENUMERATION OF CPU SUPPORT FOR XSAVE INSTRUCTIONS AND
    // XSAVE-SUPPORTED FEATURES
    int cpu_info[4] = {};
    __cpuidex(cpu_info, 0xd, 0);
    const auto xsave_area_size = ROUND_TO_PAGES(cpu_info[2]);  // ecx
    processor_data->xsave_area = ExAllocatePoolWithTag(
        NonPagedPool, xsave_area_size, kHyperPlatformCommonPoolTag);
    if (!processor_data->xsave_area) {
      goto ReturnFalse;
    }
    RtlZeroMemory(processor_data->xsave_area, xsave_area_size);
  } else {
    // Use FXSAVE/FXRSTOR instead.
    int cpu_info[4] = {};
    __cpuid(cpu_info, 1);
    const CpuFeaturesEcx cpu_features_ecx = {static_cast<ULONG32>(cpu_info[2])};
    const CpuFeaturesEdx cpu_features_edx = {static_cast<ULONG32>(cpu_info[3])};
    if (cpu_features_ecx.fields.avx) {
      HYPERPLATFORM_LOG_ERROR("A processor supports AVX but not XSAVE/XRSTOR.");
      goto ReturnFalse;
    }
    if (!cpu_features_edx.fields.fxsr) {
      HYPERPLATFORM_LOG_ERROR("A processor does not support FXSAVE/FXRSTOR.");
      goto ReturnFalse;
    }
  }

  // Allocate other processor data fields
  processor_data->vmm_stack_limit =
      UtilAllocateContiguousMemory(KERNEL_STACK_SIZE);
  if (!processor_data->vmm_stack_limit) {
    goto ReturnFalse;
  }
  RtlZeroMemory(processor_data->vmm_stack_limit, KERNEL_STACK_SIZE);

  processor_data->vmcs_region =
      reinterpret_cast<VmControlStructure *>(ExAllocatePoolWithTag(
          NonPagedPool, kVmxMaxVmcsSize, kHyperPlatformCommonPoolTag));
  if (!processor_data->vmcs_region) {
    goto ReturnFalse;
  }
  RtlZeroMemory(processor_data->vmcs_region, kVmxMaxVmcsSize);

  processor_data->vmxon_region =
      reinterpret_cast<VmControlStructure *>(ExAllocatePoolWithTag(
          NonPagedPool, kVmxMaxVmcsSize, kHyperPlatformCommonPoolTag));
  if (!processor_data->vmxon_region) {
    goto ReturnFalse;
  }
  RtlZeroMemory(processor_data->vmxon_region, kVmxMaxVmcsSize);

  // Initialize stack memory for VMM like this:
  //
  // (High)
  // +------------------+  <- vmm_stack_region_base      (eg, AED37000)
  // | processor_data   |
  // +------------------+  <- vmm_stack_data             (eg, AED36FFC)
  // | MAXULONG_PTR     |
  // +------------------+  <- vmm_stack_base (initial SP)(eg, AED36FF8)
  // |                  |    v
  // | (VMM Stack)      |    v (grow)
  // |                  |    v
  // +------------------+  <- vmm_stack_limit            (eg, AED34000)
  // (Low)
  const auto vmm_stack_region_base =
      reinterpret_cast<ULONG_PTR>(processor_data->vmm_stack_limit) +
      KERNEL_STACK_SIZE;
  const auto vmm_stack_data = vmm_stack_region_base - sizeof(void *);
  const auto vmm_stack_base = vmm_stack_data - sizeof(void *);
  HYPERPLATFORM_LOG_DEBUG("vmm_stack_limit       = %p",
                          processor_data->vmm_stack_limit);
  HYPERPLATFORM_LOG_DEBUG("vmm_stack_region_base = %p", vmm_stack_region_base);
  HYPERPLATFORM_LOG_DEBUG("vmm_stack_data        = %p", vmm_stack_data);
  HYPERPLATFORM_LOG_DEBUG("vmm_stack_base        = %p", vmm_stack_base);
  HYPERPLATFORM_LOG_DEBUG("processor_data        = %p stored at %p",
                          processor_data, vmm_stack_data);
  HYPERPLATFORM_LOG_DEBUG("guest_stack_pointer   = %p", guest_stack_pointer);
  HYPERPLATFORM_LOG_DEBUG("guest_inst_pointer    = %p",
                          guest_instruction_pointer);
  *reinterpret_cast<ULONG_PTR *>(vmm_stack_base) = MAXULONG_PTR;
  *reinterpret_cast<ProcessorData **>(vmm_stack_data) = processor_data;

  // Set up VMCS
  if (!VmpEnterVmxMode(processor_data)) {
    goto ReturnFalse;
  }
  if (!VmpInitializeVmcs(processor_data)) {
    goto ReturnFalseWithVmxOff;
  }
  if (!VmpSetupVmcs(processor_data, guest_stack_pointer,
                    guest_instruction_pointer, vmm_stack_base)) {
    goto ReturnFalseWithVmxOff;
  }

  // Do virtualize the processor
  VmpLaunchVm();

// This point is not reached when vmlaunch succeeds. Instead, execution
// resumes at the address specified by guest_instruction_pointer.

ReturnFalseWithVmxOff:;
  __vmx_off();

ReturnFalse:;
  VmpFreeProcessorData(processor_data);
}
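
VmpInitializeVmcs is where the zeroed vmcs_region becomes the current VMCS. Like the VMXON region, a VMCS must start with the revision identifier from IA32_VMX_BASIC before it can be cleared and loaded; a minimal sketch of that sequence (hypothetical; HyperPlatform's own routine may do more):

#include <intrin.h>

/* Sketch: activate a zeroed VMCS region.  vmcs_pa is the physical
 * address of vmcs_region.  Returns 0 on success. */
static int InitializeVmcsSketch(void *vmcs_region,
                                unsigned __int64 vmcs_pa) {
  // Low 31 bits of IA32_VMX_BASIC (MSR 0x480) hold the revision id.
  *(unsigned int *)vmcs_region =
      (unsigned int)__readmsr(0x480) & 0x7fffffff;

  if (__vmx_vmclear(&vmcs_pa)) {  // put the VMCS into the clear state
    return -1;
  }
  if (__vmx_vmptrld(&vmcs_pa)) {  // make it the current, active VMCS
    return -1;
  }
  return 0;
}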