Example #1
// Allocates structures for virtualization, initializes the VMCS, and
// virtualizes the current processor
_Use_decl_annotations_ static void VmpInitializeVm(
    ULONG_PTR guest_stack_pointer, ULONG_PTR guest_instruction_pointer,
    void *context) {
  const auto shared_data = reinterpret_cast<SharedProcessorData *>(context);
  if (!shared_data) {
    return;
  }

  // Allocate related structures
  const auto processor_data =
      reinterpret_cast<ProcessorData *>(ExAllocatePoolWithTag(
          NonPagedPoolNx, sizeof(ProcessorData), kHyperPlatformCommonPoolTag));
  if (!processor_data) {
    return;
  }
  RtlZeroMemory(processor_data, sizeof(ProcessorData));

  // Set up EPT
  processor_data->ept_data = EptInitialization();
  if (!processor_data->ept_data) {
    goto ReturnFalse;
  }

  const auto vmm_stack_limit = UtilAllocateContiguousMemory(KERNEL_STACK_SIZE);
  const auto vmcs_region =
      reinterpret_cast<VmControlStructure *>(ExAllocatePoolWithTag(
          NonPagedPoolNx, kVmxMaxVmcsSize, kHyperPlatformCommonPoolTag));
  const auto vmxon_region =
      reinterpret_cast<VmControlStructure *>(ExAllocatePoolWithTag(
          NonPagedPoolNx, kVmxMaxVmcsSize, kHyperPlatformCommonPoolTag));

  // Initialize the management structure
  processor_data->vmm_stack_limit = vmm_stack_limit;
  processor_data->vmcs_region = vmcs_region;
  processor_data->vmxon_region = vmxon_region;

  if (!vmm_stack_limit || !vmcs_region || !vmxon_region) {
    goto ReturnFalse;
  }
  RtlZeroMemory(vmm_stack_limit, KERNEL_STACK_SIZE);
  RtlZeroMemory(vmcs_region, kVmxMaxVmcsSize);
  RtlZeroMemory(vmxon_region, kVmxMaxVmcsSize);
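
  // Note: before VMXON and VMPTRLD can use these regions, their first 32 bits
  // must hold the VMCS revision identifier from the IA32_VMX_BASIC MSR;
  // filling that in is handled later (in VmpEnterVmxMode in HyperPlatform).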

  // Initialize stack memory for VMM like this:
  //
  // (High)
  // +------------------+  <- vmm_stack_region_base      (eg, AED37000)
  // | processor_data   |
  // +------------------+  <- vmm_stack_data             (eg, AED36FFC)
  // | MAXULONG_PTR     |
  // +------------------+  <- vmm_stack_base (initial SP)(eg, AED36FF8)
  // |                  |    v
  // | (VMM Stack)      |    v (grow)
  // |                  |    v
  // +------------------+  <- vmm_stack_limit            (eg, AED34000)
  // (Low)
  const auto vmm_stack_region_base =
      reinterpret_cast<ULONG_PTR>(vmm_stack_limit) + KERNEL_STACK_SIZE;
  const auto vmm_stack_data = vmm_stack_region_base - sizeof(void *);
  const auto vmm_stack_base = vmm_stack_data - sizeof(void *);
  HYPERPLATFORM_LOG_DEBUG("VmmStackTop=       %p", vmm_stack_limit);
  HYPERPLATFORM_LOG_DEBUG("VmmStackBottom=    %p", vmm_stack_region_base);
  HYPERPLATFORM_LOG_DEBUG("VmmStackData=      %p", vmm_stack_data);
  HYPERPLATFORM_LOG_DEBUG("ProcessorData=     %p stored at %p", processor_data,
                          vmm_stack_data);
  HYPERPLATFORM_LOG_DEBUG("VmmStackBase=      %p", vmm_stack_base);
  HYPERPLATFORM_LOG_DEBUG("GuestStackPointer= %p", guest_stack_pointer);
  HYPERPLATFORM_LOG_DEBUG("GuestInstPointer=  %p", guest_instruction_pointer);
  *reinterpret_cast<ULONG_PTR *>(vmm_stack_base) = MAXULONG_PTR;
  *reinterpret_cast<ProcessorData **>(vmm_stack_data) = processor_data;
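
  // vmm_stack_base is handed to VmpSetupVMCS below as the VMM's initial stack
  // pointer for VM-exit handling; from there the VM-exit handler can recover
  // processor_data via the pointer stored at vmm_stack_data, while
  // MAXULONG_PTR acts as a dummy value terminating the stack.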

  processor_data->shared_data = shared_data;
  InterlockedIncrement(&processor_data->shared_data->reference_count);

  // Set up VMCS
  if (!VmpEnterVmxMode(processor_data)) {
    goto ReturnFalse;
  }
  if (!VmpInitializeVMCS(processor_data)) {
    goto ReturnFalseWithVmxOff;
  }
  if (!VmpSetupVMCS(processor_data, guest_stack_pointer,
                    guest_instruction_pointer, vmm_stack_base)) {
    goto ReturnFalseWithVmxOff;
  }

  // Virtualize the processor
  VmpLaunchVM();

// This code is never reached when vmlaunch succeeds. Instead, execution
// continues at the address specified by guest_instruction_pointer.

ReturnFalseWithVmxOff:;
  __vmx_off();

ReturnFalse:;
  VmpFreeProcessorData(processor_data);
}
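
Neither listing captures the guest state itself: the stack pointer and
instruction pointer that the guest resumes with are captured by an assembly
helper and passed in as parameters. The sketch below illustrates that calling
pattern, modeled on HyperPlatform's structure; the AsmInitializeVm and
UtilForEachProcessor declarations are assumptions about those helpers, and the
two Sketch functions are hypothetical wrappers written for illustration.

// Assumed helper (implemented in assembly): captures the current RSP and a
// resume RIP, then calls the routine with them and the given context
extern "C" bool AsmInitializeVm(
    void (*vm_initialization_routine)(ULONG_PTR, ULONG_PTR, void *),
    void *context);

// Assumed helper: runs the callback once on every processor
NTSTATUS UtilForEachProcessor(NTSTATUS (*callback)(void *), void *context);

// Hypothetical per-processor callback: when vmlaunch succeeds inside
// VmpInitializeVm, execution resumes as the guest at the RIP captured by
// AsmInitializeVm, which then returns true
static NTSTATUS VmpStartVmSketch(void *context) {
  return AsmInitializeVm(VmpInitializeVm, context) ? STATUS_SUCCESS
                                                   : STATUS_UNSUCCESSFUL;
}

// Hypothetical entry point: virtualize every processor, sharing a single
// SharedProcessorData instance as the context
static NTSTATUS VmpVirtualizeAllProcessorsSketch(
    SharedProcessorData *shared_data) {
  return UtilForEachProcessor(VmpStartVmSketch, shared_data);
}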
Example #2
// Allocates structures for virtualization, initializes the VMCS, and
// virtualizes the current processor
_Use_decl_annotations_ static void VmpInitializeVm(
    ULONG_PTR guest_stack_pointer, ULONG_PTR guest_instruction_pointer,
    void *context) {
  PAGED_CODE();

  const auto shared_data = reinterpret_cast<SharedProcessorData *>(context);
  if (!shared_data) {
    return;
  }

  // Allocate related structures
  const auto processor_data =
      reinterpret_cast<ProcessorData *>(ExAllocatePoolWithTag(
          NonPagedPool, sizeof(ProcessorData), kHyperPlatformCommonPoolTag));
  if (!processor_data) {
    return;
  }
  RtlZeroMemory(processor_data, sizeof(ProcessorData));
  processor_data->shared_data = shared_data;
  InterlockedIncrement(&processor_data->shared_data->reference_count);

  // Set up EPT
  processor_data->ept_data = EptInitialization();
  if (!processor_data->ept_data) {
    goto ReturnFalse;
  }

  // Check if XSAVE/XRSTOR are available and save an instruction mask for all
  // supported user state components
  processor_data->xsave_inst_mask =
      RtlGetEnabledExtendedFeatures(static_cast<ULONG64>(-1));
  HYPERPLATFORM_LOG_DEBUG("xsave_inst_mask       = %p",
                          processor_data->xsave_inst_mask);
  if (processor_data->xsave_inst_mask) {
    // Allocate a large enough XSAVE area to store all supported user state
    // components. The size is rounded up to a multiple of the page size so
    // that the address is page aligned, which satisfies the 64-byte alignment
    // required by the XSAVE instruction.
    //
    // See: ENUMERATION OF CPU SUPPORT FOR XSAVE INSTRUCTIONS AND
    // XSAVE-SUPPORTED FEATURES
    int cpu_info[4] = {};
    __cpuidex(cpu_info, 0xd, 0);
    const auto xsave_area_size = ROUND_TO_PAGES(cpu_info[2]);  // ecx
    processor_data->xsave_area = ExAllocatePoolWithTag(
        NonPagedPool, xsave_area_size, kHyperPlatformCommonPoolTag);
    if (!processor_data->xsave_area) {
      goto ReturnFalse;
    }
    RtlZeroMemory(processor_data->xsave_area, xsave_area_size);
  } else {
    // Use FXSAVE/FXRSTOR instead.
    int cpu_info[4] = {};
    __cpuid(cpu_info, 1);
    const CpuFeaturesEcx cpu_features_ecx = {static_cast<ULONG32>(cpu_info[2])};
    const CpuFeaturesEdx cpu_features_edx = {static_cast<ULONG32>(cpu_info[3])};
    if (cpu_features_ecx.fields.avx) {
      HYPERPLATFORM_LOG_ERROR("A processor supports AVX but not XSAVE/XRSTOR.");
      goto ReturnFalse;
    }
    if (!cpu_features_edx.fields.fxsr) {
      HYPERPLATFORM_LOG_ERROR("A processor does not support FXSAVE/FXRSTOR.");
      goto ReturnFalse;
    }
  }
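  // These fields are consumed at VM-exit time: a handler can preserve guest
  // extended state with _xsave(processor_data->xsave_area,
  // processor_data->xsave_inst_mask) before the VMM touches FP/SIMD state,
  // and restore it with _xrstor; otherwise the FXSAVE/FXRSTOR pair verified
  // above is used instead.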

  // Allocate other processor data fields
  processor_data->vmm_stack_limit =
      UtilAllocateContiguousMemory(KERNEL_STACK_SIZE);
  if (!processor_data->vmm_stack_limit) {
    goto ReturnFalse;
  }
  RtlZeroMemory(processor_data->vmm_stack_limit, KERNEL_STACK_SIZE);

  processor_data->vmcs_region =
      reinterpret_cast<VmControlStructure *>(ExAllocatePoolWithTag(
          NonPagedPool, kVmxMaxVmcsSize, kHyperPlatformCommonPoolTag));
  if (!processor_data->vmcs_region) {
    goto ReturnFalse;
  }
  RtlZeroMemory(processor_data->vmcs_region, kVmxMaxVmcsSize);

  processor_data->vmxon_region =
      reinterpret_cast<VmControlStructure *>(ExAllocatePoolWithTag(
          NonPagedPool, kVmxMaxVmcsSize, kHyperPlatformCommonPoolTag));
  if (!processor_data->vmxon_region) {
    goto ReturnFalse;
  }
  RtlZeroMemory(processor_data->vmxon_region, kVmxMaxVmcsSize);

  // Initialize stack memory for VMM like this:
  //
  // (High)
  // +------------------+  <- vmm_stack_region_base      (eg, AED37000)
  // | processor_data   |
  // +------------------+  <- vmm_stack_data             (eg, AED36FFC)
  // | MAXULONG_PTR     |
  // +------------------+  <- vmm_stack_base (initial SP)(eg, AED36FF8)
  // |                  |    v
  // | (VMM Stack)      |    v (grow)
  // |                  |    v
  // +------------------+  <- vmm_stack_limit            (eg, AED34000)
  // (Low)
  const auto vmm_stack_region_base =
      reinterpret_cast<ULONG_PTR>(processor_data->vmm_stack_limit) +
      KERNEL_STACK_SIZE;
  const auto vmm_stack_data = vmm_stack_region_base - sizeof(void *);
  const auto vmm_stack_base = vmm_stack_data - sizeof(void *);
  HYPERPLATFORM_LOG_DEBUG("vmm_stack_limit       = %p",
                          processor_data->vmm_stack_limit);
  HYPERPLATFORM_LOG_DEBUG("vmm_stack_region_base = %p", vmm_stack_region_base);
  HYPERPLATFORM_LOG_DEBUG("vmm_stack_data        = %p", vmm_stack_data);
  HYPERPLATFORM_LOG_DEBUG("vmm_stack_base        = %p", vmm_stack_base);
  HYPERPLATFORM_LOG_DEBUG("processor_data        = %p stored at %p",
                          processor_data, vmm_stack_data);
  HYPERPLATFORM_LOG_DEBUG("guest_stack_pointer   = %p", guest_stack_pointer);
  HYPERPLATFORM_LOG_DEBUG("guest_inst_pointer    = %p",
                          guest_instruction_pointer);
  *reinterpret_cast<ULONG_PTR *>(vmm_stack_base) = MAXULONG_PTR;
  *reinterpret_cast<ProcessorData **>(vmm_stack_data) = processor_data;

  // Set up VMCS
  if (!VmpEnterVmxMode(processor_data)) {
    goto ReturnFalse;
  }
  if (!VmpInitializeVmcs(processor_data)) {
    goto ReturnFalseWithVmxOff;
  }
  if (!VmpSetupVmcs(processor_data, guest_stack_pointer,
                    guest_instruction_pointer, vmm_stack_base)) {
    goto ReturnFalseWithVmxOff;
  }

  // Virtualize the processor
  VmpLaunchVm();

// This code is never reached when vmlaunch succeeds. Instead, execution
// continues at the address specified by guest_instruction_pointer.

ReturnFalseWithVmxOff:;
  __vmx_off();

ReturnFalse:;
  VmpFreeProcessorData(processor_data);
}
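
After a successful VmpLaunchVm, the processor continues execution as a guest
at guest_instruction_pointer. A simple way for that code to confirm the
processor is now virtualized is the CPUID hypervisor-present bit
(CPUID.1:ECX[31]), which a hypervisor such as HyperPlatform sets in its CPUID
VM-exit handler. The check below is an illustrative sketch, not the project's
own verification routine.

#include <intrin.h>

// Hypothetical check, run from the resumed guest context: queries CPUID
// leaf 1 and tests ECX bit 31, the hypervisor-present bit
static bool IsProcessorVirtualizedSketch() {
  int cpu_info[4] = {};
  __cpuid(cpu_info, 1);
  return (cpu_info[2] & 0x80000000) != 0;
}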