// Power callback routine handling hibernation and sleep transitions
_Use_decl_annotations_ static void PowerCallbackpCallbackRoutine(
    PVOID callback_context, PVOID argument1, PVOID argument2) {
  UNREFERENCED_PARAMETER(callback_context);
  PAGED_CODE();

  if (argument1 != reinterpret_cast<void*>(PO_CB_SYSTEM_STATE_LOCK)) {
    return;
  }

  HYPERPLATFORM_COMMON_DBG_BREAK();

  if (argument2) {
    // The system has just reentered S0 (the working state).
    HYPERPLATFORM_LOG_INFO("Resuming the system...");
    auto status = VmInitialization();
    if (!NT_SUCCESS(status)) {
      HYPERPLATFORM_LOG_ERROR(
          "Failed to re-virtualize processors. Please unload the driver.");
    }
  } else {
    // The system is about to exit system power state S0.
    HYPERPLATFORM_LOG_INFO("Suspending the system...");
    VmTermination();
  }
}
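A routine like this is registered against the \Callback\PowerState kernel callback object. Below is a minimal sketch of such a registration; the global and function names are hypothetical, and the exact flags are illustrative assumptions rather than what this project necessarily uses.

// A minimal sketch of registering the routine above; the globals and the
// function name here are hypothetical.
static PCALLBACK_OBJECT g_pcp_callback_object = nullptr;
static PVOID g_pcp_registration = nullptr;

static NTSTATUS PowerCallbackpRegister() {
  PAGED_CODE();

  UNICODE_STRING name = RTL_CONSTANT_STRING(L"\\Callback\\PowerState");
  OBJECT_ATTRIBUTES oa =
      RTL_CONSTANT_OBJECT_ATTRIBUTES(&name, OBJ_CASE_INSENSITIVE);

  // Open the existing \Callback\PowerState callback object (Create = FALSE)
  auto status = ExCreateCallback(&g_pcp_callback_object, &oa, FALSE, TRUE);
  if (!NT_SUCCESS(status)) {
    return status;
  }

  // Have PowerCallbackpCallbackRoutine invoked on power-state transitions
  g_pcp_registration = ExRegisterCallback(
      g_pcp_callback_object, PowerCallbackpCallbackRoutine, nullptr);
  if (!g_pcp_registration) {
    ObDereferenceObject(g_pcp_callback_object);
    g_pcp_callback_object = nullptr;
    return STATUS_UNSUCCESSFUL;
  }
  return STATUS_SUCCESS;
}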
Example #2
// Checks if the system supports virtualization
_Use_decl_annotations_ static bool VmpIsVmxAvailable() {
  PAGED_CODE();

  // See: DISCOVERING SUPPORT FOR VMX
  // If CPUID.1:ECX.VMX[bit 5]=1, then VMX operation is supported.
  int cpu_info[4] = {};
  __cpuid(cpu_info, 1);
  const CpuFeaturesEcx cpu_features = {static_cast<ULONG32>(cpu_info[2])};
  if (!cpu_features.fields.vmx) {
    HYPERPLATFORM_LOG_ERROR("VMX features are not supported.");
    return false;
  }

  // See: BASIC VMX INFORMATION
  // The first processors to support VMX operation use the write-back type.
  const Ia32VmxBasicMsr vmx_basic_msr = {UtilReadMsr64(Msr::kIa32VmxBasic)};
  if (static_cast<memory_type>(vmx_basic_msr.fields.memory_type) !=
      memory_type::kWriteBack) {
    HYPERPLATFORM_LOG_ERROR("Write-back cache type is not supported.");
    return false;
  }

  // See: ENABLING AND ENTERING VMX OPERATION
  Ia32FeatureControlMsr vmx_feature_control = {
      UtilReadMsr64(Msr::kIa32FeatureControl)};
  if (!vmx_feature_control.fields.lock) {
    HYPERPLATFORM_LOG_INFO("The lock bit is clear. Attempting to set 1.");
    const auto status = UtilForEachProcessor(VmpSetLockBitCallback, nullptr);
    if (!NT_SUCCESS(status)) {
      return false;
    }
  }
  if (!vmx_feature_control.fields.enable_vmxon) {
    HYPERPLATFORM_LOG_ERROR("VMX features are not enabled.");
    return false;
  }

  if (!EptIsEptAvailable()) {
    HYPERPLATFORM_LOG_ERROR("EPT features are not fully supported.");
    return false;
  }
  return true;
}
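The fields accesses above go through bitfield unions laid over raw MSR values. As an illustration, a declaration such as Ia32FeatureControlMsr could look as follows; the bit positions follow the Intel SDM, only the bits used above are named, and the layout should be treated as a sketch.

// Illustrative layout of the IA32_FEATURE_CONTROL MSR; bit positions per the
// Intel SDM. Only the bits used above are named.
union Ia32FeatureControlMsr {
  unsigned __int64 all;
  struct {
    unsigned lock : 1;          //!< [0]     Locks this MSR against writes
    unsigned enable_smx : 1;    //!< [1]     Enables VMXON inside SMX operation
    unsigned enable_vmxon : 1;  //!< [2]     Enables VMXON outside SMX operation
    unsigned reserved1 : 29;    //!< [3:31]
    unsigned reserved2 : 32;    //!< [32:63]
  } fields;
};
static_assert(sizeof(Ia32FeatureControlMsr) == 8, "Size check");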
Example #3
// Executes vmlaunch
/*_Use_decl_annotations_*/ static void VmpLaunchVm() {
  auto error_code = UtilVmRead(VmcsField::kVmInstructionError);
  if (error_code) {
    HYPERPLATFORM_LOG_WARN("VM_INSTRUCTION_ERROR = %Iu", error_code);
  }
  HYPERPLATFORM_COMMON_DBG_BREAK();
  auto vmx_status = static_cast<VmxStatus>(__vmx_vmlaunch());

  // This code is not executed when vmlaunch succeeds. Instead, the context
  // jumps to the address specified by GUEST_RIP.
  if (vmx_status == VmxStatus::kErrorWithStatus) {
    error_code = UtilVmRead(VmcsField::kVmInstructionError);
    HYPERPLATFORM_LOG_ERROR("VM_INSTRUCTION_ERROR = %d", error_code);
  }
  HYPERPLATFORM_COMMON_DBG_BREAK();
}
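The cast to VmxStatus reflects how the VMX intrinsics report results: __vmx_vmlaunch and the related intrinsics return 0 on success, 1 on failure with a status code readable via VM_INSTRUCTION_ERROR in the current VMCS, and 2 on failure without one. A sketch of an enum capturing that mapping:

// The VMX intrinsics return one of three values; an enum along these lines
// makes the cast above readable.
enum class VmxStatus : unsigned __int8 {
  kOk = 0,                  //!< Operation succeeded
  kErrorWithStatus = 1,     //!< Operation failed with extended status available
  kErrorWithoutStatus = 2,  //!< Operation failed without status available
};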
Example #4
// Sets the lock bit of the IA32_FEATURE_CONTROL MSR
_Use_decl_annotations_ static NTSTATUS VmpSetLockBitCallback(void *context) {
  UNREFERENCED_PARAMETER(context);

  Ia32FeatureControlMsr vmx_feature_control = {
      UtilReadMsr64(Msr::kIa32FeatureControl)};
  if (vmx_feature_control.fields.lock) {
    return STATUS_SUCCESS;
  }
  vmx_feature_control.fields.lock = true;
  UtilWriteMsr64(Msr::kIa32FeatureControl, vmx_feature_control.all);
  vmx_feature_control.all = UtilReadMsr64(Msr::kIa32FeatureControl);
  if (!vmx_feature_control.fields.lock) {
    HYPERPLATFORM_LOG_ERROR("The lock bit is still clear.");
    return STATUS_DEVICE_CONFIGURATION_ERROR;
  }
  return STATUS_SUCCESS;
}
Example #5
// Searches for the EPROCESS::Token offset from a known EPROCESS/token pair
_Use_decl_annotations_ static bool EopmonpInitTokenOffset(PEPROCESS process,
                                                          PACCESS_TOKEN token) {
  PAGED_CODE();

  // Search within the first 0x80 pointer-size fields
  for (auto offset = 0ul; offset < sizeof(void*) * 0x80;
       offset += sizeof(void*)) {
    const auto address = reinterpret_cast<ULONG_PTR>(process) + offset;
    const auto possible_token = EopmonpGetProceesTokenByAddress(address);
    if (possible_token == token) {
      g_eopmonp_offset_to_token = offset;
      HYPERPLATFORM_LOG_INFO("EPROCESS::Token offset = %x", offset);
      return true;
    }
  }

  HYPERPLATFORM_LOG_ERROR("Token could not found within an expected range.");
  return false;
}
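EPROCESS::Token is stored as an EX_FAST_REF whose low bits hold an embedded reference count, so the comparison above only works if those bits are stripped first. A hypothetical sketch of the EopmonpGetProceesTokenByAddress helper used above (the 4-bit mask assumes x64, and the real signature may differ):

// A hypothetical sketch: reads a pointer-size value at the given address and
// strips the low reference-count bits of an EX_FAST_REF (4 bits on x64).
static PACCESS_TOKEN EopmonpGetProceesTokenByAddress(ULONG_PTR address) {
  const auto value = *reinterpret_cast<ULONG_PTR *>(address);
  return reinterpret_cast<PACCESS_TOKEN>(value & ~static_cast<ULONG_PTR>(0xf));
}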
Example #6
// Allocates structures for virtualization, initializes VMCS and virtualizes
// the current processor
_Use_decl_annotations_ static void VmpInitializeVm(
    ULONG_PTR guest_stack_pointer, ULONG_PTR guest_instruction_pointer,
    void *context) {
  PAGED_CODE();

  const auto shared_data = reinterpret_cast<SharedProcessorData *>(context);
  if (!shared_data) {
    return;
  }

  // Allocate related structures
  const auto processor_data =
      reinterpret_cast<ProcessorData *>(ExAllocatePoolWithTag(
          NonPagedPool, sizeof(ProcessorData), kHyperPlatformCommonPoolTag));
  if (!processor_data) {
    return;
  }
  RtlZeroMemory(processor_data, sizeof(ProcessorData));
  processor_data->shared_data = shared_data;
  InterlockedIncrement(&processor_data->shared_data->reference_count);

  // Set up EPT
  processor_data->ept_data = EptInitialization();
  if (!processor_data->ept_data) {
    goto ReturnFalse;
  }

  // Check if XSAVE/XRSTOR are available and save an instruction mask for all
  // supported user state components
  processor_data->xsave_inst_mask =
      RtlGetEnabledExtendedFeatures(static_cast<ULONG64>(-1));
  HYPERPLATFORM_LOG_DEBUG("xsave_inst_mask       = %p",
                          processor_data->xsave_inst_mask);
  if (processor_data->xsave_inst_mask) {
    // Allocate a large enough XSAVE area to store all supported user state
    // components. The size is rounded up to a multiple of the page size so
    // that the allocation is page aligned and thus satisfies the 64-byte
    // alignment requirement of XSAVE.
    //
    // See: ENUMERATION OF CPU SUPPORT FOR XSAVE INSTRUCTIONS AND
    // XSAVE-SUPPORTED FEATURES
    int cpu_info[4] = {};
    __cpuidex(cpu_info, 0xd, 0);
    const auto xsave_area_size = ROUND_TO_PAGES(cpu_info[2]);  // ecx
    processor_data->xsave_area = ExAllocatePoolWithTag(
        NonPagedPool, xsave_area_size, kHyperPlatformCommonPoolTag);
    if (!processor_data->xsave_area) {
      goto ReturnFalse;
    }
    RtlZeroMemory(processor_data->xsave_area, xsave_area_size);
  } else {
    // Use FXSAVE/FXRSTOR instead.
    int cpu_info[4] = {};
    __cpuid(cpu_info, 1);
    const CpuFeaturesEcx cpu_features_ecx = {static_cast<ULONG32>(cpu_info[2])};
    const CpuFeaturesEdx cpu_features_edx = {static_cast<ULONG32>(cpu_info[3])};
    if (cpu_features_ecx.fields.avx) {
      HYPERPLATFORM_LOG_ERROR("A processor supports AVX but not XSAVE/XRSTOR.");
      goto ReturnFalse;
    }
    if (!cpu_features_edx.fields.fxsr) {
      HYPERPLATFORM_LOG_ERROR("A processor does not support FXSAVE/FXRSTOR.");
      goto ReturnFalse;
    }
  }

  // Allocate other processor data fields
  processor_data->vmm_stack_limit =
      UtilAllocateContiguousMemory(KERNEL_STACK_SIZE);
  if (!processor_data->vmm_stack_limit) {
    goto ReturnFalse;
  }
  RtlZeroMemory(processor_data->vmm_stack_limit, KERNEL_STACK_SIZE);

  processor_data->vmcs_region =
      reinterpret_cast<VmControlStructure *>(ExAllocatePoolWithTag(
          NonPagedPool, kVmxMaxVmcsSize, kHyperPlatformCommonPoolTag));
  if (!processor_data->vmcs_region) {
    goto ReturnFalse;
  }
  RtlZeroMemory(processor_data->vmcs_region, kVmxMaxVmcsSize);

  processor_data->vmxon_region =
      reinterpret_cast<VmControlStructure *>(ExAllocatePoolWithTag(
          NonPagedPool, kVmxMaxVmcsSize, kHyperPlatformCommonPoolTag));
  if (!processor_data->vmxon_region) {
    goto ReturnFalse;
  }
  RtlZeroMemory(processor_data->vmxon_region, kVmxMaxVmcsSize);

  // Initialize stack memory for VMM like this:
  //
  // (High)
  // +------------------+  <- vmm_stack_region_base      (eg, AED37000)
  // | processor_data   |
  // +------------------+  <- vmm_stack_data             (eg, AED36FFC)
  // | MAXULONG_PTR     |
  // +------------------+  <- vmm_stack_base (initial SP)(eg, AED36FF8)
  // |                  |    v
  // | (VMM Stack)      |    v (grow)
  // |                  |    v
  // +------------------+  <- vmm_stack_limit            (eg, AED34000)
  // (Low)
  const auto vmm_stack_region_base =
      reinterpret_cast<ULONG_PTR>(processor_data->vmm_stack_limit) +
      KERNEL_STACK_SIZE;
  const auto vmm_stack_data = vmm_stack_region_base - sizeof(void *);
  const auto vmm_stack_base = vmm_stack_data - sizeof(void *);
  HYPERPLATFORM_LOG_DEBUG("vmm_stack_limit       = %p",
                          processor_data->vmm_stack_limit);
  HYPERPLATFORM_LOG_DEBUG("vmm_stack_region_base = %p", vmm_stack_region_base);
  HYPERPLATFORM_LOG_DEBUG("vmm_stack_data        = %p", vmm_stack_data);
  HYPERPLATFORM_LOG_DEBUG("vmm_stack_base        = %p", vmm_stack_base);
  HYPERPLATFORM_LOG_DEBUG("processor_data        = %p stored at %p",
                          processor_data, vmm_stack_data);
  HYPERPLATFORM_LOG_DEBUG("guest_stack_pointer   = %p", guest_stack_pointer);
  HYPERPLATFORM_LOG_DEBUG("guest_inst_pointer    = %p",
                          guest_instruction_pointer);
  *reinterpret_cast<ULONG_PTR *>(vmm_stack_base) = MAXULONG_PTR;
  *reinterpret_cast<ProcessorData **>(vmm_stack_data) = processor_data;

  // Set up VMCS
  if (!VmpEnterVmxMode(processor_data)) {
    goto ReturnFalse;
  }
  if (!VmpInitializeVmcs(processor_data)) {
    goto ReturnFalseWithVmxOff;
  }
  if (!VmpSetupVmcs(processor_data, guest_stack_pointer,
                    guest_instruction_pointer, vmm_stack_base)) {
    goto ReturnFalseWithVmxOff;
  }

  // Virtualize the processor
  VmpLaunchVm();

// This code is not executed when vmlaunch succeeds. Instead, the context
// jumps to the address specified by guest_instruction_pointer.

ReturnFalseWithVmxOff:;
  __vmx_off();

ReturnFalse:;
  VmpFreeProcessorData(processor_data);
}
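Both error labels funnel into VmpFreeProcessorData, so the teardown has to tolerate partially initialized state. A hypothetical sketch of that cleanup follows; the helper names follow this codebase's conventions, but their exact signatures are assumptions.

// A hypothetical sketch of the cleanup path: every field is checked because
// VmpInitializeVm can fail at any allocation step.
static void VmpFreeProcessorData(ProcessorData *processor_data) {
  if (!processor_data) {
    return;
  }
  if (processor_data->vmm_stack_limit) {
    UtilFreeContiguousMemory(processor_data->vmm_stack_limit);
  }
  if (processor_data->vmcs_region) {
    ExFreePoolWithTag(processor_data->vmcs_region, kHyperPlatformCommonPoolTag);
  }
  if (processor_data->vmxon_region) {
    ExFreePoolWithTag(processor_data->vmxon_region,
                      kHyperPlatformCommonPoolTag);
  }
  if (processor_data->xsave_area) {
    ExFreePoolWithTag(processor_data->xsave_area, kHyperPlatformCommonPoolTag);
  }
  if (processor_data->ept_data) {
    EptTermination(processor_data->ept_data);
  }
  if (processor_data->shared_data) {
    // Drop the reference taken in VmpInitializeVm; the last user would free
    // shared_data (omitted in this sketch).
    InterlockedDecrement(&processor_data->shared_data->reference_count);
  }
  ExFreePoolWithTag(processor_data, kHyperPlatformCommonPoolTag);
}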