Example #1
_Use_decl_annotations_ bool EptIsEptAvailable() {
  PAGED_CODE();

  int regs[4] = {};
  __cpuidex(regs, 0x80000008, 0);
  Cpuid80000008Eax cpuidEax = {static_cast<ULONG32>(regs[0])};
  HYPERPLATFORM_LOG_DEBUG("Physical Address Range = %d bits",
                          cpuidEax.fields.physical_address_bits);

  // No processors supporting the Intel 64 architecture support more than 48
  // physical-address bits
  if (cpuidEax.fields.physical_address_bits > 48) {
    return false;
  }

  // Check the following:
  // - page walk length is 4 steps
  // - extended page tables can be laid out in write-back memory
  // - INVEPT instruction with all possible types is supported
  Ia32VmxEptVpidCapMsr vpid = {UtilReadMsr64(Msr::kIa32VmxEptVpidCap)};
  if (!vpid.fields.support_page_walk_length4 ||
      !vpid.fields.support_write_back_memory_type ||
      !vpid.fields.support_invept ||
      !vpid.fields.support_single_context_invept ||
      !vpid.fields.support_all_context_invept) {
    return false;
  }
  return true;
}
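For reference, a sketch of the bit layout this example assumes for Cpuid80000008Eax (the real definition lives in HyperPlatform's ia32_type.h): CPUID.80000008H reports the physical-address width in EAX[7:0] and the linear-address width in EAX[15:8]. Field names other than physical_address_bits are illustrative.

union Cpuid80000008EaxSketch {
  ULONG32 all;
  struct {
    ULONG32 physical_address_bits : 8;  // [7:0]
    ULONG32 linear_address_bits : 8;    // [15:8]
    ULONG32 reserved : 16;              // [31:16]
  } fields;
};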
Example #2
// See: VMM SETUP & TEAR DOWN
_Use_decl_annotations_ static bool VmpEnterVmxMode(
    ProcessorData *processor_data) {
  // Apply FIXED bits
  const Cr0 cr0_fixed0 = {UtilReadMsr(Msr::kIa32VmxCr0Fixed0)};
  const Cr0 cr0_fixed1 = {UtilReadMsr(Msr::kIa32VmxCr0Fixed1)};
  Cr0 cr0 = {__readcr0()};
  cr0.all &= cr0_fixed1.all;
  cr0.all |= cr0_fixed0.all;
  __writecr0(cr0.all);

  const Cr4 cr4_fixed0 = {UtilReadMsr(Msr::kIa32VmxCr4Fixed0)};
  const Cr4 cr4_fixed1 = {UtilReadMsr(Msr::kIa32VmxCr4Fixed1)};
  Cr4 cr4 = {__readcr4()};
  cr4.all &= cr4_fixed1.all;
  cr4.all |= cr4_fixed0.all;
  __writecr4(cr4.all);

  // Write a VMCS revision identifier
  const Ia32VmxBasicMsr vmx_basic_msr = {UtilReadMsr64(Msr::kIa32VmxBasic)};
  processor_data->vmxon_region->revision_identifier =
      vmx_basic_msr.fields.revision_identifier;

  auto vmxon_region_pa = UtilPaFromVa(processor_data->vmxon_region);
  if (__vmx_on(&vmxon_region_pa)) {
    return false;
  }

  UtilInveptAll();
  return true;
}
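Per the Intel SDM, the 4KB-aligned VMXON region that vmxon_region points to begins with the VMCS revision identifier taken from IA32_VMX_BASIC[30:0], and bit 31 of that word must be zero; the rest of the region is for processor use. A minimal layout sketch (the actual structure in HyperPlatform's ia32_type.h may differ):

struct VmxonRegionSketch {
  ULONG32 revision_identifier;  // bits 30:0 from IA32_VMX_BASIC; bit 31 must be 0
  UCHAR reserved[PAGE_SIZE - sizeof(ULONG32)];  // used internally by the processor
};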
Example #3
// RDMSR and WRMSR
_Use_decl_annotations_ static void VmmpHandleMsrAccess(
    GuestContext *guest_context, bool read_access) {
  // Apply the access to the VMCS instead of a real MSR if the specified MSR
  // is one of those listed below.
  const auto msr = static_cast<Msr>(guest_context->gp_regs->cx);

  bool transfer_to_vmcs = false;
  VmcsField vmcs_field = {};
  switch (msr) {
    case Msr::kIa32SysenterCs:
      vmcs_field = VmcsField::kGuestSysenterCs;
      transfer_to_vmcs = true;
      break;
    case Msr::kIa32SysenterEsp:
      vmcs_field = VmcsField::kGuestSysenterEsp;
      transfer_to_vmcs = true;
      break;
    case Msr::kIa32SysenterEip:
      vmcs_field = VmcsField::kGuestSysenterEip;
      transfer_to_vmcs = true;
      break;
    case Msr::kIa32GsBase:
      vmcs_field = VmcsField::kGuestGsBase;
      transfer_to_vmcs = true;
      break;
    case Msr::kIa32FsBase:
      vmcs_field = VmcsField::kGuestFsBase;
      break;
    default:
      break;
  }
  // Do not shadow 64-bit fields because the current implementation for x86 is
  // not able to handle them due to the simple use of UtilVmWrite() below.
  NT_ASSERT(UtilIsInBounds(vmcs_field, VmcsField::kIoBitmapA,
                           VmcsField::kHostIa32PerfGlobalCtrlHigh) == false);

  // These unconditional __readmsr and __writemsr calls may cause a #GP,
  // resulting in a bug check. A proper way to solve this is to check supported
  // MSR values beforehand and inject an exception when an unsupported MSR is
  // given.
  LARGE_INTEGER msr_value = {};
  if (read_access) {
    if (transfer_to_vmcs) {
      msr_value.QuadPart = UtilVmRead(vmcs_field);
    } else {
      msr_value.QuadPart = UtilReadMsr64(msr);
    }
    guest_context->gp_regs->ax = msr_value.LowPart;
    guest_context->gp_regs->dx = msr_value.HighPart;
  } else {
    msr_value.LowPart = static_cast<ULONG>(guest_context->gp_regs->ax);
    msr_value.HighPart = static_cast<ULONG>(guest_context->gp_regs->dx);
    if (transfer_to_vmcs) {
      UtilVmWrite(vmcs_field, static_cast<ULONG_PTR>(msr_value.QuadPart));
    } else {
      UtilWriteMsr64(msr, msr_value.QuadPart);
    }
  }

  VmmpAdjustGuestInstructionPointer(guest_context->ip);
}
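VmmpAdjustGuestInstructionPointer is not shown above; a minimal sketch of what such a helper typically does after emulating RDMSR/WRMSR is to advance the guest RIP by the VM-exit instruction length reported in the VMCS (the enumerator name kVmExitInstructionLen is an assumption here):

static void VmmpAdjustGuestInstructionPointerSketch(ULONG_PTR guest_ip) {
  // Skip the instruction that caused the VM-exit
  const auto exit_instruction_length =
      UtilVmRead(VmcsField::kVmExitInstructionLen);
  UtilVmWrite(VmcsField::kGuestRip, guest_ip + exit_instruction_length);
}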
Example #4
// Sets the lock bit of the IA32_FEATURE_CONTROL MSR
_Use_decl_annotations_ static NTSTATUS VmpSetLockBitCallback(void *context) {
  UNREFERENCED_PARAMETER(context);

  Ia32FeatureControlMsr vmx_feature_control = {
      UtilReadMsr64(Msr::kIa32FeatureControl)};
  if (vmx_feature_control.fields.lock) {
    return STATUS_SUCCESS;
  }
  vmx_feature_control.fields.lock = true;
  UtilWriteMsr64(Msr::kIa32FeatureControl, vmx_feature_control.all);
  vmx_feature_control.all = UtilReadMsr64(Msr::kIa32FeatureControl);
  if (!vmx_feature_control.fields.lock) {
    HYPERPLATFORM_LOG_ERROR("The lock bit is still clear.");
    return STATUS_DEVICE_CONFIGURATION_ERROR;
  }
  return STATUS_SUCCESS;
}
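For reference, the IA32_FEATURE_CONTROL bits that this example and the next rely on, per the Intel SDM (a sketch; the real Ia32FeatureControlMsr union is in ia32_type.h, and the enable_smx name is illustrative):

union Ia32FeatureControlMsrSketch {
  ULONG64 all;
  struct {
    ULONG64 lock : 1;          // [0]  once set, further WRMSR to this MSR causes #GP
    ULONG64 enable_smx : 1;    // [1]  VMXON allowed inside SMX operation
    ULONG64 enable_vmxon : 1;  // [2]  VMXON allowed outside SMX operation
    ULONG64 reserved : 61;     // [63:3]
  } fields;
};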
Example #5
// Checks if the system supports virtualization
_Use_decl_annotations_ static bool VmpIsVmxAvailable() {
  PAGED_CODE();

  // See: DISCOVERING SUPPORT FOR VMX
  // If CPUID.1:ECX.VMX[bit 5]=1, then VMX operation is supported.
  int cpu_info[4] = {};
  __cpuid(cpu_info, 1);
  const CpuFeaturesEcx cpu_features = {static_cast<ULONG_PTR>(cpu_info[2])};
  if (!cpu_features.fields.vmx) {
    HYPERPLATFORM_LOG_ERROR("VMX features are not supported.");
    return false;
  }

  // See: BASIC VMX INFORMATION
  // The first processors to support VMX operation use the write-back type.
  const Ia32VmxBasicMsr vmx_basic_msr = {UtilReadMsr64(Msr::kIa32VmxBasic)};
  if (static_cast<memory_type>(vmx_basic_msr.fields.memory_type) !=
      memory_type::kWriteBack) {
    HYPERPLATFORM_LOG_ERROR("Write-back cache type is not supported.");
    return false;
  }

  // See: ENABLING AND ENTERING VMX OPERATION
  Ia32FeatureControlMsr vmx_feature_control = {
      UtilReadMsr64(Msr::kIa32FeatureControl)};
  if (!vmx_feature_control.fields.lock) {
    HYPERPLATFORM_LOG_INFO("The lock bit is clear. Attempting to set 1.");
    const auto status = UtilForEachProcessor(VmpSetLockBitCallback, nullptr);
    if (!NT_SUCCESS(status)) {
      return false;
    }
  }
  if (!vmx_feature_control.fields.enable_vmxon) {
    HYPERPLATFORM_LOG_ERROR("VMX features are not enabled.");
    return false;
  }

  if (!EptIsEptAvailable()) {
    HYPERPLATFORM_LOG_ERROR("EPT features are not fully supported.");
    return false;
  }
  return true;
}
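The CPUID check above relies on CPUID.1:ECX bit 5 (VMX). A collapsed sketch of that layout (the real CpuFeaturesEcx in ia32_type.h names every feature bit; only the bit used here is spelled out):

union CpuFeaturesEcxSketch {
  ULONG32 all;
  struct {
    ULONG32 sse3 : 1;        // [0]
    ULONG32 reserved1 : 4;   // [4:1]
    ULONG32 vmx : 1;         // [5]   Virtual Machine Extensions
    ULONG32 reserved2 : 26;  // [31:6]
  } fields;
};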
Example #6
// See: VMM SETUP & TEAR DOWN
_Use_decl_annotations_ static bool VmpEnterVmxMode(
    ProcessorData *processor_data) {
  PAGED_CODE();

  // Apply FIXED bits
  // See: VMX-FIXED BITS IN CR0

  //        IA32_VMX_CRx_FIXED0 IA32_VMX_CRx_FIXED1 Meaning
  // Values 1                   1                   bit of CRx is fixed to 1
  // Values 0                   1                   bit of CRx is flexible
  // Values 0                   0                   bit of CRx is fixed to 0
  const Cr0 cr0_fixed0 = {UtilReadMsr(Msr::kIa32VmxCr0Fixed0)};
  const Cr0 cr0_fixed1 = {UtilReadMsr(Msr::kIa32VmxCr0Fixed1)};
  Cr0 cr0 = {__readcr0()};
  Cr0 cr0_original = cr0;
  cr0.all &= cr0_fixed1.all;
  cr0.all |= cr0_fixed0.all;
  __writecr0(cr0.all);

  HYPERPLATFORM_LOG_DEBUG("IA32_VMX_CR0_FIXED0   = %08x", cr0_fixed0.all);
  HYPERPLATFORM_LOG_DEBUG("IA32_VMX_CR0_FIXED1   = %08x", cr0_fixed1.all);
  HYPERPLATFORM_LOG_DEBUG("Original CR0          = %08x", cr0_original.all);
  HYPERPLATFORM_LOG_DEBUG("Fixed CR0             = %08x", cr0.all);

  // See: VMX-FIXED BITS IN CR4
  const Cr4 cr4_fixed0 = {UtilReadMsr(Msr::kIa32VmxCr4Fixed0)};
  const Cr4 cr4_fixed1 = {UtilReadMsr(Msr::kIa32VmxCr4Fixed1)};
  Cr4 cr4 = {__readcr4()};
  Cr4 cr4_original = cr4;
  cr4.all &= cr4_fixed1.all;
  cr4.all |= cr4_fixed0.all;
  __writecr4(cr4.all);

  HYPERPLATFORM_LOG_DEBUG("IA32_VMX_CR4_FIXED0   = %08x", cr4_fixed0.all);
  HYPERPLATFORM_LOG_DEBUG("IA32_VMX_CR4_FIXED1   = %08x", cr4_fixed1.all);
  HYPERPLATFORM_LOG_DEBUG("Original CR4          = %08x", cr4_original.all);
  HYPERPLATFORM_LOG_DEBUG("Fixed CR4             = %08x", cr4.all);

  // Write a VMCS revision identifier
  const Ia32VmxBasicMsr vmx_basic_msr = {UtilReadMsr64(Msr::kIa32VmxBasic)};
  processor_data->vmxon_region->revision_identifier =
      vmx_basic_msr.fields.revision_identifier;

  auto vmxon_region_pa = UtilPaFromVa(processor_data->vmxon_region);
  if (__vmx_on(&vmxon_region_pa)) {
    return false;
  }

  // See: Guidelines for Use of the INVVPID Instruction, and Guidelines for Use
  // of the INVEPT Instruction
  UtilInveptGlobal();
  UtilInvvpidAllContext();
  return true;
}
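The FIXED-bit adjustment above boils down to a single expression; a helper like the following (VmpApplyFixedBits is a hypothetical name) makes the intent explicit: keep only the bits that FIXED1 allows, then force on the bits that FIXED0 requires.

static ULONG_PTR VmpApplyFixedBits(ULONG_PTR cr_value, ULONG_PTR fixed0,
                                   ULONG_PTR fixed1) {
  // Clear bits fixed to 0, then set bits fixed to 1
  return (cr_value & fixed1) | fixed0;
}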
Example #7
// See: VMM SETUP & TEAR DOWN
_Use_decl_annotations_ static bool VmpInitializeVMCS(
    ProcessorData *processor_data) {
  // Write a VMCS revision identifier
  const Ia32VmxBasicMsr vmx_basic_msr = {UtilReadMsr64(Msr::kIa32VmxBasic)};
  processor_data->vmcs_region->revision_identifier =
      vmx_basic_msr.fields.revision_identifier;

  auto vmcs_region_pa = UtilPaFromVa(processor_data->vmcs_region);
  if (__vmx_vmclear(&vmcs_region_pa)) {
    return false;
  }
  if (__vmx_vmptrld(&vmcs_region_pa)) {
    return false;
  }

  // The launch state of current VMCS is "clear"
  return true;
}
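How these pieces fit together, roughly (a sketch; VmpStartVM is hypothetical and not HyperPlatform's actual control flow): after VMPTRLD the launch state of the VMCS is "clear", so the first VM entry must use VMLAUNCH rather than VMRESUME.

static bool VmpStartVM(ProcessorData *processor_data) {
  if (!VmpEnterVmxMode(processor_data)) {    // VMXON (Example #6)
    return false;
  }
  if (!VmpInitializeVMCS(processor_data)) {  // VMCLEAR + VMPTRLD (this example)
    return false;
  }
  // ... VmpSetupVMCS() would populate the VMCS here (Example #11) ...
  // VMLAUNCH is valid only while the launch state is "clear"; on success,
  // execution continues as the guest at the RIP written into the VMCS.
  return __vmx_vmlaunch() == 0;
}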
Example #8
// Checks if the system supports EPT sufficiently
_Use_decl_annotations_ bool EptIsEptAvailable() {
  PAGED_CODE();

  // Check the following:
  // - page walk length is 4 steps
  // - extended page tables can be laid out in write-back memory
  // - INVEPT instruction with all possible types is supported
  // - INVVPID instruction with all possible types is supported
  Ia32VmxEptVpidCapMsr capability = {UtilReadMsr64(Msr::kIa32VmxEptVpidCap)};
  if (!capability.fields.support_page_walk_length4 ||
      !capability.fields.support_write_back_memory_type ||
      !capability.fields.support_invept ||
      !capability.fields.support_single_context_invept ||
      !capability.fields.support_all_context_invept ||
      !capability.fields.support_invvpid ||
      !capability.fields.support_individual_address_invvpid ||
      !capability.fields.support_single_context_invvpid ||
      !capability.fields.support_all_context_invvpid ||
      !capability.fields.support_single_context_retaining_globals_invvpid) {
    return false;
  }
  return true;
}
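For reference, where these capability bits live in IA32_VMX_EPT_VPID_CAP, per the Intel SDM, Appendix A.10 (a sketch that names only the bits tested above; the real Ia32VmxEptVpidCapMsr in ia32_type.h enumerates them all, so verify bit positions against the SDM):

union Ia32VmxEptVpidCapMsrSketch {
  ULONG64 all;
  struct {
    ULONG64 reserved1 : 6;                                         // [5:0]
    ULONG64 support_page_walk_length4 : 1;                         // [6]
    ULONG64 reserved2 : 7;                                         // [13:7]
    ULONG64 support_write_back_memory_type : 1;                    // [14]
    ULONG64 reserved3 : 5;                                         // [19:15]
    ULONG64 support_invept : 1;                                    // [20]
    ULONG64 reserved4 : 4;                                         // [24:21]
    ULONG64 support_single_context_invept : 1;                     // [25]
    ULONG64 support_all_context_invept : 1;                        // [26]
    ULONG64 reserved5 : 5;                                         // [31:27]
    ULONG64 support_invvpid : 1;                                   // [32]
    ULONG64 reserved6 : 7;                                         // [39:33]
    ULONG64 support_individual_address_invvpid : 1;                // [40]
    ULONG64 support_single_context_invvpid : 1;                    // [41]
    ULONG64 support_all_context_invvpid : 1;                       // [42]
    ULONG64 support_single_context_retaining_globals_invvpid : 1;  // [43]
    ULONG64 reserved7 : 20;                                        // [63:44]
  } fields;
};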
Example #9
// Builds EPT, allocates pre-allocated entries, initializes and returns EptData
_Use_decl_annotations_ EptData *EptInitialization() {
  PAGED_CODE();

  static const auto kEptPageWalkLevel = 4ul;

  // Allocate ept_data
  const auto ept_data = reinterpret_cast<EptData *>(ExAllocatePoolWithTag(
      NonPagedPool, sizeof(EptData), kHyperPlatformCommonPoolTag));
  if (!ept_data) {
    return nullptr;
  }
  RtlZeroMemory(ept_data, sizeof(EptData));

  // Allocate EptPointer
  const auto ept_poiner = reinterpret_cast<EptPointer *>(ExAllocatePoolWithTag(
      NonPagedPool, PAGE_SIZE, kHyperPlatformCommonPoolTag));
  if (!ept_poiner) {
    ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
    return nullptr;
  }
  RtlZeroMemory(ept_poiner, PAGE_SIZE);

  // Allocate EPT_PML4 and initialize EptPointer
  const auto ept_pml4 =
      reinterpret_cast<EptCommonEntry *>(ExAllocatePoolWithTag(
          NonPagedPool, PAGE_SIZE, kHyperPlatformCommonPoolTag));
  if (!ept_pml4) {
    ExFreePoolWithTag(ept_poiner, kHyperPlatformCommonPoolTag);
    ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
    return nullptr;
  }
  RtlZeroMemory(ept_pml4, PAGE_SIZE);
  ept_poiner->fields.memory_type =
      static_cast<ULONG64>(EptpGetMemoryType(UtilPaFromVa(ept_pml4)));
  ept_poiner->fields.page_walk_length = kEptPageWalkLevel - 1;
  ept_poiner->fields.pml4_address = UtilPfnFromPa(UtilPaFromVa(ept_pml4));

  // Initialize all EPT entries for all physical memory pages
  const auto pm_ranges = UtilGetPhysicalMemoryRanges();
  for (auto run_index = 0ul; run_index < pm_ranges->number_of_runs;
       ++run_index) {
    const auto run = &pm_ranges->run[run_index];
    const auto base_addr = run->base_page * PAGE_SIZE;
    for (auto page_index = 0ull; page_index < run->page_count; ++page_index) {
      const auto indexed_addr = base_addr + page_index * PAGE_SIZE;
      const auto ept_pt_entry =
          EptpConstructTables(ept_pml4, 4, indexed_addr, nullptr);
      if (!ept_pt_entry) {
        EptpDestructTables(ept_pml4, 4);
        ExFreePoolWithTag(ept_poiner, kHyperPlatformCommonPoolTag);
        ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
        return nullptr;
      }
    }
  }

  // Initialize an EPT entry for APIC_BASE. It has to be allocated now for
  // some reason, or else the system hangs.
  const Ia32ApicBaseMsr apic_msr = {UtilReadMsr64(Msr::kIa32ApicBase)};
  if (!EptpConstructTables(ept_pml4, 4, apic_msr.fields.apic_base * PAGE_SIZE,
                           nullptr)) {
    EptpDestructTables(ept_pml4, 4);
    ExFreePoolWithTag(ept_poiner, kHyperPlatformCommonPoolTag);
    ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
    return nullptr;
  }

  // Allocate preallocated_entries
  const auto preallocated_entries_size =
      sizeof(EptCommonEntry *) * kEptpNumberOfPreallocatedEntries;
  const auto preallocated_entries = reinterpret_cast<EptCommonEntry **>(
      ExAllocatePoolWithTag(NonPagedPool, preallocated_entries_size,
                            kHyperPlatformCommonPoolTag));
  if (!preallocated_entries) {
    EptpDestructTables(ept_pml4, 4);
    ExFreePoolWithTag(ept_poiner, kHyperPlatformCommonPoolTag);
    ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
    return nullptr;
  }
  RtlZeroMemory(preallocated_entries, preallocated_entries_size);

  // And fill preallocated_entries with newly created entries
  for (auto i = 0ul; i < kEptpNumberOfPreallocatedEntries; ++i) {
    const auto ept_entry = EptpAllocateEptEntry(nullptr);
    if (!ept_entry) {
      EptpFreeUnusedPreAllocatedEntries(preallocated_entries, 0);
      EptpDestructTables(ept_pml4, 4);
      ExFreePoolWithTag(ept_poiner, kHyperPlatformCommonPoolTag);
      ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
      return nullptr;
    }
    preallocated_entries[i] = ept_entry;
  }

  // Initialization completed
  ept_data->ept_pointer = ept_poiner;
  ept_data->ept_pml4 = ept_pml4;
  ept_data->preallocated_entries = preallocated_entries;
  ept_data->preallocated_entries_count = 0;
  return ept_data;
}
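For completeness, the matching teardown mirrors the error paths above. HyperPlatform exposes this as EptTermination; the body here is inferred from the cleanup order used in this example rather than quoted:

void EptTerminationSketch(EptData *ept_data) {
  // Free entries that were preallocated but never consumed
  EptpFreeUnusedPreAllocatedEntries(ept_data->preallocated_entries,
                                    ept_data->preallocated_entries_count);
  // Tear down the 4-level table tree, then the top-level allocations
  EptpDestructTables(ept_data->ept_pml4, 4);
  ExFreePoolWithTag(ept_data->ept_pointer, kHyperPlatformCommonPoolTag);
  ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
}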
Example #10
// Reads and stores all MTRRs to set a correct memory type for EPT
_Use_decl_annotations_ void EptInitializeMtrrEntries() {
	PAGED_CODE();

	int index = 0;
	MtrrData *mtrr_entries = g_eptp_mtrr_entries;

	// Get and store the default memory type
	Ia32MtrrDefaultTypeMsr default_type = { UtilReadMsr64(Msr::kIa32MtrrDefType) };
	g_eptp_mtrr_default_type = default_type.fields.default_mtemory_type;

	// Read MTRR capability
	Ia32MtrrCapabilitiesMsr mtrr_capabilities = {
		UtilReadMsr64(Msr::kIa32MtrrCap) };
	HYPERPLATFORM_LOG_DEBUG(
		"MTRR Default=%lld, VariableCount=%lld, FixedSupported=%lld, FixedEnabled=%lld",
		default_type.fields.default_mtemory_type,
		mtrr_capabilities.fields.variable_range_count,
		mtrr_capabilities.fields.fixed_range_supported,
		default_type.fields.fixed_mtrrs_enabled);

	// Read fixed range MTRRs if supported
	if (mtrr_capabilities.fields.fixed_range_supported &&
		default_type.fields.fixed_mtrrs_enabled) {
		static const auto k64kBase = 0x0;
		static const auto k64kManagedSize = 0x10000;
		static const auto k16kBase = 0x80000;
		static const auto k16kManagedSize = 0x4000;
		static const auto k4kBase = 0xC0000;
		static const auto k4kManagedSize = 0x1000;

		// The kIa32MtrrFix64k00000 manages 8 ranges of memory. The first range
		// starts at 0x0, and each range manages a 64k (0x10000) range. For example,
		//  entry[0]:     0x0 : 0x10000 - 1
		//  entry[1]: 0x10000 : 0x20000 - 1
		//  ...
		//  entry[7]: 0x70000 : 0x80000 - 1
		ULONG64 offset = 0;
		Ia32MtrrFixedRangeMsr fixed_range = {
			UtilReadMsr64(Msr::kIa32MtrrFix64k00000) };
		for (auto memory_type : fixed_range.fields.types) {
			// Each entry manages 64k (0x10000) length.
			ULONG64 base = k64kBase + offset;
			offset += k64kManagedSize;

			// Saves the MTRR
			mtrr_entries[index].enabled = true;
			mtrr_entries[index].fixedMtrr = true;
			mtrr_entries[index].type = memory_type;
			mtrr_entries[index].range_base = base;
			mtrr_entries[index].range_end = base + k64kManagedSize - 1;
			index++;
		}
		NT_ASSERT(k64kBase + offset == k16kBase);

		// kIa32MtrrFix16k80000 manages 8 ranges of memory. The first range starts
		// at 0x80000, and each range manages a 16k (0x4000) range. For example,
		//  entry[0]: 0x80000 : 0x84000 - 1
		//  entry[1]: 0x84000 : 0x88000 - 1
		//  ...
		//  entry[7]: 0x9C000 : 0xA0000 - 1
		// Also, subsequent memory ranges are managed by another MSR,
		// kIa32MtrrFix16kA0000, which manages 8 ranges of memory starting at
		// 0xA0000 in the same fashion. For example,
		//  entry[0]: 0xA0000 : 0xA4000 - 1
		//  entry[1]: 0xA4000 : 0xA8000 - 1
		//  ...
		//  entry[7]: 0xBC000 : 0xC0000 - 1
		offset = 0;
		for (auto msr = static_cast<ULONG>(Msr::kIa32MtrrFix16k80000);
			msr <= static_cast<ULONG>(Msr::kIa32MtrrFix16kA0000); msr++) {
			fixed_range.all = UtilReadMsr64(static_cast<Msr>(msr));
			for (auto memory_type : fixed_range.fields.types) {
				// Each entry manages 16k (0x4000) length.
				ULONG64 base = k16kBase + offset;
				offset += k16kManagedSize;

				// Saves the MTRR
				mtrr_entries[index].enabled = true;
				mtrr_entries[index].fixedMtrr = true;
				mtrr_entries[index].type = memory_type;
				mtrr_entries[index].range_base = base;
				mtrr_entries[index].range_end = base + k16kManagedSize - 1;
				index++;
			}
		}
		NT_ASSERT(k16kBase + offset == k4kBase);

		// kIa32MtrrFix4kC0000 manages 8 ranges of memory. The first range starts
		// at 0xC0000, and each range manages a 4k (0x1000) range. For example,
		//  entry[0]: 0xC0000 : 0xC1000 - 1
		//  entry[1]: 0xC1000 : 0xC2000 - 1
		//  ...
		//  entry[7]: 0xC7000 : 0xC8000 - 1
		// Also, subsequent memory ranges are managed by other MSRs such as
		// kIa32MtrrFix4kC8000, kIa32MtrrFix4kD0000, and kIa32MtrrFix4kF8000. Each
		// MSR manages 8 ranges of memory in the same fashion up to 0x100000.
		offset = 0;
		for (auto msr = static_cast<ULONG>(Msr::kIa32MtrrFix4kC0000);
			msr <= static_cast<ULONG>(Msr::kIa32MtrrFix4kF8000); msr++) {
			fixed_range.all = UtilReadMsr64(static_cast<Msr>(msr));
			for (auto memory_type : fixed_range.fields.types) {
				// Each entry manages 4k (0x1000) length.
				ULONG64 base = k4kBase + offset;
				offset += k4kManagedSize;

				// Saves the MTRR
				mtrr_entries[index].enabled = true;
				mtrr_entries[index].fixedMtrr = true;
				mtrr_entries[index].type = memory_type;
				mtrr_entries[index].range_base = base;
				mtrr_entries[index].range_end = base + k4kManagedSize - 1;
				index++;
			}
		}
		NT_ASSERT(k4kBase + offset == 0x100000);
	}

	// Read all variable range MTRRs
	for (auto i = 0; i < mtrr_capabilities.fields.variable_range_count; i++) {
		// Read MTRR mask and check if it is in use
		const auto phy_mask = static_cast<ULONG>(Msr::kIa32MtrrPhysMaskN) + i * 2;
		Ia32MtrrPhysMaskMsr mtrr_mask = { UtilReadMsr64(static_cast<Msr>(phy_mask)) };
		if (!mtrr_mask.fields.valid) {
			continue;
		}

		// Get the length (as a power of two) of the range this MTRR manages
		ULONG length;
		BitScanForward64(&length, mtrr_mask.fields.phys_mask * PAGE_SIZE);

		// Read MTRR base and calculate a range this MTRR manages
		const auto phy_base = static_cast<ULONG>(Msr::kIa32MtrrPhysBaseN) + i * 2;
		Ia32MtrrPhysBaseMsr mtrr_base = { UtilReadMsr64(static_cast<Msr>(phy_base)) };
		ULONG64 base = mtrr_base.fields.phys_base * PAGE_SIZE;
		ULONG64 end = base + (1ull << length) - 1;

		// Save it
		mtrr_entries[index].enabled = true;
		mtrr_entries[index].fixedMtrr = false;
		mtrr_entries[index].type = mtrr_base.fields.type;
		mtrr_entries[index].range_base = base;
		mtrr_entries[index].range_end = end;
		index++;
	}
}
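A conceptual sketch of how the table built above can be consulted when assigning an EPT memory type to a physical page (HyperPlatform's actual EptpGetMemoryType also resolves conflicts between overlapping variable-range MTRRs; this simplified version just returns the first match):

static memory_type EptpGetMemoryTypeSketch(ULONG64 physical_address) {
  // Use a matching MTRR entry if any; fall back to the default memory type
  for (const auto &entry : g_eptp_mtrr_entries) {
    if (!entry.enabled) {
      continue;
    }
    if (physical_address >= entry.range_base &&
        physical_address <= entry.range_end) {
      return static_cast<memory_type>(entry.type);
    }
  }
  return static_cast<memory_type>(g_eptp_mtrr_default_type);
}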
Example #11
// See: PREPARATION AND LAUNCHING A VIRTUAL MACHINE
_Use_decl_annotations_ static bool VmpSetupVMCS(
    const ProcessorData *processor_data, ULONG_PTR guest_stack_pointer,
    ULONG_PTR guest_instruction_pointer, ULONG_PTR vmm_stack_pointer) {
  Gdtr gdtr = {};
  __sgdt(&gdtr);

  Idtr idtr = {};
  __sidt(&idtr);

  // See: Algorithms for Determining VMX Capabilities
  const auto use_true_msrs = Ia32VmxBasicMsr{UtilReadMsr64(Msr::kIa32VmxBasic)}
                                 .fields.vmx_capability_hint;

  VmxVmEntryControls vm_entryctl_requested = {};
  vm_entryctl_requested.fields.ia32e_mode_guest = IsX64();
  VmxVmEntryControls vm_entryctl = {VmpAdjustControlValue(
      (use_true_msrs) ? Msr::kIa32VmxTrueEntryCtls : Msr::kIa32VmxEntryCtls,
      vm_entryctl_requested.all)};

  VmxVmExitControls vm_exitctl_requested = {};
  vm_exitctl_requested.fields.acknowledge_interrupt_on_exit = true;
  vm_exitctl_requested.fields.host_address_space_size = IsX64();
  VmxVmExitControls vm_exitctl = {VmpAdjustControlValue(
      (use_true_msrs) ? Msr::kIa32VmxTrueExitCtls : Msr::kIa32VmxExitCtls,
      vm_exitctl_requested.all)};

  VmxPinBasedControls vm_pinctl_requested = {};
  VmxPinBasedControls vm_pinctl = {
      VmpAdjustControlValue((use_true_msrs) ? Msr::kIa32VmxTruePinbasedCtls
                                            : Msr::kIa32VmxPinbasedCtls,
                            vm_pinctl_requested.all)};

  VmxProcessorBasedControls vm_procctl_requested = {};
  vm_procctl_requested.fields.invlpg_exiting = false;
  vm_procctl_requested.fields.rdtsc_exiting = false;
  vm_procctl_requested.fields.cr3_load_exiting = true;
  vm_procctl_requested.fields.cr8_load_exiting = false;  // NB: very frequent
  vm_procctl_requested.fields.mov_dr_exiting = true;
  vm_procctl_requested.fields.use_msr_bitmaps = true;
  vm_procctl_requested.fields.activate_secondary_control = true;
  VmxProcessorBasedControls vm_procctl = {
      VmpAdjustControlValue((use_true_msrs) ? Msr::kIa32VmxTrueProcBasedCtls
                                            : Msr::kIa32VmxProcBasedCtls,
                            vm_procctl_requested.all)};

  VmxSecondaryProcessorBasedControls vm_procctl2_requested = {};
  vm_procctl2_requested.fields.enable_ept = true;
  vm_procctl2_requested.fields.enable_rdtscp = true;  // required for Win10
  vm_procctl2_requested.fields.descriptor_table_exiting = true;
  // required for Win10
  vm_procctl2_requested.fields.enable_xsaves_xstors = true;
  VmxSecondaryProcessorBasedControls vm_procctl2 = {VmpAdjustControlValue(
      Msr::kIa32VmxProcBasedCtls2, vm_procctl2_requested.all)};

  // Set up CR0 and CR4 bitmaps
  // - Where a bit is     masked, the shadow bit appears
  // - Where a bit is not masked, the actual bit appears
  // VM-exit occurs when a guest modifies any of those fields
  Cr0 cr0_mask = {};
  Cr4 cr4_mask = {};

  // See: PDPTE Registers
  // If PAE paging would be in use following an execution of MOV to CR0 or MOV
  // to CR4 (see Section 4.1.1) and the instruction is modifying any of CR0.CD,
  // CR0.NW, CR0.PG, CR4.PAE, CR4.PGE, CR4.PSE, or CR4.SMEP; then the PDPTEs are
  // loaded from the address in CR3.
  if (UtilIsX86Pae()) {
    cr0_mask.fields.pg = true;
    cr0_mask.fields.cd = true;
    cr0_mask.fields.nw = true;
    cr4_mask.fields.pae = true;
    cr4_mask.fields.pge = true;
    cr4_mask.fields.pse = true;
    cr4_mask.fields.smep = true;
  }

  const auto exception_bitmap =
      // 1 << InterruptionVector::kBreakpointException |
      // 1 << InterruptionVector::kGeneralProtectionException |
      // 1 << InterruptionVector::kPageFaultException |
      0;

  // clang-format off
  /* 16-Bit Control Field */

  /* 16-Bit Guest-State Fields */
  auto error = VmxStatus::kOk;
  error |= UtilVmWrite(VmcsField::kGuestEsSelector, AsmReadES());
  error |= UtilVmWrite(VmcsField::kGuestCsSelector, AsmReadCS());
  error |= UtilVmWrite(VmcsField::kGuestSsSelector, AsmReadSS());
  error |= UtilVmWrite(VmcsField::kGuestDsSelector, AsmReadDS());
  error |= UtilVmWrite(VmcsField::kGuestFsSelector, AsmReadFS());
  error |= UtilVmWrite(VmcsField::kGuestGsSelector, AsmReadGS());
  error |= UtilVmWrite(VmcsField::kGuestLdtrSelector, AsmReadLDTR());
  error |= UtilVmWrite(VmcsField::kGuestTrSelector, AsmReadTR());

  /* 16-Bit Host-State Fields */
  // RPL and TI have to be 0
  error |= UtilVmWrite(VmcsField::kHostEsSelector, AsmReadES() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostCsSelector, AsmReadCS() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostSsSelector, AsmReadSS() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostDsSelector, AsmReadDS() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostFsSelector, AsmReadFS() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostGsSelector, AsmReadGS() & 0xf8);
  error |= UtilVmWrite(VmcsField::kHostTrSelector, AsmReadTR() & 0xf8);

  /* 64-Bit Control Fields */
  error |= UtilVmWrite64(VmcsField::kIoBitmapA, 0);
  error |= UtilVmWrite64(VmcsField::kIoBitmapB, 0);
  error |= UtilVmWrite64(VmcsField::kMsrBitmap, UtilPaFromVa(processor_data->shared_data->msr_bitmap));
  error |= UtilVmWrite64(VmcsField::kEptPointer, EptGetEptPointer(processor_data->ept_data));

  /* 64-Bit Guest-State Fields */
  error |= UtilVmWrite64(VmcsField::kVmcsLinkPointer, MAXULONG64);
  error |= UtilVmWrite64(VmcsField::kGuestIa32Debugctl, UtilReadMsr64(Msr::kIa32Debugctl));
  if (UtilIsX86Pae()) {
    UtilLoadPdptes(__readcr3());
  }

  /* 32-Bit Control Fields */
  error |= UtilVmWrite(VmcsField::kPinBasedVmExecControl, vm_pinctl.all);
  error |= UtilVmWrite(VmcsField::kCpuBasedVmExecControl, vm_procctl.all);
  error |= UtilVmWrite(VmcsField::kExceptionBitmap, exception_bitmap);
  error |= UtilVmWrite(VmcsField::kPageFaultErrorCodeMask, 0);
  error |= UtilVmWrite(VmcsField::kPageFaultErrorCodeMatch, 0);
  error |= UtilVmWrite(VmcsField::kCr3TargetCount, 0);
  error |= UtilVmWrite(VmcsField::kVmExitControls, vm_exitctl.all);
  error |= UtilVmWrite(VmcsField::kVmExitMsrStoreCount, 0);
  error |= UtilVmWrite(VmcsField::kVmExitMsrLoadCount, 0);
  error |= UtilVmWrite(VmcsField::kVmEntryControls, vm_entryctl.all);
  error |= UtilVmWrite(VmcsField::kVmEntryMsrLoadCount, 0);
  error |= UtilVmWrite(VmcsField::kVmEntryIntrInfoField, 0);
  error |= UtilVmWrite(VmcsField::kSecondaryVmExecControl, vm_procctl2.all);

  /* 32-Bit Guest-State Fields */
  error |= UtilVmWrite(VmcsField::kGuestEsLimit, GetSegmentLimit(AsmReadES()));
  error |= UtilVmWrite(VmcsField::kGuestCsLimit, GetSegmentLimit(AsmReadCS()));
  error |= UtilVmWrite(VmcsField::kGuestSsLimit, GetSegmentLimit(AsmReadSS()));
  error |= UtilVmWrite(VmcsField::kGuestDsLimit, GetSegmentLimit(AsmReadDS()));
  error |= UtilVmWrite(VmcsField::kGuestFsLimit, GetSegmentLimit(AsmReadFS()));
  error |= UtilVmWrite(VmcsField::kGuestGsLimit, GetSegmentLimit(AsmReadGS()));
  error |= UtilVmWrite(VmcsField::kGuestLdtrLimit, GetSegmentLimit(AsmReadLDTR()));
  error |= UtilVmWrite(VmcsField::kGuestTrLimit, GetSegmentLimit(AsmReadTR()));
  error |= UtilVmWrite(VmcsField::kGuestGdtrLimit, gdtr.limit);
  error |= UtilVmWrite(VmcsField::kGuestIdtrLimit, idtr.limit);
  error |= UtilVmWrite(VmcsField::kGuestEsArBytes, VmpGetSegmentAccessRight(AsmReadES()));
  error |= UtilVmWrite(VmcsField::kGuestCsArBytes, VmpGetSegmentAccessRight(AsmReadCS()));
  error |= UtilVmWrite(VmcsField::kGuestSsArBytes, VmpGetSegmentAccessRight(AsmReadSS()));
  error |= UtilVmWrite(VmcsField::kGuestDsArBytes, VmpGetSegmentAccessRight(AsmReadDS()));
  error |= UtilVmWrite(VmcsField::kGuestFsArBytes, VmpGetSegmentAccessRight(AsmReadFS()));
  error |= UtilVmWrite(VmcsField::kGuestGsArBytes, VmpGetSegmentAccessRight(AsmReadGS()));
  error |= UtilVmWrite(VmcsField::kGuestLdtrArBytes, VmpGetSegmentAccessRight(AsmReadLDTR()));
  error |= UtilVmWrite(VmcsField::kGuestTrArBytes, VmpGetSegmentAccessRight(AsmReadTR()));
  error |= UtilVmWrite(VmcsField::kGuestInterruptibilityInfo, 0);
  error |= UtilVmWrite(VmcsField::kGuestActivityState, 0);
  error |= UtilVmWrite(VmcsField::kGuestSysenterCs, UtilReadMsr(Msr::kIa32SysenterCs));

  /* 32-Bit Host-State Field */
  error |= UtilVmWrite(VmcsField::kHostIa32SysenterCs, UtilReadMsr(Msr::kIa32SysenterCs));

  /* Natural-Width Control Fields */
  error |= UtilVmWrite(VmcsField::kCr0GuestHostMask, cr0_mask.all);
  error |= UtilVmWrite(VmcsField::kCr4GuestHostMask, cr4_mask.all);
  error |= UtilVmWrite(VmcsField::kCr0ReadShadow, __readcr0());
  error |= UtilVmWrite(VmcsField::kCr4ReadShadow, __readcr4());

  /* Natural-Width Guest-State Fields */
  error |= UtilVmWrite(VmcsField::kGuestCr0, __readcr0());
  error |= UtilVmWrite(VmcsField::kGuestCr3, __readcr3());
  error |= UtilVmWrite(VmcsField::kGuestCr4, __readcr4());
#if defined(_AMD64_)
  error |= UtilVmWrite(VmcsField::kGuestEsBase, 0);
  error |= UtilVmWrite(VmcsField::kGuestCsBase, 0);
  error |= UtilVmWrite(VmcsField::kGuestSsBase, 0);
  error |= UtilVmWrite(VmcsField::kGuestDsBase, 0);
  error |= UtilVmWrite(VmcsField::kGuestFsBase, UtilReadMsr(Msr::kIa32FsBase));
  error |= UtilVmWrite(VmcsField::kGuestGsBase, UtilReadMsr(Msr::kIa32GsBase));
#else
  error |= UtilVmWrite(VmcsField::kGuestEsBase, VmpGetSegmentBase(gdtr.base, AsmReadES()));
  error |= UtilVmWrite(VmcsField::kGuestCsBase, VmpGetSegmentBase(gdtr.base, AsmReadCS()));
  error |= UtilVmWrite(VmcsField::kGuestSsBase, VmpGetSegmentBase(gdtr.base, AsmReadSS()));
  error |= UtilVmWrite(VmcsField::kGuestDsBase, VmpGetSegmentBase(gdtr.base, AsmReadDS()));
  error |= UtilVmWrite(VmcsField::kGuestFsBase, VmpGetSegmentBase(gdtr.base, AsmReadFS()));
  error |= UtilVmWrite(VmcsField::kGuestGsBase, VmpGetSegmentBase(gdtr.base, AsmReadGS()));
#endif
  error |= UtilVmWrite(VmcsField::kGuestLdtrBase, VmpGetSegmentBase(gdtr.base, AsmReadLDTR()));
  error |= UtilVmWrite(VmcsField::kGuestTrBase, VmpGetSegmentBase(gdtr.base, AsmReadTR()));
  error |= UtilVmWrite(VmcsField::kGuestGdtrBase, gdtr.base);
  error |= UtilVmWrite(VmcsField::kGuestIdtrBase, idtr.base);
  error |= UtilVmWrite(VmcsField::kGuestDr7, __readdr(7));
  error |= UtilVmWrite(VmcsField::kGuestRsp, guest_stack_pointer);
  error |= UtilVmWrite(VmcsField::kGuestRip, guest_instruction_pointer);
  error |= UtilVmWrite(VmcsField::kGuestRflags, __readeflags());
  error |= UtilVmWrite(VmcsField::kGuestSysenterEsp, UtilReadMsr(Msr::kIa32SysenterEsp));
  error |= UtilVmWrite(VmcsField::kGuestSysenterEip, UtilReadMsr(Msr::kIa32SysenterEip));

  /* Natural-Width Host-State Fields */
  error |= UtilVmWrite(VmcsField::kHostCr0, __readcr0());
  error |= UtilVmWrite(VmcsField::kHostCr3, __readcr3());
  error |= UtilVmWrite(VmcsField::kHostCr4, __readcr4());
#if defined(_AMD64_)
  error |= UtilVmWrite(VmcsField::kHostFsBase, UtilReadMsr(Msr::kIa32FsBase));
  error |= UtilVmWrite(VmcsField::kHostGsBase, UtilReadMsr(Msr::kIa32GsBase));
#else
  error |= UtilVmWrite(VmcsField::kHostFsBase, VmpGetSegmentBase(gdtr.base, AsmReadFS()));
  error |= UtilVmWrite(VmcsField::kHostGsBase, VmpGetSegmentBase(gdtr.base, AsmReadGS()));
#endif
  error |= UtilVmWrite(VmcsField::kHostTrBase, VmpGetSegmentBase(gdtr.base, AsmReadTR()));
  error |= UtilVmWrite(VmcsField::kHostGdtrBase, gdtr.base);
  error |= UtilVmWrite(VmcsField::kHostIdtrBase, idtr.base);
  error |= UtilVmWrite(VmcsField::kHostIa32SysenterEsp, UtilReadMsr(Msr::kIa32SysenterEsp));
  error |= UtilVmWrite(VmcsField::kHostIa32SysenterEip, UtilReadMsr(Msr::kIa32SysenterEip));
  error |= UtilVmWrite(VmcsField::kHostRsp, vmm_stack_pointer);
  error |= UtilVmWrite(VmcsField::kHostRip, reinterpret_cast<ULONG_PTR>(AsmVmmEntryPoint));
  // clang-format on

  const auto vmx_status = static_cast<VmxStatus>(error);
  return vmx_status == VmxStatus::kOk;
}
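The VmpAdjustControlValue calls above implement the rule from "Algorithms for Determining VMX Capabilities" in the Intel SDM: the low 32 bits of each capability MSR are the allowed-0 settings (bits that must be 1) and the high 32 bits are the allowed-1 settings (bits that may be 1). A sketch of such a helper:

static ULONG VmpAdjustControlValueSketch(Msr msr, ULONG requested_value) {
  LARGE_INTEGER msr_value = {};
  msr_value.QuadPart = UtilReadMsr64(msr);
  auto adjusted_value = requested_value;
  adjusted_value &= msr_value.HighPart;  // clear bits that are not allowed to be 1
  adjusted_value |= msr_value.LowPart;   // set bits that are required to be 1
  return adjusted_value;
}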