Example #1
// Initialize shared processor data
_Use_decl_annotations_ static SharedProcessorData *VmpInitializeSharedData() {
  PAGED_CODE();

  const auto shared_data = reinterpret_cast<SharedProcessorData *>(
      ExAllocatePoolWithTag(NonPagedPoolNx, sizeof(SharedProcessorData),
                            kHyperPlatformCommonPoolTag));
  if (!shared_data) {
    return nullptr;
  }
  RtlZeroMemory(shared_data, sizeof(SharedProcessorData));
  HYPERPLATFORM_LOG_DEBUG("SharedData=        %p", shared_data);

  // Set up the MSR bitmap
  const auto msr_bitmap = ExAllocatePoolWithTag(NonPagedPoolNx, PAGE_SIZE,
                                                kHyperPlatformCommonPoolTag);
  if (!msr_bitmap) {
    ExFreePoolWithTag(shared_data, kHyperPlatformCommonPoolTag);
    return nullptr;
  }
  RtlZeroMemory(msr_bitmap, PAGE_SIZE);
  shared_data->msr_bitmap = msr_bitmap;

  // Check which MSRs from 0 to 0xfff cause #GP on read and thus should not
  // be set to cause a VM-exit
  bool unsafe_msr_map[0x1000] = {};
  for (auto msr = 0ul; msr < RTL_NUMBER_OF(unsafe_msr_map); ++msr) {
    __try {
      UtilReadMsr(static_cast<Msr>(msr));
    } __except (EXCEPTION_EXECUTE_HANDLER) {
      unsafe_msr_map[msr] = true;
    }
  }

  // Activate VM-exit for RDMSR against all MSRs
  const auto bitmap_read_low = reinterpret_cast<UCHAR *>(msr_bitmap);
  const auto bitmap_read_high = bitmap_read_low + 1024;
  RtlFillMemory(bitmap_read_low, 1024, 0xff);   // read        0 -     1fff
  RtlFillMemory(bitmap_read_high, 1024, 0xff);  // read c0000000 - c0001fff

  // But ignore IA32_MPERF (000000e7) and IA32_APERF (000000e8)
  RTL_BITMAP bitmap_read_low_header = {};
  RtlInitializeBitMap(&bitmap_read_low_header,
                      reinterpret_cast<PULONG>(bitmap_read_low), 1024 * 8);
  RtlClearBits(&bitmap_read_low_header, 0xe7, 2);

  // Also ignore the unsafe MSRs
  for (auto msr = 0ul; msr < RTL_NUMBER_OF(unsafe_msr_map); ++msr) {
    const auto ignore = unsafe_msr_map[msr];
    if (ignore) {
      RtlClearBits(&bitmap_read_low_header, msr, 1);
    }
  }

  // But ignore IA32_GS_BASE (c0000101) and IA32_KERNEL_GS_BASE (c0000102)
  RTL_BITMAP bitmap_read_high_header = {};
  RtlInitializeBitMap(&bitmap_read_high_header,
                      reinterpret_cast<PULONG>(bitmap_read_high), 1024 * 8);
  RtlClearBits(&bitmap_read_high_header, 0x101, 2);

  return shared_data;
}
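
The bitmap layout used above follows the Intel SDM: the first 1024 bytes of the page form the read bitmap for MSRs 0x0-0x1fff, and the next 1024 bytes form the read bitmap for MSRs 0xc0000000-0xc0001fff (the write bitmaps occupy the second half of the page and are left zeroed here). The following helper, a sketch that is not part of HyperPlatform, shows how a VM-exit handler could test whether RDMSR of a given MSR is intercepted:

// Sketch only: returns true when RDMSR of the given MSR is set to cause a
// VM-exit under the bitmap built by VmpInitializeSharedData().
static bool MsrpBitmapInterceptsRead(const UCHAR *msr_bitmap, ULONG32 msr) {
  auto bitmap = msr_bitmap;  // read bitmap for MSRs 0x0 - 0x1fff
  if (msr >= 0xc0000000) {
    bitmap += 1024;          // read bitmap for MSRs 0xc0000000 - 0xc0001fff
    msr -= 0xc0000000;
  }
  if (msr > 0x1fff) {
    return true;  // MSRs outside both ranges always cause a VM-exit
  }
  return ((bitmap[msr / 8] >> (msr % 8)) & 1) != 0;
}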
Example #2
_Use_decl_annotations_ bool EptIsEptAvailable() {
  PAGED_CODE();

  int regs[4] = {};
  __cpuidex(regs, 0x80000008, 0);
  Cpuid80000008Eax cpuidEax = {static_cast<ULONG32>(regs[0])};
  HYPERPLATFORM_LOG_DEBUG("Physical Address Range = %d bits",
                          cpuidEax.fields.physical_address_bits);

  // No processors supporting the Intel 64 architecture support more than 48
  // physical-address bits
  if (cpuidEax.fields.physical_address_bits > 48) {
    return false;
  }

  // page walk length is 4 steps
  // extended page tables can be laid out in write-back memory
  // INVEPT instruction with all possible types is supported
  Ia32VmxEptVpidCapMsr vpid = {UtilReadMsr64(Msr::kIa32VmxEptVpidCap)};
  if (!vpid.fields.support_page_walk_length4 ||
      !vpid.fields.support_write_back_memory_type ||
      !vpid.fields.support_invept ||
      !vpid.fields.support_single_context_invept ||
      !vpid.fields.support_all_context_invept) {
    return false;
  }
  return true;
}
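
The Ia32VmxEptVpidCapMsr type above is a bit-field union over the IA32_VMX_EPT_VPID_CAP MSR (index 0x48C). Here is a sketch of just the fields tested by EptIsEptAvailable(), with bit positions taken from the Intel SDM; the full definition lives in HyperPlatform's ia32_type.h:

// Sketch only: capability bits not tested above are folded into reserved
// fields.
union Ia32VmxEptVpidCapMsrSketch {
  ULONG64 all;
  struct {
    ULONG64 reserved1 : 6;                       //!< [0:5]
    ULONG64 support_page_walk_length4 : 1;       //!< [6]
    ULONG64 reserved2 : 7;                       //!< [7:13]
    ULONG64 support_write_back_memory_type : 1;  //!< [14]
    ULONG64 reserved3 : 5;                       //!< [15:19]
    ULONG64 support_invept : 1;                  //!< [20]
    ULONG64 reserved4 : 4;                       //!< [21:24]
    ULONG64 support_single_context_invept : 1;   //!< [25]
    ULONG64 support_all_context_invept : 1;      //!< [26]
    ULONG64 reserved5 : 37;                      //!< [27:63]
  } fields;
};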
Example #3
// Execute a non-image region as a test
_Use_decl_annotations_ void MmonExecuteDoggyRegion() {
  PAGED_CODE();

#pragma prefast(suppress : 30030, "Allocating executable POOL_TYPE memory")
  auto code = reinterpret_cast<UCHAR *>(ExAllocatePoolWithTag(
      NonPagedPoolExecute, PAGE_SIZE, kHyperPlatformCommonPoolTag));

  if (!code) {
    return;
  }
  RtlZeroMemory(code, PAGE_SIZE);
  HYPERPLATFORM_LOG_DEBUG("PoolCode = %p, Pa = %016llx", code,
                          UtilPaFromVa(code));
  code[0] = 0x90;  // nop
  code[1] = 0x90;  // nop
  if (IsX64()) {
    code[2] = 0xc3;  // ret
  } else {
    code[2] = 0xc2;  // retn 4 (encoded as C2 04 00; the trailing
    code[3] = 0x04;  // 00 byte is already there thanks to RtlZeroMemory)
  }
  KeInvalidateAllCaches();

  // Runs code on all processors at once
  auto function = reinterpret_cast<PKIPI_BROADCAST_WORKER>(code);
  KeIpiGenericCall(function, 0);

  ExFreePoolWithTag(code, kHyperPlatformCommonPoolTag);
}
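
The two return encodings reflect the worker's calling convention: KeIpiGenericCall invokes it with NTAPI linkage, which is __stdcall on x86, so the callee must pop its single 4-byte stack argument (retn 4), while on x64 the argument arrives in a register and a plain ret suffices. The signature the generated bytes must satisfy is, simplified from wdm.h:

// The broadcast worker receives one ULONG_PTR and runs on every processor.
typedef ULONG_PTR(NTAPI *PKIPI_BROADCAST_WORKER)(ULONG_PTR Argument);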
Example #4
// Initialize shared processor data
_Use_decl_annotations_ static SharedProcessorData *VmpInitializeSharedData() {
  PAGED_CODE();

  const auto shared_data = reinterpret_cast<SharedProcessorData *>(
      ExAllocatePoolWithTag(NonPagedPool, sizeof(SharedProcessorData),
                            kHyperPlatformCommonPoolTag));
  if (!shared_data) {
    return nullptr;
  }
  RtlZeroMemory(shared_data, sizeof(SharedProcessorData));
  HYPERPLATFORM_LOG_DEBUG("shared_data           = %p", shared_data);

  // Set up the MSR bitmap
  shared_data->msr_bitmap = VmpBuildMsrBitmap();
  if (!shared_data->msr_bitmap) {
    ExFreePoolWithTag(shared_data, kHyperPlatformCommonPoolTag);
    return nullptr;
  }

  // Set up the IO bitmaps
  const auto io_bitmaps = VmpBuildIoBitmaps();
  if (!io_bitmaps) {
    ExFreePoolWithTag(shared_data->msr_bitmap, kHyperPlatformCommonPoolTag);
    ExFreePoolWithTag(shared_data, kHyperPlatformCommonPoolTag);
    return nullptr;
  }
  shared_data->io_bitmap_a = io_bitmaps;
  shared_data->io_bitmap_b = io_bitmaps + PAGE_SIZE;
  return shared_data;
}
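
This variant assumes VmpBuildIoBitmaps() returns two adjacent pages: IO bitmap A covers ports 0x0000-0x7fff and IO bitmap B covers ports 0x8000-0xffff. A minimal sketch of such a helper, under the assumption that no port is intercepted by default (the real function may set bits for ports of interest):

// Sketch only: allocates IO bitmap A and B back to back with all bits clear.
static UCHAR *VmpBuildIoBitmaps() {
  const auto io_bitmaps = reinterpret_cast<UCHAR *>(ExAllocatePoolWithTag(
      NonPagedPool, PAGE_SIZE * 2, kHyperPlatformCommonPoolTag));
  if (!io_bitmaps) {
    return nullptr;
  }
  RtlZeroMemory(io_bitmaps, PAGE_SIZE * 2);
  return io_bitmaps;
}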
Example #5
// Terminates the log functions.
_Use_decl_annotations_ void LogTermination() {
  PAGED_CODE();

  HYPERPLATFORM_LOG_DEBUG("Finalizing... (Max log usage = %08x bytes)",
                          g_logp_log_buffer_info.log_max_usage);
  HYPERPLATFORM_LOG_INFO("Bye!");
  g_logp_debug_flag = kLogPutLevelDisable;
  LogpFinalizeBufferInfo(&g_logp_log_buffer_info);
}
Example #6
// See: VMM SETUP & TEAR DOWN
_Use_decl_annotations_ static bool VmpEnterVmxMode(
    ProcessorData *processor_data) {
  PAGED_CODE();

  // Apply FIXED bits
  // See: VMX-FIXED BITS IN CR0

  //        IA32_VMX_CRx_FIXED0 IA32_VMX_CRx_FIXED1 Meaning
  // Values 1                   1                   bit of CRx is fixed to 1
  // Values 0                   1                   bit of CRx is flexible
  // Values 0                   0                   bit of CRx is fixed to 0
  const Cr0 cr0_fixed0 = {UtilReadMsr(Msr::kIa32VmxCr0Fixed0)};
  const Cr0 cr0_fixed1 = {UtilReadMsr(Msr::kIa32VmxCr0Fixed1)};
  Cr0 cr0 = {__readcr0()};
  Cr0 cr0_original = cr0;
  cr0.all &= cr0_fixed1.all;
  cr0.all |= cr0_fixed0.all;
  __writecr0(cr0.all);

  HYPERPLATFORM_LOG_DEBUG("IA32_VMX_CR0_FIXED0   = %08x", cr0_fixed0.all);
  HYPERPLATFORM_LOG_DEBUG("IA32_VMX_CR0_FIXED1   = %08x", cr0_fixed1.all);
  HYPERPLATFORM_LOG_DEBUG("Original CR0          = %08x", cr0_original.all);
  HYPERPLATFORM_LOG_DEBUG("Fixed CR0             = %08x", cr0.all);

  // See: VMX-FIXED BITS IN CR4
  const Cr4 cr4_fixed0 = {UtilReadMsr(Msr::kIa32VmxCr4Fixed0)};
  const Cr4 cr4_fixed1 = {UtilReadMsr(Msr::kIa32VmxCr4Fixed1)};
  Cr4 cr4 = {__readcr4()};
  Cr4 cr4_original = cr4;
  cr4.all &= cr4_fixed1.all;
  cr4.all |= cr4_fixed0.all;
  __writecr4(cr4.all);

  HYPERPLATFORM_LOG_DEBUG("IA32_VMX_CR4_FIXED0   = %08x", cr4_fixed0.all);
  HYPERPLATFORM_LOG_DEBUG("IA32_VMX_CR4_FIXED1   = %08x", cr4_fixed1.all);
  HYPERPLATFORM_LOG_DEBUG("Original CR4          = %08x", cr4_original.all);
  HYPERPLATFORM_LOG_DEBUG("Fixed CR4             = %08x", cr4.all);

  // Write a VMCS revision identifier
  const Ia32VmxBasicMsr vmx_basic_msr = {UtilReadMsr64(Msr::kIa32VmxBasic)};
  processor_data->vmxon_region->revision_identifier =
      vmx_basic_msr.fields.revision_identifier;

  auto vmxon_region_pa = UtilPaFromVa(processor_data->vmxon_region);
  if (__vmx_on(&vmxon_region_pa)) {
    return false;
  }

  // See: Guidelines for Use of the INVVPID Instruction, and Guidelines for Use
  // of the INVEPT Instruction
  UtilInveptGlobal();
  UtilInvvpidAllContext();
  return true;
}
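
The FIXED0/FIXED1 adjustment applied to both CR0 and CR4 reduces to a single expression. A small helper makes the rule from the comment table explicit; this is a sketch, HyperPlatform simply inlines the two operations:

// Bits that are 0 in FIXED1 must be cleared; bits that are 1 in FIXED0 must
// be set; flexible bits (FIXED0=0, FIXED1=1) pass through unchanged.
static ULONG_PTR VmpApplyFixedBits(ULONG_PTR value, ULONG_PTR fixed0,
                                   ULONG_PTR fixed1) {
  return (value & fixed1) | fixed0;
}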
Example #7
// Initializes MemoryMon
_Use_decl_annotations_ NTSTATUS
MmonInitialization(PDRIVER_OBJECT driver_object) {
  PAGED_CODE();

  auto status = MmonpInitializeRtlPcToFileHeader(driver_object);
  HYPERPLATFORM_LOG_DEBUG("PcToFileHeader = %p", g_mmonp_RtlPcToFileHeader);
  if (!NT_SUCCESS(status)) {
    return status;
  }

  status = MmonpInitializeMmPfnDatabase();
  HYPERPLATFORM_LOG_DEBUG("MmPfnDatabase = %p", g_mmonp_MmPfnDatabase);
  if (!NT_SUCCESS(status)) {
    return status;
  }

  MmonExecuteDoggyRegion();
  return STATUS_SUCCESS;
}
Example #8
// Frees all EPT stuff
_Use_decl_annotations_ void EptTermination(EptData *ept_data) {
  HYPERPLATFORM_LOG_DEBUG("Used pre-allocated entries  = %2d / %2d",
                          ept_data->preallocated_entries_count,
                          kEptpNumberOfPreallocatedEntries);

  EptpFreeUnusedPreAllocatedEntries(ept_data->preallocated_entries,
                                    ept_data->preallocated_entries_count);
  EptpDestructTables(ept_data->ept_pml4, 4);
  ExFreePoolWithTag(ept_data->ept_pointer, kHyperPlatformCommonPoolTag);
  ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
}
Example #9
// Uses STL for demonstration
extern "C++" static void GMonpDemoStl() {
  std::vector<std::unique_ptr<std::wstring>> strs;
  for (auto i = 0ul; i < 10; ++i) {
    strs.push_back(
        std::make_unique<std::wstring>(L"i = " + std::to_wstring(i)));
  }
  // Using a lambda expression
  std::for_each(strs.cbegin(), strs.cend(), [](const auto &str) {
    HYPERPLATFORM_LOG_DEBUG("%S", str->c_str());
  });
}
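
Using std::vector and std::make_unique in a driver presupposes kernel-mode definitions of operator new and operator delete, which the kernel build environment does not supply. A heavily hedged sketch of what such definitions could look like; the pool type, tag, and failure handling here are assumptions, not the project's actual code:

// Sketch only: std::make_unique requires operator new to never return
// nullptr, so failure must raise instead of returning.
void *__cdecl operator new(size_t size) {
  void *p = ExAllocatePoolWithTag(NonPagedPoolNx, size,
                                  kHyperPlatformCommonPoolTag);
  if (!p) {
    ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
  }
  return p;
}

void __cdecl operator delete(void *p) {
  if (p) {
    ExFreePoolWithTag(p, kHyperPlatformCommonPoolTag);
  }
}

A complete build also needs the array and sized-deallocation forms, plus C++ exception support or a non-throwing allocation strategy.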
Example #10
// Initializes MemoryMon
_Use_decl_annotations_ NTSTATUS MmonInitialization() {
  PAGED_CODE();

  auto status = MmonpInitializeMmPfnDatabase();
  HYPERPLATFORM_LOG_DEBUG("MmPfnDatabase = %p", g_mmonp_MmPfnDatabase);
  if (!NT_SUCCESS(status)) {
    return status;
  }

  // This execution should NOT be detected since the system is not
  // virtualized yet
  MmonExecuteDoggyRegion();
  return STATUS_SUCCESS;
}
Example #11
// Terminates the log functions without releasing resources.
_Use_decl_annotations_ void LogIrpShutdownHandler() {
  PAGED_CODE();

  HYPERPLATFORM_LOG_DEBUG("Flushing... (Max log usage = %08x bytes)",
                          g_logp_log_buffer_info.log_max_usage);
  HYPERPLATFORM_LOG_INFO("Bye!");
  g_logp_debug_flag = kLogPutLevelDisable;

  // Wait until the log buffer is emptied.
  auto &info = g_logp_log_buffer_info;
  while (info.log_buffer_head[0]) {
    LogpSleep(kLogpLogFlushIntervalMsec);
  }
}
Example #12
// Terminates a given process as well as its child processes, and waits for
// termination of the given process
_Use_decl_annotations_ static NTSTATUS EopmonpTerminateProcessTree(
    HANDLE process_handle, HANDLE pid) {
  PAGED_CODE();

  auto status = ZwTerminateProcess(process_handle, 0);
  HYPERPLATFORM_LOG_DEBUG(
      "Exploitation detected. Process %Iu is being terminated (status = %08x).",
      pid, status);
  if (status == STATUS_PROCESS_IS_TERMINATING) {
    return status;
  }
  status = EopmonpForEachProcess(EopmonpTerminateProcessIfChild, pid);
  NT_VERIFY(NT_SUCCESS(status));
  status = ZwWaitForSingleObject(process_handle, FALSE, nullptr);
  NT_VERIFY(NT_SUCCESS(status));
  return status;
}
Example #13
// Terminates a process if it is created from a dodgy process
_Use_decl_annotations_ static bool EopmonpTerminateProcessIfChild(
    HANDLE pid, void* context) {
  PAGED_CODE();

  const auto dodgy_pid = reinterpret_cast<HANDLE>(context);

  const auto process_handle = EopmonpOpenProcess(pid);
  if (!process_handle) {
    return true;
  }

  // Is this process created from the dodgy process?
  const auto parent_pid =
      EopmonpGetProcessParentProcessIdByHandle(process_handle);
  if (parent_pid != dodgy_pid) {
    goto exit;
  }

  // Is this process created later than the dodgy process?
  const auto create_time = EopmonpGetProcessCreateTimeQuadPart(pid);
  const auto parent_create_time =
      EopmonpGetProcessCreateTimeQuadPart(dodgy_pid);
  if (!create_time || !parent_create_time ||
      create_time <= parent_create_time) {
    goto exit;
  }

  // Yes, terminate this process as well as its child processes
  auto status = ZwTerminateProcess(process_handle, 0);
  HYPERPLATFORM_LOG_DEBUG(
      "Exploitation detected. Process %Iu is being terminated (status = %08x).",
      pid, status);
  status = EopmonpForEachProcess(EopmonpTerminateProcessIfChild, pid);
  NT_VERIFY(NT_SUCCESS(status));

exit:;
  ZwClose(process_handle);
  return true;
}
Example #14
_Use_decl_annotations_ NTSTATUS
LogInitialization(ULONG flag, const wchar_t *log_file_path) {
  PAGED_CODE();

  auto status = STATUS_SUCCESS;

  g_logp_debug_flag = flag;

  // Initialize a log file if a log file path is specified.
  bool need_reinitialization = false;
  if (log_file_path) {
    status = LogpInitializeBufferInfo(log_file_path, &g_logp_log_buffer_info);
    if (status == STATUS_REINITIALIZATION_NEEDED) {
      need_reinitialization = true;
    } else if (!NT_SUCCESS(status)) {
      return status;
    }
  }

  // Test the log.
  status = HYPERPLATFORM_LOG_INFO("Log has been %sinitialized.",
                                  (need_reinitialization ? "partially " : ""));
  if (!NT_SUCCESS(status)) {
    goto Fail;
  }
  HYPERPLATFORM_LOG_DEBUG("Info= %p, Buffer= %p %p, File= %S",
                          &g_logp_log_buffer_info,
                          g_logp_log_buffer_info.log_buffer1,
                          g_logp_log_buffer_info.log_buffer2, log_file_path);
  return (need_reinitialization ? STATUS_REINITIALIZATION_NEEDED
                                : STATUS_SUCCESS);

Fail:;
  if (log_file_path) {
    LogpFinalizeBufferInfo(&g_logp_log_buffer_info);
  }
  return status;
}
Example #15
// A thread runs as long as info.buffer_flush_thread_should_be_alive is true and
// flushes a log buffer to a log file every kLogpLogFlushIntervalMsec msec.
_Use_decl_annotations_ static VOID LogpBufferFlushThreadRoutine(
    void *start_context) {
  PAGED_CODE();
  auto status = STATUS_SUCCESS;
  auto info = reinterpret_cast<LogBufferInfo *>(start_context);
  info->buffer_flush_thread_started = true;
  HYPERPLATFORM_LOG_DEBUG("Log thread started (TID= %p).",
                          PsGetCurrentThreadId());

  while (info->buffer_flush_thread_should_be_alive) {
    NT_ASSERT(LogpIsLogFileActivated(*info));
    if (info->log_buffer_head[0]) {
      NT_ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
      NT_ASSERT(!KeAreAllApcsDisabled());
      status = LogpFlushLogBuffer(info);
      // Do not flush the file here, for overall performance. Even in the
      // case of a bug check, we should be able to recover logs by looking
      // at both log buffers.
    }
    LogpSleep(kLogpLogFlushIntervalMsec);
  }
  PsTerminateSystemThread(status);
}
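
LogpSleep, which paces the flush loop above, can be assumed to be a thin wrapper over KeDelayExecutionThread with a relative timeout. A sketch under that assumption:

// Sketch only: sleeps for the given number of milliseconds. A negative
// interval means relative time, expressed in 100-nanosecond units.
static NTSTATUS LogpSleep(LONG millisecond) {
  PAGED_CODE();
  LARGE_INTEGER interval = {};
  interval.QuadPart = -(10000ll * millisecond);
  return KeDelayExecutionThread(KernelMode, FALSE, &interval);
}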
Example #16
// Execute a non-image region as a test
_Use_decl_annotations_ void MmonExecuteDoggyRegion() {
  PAGED_CODE();

#pragma warning(push)
#pragma warning(disable : 30030)
  auto code = reinterpret_cast<UCHAR *>(ExAllocatePoolWithTag(
      NonPagedPoolExecute, PAGE_SIZE, kHyperPlatformCommonPoolTag));
#pragma warning(pop)

  if (!code) {
    return;
  }
  RtlZeroMemory(code, PAGE_SIZE);
  HYPERPLATFORM_LOG_DEBUG("PoolCode = %p, Pa = %016llx", code,
                          UtilPaFromVa(code));
  code[0] = 0x90; // nop
  code[1] = 0x90; // nop
  code[2] = 0xc3; // ret
  MmonpInvalidateInstructionCache(code, PAGE_SIZE);

  auto function = reinterpret_cast<void (*)(void)>(code);
  function();
  ExFreePoolWithTag(code, kHyperPlatformCommonPoolTag);
}
Example #17
// Reads and stores all MTRRs to set a correct memory type for EPT
_Use_decl_annotations_ void EptInitializeMtrrEntries() {
  PAGED_CODE();

  int index = 0;
  MtrrData *mtrr_entries = g_eptp_mtrr_entries;

  // Get and store the default memory type
  Ia32MtrrDefaultTypeMsr default_type = {UtilReadMsr64(Msr::kIa32MtrrDefType)};
  g_eptp_mtrr_default_type = default_type.fields.default_mtemory_type;

  // Read MTRR capability
  Ia32MtrrCapabilitiesMsr mtrr_capabilities = {
      UtilReadMsr64(Msr::kIa32MtrrCap)};
  HYPERPLATFORM_LOG_DEBUG(
      "MTRR Default=%lld, VariableCount=%lld, FixedSupported=%lld, FixedEnabled=%lld",
      default_type.fields.default_mtemory_type,
      mtrr_capabilities.fields.variable_range_count,
      mtrr_capabilities.fields.fixed_range_supported,
      default_type.fields.fixed_mtrrs_enabled);

  // Read fixed range MTRRs if supported
  if (mtrr_capabilities.fields.fixed_range_supported &&
      default_type.fields.fixed_mtrrs_enabled) {
    static const auto k64kBase = 0x0;
    static const auto k64kManagedSize = 0x10000;
    static const auto k16kBase = 0x80000;
    static const auto k16kManagedSize = 0x4000;
    static const auto k4kBase = 0xC0000;
    static const auto k4kManagedSize = 0x1000;

    // The kIa32MtrrFix64k00000 manages 8 ranges of memory. The first range
    // starts at 0x0, and each range manages a 64k (0x10000) range. For
    // example,
    //  entry[0]:     0x0 : 0x10000 - 1
    //  entry[1]: 0x10000 : 0x20000 - 1
    //  ...
    //  entry[7]: 0x70000 : 0x80000 - 1
    ULONG64 offset = 0;
    Ia32MtrrFixedRangeMsr fixed_range = {
        UtilReadMsr64(Msr::kIa32MtrrFix64k00000)};
    for (auto memory_type : fixed_range.fields.types) {
      // Each entry manages 64k (0x10000) length.
      ULONG64 base = k64kBase + offset;
      offset += k64kManagedSize;

      // Saves the MTRR
      mtrr_entries[index].enabled = true;
      mtrr_entries[index].fixedMtrr = true;
      mtrr_entries[index].type = memory_type;
      mtrr_entries[index].range_base = base;
      mtrr_entries[index].range_end = base + k64kManagedSize - 1;
      index++;
    }
    NT_ASSERT(k64kBase + offset == k16kBase);

    // kIa32MtrrFix16k80000 manages 8 ranges of memory. The first range starts
    // at 0x80000, and each range manages a 16k (0x4000) range. For example,
    //  entry[0]: 0x80000 : 0x84000 - 1
    //  entry[1]: 0x84000 : 0x88000 - 1
    //  ...
    //  entry[7]: 0x9C000 : 0xA0000 - 1
    // Also, subsequent memory ranges are managed by another MSR,
    // kIa32MtrrFix16kA0000, which manages 8 ranges of memory starting at
    // 0xA0000 in the same fashion. For example,
    //  entry[0]: 0xA0000 : 0xA4000 - 1
    //  entry[1]: 0xA4000 : 0xA8000 - 1
    //  ...
    //  entry[7]: 0xBC000 : 0xC0000 - 1
    offset = 0;
    for (auto msr = static_cast<ULONG>(Msr::kIa32MtrrFix16k80000);
         msr <= static_cast<ULONG>(Msr::kIa32MtrrFix16kA0000); msr++) {
      fixed_range.all = UtilReadMsr64(static_cast<Msr>(msr));
      for (auto memory_type : fixed_range.fields.types) {
        // Each entry manages 16k (0x4000) length.
        ULONG64 base = k16kBase + offset;
        offset += k16kManagedSize;

        // Saves the MTRR
        mtrr_entries[index].enabled = true;
        mtrr_entries[index].fixedMtrr = true;
        mtrr_entries[index].type = memory_type;
        mtrr_entries[index].range_base = base;
        mtrr_entries[index].range_end = base + k16kManagedSize - 1;
        index++;
      }
    }
    NT_ASSERT(k16kBase + offset == k4kBase);

    // kIa32MtrrFix4kC0000 manages 8 ranges of memory. The first range starts
    // at 0xC0000, and each range manages a 4k (0x1000) range. For example,
    //  entry[0]: 0xC0000 : 0xC1000 - 1
    //  entry[1]: 0xC1000 : 0xC2000 - 1
    //  ...
    //  entry[7]: 0xC7000 : 0xC8000 - 1
    // Also, subsequent memory ranges are managed by other MSRs such as
    // kIa32MtrrFix4kC8000, kIa32MtrrFix4kD0000, and kIa32MtrrFix4kF8000. Each
    // MSR manages 8 ranges of memory in the same fashion up to 0x100000.
    offset = 0;
    for (auto msr = static_cast<ULONG>(Msr::kIa32MtrrFix4kC0000);
         msr <= static_cast<ULONG>(Msr::kIa32MtrrFix4kF8000); msr++) {
      fixed_range.all = UtilReadMsr64(static_cast<Msr>(msr));
      for (auto memory_type : fixed_range.fields.types) {
        // Each entry manages 4k (0x1000) length.
        ULONG64 base = k4kBase + offset;
        offset += k4kManagedSize;

        // Saves the MTRR
        mtrr_entries[index].enabled = true;
        mtrr_entries[index].fixedMtrr = true;
        mtrr_entries[index].type = memory_type;
        mtrr_entries[index].range_base = base;
        mtrr_entries[index].range_end = base + k4kManagedSize - 1;
        index++;
      }
    }
    NT_ASSERT(k4kBase + offset == 0x100000);
  }

  // Read all variable range MTRRs
  for (auto i = 0; i < mtrr_capabilities.fields.variable_range_count; i++) {
    // Read MTRR mask and check if it is in use
    const auto phy_mask = static_cast<ULONG>(Msr::kIa32MtrrPhysMaskN) + i * 2;
    Ia32MtrrPhysMaskMsr mtrr_mask = {UtilReadMsr64(static_cast<Msr>(phy_mask))};
    if (!mtrr_mask.fields.valid) {
      continue;
    }

    // Get the length this MTRR manages
    ULONG length;
    BitScanForward64(&length, mtrr_mask.fields.phys_mask * PAGE_SIZE);

    // Read MTRR base and calculate the range this MTRR manages
    const auto phy_base = static_cast<ULONG>(Msr::kIa32MtrrPhysBaseN) + i * 2;
    Ia32MtrrPhysBaseMsr mtrr_base = {UtilReadMsr64(static_cast<Msr>(phy_base))};
    ULONG64 base = mtrr_base.fields.phys_base * PAGE_SIZE;
    ULONG64 end = base + (1ull << length) - 1;

    // Save it
    mtrr_entries[index].enabled = true;
    mtrr_entries[index].fixedMtrr = false;
    mtrr_entries[index].type = mtrr_base.fields.type;
    mtrr_entries[index].range_base = base;
    mtrr_entries[index].range_end = end;
    index++;
  }
}
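
Once populated, g_eptp_mtrr_entries lets EPT construction pick a memory type for any physical address. A simplified sketch of such a lookup; the real resolver must also honor MTRR precedence rules, for example that fixed ranges override variable ranges and UC wins over WB on overlap:

// Sketch only: returns the type of the first matching MTRR entry, or the
// default memory type when no entry covers the address.
static ULONG64 EptpGetMtrrMemoryTypeSketch(ULONG64 physical_address) {
  for (const auto &entry : g_eptp_mtrr_entries) {
    if (!entry.enabled) {
      break;  // entries are filled front to back; stop at the first unused one
    }
    if (entry.range_base <= physical_address &&
        physical_address <= entry.range_end) {
      return entry.type;
    }
  }
  return g_eptp_mtrr_default_type;
}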
Example #18
// Allocates structures for virtualization, initializes VMCS and virtualizes
// the current processor
_Use_decl_annotations_ static void VmpInitializeVm(
    ULONG_PTR guest_stack_pointer, ULONG_PTR guest_instruction_pointer,
    void *context) {
  PAGED_CODE();

  const auto shared_data = reinterpret_cast<SharedProcessorData *>(context);
  if (!shared_data) {
    return;
  }

  // Allocate related structures
  const auto processor_data =
      reinterpret_cast<ProcessorData *>(ExAllocatePoolWithTag(
          NonPagedPool, sizeof(ProcessorData), kHyperPlatformCommonPoolTag));
  if (!processor_data) {
    return;
  }
  RtlZeroMemory(processor_data, sizeof(ProcessorData));
  processor_data->shared_data = shared_data;
  InterlockedIncrement(&processor_data->shared_data->reference_count);

  // Set up EPT
  processor_data->ept_data = EptInitialization();
  if (!processor_data->ept_data) {
    goto ReturnFalse;
  }

  // Check if XSAVE/XRSTOR are available and save an instruction mask for all
  // supported user state components
  processor_data->xsave_inst_mask =
      RtlGetEnabledExtendedFeatures(static_cast<ULONG64>(-1));
  HYPERPLATFORM_LOG_DEBUG("xsave_inst_mask       = %p",
                          processor_data->xsave_inst_mask);
  if (processor_data->xsave_inst_mask) {
    // Allocate a large enough XSAVE area to store all supported user state
    // components. The size is rounded up to a multiple of the page size so
    // that the address satisfies the 64-byte alignment requirement of
    // XSAVE/XRSTOR.
    //
    // See: ENUMERATION OF CPU SUPPORT FOR XSAVE INSTRUCTIONS AND
    // XSAVE-SUPPORTED FEATURES
    int cpu_info[4] = {};
    __cpuidex(cpu_info, 0xd, 0);
    const auto xsave_area_size = ROUND_TO_PAGES(cpu_info[2]);  // ecx
    processor_data->xsave_area = ExAllocatePoolWithTag(
        NonPagedPool, xsave_area_size, kHyperPlatformCommonPoolTag);
    if (!processor_data->xsave_area) {
      goto ReturnFalse;
    }
    RtlZeroMemory(processor_data->xsave_area, xsave_area_size);
  } else {
    // Use FXSAVE/FXRSTOR instead.
    int cpu_info[4] = {};
    __cpuid(cpu_info, 1);
    const CpuFeaturesEcx cpu_features_ecx = {static_cast<ULONG32>(cpu_info[2])};
    const CpuFeaturesEdx cpu_features_edx = {static_cast<ULONG32>(cpu_info[3])};
    if (cpu_features_ecx.fields.avx) {
      HYPERPLATFORM_LOG_ERROR("A processor supports AVX but not XSAVE/XRSTOR.");
      goto ReturnFalse;
    }
    if (!cpu_features_edx.fields.fxsr) {
      HYPERPLATFORM_LOG_ERROR("A processor does not support FXSAVE/FXRSTOR.");
      goto ReturnFalse;
    }
  }

  // Allocate other processor data fields
  processor_data->vmm_stack_limit =
      UtilAllocateContiguousMemory(KERNEL_STACK_SIZE);
  if (!processor_data->vmm_stack_limit) {
    goto ReturnFalse;
  }
  RtlZeroMemory(processor_data->vmm_stack_limit, KERNEL_STACK_SIZE);

  processor_data->vmcs_region =
      reinterpret_cast<VmControlStructure *>(ExAllocatePoolWithTag(
          NonPagedPool, kVmxMaxVmcsSize, kHyperPlatformCommonPoolTag));
  if (!processor_data->vmcs_region) {
    goto ReturnFalse;
  }
  RtlZeroMemory(processor_data->vmcs_region, kVmxMaxVmcsSize);

  processor_data->vmxon_region =
      reinterpret_cast<VmControlStructure *>(ExAllocatePoolWithTag(
          NonPagedPool, kVmxMaxVmcsSize, kHyperPlatformCommonPoolTag));
  if (!processor_data->vmxon_region) {
    goto ReturnFalse;
  }
  RtlZeroMemory(processor_data->vmxon_region, kVmxMaxVmcsSize);

  // Initialize stack memory for VMM like this:
  //
  // (High)
  // +------------------+  <- vmm_stack_region_base      (eg, AED37000)
  // | processor_data   |
  // +------------------+  <- vmm_stack_data             (eg, AED36FFC)
  // | MAXULONG_PTR     |
  // +------------------+  <- vmm_stack_base (initial SP)(eg, AED36FF8)
  // |                  |    v
  // | (VMM Stack)      |    v (grow)
  // |                  |    v
  // +------------------+  <- vmm_stack_limit            (eg, AED34000)
  // (Low)
  const auto vmm_stack_region_base =
      reinterpret_cast<ULONG_PTR>(processor_data->vmm_stack_limit) +
      KERNEL_STACK_SIZE;
  const auto vmm_stack_data = vmm_stack_region_base - sizeof(void *);
  const auto vmm_stack_base = vmm_stack_data - sizeof(void *);
  HYPERPLATFORM_LOG_DEBUG("vmm_stack_limit       = %p",
                          processor_data->vmm_stack_limit);
  HYPERPLATFORM_LOG_DEBUG("vmm_stack_region_base = %p", vmm_stack_region_base);
  HYPERPLATFORM_LOG_DEBUG("vmm_stack_data        = %p", vmm_stack_data);
  HYPERPLATFORM_LOG_DEBUG("vmm_stack_base        = %p", vmm_stack_base);
  HYPERPLATFORM_LOG_DEBUG("processor_data        = %p stored at %p",
                          processor_data, vmm_stack_data);
  HYPERPLATFORM_LOG_DEBUG("guest_stack_pointer   = %p", guest_stack_pointer);
  HYPERPLATFORM_LOG_DEBUG("guest_inst_pointer    = %p",
                          guest_instruction_pointer);
  *reinterpret_cast<ULONG_PTR *>(vmm_stack_base) = MAXULONG_PTR;
  *reinterpret_cast<ProcessorData **>(vmm_stack_data) = processor_data;

  // Set up VMCS
  if (!VmpEnterVmxMode(processor_data)) {
    goto ReturnFalse;
  }
  if (!VmpInitializeVmcs(processor_data)) {
    goto ReturnFalseWithVmxOff;
  }
  if (!VmpSetupVmcs(processor_data, guest_stack_pointer,
                    guest_instruction_pointer, vmm_stack_base)) {
    goto ReturnFalseWithVmxOff;
  }

  // Do virtualize the processor
  VmpLaunchVm();

// This code is not reached when vmlaunch succeeds. Instead, the context
// jumps to the address specified by guest_instruction_pointer.

ReturnFalseWithVmxOff:;
  __vmx_off();

ReturnFalse:;
  VmpFreeProcessorData(processor_data);
}
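
The MAXULONG_PTR marker and the processor_data slot exist for the VM-exit path: HOST_RSP is set to vmm_stack_base, so on every VM-exit the assembly entry point (AsmVmmEntryPoint) finds processor_data at a fixed offset from the stack top. A conceptual sketch of that layout as a struct; this illustrates the diagram above and is not necessarily a type the project defines:

// Sketch: what sits at the top of the VMM stack when a VM-exit occurs.
// RSP points at reserved; the marker makes the stack base easy to spot in a
// debugger, and processor_data sits in the slot right above it.
struct VmmInitialStackSketch {
  ULONG_PTR reserved;             // MAXULONG_PTR, written by VmpInitializeVm
  ProcessorData *processor_data;  // also written by VmpInitializeVm
};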
Example #19
// Locate MmPfnDatabase
_Use_decl_annotations_ static NTSTATUS MmonpInitializeMmPfnDatabase() {
  PAGED_CODE();

  RTL_OSVERSIONINFOW os_version = {};
  auto status = RtlGetVersion(&os_version);
  if (!NT_SUCCESS(status)) {
    return status;
  }

  // Set appropriate patterns based on the OS version
  struct MmPfnDatabaseSearchPattern {
    const UCHAR *bytes;
    SIZE_T bytes_size;
    bool hard_coded;
  };
  MmPfnDatabaseSearchPattern patterns[2] = {};

  if (IsX64()) {
    // Windows 10 build 14316 is the first version that implements randomized
    // page tables
    if (os_version.dwMajorVersion < 10 || os_version.dwBuildNumber < 14316) {
      // PFN database is at the constant location on older x64 Windows
      g_mmonp_MmPfnDatabase = reinterpret_cast<void *>(0xfffffa8000000000);
      return STATUS_SUCCESS;
    }

    // Windows 10 x64 Build 14332+
    static const UCHAR kPatternWin10x64[] = {
        0x48, 0x8B, 0xC1,        // mov     rax, rcx
        0x48, 0xC1, 0xE8, 0x0C,  // shr     rax, 0Ch
        0x48, 0x8D, 0x14, 0x40,  // lea     rdx, [rax + rax * 2]
        0x48, 0x03, 0xD2,        // add     rdx, rdx
        0x48, 0xB8,              // mov     rax, 0FFFFFA8000000008h
    };
    patterns[0].bytes = kPatternWin10x64;
    patterns[0].bytes_size = sizeof(kPatternWin10x64);
    patterns[0].hard_coded = true;

  } else {
    // x86
    if (os_version.dwMajorVersion == 6 && os_version.dwMinorVersion == 1) {
      // Windows 7 (No PAE)
      static const UCHAR kPatternWin7[] = {
          0x6B, 0xC0, 0x18,  // imul    eax, 18h
          0x8B, 0x0D,        // mov     ecx, ds:_MmPfnDatabase
      };
      // Windows 7 (PAE)
      static const UCHAR kPatternWin7Pae[] = {
          0x6B, 0xC0, 0x1C,  // imul    eax, 1Ch
          0x8B, 0x0D,        // mov     ecx, ds:_MmPfnDatabase
      };

      if (UtilIsX86Pae()) {
        patterns[0].bytes = kPatternWin7Pae;
        patterns[0].bytes_size = sizeof(kPatternWin7Pae);
        patterns[0].hard_coded = false;
      } else {
        patterns[0].bytes = kPatternWin7;
        patterns[0].bytes_size = sizeof(kPatternWin7);
        patterns[0].hard_coded = false;
      }

    } else if ((os_version.dwMajorVersion == 6 &&
                os_version.dwMinorVersion == 3) ||
               (os_version.dwMajorVersion == 10 &&
                os_version.dwMinorVersion == 0)) {
      // Windows 8.1 and 10
      static const UCHAR kPatternWin81And10_0[] = {
          0xC1, 0xF8, 0x0C,  // sar     eax, 0Ch
          0xA1,              // mov     eax, ds:_MmPfnDatabase
      };
      static const UCHAR kPatternWin81And10_1[] = {
          0xC1, 0xE8, 0x0C,  // shr     eax, 0Ch
          0xA1,              // mov     eax, ds:_MmPfnDatabase
      };
      patterns[0].bytes = kPatternWin81And10_0;
      patterns[0].bytes_size = sizeof(kPatternWin81And10_0);
      patterns[0].hard_coded = false;
      patterns[1].bytes = kPatternWin81And10_1;
      patterns[1].bytes_size = sizeof(kPatternWin81And10_1);
      patterns[1].hard_coded = false;

    } else {
      // Unknown x86 OS version
      return STATUS_UNSUCCESSFUL;
    }
  }

  // Search the patterns from MmGetVirtualForPhysical
  const auto p_MmGetVirtualForPhysical = reinterpret_cast<UCHAR *>(
      UtilGetSystemProcAddress(L"MmGetVirtualForPhysical"));
  if (!p_MmGetVirtualForPhysical) {
    return STATUS_PROCEDURE_NOT_FOUND;
  }

  for (const auto &pattern : patterns) {
    if (!pattern.bytes) {
      break;  // no more patterns
    }

    auto found = reinterpret_cast<UCHAR *>(UtilMemMem(
        p_MmGetVirtualForPhysical, 0x20, pattern.bytes, pattern.bytes_size));
    if (!found) {
      continue;
    }

    // Get an address of PFN database
    found += pattern.bytes_size;
    if (pattern.hard_coded) {
      HYPERPLATFORM_LOG_DEBUG("Found a hard coded PFN database address at %p",
                              found);
      g_mmonp_MmPfnDatabase = *reinterpret_cast<void **>(found);
    } else {
      HYPERPLATFORM_LOG_DEBUG("Found a reference to MmPfnDatabase at %p",
                              found);
      const auto mmpfn_address = *reinterpret_cast<ULONG_PTR *>(found);
      g_mmonp_MmPfnDatabase = *reinterpret_cast<void **>(mmpfn_address);
    }

    // On Windows 10 RS, the value carries an extra 0x8 offset. Remove it by
    // page-aligning.
    g_mmonp_MmPfnDatabase = PAGE_ALIGN(g_mmonp_MmPfnDatabase);
    break;
  }

  HYPERPLATFORM_LOG_DEBUG("MmPfnDatabase = %p", g_mmonp_MmPfnDatabase);
  if (!g_mmonp_MmPfnDatabase) {
    return STATUS_UNSUCCESSFUL;
  }

  return STATUS_SUCCESS;
}
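
The scan above depends on UtilMemMem to locate a byte pattern within the first 0x20 bytes of MmGetVirtualForPhysical. Assuming a plain memmem-style helper, a sketch:

// Sketch only: a naive forward search for pattern inside search_base.
static void *UtilMemMemSketch(const void *search_base, SIZE_T search_size,
                              const void *pattern, SIZE_T pattern_size) {
  if (pattern_size > search_size) {
    return nullptr;
  }
  const auto base = static_cast<const char *>(search_base);
  for (SIZE_T i = 0; i <= search_size - pattern_size; ++i) {
    if (RtlCompareMemory(base + i, pattern, pattern_size) == pattern_size) {
      return const_cast<char *>(base + i);
    }
  }
  return nullptr;
}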
Example #20
// Allocates structures for virtualization, initializes VMCS and virtualizes
// the current processor
_Use_decl_annotations_ static void VmpInitializeVm(
    ULONG_PTR guest_stack_pointer, ULONG_PTR guest_instruction_pointer,
    void *context) {
  const auto shared_data = reinterpret_cast<SharedProcessorData *>(context);
  if (!shared_data) {
    return;
  }

  // Allocate related structures
  const auto processor_data =
      reinterpret_cast<ProcessorData *>(ExAllocatePoolWithTag(
          NonPagedPoolNx, sizeof(ProcessorData), kHyperPlatformCommonPoolTag));
  if (!processor_data) {
    return;
  }
  RtlZeroMemory(processor_data, sizeof(ProcessorData));

  // Set up EPT
  processor_data->ept_data = EptInitialization();
  if (!processor_data->ept_data) {
    goto ReturnFalse;
  }

  const auto vmm_stack_limit = UtilAllocateContiguousMemory(KERNEL_STACK_SIZE);
  const auto vmcs_region =
      reinterpret_cast<VmControlStructure *>(ExAllocatePoolWithTag(
          NonPagedPoolNx, kVmxMaxVmcsSize, kHyperPlatformCommonPoolTag));
  const auto vmxon_region =
      reinterpret_cast<VmControlStructure *>(ExAllocatePoolWithTag(
          NonPagedPoolNx, kVmxMaxVmcsSize, kHyperPlatformCommonPoolTag));

  // Initialize the management structure
  processor_data->vmm_stack_limit = vmm_stack_limit;
  processor_data->vmcs_region = vmcs_region;
  processor_data->vmxon_region = vmxon_region;

  if (!vmm_stack_limit || !vmcs_region || !vmxon_region) {
    goto ReturnFalse;
  }
  RtlZeroMemory(vmm_stack_limit, KERNEL_STACK_SIZE);
  RtlZeroMemory(vmcs_region, kVmxMaxVmcsSize);
  RtlZeroMemory(vmxon_region, kVmxMaxVmcsSize);

  // Initialize stack memory for VMM like this:
  //
  // (High)
  // +------------------+  <- vmm_stack_region_base      (eg, AED37000)
  // | processor_data   |
  // +------------------+  <- vmm_stack_data             (eg, AED36FFC)
  // | MAXULONG_PTR     |
  // +------------------+  <- vmm_stack_base (initial SP)(eg, AED36FF8)
  // |                  |    v
  // | (VMM Stack)      |    v (grow)
  // |                  |    v
  // +------------------+  <- vmm_stack_limit            (eg, AED34000)
  // (Low)
  const auto vmm_stack_region_base =
      reinterpret_cast<ULONG_PTR>(vmm_stack_limit) + KERNEL_STACK_SIZE;
  const auto vmm_stack_data = vmm_stack_region_base - sizeof(void *);
  const auto vmm_stack_base = vmm_stack_data - sizeof(void *);
  HYPERPLATFORM_LOG_DEBUG("VmmStackTop=       %p", vmm_stack_limit);
  HYPERPLATFORM_LOG_DEBUG("VmmStackBottom=    %p", vmm_stack_region_base);
  HYPERPLATFORM_LOG_DEBUG("VmmStackData=      %p", vmm_stack_data);
  HYPERPLATFORM_LOG_DEBUG("ProcessorData=     %p stored at %p", processor_data,
                          vmm_stack_data);
  HYPERPLATFORM_LOG_DEBUG("VmmStackBase=      %p", vmm_stack_base);
  HYPERPLATFORM_LOG_DEBUG("GuestStackPointer= %p", guest_stack_pointer);
  HYPERPLATFORM_LOG_DEBUG("GuestInstPointer=  %p", guest_instruction_pointer);
  *reinterpret_cast<ULONG_PTR *>(vmm_stack_base) = MAXULONG_PTR;
  *reinterpret_cast<ProcessorData **>(vmm_stack_data) = processor_data;

  processor_data->shared_data = shared_data;
  InterlockedIncrement(&processor_data->shared_data->reference_count);

  // Set up VMCS
  if (!VmpEnterVmxMode(processor_data)) {
    goto ReturnFalse;
  }
  if (!VmpInitializeVMCS(processor_data)) {
    goto ReturnFalseWithVmxOff;
  }
  if (!VmpSetupVMCS(processor_data, guest_stack_pointer,
                    guest_instruction_pointer, vmm_stack_base)) {
    goto ReturnFalseWithVmxOff;
  }

  // Do virtualize the processor
  VmpLaunchVM();

// This code is not reached when vmlaunch succeeds. Instead, the context
// jumps to the address specified by guest_instruction_pointer.

ReturnFalseWithVmxOff:;
  __vmx_off();

ReturnFalse:;
  VmpFreeProcessorData(processor_data);
}