Example #1
// Deal with EPT violation VM-exit.
_Use_decl_annotations_ void EptHandleEptViolation(EptData *ept_data) {
  const EptViolationQualification exit_qualification = {
      UtilVmRead(VmcsField::kExitQualification)};

  const auto fault_pa = UtilVmRead64(VmcsField::kGuestPhysicalAddress);
  const auto fault_va = exit_qualification.fields.valid_guest_linear_address
                            ? UtilVmRead(VmcsField::kGuestLinearAddress)
                            : 0;

  if (!exit_qualification.fields.ept_readable &&
      !exit_qualification.fields.ept_writeable &&
      !exit_qualification.fields.ept_executable) {
    const auto ept_entry = EptGetEptPtEntry(ept_data, fault_pa);
    if (!ept_entry || !ept_entry->all) {
      // EPT entry miss. It should be device memory.
      HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();

      if (!IsReleaseBuild()) {
        NT_VERIFY(EptpIsDeviceMemory(fault_pa));
      }
      EptpConstructTables(ept_data->ept_pml4, 4, fault_pa, ept_data);

      UtilInveptAll();
      return;
    }
  }
  HYPERPLATFORM_LOG_DEBUG_SAFE("[IGNR] OTH VA = %p, PA = %016llx", fault_va,
                               fault_pa);
}
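For context, a handler like the one above is reached from the VM-exit dispatcher when the exit reason is an EPT violation. Below is a minimal sketch of that dispatch in HyperPlatform's style; the GuestContext/ProcessorData member names are assumptions for illustration, not a verified excerpt.

// Minimal sketch: routing the EPT-violation exit reason to the handler above.
// Assumes a HyperPlatform-style GuestContext that carries per-processor data.
_Use_decl_annotations_ static void VmmpHandleEptViolation(
    GuestContext *guest_context) {
  HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
  const auto processor_data = guest_context->stack->processor_data;
  EptHandleEptViolation(processor_data->ept_data);
}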
Example #2
// Deal with L2 EPT violation VM-exit.
_Use_decl_annotations_ void EptHandleEptViolationEx(EptData *ept_data,
                                                    EptData *ept_data02,
                                                    ULONG_PTR guest_pa,
                                                    bool is_range_of_ept12) {
  const EptViolationQualification exit_qualification = {
      UtilVmRead(VmcsField::kExitQualification)};

  // Use the caller-supplied guest physical address if one was given; otherwise
  // read it from the VMCS.
  const auto fault_pa =
      guest_pa ? guest_pa : UtilVmRead64(VmcsField::kGuestPhysicalAddress);

  const auto fault_va = reinterpret_cast<void *>(
      exit_qualification.fields.valid_guest_linear_address
          ? UtilVmRead(VmcsField::kGuestLinearAddress)
          : 0);

  // kGuestPhysicalAddress holds the guest physical address of an EPT1-2 entry;
  // write access to it was disabled during the initial L2 setup.

  if (!exit_qualification.fields.ept_readable &&
      !exit_qualification.fields.ept_writeable &&
      !exit_qualification.fields.ept_executable) {
    const auto ept_entry = EptGetEptPtEntry(ept_data, fault_pa);
    if (!ept_entry || !ept_entry->all) {
      // EPT entry miss. It should be device memory.
      HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();

      if (!IsReleaseBuild()) {
        NT_VERIFY(EptpIsDeviceMemory(fault_pa));
      }
      EptpConstructTables(ept_data->ept_pml4, 4, fault_pa, ept_data);

      UtilInveptGlobal();
      return;
    }
  }

  if (!exit_qualification.fields.ept_writeable && is_range_of_ept12) {
    const auto ept12_entry_pa = UtilVmRead64(VmcsField::kGuestPhysicalAddress);
    const auto ept01_pte = EptGetEptPtEntry(ept_data, ept12_entry_pa);
    if (ept01_pte) {
      const auto ept12_entry =
          reinterpret_cast<EptCommonEntry *>(UtilVaFromPa(ept12_entry_pa));
      // Restore write access to the EPT1-2 entry page and flush stale
      // guest-physical mappings.
      ept01_pte->fields.write_access = true;
      HYPERPLATFORM_LOG_DEBUG_SAFE(
          "Faced a non-writable but readable address: %016llx %016llx",
          ept12_entry_pa, ept12_entry->fields.physial_address);
      UtilInveptGlobal();
    }
  }
}
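The handler above toggles write_access and reads physial_address on an EptCommonEntry. As a reference, this is roughly how HyperPlatform lays out that union (reproduced from memory; treat the exact bit widths as an assumption, and note that the misspelled physial_address is the identifier the code actually uses):

// Approximate layout of an EPT entry shared by all table levels.
union EptCommonEntry {
  ULONG64 all;
  struct {
    ULONG64 read_access : 1;       // [0]
    ULONG64 write_access : 1;      // [1]
    ULONG64 execute_access : 1;    // [2]
    ULONG64 memory_type : 3;       // [3:5]  (meaningful for leaf entries)
    ULONG64 reserved1 : 6;         // [6:11]
    ULONG64 physial_address : 36;  // [12:47] page frame number
    ULONG64 reserved2 : 16;        // [48:63]
  } fields;
};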
Example #3
// Deal with EPT violation VM-exit.
_Use_decl_annotations_ void EptHandleEptViolation(EptData *ept_data) {
  const EptViolationQualification exit_qualification = {
      UtilVmRead(VmcsField::kExitQualification)};

  const auto fault_pa = UtilVmRead64(VmcsField::kGuestPhysicalAddress);
  const auto fault_va =
      exit_qualification.fields.valid_guest_linear_address
          ? reinterpret_cast<void *>(UtilVmRead(VmcsField::kGuestLinearAddress))
          : nullptr;

  if (!exit_qualification.fields.ept_readable &&
      !exit_qualification.fields.ept_writeable &&
      !exit_qualification.fields.ept_executable) {
    // EPT entry miss. It should be device memory.
    HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
    // HYPERPLATFORM_LOG_DEBUG_SAFE(
    //    "[INIT] Dev VA = %p, PA = %016llx, Used = %d",
    //    0, fault_pa, ept_data->preallocated_entries_count);

    if (!IsReleaseBuild()) {
      const auto is_device_memory = EptpIsDeviceMemory(fault_pa);
      NT_ASSERT(is_device_memory);
      UNREFERENCED_PARAMETER(is_device_memory);
    }

    // There is a race condition here. If multiple processors reach this code
    // with the same fault_pa, this function may create multiple EPT entries
    // for one physical address and lead to a memory leak. This call should
    // probably be guarded by a spin-lock, but is not yet because the impact is
    // so small.
    EptpConstructTables(ept_data->ept_pml4, 4, fault_pa, ept_data);

    UtilInveptAll();
  } else if (exit_qualification.fields.caused_by_translation &&
             exit_qualification.fields.execute_access &&
             !exit_qualification.fields.ept_executable) {
    const auto ept_pt_entry = EptGetEptPtEntry(ept_data->ept_pml4, 4, fault_pa);

    MmoneptHandleDodgyRegionExecution(ept_data->hs_ept_data, ept_pt_entry,
                                      fault_pa, fault_va);
  } else {
    HYPERPLATFORM_LOG_DEBUG_SAFE("[IGNR] OTH VA = %p, PA = %016llx", fault_va,
                                 fault_pa);
  }
}
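All of these handlers branch on bits of EptViolationQualification. For reference, here is a sketch of that union covering only the fields used above, following the EPT-violation exit qualification format in the Intel SDM; bits beyond [8] are omitted and treating bit [6] as reserved is an assumption:

// Sketch of the exit qualification for EPT violations; only the bits
// referenced by the handlers above are named.
union EptViolationQualification {
  ULONG64 all;
  struct {
    ULONG64 read_access : 1;                 // [0] data read caused the exit
    ULONG64 write_access : 1;                // [1] data write caused the exit
    ULONG64 execute_access : 1;              // [2] instruction fetch caused it
    ULONG64 ept_readable : 1;                // [3] the EPT entry allowed reads
    ULONG64 ept_writeable : 1;               // [4] the EPT entry allowed writes
    ULONG64 ept_executable : 1;              // [5] the EPT entry allowed execution
    ULONG64 reserved1 : 1;                   // [6]
    ULONG64 valid_guest_linear_address : 1;  // [7]
    ULONG64 caused_by_translation : 1;       // [8]
  } fields;
};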
Example #4
// Builds EPT, allocates pre-allocated entries, initializes and returns EptData
_Use_decl_annotations_ EptData *EptInitialization() {
  PAGED_CODE();

  static const auto kEptPageWalkLevel = 4ul;

  // Allocate ept_data
  const auto ept_data = reinterpret_cast<EptData *>(ExAllocatePoolWithTag(
      NonPagedPool, sizeof(EptData), kHyperPlatformCommonPoolTag));
  if (!ept_data) {
    return nullptr;
  }
  RtlZeroMemory(ept_data, sizeof(EptData));

  // Allocate EptPointer
  const auto ept_poiner = reinterpret_cast<EptPointer *>(ExAllocatePoolWithTag(
      NonPagedPool, PAGE_SIZE, kHyperPlatformCommonPoolTag));
  if (!ept_poiner) {
    ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
    return nullptr;
  }
  RtlZeroMemory(ept_poiner, PAGE_SIZE);

  // Allocate EPT_PML4 and initialize EptPointer
  const auto ept_pml4 =
      reinterpret_cast<EptCommonEntry *>(ExAllocatePoolWithTag(
          NonPagedPool, PAGE_SIZE, kHyperPlatformCommonPoolTag));
  if (!ept_pml4) {
    ExFreePoolWithTag(ept_poiner, kHyperPlatformCommonPoolTag);
    ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
    return nullptr;
  }
  RtlZeroMemory(ept_pml4, PAGE_SIZE);
  ept_poiner->fields.memory_type =
      static_cast<ULONG64>(EptpGetMemoryType(UtilPaFromVa(ept_pml4)));
  ept_poiner->fields.page_walk_length = kEptPageWalkLevel - 1;
  ept_poiner->fields.pml4_address = UtilPfnFromPa(UtilPaFromVa(ept_pml4));

  // Initialize all EPT entries for all physical memory pages
  const auto pm_ranges = UtilGetPhysicalMemoryRanges();
  for (auto run_index = 0ul; run_index < pm_ranges->number_of_runs;
       ++run_index) {
    const auto run = &pm_ranges->run[run_index];
    const auto base_addr = run->base_page * PAGE_SIZE;
    for (auto page_index = 0ull; page_index < run->page_count; ++page_index) {
      const auto indexed_addr = base_addr + page_index * PAGE_SIZE;
      const auto ept_pt_entry =
          EptpConstructTables(ept_pml4, 4, indexed_addr, nullptr);
      if (!ept_pt_entry) {
        EptpDestructTables(ept_pml4, 4);
        ExFreePoolWithTag(ept_poiner, kHyperPlatformCommonPoolTag);
        ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
        return nullptr;
      }
    }
  }

  // Initialize an EPT entry for APIC_BASE. It is required to allocate it now
  // for some reason, or else the system hangs.
  const Ia32ApicBaseMsr apic_msr = {UtilReadMsr64(Msr::kIa32ApicBase)};
  if (!EptpConstructTables(ept_pml4, 4, apic_msr.fields.apic_base * PAGE_SIZE,
                           nullptr)) {
    EptpDestructTables(ept_pml4, 4);
    ExFreePoolWithTag(ept_poiner, kHyperPlatformCommonPoolTag);
    ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
    return nullptr;
  }

  // Allocate preallocated_entries
  const auto preallocated_entries_size =
      sizeof(EptCommonEntry *) * kEptpNumberOfPreallocatedEntries;
  const auto preallocated_entries = reinterpret_cast<EptCommonEntry **>(
      ExAllocatePoolWithTag(NonPagedPool, preallocated_entries_size,
                            kHyperPlatformCommonPoolTag));
  if (!preallocated_entries) {
    EptpDestructTables(ept_pml4, 4);
    ExFreePoolWithTag(ept_poiner, kHyperPlatformCommonPoolTag);
    ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
    return nullptr;
  }
  RtlZeroMemory(preallocated_entries, preallocated_entries_size);

  // And fill preallocated_entries with newly created entries
  for (auto i = 0ul; i < kEptpNumberOfPreallocatedEntries; ++i) {
    const auto ept_entry = EptpAllocateEptEntry(nullptr);
    if (!ept_entry) {
      EptpFreeUnusedPreAllocatedEntries(preallocated_entries, 0);
      EptpDestructTables(ept_pml4, 4);
      ExFreePoolWithTag(ept_poiner, kHyperPlatformCommonPoolTag);
      ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
      return nullptr;
    }
    preallocated_entries[i] = ept_entry;
  }

  // Initialization completed
  ept_data->ept_pointer = ept_poiner;
  ept_data->ept_pml4 = ept_pml4;
  ept_data->preallocated_entries = preallocated_entries;
  ept_data->preallocated_entries_count = 0;
  return ept_data;
}
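The EptData returned here is consumed when the VMCS is set up: the packed EPT pointer is written to the kEptPointer field so the CPU walks the tables built above. A minimal sketch, assuming HyperPlatform's UtilVmWrite64 helper; the wrapper function name is illustrative:

// Minimal sketch: plugging the initialized EPT hierarchy into the VMCS.
// EptPointer::all packs the PML4 PFN, the page-walk length and the memory type.
static void EptpApplyEptPointerSketch(const EptData *ept_data) {
  UtilVmWrite64(VmcsField::kEptPointer, ept_data->ept_pointer->all);
  UtilInveptGlobal();  // flush any stale guest-physical mappings
}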
Example #5
// Allocate and initialize all EPT entries associated with the physical_address
_Use_decl_annotations_ static EptCommonEntry *EptpConstructTables(
    EptCommonEntry *table, ULONG table_level, ULONG64 physical_address,
    EptData *ept_data) {
  switch (table_level) {
    case 4: {
      // table == PML4 (512 GB)
      const auto pxe_index = EptpAddressToPxeIndex(physical_address);
      const auto ept_pml4_entry = &table[pxe_index];
      if (!ept_pml4_entry->all) {
        const auto ept_pdpt = EptpAllocateEptEntry(ept_data);
        if (!ept_pdpt) {
          return nullptr;
        }
        EptpInitTableEntry(ept_pml4_entry, table_level, UtilPaFromVa(ept_pdpt));
      }
      return EptpConstructTables(
          reinterpret_cast<EptCommonEntry *>(
              UtilVaFromPfn(ept_pml4_entry->fields.physial_address)),
          table_level - 1, physical_address, ept_data);
    }
    case 3: {
      // table == PDPT (1 GB)
      const auto ppe_index = EptpAddressToPpeIndex(physical_address);
      const auto ept_pdpt_entry = &table[ppe_index];
      if (!ept_pdpt_entry->all) {
        const auto ept_pdt = EptpAllocateEptEntry(ept_data);
        if (!ept_pdt) {
          return nullptr;
        }
        EptpInitTableEntry(ept_pdpt_entry, table_level, UtilPaFromVa(ept_pdt));
      }
      return EptpConstructTables(
          reinterpret_cast<EptCommonEntry *>(
              UtilVaFromPfn(ept_pdpt_entry->fields.physial_address)),
          table_level - 1, physical_address, ept_data);
    }
    case 2: {
      // table == PDT (2 MB)
      const auto pde_index = EptpAddressToPdeIndex(physical_address);
      const auto ept_pdt_entry = &table[pde_index];
      if (!ept_pdt_entry->all) {
        const auto ept_pt = EptpAllocateEptEntry(ept_data);
        if (!ept_pt) {
          return nullptr;
        }
        EptpInitTableEntry(ept_pdt_entry, table_level, UtilPaFromVa(ept_pt));
      }
      return EptpConstructTables(
          reinterpret_cast<EptCommonEntry *>(
              UtilVaFromPfn(ept_pdt_entry->fields.physial_address)),
          table_level - 1, physical_address, ept_data);
    }
    case 1: {
      // table == PT (4 KB)
      const auto pte_index = EptpAddressToPteIndex(physical_address);
      const auto ept_pt_entry = &table[pte_index];
      NT_ASSERT(!ept_pt_entry->all);
      EptpInitTableEntry(ept_pt_entry, table_level, physical_address);
      return ept_pt_entry;
    }
    default:
      HYPERPLATFORM_COMMON_DBG_BREAK();
      return nullptr;
  }
}
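EptpConstructTables selects a 9-bit slot at each level via EptpAddressToPxeIndex and friends. Here is a sketch of those helpers, assuming the usual 512-entry EPT tables and the shifts implied by the region sizes noted in the comments above (39/30/21/12); the mask constant's name is illustrative:

// Sketch of the per-level index extraction used by EptpConstructTables.
// Each EPT table holds 512 entries, so every level consumes 9 address bits.
static const auto kEptpPtxMask = 0x1ffull;

static ULONG64 EptpAddressToPxeIndex(ULONG64 physical_address) {
  return (physical_address >> 39) & kEptpPtxMask;  // PML4: 512-GB regions
}
static ULONG64 EptpAddressToPpeIndex(ULONG64 physical_address) {
  return (physical_address >> 30) & kEptpPtxMask;  // PDPT: 1-GB regions
}
static ULONG64 EptpAddressToPdeIndex(ULONG64 physical_address) {
  return (physical_address >> 21) & kEptpPtxMask;  // PDT: 2-MB regions
}
static ULONG64 EptpAddressToPteIndex(ULONG64 physical_address) {
  return (physical_address >> 12) & kEptpPtxMask;  // PT: 4-KB pages
}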