// Deal with L2 EPT violation VM-exit. _Use_decl_annotations_ void EptHandleEptViolationEx(EptData *ept_data, EptData *ept_data02, ULONG_PTR guest_pa, bool is_range_of_ept12) { const EptViolationQualification exit_qualification = { UtilVmRead(VmcsField::kExitQualification) }; ULONG_PTR fault_pa = 0; if (!guest_pa) { fault_pa = UtilVmRead64(VmcsField::kGuestPhysicalAddress); } else { fault_pa = guest_pa; } const auto fault_va = reinterpret_cast<void *>( exit_qualification.fields.valid_guest_linear_address ? UtilVmRead(VmcsField::kGuestLinearAddress) : 0); //GuestPhysicalAddress will be the guest physical adderss of EPT1-2 Entry , we disable it write in L2 first initial if (!exit_qualification.fields.ept_readable && !exit_qualification.fields.ept_writeable && !exit_qualification.fields.ept_executable) { const auto ept_entry = EptGetEptPtEntry(ept_data, fault_pa); if (!ept_entry || !ept_entry->all) { // EPT entry miss. It should be device memory. HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE(); if (!IsReleaseBuild()) { NT_VERIFY(EptpIsDeviceMemory(fault_pa)); } EptpConstructTables(ept_data->ept_pml4, 4, fault_pa, ept_data); UtilInveptGlobal(); return; } } if (!exit_qualification.fields.ept_writeable && is_range_of_ept12) { EptCommonEntry* Ept01Pte = EptGetEptPtEntry(ept_data, UtilVmRead64(VmcsField::kGuestPhysicalAddress)); if (Ept01Pte) { EptCommonEntry* entry = (EptCommonEntry*)UtilVaFromPa(UtilVmRead64(VmcsField::kGuestPhysicalAddress)); Ept01Pte->fields.write_access = true; HYPERPLATFORM_LOG_DEBUG_SAFE("Faced non-writable address but it is readble. :%p %p", UtilVmRead64(VmcsField::kGuestPhysicalAddress), entry->fields.physial_address); UtilInveptGlobal(); } } }
// Deal with EPT violation VM-exit. _Use_decl_annotations_ void EptHandleEptViolation(EptData *ept_data) { const EptViolationQualification exit_qualification = { UtilVmRead(VmcsField::kExitQualification)}; const auto fault_pa = UtilVmRead64(VmcsField::kGuestPhysicalAddress); const auto fault_va = exit_qualification.fields.valid_guest_linear_address ? UtilVmRead(VmcsField::kGuestLinearAddress) : 0; if (!exit_qualification.fields.ept_readable && !exit_qualification.fields.ept_writeable && !exit_qualification.fields.ept_executable) { const auto ept_entry = EptGetEptPtEntry(ept_data, fault_pa); if (!ept_entry || !ept_entry->all) { // EPT entry miss. It should be device memory. HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE(); if (!IsReleaseBuild()) { NT_VERIFY(EptpIsDeviceMemory(fault_pa)); } EptpConstructTables(ept_data->ept_pml4, 4, fault_pa, ept_data); UtilInveptAll(); return; } } HYPERPLATFORM_LOG_DEBUG_SAFE("[IGNR] OTH VA = %p, PA = %016llx", fault_va, fault_pa); }
// Applies the requested access rights to the EPT entry that maps
// physical_address. Does nothing when the entry does not exist or has no
// backing page frame. Note: the caller is responsible for any EPT
// invalidation afterwards.
//
// @param ept_data         EPT data whose tables are walked
// @param physical_address guest physical address whose entry is updated
// @param readable         new read permission
// @param writable         new write permission
// @param executable       new execute permission
_Use_decl_annotations_ void EptpSetEntryAccess(EptData *ept_data,
                                               ULONG_PTR physical_address,
                                               bool readable, bool writable,
                                               bool executable) {
  EptCommonEntry *entry = EptGetEptPtEntry(ept_data, physical_address);
  if (!entry || !entry->fields.physial_address) {
    return;
  }
  // BUG FIX: readable and executable were previously ignored; only
  // write_access was updated despite the function's name and parameters.
  entry->fields.read_access = readable;
  entry->fields.write_access = writable;
  entry->fields.execute_access = executable;
}
// Stop showing a shadow page _Use_decl_annotations_ static void SbppDisablePageShadowing( const PatchInformation& info, EptData* ept_data) { // Replace with a fake copy const auto pa_base = UtilPaFromVa(PAGE_ALIGN(info.patch_address)); const auto ept_pt_entry = EptGetEptPtEntry(ept_data, pa_base); ept_pt_entry->fields.execute_access = true; ept_pt_entry->fields.write_access = true; ept_pt_entry->fields.read_access = true; ept_pt_entry->fields.physial_address = UtilPfnFromPa(pa_base); UtilInveptAll(); }
// EXIT_REASON_EPT_MISCONFIG _Use_decl_annotations_ static void VmmpHandleEptMisconfig( GuestContext *guest_context) { UNREFERENCED_PARAMETER(guest_context); const auto fault_address = UtilVmRead(VmcsField::kGuestPhysicalAddress); const auto ept_pt_entry = EptGetEptPtEntry( guest_context->stack->processor_data->ept_data, fault_address); HYPERPLATFORM_COMMON_BUG_CHECK(HyperPlatformBugCheck::kEptMisconfigVmExit, fault_address, reinterpret_cast<ULONG_PTR>(ept_pt_entry), 0); }
// Returns an EPT entry corresponds to the physical_address _Use_decl_annotations_ EptCommonEntry *EptGetEptPtEntry( EptCommonEntry *table, ULONG table_level, ULONG64 physical_address) { switch (table_level) { case 4: { // table == PML4 const auto pxe_index = EptpAddressToPxeIndex(physical_address); const auto ept_pml4_entry = &table[pxe_index]; return EptGetEptPtEntry(reinterpret_cast<EptCommonEntry *>(UtilVaFromPfn( ept_pml4_entry->fields.physial_address)), table_level - 1, physical_address); } case 3: { // table == PDPT const auto ppe_index = EptpAddressToPpeIndex(physical_address); const auto ept_pdpt_entry = &table[ppe_index]; return EptGetEptPtEntry(reinterpret_cast<EptCommonEntry *>(UtilVaFromPfn( ept_pdpt_entry->fields.physial_address)), table_level - 1, physical_address); } case 2: { // table == PDT const auto pde_index = EptpAddressToPdeIndex(physical_address); const auto ept_pdt_entry = &table[pde_index]; return EptGetEptPtEntry(reinterpret_cast<EptCommonEntry *>(UtilVaFromPfn( ept_pdt_entry->fields.physial_address)), table_level - 1, physical_address); } case 1: { // table == PT const auto pte_index = EptpAddressToPteIndex(physical_address); const auto ept_pt_entry = &table[pte_index]; return ept_pt_entry; } default: HYPERPLATFORM_COMMON_DBG_BREAK(); return nullptr; } }
// Show a shadowed page for read and write _Use_decl_annotations_ static void SbppEnablePageShadowingForRW( const PatchInformation& info, EptData* ept_data) { const auto ept_pt_entry = EptGetEptPtEntry(ept_data, UtilPaFromVa(info.patch_address)); // Allow a guest to read and write as well as execute the address. Show the // copied page for read/write that does not have an breakpoint but reflects // all modification by a guest if that happened. ept_pt_entry->fields.write_access = true; ept_pt_entry->fields.read_access = true; ept_pt_entry->fields.physial_address = UtilPfnFromPa(info.pa_base_for_rw); UtilInveptAll(); }
// Show a shadowed page for execution _Use_decl_annotations_ static void SbppEnablePageShadowingForExec( const PatchInformation& info, EptData* ept_data) { const auto ept_pt_entry = EptGetEptPtEntry(ept_data, UtilPaFromVa(info.patch_address)); // Allow the VMM to redirect read and write access to the address by dening // those accesses and handling them on EPT violation ept_pt_entry->fields.write_access = false; ept_pt_entry->fields.read_access = false; // Only execute is allowed now to the adresss. Show the copied page for exec // that has an actual breakpoint to the guest. ept_pt_entry->fields.physial_address = UtilPfnFromPa(info.pa_base_for_exec); UtilInveptAll(); }
// Deal with EPT violation VM-exit. _Use_decl_annotations_ void EptHandleEptViolation(EptData *ept_data) { const EptViolationQualification exit_qualification = { UtilVmRead(VmcsField::kExitQualification)}; const auto fault_pa = UtilVmRead64(VmcsField::kGuestPhysicalAddress); const auto fault_va = exit_qualification.fields.valid_guest_linear_address ? reinterpret_cast<void *>(UtilVmRead(VmcsField::kGuestLinearAddress)) : nullptr; if (!exit_qualification.fields.ept_readable && !exit_qualification.fields.ept_writeable && !exit_qualification.fields.ept_executable) { // EPT entry miss. It should be device memory. HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE(); // HYPERPLATFORM_LOG_DEBUG_SAFE( // "[INIT] Dev VA = %p, PA = %016llx, Used = %d", // 0, fault_pa, ept_data->preallocated_entries_count); if (!IsReleaseBuild()) { const auto is_device_memory = EptpIsDeviceMemory(fault_pa); NT_ASSERT(is_device_memory); UNREFERENCED_PARAMETER(is_device_memory); } // There is a race condition here. If multiple processors reach this code // with the same fault_pa, this function may create multiple EPT entries for // one physical address and leads memory leak. This call should probably be // guarded by a spin-lock but is not yet just because impact is so small. EptpConstructTables(ept_data->ept_pml4, 4, fault_pa, ept_data); UtilInveptAll(); } else if (exit_qualification.fields.caused_by_translation && exit_qualification.fields.execute_access && !exit_qualification.fields.ept_executable) { const auto ept_pt_entry = EptGetEptPtEntry(ept_data->ept_pml4, 4, fault_pa); MmoneptHandleDodgyRegionExecution(ept_data->hs_ept_data, ept_pt_entry, fault_pa, fault_va); } else { HYPERPLATFORM_LOG_DEBUG_SAFE("[IGNR] OTH VA = %p, PA = %016llx", fault_va, fault_pa); } }