// Initialize an EPT entry with a "pass through" attribute _Use_decl_annotations_ static void EptpInitTableEntry( EptCommonEntry *entry, ULONG table_level, ULONG64 physical_address) { entry->fields.read_access = true; entry->fields.write_access = true; entry->fields.execute_access = true; entry->fields.physial_address = UtilPfnFromPa(physical_address); if (table_level == 1) { entry->fields.memory_type = static_cast<ULONG64>(memory_type::kWriteBack); } }
// Stop showing a shadow page _Use_decl_annotations_ static void SbppDisablePageShadowing( const PatchInformation& info, EptData* ept_data) { // Replace with a fake copy const auto pa_base = UtilPaFromVa(PAGE_ALIGN(info.patch_address)); const auto ept_pt_entry = EptGetEptPtEntry(ept_data, pa_base); ept_pt_entry->fields.execute_access = true; ept_pt_entry->fields.write_access = true; ept_pt_entry->fields.read_access = true; ept_pt_entry->fields.physial_address = UtilPfnFromPa(pa_base); UtilInveptAll(); }
// Show a shadowed page for read and write _Use_decl_annotations_ static void SbppEnablePageShadowingForRW( const PatchInformation& info, EptData* ept_data) { const auto ept_pt_entry = EptGetEptPtEntry(ept_data, UtilPaFromVa(info.patch_address)); // Allow a guest to read and write as well as execute the address. Show the // copied page for read/write that does not have an breakpoint but reflects // all modification by a guest if that happened. ept_pt_entry->fields.write_access = true; ept_pt_entry->fields.read_access = true; ept_pt_entry->fields.physial_address = UtilPfnFromPa(info.pa_base_for_rw); UtilInveptAll(); }
// Show a shadowed page for execution _Use_decl_annotations_ static void SbppEnablePageShadowingForExec( const PatchInformation& info, EptData* ept_data) { const auto ept_pt_entry = EptGetEptPtEntry(ept_data, UtilPaFromVa(info.patch_address)); // Allow the VMM to redirect read and write access to the address by dening // those accesses and handling them on EPT violation ept_pt_entry->fields.write_access = false; ept_pt_entry->fields.read_access = false; // Only execute is allowed now to the adresss. Show the copied page for exec // that has an actual breakpoint to the guest. ept_pt_entry->fields.physial_address = UtilPfnFromPa(info.pa_base_for_exec); UtilInveptAll(); }
// Builds EPT, allocates pre-allocated entries, initializes and returns
// EptData. Constructs EPT tables covering every reported physical memory
// page plus the APIC base page, and reserves a pool of pre-allocated EPT
// entries for later use. Returns nullptr on any allocation failure, after
// undoing all partial work.
_Use_decl_annotations_ EptData *EptInitialization() {
  PAGED_CODE();

  static const auto kEptPageWalkLevel = 4ul;  // EPT uses a 4-level page walk

  // Allocate ept_data
  const auto ept_data = reinterpret_cast<EptData *>(ExAllocatePoolWithTag(
      NonPagedPool, sizeof(EptData), kHyperPlatformCommonPoolTag));
  if (!ept_data) {
    return nullptr;
  }
  RtlZeroMemory(ept_data, sizeof(EptData));

  // Allocate EptPointer (the EPTP value; note the "poiner" typo is kept for
  // consistency with the rest of this function)
  const auto ept_poiner = reinterpret_cast<EptPointer *>(ExAllocatePoolWithTag(
      NonPagedPool, PAGE_SIZE, kHyperPlatformCommonPoolTag));
  if (!ept_poiner) {
    ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
    return nullptr;
  }
  RtlZeroMemory(ept_poiner, PAGE_SIZE);

  // Allocate EPT_PML4 and initialize EptPointer
  const auto ept_pml4 =
      reinterpret_cast<EptCommonEntry *>(ExAllocatePoolWithTag(
          NonPagedPool, PAGE_SIZE, kHyperPlatformCommonPoolTag));
  if (!ept_pml4) {
    ExFreePoolWithTag(ept_poiner, kHyperPlatformCommonPoolTag);
    ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
    return nullptr;
  }
  RtlZeroMemory(ept_pml4, PAGE_SIZE);
  ept_poiner->fields.memory_type =
      static_cast<ULONG64>(EptpGetMemoryType(UtilPaFromVa(ept_pml4)));
  // The page_walk_length field is encoded as (number of levels - 1)
  ept_poiner->fields.page_walk_length = kEptPageWalkLevel - 1;
  ept_poiner->fields.pml4_address = UtilPfnFromPa(UtilPaFromVa(ept_pml4));

  // Initialize all EPT entries for all physical memory pages
  const auto pm_ranges = UtilGetPhysicalMemoryRanges();
  for (auto run_index = 0ul; run_index < pm_ranges->number_of_runs;
       ++run_index) {
    const auto run = &pm_ranges->run[run_index];
    const auto base_addr = run->base_page * PAGE_SIZE;
    for (auto page_index = 0ull; page_index < run->page_count; ++page_index) {
      const auto indexed_addr = base_addr + page_index * PAGE_SIZE;
      const auto ept_pt_entry =
          EptpConstructTables(ept_pml4, 4, indexed_addr, nullptr);
      if (!ept_pt_entry) {
        // Tear down everything built so far before bailing out
        EptpDestructTables(ept_pml4, 4);
        ExFreePoolWithTag(ept_poiner, kHyperPlatformCommonPoolTag);
        ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
        return nullptr;
      }
    }
  }

  // Initialize an EPT entry for APIC_BASE. It is required to allocate it now;
  // otherwise the system hangs. NOTE(review): the original comment said "for
  // some reasons" — presumably the APIC base is not part of the reported
  // physical memory ranges above; exact cause unconfirmed.
  const Ia32ApicBaseMsr apic_msr = {UtilReadMsr64(Msr::kIa32ApicBase)};
  if (!EptpConstructTables(ept_pml4, 4, apic_msr.fields.apic_base * PAGE_SIZE,
                           nullptr)) {
    EptpDestructTables(ept_pml4, 4);
    ExFreePoolWithTag(ept_poiner, kHyperPlatformCommonPoolTag);
    ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
    return nullptr;
  }

  // Allocate preallocated_entries (array of pointers, not the entries yet)
  const auto preallocated_entries_size =
      sizeof(EptCommonEntry *) * kEptpNumberOfPreallocatedEntries;
  const auto preallocated_entries = reinterpret_cast<EptCommonEntry **>(
      ExAllocatePoolWithTag(NonPagedPool, preallocated_entries_size,
                            kHyperPlatformCommonPoolTag));
  if (!preallocated_entries) {
    EptpDestructTables(ept_pml4, 4);
    ExFreePoolWithTag(ept_poiner, kHyperPlatformCommonPoolTag);
    ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
    return nullptr;
  }
  RtlZeroMemory(preallocated_entries, preallocated_entries_size);

  // And fill preallocated_entries with newly created entries
  for (auto i = 0ul; i < kEptpNumberOfPreallocatedEntries; ++i) {
    const auto ept_entry = EptpAllocateEptEntry(nullptr);
    if (!ept_entry) {
      // NOTE(review): passing 0 as used count — presumably
      // EptpFreeUnusedPreAllocatedEntries frees every non-null slot of the
      // zero-initialized array plus the array itself; verify its contract.
      EptpFreeUnusedPreAllocatedEntries(preallocated_entries, 0);
      EptpDestructTables(ept_pml4, 4);
      ExFreePoolWithTag(ept_poiner, kHyperPlatformCommonPoolTag);
      ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
      return nullptr;
    }
    preallocated_entries[i] = ept_entry;
  }

  // Initialization completed
  ept_data->ept_pointer = ept_poiner;
  ept_data->ept_pml4 = ept_pml4;
  ept_data->preallocated_entries = preallocated_entries;
  ept_data->preallocated_entries_count = 0;
  return ept_data;
}
// Builds the EPT structures needed for nested virtualization: publishes the
// guest's own EPT pointer (EPT1-2) through |ept_data12| and creates a new
// shadow EPT (EPT0-2) through |ept_data02| whose PML4 entries are left
// non-present so the first guest access raises an EPT violation.
//
// @param vmcs12_va  EPT pointer value taken from the guest (VMCS1-2).
// @param ept_data12 Receives the guest's EPT pointer and its PML4 address.
// @param ept_data02 Receives the newly built shadow EPT.
// @return STATUS_SUCCESS on success; an error status otherwise.
//
// Fixes over the previous revision:
// - Failure paths now return a failure NTSTATUS instead of unconditionally
//   returning STATUS_SUCCESS (callers could never detect a failed build).
// - NonPagedPoolMustSucceed is deprecated and bugchecks on failure (which
//   made the NULL checks dead code); plain NonPagedPool is used so the
//   NULL checks are meaningful.
NTSTATUS EptpBuildNestedEpt(
    ULONG_PTR vmcs12_va, EptData* ept_data12, EptData* ept_data02) {
  if (!vmcs12_va || !ept_data12 || !ept_data02) {
    return STATUS_INVALID_PARAMETER;
  }

  // Holds the EPT pointer value the guest programmed (EPT1-2).
  const auto ept12_ptr = reinterpret_cast<EptPointer*>(
      ExAllocatePoolWithTag(NonPagedPool, PAGE_SIZE, 'eptp'));
  if (!ept12_ptr) {
    return STATUS_INSUFFICIENT_RESOURCES;
  }
  RtlZeroMemory(ept12_ptr, PAGE_SIZE);

  // The shadow EPT pointer (EPT0-2) actually loaded on behalf of the guest.
  const auto ept02_ptr = reinterpret_cast<EptPointer*>(
      ExAllocatePoolWithTag(NonPagedPool, PAGE_SIZE, 'eptp'));
  if (!ept02_ptr) {
    ExFreePool(ept12_ptr);
    return STATUS_INSUFFICIENT_RESOURCES;
  }
  RtlZeroMemory(ept02_ptr, PAGE_SIZE);

  // PML4 page for the shadow EPT.
  const auto pml4_entry = reinterpret_cast<EptCommonEntry*>(
      ExAllocatePoolWithTag(NonPagedPool, PAGE_SIZE, 'pml4'));
  if (!pml4_entry) {
    ExFreePool(ept12_ptr);
    ExFreePool(ept02_ptr);
    return STATUS_INSUFFICIENT_RESOURCES;
  }
  RtlZeroMemory(pml4_entry, PAGE_SIZE);

  // EPT1-2 simply mirrors the raw value supplied by the guest.
  ept12_ptr->all = vmcs12_va;

  // Leave the first PML4 entry non-present (no R/W/X, memory type 0) so any
  // access faults and can be populated on demand. Redundant after
  // RtlZeroMemory, but kept explicit for clarity.
  pml4_entry->fields.read_access = false;
  pml4_entry->fields.write_access = false;
  pml4_entry->fields.execute_access = false;
  pml4_entry->fields.memory_type = 0;

  ept02_ptr->fields.memory_type = static_cast<ULONG>(memory_type::kWriteBack);
  ept02_ptr->fields.pml4_address = UtilPfnFromPa(UtilPaFromVa(pml4_entry));
  // page_walk_length is encoded as (number of levels - 1).
  ept02_ptr->fields.page_walk_length = 4 - 1;
  ept02_ptr->fields.enable_accessed_and_dirty_flags = false;

  ept_data02->ept_pointer = ept02_ptr;
  ept_data02->ept_pml4 = pml4_entry;
  ept_data12->ept_pointer = ept12_ptr;
  // Derive the VA of the guest's PML4 from the PFN encoded in its EPTP.
  ept_data12->ept_pml4 = reinterpret_cast<EptCommonEntry*>(
      UtilVaFromPfn(ept12_ptr->fields.pml4_address));

  // vmcs0-2 runs with ept0-2
  return STATUS_SUCCESS;
}