// A driver entry point _Use_decl_annotations_ NTSTATUS DriverEntry(PDRIVER_OBJECT driver_object, PUNICODE_STRING registry_path) { UNREFERENCED_PARAMETER(registry_path); PAGED_CODE(); static const wchar_t kLogFilePath[] = L"\\SystemRoot\\HyperPlatform.log"; static const auto kLogLevel = (IsReleaseBuild()) ? kLogPutLevelInfo | kLogOptDisableFunctionName : kLogPutLevelDebug | kLogOptDisableFunctionName; auto status = STATUS_UNSUCCESSFUL; driver_object->DriverUnload = DriverpDriverUnload; HYPERPLATFORM_COMMON_DBG_BREAK(); // Initialize log functions bool need_reinitialization = false; status = LogInitialization(kLogLevel, kLogFilePath); if (status == STATUS_REINITIALIZATION_NEEDED) { need_reinitialization = true; } else if (!NT_SUCCESS(status)) { return status; } // Test if the system is supported if (!DriverpIsSuppoetedOS()) { return STATUS_CANCELLED; } // Initialize perf functions status = PerfInitialization(); if (!NT_SUCCESS(status)) { LogTermination(); return status; } // Initialize utility functions status = UtilInitialization(); if (!NT_SUCCESS(status)) { PerfTermination(); LogTermination(); return status; } // Virtualize all processors status = VmInitialization(); if (!NT_SUCCESS(status)) { UtilTermination(); PerfTermination(); LogTermination(); return status; } // Register re-initialization for the log functions if needed if (need_reinitialization) { LogRegisterReinitialization(driver_object); } HYPERPLATFORM_LOG_INFO("The VMM has been installed."); return status; }
// Selects a register to be used based on the index _Use_decl_annotations_ static ULONG_PTR *VmmpSelectRegister( ULONG index, GuestContext *guest_context) { ULONG_PTR *register_used = nullptr; // clang-format off switch (index) { case 0: register_used = &guest_context->gp_regs->ax; break; case 1: register_used = &guest_context->gp_regs->cx; break; case 2: register_used = &guest_context->gp_regs->dx; break; case 3: register_used = &guest_context->gp_regs->bx; break; case 4: register_used = &guest_context->gp_regs->sp; break; case 5: register_used = &guest_context->gp_regs->bp; break; case 6: register_used = &guest_context->gp_regs->si; break; case 7: register_used = &guest_context->gp_regs->di; break; #if defined(_AMD64_) case 8: register_used = &guest_context->gp_regs->r8; break; case 9: register_used = &guest_context->gp_regs->r9; break; case 10: register_used = &guest_context->gp_regs->r10; break; case 11: register_used = &guest_context->gp_regs->r11; break; case 12: register_used = &guest_context->gp_regs->r12; break; case 13: register_used = &guest_context->gp_regs->r13; break; case 14: register_used = &guest_context->gp_regs->r14; break; case 15: register_used = &guest_context->gp_regs->r15; break; #endif default: HYPERPLATFORM_COMMON_DBG_BREAK(); break; } // clang-format on return register_used; }
// Executes vmlaunch /*_Use_decl_annotations_*/ static void VmpLaunchVM() { auto error_code = UtilVmRead(VmcsField::kVmInstructionError); if (error_code) { HYPERPLATFORM_LOG_WARN("VM_INSTRUCTION_ERROR = %d", error_code); } HYPERPLATFORM_COMMON_DBG_BREAK(); auto vmx_status = static_cast<VmxStatus>(__vmx_vmlaunch()); // Here is not be executed with successful vmlaunch. Instead, the context // jumps to an address specified by GUEST_RIP. if (vmx_status == VmxStatus::kErrorWithStatus) { error_code = UtilVmRead(VmcsField::kVmInstructionError); HYPERPLATFORM_LOG_ERROR("VM_INSTRUCTION_ERROR = %d", error_code); } HYPERPLATFORM_COMMON_DBG_BREAK(); }
// Initializes DdiMon _Use_decl_annotations_ EXTERN_C NTSTATUS DdimonInitialization(SharedShadowHookData* shared_sh_data) { HYPERPLATFORM_COMMON_DBG_BREAK(); // Get a base address of ntoskrnl auto nt_base = UtilPcToFileHeader(KdDebuggerEnabled); if (!nt_base) { return STATUS_UNSUCCESSFUL; } // Install hooks by enumerating exports of ntoskrnl, but not activate them yet auto status = DdimonpEnumExportedSymbols(reinterpret_cast<ULONG_PTR>(nt_base), DdimonpEnumExportedSymbolsCallback, shared_sh_data); if (!NT_SUCCESS(status)) { return status; } // Activate installed hooks status = ShEnableHooks(); if (!NT_SUCCESS(status)) { DdimonpFreeAllocatedTrampolineRegions(); return status; } HYPERPLATFORM_LOG_INFO("DdiMon has been initialized."); return status; }
// Power callback routine dealing with hibernate and sleep _Use_decl_annotations_ static void PowerCallbackpCallbackRoutine( PVOID callback_context, PVOID argument1, PVOID argument2) { UNREFERENCED_PARAMETER(callback_context); PAGED_CODE(); if (argument1 != reinterpret_cast<void*>(PO_CB_SYSTEM_STATE_LOCK)) { return; } HYPERPLATFORM_COMMON_DBG_BREAK(); if (argument2) { // the computer has just reentered S0. HYPERPLATFORM_LOG_INFO("Resuming the system..."); auto status = VmInitialization(); if (!NT_SUCCESS(status)) { HYPERPLATFORM_LOG_ERROR( "Failed to re-virtualize processors. Please unload the driver."); } } else { // the computer is about to exit system power state S0 HYPERPLATFORM_LOG_INFO("Suspending the system..."); VmTermination(); } }
// EXIT_REASON_EPT_MISCONFIG _Use_decl_annotations_ static void VmmpHandleEptMisconfig( GuestContext *guest_context) { UNREFERENCED_PARAMETER(guest_context); const auto fault_address = UtilVmRead(VmcsField::kGuestPhysicalAddress); UNREFERENCED_PARAMETER(fault_address); HYPERPLATFORM_COMMON_DBG_BREAK(); }
// Terminates DdiMon _Use_decl_annotations_ EXTERN_C void DdimonTermination() { PAGED_CODE(); HYPERPLATFORM_COMMON_DBG_BREAK(); ShDisableHooks(); UtilSleep(500); DdimonpFreeAllocatedTrampolineRegions(); HYPERPLATFORM_LOG_INFO("DdiMon has been terminated."); }
// VMCALL
//
// Handles the VMCALL instruction. The backdoor command prepares the guest
// context for de-virtualization of this processor (performed by the caller
// after VMXOFF); any other hypercall number is treated like an unsupported
// VMX instruction.
_Use_decl_annotations_ static void VmmpHandleVmCall(
    GuestContext *guest_context) {
  // VMCALL for Sushi expects that cx holds a command number, and dx holds an
  // address of a context parameter optionally
  const auto hypercall_number = guest_context->gp_regs->cx;
  const auto context = reinterpret_cast<void *>(guest_context->gp_regs->dx);

  if (hypercall_number == kHyperPlatformVmmBackdoorCode) {
    // Unloading requested
    HYPERPLATFORM_COMMON_DBG_BREAK();

    // The processor sets ffff to limits of IDT and GDT when VM-exit occurred.
    // It is not correct value but fine to ignore since vmresume loads correct
    // values from VMCS. But here, we are going to skip vmresume and simply
    // return to where VMCALL is executed. It results in keeping those broken
    // values and ends up with bug check 109, so we should fix them manually.
    const auto gdt_limit = UtilVmRead(VmcsField::kGuestGdtrLimit);
    const auto gdt_base = UtilVmRead(VmcsField::kGuestGdtrBase);
    const auto idt_limit = UtilVmRead(VmcsField::kGuestIdtrLimit);
    const auto idt_base = UtilVmRead(VmcsField::kGuestIdtrBase);
    Gdtr gdtr = {static_cast<USHORT>(gdt_limit), gdt_base};
    Idtr idtr = {static_cast<USHORT>(idt_limit), idt_base};
    __lgdt(&gdtr);
    __lidt(&idtr);

    // Store an address of the management structure to the context parameter.
    // NOTE(review): context (taken from guest dx) is dereferenced without
    // validation — assumes the backdoor caller passed a valid writable
    // pointer; confirm against the calling convention.
    const auto result_ptr = reinterpret_cast<ProcessorData **>(context);
    *result_ptr = guest_context->stack->processor_data;
    HYPERPLATFORM_LOG_DEBUG_SAFE("Context at %p %p", context,
                                 guest_context->stack->processor_data);

    // Set rip to the next instruction of VMCALL
    const auto exit_instruction_length =
        UtilVmRead(VmcsField::kVmExitInstructionLen);
    const auto return_address = guest_context->ip + exit_instruction_length;

    // Since rflags is overwritten after VMXOFF, we should manually indicate
    // that VMCALL was successful by clearing those flags.
    guest_context->flag_reg.fields.cf = false;
    guest_context->flag_reg.fields.zf = false;

    // Set registers used after VMXOFF to recover the context. Volatile
    // registers must be used because those changes are reflected to the
    // guest's context after VMXOFF.
    guest_context->gp_regs->cx = return_address;
    guest_context->gp_regs->dx = guest_context->gp_regs->sp;
    guest_context->gp_regs->ax = guest_context->flag_reg.all;
    guest_context->vm_continue = false;

    UtilInveptAll();
  } else {
    // Unsupported hypercall. Handle like other VMX instructions
    VmmpHandleVmx(guest_context);
  }
}
// Disables page shadowing for all breakpoints _Use_decl_annotations_ void SbpVmCallDisablePageShadowing(EptData* ept_data, void* context) { HYPERPLATFORM_COMMON_DBG_BREAK(); const auto breakpoints = reinterpret_cast<std::vector<std::unique_ptr<PatchInformation>>*>( context); for (auto& info : *breakpoints) { SbppDisablePageShadowing(*info, ept_data); } }
// Unload handler _Use_decl_annotations_ static void DriverpDriverUnload( PDRIVER_OBJECT driver_object) { UNREFERENCED_PARAMETER(driver_object); PAGED_CODE(); HYPERPLATFORM_COMMON_DBG_BREAK(); VmTermination(); UtilTermination(); PerfTermination(); LogTermination(); }
// Enables page shadowing for all breakpoints _Use_decl_annotations_ NTSTATUS SbpVmCallEnablePageShadowing(EptData* ept_data, void* context) { HYPERPLATFORM_COMMON_DBG_BREAK(); auto breakpoints = reinterpret_cast<std::vector<std::unique_ptr<PatchInformation>>*>( context); for (auto& info : *breakpoints) { SbppEnablePageShadowingForExec(*info, ept_data); } return STATUS_SUCCESS; }
// Returns an EPT entry corresponds to the physical_address _Use_decl_annotations_ static EptCommonEntry *EptpGetEptPtEntry( EptCommonEntry *table, ULONG table_level, ULONG64 physical_address) { if (!table) { return nullptr; } switch (table_level) { case 4: { // table == PML4 const auto pxe_index = EptpAddressToPxeIndex(physical_address); const auto ept_pml4_entry = &table[pxe_index]; if (!ept_pml4_entry->all) { return nullptr; } return EptpGetEptPtEntry(reinterpret_cast<EptCommonEntry *>(UtilVaFromPfn( ept_pml4_entry->fields.physial_address)), table_level - 1, physical_address); } case 3: { // table == PDPT const auto ppe_index = EptpAddressToPpeIndex(physical_address); const auto ept_pdpt_entry = &table[ppe_index]; if (!ept_pdpt_entry->all) { return nullptr; } return EptpGetEptPtEntry(reinterpret_cast<EptCommonEntry *>(UtilVaFromPfn( ept_pdpt_entry->fields.physial_address)), table_level - 1, physical_address); } case 2: { // table == PDT const auto pde_index = EptpAddressToPdeIndex(physical_address); const auto ept_pdt_entry = &table[pde_index]; if (!ept_pdt_entry->all) { return nullptr; } return EptpGetEptPtEntry(reinterpret_cast<EptCommonEntry *>(UtilVaFromPfn( ept_pdt_entry->fields.physial_address)), table_level - 1, physical_address); } case 1: { // table == PT const auto pte_index = EptpAddressToPteIndex(physical_address); const auto ept_pt_entry = &table[pte_index]; return ept_pt_entry; } default: HYPERPLATFORM_COMMON_DBG_BREAK(); return nullptr; } }
// Parks the current thread forever after a PatchGuard context was detected,
// so the PatchGuard work item never runs again. Lowers IRQL to PASSIVE_LEVEL
// first, then waits on the current thread object (which never signals while
// the thread is running), retrying on any spurious wake-up.
_Use_decl_annotations_ void GMonWaitForever(const AllRegisters *registers,
                                            ULONG_PTR stack_pointer) {
  UNREFERENCED_PARAMETER(registers);
  UNREFERENCED_PARAMETER(stack_pointer);

  HYPERPLATFORM_LOG_INFO_SAFE(
      "PatchGuard context has been detected and terminated.");
  HYPERPLATFORM_COMMON_DBG_BREAK();

// Lowering IRQL like this trips analysis warning 28138; it is intentional
// here, so suppress it only around this call.
#pragma warning(push)
#pragma warning(disable : 28138)
  KeLowerIrql(PASSIVE_LEVEL);
// BUGFIX: this was a second `push`, which left the warning-state stack
// unbalanced and leaked the 28138 suppression into the rest of the file.
#pragma warning(pop)

  // Wait until this thread ends == never returns
  for (auto status = STATUS_SUCCESS;;) {
    status = KeWaitForSingleObject(PsGetCurrentThread(), Executive, KernelMode,
                                   FALSE, nullptr);
    HYPERPLATFORM_LOG_WARN("Oops? (%08x)", status);
    UtilSleep(60000);
  }
}
// Frees all used EPT entries by walking through whole EPT _Use_decl_annotations_ static void EptpDestructTables(EptCommonEntry *table, ULONG table_level) { for (auto i = 0ul; i < 512; ++i) { const auto entry = table[i]; if (entry.fields.physial_address) { const auto sub_table = reinterpret_cast<EptCommonEntry *>( UtilVaFromPfn(entry.fields.physial_address)); switch (table_level) { case 4: // table == PML4, sub_table == PDPT case 3: // table == PDPT, sub_table == PDT EptpDestructTables(sub_table, table_level - 1); break; case 2: // table == PDT, sub_table == PT ExFreePoolWithTag(sub_table, kHyperPlatformCommonPoolTag); break; default: HYPERPLATFORM_COMMON_DBG_BREAK(); break; } } } ExFreePoolWithTag(table, kHyperPlatformCommonPoolTag); }
// Allocate and initialize all EPT entries associated with the physical_address
//
// Walks (and lazily builds) the 4-level EPT for physical_address and returns
// the final PT entry, or nullptr on allocation failure / invalid level. When
// `reserved` is non-null it points to the same-level table of a pre-built
// hierarchy (EPT01): instead of allocating a fresh sub-table, the matching
// entry is copied from it and `reserved` is re-pointed at that hierarchy's
// child table for the next recursion level.
_Use_decl_annotations_ EptCommonEntry *EptpConstructTablesEx(
    EptCommonEntry *table, ULONG table_level, ULONG64 physical_address,
    EptData *ept_data, EptCommonEntry* reserved) {
  switch (table_level) {
    case 4: {
      // table == PML4 (512 GB)
      const auto pxe_index = EptpAddressToPxeIndex(physical_address);
      const auto ept_pml4_entry = &table[pxe_index];
      if (!ept_pml4_entry->all) {
        if (!reserved) {
          // No source hierarchy: allocate a fresh PDPT
          const auto ept_pdpt = EptpAllocateEptEntry(ept_data);
          if (!ept_pdpt) {
            return nullptr;
          }
          EptpInitTableEntry(ept_pml4_entry, table_level,
                             UtilPaFromVa(ept_pdpt));
        } else {
          // Copy the entry from the reserved hierarchy and descend into it
          const auto ept01_pml4_entry = &reserved[pxe_index];
          ept_pml4_entry->all = ept01_pml4_entry->all;
          reserved = reinterpret_cast<EptCommonEntry *>(
              UtilVaFromPfn(ept01_pml4_entry->fields.physial_address));
        }
      }
      // NOTE(review): when the entry was already present, `reserved` is
      // passed down unadjusted (still pointing at this level's table) —
      // confirm callers guarantee this cannot index the wrong table below.
      return EptpConstructTablesEx(
          reinterpret_cast<EptCommonEntry *>(
              UtilVaFromPfn(ept_pml4_entry->fields.physial_address)),
          table_level - 1, physical_address, ept_data, reserved);
    }
    case 3: {
      // table == PDPT (1 GB)
      const auto ppe_index = EptpAddressToPpeIndex(physical_address);
      const auto ept_pdpt_entry = &table[ppe_index];
      if (!ept_pdpt_entry->all) {
        if (!reserved) {
          // No source hierarchy: allocate a fresh PDT
          const auto ept_pdt = EptpAllocateEptEntry(ept_data);
          if (!ept_pdt) {
            return nullptr;
          }
          EptpInitTableEntry(ept_pdpt_entry, table_level,
                            UtilPaFromVa(ept_pdt));
        } else {
          // Copy the entry from the reserved hierarchy and descend into it
          const auto ept01_pdpt_entry = &reserved[ppe_index];
          ept_pdpt_entry->all = ept01_pdpt_entry->all;
          reserved = reinterpret_cast<EptCommonEntry *>(
              UtilVaFromPfn(ept01_pdpt_entry->fields.physial_address));
        }
      }
      return EptpConstructTablesEx(
          reinterpret_cast<EptCommonEntry *>(
              UtilVaFromPfn(ept_pdpt_entry->fields.physial_address)),
          table_level - 1, physical_address, ept_data, reserved);
    }
    case 2: {
      // table == PDT (2 MB)
      const auto pde_index = EptpAddressToPdeIndex(physical_address);
      const auto ept_pdt_entry = &table[pde_index];
      if (!ept_pdt_entry->all) {
        if (!reserved) {
          // No source hierarchy: allocate a fresh PT
          const auto ept_pt = EptpAllocateEptEntry(ept_data);
          if (!ept_pt) {
            return nullptr;
          }
          EptpInitTableEntry(ept_pdt_entry, table_level, UtilPaFromVa(ept_pt));
        } else {
          // Copy the entry from the reserved hierarchy and descend into it
          const auto ept01_pdt_entry = &reserved[pde_index];
          ept_pdt_entry->all = ept01_pdt_entry->all;
          reserved = reinterpret_cast<EptCommonEntry *>(
              UtilVaFromPfn(ept01_pdt_entry->fields.physial_address));
        }
      }
      return EptpConstructTablesEx(
          reinterpret_cast<EptCommonEntry *>(
              UtilVaFromPfn(ept_pdt_entry->fields.physial_address)),
          table_level - 1, physical_address, ept_data, reserved);
    }
    case 1: {
      // table == PT (4 KB) — the leaf level; initialize or copy the final
      // entry mapping this physical address
      const auto pte_index = EptpAddressToPteIndex(physical_address);
      const auto ept_pt_entry = &table[pte_index];
      // NT_ASSERT(!ept_pt_entry->all);
      if (!ept_pt_entry->all) {
        if (!reserved) {
          EptpInitTableEntry(ept_pt_entry, table_level, physical_address);
        } else {
          const auto ept01_pt_entry = &reserved[pte_index];
          ept_pt_entry->all = ept01_pt_entry->all;
        }
      }
      return ept_pt_entry;
    }
    default:
      HYPERPLATFORM_COMMON_DBG_BREAK();
      return nullptr;
  }
}
// MOV to / from CRx _Use_decl_annotations_ static void VmmpHandleCrAccess( GuestContext *guest_context) { HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE(); const MovCrQualification exit_qualification = { UtilVmRead(VmcsField::kExitQualification)}; const auto register_used = VmmpSelectRegister(exit_qualification.fields.gp_register, guest_context); switch (static_cast<MovCrAccessType>(exit_qualification.fields.access_type)) { case MovCrAccessType::kMoveToCr: { switch (exit_qualification.fields.control_register) { // CR0 <- Reg case 0: if (UtilIsX86Pae()) { UtilLoadPdptes(UtilVmRead(VmcsField::kGuestCr3)); } UtilVmWrite(VmcsField::kGuestCr0, *register_used); UtilVmWrite(VmcsField::kCr0ReadShadow, *register_used); break; // CR3 <- Reg case 3: if (UtilIsX86Pae()) { UtilLoadPdptes(*register_used); } UtilVmWrite(VmcsField::kGuestCr3, *register_used); break; // CR4 <- Reg case 4: if (UtilIsX86Pae()) { UtilLoadPdptes(UtilVmRead(VmcsField::kGuestCr3)); } UtilVmWrite(VmcsField::kGuestCr4, *register_used); UtilVmWrite(VmcsField::kCr4ReadShadow, *register_used); break; // CR8 <- Reg case 8: guest_context->cr8 = *register_used; break; default: HYPERPLATFORM_COMMON_BUG_CHECK(HyperPlatformBugCheck::kUnknown, 0, 0, 0); break; } } break; // Note that MOV from CRx should never cause VM-exit with the current // settings. This is just for case when you enable it. case MovCrAccessType::kMoveFromCr: { switch (exit_qualification.fields.control_register) { // Reg <- CR3 case 3: *register_used = UtilVmRead(VmcsField::kGuestCr3); break; // Reg <- CR8 case 8: *register_used = guest_context->cr8; break; default: HYPERPLATFORM_COMMON_BUG_CHECK(HyperPlatformBugCheck::kUnknown, 0, 0, 0); break; } } break; // Unimplemented case MovCrAccessType::kClts: case MovCrAccessType::kLmsw: default: HYPERPLATFORM_COMMON_DBG_BREAK(); break; } VmmpAdjustGuestInstructionPointer(guest_context->ip); }
// MOV to / from CRx
//
// Variant of the CR-access handler with per-case performance measurement
// scopes. MOV-to-CR writes are reflected into the guest VMCS fields (plus the
// CR0/CR4 read shadows); MOV from CR3/CR8 reads the guest values back.
// CLTS/LMSW are not implemented. The guest rip is advanced at the end.
_Use_decl_annotations_ static void VmmpHandleCrAccess(
    GuestContext *guest_context) {
  HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
  // The exit qualification encodes which CR was accessed, the access type,
  // and which general-purpose register was the source/destination
  const MovCrQualification exit_qualification = {
      UtilVmRead(VmcsField::kExitQualification)};
  const auto register_used = VmmpSelectRegister(
      exit_qualification.fields.gp_register, guest_context);

  switch (static_cast<MovCrAccessType>(exit_qualification.fields.access_type)) {
    case MovCrAccessType::kMoveToCr:
      switch (exit_qualification.fields.control_register) {
        // CR0 <- Reg
        case 0: {
          HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
          if (UtilIsX86Pae()) {
            // On x86 PAE, reload the PDPTE registers from the current CR3
            UtilLoadPdptes(UtilVmRead(VmcsField::kGuestCr3));
          }
          UtilVmWrite(VmcsField::kGuestCr0, *register_used);
          UtilVmWrite(VmcsField::kCr0ReadShadow, *register_used);
          break;
        }

        // CR3 <- Reg
        case 3: {
          HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
          if (UtilIsX86Pae()) {
            // On x86 PAE, reload the PDPTE registers from the new CR3 value
            UtilLoadPdptes(*register_used);
          }
          UtilVmWrite(VmcsField::kGuestCr3, *register_used);
          break;
        }

        // CR4 <- Reg
        case 4: {
          HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
          if (UtilIsX86Pae()) {
            UtilLoadPdptes(UtilVmRead(VmcsField::kGuestCr3));
          }
          UtilVmWrite(VmcsField::kGuestCr4, *register_used);
          UtilVmWrite(VmcsField::kCr4ReadShadow, *register_used);
          break;
        }

        // CR8 <- Reg
        case 8: {
          HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
          // CR8 is virtualized in the guest context, not in a VMCS field
          guest_context->cr8 = *register_used;
          break;
        }

        default:
          HYPERPLATFORM_COMMON_BUG_CHECK(HyperPlatformBugCheck::kUnspecified,
                                         0, 0, 0);
          break;
      }
      break;

    case MovCrAccessType::kMoveFromCr:
      switch (exit_qualification.fields.control_register) {
        // Reg <- CR3
        case 3: {
          HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
          *register_used = UtilVmRead(VmcsField::kGuestCr3);
          break;
        }

        // Reg <- CR8
        case 8: {
          HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
          *register_used = guest_context->cr8;
          break;
        }

        default:
          HYPERPLATFORM_COMMON_BUG_CHECK(HyperPlatformBugCheck::kUnspecified,
                                         0, 0, 0);
          break;
      }
      break;

    // Unimplemented
    case MovCrAccessType::kClts:
    case MovCrAccessType::kLmsw:
    default:
      HYPERPLATFORM_COMMON_DBG_BREAK();
      break;
  }

  // Skip over the MOV instruction that caused this exit
  VmmpAdjustGuestInstructionPointer(guest_context->ip);
}