/*
 * Heuristic check used when inspecting a Linux memory image whose 32-bit
 * paging mode is not yet known.  Tries each candidate 32-bit mode in turn
 * and accepts the first one under which the known virtual address @va
 * translates to the known physical address @pa via the candidate page
 * directory (@swapper_pg_dir - @boundary).
 *
 * @vmi:            instance; page_mode and kpgd are written on success
 * @swapper_pg_dir: virtual address of the kernel's swapper_pg_dir symbol
 * @boundary:       kernel virtual/physical split (VA - PA offset)
 * @pa/@va:         a known-good physical/virtual address pair to verify with
 *
 * Returns VMI_SUCCESS with vmi->page_mode and vmi->kpgd set, else
 * VMI_FAILURE (vmi->page_mode is left at the last mode tried).
 */
static status_t linux_filemode_32bit_init(vmi_instance_t vmi, uint32_t swapper_pg_dir,
                                          uint32_t boundary, uint32_t pa, uint32_t va)
{
    /* Candidate paging modes for a 32-bit kernel, tried in order.
     * (Was three copy-pasted blocks differing only in the constant.) */
    static const page_mode_t candidates[] = {
        VMI_PM_LEGACY,
        VMI_PM_PAE,
        VMI_PM_AARCH32,
    };
    size_t i;

    for (i = 0; i < sizeof(candidates) / sizeof(candidates[0]); i++) {
        vmi->page_mode = candidates[i];

        /* arch_init() rebuilds the paging interface for the new mode;
         * if it can't, this mode is not viable. */
        if (VMI_SUCCESS != arch_init(vmi))
            continue;

        /* Accept the mode if the known VA->PA pair translates correctly. */
        if (pa == vmi_pagetable_lookup(vmi, swapper_pg_dir - boundary, va)) {
            vmi->kpgd = swapper_pg_dir - boundary;
            return VMI_SUCCESS;
        }
    }

    return VMI_FAILURE;
}
/*
 * Single-step event callback: fires per instruction while single-stepping
 * is armed.  Waits until execution reaches a userspace RIP (below the
 * kernel split for the detected bitness), then records the target PID/RIP,
 * hijacks the thread and plants an INT3 breakpoint at the target
 * instruction, disarming the single-step and CR3 events.
 */
void ss_callback(vmi_instance_t vmi, vmi_event_t *event)
{
    reg_t rip, cr3;
    vmi_get_vcpureg(vmi, &rip, RIP, event->vcpu_id);
    vmi_get_vcpureg(vmi, &cr3, CR3, event->vcpu_id);

    page_mode_t pm = vmi_get_page_mode(vmi);
    vmi_pid_t pid = vmi_dtb_to_pid(vmi, cr3);

    printf("----- Singlestep: CR3 0x%lx PID %u executing RIP 0x%lx\n", cr3, pid, rip);

    /* Only act when RIP is in userspace (below the kernel boundary
     * appropriate for 32/64-bit). */
    if ((PM2BIT(pm) == BIT32 && rip < KERNEL32) ||
        (PM2BIT(pm) == BIT64 && rip < KERNEL64)) {
        printf("Good RIP: 0x%lx\n", rip);

        struct injector *injector = event->data;
        injector->ss_enabled = 0;
        injector->target_pid = pid;
        /* Resolve the physical address of the target instruction. */
        injector->target_rip = vmi_pagetable_lookup(vmi, cr3, rip);

        hijack_thread(injector, vmi, event->vcpu_id, pid);

        /* We have our injection point; stop watching for events. */
        vmi_clear_event(vmi, event);
        vmi_clear_event(vmi, &injector->cr3_event);
        injector->mm_count++;

        /* Plant an INT3 (0xCC) at the target, saving the original byte
         * so it can be restored later. */
        uint8_t trap = 0xCC;
        vmi_read_8_pa(vmi, injector->target_rip, &injector->backup);
        vmi_write_8_pa(vmi, injector->target_rip, &trap);
    }
}
/* expose virtual to physical mapping for kernel space via api call */
status_t vmi_translate_kv2p(vmi_instance_t vmi, addr_t virt_address, addr_t *paddr)
{
    /* With a known kernel page global directory, simply walk the tables. */
    if (vmi->kpgd)
        return vmi_pagetable_lookup(vmi, vmi->kpgd, virt_address, paddr);

    dbprint(VMI_DEBUG_PTLOOKUP,
            "--early bail on v2p lookup because the kernel page global directory is unknown\n");
    return VMI_FAILURE;
}
/*
 * Determine the Windows paging mode: try each candidate mode and accept
 * the one under which the known kernel virtual address translates to the
 * known kernel physical address.
 *
 * Returns VMI_SUCCESS with vmi->page_mode set, else VMI_FAILURE.
 */
static status_t find_page_mode(
    vmi_instance_t vmi)
{
    windows_instance_t windows = vmi->os_data;

    /* Robustness fix: these fields were previously dereferenced/used
     * without validation. */
    if (!windows) {
        errprint("Windows functions not initialized in %s\n", __FUNCTION__);
        return VMI_FAILURE;
    }
    if (!windows->ntoskrnl || !windows->ntoskrnl_va) {
        errprint("Windows kernel virtual and physical address required for determining page mode\n");
        return VMI_FAILURE;
    }

    /* Candidate modes, tried in order.
     * (Was three copy-pasted probe blocks.) */
    static const struct {
        page_mode_t pm;
        const char *name;
    } candidates[] = {
        { VMI_PM_LEGACY, "VMI_PM_LEGACY" },
        { VMI_PM_PAE,    "VMI_PM_PAE"    },
        { VMI_PM_IA32E,  "VMI_PM_IA32E"  },
    };

    size_t i;
    for (i = 0; i < sizeof(candidates) / sizeof(candidates[0]); i++) {
        dbprint(VMI_DEBUG_MISC, "--trying %s\n", candidates[i].name);
        vmi->page_mode = candidates[i].pm;

        if (VMI_SUCCESS != arch_init(vmi))
            continue;

        /* Accept the mode if the kernel image VA translates to its PA. */
        if (windows->ntoskrnl == vmi_pagetable_lookup(vmi, vmi->kpgd, windows->ntoskrnl_va))
            return VMI_SUCCESS;
    }

    return VMI_FAILURE;
}
/* expose virtual to physical mapping for user space via api call */
addr_t vmi_translate_uv2p_nocache(vmi_instance_t vmi, addr_t virt_address, vmi_pid_t pid)
{
    addr_t dtb = vmi_pid_to_dtb(vmi, pid);

    if (!dtb) {
        dbprint(VMI_DEBUG_PTLOOKUP, "--early bail on v2p lookup because dtb is zero\n");
        return 0;
    }

    addr_t paddr = vmi_pagetable_lookup(vmi, dtb, virt_address);

    /* A failed translation may mean the cached pid->dtb mapping is
     * stale, so drop it from the cache. */
    if (!paddr)
        pid_cache_del(vmi, pid);

    return paddr;
}
/* expose virtual to physical mapping for kernel space via api call */
addr_t vmi_translate_kv2p(vmi_instance_t vmi, addr_t virt_address)
{
    reg_t cr3 = 0;

    /* Prefer the cached kernel page directory; otherwise fall back to
     * reading CR3 from vcpu 0. */
    if (!vmi->kpgd)
        driver_get_vcpureg(vmi, &cr3, CR3, 0);
    else
        cr3 = vmi->kpgd;

    if (cr3)
        return vmi_pagetable_lookup(vmi, cr3, virt_address);

    dbprint(VMI_DEBUG_PTLOOKUP, "--early bail on v2p lookup because cr3 is zero\n");
    return 0;
}
/*
 * Translation setup for Linux when working from a memory file (no live
 * registers available): locate the kernel page directory via System.map
 * symbols and heuristics, setting vmi->page_mode and vmi->kpgd on success.
 *
 * Returns VMI_SUCCESS if a working page directory was found, else
 * VMI_FAILURE.
 */
static status_t linux_filemode_init(vmi_instance_t vmi)
{
    status_t rc;
    addr_t swapper_pg_dir = 0, init_level4_pgt = 0;
    addr_t boundary = 0, phys_start = 0, virt_start = 0;

    /* Pull the kernel's physical/virtual start symbols from System.map;
     * which symbol names exist depends on kernel bitness. */
    switch (vmi->page_mode) {
        case VMI_PM_IA32E:
            linux_system_map_symbol_to_address(vmi, "phys_startup_64", NULL, &phys_start);
            linux_system_map_symbol_to_address(vmi, "startup_64", NULL, &virt_start);
            break;
        case VMI_PM_AARCH32:
        case VMI_PM_LEGACY:
        case VMI_PM_PAE:
            linux_system_map_symbol_to_address(vmi, "phys_startup_32", NULL, &phys_start);
            linux_system_map_symbol_to_address(vmi, "startup_32", NULL, &virt_start);
            break;
        case VMI_PM_UNKNOWN:
            /* Unknown mode: try the 64-bit symbols first, then fall back
             * to the 32-bit ones. */
            linux_system_map_symbol_to_address(vmi, "phys_startup_64", NULL, &phys_start);
            linux_system_map_symbol_to_address(vmi, "startup_64", NULL, &virt_start);
            if (phys_start && virt_start) break;
            phys_start = virt_start = 0;
            linux_system_map_symbol_to_address(vmi, "phys_startup_32", NULL, &phys_start);
            linux_system_map_symbol_to_address(vmi, "startup_32", NULL, &virt_start);
            break;
    }

    /* The boundary is the constant VA-PA offset of the kernel mapping. */
    if (phys_start && virt_start && phys_start < virt_start) {
        boundary = virt_start - phys_start;
        dbprint(VMI_DEBUG_MISC, "--got kernel boundary (0x%.16"PRIx64").\n", boundary);
    }

    rc = linux_system_map_symbol_to_address(vmi, "swapper_pg_dir", NULL, &swapper_pg_dir);
    if (VMI_SUCCESS == rc) {
        dbprint(VMI_DEBUG_MISC, "--got vaddr for swapper_pg_dir (0x%.16"PRIx64").\n", swapper_pg_dir);

        /* We don't know if VMI_PM_LEGACY, VMI_PM_PAE or VMI_PM_AARCH32 yet
         * so we do some heuristics below. */
        if (boundary) {
            rc = linux_filemode_32bit_init(vmi, swapper_pg_dir, boundary, phys_start, virt_start);
            if (VMI_SUCCESS == rc)
                return rc;
        }

        /*
         * So we have a swapper but don't know the physical page of it.
         * We will make some educated guesses now: try the common 32-bit
         * kernel split boundaries (3G, 2G, 1G), verifying each by
         * checking that swapper_pg_dir's own VA translates correctly.
         */
        boundary = 0xC0000000;
        dbprint(VMI_DEBUG_MISC, "--trying boundary 0x%.16"PRIx64".\n", boundary);
        rc = linux_filemode_32bit_init(vmi, swapper_pg_dir, boundary,
                                       swapper_pg_dir-boundary, swapper_pg_dir);
        if (VMI_SUCCESS == rc) {
            return rc;
        }

        boundary = 0x80000000;
        dbprint(VMI_DEBUG_MISC, "--trying boundary 0x%.16"PRIx64".\n", boundary);
        rc = linux_filemode_32bit_init(vmi, swapper_pg_dir, boundary,
                                       swapper_pg_dir-boundary, swapper_pg_dir);
        if (VMI_SUCCESS == rc) {
            return rc;
        }

        boundary = 0x40000000;
        dbprint(VMI_DEBUG_MISC, "--trying boundary 0x%.16"PRIx64".\n", boundary);
        rc = linux_filemode_32bit_init(vmi, swapper_pg_dir, boundary,
                                       swapper_pg_dir-boundary, swapper_pg_dir);
        if (VMI_SUCCESS == rc) {
            return rc;
        }

        return VMI_FAILURE;
    }

    /* No swapper_pg_dir: try the 64-bit top-level page table symbol. */
    rc = linux_system_map_symbol_to_address(vmi, "init_level4_pgt", NULL, &init_level4_pgt);
    if (rc == VMI_SUCCESS) {
        dbprint(VMI_DEBUG_MISC, "--got vaddr for init_level4_pgt (0x%.16"PRIx64").\n", init_level4_pgt);
        if (boundary) {
            vmi->page_mode = VMI_PM_IA32E;
            if (VMI_SUCCESS == arch_init(vmi)) {
                /* Verify by translating the known virt_start. */
                if (phys_start == vmi_pagetable_lookup(vmi, init_level4_pgt - boundary, virt_start)) {
                    vmi->kpgd = init_level4_pgt - boundary;
                    return VMI_SUCCESS;
                }
            }
        }
    }

    return VMI_FAILURE;
}
/*
 * CR3 (address-space switch) callback.
 *
 * When the target process is scheduled in (CR3 matches target_cr3):
 *  - 32-bit guests without a target RIP: arm an execute memory event on
 *    kernel32!WaitForMultipleObjects (trap-frame walking below is 64-bit
 *    only, per the TODO).
 *  - otherwise: walk KPCR -> KPRCB -> CurrentThread -> TrapFrame to find
 *    the return-to-userspace RIP and plant an INT3 there.
 *
 * When any other process is scheduled in: tear down our memory and
 * single-step events and restore the byte we replaced with INT3.
 */
void cr3_callback(vmi_instance_t vmi, vmi_event_t *event)
{
    //printf("CR3 changed to 0x%lx\n", event->reg_event.value);
    struct injector *injector = event->data;
    addr_t thread = 0, kpcrb_offset = 0, trapframe = 0;
    addr_t userspace_return_va = 0, userspace_return_pa = 0;
    reg_t fsgs = 0;

    if (event->reg_event.value == injector->target_cr3) {
        if (!injector->target_rip) {
            /* TODO: Trap frame approach only works on 64-bit */
            if (PM2BIT(injector->pm) == BIT32) {
                if (!injector->mm_enabled) {
                    vmi_pid_t pid = vmi_dtb_to_pid(vmi, event->reg_event.value);
                    addr_t waitfor = sym2va(vmi, pid, "kernel32.dll", "WaitForMultipleObjects");
                    //printf("PID %u waitfor 0x%lx\n", pid, waitfor);
                    if (!waitfor) {
                        //injector->clone->interrupted = 1;
                        printf("Target process doesn't have kernel32.dll mapped!\n");
                        return;
                    }

                    /* Arm an execute watch on the page containing
                     * WaitForMultipleObjects. */
                    injector->mm_enabled = 1;
                    memset(&injector->mm_event, 0, sizeof(vmi_event_t));
                    injector->mm_event.type = VMI_EVENT_MEMORY;
                    injector->mm_event.mem_event.physical_address = vmi_translate_uv2p(vmi, waitfor, pid);
                    injector->mm_event.mem_event.npages = 1;
                    injector->mm_event.mem_event.granularity = VMI_MEMEVENT_PAGE;
                    injector->mm_event.mem_event.in_access = VMI_MEMACCESS_X;
                    injector->mm_event.callback = mm_callback;
                    injector->mm_event.data = injector;
                    vmi_register_event(vmi, &injector->mm_event);
                }
                return;
            }

            /* Read return to userspace RIP out of the stack trap frame.
             * The KPCR base comes from FS (32-bit) or GS (64-bit). */
            if (PM2BIT(injector->pm) == BIT32) {
                vmi_get_vcpureg(vmi, &fsgs, FS_BASE, event->vcpu_id);
                kpcrb_offset = offsets[KPCR_PRCBDATA];
            } else {
                vmi_get_vcpureg(vmi, &fsgs, GS_BASE, event->vcpu_id);
                kpcrb_offset = offsets[KPCR_PRCB];
            }

            vmi_read_addr_va(vmi, fsgs + kpcrb_offset + offsets[KPRCB_CURRENTTHREAD], 0, &thread);
            vmi_read_addr_va(vmi, thread + offsets[KTHREAD_TRAPFRAME], 0, &trapframe);
            //printf("Trap frame @ 0x%lx\n", trapframe);

            if (PM2BIT(injector->pm) == BIT32) {
                vmi_read_addr_va(vmi, trapframe + offsets[KTRAP_FRAME_EIP], 0, &userspace_return_va);
            } else {
                vmi_read_addr_va(vmi, trapframe + offsets[KTRAP_FRAME_RIP], 0, &userspace_return_va);
            }

            injector->userspace_return = vmi_pagetable_lookup(vmi, event->reg_event.value, userspace_return_va);
            //printf("Userspace return @ VA 0x%lx -> PA 0x%lx\n", userspace_return_va, injector->userspace_return);

            /* Plant INT3 at the userspace return point, saving the
             * original byte for restoration. */
            uint8_t trap = 0xCC;
            vmi_read_8_pa(vmi, injector->userspace_return, &injector->userspace_return_backup);
            vmi_write_8_pa(vmi, injector->userspace_return, &trap);

            /*if (!injector->ss_enabled) { printf("My target process is executing, registering singlestep\n"); injector->ss_enabled = 1; memset(&injector->ss_event, 0, sizeof(vmi_event_t)); injector->ss_event.type = VMI_EVENT_SINGLESTEP; injector->ss_event.callback = ss_callback; injector->ss_event.data = injector; SET_VCPU_SINGLESTEP(injector->ss_event.ss_event, event->vcpu_id); vmi_register_event(vmi, &injector->ss_event); }*/
        }
    } else {
        //printf("CR3 0x%lx is executing, not my process!\n",
        //       event->reg_event.value);

        /* Some other process is running: disarm everything we set up. */
        if (injector->mm_enabled) {
            injector->mm_enabled = 0;
            vmi_clear_event(vmi, &injector->mm_event);
        }
        if (injector->ss_enabled) {
            //printf("\tDisabling singlestep\n");
            injector->ss_enabled = 0;
            vmi_clear_event(vmi, &injector->ss_event);
        }
        if (injector->userspace_return) {
            /* Restore the byte we replaced with INT3. */
            vmi_write_8_pa(vmi, injector->userspace_return, &injector->userspace_return_backup);
            injector->userspace_return_backup = 0;
            injector->userspace_return = 0;
        }
    }
}
/*
 * Resolve a breakpoint trap's configured location to a physical address
 * and inject it.  Dispatches on lookup_type/addr_type:
 *  - LOOKUP_NONE: addr is already physical.
 *  - LOOKUP_PID/LOOKUP_NAME + ADDR_RVA: resolve via the process (or, for
 *    the System process, the kernel) module list.
 *  - LOOKUP_PID/LOOKUP_NAME + ADDR_VA: translate through the process dtb.
 * Returns nonzero on success, 0 on failure/misconfiguration.
 */
bool inject_trap_breakpoint(drakvuf_t drakvuf, drakvuf_trap_t *trap)
{
    if (trap->breakpoint.lookup_type == LOOKUP_NONE) {
        return inject_trap_pa(drakvuf, trap, trap->breakpoint.addr);
    }

    if (trap->breakpoint.lookup_type == LOOKUP_PID || trap->breakpoint.lookup_type == LOOKUP_NAME) {
        if (trap->breakpoint.addr_type == ADDR_RVA && trap->breakpoint.module) {
            vmi_pid_t pid = ~0;
            const char *name = NULL;
            addr_t module_list = 0;

            if (trap->breakpoint.pid == 4 || !strcmp(trap->breakpoint.proc, "System")) {
                /* System process (PID 4): kernel modules hang off
                 * PsLoadedModuleList. */
                pid = 4;
                name = "System";
                if (VMI_FAILURE == vmi_read_addr_ksym(drakvuf->vmi, "PsLoadedModuleList", &module_list))
                    return 0;
            } else {
                /* Process library */
                addr_t process_base;

                if (trap->breakpoint.lookup_type == LOOKUP_PID)
                    pid = trap->breakpoint.pid;
                if (trap->breakpoint.lookup_type == LOOKUP_NAME)
                    name = trap->breakpoint.proc;

                if (!drakvuf_find_eprocess(drakvuf, pid, name, &process_base))
                    return 0;

                /* When found by name only, recover the PID from the
                 * EPROCESS structure. */
                if (pid == ~0 && VMI_FAILURE == vmi_read_32_va(drakvuf->vmi, process_base + offsets[EPROCESS_PID], 0, (uint32_t*)&pid))
                    return 0;

                if (!drakvuf_get_module_list(drakvuf, process_base, &module_list))
                    return 0;
            }

            return inject_traps_modules(drakvuf, trap, module_list, pid);
        }

        if (trap->breakpoint.addr_type == ADDR_VA) {
            /* Translate the VA through the target process page tables. */
            addr_t dtb = vmi_pid_to_dtb(drakvuf->vmi, trap->breakpoint.pid);
            if (!dtb)
                return 0;
            addr_t trap_pa = vmi_pagetable_lookup(drakvuf->vmi, dtb, trap->breakpoint.addr);
            if (!trap_pa)
                return 0;
            return inject_trap_pa(drakvuf, trap, trap_pa);
        }

        if (trap->breakpoint.addr_type == ADDR_PA) {
            fprintf(stderr, "DRAKVUF Trap misconfiguration: PID lookup specified for PA location\n");
        }
    }

    return 0;
}
/*
 * OS-layer initialization for Linux guests: reads the config table,
 * determines the paging mode and kernel page directory (kpgd), sanity
 * checks the translation, and installs the Linux os_interface callbacks.
 *
 * Returns VMI_SUCCESS on success; on failure frees vmi->os_data and
 * returns VMI_FAILURE.
 */
status_t linux_init(vmi_instance_t vmi)
{
    status_t ret = VMI_FAILURE;
    os_interface_t os_interface = NULL;

    if (vmi->config == NULL) {
        errprint("No config table found\n");
        return VMI_FAILURE;
    }

    if (vmi->os_data != NULL) {
        errprint("os data already initialized, reinitializing\n");
        free(vmi->os_data);
    }

    vmi->os_data = safe_malloc(sizeof(struct linux_instance));
    bzero(vmi->os_data, sizeof(struct linux_instance));

    linux_instance_t linux_instance = vmi->os_data;

    /* Populate linux_instance settings from the config hash table. */
    g_hash_table_foreach(vmi->config, (GHFunc)linux_read_config_ghashtable_entries, vmi);

    addr_t boundary = 0, phys_start = 0, virt_start = 0;

    /* Look up the kernel's physical/virtual start symbols from
     * System.map; symbol names depend on bitness.  When the mode is
     * unknown, finding the 64-bit symbol implies IA32E, otherwise PAE
     * is assumed as a guess. */
    if (vmi->page_mode == VMI_PM_IA32E) {
        linux_system_map_symbol_to_address(vmi, "phys_startup_64", NULL, &phys_start);
        linux_system_map_symbol_to_address(vmi, "startup_64", NULL, &virt_start);
    } else if (vmi->page_mode == VMI_PM_LEGACY || vmi->page_mode == VMI_PM_PAE) {
        linux_system_map_symbol_to_address(vmi, "phys_startup_32", NULL, &phys_start);
        linux_system_map_symbol_to_address(vmi, "startup_32", NULL, &virt_start);
    } else if (vmi->page_mode == VMI_PM_UNKNOWN) {
        ret = linux_system_map_symbol_to_address(vmi, "phys_startup_64", NULL, &phys_start);
        if (VMI_SUCCESS == ret) {
            linux_system_map_symbol_to_address(vmi, "startup_64", NULL, &virt_start);
            vmi->page_mode = VMI_PM_IA32E;
        } else {
            linux_system_map_symbol_to_address(vmi, "phys_startup_32", NULL, &phys_start);
            linux_system_map_symbol_to_address(vmi, "startup_32", NULL, &virt_start);
            vmi->page_mode = VMI_PM_PAE; // it's just a guess
        }
    }

    /* Boundary = constant VA-PA offset of the kernel mapping. */
    if (phys_start && virt_start && phys_start < virt_start) {
        boundary = virt_start - phys_start;
    } else {
        // Just guess the boundary
        boundary = 0xc0000000UL;
    }
    linux_instance->kernel_boundary = boundary;
    dbprint(VMI_DEBUG_MISC, "--got kernel boundary (0x%.16"PRIx64").\n", boundary);

    /* Prefer a live CR3 for kpgd; otherwise derive it from System.map
     * symbols (symbol VA minus boundary gives the physical address). */
    if (VMI_FAILURE == driver_get_vcpureg(vmi, &vmi->kpgd, CR3, 0)) {
        if (VMI_SUCCESS == linux_system_map_symbol_to_address(vmi, "swapper_pg_dir", NULL, &vmi->kpgd)) {
            dbprint(VMI_DEBUG_MISC, "--got vaddr for swapper_pg_dir (0x%.16"PRIx64").\n", vmi->kpgd);
            //We don't know if VMI_PM_LEGACY or VMI_PM_PAE yet
            //so we do some heuristics below
        } else if (VMI_SUCCESS == linux_system_map_symbol_to_address(vmi, "init_level4_pgt", NULL, &vmi->kpgd)) {
            dbprint(VMI_DEBUG_MISC, "--got vaddr for init_level4_pgt (0x%.16"PRIx64").\n", vmi->kpgd);
            //Set page mode to VMI_PM_IA32E
            vmi->page_mode = VMI_PM_IA32E;
        } else {
            goto _exit;
        }

        vmi->kpgd -= boundary;
    }

    dbprint(VMI_DEBUG_MISC, "**set vmi->kpgd (0x%.16"PRIx64").\n", vmi->kpgd);

    // We check if the page mode is known
    // and if no arch interface has been setup yet we do it now
    if (VMI_PM_UNKNOWN == vmi->page_mode) {
        //Try to check 32-bit paging modes
        vmi->page_mode = VMI_PM_LEGACY;
        if (VMI_SUCCESS == arch_init(vmi)) {
            if (phys_start == vmi_pagetable_lookup(vmi, vmi->kpgd, virt_start)) {
                // PM found
                goto done;
            }
        }

        vmi->page_mode = VMI_PM_PAE;
        if (VMI_SUCCESS == arch_init(vmi)) {
            if (phys_start == vmi_pagetable_lookup(vmi, vmi->kpgd, virt_start)) {
                // PM found
                goto done;
            }
        }

        errprint("VMI_ERROR: Page mode is still unknown\n");
        goto _exit;
    }

    if (!vmi->arch_interface) {
        if (VMI_FAILURE == arch_init(vmi)) {
            goto _exit;
        }
    }

done:
    ret = linux_system_map_symbol_to_address(vmi, "init_task", NULL, &vmi->init_task);
    if (ret != VMI_SUCCESS) {
        errprint("Could not get init_task from System.map\n");
        goto _exit;
    }

    /* Sanity check: kpgd must be able to translate init_task's VA. */
    if (!vmi_pagetable_lookup(vmi, vmi->kpgd, vmi->init_task)) {
        errprint("Failed to translate init_task VA using the kpgd!\n");
        goto _exit;
    }

    /* Install the Linux os_interface callbacks. */
    os_interface = safe_malloc(sizeof(struct os_interface));
    bzero(os_interface, sizeof(struct os_interface));
    os_interface->os_get_offset = linux_get_offset;
    os_interface->os_pid_to_pgd = linux_pid_to_pgd;
    os_interface->os_pgd_to_pid = linux_pgd_to_pid;
    os_interface->os_ksym2v = linux_system_map_symbol_to_address;
    os_interface->os_usym2rva = NULL;
    os_interface->os_rva2sym = NULL;
    os_interface->os_read_unicode_struct = NULL;
    os_interface->os_teardown = linux_teardown;

    vmi->os_interface = os_interface;

    return VMI_SUCCESS;

_exit:
    free(vmi->os_data);
    vmi->os_data = NULL;
    return VMI_FAILURE;
}
/* Tries to determine the page mode based on the kpgd found via heuristics.
 * Tries VMI_PM_LEGACY, VMI_PM_PAE and VMI_PM_IA32E in turn, accepting the
 * mode under which the known kernel VA translates to the known kernel PA.
 * For the 32-bit modes the 64-bit kpgd is masked down to 32 bits (and, on
 * success, permanently truncated). */
static status_t find_page_mode(
    vmi_instance_t vmi)
{
    status_t ret = VMI_FAILURE;
    windows_instance_t windows = vmi->os_data;
    uint32_t mask = ~0;   /* all-ones low 32 bits, used to truncate kpgd */

    if (!windows) {
        errprint("Windows functions not initialized in %s\n", __FUNCTION__);
        return VMI_FAILURE;
    }

    if (!windows->ntoskrnl || !windows->ntoskrnl_va) {
        errprint("Windows kernel virtual and physical address required for determining page mode\n");
        return VMI_FAILURE;
    }

    if (!vmi->kpgd) {
        errprint("Windows kernel directory table base not set, can't determine page mode\n");
        return VMI_FAILURE;
    }

    dbprint(VMI_DEBUG_MISC, "--trying VMI_PM_LEGACY\n");
    vmi->page_mode = VMI_PM_LEGACY;
    /* As the size of vmi->kpgd is 64-bit, we mask it to be 32-bit here */
    if (VMI_SUCCESS == arch_init(vmi)) {
        if (windows->ntoskrnl == vmi_pagetable_lookup(vmi, (vmi->kpgd & mask), windows->ntoskrnl_va)) {
            vmi->kpgd &= mask;
            goto found_pm;
        }
    }

    dbprint(VMI_DEBUG_MISC, "--trying VMI_PM_PAE\n");
    vmi->page_mode = VMI_PM_PAE;
    /* As the size of vmi->kpgd is 64-bit, we mask it to be only 32-bit here */
    if (VMI_SUCCESS == arch_init(vmi)) {
        if (windows->ntoskrnl == vmi_pagetable_lookup(vmi, (vmi->kpgd & mask), windows->ntoskrnl_va)) {
            vmi->kpgd &= mask;
            goto found_pm;
        }
    }

    dbprint(VMI_DEBUG_MISC, "--trying VMI_PM_IA32E\n");
    vmi->page_mode = VMI_PM_IA32E;
    if (VMI_SUCCESS == arch_init(vmi)) {
        if (windows->ntoskrnl == vmi_pagetable_lookup(vmi, vmi->kpgd, windows->ntoskrnl_va)) {
            goto found_pm;
        }
    }

    goto done;

found_pm:
    ret = VMI_SUCCESS;

done:
    return ret;
}
/////////////////////////////////////////////////////////// // Classic read functions for access to memory size_t vmi_read( vmi_instance_t vmi, access_context_t *ctx, void *buf, size_t count) { unsigned char *memory = NULL; addr_t start_addr = 0; addr_t paddr = 0; addr_t pfn = 0; addr_t offset = 0; addr_t dtb = 0; size_t buf_offset = 0; if (NULL == ctx) { dbprint(VMI_DEBUG_READ, "--%s: ctx passed as NULL, returning without read\n", __FUNCTION__); return 0; } if (NULL == buf) { dbprint(VMI_DEBUG_READ, "--%s: buf passed as NULL, returning without read\n", __FUNCTION__); return 0; } switch (ctx->translate_mechanism) { case VMI_TM_NONE: start_addr = ctx->addr; break; case VMI_TM_KERNEL_SYMBOL: if (!vmi->arch_interface || !vmi->os_interface) { return 0; } dtb = vmi->kpgd; start_addr = vmi_translate_ksym2v(vmi, ctx->ksym); break; case VMI_TM_PROCESS_PID: if (!vmi->arch_interface || !vmi->os_interface) { return 0; } if(ctx->pid) { dtb = vmi_pid_to_dtb(vmi, ctx->pid); } else { dtb = vmi->kpgd; } start_addr = ctx->addr; break; case VMI_TM_PROCESS_DTB: if (!vmi->arch_interface) { return 0; } dtb = ctx->dtb; start_addr = ctx->addr; break; default: errprint("%s error: translation mechanism is not defined.\n", __FUNCTION__); return 0; } while (count > 0) { size_t read_len = 0; if(dtb) { paddr = vmi_pagetable_lookup(vmi, dtb, start_addr + buf_offset); } else { paddr = start_addr + buf_offset; } if (!paddr) { return buf_offset; } /* access the memory */ pfn = paddr >> vmi->page_shift; offset = (vmi->page_size - 1) & paddr; memory = vmi_read_page(vmi, pfn); if (NULL == memory) { return buf_offset; } /* determine how much we can read */ if ((offset + count) > vmi->page_size) { read_len = vmi->page_size - offset; } else { read_len = count; } /* do the read */ memcpy(((char *) buf) + (addr_t) buf_offset, memory + (addr_t) offset, read_len); /* set variables for next loop */ count -= read_len; buf_offset += read_len; } return buf_offset; }
/*
 * Locate the KdDebuggerDataBlock using configured RVAs only: the KPCR VA
 * comes from GS/FS (live guests only), and the kernel base plus the
 * configured kdbg_offset yields the KDBG physical address directly.
 */
status_t find_kdbg_address_instant(
    vmi_instance_t vmi,
    addr_t *kdbg_pa,
    addr_t *kernel_pa,
    addr_t *kernel_va)
{
    dbprint(VMI_DEBUG_MISC, "**Trying find_kdbg_address_instant\n");

    windows_instance_t windows = vmi->os_data;
    if (!windows)
        return VMI_FAILURE;

    // If the kernel base is unknown this approach requires the
    // location of the KPCR which we get from the GS/FS register,
    // available only on live machines.
    if (VMI_FILE == vmi->mode)
        return VMI_FAILURE;

    // We also need the config settings for the RVAs
    if (!windows->kdbg_offset || !windows->kpcr_offset)
        return VMI_FAILURE;

    reg_t cr3 = 0, fsgs = 0;
    if (VMI_FAILURE == driver_get_vcpureg(vmi, &cr3, CR3, 0))
        return VMI_FAILURE;

    // KPCR base lives in GS on 64-bit, FS otherwise.
    if (VMI_PM_IA32E == vmi->page_mode) {
        if (VMI_FAILURE == driver_get_vcpureg(vmi, &fsgs, GS_BASE, 0))
            return VMI_FAILURE;
    } else {
        if (VMI_FAILURE == driver_get_vcpureg(vmi, &fsgs, FS_BASE, 0))
            return VMI_FAILURE;
    }

    addr_t kernelbase_va = fsgs - windows->kpcr_offset;
    addr_t kernelbase_pa = 0;

    if (VMI_FAILURE == vmi_pagetable_lookup(vmi, cr3, kernelbase_va, &kernelbase_pa))
        return VMI_FAILURE;
    if (!kernelbase_pa)
        return VMI_FAILURE;

    *kernel_pa = kernelbase_pa;
    *kernel_va = kernelbase_va;
    *kdbg_pa = kernelbase_pa + windows->kdbg_offset;
    return VMI_SUCCESS;
}
/*
 * Scan for the Windows KdDebuggerDataBlock ("KDBG") starting from the
 * KPCR page (GS/FS base, live guests only): walk physical pages looking
 * for a PE image whose export name is "ntoskrnl.exe", then search its
 * .data section for the "KDBG" marker and verify the embedded KernBase
 * points back at the found page.
 *
 * Outputs the KDBG PA and the kernel PA/VA; returns VMI_SUCCESS on a
 * verified hit, VMI_FAILURE otherwise.
 */
status_t find_kdbg_address_faster(
    vmi_instance_t vmi,
    addr_t *kdbg_pa,
    addr_t *kernel_pa,
    addr_t *kernel_va)
{
    dbprint(VMI_DEBUG_MISC, "**Trying find_kdbg_address_faster\n");

    status_t ret = VMI_FAILURE;

    // This scan requires the location of the KPCR
    // which we get from the GS/FS register on live machines.
    // For file mode this needs to be further investigated.
    if (VMI_FILE == vmi->mode) {
        return ret;
    }

    void *bm = boyer_moore_init((unsigned char *)"KDBG", 4);
    int find_ofs = 0x10;

    reg_t cr3 = 0, fsgs = 0;
    if (VMI_FAILURE == driver_get_vcpureg(vmi, &cr3, CR3, 0)) {
        goto done;
    }

    switch (vmi->page_mode) {
        case VMI_PM_IA32E:
            if (VMI_FAILURE == driver_get_vcpureg(vmi, &fsgs, GS_BASE, 0))
                goto done;
            break;
        case VMI_PM_LEGACY: /* Fall-through */
        case VMI_PM_PAE:
            if (VMI_FAILURE == driver_get_vcpureg(vmi, &fsgs, FS_BASE, 0))
                goto done;
            break;
        default:
            goto done;
    };

    // We start the search from the KPCR, which has to be mapped into the
    // kernel.  The Windows kernel is page aligned, so we check each page
    // for a valid PE header whose first export-table name is
    // "ntoskrnl.exe", then limit the "KDBG" search to its .data section.
    // Search downward from the KPCR first, then upward if needed.
    int step = -VMI_PS_4KB;
    addr_t page_paddr;
    access_context_t ctx = {
        .translate_mechanism = VMI_TM_NONE,
    };

scan:
    if (VMI_FAILURE == vmi_pagetable_lookup(vmi, cr3, fsgs, &page_paddr))
        goto done;

    page_paddr &= ~VMI_BIT_MASK(0, 11);   /* align down to 4KB */

    for (; page_paddr + step < vmi->max_physical_address; page_paddr += step) {
        uint8_t page[VMI_PS_4KB];
        ctx.addr = page_paddr;

        if (VMI_FAILURE == peparse_get_image(vmi, &ctx, VMI_PS_4KB, page)) {
            continue;
        }

        struct pe_header *pe_header = NULL;
        struct dos_header *dos_header = NULL;
        void *optional_pe_header = NULL;
        uint16_t optional_header_type = 0;
        struct export_table et;

        peparse_assign_headers(page, &dos_header, &pe_header,
                               &optional_header_type, &optional_pe_header,
                               NULL, NULL);

        addr_t export_header_offset =
            peparse_get_idd_rva(IMAGE_DIRECTORY_ENTRY_EXPORT,
                                &optional_header_type, optional_pe_header,
                                NULL, NULL);

        if (!export_header_offset ||
            page_paddr + export_header_offset >= vmi->max_physical_address)
            continue;

        if (VMI_FAILURE == vmi_read_pa(vmi, page_paddr + export_header_offset,
                                       sizeof(struct export_table), &et, NULL))
            continue;

        /* BUGFIX: the validity check was inverted/conflated into one
         * condition, letting invalid export tables through to the name
         * read.  Reject invalid tables and out-of-range name pointers
         * separately. */
        if (et.export_flags || !et.name)
            continue;
        if (page_paddr + et.name + 12 >= vmi->max_physical_address)
            continue;

        unsigned char name[13] = {0};
        if (VMI_FAILURE == vmi_read_pa(vmi, page_paddr + et.name, 12, name, NULL))
            continue;

        if (strcmp("ntoskrnl.exe", (const char *)name)) {
            continue;
        }

        uint32_t c;
        for (c = 0; c < pe_header->number_of_sections; c++) {
            struct section_header section;
            addr_t section_addr = page_paddr
                + dos_header->offset_to_pe
                + sizeof(struct pe_header)
                + pe_header->size_of_optional_header
                + c * sizeof(struct section_header);

            // Read the section header from memory
            if (VMI_FAILURE == vmi_read_pa(vmi, section_addr,
                                           sizeof(struct section_header),
                                           (uint8_t *)&section, NULL))
                continue;

            // .data check ("\x2E\x64\x61\x74\x61" == ".data")
            if (memcmp(section.short_name, "\x2E\x64\x61\x74\x61", 5) != 0) {
                continue;
            }

            /* BUGFIX: was alloca() with a guest-controlled size, which
             * can blow the stack; use the heap and free on every path. */
            uint8_t *haystack = malloc(section.size_of_raw_data);
            if (!haystack)
                continue;

            if (VMI_FAILURE == vmi_read_pa(vmi,
                                           page_paddr + section.virtual_address,
                                           section.size_of_raw_data,
                                           haystack, NULL)) {
                free(haystack);
                continue;
            }

            int match_offset = boyer_moore2(bm, haystack, section.size_of_raw_data);

            if (-1 != match_offset) {
                // We found the structure, but let's verify it.
                // The kernel is always mapped into VA at the same offset
                // it is found on physical memory + the kernel boundary.

                /* BUGFIX: read "KernBase" via memcpy instead of a
                 * potentially misaligned, aliasing uint64_t* access. */
                uint64_t kernbase;
                memcpy(&kernbase,
                       &haystack[(unsigned int)match_offset + sizeof(uint64_t)],
                       sizeof(kernbase));

                int zeroes = __builtin_clzll(page_paddr);

                if (kernbase << zeroes == page_paddr << zeroes) {
                    *kernel_pa = page_paddr;
                    *kernel_va = kernbase;
                    *kdbg_pa = page_paddr + section.virtual_address
                             + (unsigned int)match_offset - find_ofs;
                    ret = VMI_SUCCESS;
                    dbprint(VMI_DEBUG_MISC,
                            "--Found KdDebuggerDataBlock at PA %.16"PRIx64"\n",
                            *kdbg_pa);
                    free(haystack);
                    goto done;
                } else {
                    dbprint(VMI_DEBUG_MISC,
                            "--WARNING: KernBase in KdDebuggerDataBlock at PA %.16"PRIx64" doesn't point back to this page.\n",
                            page_paddr + section.virtual_address
                            + (unsigned int)match_offset - find_ofs);
                }
            }

            free(haystack);
            /* .data was found and searched; no point scanning further
             * sections of this image. */
            break;
        }
    }

    if (step < 0) {
        /* Downward pass exhausted; rescan upward from the KPCR. */
        step = VMI_PS_4KB;
        goto scan;
    }

done:
    boyer_moore_fini(bm);
    return ret;
}
/*
 * Scan every virtually-mapped page (enumerated from the CR3 page tables)
 * for the KDBG signature appropriate to the paging mode, then validate a
 * hit by reading KernBase from the block and translating it.  Outputs the
 * KDBG PA and the kernel PA/VA; returns VMI_SUCCESS on a verified hit.
 */
status_t find_kdbg_address_fast(
    vmi_instance_t vmi,
    addr_t *kdbg_pa,
    addr_t *kernel_pa,
    addr_t *kernel_va)
{
    dbprint(VMI_DEBUG_MISC, "**Trying find_kdbg_address_fast\n");

    status_t ret = VMI_FAILURE;

    reg_t cr3;
    if (VMI_FAILURE == driver_get_vcpureg(vmi, &cr3, CR3, 0)) {
        return ret;
    }

    addr_t memsize = vmi_get_max_physical_address(vmi);
    GSList *va_pages = vmi_get_va_pages(vmi, (addr_t)cr3);
    void *bm = 0;   // boyer-moore internal state
    unsigned char haystack[VMI_PS_4KB];
    int find_ofs = 0;

    /* The bytes preceding "KDBG" (and the offset back to the block
     * start) differ between 64-bit and 32-bit layouts. */
    if (VMI_PM_IA32E == vmi->page_mode) {
        bm = boyer_moore_init((unsigned char *)"\x00\xf8\xff\xffKDBG", 8);
        find_ofs = 0xc;
    } else {
        bm = boyer_moore_init((unsigned char *)"\x00\x00\x00\x00\x00\x00\x00\x00KDBG", 12);
        find_ofs = 0x8;
    }   // if-else

    GSList *va_pages_loop = va_pages;
    while (va_pages_loop) {
        page_info_t *vap = (page_info_t *)va_pages_loop->data;

        // We might get pages that are greater than 4Kb
        // so we are just going to split them to 4Kb pages
        while (vap && vap->size >= VMI_PS_4KB) {
            vap->size -= VMI_PS_4KB;
            addr_t page_paddr = vap->paddr + vap->size;

            /* Note: size was already decremented, so these continues
             * still make progress through the large page. */
            if (page_paddr + VMI_PS_4KB - 1 > memsize) {
                continue;
            }
            if (VMI_FAILURE == vmi_read_pa(vmi, page_paddr, VMI_PS_4KB, haystack, NULL))
                continue;

            int match_offset = boyer_moore2(bm, haystack, VMI_PS_4KB);
            if (-1 != match_offset) {
                addr_t tmp_kva = 0, tmp_kpa = 0;
                addr_t tmp_kdbg = page_paddr + (unsigned int)match_offset - find_ofs;

                /* Validate: read the 64-bit value right after the KDBG
                 * header (KernBase candidate) and make sure it
                 * translates through the current page tables. */
                if (VMI_FAILURE == vmi_read_64_pa(vmi, tmp_kdbg + sizeof(DBGKD_DEBUG_DATA_HEADER64), &tmp_kva)) {
                    continue;
                }
                if (VMI_FAILURE == vmi_pagetable_lookup(vmi, cr3, tmp_kva, &tmp_kpa))
                    continue;

                *kdbg_pa = tmp_kdbg;
                *kernel_va = tmp_kva;
                *kernel_pa = tmp_kpa;
                ret = VMI_SUCCESS;
                goto done;
            }
        }

        g_free(vap);
        va_pages_loop = va_pages_loop->next;
    }

done:
    // free the rest of the list (on success this also frees the entry
    // we jumped out on, since va_pages_loop still points at it)
    while (va_pages_loop) {
        g_free(va_pages_loop->data);
        va_pages_loop = va_pages_loop->next;
    }
    g_slist_free(va_pages);

    if (VMI_SUCCESS == ret)
        dbprint(VMI_DEBUG_MISC, "--Found KdDebuggerDataBlock at PA %.16"PRIx64"\n", *kdbg_pa);

    boyer_moore_fini(bm);
    return ret;
}