/* convert a symbol into an address */
status_t vmi_translate_sym2v (vmi_instance_t vmi, const access_context_t *ctx, const char *symbol, addr_t *vaddr)
{
    addr_t dtb = 0;

    /* Resolve the pagetable base for the requested translation context. */
    switch (ctx->translate_mechanism) {
        case VMI_TM_PROCESS_PID:
            if ( VMI_FAILURE == vmi_pid_to_dtb(vmi, ctx->pid, &dtb) )
                return VMI_FAILURE;
            break;
        case VMI_TM_PROCESS_DTB:
            dtb = ctx->dtb;
            break;
        default:
            dbprint(VMI_DEBUG_MISC, "sym2v only supported in a virtual context!\n");
            return VMI_FAILURE;
    }

    addr_t address = 0;
    status_t status = sym_cache_get(vmi, ctx->addr, dtb, symbol, &address);

    /* Cache miss: ask the OS layer for the symbol's RVA, rebase it onto
     * ctx->addr, canonicalize, and remember the result for next time. */
    if ( VMI_FAILURE == status && vmi->os_interface && vmi->os_interface->os_usym2rva ) {
        addr_t rva = 0;
        status = vmi->os_interface->os_usym2rva(vmi, ctx, symbol, &rva);
        if ( VMI_SUCCESS == status ) {
            address = canonical_addr(ctx->addr + rva);
            sym_cache_set(vmi, ctx->addr, dtb, symbol, address);
        }
    }

    /* On failure *vaddr is set to 0, matching the untouched default. */
    *vaddr = address;
    return status;
}
/* convert a kernel symbol into an address */
addr_t vmi_translate_ksym2v (vmi_instance_t vmi, const char *symbol)
{
    addr_t address = 0;

    /* Fast path: kernel symbols are cached under base/dtb keys of 0. */
    if ( VMI_SUCCESS == sym_cache_get(vmi, 0, 0, symbol, &address) )
        return address;

    /* Cache miss: delegate to the OS-specific resolver, then cache the
     * canonicalized address. The base vaddr out-parameter is unused here. */
    if ( vmi->os_interface && vmi->os_interface->os_ksym2v ) {
        addr_t _base_vaddr;
        if ( VMI_SUCCESS == vmi->os_interface->os_ksym2v(vmi, symbol, &_base_vaddr, &address) ) {
            address = canonical_addr(address);
            sym_cache_set(vmi, 0, 0, symbol, address);
        }
    }

    /* Returns 0 when the symbol could not be resolved. */
    return address;
}
/*
 * Heuristically locate the kernel pagetable (vmi->kpgd) when no live vCPU
 * register state is available (the FILE-mode / driver-fallback path).
 *
 * Strategy: derive the physical/virtual kernel load boundary from the
 * phys_startup_* / startup_* symbol pair, then try to validate the
 * swapper_pg_dir (32-bit kernels) or init_level4_pgt (x86-64) symbol as a
 * pagetable root. Returns VMI_SUCCESS with vmi->kpgd set, else VMI_FAILURE.
 */
static status_t linux_filemode_init(vmi_instance_t vmi)
{
    status_t rc;
    addr_t swapper_pg_dir = 0, init_level4_pgt = 0;
    addr_t boundary = 0, phys_start = 0, virt_start = 0;

    /* Pick the 64-bit or 32-bit startup symbols based on page mode.
     * Return values are deliberately ignored: missing symbols simply
     * leave phys_start/virt_start at 0 and the boundary stays unknown. */
    switch (vmi->page_mode) {
        case VMI_PM_AARCH64:
        case VMI_PM_IA32E:
            linux_symbol_to_address(vmi, "phys_startup_64", NULL, &phys_start);
            linux_symbol_to_address(vmi, "startup_64", NULL, &virt_start);
            break;
        case VMI_PM_AARCH32:
        case VMI_PM_LEGACY:
        case VMI_PM_PAE:
            linux_symbol_to_address(vmi, "phys_startup_32", NULL, &phys_start);
            linux_symbol_to_address(vmi, "startup_32", NULL, &virt_start);
            break;
        case VMI_PM_UNKNOWN:
            /* Unknown mode: try 64-bit symbols first, fall back to 32-bit. */
            linux_symbol_to_address(vmi, "phys_startup_64", NULL, &phys_start);
            linux_symbol_to_address(vmi, "startup_64", NULL, &virt_start);
            if (phys_start && virt_start) break;
            phys_start = virt_start = 0;
            linux_symbol_to_address(vmi, "phys_startup_32", NULL, &phys_start);
            linux_symbol_to_address(vmi, "startup_32", NULL, &virt_start);
            break;
    }

    virt_start = canonical_addr(virt_start);

    /* The kernel's virt-to-phys offset ("boundary") only makes sense when
     * both symbols resolved and the virtual address is the larger one. */
    if(phys_start && virt_start && phys_start < virt_start) {
        boundary = virt_start - phys_start;
        dbprint(VMI_DEBUG_MISC, "--got kernel boundary (0x%.16"PRIx64").\n", boundary);
    }

    rc = linux_symbol_to_address(vmi, "swapper_pg_dir", NULL, &swapper_pg_dir);
    if (VMI_SUCCESS == rc) {
        dbprint(VMI_DEBUG_MISC, "--got vaddr for swapper_pg_dir (0x%.16"PRIx64").\n", swapper_pg_dir);

        swapper_pg_dir = canonical_addr(swapper_pg_dir);

        /* We don't know if VMI_PM_LEGACY, VMI_PM_PAE or VMI_PM_AARCH32 yet
         * so we do some heuristics below. */
        if (boundary) {
            rc = linux_filemode_32bit_init(vmi, swapper_pg_dir, boundary, phys_start, virt_start);
            if (VMI_SUCCESS == rc)
                return rc;
        }

        /*
         * So we have a swapper but don't know the physical page of it.
         * We will make some educated guesses now.
         */
        /* Try the common 3G/1G, 2G/2G and 1G/3G kernel/user splits in turn,
         * assuming swapper's physical address is vaddr minus the boundary. */
        boundary = 0xC0000000;
        dbprint(VMI_DEBUG_MISC, "--trying boundary 0x%.16"PRIx64".\n", boundary);
        rc = linux_filemode_32bit_init(vmi, swapper_pg_dir, boundary, swapper_pg_dir-boundary, swapper_pg_dir);
        if (VMI_SUCCESS == rc) {
            return rc;
        }

        boundary = 0x80000000;
        dbprint(VMI_DEBUG_MISC, "--trying boundary 0x%.16"PRIx64".\n", boundary);
        rc = linux_filemode_32bit_init(vmi, swapper_pg_dir, boundary, swapper_pg_dir-boundary, swapper_pg_dir);
        if (VMI_SUCCESS == rc) {
            return rc;
        }

        boundary = 0x40000000;
        dbprint(VMI_DEBUG_MISC, "--trying boundary 0x%.16"PRIx64".\n", boundary);
        rc = linux_filemode_32bit_init(vmi, swapper_pg_dir, boundary, swapper_pg_dir-boundary, swapper_pg_dir);
        if (VMI_SUCCESS == rc) {
            return rc;
        }

        /* swapper_pg_dir existed but none of the guessed splits validated. */
        return VMI_FAILURE;
    }

    /* No swapper_pg_dir: must be an x86-64 kernel — validate init_level4_pgt
     * as the PML4 root by checking it translates virt_start to phys_start. */
    rc = linux_symbol_to_address(vmi, "init_level4_pgt", NULL, &init_level4_pgt);
    if (rc == VMI_SUCCESS) {
        dbprint(VMI_DEBUG_MISC, "--got vaddr for init_level4_pgt (0x%.16"PRIx64").\n", init_level4_pgt);

        init_level4_pgt = canonical_addr(init_level4_pgt);

        if (boundary) {
            vmi->page_mode = VMI_PM_IA32E;
            if (VMI_SUCCESS == arch_init(vmi)) {
                if (phys_start == vmi_pagetable_lookup(vmi, init_level4_pgt - boundary, virt_start)) {
                    vmi->kpgd = init_level4_pgt - boundary;
                    return VMI_SUCCESS;
                }
            }
        }
    }

    return VMI_FAILURE;
}
/*
 * Probe a candidate page for the init_task task_struct.
 *
 * page_vaddr is a page-aligned kernel VA; the low 12 bits of the known
 * (pre-KASLR) init_task address give the offset within that page. The page
 * is accepted when the task's pid is 0 and its comm name is "swapper".
 * Returns VMI_SUCCESS on a match, VMI_FAILURE otherwise.
 */
static status_t init_task_kaslr_test(vmi_instance_t vmi, addr_t page_vaddr)
{
    status_t ret = VMI_FAILURE;
    uint32_t pid;
    /* Rebase init_task's in-page offset onto the candidate page. */
    addr_t init_task = page_vaddr + (vmi->init_task & VMI_BIT_MASK(0,11));
    linux_instance_t linux_instance = vmi->os_data;

    access_context_t ctx = { .translate_mechanism = VMI_TM_PROCESS_DTB, .dtb = vmi->kpgd };

    /* init_task must have pid 0. */
    ctx.addr = init_task + linux_instance->pid_offset;
    if ( VMI_FAILURE == vmi_read_32(vmi, &ctx, &pid) )
        return ret;
    if ( pid )
        return ret;

    /* ... and a comm name starting with "swapper" (swapper/0 etc.). */
    ctx.addr = init_task + linux_instance->name_offset;
    char* init_task_name = vmi_read_str(vmi, &ctx);
    if ( init_task_name && !strncmp("swapper", init_task_name, 7) )
        ret = VMI_SUCCESS;
    free(init_task_name);

    return ret;
}

/*
 * Determine the KASLR shift of the running kernel.
 *
 * If the configured vmi->init_task address already translates, no KASLR
 * adjustment is needed. Otherwise walk every mapped page in the kernel
 * pagetable and probe each candidate with init_task_kaslr_test(); on a hit,
 * record the offset in linux_instance->kaslr_offset and shift
 * vmi->init_task accordingly. Returns VMI_SUCCESS once resolved.
 */
status_t init_kaslr(vmi_instance_t vmi)
{
    /*
     * Let's check if we can translate init_task first as is.
     */
    uint32_t test;
    access_context_t ctx = { .translate_mechanism = VMI_TM_PROCESS_DTB, .dtb = vmi->kpgd, .addr = vmi->init_task };

    if ( VMI_SUCCESS == vmi_read_32(vmi, &ctx, &test) )
        return VMI_SUCCESS;

    status_t ret = VMI_FAILURE;
    linux_instance_t linux_instance = vmi->os_data;
    GSList *loop, *pages = vmi_get_va_pages(vmi, vmi->kpgd);
    loop = pages;
    /* The loop always runs to completion so every page_info_t gets freed,
     * even after the offset has been found. */
    while (loop) {
        page_info_t *info = loop->data;

        if ( !linux_instance->kaslr_offset ) {
            switch(vmi->page_mode) {
                case VMI_PM_AARCH64:
                case VMI_PM_IA32E:
                    /* Kernel half of the 48-bit address space only. */
                    if ( VMI_GET_BIT(info->vaddr, 47) )
                        ret = init_task_kaslr_test(vmi, info->vaddr);
                    break;
                default:
                    ret = init_task_kaslr_test(vmi, info->vaddr);
                    break;
            };

            if ( VMI_SUCCESS == ret ) {
                /* Offset = found page minus the page-aligned expected VA. */
                linux_instance->kaslr_offset = info->vaddr - (vmi->init_task & ~VMI_BIT_MASK(0,11));
                vmi->init_task += linux_instance->kaslr_offset;
                dbprint(VMI_DEBUG_MISC, "**calculated KASLR offset: 0x%"PRIx64"\n", linux_instance->kaslr_offset);
            }
        }

        g_free(info);
        loop = loop->next;
    }
    g_slist_free(pages);

    return ret;
}

/*
 * Initialize the Linux OS layer: allocate linux_instance state, read the
 * user config (System.map / Rekall profile / offsets), resolve init_task,
 * obtain the kernel pagetable from vCPU registers (or heuristics in FILE
 * mode), resolve KASLR, and install the os_interface callback table.
 * Returns VMI_SUCCESS, or VMI_FAILURE with vmi->os_data freed and NULLed.
 */
status_t linux_init(vmi_instance_t vmi)
{
    status_t rc;
    os_interface_t os_interface = NULL;

    if (vmi->config == NULL) {
        errprint("No config table found\n");
        return VMI_FAILURE;
    }

    if (vmi->os_data != NULL) {
        errprint("os data already initialized, reinitializing\n");
        free(vmi->os_data);
    }

    vmi->os_data = safe_malloc(sizeof(struct linux_instance));
    bzero(vmi->os_data, sizeof(struct linux_instance));
    linux_instance_t linux_instance = vmi->os_data;

    /* Populate linux_instance fields from the user-supplied config table. */
    g_hash_table_foreach(vmi->config, (GHFunc)linux_read_config_ghashtable_entries, vmi);

    /* init_task comes from the Rekall profile when one is configured,
     * otherwise from System.map. */
    if(linux_instance->rekall_profile)
        rc = init_from_rekall_profile(vmi);
    else
        rc = linux_symbol_to_address(vmi, "init_task", NULL, &vmi->init_task);

    if (VMI_FAILURE == rc) {
        errprint("Could not get init_task from Rekall profile or System.map\n");
        goto _exit;
    }

    vmi->init_task = canonical_addr(vmi->init_task);

    /* Prefer the live pagetable pointer from vCPU registers. */
#if defined(ARM32) || defined(ARM64)
    rc = driver_get_vcpureg(vmi, &vmi->kpgd, TTBR1, 0);
#elif defined(I386) || defined(X86_64)
    rc = driver_get_vcpureg(vmi, &vmi->kpgd, CR3, 0);
#endif

    /*
     * The driver failed to get us a pagetable.
     * As a fall-back, try to init using heuristics.
     * This path is taken in FILE mode as well.
     */
    if (VMI_FAILURE == rc)
        if (VMI_FAILURE == linux_filemode_init(vmi))
            goto _exit;

    if ( VMI_FAILURE == init_kaslr(vmi) ) {
        dbprint(VMI_DEBUG_MISC, "**failed to determine KASLR offset\n");
        goto _exit;
    }

    dbprint(VMI_DEBUG_MISC, "**set vmi->kpgd (0x%.16"PRIx64").\n", vmi->kpgd);

    os_interface = safe_malloc(sizeof(struct os_interface));
    bzero(os_interface, sizeof(struct os_interface));
    os_interface->os_get_offset = linux_get_offset;
    os_interface->os_pid_to_pgd = linux_pid_to_pgd;
    os_interface->os_pgd_to_pid = linux_pgd_to_pid;
    os_interface->os_ksym2v = linux_symbol_to_address;
    os_interface->os_usym2rva = NULL;
    os_interface->os_v2sym = linux_system_map_address_to_symbol;
    os_interface->os_read_unicode_struct = NULL;
    os_interface->os_teardown = linux_teardown;

    vmi->os_interface = os_interface;

    return VMI_SUCCESS;

_exit:
    free(vmi->os_data);
    vmi->os_data = NULL;
    return VMI_FAILURE;
}
/*
 * Initialize the Linux OS layer: allocate linux_instance state, read the
 * user config, resolve init_task, obtain the kernel pagetable from vCPU
 * registers (or heuristics in FILE mode), and install the os_interface
 * callback table. Returns VMI_SUCCESS, or VMI_FAILURE with vmi->os_data
 * freed and NULLed.
 *
 * NOTE(review): this appears to be a second definition of linux_init in
 * this file (an earlier variant additionally performs KASLR detection and
 * uses ARM32/ARM64 guards instead of ARM); two definitions of the same
 * external symbol cannot coexist in one translation unit — confirm which
 * variant is intended to remain.
 */
status_t linux_init(vmi_instance_t vmi)
{
    status_t rc;
    os_interface_t os_interface = NULL;

    if (vmi->config == NULL) {
        errprint("No config table found\n");
        return VMI_FAILURE;
    }

    if (vmi->os_data != NULL) {
        errprint("os data already initialized, reinitializing\n");
        free(vmi->os_data);
    }

    vmi->os_data = safe_malloc(sizeof(struct linux_instance));
    bzero(vmi->os_data, sizeof(struct linux_instance));
    linux_instance_t linux_instance = vmi->os_data;

    /* Populate linux_instance fields from the user-supplied config table. */
    g_hash_table_foreach(vmi->config, (GHFunc)linux_read_config_ghashtable_entries, vmi);

    /* init_task comes from the Rekall profile when one is configured,
     * otherwise from System.map. */
    if(linux_instance->rekall_profile)
        rc = init_from_rekall_profile(vmi);
    else
        rc = linux_symbol_to_address(vmi, "init_task", NULL, &vmi->init_task);

    if (VMI_FAILURE == rc) {
        errprint("Could not get init_task from Rekall profile or System.map\n");
        goto _exit;
    }

    vmi->init_task = canonical_addr(vmi->init_task);

    /* Prefer the live pagetable pointer from vCPU registers. */
#if defined(ARM)
    rc = driver_get_vcpureg(vmi, &vmi->kpgd, TTBR1, 0);
#elif defined(I386) || defined(X86_64)
    rc = driver_get_vcpureg(vmi, &vmi->kpgd, CR3, 0);
#endif

    /*
     * The driver failed to get us a pagetable.
     * As a fall-back, try to init using heuristics.
     * This path is taken in FILE mode as well.
     */
    if (VMI_FAILURE == rc)
        if (VMI_FAILURE == linux_filemode_init(vmi))
            goto _exit;

    dbprint(VMI_DEBUG_MISC, "**set vmi->kpgd (0x%.16"PRIx64").\n", vmi->kpgd);

    /* (fix) Removed a stray `done:` label here: no goto targeted it, so it
     * was dead syntax that only triggered -Wunused-label. */
    os_interface = safe_malloc(sizeof(struct os_interface));
    bzero(os_interface, sizeof(struct os_interface));
    os_interface->os_get_offset = linux_get_offset;
    os_interface->os_pid_to_pgd = linux_pid_to_pgd;
    os_interface->os_pgd_to_pid = linux_pgd_to_pid;
    os_interface->os_ksym2v = linux_symbol_to_address;
    os_interface->os_usym2rva = NULL;
    os_interface->os_v2sym = linux_system_map_address_to_symbol;
    os_interface->os_read_unicode_struct = NULL;
    os_interface->os_teardown = linux_teardown;

    vmi->os_interface = os_interface;

    return VMI_SUCCESS;

_exit:
    free(vmi->os_data);
    vmi->os_data = NULL;
    return VMI_FAILURE;
}
/*
 * Walk a 4-level IA-32e (x86-64) pagetable rooted at dtb and return a
 * GSList of page_info_t describing every present mapping (1GB, 2MB and
 * 4KB pages). Each info records the VA, PA, page size and the location and
 * raw value of every pagetable entry on the walk. Caller owns the list and
 * its elements (g_free each info, g_slist_free the list). Returns NULL if
 * nothing is mapped or a physical read fails.
 */
GSList* get_va_pages_ia32e(vmi_instance_t vmi, addr_t dtb)
{
    GSList *ret = NULL;
    uint8_t entry_size = 0x8;

#define IA32E_ENTRIES_PER_PAGE 0x200 // 0x1000/0x8

    /* (fix) pml4_page now uses g_malloc0 like its siblings, so the buffer
     * is never read uninitialized even on a partial read. */
    uint64_t *pml4_page = g_malloc0(VMI_PS_4KB);
    uint64_t *pdpt_page = g_malloc0(VMI_PS_4KB);
    uint64_t *pgd_page = g_malloc0(VMI_PS_4KB);
    uint64_t *pt_page = g_malloc0(VMI_PS_4KB);

    if ( !pml4_page || !pdpt_page || !pgd_page || !pt_page )
        goto done;

    /* Physical frame of the PML4 lives in bits 12-51 of CR3/dtb. */
    addr_t pml4e_location = dtb & VMI_BIT_MASK(12,51);
    if (VMI_FAILURE == vmi_read_pa(vmi, pml4e_location, VMI_PS_4KB, pml4_page, NULL))
        goto done;

    uint64_t pml4e_index;
    for (pml4e_index = 0; pml4e_index < IA32E_ENTRIES_PER_PAGE; pml4e_index++, pml4e_location += entry_size) {

        uint64_t pml4e_value = pml4_page[pml4e_index];
        if (!ENTRY_PRESENT(vmi->x86.transition_pages, pml4e_value))
            continue;

        uint64_t pdpte_location = pml4e_value & VMI_BIT_MASK(12,51);
        if (VMI_FAILURE == vmi_read_pa(vmi, pdpte_location, VMI_PS_4KB, pdpt_page, NULL))
            goto done;

        uint64_t pdpte_index;
        /* (fix) pdpte_location previously advanced by 1 byte per entry
         * (pdpte_location++); entries are entry_size (8) bytes, matching
         * the other three levels, so the recorded pdpte_location was wrong
         * for every entry after the first. */
        for (pdpte_index = 0; pdpte_index < IA32E_ENTRIES_PER_PAGE; pdpte_index++, pdpte_location += entry_size) {

            uint64_t pdpte_value = pdpt_page[pdpte_index];
            if (!ENTRY_PRESENT(vmi->x86.transition_pages, pdpte_value))
                continue;

            /* PS bit set at the PDPTE level => 1GB page. */
            if (PAGE_SIZE(pdpte_value)) {
                page_info_t *info = g_malloc0(sizeof(page_info_t));
                if ( !info )
                    goto done;

                info->vaddr = canonical_addr((pml4e_index << 39) | (pdpte_index << 30));
                info->dtb = dtb;
                info->paddr = get_gigpage_ia32e(info->vaddr, pdpte_value);
                info->size = VMI_PS_1GB;
                info->x86_ia32e.pml4e_location = pml4e_location;
                info->x86_ia32e.pml4e_value = pml4e_value;
                info->x86_ia32e.pdpte_location = pdpte_location;
                info->x86_ia32e.pdpte_value = pdpte_value;
                ret = g_slist_prepend(ret, info);
                continue;
            }

            uint64_t pgd_location = pdpte_value & VMI_BIT_MASK(12,51);
            if (VMI_FAILURE == vmi_read_pa(vmi, pgd_location, VMI_PS_4KB, pgd_page, NULL))
                goto done;

            uint64_t pgde_index;
            for (pgde_index = 0; pgde_index < IA32E_ENTRIES_PER_PAGE; pgde_index++, pgd_location += entry_size) {

                uint64_t pgd_value = pgd_page[pgde_index];
                /* (fix) ENTRY_PRESENT now takes vmi->x86.transition_pages
                 * here and at the PTE level, consistent with the PML4/PDPTE
                 * checks above (was vmi->os_type). */
                if (!ENTRY_PRESENT(vmi->x86.transition_pages, pgd_value))
                    continue;

                /* PS bit set at the PDE level => 2MB page. */
                if (PAGE_SIZE(pgd_value)) {
                    page_info_t *info = g_malloc0(sizeof(page_info_t));
                    if ( !info )
                        goto done;

                    info->vaddr = canonical_addr((pml4e_index << 39) | (pdpte_index << 30) | (pgde_index << 21));
                    info->dtb = dtb;
                    info->paddr = get_2megpage_ia32e(info->vaddr, pgd_value);
                    info->size = VMI_PS_2MB;
                    info->x86_ia32e.pml4e_location = pml4e_location;
                    info->x86_ia32e.pml4e_value = pml4e_value;
                    info->x86_ia32e.pdpte_location = pdpte_location;
                    info->x86_ia32e.pdpte_value = pdpte_value;
                    info->x86_ia32e.pgd_location = pgd_location;
                    info->x86_ia32e.pgd_value = pgd_value;
                    ret = g_slist_prepend(ret, info);
                    continue;
                }

                uint64_t pte_location = (pgd_value & VMI_BIT_MASK(12,51));
                if (VMI_FAILURE == vmi_read_pa(vmi, pte_location, VMI_PS_4KB, pt_page, NULL))
                    goto done;

                uint64_t pte_index;
                for (pte_index = 0; pte_index < IA32E_ENTRIES_PER_PAGE; pte_index++, pte_location += entry_size) {

                    uint64_t pte_value = pt_page[pte_index];
                    if (!ENTRY_PRESENT(vmi->x86.transition_pages, pte_value))
                        continue;

                    /* Leaf PTE => 4KB page. */
                    page_info_t *info = g_malloc0(sizeof(page_info_t));
                    if ( !info )
                        goto done;

                    info->vaddr = canonical_addr((pml4e_index << 39) | (pdpte_index << 30) | (pgde_index << 21) | (pte_index << 12));
                    info->dtb = dtb;
                    info->paddr = get_paddr_ia32e(info->vaddr, pte_value);
                    info->size = VMI_PS_4KB;
                    info->x86_ia32e.pml4e_location = pml4e_location;
                    info->x86_ia32e.pml4e_value = pml4e_value;
                    info->x86_ia32e.pdpte_location = pdpte_location;
                    info->x86_ia32e.pdpte_value = pdpte_value;
                    info->x86_ia32e.pgd_location = pgd_location;
                    info->x86_ia32e.pgd_value = pgd_value;
                    info->x86_ia32e.pte_location = pte_location;
                    info->x86_ia32e.pte_value = pte_value;
                    ret = g_slist_prepend(ret, info);
                }
            }
        }
    }

done:
    g_free(pt_page);
    g_free(pgd_page);
    g_free(pdpt_page);
    g_free(pml4_page);

    return ret;
}