// 2nd Level Page Table Descriptor (Fine Pages) static inline void get_fine_second_level_descriptor(vmi_instance_t vmi, uint32_t vaddr, page_info_t *info) { info->arm_aarch32.sld_location = (info->arm_aarch32.fld_value & VMI_BIT_MASK(12,31)) | (fine_second_level_table_index(vaddr) << 2); uint32_t sld_v; if (VMI_SUCCESS == vmi_read_32_pa(vmi, info->arm_aarch32.sld_location, &sld_v)) { info->arm_aarch32.sld_value = sld_v; } }
/* Inspect a non-present, non-PAE PTE/PDE ("buffalo" entry) and emit debug
 * output describing why the page is not present.
 *
 * NOTE(review): the `pde` parameter is unused in the visible code, and the
 * function body appears truncated here (no closing brace in view) —
 * presumably further TRANSITION/PROTOTYPE cases follow; verify against the
 * full file. */
void buffalo_nopae (vmi_instance_t instance, uint32_t entry, int pde)
{
    /* similar techniques are surely doable in linux, but for now
     * this is only testing for windows domains */
    if (instance->os_type != VMI_OS_WINDOWS) {
        return;
    }

    /* Neither in transition nor a prototype PTE: the entry encodes either a
     * pagefile location or a demand-zero page. */
    if (!TRANSITION(entry) && !PROTOTYPE(entry)) {
        /* Pagefile number lives in bits 1-4, pagefile frame in bits 12-31
         * of the entry (per the masks used below). */
        uint32_t pfnum = (entry >> 1) & VMI_BIT_MASK(0,3);
        uint32_t pfframe = entry & VMI_BIT_MASK(12,31);

        /* pagefile */
        if (pfnum != 0 && pfframe != 0) {
            dbprint(VMI_DEBUG_PTLOOKUP, "--Buffalo: page file = %d, frame = 0x%.8x\n", pfnum, pfframe);
        }
        /* demand zero */
        else if (pfnum == 0 && pfframe == 0) {
            dbprint(VMI_DEBUG_PTLOOKUP, "--Buffalo: demand zero page\n");
        }
    }
/* Read the PML4 entry covering vaddr from the table rooted at cr3.
 *
 * On success the entry's physical address and value are returned through
 * pml4e_address/pml4e_value; on read failure pml4e_value is left at 0 and
 * VMI_FAILURE is returned. */
static inline status_t
get_pml4e (vmi_instance_t vmi, addr_t vaddr, reg_t cr3, addr_t *pml4e_address, uint64_t *pml4e_value)
{
    *pml4e_value = 0;

    /* Table base comes from CR3 bits 12-51; the entry offset comes from the
     * PML4 index of the virtual address (already scaled to bytes). */
    addr_t table_base = cr3 & VMI_BIT_MASK(12,51);
    *pml4e_address = table_base | get_pml4_index(vaddr);

    if (VMI_FAILURE == vmi_read_64_pa(vmi, *pml4e_address, pml4e_value)) {
        dbprint(VMI_DEBUG_PTLOOKUP, "--PTLookup: error reading pml4e_address = 0x%.16"PRIx64"\n", *pml4e_address);
        return VMI_FAILURE;
    }

    dbprint(VMI_DEBUG_PTLOOKUP, "--PTLookup: pml4e_address = 0x%.16"PRIx64"\n", *pml4e_address);
    return VMI_SUCCESS;
}
/* Read the page-directory entry covering vaddr from the PD referenced by
 * pdpte (IA-32e paging).
 *
 * On success the entry's physical address and value are returned through
 * pde_address/pde_value; on read failure pde_value is left at 0 and
 * VMI_FAILURE is returned.
 *
 * BUGFIX: the success debug string previously ran the two fields together
 * ("...pde_address = 0x..pde_value= 0x.."); a separator was added. */
static inline status_t
get_pde_ia32e (vmi_instance_t vmi, addr_t vaddr, uint64_t pdpte, addr_t *pde_address, addr_t *pde_value)
{
    *pde_value = 0;

    /* PD base from PDPTE bits 12-51, entry offset from VA bits 21-29. */
    *pde_address = (pdpte & VMI_BIT_MASK(12,51)) | get_pd_index_ia32e(vaddr);

    if (VMI_FAILURE == vmi_read_64_pa(vmi, *pde_address, pde_value)) {
        dbprint(VMI_DEBUG_PTLOOKUP, "--PTLookup: failed to read pde_address = 0x%.16"PRIx64"\n", *pde_address);
        return VMI_FAILURE;
    }

    dbprint(VMI_DEBUG_PTLOOKUP, "--PTLookup: pde_address = 0x%.16"PRIx64", pde_value = 0x%.16"PRIx64"\n", *pde_address, *pde_value);
    return VMI_SUCCESS;
}
/* Physical address within a PAE large page: frame bits 21-31 come from the
 * directory entry, offset bits 0-20 come from the virtual address. */
static inline uint32_t
get_large_paddr_pae (uint32_t vaddr, uint32_t pgd_entry)
{
    uint32_t frame = pgd_entry & VMI_BIT_MASK(21,31);
    uint32_t offset = vaddr & VMI_BIT_MASK(0,20);
    return frame | offset;
}
/* Byte offset of vaddr's page-directory entry: VA bits 21-29 select the
 * entry; <<3 scales the 9-bit index to an 8-byte entry offset. */
static inline uint64_t
get_pd_index_ia32e (addr_t vaddr)
{
    uint64_t index = (vaddr >> 21) & VMI_BIT_MASK(0,8);
    return index << 3;
}
/* Page-frame base of a PAE PTE: bits 12-35 of the entry. */
static inline uint64_t
pte_pfn_pae (uint64_t pte)
{
    uint64_t frame = pte & VMI_BIT_MASK(12,35);
    return frame;
}
/* Final PAE physical address: page frame from the PTE plus the 12-bit
 * page offset from the virtual address. */
static inline uint64_t
get_paddr_pae (uint32_t vaddr, uint64_t pte)
{
    uint64_t page_offset = vaddr & VMI_BIT_MASK(0,11);
    return pte_pfn_pae(pte) | page_offset;
}
/* Physical address within a 1 GB page: frame bits 30-51 from the PDPTE,
 * offset bits 0-29 from the virtual address. */
static inline uint64_t
get_gigpage_ia32e (addr_t vaddr, uint64_t pdpte)
{
    uint64_t frame = pdpte & VMI_BIT_MASK(30,51);
    uint64_t offset = vaddr & VMI_BIT_MASK(0,29);
    return frame | offset;
}
/* Physical address within a 4 KB page: frame bits 12-51 from the PTE,
 * offset bits 0-11 from the virtual address. */
static inline uint64_t
get_paddr_ia32e (addr_t vaddr, uint64_t pte)
{
    uint64_t frame = pte & VMI_BIT_MASK(12,51);
    uint64_t offset = vaddr & VMI_BIT_MASK(0,11);
    return frame | offset;
}
/* Page-directory base address from a PAE PDPT entry: bits 12-51. */
static inline uint64_t
pdba_base_pae (uint64_t pdpe)
{
    uint64_t base = pdpe & VMI_BIT_MASK(12,51);
    return base;
}
// 2nd Level Page Table Index (Coarse Pages)
//
// A coarse second-level table holds 256 entries; the index comes from
// VA bits 12-19.
static inline uint32_t
coarse_second_level_table_index(uint32_t vaddr)
{
    uint32_t shifted = vaddr >> 12;
    return shifted & VMI_BIT_MASK(0,7);
}
/* page directory pointer table */
// Base address of the PDPT: bits 5-63 of the register value (the PDPT is
// 32-byte aligned under PAE, so the low 5 bits are dropped).
static inline uint64_t
get_pdptb (uint64_t pdpr)
{
    uint64_t base = pdpr & VMI_BIT_MASK(5,63);
    return base;
}
/* Byte offset of vaddr's PDPT entry: VA bits 30-38 select the entry; <<3
 * scales the 9-bit index to an 8-byte entry offset. */
static inline addr_t
get_pdpt_index_ia32e (addr_t vaddr)
{
    addr_t index = (vaddr >> 30) & VMI_BIT_MASK(0,8);
    return index << 3;
}
/* Physical address within a 2 MB page: frame bits 21-51 from the PDE,
 * offset bits 0-20 from the virtual address. */
static inline uint64_t
get_2megpage_ia32e (addr_t vaddr, uint64_t pde)
{
    uint64_t frame = pde & VMI_BIT_MASK(21,51);
    uint64_t offset = vaddr & VMI_BIT_MASK(0,20);
    return frame | offset;
}
/* PML4 Table */
// Byte offset of vaddr's PML4 entry: VA bits 39-47 select the entry; <<3
// scales the 9-bit index to an 8-byte entry offset.
static inline addr_t
get_pml4_index (addr_t vaddr)
{
    addr_t index = (vaddr >> 39) & VMI_BIT_MASK(0,8);
    return index << 3;
}
/* Enumerate every present mapping in an IA-32e (4-level, x86-64) page table.
 *
 * Walks the PML4 rooted at dtb and collects one page_info_t per mapped page,
 * handling 1 GB (PS bit in the PDPTE), 2 MB (PS bit in the PDE) and 4 KB
 * pages. The caller owns the returned GSList and every element on it.
 * Returns NULL when the top-level table cannot be read or allocation fails.
 *
 * BUGFIXES vs. the original:
 *  - the pdpte loop advanced pdpte_location by 1 byte per 8-byte entry, so
 *    every recorded pdpte_location after the first was wrong; it now
 *    advances by entry_size like the other levels;
 *  - the pdpt/pgd/pt scratch-page allocations are now checked before use.
 */
GSList* get_va_pages_ia32e(vmi_instance_t vmi, addr_t dtb)
{
    GSList *ret = NULL;
    uint8_t entry_size = 0x8;

#define IA32E_ENTRIES_PER_PAGE 0x200 // 0x1000/0x8

    uint64_t *pml4_page = malloc(VMI_PS_4KB);
    uint64_t *pdpt_page = malloc(VMI_PS_4KB);
    uint64_t *pgd_page = malloc(VMI_PS_4KB);
    uint64_t *pt_page = malloc(VMI_PS_4KB);

    if (!pml4_page || !pdpt_page || !pgd_page || !pt_page)
        goto done;

    addr_t pml4e_location = dtb & VMI_BIT_MASK(12,51);
    if (VMI_PS_4KB != vmi_read_pa(vmi, pml4e_location, pml4_page, VMI_PS_4KB))
        goto done;

    uint64_t pml4e_index;
    for (pml4e_index = 0; pml4e_index < IA32E_ENTRIES_PER_PAGE; pml4e_index++, pml4e_location += entry_size) {

        uint64_t pml4e_value = pml4_page[pml4e_index];
        if (!ENTRY_PRESENT(vmi->os_type, pml4e_value))
            continue;

        uint64_t pdpte_location = pml4e_value & VMI_BIT_MASK(12,51);
        if (VMI_PS_4KB != vmi_read_pa(vmi, pdpte_location, pdpt_page, VMI_PS_4KB))
            continue;

        uint64_t pdpte_index;
        for (pdpte_index = 0; pdpte_index < IA32E_ENTRIES_PER_PAGE; pdpte_index++, pdpte_location += entry_size) {

            uint64_t pdpte_value = pdpt_page[pdpte_index];
            if (!ENTRY_PRESENT(vmi->os_type, pdpte_value))
                continue;

            if (PAGE_SIZE(pdpte_value)) {
                /* 1 GB page mapped directly by the PDPTE. */
                page_info_t *info = g_malloc0(sizeof(page_info_t));
                info->vaddr = (pml4e_index << 39) | (pdpte_index << 30);
                info->paddr = get_gigpage_ia32e(info->vaddr, pdpte_value);
                info->size = VMI_PS_1GB;
                info->x86_ia32e.pml4e_location = pml4e_location;
                info->x86_ia32e.pml4e_value = pml4e_value;
                info->x86_ia32e.pdpte_location = pdpte_location;
                info->x86_ia32e.pdpte_value = pdpte_value;
                ret = g_slist_prepend(ret, info);
                continue;
            }

            uint64_t pgd_location = pdpte_value & VMI_BIT_MASK(12,51);
            if (VMI_PS_4KB != vmi_read_pa(vmi, pgd_location, pgd_page, VMI_PS_4KB))
                continue;

            uint64_t pgde_index;
            for (pgde_index = 0; pgde_index < IA32E_ENTRIES_PER_PAGE; pgde_index++, pgd_location += entry_size) {

                uint64_t pgd_value = pgd_page[pgde_index];
                if (!ENTRY_PRESENT(vmi->os_type, pgd_value))
                    continue;

                if (PAGE_SIZE(pgd_value)) {
                    /* 2 MB page mapped directly by the PDE. */
                    page_info_t *info = g_malloc0(sizeof(page_info_t));
                    info->vaddr = (pml4e_index << 39) | (pdpte_index << 30) | (pgde_index << 21);
                    info->paddr = get_2megpage_ia32e(info->vaddr, pgd_value);
                    info->size = VMI_PS_2MB;
                    info->x86_ia32e.pml4e_location = pml4e_location;
                    info->x86_ia32e.pml4e_value = pml4e_value;
                    info->x86_ia32e.pdpte_location = pdpte_location;
                    info->x86_ia32e.pdpte_value = pdpte_value;
                    info->x86_ia32e.pgd_location = pgd_location;
                    info->x86_ia32e.pgd_value = pgd_value;
                    ret = g_slist_prepend(ret, info);
                    continue;
                }

                uint64_t pte_location = (pgd_value & VMI_BIT_MASK(12,51));
                if (VMI_PS_4KB != vmi_read_pa(vmi, pte_location, pt_page, VMI_PS_4KB))
                    continue;

                uint64_t pte_index;
                for (pte_index = 0; pte_index < IA32E_ENTRIES_PER_PAGE; pte_index++, pte_location += entry_size) {

                    uint64_t pte_value = pt_page[pte_index];
                    if (!ENTRY_PRESENT(vmi->os_type, pte_value))
                        continue;

                    /* 4 KB page. */
                    page_info_t *info = g_malloc0(sizeof(page_info_t));
                    info->vaddr = (pml4e_index << 39) | (pdpte_index << 30) |
                                  (pgde_index << 21) | (pte_index << 12);
                    info->paddr = get_paddr_ia32e(info->vaddr, pte_value);
                    info->size = VMI_PS_4KB;
                    info->x86_ia32e.pml4e_location = pml4e_location;
                    info->x86_ia32e.pml4e_value = pml4e_value;
                    info->x86_ia32e.pdpte_location = pdpte_location;
                    info->x86_ia32e.pdpte_value = pdpte_value;
                    info->x86_ia32e.pgd_location = pgd_location;
                    info->x86_ia32e.pgd_value = pgd_value;
                    info->x86_ia32e.pte_location = pte_location;
                    info->x86_ia32e.pte_value = pte_value;
                    ret = g_slist_prepend(ret, info);
                }
            }
        }
    }

done:
    free(pt_page);
    free(pgd_page);
    free(pdpt_page);
    free(pml4_page);

    return ret;
}
/* Page-directory base address from a non-PAE entry: bits 12-31. */
static inline uint32_t
pdba_base_nopae (uint32_t pdpe)
{
    uint32_t base = pdpe & VMI_BIT_MASK(12,31);
    return base;
}
/* Verify that the page at page_vaddr holds the kernel's init_task:
 * the PID read at pid_offset must be 0 and the task name must start with
 * "swapper". Returns VMI_SUCCESS on a match, VMI_FAILURE otherwise. */
static status_t init_task_kaslr_test(vmi_instance_t vmi, addr_t page_vaddr)
{
    status_t ret = VMI_FAILURE;
    uint32_t pid;
    /* Keep init_task's sub-page offset, substitute the candidate page base. */
    addr_t init_task = page_vaddr + (vmi->init_task & VMI_BIT_MASK(0,11));
    linux_instance_t linux_instance = vmi->os_data;

    access_context_t ctx = { .translate_mechanism = VMI_TM_PROCESS_DTB, .dtb = vmi->kpgd };

    ctx.addr = init_task + linux_instance->pid_offset;
    if ( VMI_FAILURE == vmi_read_32(vmi, &ctx, &pid) )
        return ret;

    /* The swapper task always has PID 0. */
    if ( pid )
        return ret;

    ctx.addr = init_task + linux_instance->name_offset;
    char* init_task_name = vmi_read_str(vmi, &ctx);

    if ( init_task_name && !strncmp("swapper", init_task_name, 7) )
        ret = VMI_SUCCESS;

    /* free(NULL) is a no-op, so no guard is needed. */
    free(init_task_name);
    return ret;
}

/* Determine the kernel's KASLR offset by scanning mapped kernel pages for
 * the relocated init_task. On success the offset is stored in the Linux
 * instance data and vmi->init_task is adjusted in place. */
status_t init_kaslr(vmi_instance_t vmi)
{
    /*
     * Let's check if we can translate init_task first as is.
     */
    uint32_t test;
    access_context_t ctx = { .translate_mechanism = VMI_TM_PROCESS_DTB, .dtb = vmi->kpgd, .addr = vmi->init_task };

    if ( VMI_SUCCESS == vmi_read_32(vmi, &ctx, &test) )
        return VMI_SUCCESS;   /* no KASLR adjustment required */

    status_t ret = VMI_FAILURE;
    linux_instance_t linux_instance = vmi->os_data;
    GSList *loop, *pages = vmi_get_va_pages(vmi, vmi->kpgd);
    loop = pages;
    while (loop) {
        page_info_t *info = loop->data;

        /* Once the offset is found, the remaining pages are only freed. */
        if ( !linux_instance->kaslr_offset ) {
            switch(vmi->page_mode) {
                case VMI_PM_AARCH64:
                case VMI_PM_IA32E:
                    /* Only consider kernel-space (upper-half) addresses. */
                    if ( VMI_GET_BIT(info->vaddr, 47) )
                        ret = init_task_kaslr_test(vmi, info->vaddr);
                    break;
                default:
                    ret = init_task_kaslr_test(vmi, info->vaddr);
                    break;
            };

            if ( VMI_SUCCESS == ret ) {
                /* Offset = found page base - compile-time page base. */
                linux_instance->kaslr_offset = info->vaddr - (vmi->init_task & ~VMI_BIT_MASK(0,11));
                vmi->init_task += linux_instance->kaslr_offset;
                dbprint(VMI_DEBUG_MISC, "**calculated KASLR offset: 0x%"PRIx64"\n", linux_instance->kaslr_offset);
            }
        }

        g_free(info);
        loop = loop->next;
    }
    g_slist_free(pages);

    return ret;
}

/* Initialize LibVMI's Linux OS support: allocate the per-instance data,
 * read the user-supplied config, locate init_task (Rekall profile or
 * System.map), obtain the kernel page directory, resolve KASLR, and install
 * the Linux os_interface callbacks. Returns VMI_FAILURE (with os_data freed)
 * on any error. */
status_t linux_init(vmi_instance_t vmi)
{
    status_t rc;
    os_interface_t os_interface = NULL;

    if (vmi->config == NULL) {
        errprint("No config table found\n");
        return VMI_FAILURE;
    }

    if (vmi->os_data != NULL) {
        errprint("os data already initialized, reinitializing\n");
        free(vmi->os_data);
    }

    vmi->os_data = safe_malloc(sizeof(struct linux_instance));
    bzero(vmi->os_data, sizeof(struct linux_instance));
    linux_instance_t linux_instance = vmi->os_data;

    /* Pull configuration entries (offsets, profile path, ...) into vmi. */
    g_hash_table_foreach(vmi->config, (GHFunc)linux_read_config_ghashtable_entries, vmi);

    if(linux_instance->rekall_profile)
        rc = init_from_rekall_profile(vmi);
    else
        rc = linux_symbol_to_address(vmi, "init_task", NULL, &vmi->init_task);

    if (VMI_FAILURE == rc) {
        errprint("Could not get init_task from Rekall profile or System.map\n");
        goto _exit;
    }

    vmi->init_task = canonical_addr(vmi->init_task);

#if defined(ARM32) || defined(ARM64)
    rc = driver_get_vcpureg(vmi, &vmi->kpgd, TTBR1, 0);
#elif defined(I386) || defined(X86_64)
    rc = driver_get_vcpureg(vmi, &vmi->kpgd, CR3, 0);
#endif

    /*
     * The driver failed to get us a pagetable.
     * As a fall-back, try to init using heuristics.
     * This path is taken in FILE mode as well.
     */
    if (VMI_FAILURE == rc)
        if (VMI_FAILURE == linux_filemode_init(vmi))
            goto _exit;

    if ( VMI_FAILURE == init_kaslr(vmi) ) {
        dbprint(VMI_DEBUG_MISC, "**failed to determine KASLR offset\n");
        goto _exit;
    }

    dbprint(VMI_DEBUG_MISC, "**set vmi->kpgd (0x%.16"PRIx64").\n", vmi->kpgd);

    os_interface = safe_malloc(sizeof(struct os_interface));
    bzero(os_interface, sizeof(struct os_interface));
    os_interface->os_get_offset = linux_get_offset;
    os_interface->os_pid_to_pgd = linux_pid_to_pgd;
    os_interface->os_pgd_to_pid = linux_pgd_to_pid;
    os_interface->os_ksym2v = linux_symbol_to_address;
    os_interface->os_usym2rva = NULL;
    os_interface->os_v2sym = linux_system_map_address_to_symbol;
    os_interface->os_read_unicode_struct = NULL;
    os_interface->os_teardown = linux_teardown;

    vmi->os_interface = os_interface;

    return VMI_SUCCESS;

_exit:
    free(vmi->os_data);
    vmi->os_data = NULL;
    return VMI_FAILURE;
}
static void extract_ca_file(filedelete* f, drakvuf_t drakvuf, const drakvuf_trap_info_t* info, vmi_instance_t vmi, addr_t control_area, access_context_t* ctx, const char* filename, uint64_t fo_flags) { addr_t subsection = control_area + f->control_area_size; addr_t segment = 0; addr_t test = 0; addr_t test2 = 0; size_t filesize = 0; /* Check whether subsection points back to the control area */ ctx->addr = control_area + f->offsets[CONTROL_AREA_SEGMENT]; if ( VMI_FAILURE == vmi_read_addr(vmi, ctx, &segment) ) return; ctx->addr = segment + f->offsets[SEGMENT_CONTROLAREA]; if ( VMI_FAILURE == vmi_read_addr(vmi, ctx, &test) || test != control_area ) return; ctx->addr = segment + f->offsets[SEGMENT_SIZEOFSEGMENT]; if ( VMI_FAILURE == vmi_read_64(vmi, ctx, &test) ) return; ctx->addr = segment + f->offsets[SEGMENT_TOTALNUMBEROFPTES]; if ( VMI_FAILURE == vmi_read_32(vmi, ctx, (uint32_t*)&test2) ) return; if ( test != (test2 * 4096) ) return; const int curr_sequence_number = ++f->sequence_number; char* file = NULL; if ( asprintf(&file, "%s/file.%06d.mm", f->dump_folder, curr_sequence_number) < 0 ) return; FILE* fp = fopen(file, "w"); free(file); if (!fp) return; while (subsection) { /* Check whether subsection points back to the control area */ ctx->addr = subsection + f->offsets[SUBSECTION_CONTROLAREA]; if ( VMI_FAILURE == vmi_read_addr(vmi, ctx, &test) || test != control_area ) break; addr_t base = 0; addr_t start = 0; uint32_t ptes = 0; ctx->addr = subsection + f->offsets[SUBSECTION_SUBSECTIONBASE]; if ( VMI_FAILURE == vmi_read_addr(vmi, ctx, &base) ) break; ctx->addr = subsection + f->offsets[SUBSECTION_PTESINSUBSECTION]; if ( VMI_FAILURE == vmi_read_32(vmi, ctx, &ptes) ) break; ctx->addr = subsection + f->offsets[SUBSECTION_STARTINGSECTOR]; if ( VMI_FAILURE == vmi_read_32(vmi, ctx, (uint32_t*)&start) ) break; /* * The offset into the file is stored implicitely * based on the PTE's location within the Subsection. 
*/ addr_t subsection_offset = start * 0x200; addr_t ptecount; for (ptecount=0; ptecount < ptes; ptecount++) { addr_t pteoffset = base + f->mmpte_size * ptecount; addr_t fileoffset = subsection_offset + ptecount * 0x1000; addr_t pte = 0; ctx->addr = pteoffset; if ( VMI_FAILURE == vmi_read(vmi, ctx, f->mmpte_size, &pte, NULL) ) break; if ( ENTRY_PRESENT(1, pte) ) { uint8_t page[4096]; if ( VMI_FAILURE == vmi_read_pa(vmi, VMI_BIT_MASK(12,48) & pte, 4096, &page, NULL) ) continue; if ( !fseek ( fp, fileoffset, SEEK_SET ) ) { if ( fwrite(page, 4096, 1, fp) ) filesize = MAX(filesize, fileoffset + 4096); } } } ctx->addr = subsection + f->offsets[SUBSECTION_NEXTSUBSECTION]; if ( !vmi_read_addr(vmi, ctx, &subsection) ) break; } fclose(fp); print_extraction_information(f, drakvuf, info, filename, filesize, fo_flags, curr_sequence_number); save_file_metadata(f, info, curr_sequence_number, control_area, filename, filesize, fo_flags); }
/* Page-table base address from a PAE PDE: bits 12-35. */
static inline uint64_t
ptba_base_pae (uint64_t pde)
{
    uint64_t base = pde & VMI_BIT_MASK(12,35);
    return base;
}
// 2nd Level Page Table Index (Fine Pages)
//
// A fine second-level table holds 1024 entries; the index comes from
// VA bits 10-19.
static inline uint32_t
fine_second_level_table_index(uint32_t vaddr)
{
    uint32_t shifted = vaddr >> 10;
    return shifted & VMI_BIT_MASK(0,9);
}
/* page */
// Page-frame base of a non-PAE PTE: bits 12-31 of the entry.
static inline uint32_t
pte_pfn_nopae (uint32_t pte)
{
    uint32_t frame = pte & VMI_BIT_MASK(12,31);
    return frame;
}
// Based on ARM Reference Manual // Chapter B4 Virtual Memory System Architecture // B4.7 Hardware page table translation status_t v2p_aarch32 (vmi_instance_t vmi, addr_t dtb, addr_t vaddr, page_info_t *info) { status_t status = VMI_FAILURE; dbprint(VMI_DEBUG_PTLOOKUP, "--ARM AArch32 PTLookup: vaddr = 0x%.16"PRIx64", dtb = 0x%.16"PRIx64"\n", vaddr, dtb); get_first_level_descriptor(vmi, dtb, vaddr, info); dbprint(VMI_DEBUG_PTLOOKUP, "--ARM AArch32 PTLookup: fld_location = 0x%"PRIx32"\n", info->arm_aarch32.fld_location); dbprint(VMI_DEBUG_PTLOOKUP, "--ARM AArch32 PTLookup: fld_value = 0x%"PRIx32"\n", info->arm_aarch32.fld_value); switch (info->arm_aarch32.fld_value & VMI_BIT_MASK(0,1)) { case 0b01: { dbprint(VMI_DEBUG_PTLOOKUP, "--ARM AArch32 PTLookup: the entry gives the physical address of a coarse second-level table\n"); get_coarse_second_level_descriptor(vmi, vaddr, info); dbprint(VMI_DEBUG_PTLOOKUP, "--ARM AArch32 PTLookup: l2d = 0x%"PRIx32"\n", info->arm_aarch32.sld_value); switch (info->arm_aarch32.sld_value & VMI_BIT_MASK(0,1)) { case 0b01: // large page info->size = VMI_PS_64KB; info->paddr = (info->arm_aarch32.sld_value & VMI_BIT_MASK(16,31)) | (vaddr & VMI_BIT_MASK(0,15)); status = VMI_SUCCESS; break; case 0b10: case 0b11: // small page info->size = VMI_PS_4KB; info->paddr = (info->arm_aarch32.sld_value & VMI_BIT_MASK(12,31)) | (vaddr & VMI_BIT_MASK(0,11)); status = VMI_SUCCESS; default: break; } break; } case 0b10: { if (!VMI_GET_BIT(info->arm_aarch32.fld_value, 18)) { dbprint(VMI_DEBUG_PTLOOKUP, "--ARM AArch32 PTLookup: the entry is a section descriptor for its associated modified virtual addresses\n"); info->size = VMI_PS_1MB; info->paddr = (info->arm_aarch32.fld_value & VMI_BIT_MASK(20,31)) | (vaddr & VMI_BIT_MASK(0,19)); } else { dbprint(VMI_DEBUG_PTLOOKUP, "--ARM AArch32 PTLookup: the entry is a supersection descriptor for its associated modified virtual addresses\n"); info->size = VMI_PS_16MB; info->paddr = (info->arm_aarch32.fld_value & 
VMI_BIT_MASK(24,31)) | (vaddr & VMI_BIT_MASK(0,23)); } status = VMI_SUCCESS; break; } case 0b11: { dbprint(VMI_DEBUG_PTLOOKUP, "--ARM AArch32 PTLookup: the entry gives the physical address of a fine second-level table\n"); get_fine_second_level_descriptor(vmi, vaddr, info); dbprint(VMI_DEBUG_PTLOOKUP, "--ARM AArch32 PTLookup: sld = 0x%"PRIx32"\n", info->arm_aarch32.sld_value); switch (info->arm_aarch32.sld_value & VMI_BIT_MASK(0,1)) { case 0b01: // large page info->size = VMI_PS_64KB; info->paddr = (info->arm_aarch32.sld_value & VMI_BIT_MASK(16,31)) | (vaddr & VMI_BIT_MASK(0,15)); status = VMI_SUCCESS; break; case 0b10: // small page info->size = VMI_PS_4KB; info->paddr = (info->arm_aarch32.sld_value & VMI_BIT_MASK(12,31)) | (vaddr & VMI_BIT_MASK(0,11)); status = VMI_SUCCESS; break; case 0b11: // tiny page info->size = VMI_PS_1KB; info->paddr = (info->arm_aarch32.sld_value & VMI_BIT_MASK(10,31)) | (vaddr & VMI_BIT_MASK(0,9)); status = VMI_SUCCESS; break; default: break; } break; } default: break; } dbprint(VMI_DEBUG_PTLOOKUP, "--ARM PTLookup: PA = 0x%"PRIx64"\n", info->paddr); return status; }
/* Byte offset of vaddr's page-table entry: VA bits 12-20 select the entry;
 * <<3 scales the 9-bit index to an 8-byte entry offset. */
static inline uint64_t
get_pt_index_ia32e (addr_t vaddr)
{
    uint64_t index = (vaddr >> 12) & VMI_BIT_MASK(0,8);
    return index << 3;
}