// Based on ARM Reference Manual
// Chapter B4 Virtual Memory System Architecture
// B4.7 Hardware page table translation
status_t v2p_aarch32 (vmi_instance_t vmi,
                      addr_t dtb,
                      addr_t vaddr,
                      page_info_t *info)
{
    status_t status = VMI_FAILURE;

    dbprint(VMI_DEBUG_PTLOOKUP, "--ARM AArch32 PTLookup: vaddr = 0x%.16"PRIx64", dtb = 0x%.16"PRIx64"\n",
            vaddr, dtb);

    get_first_level_descriptor(vmi, dtb, vaddr, info);

    dbprint(VMI_DEBUG_PTLOOKUP, "--ARM AArch32 PTLookup: fld_location = 0x%"PRIx32"\n",
            info->arm_aarch32.fld_location);
    dbprint(VMI_DEBUG_PTLOOKUP, "--ARM AArch32 PTLookup: fld_value = 0x%"PRIx32"\n",
            info->arm_aarch32.fld_value);

    switch (info->arm_aarch32.fld_value & VMI_BIT_MASK(0,1)) {

        case 0b01: {
            dbprint(VMI_DEBUG_PTLOOKUP, "--ARM AArch32 PTLookup: the entry gives the physical address of a coarse second-level table\n");

            get_coarse_second_level_descriptor(vmi, vaddr, info);

            dbprint(VMI_DEBUG_PTLOOKUP, "--ARM AArch32 PTLookup: l2d = 0x%"PRIx32"\n",
                    info->arm_aarch32.sld_value);

            switch (info->arm_aarch32.sld_value & VMI_BIT_MASK(0,1)) {
                case 0b01: // large page
                    info->size = VMI_PS_64KB;
                    info->paddr = (info->arm_aarch32.sld_value & VMI_BIT_MASK(16,31)) | (vaddr & VMI_BIT_MASK(0,15));
                    status = VMI_SUCCESS;
                    break;
                case 0b10:
                case 0b11: // small page
                    info->size = VMI_PS_4KB;
                    info->paddr = (info->arm_aarch32.sld_value & VMI_BIT_MASK(12,31)) | (vaddr & VMI_BIT_MASK(0,11));
                    status = VMI_SUCCESS;
                    break;
                default:
                    break;
            }
            break;
        }

        case 0b10: {
            if (!VMI_GET_BIT(info->arm_aarch32.fld_value, 18)) {
                dbprint(VMI_DEBUG_PTLOOKUP, "--ARM AArch32 PTLookup: the entry is a section descriptor for its associated modified virtual addresses\n");

                info->size = VMI_PS_1MB;
                info->paddr = (info->arm_aarch32.fld_value & VMI_BIT_MASK(20,31)) | (vaddr & VMI_BIT_MASK(0,19));
            } else {
                dbprint(VMI_DEBUG_PTLOOKUP, "--ARM AArch32 PTLookup: the entry is a supersection descriptor for its associated modified virtual addresses\n");

                info->size = VMI_PS_16MB;
                info->paddr = (info->arm_aarch32.fld_value & VMI_BIT_MASK(24,31)) | (vaddr & VMI_BIT_MASK(0,23));
            }
            status = VMI_SUCCESS;
            break;
        }

        case 0b11: {
            dbprint(VMI_DEBUG_PTLOOKUP, "--ARM AArch32 PTLookup: the entry gives the physical address of a fine second-level table\n");

            get_fine_second_level_descriptor(vmi, vaddr, info);

            dbprint(VMI_DEBUG_PTLOOKUP, "--ARM AArch32 PTLookup: sld = 0x%"PRIx32"\n",
                    info->arm_aarch32.sld_value);

            switch (info->arm_aarch32.sld_value & VMI_BIT_MASK(0,1)) {
                case 0b01: // large page
                    info->size = VMI_PS_64KB;
                    info->paddr = (info->arm_aarch32.sld_value & VMI_BIT_MASK(16,31)) | (vaddr & VMI_BIT_MASK(0,15));
                    status = VMI_SUCCESS;
                    break;
                case 0b10: // small page
                    info->size = VMI_PS_4KB;
                    info->paddr = (info->arm_aarch32.sld_value & VMI_BIT_MASK(12,31)) | (vaddr & VMI_BIT_MASK(0,11));
                    status = VMI_SUCCESS;
                    break;
                case 0b11: // tiny page
                    info->size = VMI_PS_1KB;
                    info->paddr = (info->arm_aarch32.sld_value & VMI_BIT_MASK(10,31)) | (vaddr & VMI_BIT_MASK(0,9));
                    status = VMI_SUCCESS;
                    break;
                default:
                    break;
            }
            break;
        }

        default:
            break;
    }

    dbprint(VMI_DEBUG_PTLOOKUP, "--ARM PTLookup: PA = 0x%"PRIx64"\n", info->paddr);

    return status;
}
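/*
 * Illustrative sketch, not part of LibVMI: every short-descriptor case above
 * composes the physical address the same way -- base bits come from the
 * descriptor, offset bits come from the virtual address. The hypothetical
 * helper below spells that out for the 1MB section case with explicit masks
 * (equivalent to VMI_BIT_MASK(20,31) and VMI_BIT_MASK(0,19)); the demo_ name
 * is invented for this example only.
 */
static inline uint64_t
demo_compose_section_paddr(uint32_t fld_value, uint32_t vaddr)
{
    /* section base: descriptor bits [31:20]; page offset: vaddr bits [19:0] */
    return (uint64_t)((fld_value & 0xFFF00000u) | (vaddr & 0x000FFFFFu));
}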
/*
 * check that this vm uses a paging method that we support
 * and set the pm/cr3/pae/pse/lme fields on the vmi instance
 */
status_t probe_memory_layout_x86(vmi_instance_t vmi)
{
    // To get the paging layout, the following bits are needed:
    // 1. CR0.PG
    // 2. CR4.PAE
    // 3. Either (a) IA32_EFER.LME, or (b) the guest's address width (32 or
    //    64). Not all backends allow us to read an MSR; in particular, Xen's
    //    PV backend doesn't.

    status_t ret = VMI_FAILURE;
    page_mode_t pm = VMI_PM_UNKNOWN;
    uint8_t dom_addr_width = 0; // domain address width (bytes)

    /* pull info from registers, if we can */
    reg_t cr0, cr3, cr4, efer;
    int pae = 0, pse = 0, lme = 0;
    uint8_t msr_efer_lme = 0;   // LME bit in MSR_EFER

    /* get the control register values */
    if (driver_get_vcpureg(vmi, &cr0, CR0, 0) == VMI_FAILURE) {
        errprint("**failed to get CR0\n");
        goto _exit;
    }

    /* PG Flag --> CR0, bit 31 == 1 --> paging enabled */
    if (!VMI_GET_BIT(cr0, 31)) {
        dbprint(VMI_DEBUG_CORE, "Paging disabled for this VM, only physical addresses supported.\n");
        vmi->page_mode = VMI_PM_UNKNOWN;
        vmi->pae = 0;
        vmi->pse = 0;
        vmi->lme = 0;

        ret = VMI_SUCCESS;
        goto _exit;
    }

    //
    // Paging enabled (PG==1)
    //
    if (driver_get_vcpureg(vmi, &cr4, CR4, 0) == VMI_FAILURE) {
        errprint("**failed to get CR4\n");
        goto _exit;
    }

    /* PAE Flag --> CR4, bit 5 */
    pae = VMI_GET_BIT(cr4, 5);
    dbprint(VMI_DEBUG_CORE, "**set pae = %d\n", pae);

    /* PSE Flag --> CR4, bit 4 */
    pse = VMI_GET_BIT(cr4, 4);
    dbprint(VMI_DEBUG_CORE, "**set pse = %d\n", pse);

    ret = driver_get_vcpureg(vmi, &efer, MSR_EFER, 0);
    if (VMI_SUCCESS == ret) {
        lme = VMI_GET_BIT(efer, 8);
        dbprint(VMI_DEBUG_CORE, "**set lme = %d\n", lme);
    } else {
        dbprint(VMI_DEBUG_CORE, "**failed to get MSR_EFER, trying method #2\n");

        // does this trick work in all cases?
        ret = driver_get_address_width(vmi, &dom_addr_width);
        if (VMI_FAILURE == ret) {
            errprint("Failed to get domain address width. Giving up.\n");
            goto _exit;
        }
        lme = (8 == dom_addr_width);
        dbprint(VMI_DEBUG_CORE, "**found guest address width is %d bytes; assuming IA32_EFER.LME = %d\n",
                dom_addr_width, lme);
    } // if

    // Get current cr3 for sanity checking
    if (driver_get_vcpureg(vmi, &cr3, CR3, 0) == VMI_FAILURE) {
        errprint("**failed to get CR3\n");
        goto _exit;
    }

    // now determine addressing mode
    if (0 == pae) {
        dbprint(VMI_DEBUG_CORE, "**32-bit paging\n");
        pm = VMI_PM_LEGACY;
        cr3 &= 0xFFFFF000ull;
    }
    // PAE == 1; determine IA-32e or PAE
    else if (lme) {     // PAE == 1, LME == 1
        dbprint(VMI_DEBUG_CORE, "**IA-32e paging\n");
        pm = VMI_PM_IA32E;
        cr3 &= 0xFFFFFFFFFFFFF000ull;
    } else {            // PAE == 1, LME == 0
        dbprint(VMI_DEBUG_CORE, "**PAE paging\n");
        pm = VMI_PM_PAE;
        cr3 &= 0xFFFFFFE0;
    } // if-else

    dbprint(VMI_DEBUG_CORE, "**sanity checking cr3 = 0x%.16"PRIx64"\n", cr3);

    /* testing to see CR3 value */
    if (!driver_is_pv(vmi) && cr3 >= vmi->max_physical_address) {   // sanity check on CR3
        dbprint(VMI_DEBUG_CORE, "** Note cr3 value [0x%"PRIx64"] exceeds memsize [0x%"PRIx64"]\n",
                cr3, vmi->size);
    }

    vmi->page_mode = pm;
    vmi->pae = pae;
    vmi->pse = pse;
    vmi->lme = lme;

_exit:
    return ret;
}
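/*
 * Illustrative sketch, not part of LibVMI: the mode selection above boils
 * down to three bits (CR0.PG, CR4.PAE, IA32_EFER.LME). The hypothetical
 * helper below captures that decision table without touching any driver
 * state; the demo_ name is invented for this example only.
 *
 *   PG == 0                      -> no paging (physical addresses only)
 *   PG == 1, PAE == 0            -> 32-bit legacy paging
 *   PG == 1, PAE == 1, LME == 0  -> PAE paging
 *   PG == 1, PAE == 1, LME == 1  -> IA-32e (long mode) paging
 */
static inline page_mode_t
demo_x86_page_mode(int pg, int pae, int lme)
{
    if (!pg)
        return VMI_PM_UNKNOWN;  /* paging disabled */
    if (!pae)
        return VMI_PM_LEGACY;   /* 32-bit paging */
    return lme ? VMI_PM_IA32E : VMI_PM_PAE;
}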
static status_t init_task_kaslr_test(vmi_instance_t vmi, addr_t page_vaddr)
{
    status_t ret = VMI_FAILURE;
    uint32_t pid;
    addr_t init_task = page_vaddr + (vmi->init_task & VMI_BIT_MASK(0,11));
    linux_instance_t linux_instance = vmi->os_data;

    access_context_t ctx = {
        .translate_mechanism = VMI_TM_PROCESS_DTB,
        .dtb = vmi->kpgd,
    };

    ctx.addr = init_task + linux_instance->pid_offset;
    if ( VMI_FAILURE == vmi_read_32(vmi, &ctx, &pid) )
        return ret;

    if ( pid )
        return ret;

    ctx.addr = init_task + linux_instance->name_offset;
    char* init_task_name = vmi_read_str(vmi, &ctx);
    if ( init_task_name && !strncmp("swapper", init_task_name, 7) )
        ret = VMI_SUCCESS;
    free(init_task_name);

    return ret;
}

status_t init_kaslr(vmi_instance_t vmi)
{
    /*
     * Let's check if we can translate init_task first as is.
     */
    uint32_t test;
    access_context_t ctx = {
        .translate_mechanism = VMI_TM_PROCESS_DTB,
        .dtb = vmi->kpgd,
        .addr = vmi->init_task,
    };

    if ( VMI_SUCCESS == vmi_read_32(vmi, &ctx, &test) )
        return VMI_SUCCESS;

    status_t ret = VMI_FAILURE;
    linux_instance_t linux_instance = vmi->os_data;
    GSList *loop, *pages = vmi_get_va_pages(vmi, vmi->kpgd);
    loop = pages;

    while (loop) {
        page_info_t *info = loop->data;

        if ( !linux_instance->kaslr_offset ) {
            switch (vmi->page_mode) {
                case VMI_PM_AARCH64:
                case VMI_PM_IA32E:
                    if ( VMI_GET_BIT(info->vaddr, 47) )
                        ret = init_task_kaslr_test(vmi, info->vaddr);
                    break;
                default:
                    ret = init_task_kaslr_test(vmi, info->vaddr);
                    break;
            }

            if ( VMI_SUCCESS == ret ) {
                linux_instance->kaslr_offset = info->vaddr - (vmi->init_task & ~VMI_BIT_MASK(0,11));
                vmi->init_task += linux_instance->kaslr_offset;
                dbprint(VMI_DEBUG_MISC, "**calculated KASLR offset: 0x%"PRIx64"\n",
                        linux_instance->kaslr_offset);
            }
        }

        g_free(info);
        loop = loop->next;
    }

    g_slist_free(pages);

    return ret;
}
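/*
 * Illustrative sketch, not part of LibVMI: once init_task_kaslr_test()
 * identifies the page that really holds init_task, init_kaslr() derives the
 * KASLR slide by subtracting the page-aligned System.map address from that
 * page's virtual address. The hypothetical helper below shows only that
 * arithmetic; the demo_ name is invented for this example only.
 */
static inline addr_t
demo_kaslr_offset(addr_t found_page_vaddr, addr_t mapfile_init_task)
{
    /* slide = page where init_task was found minus its page-aligned static address */
    return found_page_vaddr - (mapfile_init_task & ~VMI_BIT_MASK(0,11));
}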
status_t linux_init(vmi_instance_t vmi)
{
    status_t rc;
    os_interface_t os_interface = NULL;

    if (vmi->config == NULL) {
        errprint("No config table found\n");
        return VMI_FAILURE;
    }

    if (vmi->os_data != NULL) {
        errprint("os data already initialized, reinitializing\n");
        free(vmi->os_data);
    }

    vmi->os_data = safe_malloc(sizeof(struct linux_instance));
    bzero(vmi->os_data, sizeof(struct linux_instance));
    linux_instance_t linux_instance = vmi->os_data;

    g_hash_table_foreach(vmi->config, (GHFunc)linux_read_config_ghashtable_entries, vmi);

    if (linux_instance->rekall_profile)
        rc = init_from_rekall_profile(vmi);
    else
        rc = linux_symbol_to_address(vmi, "init_task", NULL, &vmi->init_task);

    if (VMI_FAILURE == rc) {
        errprint("Could not get init_task from Rekall profile or System.map\n");
        goto _exit;
    }

    vmi->init_task = canonical_addr(vmi->init_task);

#if defined(ARM32) || defined(ARM64)
    rc = driver_get_vcpureg(vmi, &vmi->kpgd, TTBR1, 0);
#elif defined(I386) || defined(X86_64)
    rc = driver_get_vcpureg(vmi, &vmi->kpgd, CR3, 0);
#endif

    /*
     * The driver failed to get us a pagetable.
     * As a fall-back, try to init using heuristics.
     * This path is taken in FILE mode as well.
     */
    if (VMI_FAILURE == rc) {
        if (VMI_FAILURE == linux_filemode_init(vmi))
            goto _exit;
    }

    if ( VMI_FAILURE == init_kaslr(vmi) ) {
        dbprint(VMI_DEBUG_MISC, "**failed to determine KASLR offset\n");
        goto _exit;
    }

    dbprint(VMI_DEBUG_MISC, "**set vmi->kpgd (0x%.16"PRIx64").\n", vmi->kpgd);

    os_interface = safe_malloc(sizeof(struct os_interface));
    bzero(os_interface, sizeof(struct os_interface));
    os_interface->os_get_offset = linux_get_offset;
    os_interface->os_pid_to_pgd = linux_pid_to_pgd;
    os_interface->os_pgd_to_pid = linux_pgd_to_pid;
    os_interface->os_ksym2v = linux_symbol_to_address;
    os_interface->os_usym2rva = NULL;
    os_interface->os_v2sym = linux_system_map_address_to_symbol;
    os_interface->os_read_unicode_struct = NULL;
    os_interface->os_teardown = linux_teardown;

    vmi->os_interface = os_interface;

    return VMI_SUCCESS;

_exit:
    free(vmi->os_data);
    vmi->os_data = NULL;

    return VMI_FAILURE;
}
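/*
 * Illustrative sketch, not part of LibVMI: linux_init() publishes the
 * OS-specific entry points through the os_interface function-pointer table.
 * The hypothetical helper below shows how a caller-side dispatch through that
 * table might look, assuming the addr_t (vmi, pid) signature that
 * linux_pid_to_pgd is assigned with above; the demo_ name is invented for
 * this example only.
 */
static inline addr_t
demo_dispatch_pid_to_pgd(vmi_instance_t vmi, vmi_pid_t pid)
{
    /* no OS layer (or no handler) was initialized: nothing to translate */
    if (!vmi->os_interface || !vmi->os_interface->os_pid_to_pgd)
        return 0;

    return vmi->os_interface->os_pid_to_pgd(vmi, pid);
}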