/*
 * Scan physical memory page-by-page, starting at page_paddr, for the
 * Windows kernel image: a page that parses as a PE header and whose
 * export directory name is "ntoskrnl.exe".
 *
 * @param vmi         LibVMI instance handle
 * @param page_paddr  physical address at which to begin the scan
 * @return physical base address of ntoskrnl, or 0 if not found
 */
addr_t get_ntoskrnl_base(
    vmi_instance_t vmi,
    addr_t page_paddr)
{
    addr_t ret = 0;
    access_context_t ctx = {
        .translate_mechanism = VMI_TM_NONE,
        .addr = page_paddr
    };

    for (; ctx.addr + VMI_PS_4KB < vmi->max_physical_address; ctx.addr += VMI_PS_4KB) {
        /* Single page buffer; the original declared this twice (an outer
         * copy was shadowed by this one and never used). */
        uint8_t page[VMI_PS_4KB];

        /* Skip pages that do not parse as a PE image. */
        if (VMI_FAILURE == peparse_get_image(vmi, &ctx, VMI_PS_4KB, page))
            continue;

        struct pe_header *pe_header = NULL;
        struct dos_header *dos_header = NULL;
        void *optional_pe_header = NULL;
        uint16_t optional_header_type = 0;
        struct export_table et;

        peparse_assign_headers(page, &dos_header, &pe_header,
                               &optional_header_type, &optional_pe_header,
                               NULL, NULL);

        addr_t export_header_offset =
            peparse_get_idd_rva(IMAGE_DIRECTORY_ENTRY_EXPORT,
                                &optional_header_type, optional_pe_header,
                                NULL, NULL);

        if (!export_header_offset ||
            ctx.addr + export_header_offset >= vmi->max_physical_address)
            continue;

        uint32_t nbytes = vmi_read_pa(vmi, ctx.addr + export_header_offset,
                                      &et, sizeof(struct export_table));
        if (nbytes != sizeof(struct export_table))
            continue;

        /* export_flags is reserved and must be 0; name RVA must be non-zero. */
        if (et.export_flags || !et.name)
            continue;

        /* Ensure the 12-byte name read stays within physical memory. */
        if (ctx.addr + et.name + 12 >= vmi->max_physical_address)
            continue;

        unsigned char name[13] = {0};
        /* Best-effort read: on short/failed read the buffer stays zeroed
         * and the strcmp below simply fails to match. */
        vmi_read_pa(vmi, ctx.addr + et.name, name, 12);

        if (!strcmp("ntoskrnl.exe", (char*)name)) {
            ret = ctx.addr;
            break;
        }
    }

    return ret;
}
/*
 * Read a PE image's export directory table and optionally report its
 * RVA and size to the caller.
 *
 * @param vmi                LibVMI instance handle
 * @param ctx                access context addressing the PE image base
 * @param et                 out: export directory table (always filled on success)
 * @param export_table_rva   out (optional, may be NULL): RVA of the export header
 * @param export_table_size  out (optional, may be NULL): size of the export header
 * @return VMI_SUCCESS on success, VMI_FAILURE otherwise
 */
status_t peparse_get_export_table(
    vmi_instance_t vmi,
    const access_context_t *ctx,
    struct export_table *et,
    addr_t *export_table_rva,
    size_t *export_table_size)
{
    // Note: this function assumes a "normal" PE where all the headers are in
    // the first page of the PE and the field DosHeader.OffsetToPE points to
    // an address in the first page.

    access_context_t _ctx = *ctx;
    addr_t export_header_rva = 0;
    size_t export_header_size = 0;

#define MAX_HEADER_BYTES 1024   // keep under 1 page
    uint8_t image[MAX_HEADER_BYTES];

    if (VMI_FAILURE == peparse_get_image(vmi, ctx, MAX_HEADER_BYTES, image)) {
        return VMI_FAILURE;
    }

    void *optional_header = NULL;
    uint16_t magic = 0;
    peparse_assign_headers(image, NULL, NULL, &magic, &optional_header, NULL, NULL);
    export_header_rva = peparse_get_idd_rva(IMAGE_DIRECTORY_ENTRY_EXPORT,
                                            &magic, optional_header, NULL, NULL);
    export_header_size = peparse_get_idd_size(IMAGE_DIRECTORY_ENTRY_EXPORT,
                                              &magic, optional_header, NULL, NULL);

    if (export_table_rva) {
        *export_table_rva = export_header_rva;
    }

    if (export_table_size) {
        *export_table_size = export_header_size;
    }

    /* Cast: export_header_size is size_t; passing it straight to a PRIu64
     * conversion is undefined behavior where size_t is not 64-bit. */
    dbprint(VMI_DEBUG_PEPARSE, "--PEParse: DLL base 0x%.16"PRIx64". Export header [RVA] 0x%.16"PRIx64". Size %" PRIu64 ".\n",
            ctx->addr, export_header_rva, (uint64_t)export_header_size);

    _ctx.addr = ctx->addr + export_header_rva;
    if ( VMI_FAILURE == vmi_read(vmi, &_ctx, sizeof(struct export_table), et, NULL) ) {
        dbprint(VMI_DEBUG_PEPARSE, "--PEParse: failed to map export header\n");

        /*
         * Sometimes Windows maps the export table on page-boundaries,
         * such that the first export_flags field (which is reserved) is
         * not actually accessible (the page is not mapped). See Issue #260.
         */
        if (!((_ctx.addr + 4) & 0xfff)) {
            dbprint(VMI_DEBUG_PEPARSE, "--PEParse: export table is mapped on page boundary\n");
            _ctx.addr += 4;

            if ( VMI_FAILURE == vmi_read(vmi, &_ctx, sizeof(struct export_table) - 4,
                                         (void*)((char*)et + 4), NULL) ) {
                dbprint(VMI_DEBUG_PEPARSE, "--PEParse: still failed to map export header\n");
                return VMI_FAILURE;
            }

            // Manually set the reserved field to zero in this case
            et->export_flags = 0;
        } else {
            return VMI_FAILURE;
        }
    }

    /* sanity check: export_flags is reserved (must be 0), name must be set */
    if (et->export_flags || !et->name) {
        dbprint(VMI_DEBUG_PEPARSE, "--PEParse: bad export directory table\n");
        return VMI_FAILURE;
    }

    return VMI_SUCCESS;
}
/*
 * Locate the KdDebuggerDataBlock ("KDBG") by walking physical memory
 * outward from the KPCR (taken from GS_BASE/FS_BASE on a live guest),
 * finding the page-aligned ntoskrnl PE image, then string-searching its
 * .data section for the "KDBG" tag.
 *
 * @param vmi        LibVMI instance handle (live mode only; file mode fails)
 * @param kdbg_pa    out: physical address of the KdDebuggerDataBlock
 * @param kernel_pa  out: physical base address of the kernel image
 * @param kernel_va  out: virtual base address of the kernel (KernBase)
 * @return VMI_SUCCESS if found and verified, VMI_FAILURE otherwise
 */
status_t find_kdbg_address_faster(
    vmi_instance_t vmi,
    addr_t *kdbg_pa,
    addr_t *kernel_pa,
    addr_t *kernel_va)
{
    dbprint(VMI_DEBUG_MISC, "**Trying find_kdbg_address_faster\n");

    status_t ret = VMI_FAILURE;

    // This scan requires the location of the KPCR
    // which we get from the GS/FS register on live machines.
    // For file mode this needs to be further investigated.
    if (VMI_FILE == vmi->mode) {
        return ret;
    }

    void *bm = boyer_moore_init((unsigned char *)"KDBG", 4);
    int find_ofs = 0x10;   // "KDBG" tag sits 0x10 bytes into the data block

    reg_t cr3 = 0, fsgs = 0;
    if (VMI_FAILURE == driver_get_vcpureg(vmi, &cr3, CR3, 0)) {
        goto done;
    }

    switch ( vmi->page_mode ) {
        case VMI_PM_IA32E:
            if (VMI_FAILURE == driver_get_vcpureg(vmi, &fsgs, GS_BASE, 0))
                goto done;
            break;
        case VMI_PM_LEGACY: /* Fall-through */
        case VMI_PM_PAE:
            if (VMI_FAILURE == driver_get_vcpureg(vmi, &fsgs, FS_BASE, 0))
                goto done;
            break;
        default:
            goto done;
    }

    // We start the search from the KPCR, which has to be mapped into the kernel.
    // We further know that the Windows kernel is page aligned
    // so we are just checking if the page has a valid PE header
    // and if the first item in the export table is "ntoskrnl.exe".
    // Once the kernel is found, we find the .data section
    // and limit the string search for "KDBG" into that region.

    // start searching at the lower part from the kpcr
    // then switch to the upper part if needed
    int step = -VMI_PS_4KB;
    addr_t page_paddr;
    access_context_t ctx = {
        .translate_mechanism = VMI_TM_NONE,
    };

scan:
    if ( VMI_FAILURE == vmi_pagetable_lookup(vmi, cr3, fsgs, &page_paddr) )
        goto done;

    page_paddr &= ~VMI_BIT_MASK(0,11);

    for (; page_paddr + step < vmi->max_physical_address; page_paddr += step) {

        uint8_t page[VMI_PS_4KB];
        ctx.addr = page_paddr;

        status_t rc = peparse_get_image(vmi, &ctx, VMI_PS_4KB, page);
        if (VMI_FAILURE == rc) {
            continue;
        }

        struct pe_header *pe_header = NULL;
        struct dos_header *dos_header = NULL;
        void *optional_pe_header = NULL;
        uint16_t optional_header_type = 0;
        struct export_table et;

        peparse_assign_headers(page, &dos_header, &pe_header,
                               &optional_header_type, &optional_pe_header,
                               NULL, NULL);

        addr_t export_header_offset =
            peparse_get_idd_rva(IMAGE_DIRECTORY_ENTRY_EXPORT,
                                &optional_header_type, optional_pe_header,
                                NULL, NULL);

        if (!export_header_offset ||
            page_paddr + export_header_offset >= vmi->max_physical_address)
            continue;

        if ( VMI_SUCCESS == vmi_read_pa(vmi, page_paddr + export_header_offset,
                                        sizeof(struct export_table), &et, NULL)) {
            /* Skip the page if the export table is invalid (reserved
             * export_flags set, or no name RVA) OR the name read would run
             * past physical memory. The original only skipped valid-but-
             * out-of-bounds tables and let invalid ones fall through. */
            if ( (et.export_flags || !et.name) ||
                 page_paddr + et.name + 12 >= vmi->max_physical_address )
                continue;

            unsigned char name[13] = {0};
            if ( VMI_FAILURE == vmi_read_pa(vmi, page_paddr + et.name, 12, name, NULL) )
                continue;

            if (strcmp("ntoskrnl.exe", (const char *)name)) {
                continue;
            }
        } else {
            continue;
        }

        /* Found the kernel image: walk its section headers for ".data". */
        uint32_t c;
        for (c = 0; c < pe_header->number_of_sections; c++) {

            struct section_header section;
            addr_t section_addr = page_paddr
                                  + dos_header->offset_to_pe
                                  + sizeof(struct pe_header)
                                  + pe_header->size_of_optional_header
                                  + c * sizeof(struct section_header);

            // Read the section header from memory
            if ( VMI_FAILURE == vmi_read_pa(vmi, section_addr,
                                            sizeof(struct section_header),
                                            (uint8_t *)&section, NULL) )
                continue;

            // ".data" check (bytes 2E 64 61 74 61)
            if (memcmp(section.short_name, "\x2E\x64\x61\x74\x61", 5) != 0) {
                continue;
            }

            /* NOTE(review): size_of_raw_data comes from guest memory and is
             * attacker-influenced; a huge value here can overflow the stack.
             * Consider a sanity cap or heap allocation. */
            uint8_t *haystack = alloca(section.size_of_raw_data);
            if ( VMI_FAILURE == vmi_read_pa(vmi, page_paddr + section.virtual_address,
                                            section.size_of_raw_data, haystack, NULL) )
                continue;

            int match_offset = boyer_moore2(bm, haystack, section.size_of_raw_data);

            if (-1 != match_offset) {
                // We found the structure, but let's verify it.
                // The kernel is always mapped into VA at the same offset
                // it is found on physical memory + the kernel boundary.

                // Read "KernBase" from the haystack
                uint64_t *kernbase =
                    (uint64_t *)&haystack[(unsigned int) match_offset + sizeof(uint64_t)];
                int zeroes = __builtin_clzll(page_paddr);

                if ((*kernbase) << zeroes == page_paddr << zeroes) {
                    *kernel_pa = page_paddr;
                    *kernel_va = *kernbase;
                    *kdbg_pa = page_paddr + section.virtual_address
                               + (unsigned int) match_offset - find_ofs;

                    ret = VMI_SUCCESS;

                    dbprint(VMI_DEBUG_MISC,
                            "--Found KdDebuggerDataBlock at PA %.16"PRIx64"\n", *kdbg_pa);
                    goto done;
                } else {
                    dbprint(VMI_DEBUG_MISC,
                            "--WARNING: KernBase in KdDebuggerDataBlock at PA %.16"PRIx64" doesn't point back to this page.\n",
                            page_paddr + section.virtual_address
                            + (unsigned int) match_offset - find_ofs);
                }
            }

            break;   // only the first .data section is searched
        }
    }

    // lower part exhausted: rescan upward from the KPCR
    if (step < 0) {
        step = VMI_PS_4KB;
        goto scan;
    }

done:
    boyer_moore_fini(bm);
    return ret;
}
int main(int argc, char **argv) { vmi_instance_t vmi = NULL; vmi_mode_t mode; /* this is the VM that we are looking at */ if (argc != 3) { printf("Usage: %s name|domid <domain name|domain id>\n", argv[0]); return 1; } // if void *domain; uint64_t domid = VMI_INVALID_DOMID; uint64_t init_flags = 0; if(strcmp(argv[1],"name")==0) { domain = (void*)argv[2]; init_flags |= VMI_INIT_DOMAINNAME; } else if(strcmp(argv[1],"domid")==0) { domid = strtoull(argv[2], NULL, 0); domain = (void*)&domid; init_flags |= VMI_INIT_DOMAINID; } else { printf("You have to specify either name or domid!\n"); return 1; } if (VMI_FAILURE == vmi_get_access_mode(vmi, domain, init_flags, NULL, &mode) ) return 1; /* initialize the libvmi library */ if (VMI_FAILURE == vmi_init(&vmi, mode, domain, init_flags, NULL, NULL)) { printf("Failed to init LibVMI library.\n"); return 1; } max_mem = vmi_get_max_physical_address(vmi); /* the nice thing about the windows kernel is that it's page aligned */ uint32_t found = 0; access_context_t ctx = { .translate_mechanism = VMI_TM_NONE, }; for(ctx.addr = 0; ctx.addr < max_mem; ctx.addr += PAGE_SIZE) { uint8_t pe[MAX_HEADER_SIZE]; if(VMI_SUCCESS == peparse_get_image(vmi, &ctx, MAX_HEADER_SIZE, pe)) { if(VMI_SUCCESS == is_WINDOWS_KERNEL(vmi, ctx.addr, pe)) { printf("Windows Kernel found @ 0x%" PRIx64 "\n", ctx.addr); print_os_version(pe); print_guid(vmi, ctx.addr, pe); print_pe_header(vmi, ctx.addr, pe); found=1; break; } } } /* cleanup any memory associated with the LibVMI instance */ vmi_destroy(vmi); if(found) return 0; return 1; }