status_t is_WINDOWS_KERNEL(vmi_instance_t vmi, addr_t base_p, uint8_t *pe)
{
    status_t ret = VMI_FAILURE;

    void *optional_pe_header = NULL;
    uint16_t optional_header_type = 0;
    struct export_table et;

    peparse_assign_headers(pe, NULL, NULL, &optional_header_type, &optional_pe_header, NULL, NULL);
    addr_t export_header_offset =
        peparse_get_idd_rva(IMAGE_DIRECTORY_ENTRY_EXPORT, &optional_header_type, optional_pe_header, NULL, NULL);

    // The kernel's export table is continuously allocated on the PA level with the PE header
    // This trick may not work for other PE headers (though may work for some drivers)
    uint32_t nbytes = vmi_read_pa(vmi, base_p + export_header_offset, &et, sizeof(struct export_table));
    if (nbytes == sizeof(struct export_table) && !(et.export_flags || !et.name)) {

        char *name = vmi_read_str_pa(vmi, base_p + et.name);
        if (name) {
            if (strcmp("ntoskrnl.exe", name) == 0)
                ret = VMI_SUCCESS;

            free(name);
        }
    }

    return ret;
}
addr_t get_ntoskrnl_base(vmi_instance_t vmi, addr_t page_paddr)
{
    addr_t ret = 0;
    access_context_t ctx = {
        .translate_mechanism = VMI_TM_NONE,
        .addr = page_paddr
    };

    for (; ctx.addr + VMI_PS_4KB < vmi->max_physical_address; ctx.addr += VMI_PS_4KB) {

        uint8_t page[VMI_PS_4KB];

        if (VMI_FAILURE == peparse_get_image(vmi, &ctx, VMI_PS_4KB, page))
            continue;

        struct pe_header *pe_header = NULL;
        struct dos_header *dos_header = NULL;
        void *optional_pe_header = NULL;
        uint16_t optional_header_type = 0;
        struct export_table et;

        peparse_assign_headers(page, &dos_header, &pe_header, &optional_header_type, &optional_pe_header, NULL, NULL);
        addr_t export_header_offset =
            peparse_get_idd_rva(IMAGE_DIRECTORY_ENTRY_EXPORT, &optional_header_type, optional_pe_header, NULL, NULL);

        if (!export_header_offset || ctx.addr + export_header_offset >= vmi->max_physical_address)
            continue;

        uint32_t nbytes = vmi_read_pa(vmi, ctx.addr + export_header_offset, &et, sizeof(struct export_table));
        if (nbytes == sizeof(struct export_table) && !(et.export_flags || !et.name)) {

            if (ctx.addr + et.name + 12 >= vmi->max_physical_address) {
                continue;
            }

            unsigned char name[13] = {0};
            vmi_read_pa(vmi, ctx.addr + et.name, name, 12);

            if (!strcmp("ntoskrnl.exe", (char*)name)) {
                ret = ctx.addr;
                break;
            }
        } else {
            continue;
        }
    }

    return ret;
}
addr_t get_ntoskrnl_base(vmi_instance_t vmi, addr_t page_paddr)
{
    addr_t ret = 0;

    for (; page_paddr + VMI_PS_4KB < vmi->size; page_paddr += VMI_PS_4KB) {

        uint8_t page[VMI_PS_4KB];
        status_t rc = peparse_get_image_phys(vmi, page_paddr, VMI_PS_4KB, page);
        if (VMI_FAILURE == rc) {
            continue;
        }

        struct pe_header *pe_header = NULL;
        struct dos_header *dos_header = NULL;
        void *optional_pe_header = NULL;
        uint16_t optional_header_type = 0;
        struct export_table et;

        peparse_assign_headers(page, &dos_header, &pe_header, &optional_header_type, &optional_pe_header, NULL, NULL);
        addr_t export_header_offset =
            peparse_get_idd_rva(IMAGE_DIRECTORY_ENTRY_EXPORT, &optional_header_type, optional_pe_header, NULL, NULL);

        if (!export_header_offset || page_paddr + export_header_offset > vmi->size)
            continue;

        uint32_t nbytes = vmi_read_pa(vmi, page_paddr + export_header_offset, &et, sizeof(struct export_table));
        if (nbytes == sizeof(struct export_table) && !(et.export_flags || !et.name)) {

            if (page_paddr + et.name + 12 > vmi->size) {
                continue;
            }

            unsigned char name[13] = {0};
            vmi_read_pa(vmi, page_paddr + et.name, name, 12);

            if (!strcmp("ntoskrnl.exe", (char*)name)) {
                ret = page_paddr;
                break;
            }
        } else {
            continue;
        }
    }

    return ret;
}
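Either variant of get_ntoskrnl_base is normally driven by a brute-force scan that starts at the bottom of guest physical memory and walks upward one page at a time. A minimal sketch of such a caller, assuming an already initialized vmi_instance_t; the starting address of 0 and the printf output are illustrative, not taken from the LibVMI sources:

#include <inttypes.h>
#include <stdio.h>

/* Hypothetical caller: scan guest physical memory from address 0 upward
 * until a page with a valid PE header exporting "ntoskrnl.exe" is found. */
static void locate_kernel(vmi_instance_t vmi)
{
    addr_t kernel_pa = get_ntoskrnl_base(vmi, 0);

    if (kernel_pa)
        printf("ntoskrnl.exe found at PA 0x%" PRIx64 "\n", kernel_pa);
    else
        printf("ntoskrnl.exe not found\n");
}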
status_t is_WINDOWS_KERNEL(vmi_instance_t vmi, addr_t base_p, uint8_t *pe)
{
    status_t ret = VMI_FAILURE;

    void *optional_pe_header = NULL;
    uint16_t optional_header_type = 0;
    struct export_table et;

    peparse_assign_headers(pe, NULL, NULL, &optional_header_type, &optional_pe_header, NULL, NULL);
    addr_t export_header_offset =
        peparse_get_idd_rva(IMAGE_DIRECTORY_ENTRY_EXPORT, &optional_header_type, optional_pe_header, NULL, NULL);

    // The kernel's export table is continuously allocated on the PA level with the PE header
    // This trick may not work for other PE headers (though may work for some drivers)
    if ( base_p + export_header_offset < base_p + VMI_PS_4KB ) {
        if ( VMI_SUCCESS == vmi_read_pa(vmi, base_p + export_header_offset, sizeof(struct export_table), &et, NULL) &&
             !(et.export_flags || !et.name) ) {

            char *name = vmi_read_str_pa(vmi, base_p + et.name);
            if (name) {
                if (strcmp("ntoskrnl.exe", name) == 0)
                    ret = VMI_SUCCESS;

                free(name);
            }
        }
    }

    // The export header may be stripped from the kernel so check section names.
    // This is commonly the case with Windows 10.
    if ( ret == VMI_FAILURE )
        ret = check_sections(vmi, base_p, pe);

    return ret;
}
void print_guid(vmi_instance_t vmi, addr_t kernel_base_p, uint8_t* pe)
{
    uint16_t major_os_version;
    uint16_t minor_os_version;
    uint32_t size_of_image;

    struct pe_header *pe_header = NULL;
    uint16_t optional_header_type = 0;
    struct optional_header_pe32 *oh32 = NULL;
    struct optional_header_pe32plus *oh32plus = NULL;

    peparse_assign_headers(pe, NULL, &pe_header, &optional_header_type, NULL, &oh32, &oh32plus);
    addr_t debug_offset = peparse_get_idd_rva(IMAGE_DIRECTORY_ENTRY_DEBUG, NULL, NULL, oh32, oh32plus);

    if (optional_header_type == IMAGE_PE32_MAGIC) {
        major_os_version = oh32->major_os_version;
        minor_os_version = oh32->minor_os_version;
        size_of_image = oh32->size_of_image;
    } else if (optional_header_type == IMAGE_PE32_PLUS_MAGIC) {
        major_os_version = oh32plus->major_os_version;
        minor_os_version = oh32plus->minor_os_version;
        size_of_image = oh32plus->size_of_image;
    }

    struct image_debug_directory debug_directory;
    vmi_read_pa(vmi, kernel_base_p + debug_offset, (uint8_t *)&debug_directory, sizeof(struct image_debug_directory));

    if (debug_directory.type == IMAGE_DEBUG_TYPE_MISC) {
        printf("This operating system uses .dbg instead of .pdb\n");
        if (major_os_version == 5 && minor_os_version == 0) {
            printf("GUID: %.8x%.8x\n", pe_header->time_date_stamp, size_of_image);
        }
        return;
    } else if (debug_directory.type != IMAGE_DEBUG_TYPE_CODEVIEW) {
        printf("The header is not in CodeView format, unable to deal with that!\n");
        return;
    }

    struct cv_info_pdb70 *pdb_header = malloc(debug_directory.size_of_data);
    vmi_read_pa(vmi, kernel_base_p + debug_directory.address_of_raw_data, pdb_header, debug_directory.size_of_data);

    // The PDB header has to be PDB 7.0
    // http://www.debuginfo.com/articles/debuginfomatch.html
    if (pdb_header->cv_signature != RSDS) {
        printf("The CodeView debug information has to be in PDB 7.0 for the kernel!\n");
        return;
    }

    printf("\tGUID: ");
    printf("%.8x", pdb_header->signature.data1);
    printf("%.4x", pdb_header->signature.data2);
    printf("%.4x", pdb_header->signature.data3);

    int c;
    for (c = 0; c < 8; c++)
        printf("%.2x", pdb_header->signature.data4[c]);

    printf("%.1x", pdb_header->age & 0xf);
    printf("\n");
    printf("\tKernel filename: %s\n", pdb_header->pdb_file_name);

    free(pdb_header);
}
status_t peparse_get_export_table(
    vmi_instance_t vmi,
    addr_t base_vaddr,
    vmi_pid_t pid,
    struct export_table *et,
    addr_t *export_table_rva,
    size_t *export_table_size)
{
    // Note: this function assumes a "normal" PE where all the headers are in
    // the first page of the PE and the field DosHeader.OffsetToPE points to
    // an address in the first page.

    addr_t export_header_rva = 0;
    addr_t export_header_va = 0;
    size_t export_header_size = 0;
    size_t nbytes = 0;

#define MAX_HEADER_BYTES 1024   // keep under 1 page
    uint8_t image[MAX_HEADER_BYTES];

    if (VMI_FAILURE == peparse_get_image_virt(vmi, base_vaddr, pid, MAX_HEADER_BYTES, image)) {
        return VMI_FAILURE;
    }

    void *optional_header = NULL;
    uint16_t magic = 0;
    peparse_assign_headers(image, NULL, NULL, &magic, &optional_header, NULL, NULL);

    export_header_rva = peparse_get_idd_rva(IMAGE_DIRECTORY_ENTRY_EXPORT, &magic, optional_header, NULL, NULL);
    export_header_size = peparse_get_idd_size(IMAGE_DIRECTORY_ENTRY_EXPORT, &magic, optional_header, NULL, NULL);

    if (export_table_rva) {
        *export_table_rva = export_header_rva;
    }

    if (export_table_size) {
        *export_table_size = export_header_size;
    }

    /* Find & read the export header; assume a different page than the headers */
    export_header_va = base_vaddr + export_header_rva;

    dbprint(VMI_DEBUG_MISC, "--PEParse: found export table at [VA] 0x%.16"PRIx64" = 0x%.16"PRIx64" + 0x%"PRIx64"\n",
            export_header_va, ((windows_instance_t)vmi->os_data)->ntoskrnl_va, export_header_rva);

    nbytes = vmi_read_va(vmi, export_header_va, pid, et, sizeof(*et));
    if (nbytes != sizeof(struct export_table)) {
        dbprint(VMI_DEBUG_MISC, "--PEParse: failed to map export header\n");
        return VMI_FAILURE;
    }

    /* sanity check */
    if (et->export_flags || !et->name) {
        dbprint(VMI_DEBUG_MISC, "--PEParse: bad export directory table\n");
        return VMI_FAILURE;
    }

    return VMI_SUCCESS;
}
status_t find_kdbg_address_faster(
    vmi_instance_t vmi,
    addr_t *kdbg_pa,
    addr_t *kernel_pa,
    addr_t *kernel_va)
{
    dbprint(VMI_DEBUG_MISC, "**Trying find_kdbg_address_faster\n");

    status_t ret = VMI_FAILURE;

    // This scan requires the location of the KPCR
    // which we get from the GS/FS register on live machines.
    // For file mode this needs to be further investigated.
    if (VMI_FILE == vmi->mode) {
        return ret;
    }

    void *bm = boyer_moore_init((unsigned char *)"KDBG", 4);
    int find_ofs = 0x10;

    reg_t cr3 = 0, fsgs = 0;
    if (VMI_FAILURE == driver_get_vcpureg(vmi, &cr3, CR3, 0)) {
        goto done;
    }

    switch ( vmi->page_mode ) {
        case VMI_PM_IA32E:
            if (VMI_FAILURE == driver_get_vcpureg(vmi, &fsgs, GS_BASE, 0))
                goto done;
            break;
        case VMI_PM_LEGACY: /* Fall-through */
        case VMI_PM_PAE:
            if (VMI_FAILURE == driver_get_vcpureg(vmi, &fsgs, FS_BASE, 0))
                goto done;
            break;
        default:
            goto done;
    }

    // We start the search from the KPCR, which has to be mapped into the kernel.
    // We further know that the Windows kernel is page aligned
    // so we are just checking if the page has a valid PE header
    // and if the first item in the export table is "ntoskrnl.exe".
    // Once the kernel is found, we find the .data section
    // and limit the string search for "KDBG" into that region.

    // start searching at the lower part from the kpcr
    // then switch to the upper part if needed
    int step = -VMI_PS_4KB;
    addr_t page_paddr;

    access_context_t ctx = {
        .translate_mechanism = VMI_TM_NONE,
    };

scan:
    if ( VMI_FAILURE == vmi_pagetable_lookup(vmi, cr3, fsgs, &page_paddr) )
        goto done;

    page_paddr &= ~VMI_BIT_MASK(0,11);

    for (; page_paddr + step < vmi->max_physical_address; page_paddr += step) {

        uint8_t page[VMI_PS_4KB];
        ctx.addr = page_paddr;

        status_t rc = peparse_get_image(vmi, &ctx, VMI_PS_4KB, page);
        if (VMI_FAILURE == rc) {
            continue;
        }

        struct pe_header *pe_header = NULL;
        struct dos_header *dos_header = NULL;
        void *optional_pe_header = NULL;
        uint16_t optional_header_type = 0;
        struct export_table et;

        peparse_assign_headers(page, &dos_header, &pe_header, &optional_header_type, &optional_pe_header, NULL, NULL);
        addr_t export_header_offset =
            peparse_get_idd_rva(IMAGE_DIRECTORY_ENTRY_EXPORT, &optional_header_type, optional_pe_header, NULL, NULL);

        if (!export_header_offset || page_paddr + export_header_offset >= vmi->max_physical_address)
            continue;

        if ( VMI_SUCCESS == vmi_read_pa(vmi, page_paddr + export_header_offset, sizeof(struct export_table), &et, NULL) ) {
            if ( !(et.export_flags || !et.name) && page_paddr + et.name + 12 >= vmi->max_physical_address )
                continue;

            unsigned char name[13] = {0};
            if ( VMI_FAILURE == vmi_read_pa(vmi, page_paddr + et.name, 12, name, NULL) )
                continue;

            if (strcmp("ntoskrnl.exe", (const char *)name)) {
                continue;
            }
        } else {
            continue;
        }

        uint32_t c;
        for (c = 0; c < pe_header->number_of_sections; c++) {

            struct section_header section;
            addr_t section_addr = page_paddr
                + dos_header->offset_to_pe
                + sizeof(struct pe_header)
                + pe_header->size_of_optional_header
                + c * sizeof(struct section_header);

            // Read the section header from memory
            if ( VMI_FAILURE == vmi_read_pa(vmi, section_addr, sizeof(struct section_header), (uint8_t *)&section, NULL) )
                continue;

            // .data check
            if (memcmp(section.short_name, "\x2E\x64\x61\x74\x61", 5) != 0) {
                continue;
            }

            uint8_t *haystack = alloca(section.size_of_raw_data);
            if ( VMI_FAILURE == vmi_read_pa(vmi, page_paddr + section.virtual_address, section.size_of_raw_data, haystack, NULL) )
                continue;

            int match_offset = boyer_moore2(bm, haystack, section.size_of_raw_data);

            if (-1 != match_offset) {
                // We found the structure, but let's verify it.
                // The kernel is always mapped into VA at the same offset
                // it is found on physical memory + the kernel boundary.

                // Read "KernBase" from the haystack
                uint64_t *kernbase = (uint64_t *)&haystack[(unsigned int) match_offset + sizeof(uint64_t)];
                int zeroes = __builtin_clzll(page_paddr);

                if ((*kernbase) << zeroes == page_paddr << zeroes) {
                    *kernel_pa = page_paddr;
                    *kernel_va = *kernbase;
                    *kdbg_pa = page_paddr + section.virtual_address + (unsigned int) match_offset - find_ofs;

                    ret = VMI_SUCCESS;

                    dbprint(VMI_DEBUG_MISC, "--Found KdDebuggerDataBlock at PA %.16"PRIx64"\n", *kdbg_pa);
                    goto done;
                } else {
                    dbprint(VMI_DEBUG_MISC,
                            "--WARNING: KernBase in KdDebuggerDataBlock at PA %.16"PRIx64" doesn't point back to this page.\n",
                            page_paddr + section.virtual_address + (unsigned int) match_offset - find_ofs);
                }
            }

            break;
        }
    }

    if (step < 0) {
        step = VMI_PS_4KB;
        goto scan;
    }

done:
    boyer_moore_fini(bm);
    return ret;
}
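Because the kernel image keeps the same page offsets in the virtual and physical address spaces (the property the KernBase check above relies on), the physical KDBG address can be turned back into a virtual address from the kernel_pa/kernel_va pair the scan fills in. A short, hypothetical follow-up sketch; the variable names are illustrative:

#include <inttypes.h>
#include <stdio.h>

/* Hypothetical follow-up: derive the KDBG virtual address from the
 * physical hit, using the kernel PA/VA pair filled in by the scan. */
addr_t kdbg_pa = 0, kernel_pa = 0, kernel_va = 0;

if (VMI_SUCCESS == find_kdbg_address_faster(vmi, &kdbg_pa, &kernel_pa, &kernel_va)) {
    addr_t kdbg_va = kernel_va + (kdbg_pa - kernel_pa);
    printf("KdDebuggerDataBlock: PA 0x%" PRIx64 ", VA 0x%" PRIx64 "\n", kdbg_pa, kdbg_va);
}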
status_t peparse_get_export_table(
    vmi_instance_t vmi,
    const access_context_t *ctx,
    struct export_table *et,
    addr_t *export_table_rva,
    size_t *export_table_size)
{
    // Note: this function assumes a "normal" PE where all the headers are in
    // the first page of the PE and the field DosHeader.OffsetToPE points to
    // an address in the first page.

    access_context_t _ctx = *ctx;
    addr_t export_header_rva = 0;
    size_t export_header_size = 0;

#define MAX_HEADER_BYTES 1024   // keep under 1 page
    uint8_t image[MAX_HEADER_BYTES];

    if (VMI_FAILURE == peparse_get_image(vmi, ctx, MAX_HEADER_BYTES, image)) {
        return VMI_FAILURE;
    }

    void *optional_header = NULL;
    uint16_t magic = 0;
    peparse_assign_headers(image, NULL, NULL, &magic, &optional_header, NULL, NULL);

    export_header_rva = peparse_get_idd_rva(IMAGE_DIRECTORY_ENTRY_EXPORT, &magic, optional_header, NULL, NULL);
    export_header_size = peparse_get_idd_size(IMAGE_DIRECTORY_ENTRY_EXPORT, &magic, optional_header, NULL, NULL);

    if (export_table_rva) {
        *export_table_rva = export_header_rva;
    }

    if (export_table_size) {
        *export_table_size = export_header_size;
    }

    dbprint(VMI_DEBUG_PEPARSE,
            "--PEParse: DLL base 0x%.16"PRIx64". Export header [RVA] 0x%.16"PRIx64". Size %" PRIu64 ".\n",
            ctx->addr, export_header_rva, export_header_size);

    _ctx.addr = ctx->addr + export_header_rva;
    if ( VMI_FAILURE == vmi_read(vmi, &_ctx, sizeof(struct export_table), et, NULL) ) {
        dbprint(VMI_DEBUG_PEPARSE, "--PEParse: failed to map export header\n");

        /*
         * Sometimes Windows maps the export table on page-boundaries,
         * such that the first export_flags field (which is reserved) is
         * not actually accessible (the page is not mapped). See Issue #260.
         */
        if (!((_ctx.addr + 4) & 0xfff)) {
            dbprint(VMI_DEBUG_PEPARSE, "--PEParse: export table is mapped on page boundary\n");
            _ctx.addr += 4;

            if ( VMI_FAILURE == vmi_read(vmi, &_ctx, sizeof(struct export_table)-4, (void*)((char*)et+4), NULL) ) {
                dbprint(VMI_DEBUG_PEPARSE, "--PEParse: still failed to map export header\n");
                return VMI_FAILURE;
            }

            // Manually set the reserved field to zero in this case
            et->export_flags = 0;
        } else {
            return VMI_FAILURE;
        }
    }

    /* sanity check */
    if (et->export_flags || !et->name) {
        dbprint(VMI_DEBUG_PEPARSE, "--PEParse: bad export directory table\n");
        return VMI_FAILURE;
    }

    return VMI_SUCCESS;
}
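A caller of the access_context_t variant only needs the module base and a translation context. A minimal, hypothetical usage sketch for reading the export table of a module mapped in the kernel address space (pid 0 denotes the kernel; module_base_va is an illustrative variable, not from the LibVMI sources):

#include <inttypes.h>
#include <stdio.h>

/* Hypothetical usage: read the export table of a module mapped at
 * module_base_va, translating through the kernel address space (pid 0). */
access_context_t ctx = {
    .translate_mechanism = VMI_TM_PROCESS_PID,
    .pid = 0,                 /* kernel address space */
    .addr = module_base_va,   /* illustrative module base VA */
};

struct export_table et;
addr_t et_rva = 0;
size_t et_size = 0;

if (VMI_SUCCESS == peparse_get_export_table(vmi, &ctx, &et, &et_rva, &et_size))
    printf("export table at RVA 0x%" PRIx64 ", %zu bytes\n", et_rva, et_size);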
void print_guid(vmi_instance_t vmi, addr_t kernel_base_p, uint8_t* pe)
{
    uint32_t size_of_image;
    bool debug_directory_valid = 0;

    struct pe_header *pe_header = NULL;
    uint16_t optional_header_type = 0;
    struct optional_header_pe32 *oh32 = NULL;
    struct optional_header_pe32plus *oh32plus = NULL;

    peparse_assign_headers(pe, NULL, &pe_header, &optional_header_type, NULL, &oh32, &oh32plus);
    addr_t debug_offset = peparse_get_idd_rva(IMAGE_DIRECTORY_ENTRY_DEBUG, NULL, NULL, oh32, oh32plus);

    switch (optional_header_type) {
        case IMAGE_PE32_MAGIC:
            size_of_image = oh32->size_of_image;
            break;
        case IMAGE_PE32_PLUS_MAGIC:
            size_of_image = oh32plus->size_of_image;
            break;
        default:
            return;
    }

    struct image_debug_directory debug_directory = { 0 };
    struct cv_info_pdb70 *pdb_header = g_malloc0(sizeof(struct cv_info_pdb70) + PDB_FILENAME_LENGTH + 1);

    if ( VMI_FAILURE == vmi_read_pa(vmi, kernel_base_p + debug_offset,
                                    sizeof(struct image_debug_directory),
                                    (uint8_t *)&debug_directory, NULL) )
        return;

    printf("\tPE GUID: %.8x%.5x\n", pe_header->time_date_stamp, size_of_image);

    switch (debug_directory.type) {
        case IMAGE_DEBUG_TYPE_CODEVIEW: // OK
            debug_directory_valid = 1;
            break;
        case IMAGE_DEBUG_TYPE_MISC:
            printf("This operating system uses .dbg instead of .pdb\n");
            return;
        default:
            //printf("The debug directory header is not in CodeView format, will do a brute-force search!\n");
            break;
    }

    if (debug_directory_valid) {
        if (debug_directory.size_of_data > VMI_PS_4KB/4) {
            // Normal size of the debug directory on Windows 7 for example is 0x25 bytes.
            printf("The size of the debug directory is huge, something might be wrong.\n");
            goto done;
        }

        if ( VMI_FAILURE == vmi_read_pa(vmi, kernel_base_p + debug_directory.address_of_raw_data,
                                        sizeof(struct cv_info_pdb70) + PDB_FILENAME_LENGTH,
                                        pdb_header, NULL) )
            goto done;

        // The PDB header has to be PDB 7.0
        // http://www.debuginfo.com/articles/debuginfomatch.html
        if (RSDS != pdb_header->cv_signature) {
            printf("The CodeView debug information has to be in PDB 7.0 for the kernel!\n");
            goto done;
        }
    } else {
        if (!kernel_debug_search(vmi, pdb_header))
            goto done;
    }

    printf("\tPDB GUID: ");
    printf("%.8x", pdb_header->signature.data1);
    printf("%.4x", pdb_header->signature.data2);
    printf("%.4x", pdb_header->signature.data3);

    int c;
    for (c = 0; c < 8; c++)
        printf("%.2x", pdb_header->signature.data4[c]);

    printf("%.1x", pdb_header->age & 0xf);
    printf("\n");

    printf("\tKernel filename: %s\n", (char*)pdb_header->pdb_file_name);

    if (!strcmp("ntoskrnl.pdb", (char*)pdb_header->pdb_file_name)) {
        printf("\tSingle-processor without PAE\n");
    } else if (!strcmp("ntkrnlmp.pdb", (char*)pdb_header->pdb_file_name)) {
        printf("\tMulti-processor without PAE\n");
    } else if (!strcmp("ntkrnlpa.pdb", (char*)pdb_header->pdb_file_name)) {
        printf("\tSingle-processor with PAE (version 5.0 and higher)\n");
    } else if (!strcmp("ntkrpamp.pdb", (char*)pdb_header->pdb_file_name)) {
        printf("\tMulti-processor with PAE (version 5.0 and higher)\n");
    }

done:
    free(pdb_header);
}
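The GUID/age string printed above (data1 + data2 + data3 + data4 followed by the age nibble) is the directory name under which Microsoft publishes the matching PDB on its public symbol server, so the output can be turned directly into a download URL. A hedged sketch, assuming the GUID+age string and the PDB file name have already been captured into plain C strings; both parameter names are illustrative:

#include <stdio.h>

/* Hypothetical helper: build the Microsoft symbol server URL for a PDB,
 * given the concatenated GUID+age string printed above and the PDB name. */
static void print_symbol_url(const char *pdb_name, const char *guid_age)
{
    printf("https://msdl.microsoft.com/download/symbols/%s/%s/%s\n",
           pdb_name, guid_age, pdb_name);
}

/* e.g. print_symbol_url("ntkrnlmp.pdb", "<32 hex GUID digits followed by the age>"); */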