/* Retrieve the head of a process' in-load-order module list.
 *
 * Reads the EPROCESS page directory base (so userspace addresses can be
 * translated) and the PEB pointer, then follows PEB->Ldr to the
 * InLoadOrderModuleList head.
 *
 * Returns true and stores the list head in *module_list on success;
 * false when eprocess_base is 0, any read fails, or the list pointer
 * is NULL.
 */
bool drakvuf_get_module_list(drakvuf_t drakvuf, addr_t eprocess_base, addr_t *module_list)
{
    vmi_instance_t vmi = drakvuf->vmi;
    access_context_t ctx = { .translate_mechanism = VMI_TM_PROCESS_DTB };

    if ( !eprocess_base )
        return false;

    /* The process' DTB is required to translate PEB (userspace) addresses. */
    if ( VMI_FAILURE == vmi_read_addr_va(vmi, eprocess_base + drakvuf->offsets[EPROCESS_PDBASE], 0, &ctx.dtb) )
        return false;

    addr_t peb = 0;
    if ( VMI_FAILURE == vmi_read_addr_va(vmi, eprocess_base + drakvuf->offsets[EPROCESS_PEB], 0, &peb) )
        return false;

    addr_t ldr = 0;
    ctx.addr = peb + drakvuf->offsets[PEB_LDR];
    if ( VMI_FAILURE == vmi_read_addr(vmi, &ctx, &ldr) )
        return false;

    addr_t list_head = 0;
    ctx.addr = ldr + drakvuf->offsets[PEB_LDR_DATA_INLOADORDERMODULELIST];
    if ( VMI_FAILURE == vmi_read_addr(vmi, &ctx, &list_head) )
        return false;

    if ( !list_head )
        return false;

    *module_list = list_head;
    return true;
}
/* Resolve mod_name!symbol to a virtual address within the given process.
 *
 * Locates the process' PEB, follows it to the in-load-order module list
 * and delegates the module/symbol lookup to modlist_sym2va().
 *
 * Returns the symbol's VA, or 0 when any read fails or the symbol is
 * not found.
 */
addr_t eprocess_sym2va (drakvuf_t drakvuf, addr_t eprocess_base, const char *mod_name, const char *symbol)
{
    addr_t ret = 0;
    addr_t peb = 0, ldr = 0, inloadorder = 0;
    access_context_t ctx =
    {
        .translate_mechanism = VMI_TM_PROCESS_DTB,
    };

    /* Fetch the process' DTB so that PEB (userspace) addresses translate. */
    if ( VMI_FAILURE == vmi_read_addr_va(drakvuf->vmi, eprocess_base + drakvuf->offsets[EPROCESS_PDBASE], 0, &ctx.dtb) )
        return 0;

    if ( VMI_FAILURE == vmi_read_addr_va(drakvuf->vmi, eprocess_base + drakvuf->offsets[EPROCESS_PEB], 0, &peb) )
        return 0;

    ctx.addr = peb + drakvuf->offsets[PEB_LDR];
    if ( VMI_FAILURE == vmi_read_addr(drakvuf->vmi, &ctx, &ldr) )
        return 0;

    ctx.addr = ldr + drakvuf->offsets[PEB_LDR_DATA_INLOADORDERMODULELIST];
    if ( VMI_FAILURE == vmi_read_addr(drakvuf->vmi, &ctx, &inloadorder) )
        return 0;

    PRINT_DEBUG("Found PEB @ 0x%lx. LDR @ 0x%lx. INLOADORDER @ 0x%lx.\n", peb, ldr, inloadorder);

    /* On failure ret stays 0, which is our "not found" return value. */
    modlist_sym2va(drakvuf, inloadorder, &ctx, mod_name, symbol, &ret);
    return ret;
}
// search for the given module+symbol in the given module list static status_t modlist_sym2va(drakvuf_t drakvuf, addr_t list_head, access_context_t *ctx, const char *mod_name, const char *symbol, addr_t *va) { vmi_instance_t vmi = drakvuf->vmi; addr_t next_module = list_head; /* walk the module list */ while (1) { /* follow the next pointer */ addr_t tmp_next = 0; ctx->addr = next_module; if(VMI_FAILURE==vmi_read_addr(vmi, ctx, &tmp_next)) break; /* if we are back at the list head, we are done */ if (list_head == tmp_next || !tmp_next) { break; } ctx->addr = next_module + drakvuf->offsets[LDR_DATA_TABLE_ENTRY_BASEDLLNAME]; unicode_string_t *us = vmi_read_unicode_str(vmi, ctx); unicode_string_t out = { .contents = NULL }; if (us && VMI_SUCCESS == vmi_convert_str_encoding(us, &out, "UTF-8")) { PRINT_DEBUG("Found module %s\n", out.contents); if (!strcasecmp((char*) out.contents, mod_name)) { addr_t dllbase; ctx->addr = next_module + drakvuf->offsets[LDR_DATA_TABLE_ENTRY_DLLBASE]; vmi_read_addr(vmi, ctx, &dllbase); ctx->addr = dllbase; *va = vmi_translate_sym2v(vmi, ctx, (char *) symbol); PRINT_DEBUG("\t%s @ 0x%lx\n", symbol, *va); free(out.contents); vmi_free_unicode_str(us); return VMI_SUCCESS; } free(out.contents); } if (us) vmi_free_unicode_str(us); next_module = tmp_next; } return VMI_FAILURE; }
// search for the given module+symbol in the given module list static status_t modlist_va2sym(drakvuf_t drakvuf, addr_t list_head, addr_t va, access_context_t *ctx, char **out_mod, char **out_sym) { vmi_instance_t vmi = drakvuf->vmi; addr_t next_module = list_head; /* walk the module list */ while (1) { /* follow the next pointer */ addr_t tmp_next = 0; ctx->addr = next_module; if(VMI_FAILURE == vmi_read_addr(vmi, ctx, &tmp_next)) break; /* if we are back at the list head, we are done */ if (list_head == tmp_next || !tmp_next) { break; } ctx->addr = next_module + drakvuf->offsets[LDR_DATA_TABLE_ENTRY_BASEDLLNAME]; unicode_string_t *us = vmi_read_unicode_str(vmi, ctx); unicode_string_t out = { .contents = NULL }; if (us && VMI_SUCCESS == vmi_convert_str_encoding(us, &out, "UTF-8")) { addr_t dllbase; ctx->addr = next_module + drakvuf->offsets[LDR_DATA_TABLE_ENTRY_DLLBASE]; if(VMI_FAILURE == vmi_read_addr(vmi, ctx, &dllbase)) { free(us); break; } ctx->addr = dllbase; const char *sym = vmi_translate_v2sym(vmi, ctx, va); if (sym) { *out_mod = g_strdup((char*)out.contents); *out_sym = (char*) sym; free(out.contents); vmi_free_unicode_str(us); return VMI_SUCCESS; } else { free(out.contents); } } if (us) vmi_free_unicode_str(us); next_module = tmp_next; } return VMI_FAILURE; }
/* Dump the file backing the given FILE_OBJECT (physical address file_pa).
 *
 * Follows FILE_OBJECT->SectionObjectPointer and attempts extraction from
 * the DataSectionObject control area and — when distinct from it — the
 * ImageSectionObject control area. Extraction from SharedCacheMap is not
 * implemented yet.
 */
static void extract_file(filedelete* f, drakvuf_t drakvuf, const drakvuf_trap_info_t* info, vmi_instance_t vmi, addr_t file_pa, access_context_t* ctx, const char* filename, uint64_t fo_flags)
{
    addr_t sop = 0;
    addr_t datasection = 0;
    addr_t sharedcachemap = 0;
    addr_t imagesection = 0;

    ctx->addr = file_pa + f->offsets[FILE_OBJECT_SECTIONOBJECTPOINTER];
    if ( VMI_FAILURE == vmi_read_addr(vmi, ctx, &sop) )
        return;

    ctx->addr = sop + f->offsets[SECTIONOBJECTPOINTER_DATASECTIONOBJECT];
    if ( VMI_FAILURE == vmi_read_addr(vmi, ctx, &datasection) )
        return;

    if ( datasection )
        extract_ca_file(f, drakvuf, info, vmi, datasection, ctx, filename, fo_flags);

    ctx->addr = sop + f->offsets[SECTIONOBJECTPOINTER_SHAREDCACHEMAP];
    if ( VMI_FAILURE == vmi_read_addr(vmi, ctx, &sharedcachemap) )
        return;

    // TODO: extraction from sharedcachemap

    ctx->addr = sop + f->offsets[SECTIONOBJECTPOINTER_IMAGESECTIONOBJECT];
    if ( VMI_FAILURE == vmi_read_addr(vmi, ctx, &imagesection) )
        return;

    /* BUGFIX: mirror the NULL check applied to datasection above. Previously
     * a NULL imagesection (with a non-NULL datasection) was handed straight
     * to extract_ca_file. */
    if ( imagesection && imagesection != datasection )
        extract_ca_file(f, drakvuf, info, vmi, imagesection, ctx, filename, fo_flags);
}
addr_t linux_get_current_process(drakvuf_t drakvuf, uint64_t vcpu_id) { addr_t process = 0; vmi_instance_t vmi = drakvuf->vmi; access_context_t ctx = { .translate_mechanism = VMI_TM_PROCESS_DTB, .dtb = drakvuf->regs[vcpu_id]->cr3, .addr = drakvuf->regs[vcpu_id]->gs_base + drakvuf->offsets[CURRENT_TASK], }; if ( VMI_FAILURE == vmi_read_addr(vmi, &ctx, &process) || process < MIN_KERNEL_BOUNDARY ) { /* * The kernel stack also has a structure called thread_info that points * to a task_struct but it doesn't seem to always agree with current_task. * However, when current_task obviously is wrong (for example during a CPUID) * we can fall back to it to find the correct process. * On most newer kernels the kernel stack size is 16K. This is just a guess * so for older kernels this may not work as well if the VA happens to map * something that resembles a kernel-address. * See https://www.cs.columbia.edu/~smb/classes/s06-4118/l06.pdf for more info. */ ctx.addr = drakvuf->kpcr[vcpu_id] & ~STACK_SIZE_16K; if ( VMI_FAILURE == vmi_read_addr(vmi, &ctx, &process) || process < MIN_KERNEL_BOUNDARY ) { ctx.addr = drakvuf->kpcr[vcpu_id] & ~STACK_SIZE_8K; if ( VMI_FAILURE == vmi_read_addr(vmi, &ctx, &process) || process < MIN_KERNEL_BOUNDARY ) process = 0; } } return process; } /* * Threads are really just processes on Linux. */ addr_t linux_get_current_thread(drakvuf_t drakvuf, uint64_t vcpu_id) { return linux_get_current_process(drakvuf, vcpu_id); } char* linux_get_process_name(drakvuf_t drakvuf, addr_t process_base) { access_context_t ctx = { .translate_mechanism = VMI_TM_PROCESS_PID, .pid = 0, .addr = process_base + drakvuf->offsets[TASK_STRUCT_COMM] }; return vmi_read_str(drakvuf->vmi, &ctx); } status_t linux_get_process_pid(drakvuf_t drakvuf, addr_t process_base, vmi_pid_t* pid ) { /* * On Linux PID is actually a thread ID, while the TGID (Thread Group-ID) is * what getpid() would return. Because THAT makes sense. 
*/ access_context_t ctx = { .translate_mechanism = VMI_TM_PROCESS_PID, .pid = 0, .addr = process_base + drakvuf->offsets[TASK_STRUCT_TGID] }; return vmi_read_32(drakvuf->vmi, &ctx, (uint32_t*)pid); } char* linux_get_current_process_name(drakvuf_t drakvuf, uint64_t vcpu_id) { addr_t process_base = linux_get_current_process(drakvuf, vcpu_id); if ( !process_base ) return NULL; access_context_t ctx = { .translate_mechanism = VMI_TM_PROCESS_DTB, .dtb = drakvuf->regs[vcpu_id]->cr3, .addr = process_base + drakvuf->offsets[TASK_STRUCT_COMM] }; return vmi_read_str(drakvuf->vmi, &ctx); } int64_t linux_get_process_userid(drakvuf_t drakvuf, addr_t process_base) { access_context_t ctx = { .translate_mechanism = VMI_TM_PROCESS_PID, .pid = 0, .addr = process_base + drakvuf->offsets[TASK_STRUCT_CRED] }; addr_t cred; if ( VMI_FAILURE == vmi_read_addr(drakvuf->vmi, &ctx, &cred) ) return -1; uint32_t uid; ctx.addr = cred + drakvuf->offsets[CRED_UID]; if ( VMI_FAILURE == vmi_read_32(drakvuf->vmi, &ctx, &uid) ) return -1; return uid; }; int64_t linux_get_current_process_userid(drakvuf_t drakvuf, uint64_t vcpu_id) { addr_t process_base = linux_get_current_process(drakvuf, vcpu_id); if ( !process_base ) return -1; access_context_t ctx = { .translate_mechanism = VMI_TM_PROCESS_DTB, .dtb = drakvuf->regs[vcpu_id]->cr3, .addr = process_base + drakvuf->offsets[TASK_STRUCT_CRED] }; addr_t cred; if ( VMI_FAILURE == vmi_read_addr(drakvuf->vmi, &ctx, &cred) ) return -1; uint32_t uid; ctx.addr = cred + drakvuf->offsets[CRED_UID]; if ( VMI_FAILURE == vmi_read_32(drakvuf->vmi, &ctx, &uid) ) return -1; return uid; } bool linux_get_current_thread_id( drakvuf_t drakvuf, uint64_t vcpu_id, uint32_t* thread_id ) { /* * On Linux PID is actually the thread ID....... ... ... 
*/ addr_t process_base = linux_get_current_process(drakvuf, vcpu_id); if ( !process_base ) return false; access_context_t ctx = { .translate_mechanism = VMI_TM_PROCESS_DTB, .dtb = drakvuf->regs[vcpu_id]->cr3, .addr = process_base + drakvuf->offsets[TASK_STRUCT_PID] }; uint32_t _thread_id; if ( VMI_FAILURE == vmi_read_32(drakvuf->vmi, &ctx, &_thread_id) ) return false; *thread_id = _thread_id; return true; } status_t linux_get_process_ppid( drakvuf_t drakvuf, addr_t process_base, vmi_pid_t* ppid ) { status_t ret ; addr_t parent_proc_base = 0 ; access_context_t ctx = { .translate_mechanism = VMI_TM_PROCESS_PID, .pid = 0, .addr = process_base + drakvuf->offsets[TASK_STRUCT_REALPARENT] }; ret = vmi_read_addr( drakvuf->vmi, &ctx, &parent_proc_base ); /* If we were unable to get the "proc->real_parent *" get "proc->parent *"... */ /* Assuming a parent_proc_base == 0 is a fail... */ if ( (ret == VMI_FAILURE ) || ! parent_proc_base ) { ctx.addr = process_base + drakvuf->offsets[TASK_STRUCT_PARENT]; ret = vmi_read_addr( drakvuf->vmi, &ctx, &parent_proc_base ); } /* Get pid from parent/real_parent...*/ if ( ( ret == VMI_SUCCESS ) && parent_proc_base ) { ctx.addr = parent_proc_base + drakvuf->offsets[TASK_STRUCT_TGID]; return vmi_read_32( drakvuf->vmi, &ctx, (uint32_t*)ppid ); } return VMI_FAILURE ; } bool linux_get_current_process_data( drakvuf_t drakvuf, uint64_t vcpu_id, proc_data_t* proc_data ) { proc_data->base_addr = linux_get_current_process( drakvuf, vcpu_id ); if ( proc_data->base_addr ) { if ( linux_get_process_pid( drakvuf, proc_data->base_addr, &proc_data->pid ) == VMI_SUCCESS ) { proc_data->name = linux_get_process_name( drakvuf, proc_data->base_addr ); if ( proc_data->name ) { proc_data->userid = linux_get_process_userid( drakvuf, proc_data->base_addr ); linux_get_process_ppid( drakvuf, proc_data->base_addr, &proc_data->ppid ); return true ; } } } return false ; }
static void extract_ca_file(filedelete* f, drakvuf_t drakvuf, const drakvuf_trap_info_t* info, vmi_instance_t vmi, addr_t control_area, access_context_t* ctx, const char* filename, uint64_t fo_flags) { addr_t subsection = control_area + f->control_area_size; addr_t segment = 0; addr_t test = 0; addr_t test2 = 0; size_t filesize = 0; /* Check whether subsection points back to the control area */ ctx->addr = control_area + f->offsets[CONTROL_AREA_SEGMENT]; if ( VMI_FAILURE == vmi_read_addr(vmi, ctx, &segment) ) return; ctx->addr = segment + f->offsets[SEGMENT_CONTROLAREA]; if ( VMI_FAILURE == vmi_read_addr(vmi, ctx, &test) || test != control_area ) return; ctx->addr = segment + f->offsets[SEGMENT_SIZEOFSEGMENT]; if ( VMI_FAILURE == vmi_read_64(vmi, ctx, &test) ) return; ctx->addr = segment + f->offsets[SEGMENT_TOTALNUMBEROFPTES]; if ( VMI_FAILURE == vmi_read_32(vmi, ctx, (uint32_t*)&test2) ) return; if ( test != (test2 * 4096) ) return; const int curr_sequence_number = ++f->sequence_number; char* file = NULL; if ( asprintf(&file, "%s/file.%06d.mm", f->dump_folder, curr_sequence_number) < 0 ) return; FILE* fp = fopen(file, "w"); free(file); if (!fp) return; while (subsection) { /* Check whether subsection points back to the control area */ ctx->addr = subsection + f->offsets[SUBSECTION_CONTROLAREA]; if ( VMI_FAILURE == vmi_read_addr(vmi, ctx, &test) || test != control_area ) break; addr_t base = 0; addr_t start = 0; uint32_t ptes = 0; ctx->addr = subsection + f->offsets[SUBSECTION_SUBSECTIONBASE]; if ( VMI_FAILURE == vmi_read_addr(vmi, ctx, &base) ) break; ctx->addr = subsection + f->offsets[SUBSECTION_PTESINSUBSECTION]; if ( VMI_FAILURE == vmi_read_32(vmi, ctx, &ptes) ) break; ctx->addr = subsection + f->offsets[SUBSECTION_STARTINGSECTOR]; if ( VMI_FAILURE == vmi_read_32(vmi, ctx, (uint32_t*)&start) ) break; /* * The offset into the file is stored implicitely * based on the PTE's location within the Subsection. 
*/ addr_t subsection_offset = start * 0x200; addr_t ptecount; for (ptecount=0; ptecount < ptes; ptecount++) { addr_t pteoffset = base + f->mmpte_size * ptecount; addr_t fileoffset = subsection_offset + ptecount * 0x1000; addr_t pte = 0; ctx->addr = pteoffset; if ( VMI_FAILURE == vmi_read(vmi, ctx, f->mmpte_size, &pte, NULL) ) break; if ( ENTRY_PRESENT(1, pte) ) { uint8_t page[4096]; if ( VMI_FAILURE == vmi_read_pa(vmi, VMI_BIT_MASK(12,48) & pte, 4096, &page, NULL) ) continue; if ( !fseek ( fp, fileoffset, SEEK_SET ) ) { if ( fwrite(page, 4096, 1, fp) ) filesize = MAX(filesize, fileoffset + 4096); } } } ctx->addr = subsection + f->offsets[SUBSECTION_NEXTSUBSECTION]; if ( !vmi_read_addr(vmi, ctx, &subsection) ) break; } fclose(fp); print_extraction_information(f, drakvuf, info, filename, filesize, fo_flags, curr_sequence_number); save_file_metadata(f, info, curr_sequence_number, control_area, filename, filesize, fo_flags); }