static BOOL process_vtbl(ea_t &ea_sect)
{
    VTBL_info_t vftable_info_t;
    if (get_vtbl_info(ea_sect, vftable_info_t))
    {
        ea_sect = vftable_info_t.ea_end;

        ea_t ea_assumed;
        verify_32_t((vftable_info_t.ea_begin - 4), ea_assumed);

        if (vftable_info_t.methods > 1)
        {
            if (has_user_name(getFlags(vftable_info_t.ea_begin)))
            {
                vftable_info_t.vtbl_name = get_short_name(vftable_info_t.ea_begin);

                qstring vtbl_info_str;
                vtbl_info_str.cat_sprnt(" 0x%x - 0x%x: %s methods count: %d",
                                        vftable_info_t.ea_begin,
                                        vftable_info_t.ea_end,
                                        vftable_info_t.vtbl_name.c_str(),
                                        vftable_info_t.methods);

                vtbl_list.push_back(vtbl_info_str);
                vtbl_t_list.push_back(vftable_info_t);
                return(TRUE);
            }
        }
        return(FALSE);
    }

    ea_sect += sizeof(UINT);
    return(FALSE);
}
void process_rtti()
{
    ea_t start = getnseg(0)->startEA;
    while (TRUE)
    {
        ea_t rt = find_RTTI(start, inf.maxEA);
        if (rt == BADADDR)
            break;
        start = rt + 4;

        char *name = get_demangle_name(rt);
        ea_t rtd = rt - 8;
        rtti_addr.push_back(rtd);

        qstring tmp;
#ifndef __EA64__
        tmp.cat_sprnt(" 0x%x: %s", rtd, name);
#else
        tmp.cat_sprnt(_T(" 0x%I64X: %s"), rtd, name);
#endif
        rtti_list.push_back(tmp);
    }
}
static void get_xrefs_to_vtbl()
{
    ea_t cur_vt_ea = vtbl_t_list[current_line_pos].ea_begin;
    for (ea_t addr = get_first_dref_to(cur_vt_ea);
         addr != BADADDR;
         addr = get_next_dref_to(cur_vt_ea, addr))
    {
        qstring name;
        get_func_name2(&name, addr);

        xref_addr.push_back(addr);

        qstring tmp;
        tmp.cat_sprnt(" 0x%x: %s", addr, name.c_str());
        xref_list.push_back(tmp);
    }
}
static BOOL process_vtbl(ea_t &ea_sect)
{
    VTBL_info_t vftable_info_t;
    if (get_vtbl_info(ea_sect, vftable_info_t))
    {
        ea_sect = vftable_info_t.ea_end;

        ea_t ea_assumed;
#ifndef __EA64__
        verify_32_t((vftable_info_t.ea_begin - 4), ea_assumed);
#else
        verify_64_t((vftable_info_t.ea_begin - sizeof(UINT64)), ea_assumed);
#endif

        if (vftable_info_t.methods > 0)
        {
            /*if (has_user_name(getFlags(vftable_info_t.ea_begin))) {*/
            vftable_info_t.vtbl_name = f_get_short_name(vftable_info_t.ea_begin);

            qstring vtbl_info_str;
#ifndef __EA64__
            vtbl_info_str.cat_sprnt(" 0x%x - 0x%x: %s methods count: ",
                                    vftable_info_t.ea_begin,
                                    vftable_info_t.ea_end,
                                    vftable_info_t.vtbl_name.c_str());
            vtbl_info_str.cat_sprnt(" %u", vftable_info_t.methods);
#else
            vtbl_info_str.cat_sprnt(_T(" 0x%I64X - 0x%I64X: %s methods count: "),
                                    vftable_info_t.ea_begin,
                                    vftable_info_t.ea_end,
                                    vftable_info_t.vtbl_name.c_str());
            vtbl_info_str.cat_sprnt(_T(" %d"), vftable_info_t.methods);
#endif
            vtbl_list.push_back(vtbl_info_str);
            vtbl_t_list.push_back(vftable_info_t);
            return(TRUE);
            //}
        }
        return(FALSE);
    }

#ifndef __EA64__
    ea_sect += sizeof(UINT);
#else
    ea_sect += sizeof(UINT64);
#endif
    return(FALSE);
}
static void add_mapping(ea_t from, ea_t to)
{
    if ( from != to )
    {
        deb(IDA_DEBUG_IDP, "add_mapping %a -> %a\n", from, to);
        portmap_t &p = map.push_back();
        p.from = from;
        p.to = to;
    }
}
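// NOTE: the lookup side of this port-map table is not shown in this listing.
// The helper below is only a minimal sketch of how such a table is typically
// consulted (later registrations win); the name find_mapping and the reverse
// scan are assumptions, not taken from the original source.
static ea_t find_mapping(ea_t ea)
{
    for ( size_t i = map.size(); i > 0; --i )
    {
        const portmap_t &p = map[i - 1];
        if ( p.from == ea )
            return p.to;          // remap the address
    }
    return ea;                    // no mapping registered: leave it untouched
}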
static void get_xrefs_to_vtbl()
{
    // clear the previous results when another vtable is selected,
    // otherwise the list keeps accumulating old entries
    xref_list.clear();
    xref_addr.clear();

    ea_t cur_vt_ea = vtbl_t_list[current_line_pos].ea_begin;
    for (ea_t addr = get_first_dref_to(cur_vt_ea);
         addr != BADADDR;
         addr = get_next_dref_to(cur_vt_ea, addr))
    {
        qstring name;
        f_get_func_name2(&name, addr);

        xref_addr.push_back(addr);

        qstring tmp;
#ifndef __EA64__
        tmp.cat_sprnt(" 0x%x: %s", addr, name.c_str());
#else
        tmp.cat_sprnt(_T(" 0x%I64X: %s"), addr, name.c_str());
#endif
        xref_list.push_back(tmp);
    }
}
//queue up a received datagram for eventual handling via IDA's execute_sync mechanism
//call no sdk functions other than execute_sync
void disp_request_t::queueObject(json_object *obj)
{
    bool call_exec = false;
    qmutex_lock(mtx);
    objects.push_back(obj);
    call_exec = objects.size() == 1;
    qmutex_unlock(mtx);

    if (call_exec)
    {
        //only invoke execute_sync if the buffer just added was at the head of the queue
        //in theory this allows multiple datagrams to get queued for handling
        //in a single execute_sync callback
        execute_sync(*this, MFF_WRITE);
    }
}
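// NOTE: the consumer side of queueObject() is not part of this listing.
// The sketch below shows one plausible implementation of the execute_sync
// callback that drains the queue on IDA's main thread; handleObject() and the
// exact container operations are assumptions, not the original code.
int idaapi disp_request_t::execute(void)
{
    qmutex_lock(mtx);
    while ( !objects.empty() )
    {
        json_object *obj = objects.front();
        objects.erase(objects.begin());
        // release the lock while handling so the receiver thread can keep
        // queueing datagrams without blocking on this callback
        qmutex_unlock(mtx);
        handleObject(obj);        // hypothetical dispatch routine
        qmutex_lock(mtx);
    }
    qmutex_unlock(mtx);
    return 0;
}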
//---------------------------------------------------------------------------
bool x86seh_ctx_t::get_sehlist()
{
    uint64 fs_sel;
    ea_t fs_base;
    uint32 excr_ea;
    handlers.clear();
    if ( !get_reg_val("fs", &fs_sel)
      || internal_get_sreg_base(tid, int(fs_sel), &fs_base) <= 0
      || read_dbg_memory(fs_base, &excr_ea, sizeof(excr_ea)) != sizeof(excr_ea) )
    {
        warning("Failed to build the SEH list for thread %08X", tid);
        return false;
    }

    struct EXC_REG_RECORD
    {
        uint32 p_prev;
        uint32 p_handler;
    };
    EXC_REG_RECORD rec;
    std::set<uint32> seen;
    while ( excr_ea != 0xffffffff )
    {
        if ( read_dbg_memory(excr_ea, &rec, sizeof(rec)) != sizeof(rec) )
            break;
        if ( !seen.insert(excr_ea).second )
        {
            msg("Circular SEH record has been detected\n");
            break;
        }
        handlers.push_back(rec.p_handler);
        excr_ea = rec.p_prev;
    }
    return true;
}
void IDAP_run(int arg)
{
    FILE *f, *f2;
    char *filename = construct_output_filename(".import_allocs.txt");
    f = qfopen(filename, "wb");
    char *filename2 = construct_output_filename(".import_allocs_wrappers.txt");
    f2 = qfopen(filename2, "wb");

    //r0 allocators
    funcMalloc.push_back(TFuncMalloc(" ExAllocatePoolWithQuota", 2));
    funcMalloc.push_back(TFuncMalloc(" __imp__ExAllocatePoolWithQuota@8", 2));       //ntoskrnl.exe
    funcMalloc.push_back(TFuncMalloc(" ExAllocatePoolWithQuotaTag", 2));
    funcMalloc.push_back(TFuncMalloc(" __imp__ExAllocatePoolWithQuotaTag@12", 2));   //ntoskrnl.exe
    funcMalloc.push_back(TFuncMalloc("ExAllocatePoolWithTag", 2));
    funcMalloc.push_back(TFuncMalloc("__imp__ExAllocatePoolWithTag@12", 2));         //ntoskrnl.exe
    funcMalloc.push_back(TFuncMalloc("ExAllocatePoolWithTagPriority", 2));
    funcMalloc.push_back(TFuncMalloc("__imp__ExAllocatePoolWithTagPriority@16", 2)); //ntoskrnl.exe
    funcMalloc.push_back(TFuncMalloc("IoAllocateMdl", 2));
    funcMalloc.push_back(TFuncMalloc("__imp__IoAllocateMdl@20", 2));                 //ntoskrnl.exe
    funcMalloc.push_back(TFuncMalloc("RtlAllocateHeap", 3));
    funcMalloc.push_back(TFuncMalloc("__imp__RtlAllocateHeap", 3));                  //ntoskrnl.exe
    funcMalloc.push_back(TFuncMalloc("EngAllocMem", 2));
    funcMalloc.push_back(TFuncMalloc("__imp__EngAllocMem", 2));                      //win32k.sys
    funcMalloc.push_back(TFuncMalloc("__imp__EngAllocMem@12", 2));                   //win32k.sys

    //type pointer to size!!!
    //funcMalloc.push_back(TFuncMalloc("ZwAllocateVirtualMemory", 4));
    //funcMalloc.push_back(TFuncMalloc("__imp__ZwAllocateVirtualMemory@24", 4));     //ntoskrnl.exe
    //funcMalloc.push_back(TFuncMalloc("NtAllocateVirtualMemory", 4));
    //funcMalloc.push_back(TFuncMalloc("__imp__NtAllocateVirtualMemory@24", 4));     //ntoskrnl.exe
    //funcMalloc.push_back(TFuncMalloc("RtlReAllocateHeap", 4));
    //funcMalloc.push_back(TFuncMalloc("HeapAlloc", 3));

    //r3 allocators
    funcMalloc.push_back(TFuncMalloc("GlobalAlloc", 2));                             //kernel32.dll
    funcMalloc.push_back(TFuncMalloc("HeapAlloc", 3));                               //kernel32.dll
    funcMalloc.push_back(TFuncMalloc("__imp__HeapAlloc@12", 3));                     //kernel32.dll
    funcMalloc.push_back(TFuncMalloc("__imp__HeapReAlloc@16", 4));                   //kernel32.dll
    funcMalloc.push_back(TFuncMalloc("HeapReAlloc", 4));                             //kernel32.dll
    funcMalloc.push_back(TFuncMalloc("__imp__LocalAlloc@8", 2));                     //kernel32.dll
    funcMalloc.push_back(TFuncMalloc("LocalAlloc", 2));                              //kernel32.dll
    funcMalloc.push_back(TFuncMalloc("__imp__LocalReAlloc@12", 3));                  //kernel32.dll
    funcMalloc.push_back(TFuncMalloc("LocalReAlloc", 3));                            //kernel32.dll
    funcMalloc.push_back(TFuncMalloc("VirtualAlloc", 2));
    funcMalloc.push_back(TFuncMalloc("__imp__VirtualAlloc@16", 2));                  //kernel32.dll
    funcMalloc.push_back(TFuncMalloc("__imp__MpHeapAlloc", 3));                      //msdart.dll export
    funcMalloc.push_back(TFuncMalloc("__imp__MpHeapReAlloc", 3));                    //msdart.dll export
    funcMalloc.push_back(TFuncMalloc("__imp__GdipAlloc@4", 1));                      //gdiplus.dll export
    //funcMalloc.push_back(TFuncMalloc("GpMalloc", 1));                              //gdiplus.dll
    funcMalloc.push_back(TFuncMalloc("__imp__malloc", 1));                           //msvcrt.dll
    funcMalloc.push_back(TFuncMalloc("_malloc", 1));                                 //msvcrt.dll, the same as __imp__malloc
    funcMalloc.push_back(TFuncMalloc("__imp__realloc", 2));                          //msvcrt.dll
    funcMalloc.push_back(TFuncMalloc("_realloc", 2));                                //msvcrt.dll
    //funcMalloc.push_back(TFuncMalloc("_alloca", 1));
    //funcMalloc.push_back(TFuncMalloc("_malloca", 1));

    uint i = 0, j = funcMalloc.size();
    msg("standard funcMalloc.size() = %d\n", funcMalloc.size());
    for (; i < funcMalloc.size(); i++)
    {
        find_alloc_calls_ex(f, funcMalloc[i]);
        pretty_printing_ex(f, funcMalloc[i]);
        if (Malloc_calls.size() > 0)
            Malloc_calls.clear();
        qfprintf(f, "\n\n");
        qflush(f);
    }
    qfclose(f);

    j = funcMalloc_wrappers.size();
    msg("standard funcMalloc_wrappers.size() = %d\n", j);
    //TODO: add level property
    i = 0;
    while (i < j)
    {
        find_alloc_calls_warreps_ex(f2, funcMalloc_wrappers[i]);
        msg("[%d].funcMalloc.size() = %d\n", i, funcMalloc_wrappers.size());
        msg("[%d].Malloc_calls.size() = %d\n", i, Malloc_calls.size());
        pretty_printing_ex(f2, funcMalloc_wrappers[i]);
        if (Malloc_calls.size() > 0)
            Malloc_calls.clear();
        j = funcMalloc_wrappers.size(); //re-read the count: processing may have appended new wrappers
        i++;
        qflush(f2);
    }
    qfclose(f2);
    return;
}
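// NOTE: the record types populated above are never defined in this excerpt.
// The structs below are a rough sketch inferred purely from how the
// constructors are invoked here and in analyze_malloc_xref_ex() below;
// all field names are placeholders and may not match the real plugin.
struct TFuncMalloc
{
    qstring name;     // allocator (or import thunk) name to look up
    int size_arg;     // position of the size argument among the pushed args
    TFuncMalloc(const char *n, int arg) : name(n), size_arg(arg) {}
};

struct TFuncMallocWrapper
{
    qstring name;     // wrapper function name ("new malloc" when unresolved)
    qstring callee;   // allocator the wrapper forwards to
    int size_arg;     // which wrapper argument carries the allocation size
    ea_t ea;          // address of the wrapper function or of the call site
    int kind;         // TRAMPOLINE or WRAPPER
    TFuncMallocWrapper(const char *n, const char *c, int arg, ea_t a, int k)
        : name(n), callee(c), size_arg(arg), ea(a), kind(k) {}
};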
void analyze_malloc_xref_ex(char *name, ea_t xref_addr, int push_count)
{
    qstring buffer;
    int value = 0;
    int type;
    op_t memory_size_var;

    if (is_trampoline(xref_addr))
    {
        func_t *func = get_func(xref_addr);
        //msg("analyze_malloc_xref: %s %a - trampoline!\n", name, xref_addr);
        //too easy, how about mov ebp,esp jmp:malloc?
        if (func)
        {
            qstring buff;
            get_short_name(&buff, func->startEA);
            //get_long_name(BADADDR, func->startEA, buffer, MAXSTR);
            TFuncMallocWrapper new_malloc = TFuncMallocWrapper((char *)buff.c_str(), name, push_count, xref_addr, TRAMPOLINE);
            if (!does_exist(new_malloc))
                funcMalloc_wrappers.push_back(new_malloc);
        }
        else
        {
            TFuncMallocWrapper new_malloc = TFuncMallocWrapper("new malloc", name, push_count, xref_addr, TRAMPOLINE);
            if (!does_exist(new_malloc))
                funcMalloc_wrappers.push_back(new_malloc);
        }
    }

    ea_t push_malloc_size_addr = find_instruction_N_times_backward(xref_addr, NN_push, push_count);
    if (push_malloc_size_addr != BADADDR)
    {
        memory_size_var = get_first_operand_new(push_malloc_size_addr);

        //Allocation of const memory size
        if (memory_size_var.type == o_imm)
        {
            //msg("analyze_malloc_xref: %s %a - immediate!\n", name, push_malloc_size_addr);
            Malloc_calls.push_back(TFuncMalloc_call(xref_addr, CONSTVALUE, memory_size_var.value));
            //msg("analyze_malloc_xref: %s Malloc_call_list.size() = %d!\n", name, Malloc_calls.size());
            return;
        }

        //Allocation of var memory size
        //Allocation of var by register
        if (memory_size_var.type == o_reg)
        {
            ea_t addr_of_src = find_instruction_that_changes_operand_backward_smart(push_malloc_size_addr, memory_size_var);
            msg("analyze_malloc_xref: %s %a - var!\n", name, push_malloc_size_addr);
            if (addr_of_src != BADADDR)
            {
                type = assign_type(addr_of_src, &value);
                Malloc_calls.push_back(TFuncMalloc_call(xref_addr, type, value, addr_of_src));
            }
            else
            {
                Malloc_calls.push_back(TFuncMalloc_call(xref_addr, UNDEFINED));
            }
            msg("analyze_malloc_xref: %s Malloc_call_list.size() = %d!\n", name, Malloc_calls.size());
            return;
        }

        //Allocation of var by argument - new trampoline
        //TODO: better algo!
        if ( memory_size_var.type == o_displ
          && memory_size_var.reg == 5
          && memory_size_var.phrase == 5
          && memory_size_var.value == 0
          && (int)memory_size_var.addr > 0 )
        {
            func_t *func = get_func(xref_addr);
            //too easy, how about mov ebp,esp call:malloc?
            if (func)
            {
                get_short_name(&buffer, func->startEA);
                //get_long_name(BADADDR, func->startEA, buffer, MAXSTR);
                TFuncMallocWrapper new_malloc = TFuncMallocWrapper((char *)buffer.c_str(), name, memory_size_var.addr / sizeof(ea_t) - 1, func->startEA, WRAPPER);
                if (!does_exist(new_malloc))
                    funcMalloc_wrappers.push_back(new_malloc);
                //funcMalloc.push_back(TFuncMalloc("here var name", memory_size_var.addr / sizeof(ea_t), func->startEA, WRAPPER));
            }
            else
            {
                TFuncMallocWrapper new_malloc = TFuncMallocWrapper("new malloc", name, memory_size_var.addr / sizeof(ea_t) - 1, xref_addr, WRAPPER); //maybe better to name it malloc_at_%a?
                if (!does_exist(new_malloc))
                    funcMalloc_wrappers.push_back(new_malloc);
                //funcMalloc.push_back(TFuncMalloc("here name of addr", memory_size_var.addr / sizeof(ea_t), xref_addr, WRAPPER));
            }
            return;
        }
        else
        {
            Malloc_calls.push_back(TFuncMalloc_call(xref_addr, VARVALUE));
        }
    }
    else
    {
        Malloc_calls.push_back(TFuncMalloc_call(xref_addr, UNDEFINED));
    }
    msg("analyze_malloc_xref: %s Malloc_call_list.size() = %d!\n", name, Malloc_calls.size());
}