void rdis_check_references (struct _rdis * rdis) { struct _graph_it * git; // for each node for (git = graph_iterator(rdis->graph); git != NULL; git = graph_it_next(git)) { struct _graph_node * node = graph_it_node(git); struct _list_it * lit; // for each instruction for (lit = list_iterator(node->data); lit != NULL; lit = lit->next) { struct _ins * ins = lit->data; struct _list_it * rit; // for each reference for (rit = list_iterator(ins->references); rit != NULL; rit = rit->next) { struct _reference * reference = rit->data; if (reference->type == REFERENCE_CONSTANT) { uint64_t lower = map_fetch_max_key(rdis->memory, reference->address); struct _buffer * buffer = map_fetch(rdis->memory, lower); if (buffer == NULL) continue; uint64_t upper = lower + buffer->size; if ( (reference->address < lower) || (reference->address >= upper)) continue; reference->type = REFERENCE_CONSTANT_ADDRESSABLE; } } } } }
/*
 * Build a map from referenced address -> list of references to that address,
 * gathered from every instruction in the rdis graph.
 *
 * Constant references whose target falls inside a mapped memory buffer are
 * inserted as private copies promoted to REFERENCE_CONSTANT_ADDRESSABLE;
 * all other references are inserted as-is.
 *
 * Returns a newly created map (address -> struct _list of struct _reference)
 * owned by the caller.
 */
struct _map * rdis_g_references (struct _rdis * rdis)
{
    struct _map * references = map_create();
    struct _graph_it * git;

    // for each node
    for (git = graph_iterator(rdis->graph); git != NULL; git = graph_it_next(git)) {
        struct _graph_node * node = graph_it_node(git);
        struct _list_it * lit;

        // for each instruction
        for (lit = list_iterator(node->data); lit != NULL; lit = lit->next) {
            struct _ins * ins = lit->data;
            struct _list_it * rit;

            // for each reference
            for (rit = list_iterator(ins->references); rit != NULL; rit = rit->next) {
                struct _reference * reference = rit->data;
                // set when `reference` points at a local copy we must free
                int delete_reference = 0;

                if (reference->type == REFERENCE_CONSTANT) {
                    // locate the memory buffer containing reference->address
                    uint64_t lower = map_fetch_max_key(rdis->memory, reference->address);
                    struct _buffer * buffer = map_fetch(rdis->memory, lower);
                    if (buffer == NULL)
                        continue;
                    uint64_t upper = lower + buffer->size;
                    // skip addresses outside the buffer's [lower, upper) range
                    if (    (reference->address < lower)
                         || (reference->address >= upper))
                        continue;
                    // promote a private copy so the instruction's own
                    // reference list is left untouched
                    reference = object_copy(reference);
                    reference->type = REFERENCE_CONSTANT_ADDRESSABLE;
                    delete_reference = 1;
                }

                // fetch (or lazily create) the list of references to this address
                struct _list * ref_list = map_fetch(references, reference->address);
                if (ref_list == NULL) {
                    ref_list = list_create();
                    // map_insert stores its own copy (hence the delete of our
                    // local and the re-fetch of the map-owned instance)
                    map_insert(references, reference->address, ref_list);
                    object_delete(ref_list);
                    ref_list = map_fetch(references, reference->address);
                }
                // list_append likewise copies the reference ...
                list_append(ref_list, reference);
                // ... so the promoted copy, if we made one, is freed here
                if (delete_reference)
                    object_delete(reference);
            }
        }
    }

    return references;
}
/*
 * Recursive function-discovery pass over x86-64 code.
 *
 * Disassembles linearly from `address`; every direct (`imm`) call target is
 * inserted into `functions` as a new struct _function, and every direct
 * branch/jump/call target is followed recursively.  `disassembled` records
 * already-visited instruction addresses so shared code paths are walked
 * only once.
 *
 * functions:    map of address -> struct _function, updated in place
 * disassembled: tree of struct _index marking visited instruction addresses
 * address:      virtual address to begin disassembling at
 * memory:       map of base address -> struct _buffer of loaded memory
 */
void x8664_functions_r (struct _map  * functions,
                        struct _tree * disassembled,
                        uint64_t       address,
                        struct _map  * memory)
{
    ud_t ud_obj;
    int continue_disassembling = 1;

    // find the memory buffer containing `address`
    struct _buffer * buffer = map_fetch_max(memory, address);
    if (buffer == NULL)
        return;
    uint64_t base_address = map_fetch_max_key(memory, address);
    // bail if the address lies beyond the end of that buffer
    if (base_address + buffer->size < address)
        return;
    uint64_t offset = address - base_address;

    ud_init      (&ud_obj);
    ud_set_mode  (&ud_obj, 64);
    ud_set_syntax(&ud_obj, UD_SYN_INTEL);
    ud_set_input_buffer(&ud_obj, &(buffer->bytes[offset]), buffer->size - offset);

    while (continue_disassembling == 1) {
        size_t bytes_disassembled = ud_disassemble(&ud_obj);
        if (bytes_disassembled == 0) {
            break;
        }

        // a direct call target is a function entry point; record it
        if (    (ud_obj.mnemonic == UD_Icall)
             && (ud_obj.operand[0].type == UD_OP_JIMM)) {
            uint64_t target_addr = address
                                   + ud_insn_len(&ud_obj)
                                   + udis86_sign_extend_lval(&(ud_obj.operand[0]));
            if (map_fetch(functions, target_addr) == NULL) {
                // map_insert copies the function, so delete the local one
                struct _function * function = function_create(target_addr);
                map_insert(functions, target_addr, function);
                object_delete(function);
            }
        }

        // stop once we reach an instruction already visited; otherwise
        // mark this address as visited (tree_insert copies the index)
        struct _index * index = index_create(address);
        if (tree_fetch(disassembled, index) != NULL) {
            object_delete(index);
            return;
        }
        tree_insert(disassembled, index);
        object_delete(index);

        // these mnemonics cause us to continue disassembly somewhere else
        struct ud_operand * operand;
        switch (ud_obj.mnemonic) {
        case UD_Ijo   :
        case UD_Ijno  :
        case UD_Ijb   :
        case UD_Ijae  :
        case UD_Ijz   :
        case UD_Ijnz  :
        case UD_Ijbe  :
        case UD_Ija   :
        case UD_Ijs   :
        case UD_Ijns  :
        case UD_Ijp   :
        case UD_Ijnp  :
        case UD_Ijl   :
        case UD_Ijge  :
        case UD_Ijle  :
        case UD_Ijg   :
        case UD_Ijmp  :
        case UD_Iloop :
        case UD_Icall :
            operand = &(ud_obj.operand[0]);
            // only direct (immediate) targets can be followed statically
            if (operand->type == UD_OP_JIMM) {
                x8664_functions_r(functions,
                                  disassembled,
                                  address
                                  + ud_insn_len(&ud_obj)
                                  + udis86_sign_extend_lval(operand),
                                  memory);
            }
            break;
        default :
            break;
        }

        // these mnemonics cause disassembly to stop
        switch (ud_obj.mnemonic) {
        case UD_Iret :
        case UD_Ihlt :
        case UD_Ijmp :
            continue_disassembling = 0;
            break;
        default :
            break;
        }

        address += bytes_disassembled;
    }
}
/*
 * This is the initial phase, used to populate the graph with all reachable
 * nodes. We will worry about fixing edges from jmp-like mnemonics later.
 *
 * Disassembles linearly from `address`, adding one graph node per
 * instruction and an edge from each instruction to its successor.  Direct
 * branch targets are followed recursively.
 *
 * graph:   instruction graph, updated in place
 * address: virtual address to begin disassembling at
 * memory:  map of base address -> struct _buffer of loaded memory
 */
void x8664_graph_0 (struct _graph * graph, uint64_t address, struct _map * memory)
{
    ud_t ud_obj;
    int continue_disassembling = 1;
    // address of the previously disassembled instruction, or (uint64_t)-1
    // when there is none yet
    uint64_t last_address = -1;
    // type of the edge from the previous instruction to the current one;
    // set by the PREVIOUS loop iteration's mnemonic
    int edge_type = INS_EDGE_NORMAL;

    // find the memory buffer containing `address`
    struct _buffer * buffer = map_fetch_max(memory, address);
    if (buffer == NULL)
        return;
    uint64_t base_address = map_fetch_max_key(memory, address);
    // bail if the address lies beyond the end of that buffer
    if (base_address + buffer->size < address)
        return;
    uint64_t offset = address - base_address;

    ud_init      (&ud_obj);
    ud_set_mode  (&ud_obj, 64);
    ud_set_syntax(&ud_obj, UD_SYN_INTEL);
    ud_set_input_buffer(&ud_obj, &(buffer->bytes[offset]), buffer->size - offset);

    while (continue_disassembling == 1) {
        size_t bytes_disassembled = ud_disassemble(&ud_obj);
        if (bytes_disassembled == 0) {
            break;
        }

        // even if we have already added this node, make sure we add the edge
        // from the preceding node, in case this node was added from a jump
        // previously. otherwise we won't have an edge from its preceding
        // instruction
        if (graph_fetch_node(graph, address) != NULL) {
            if (last_address != -1) {
                // not concerned if this call fails
                struct _ins_edge * ins_edge = ins_edge_create(edge_type);
                graph_add_edge(graph, last_address, address, ins_edge);
                object_delete(ins_edge);
            }
            break;
        }

        // create graph node for this instruction
        // (graph_add_node copies the list, which copied the ins)
        struct _ins * ins = x8664_ins(address, &ud_obj);
        struct _list * ins_list = list_create();
        list_append(ins_list, ins);
        graph_add_node(graph, address, ins_list);
        object_delete(ins_list);
        object_delete(ins);

        // add edge from previous instruction to this instruction
        if (last_address != -1) {
            struct _ins_edge * ins_edge = ins_edge_create(edge_type);
            graph_add_edge(graph, last_address, address, ins_edge);
            object_delete(ins_edge);
        }

        // these mnemonics cause us to continue disassembly somewhere else
        struct ud_operand * operand;
        switch (ud_obj.mnemonic) {
        case UD_Ijo   :
        case UD_Ijno  :
        case UD_Ijb   :
        case UD_Ijae  :
        case UD_Ijz   :
        case UD_Ijnz  :
        case UD_Ijbe  :
        case UD_Ija   :
        case UD_Ijs   :
        case UD_Ijns  :
        case UD_Ijp   :
        case UD_Ijnp  :
        case UD_Ijl   :
        case UD_Ijge  :
        case UD_Ijle  :
        case UD_Ijg   :
        case UD_Ijmp  :
        case UD_Iloop :
        //case UD_Icall :
            operand = &(ud_obj.operand[0]);
            // indirect targets cannot be followed; edge_type keeps its
            // previous value in that case
            if (operand->type != UD_OP_JIMM)
                break;
            // NOTE(review): UD_Icall's case label is commented out above, so
            // this first test is dead code — confirm whether calls were meant
            // to be followed here
            if (ud_obj.mnemonic == UD_Icall)
                edge_type = INS_EDGE_NORMAL;
            else if (ud_obj.mnemonic == UD_Ijmp)
                edge_type = INS_EDGE_NORMAL; // not important, will terminate
            else
                edge_type = INS_EDGE_JCC_FALSE;
            // NOTE(review): always true here — already checked above
            if (operand->type == UD_OP_JIMM) {
                x8664_graph_0(graph,
                              address
                              + ud_insn_len(&ud_obj)
                              + udis86_sign_extend_lval(operand),
                              memory);
            }
            break;
        default :
            edge_type = INS_EDGE_NORMAL;
            break;
        }

        // these mnemonics cause disassembly to stop
        switch (ud_obj.mnemonic) {
        case UD_Iret :
        case UD_Ihlt :
        case UD_Ijmp :
            continue_disassembling = 0;
            break;
        default :
            break;
        }

        last_address = address;
        address += bytes_disassembled;
    }
}
struct _ins * x86_disassemble_ins_ (const struct _map * mem_map, const uint64_t address, uint8_t mode) { struct _buffer * buf = map_fetch_max(mem_map, address); uint64_t buf_addr = map_fetch_max_key(mem_map, address); if (buf == NULL) return NULL; size_t offset = address - buf_addr; ud_t ud_obj; ud_init(&ud_obj); ud_set_mode(&ud_obj, mode); ud_set_syntax(&ud_obj, UD_SYN_INTEL); ud_set_input_buffer(&ud_obj, &(buf->bytes[offset]), buf->size - offset); if (ud_disassemble(&ud_obj) == 0) return NULL; struct _ins * ins = ins_create(address, ud_insn_ptr(&ud_obj), ud_insn_len(&ud_obj), ud_insn_asm(&ud_obj), NULL); switch (ud_obj.mnemonic) { case UD_Ijo : case UD_Ijno : case UD_Ijb : case UD_Ijae : case UD_Ijz : case UD_Ijnz : case UD_Ijbe : case UD_Ija : case UD_Ijs : case UD_Ijns : case UD_Ijp : case UD_Ijnp : case UD_Ijl : case UD_Ijge : case UD_Ijle : case UD_Ijg : case UD_Iloop : ins_add_successor(ins, address + ud_insn_len(&ud_obj), INS_SUC_JCC_FALSE); if (ud_obj.operand[0].type == UD_OP_JIMM) { ins_add_successor(ins, address + ud_insn_len(&ud_obj) + x86_sign_extend_lval(&(ud_obj.operand[0])), INS_SUC_JCC_TRUE); } break; case UD_Ijmp : if (ud_obj.operand[0].type == UD_OP_JIMM) { ins_add_successor(ins, address + ud_insn_len(&ud_obj) + x86_sign_extend_lval(&(ud_obj.operand[0])), INS_SUC_JUMP); } break; case UD_Icall : ins_add_successor(ins, address + ud_insn_len(&ud_obj), INS_SUC_NORMAL); if (ud_obj.operand[0].type == UD_OP_JIMM) { ins_add_successor(ins, address + ud_insn_len(&ud_obj) + x86_sign_extend_lval(&(ud_obj.operand[0])), INS_SUC_CALL); } break; case UD_Iret : case UD_Ihlt : break; default : ins_add_successor(ins, address + ud_insn_len(&ud_obj), INS_SUC_NORMAL); } return ins; }
struct _map * elf32_memory_map (struct _elf32 * elf32) { struct _map * map = map_create(); // if there are no program headers, load the entire file into memory at // offset 0 if (elf32->ehdr->e_phnum == 0) { struct _buffer * buffer = buffer_create(elf32->data, elf32->data_size); map_insert(map, 0, buffer); object_delete(buffer); return map; } int phdr_i; for (phdr_i = 0; phdr_i < elf32->ehdr->e_phnum; phdr_i++) { Elf32_Phdr * phdr = elf32_phdr(elf32, phdr_i); uint64_t bottom = phdr->p_vaddr; uint64_t top = phdr->p_vaddr + phdr->p_memsz; if (top - bottom == 0) continue; uint8_t * tmp = malloc(phdr->p_memsz); memset(tmp, 0, phdr->p_memsz); memcpy(tmp, &(elf32->data[phdr->p_offset]), phdr->p_filesz); struct _buffer * buffer; uint64_t key; // do we already have a buffer that this section overlaps? buffer = map_fetch_max(map, phdr->p_vaddr + phdr->p_memsz); key = map_fetch_max_key(map, phdr->p_vaddr + phdr->p_memsz); if ( (buffer != NULL) && ( ((bottom <= key) && (top >= key)) || ((bottom <= key + buffer->size) && (top >= key + buffer->size)) || ((bottom >= key) && (top <= key + buffer->size)))) { // create a temporary buffer to hold this sections contents. // if this section fits inside a previous section, then modify in place if ((bottom >= key) && (top <= key + buffer->size)) { memcpy(&(buffer->bytes[bottom - key]), tmp, phdr->p_memsz); } // if this section comes before a previous section (or contains // previous section) else if (bottom <= key) { uint64_t new_size; new_size = ((key + buffer->size) > top ? 
(key + buffer->size) : top); new_size -= bottom; uint8_t * tmp2 = malloc(new_size); memcpy(&(tmp2[key - bottom]), buffer->bytes, buffer->size); memcpy(tmp2, tmp, phdr->p_memsz); struct _buffer * new_buffer = buffer_create(tmp2, new_size); map_remove(map, key); map_insert(map, bottom, new_buffer); object_delete(new_buffer); free(tmp2); } // if this section overlaps previous section but starts after // previous section starts else { uint64_t new_size = top - key; uint8_t * tmp2 = malloc(new_size); memcpy(tmp2, buffer->bytes, buffer->size); memcpy(&(tmp2[bottom - key]), tmp, phdr->p_memsz); struct _buffer * new_buffer = buffer_create(tmp2, new_size); map_remove(map, key); map_insert(map, key, new_buffer); object_delete(new_buffer); free(tmp2); } } // we don't have a previous section that this buffer overlaps else { struct _buffer * new_buffer = buffer_create(tmp, top - bottom); map_insert(map, bottom, new_buffer); object_delete(new_buffer); } free(tmp); } return map; }