char *ropit_x86_list_disasm (uint8_t *bytes, int len)
{
    int pos = 0;            // current position in buffer
    int size;               // size of instruction
    x86_insn_t insn;        // instruction
    char line[4096] = {0};
    int linelen = 4096;
    char *listing = NULL;

    listing = calloc(2048, sizeof(*listing));
    if (!listing)
        return NULL;

    x86_init(opt_none, NULL, NULL);
    while ( pos < len ) {
        // disassemble address
        size = x86_disasm(bytes, len, 0, pos, &insn);
        if ( size ) {
            // print instruction
            x86_format_insn(&insn, line, linelen, intel_syntax);
            // printf("%s\n", line);
            // append only while it still fits in the fixed 2048-byte listing buffer
            if (strlen(listing) + strlen(line) + 2 <= 2048) {
                strcat(listing, line);
                strcat(listing, "\n");
            }
            pos += size;
        }
        else {
            pos++;
        }
        x86_oplist_free(&insn);
    }
    x86_cleanup();

    return listing;
}
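A minimal caller for the listing function above might look like the following sketch. The sample bytes and the `main` wrapper are illustrative only; the declaration is repeated here so the snippet stands alone.

/* Hypothetical usage sketch: disassemble a tiny hard-coded stub and print
 * the heap-allocated listing returned by ropit_x86_list_disasm(). */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

extern char *ropit_x86_list_disasm(uint8_t *bytes, int len);

int main(void)
{
    /* push ebp; mov ebp, esp; pop ebp; ret */
    uint8_t code[] = { 0x55, 0x89, 0xe5, 0x5d, 0xc3 };

    char *listing = ropit_x86_list_disasm(code, (int) sizeof(code));
    if (listing) {
        printf("%s", listing);
        free(listing);      /* caller owns the returned buffer */
    }
    return 0;
}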
void disasm_init(Disassembler *disasm, Patcher *patcher)
{
    disasm->patcher = patcher;
    x86_init(opt_none, NULL, NULL);
    disasm->num_funcs = 0;
    disasm->num_with_int80s = 0;
}
void perplex_section(pefile_t *pefile, int section_number)
{
    char *data;
    size_t size;
    x86_insn_t insn;
    int pos = 0;
    int section_start;

    assert(pefile != NULL);

    section_start = pefile->pimage_section_headers[section_number].VirtualAddress
        + pefile->image_nt_headers.OptionalHeader.ImageBase;

    data = get_section_data(pefile, section_number, &size);
    if (data == NULL)
        return;

    x86_init(opt_none, 0);
    while (pos < size) {
        char line[512];
        int insn_size = x86_disasm(data, size, 0, pos, &insn);

        if (insn_size > 0) {
            x86_format_insn(&insn, line, sizeof line, intel_syntax);
            //printf("%08X: %s\n", pos + section_start, line);
            x86_oplist_free(&insn);  // release the operand list allocated by x86_disasm
            pos += insn_size;
        } else {
            //printf("%08X: Invalid Instruction\n", pos + section_start);
            pos++;
        }
    }
    x86_cleanup();
}
BOOL WINAPI DllMain(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved)
{
    if (fdwReason == DLL_PROCESS_ATTACH && init == 0) {
        init = 1;
        x86_init (opt_none, NULL, NULL);
        hook((char *)(GetProcAddress(LoadLibrary("user32"), "MessageBoxA")),
             (char *)&newMessageBox,
             (unsigned long *)&oldMessageBox);
        return TRUE;
    }
    return TRUE;
}
// disasm 1 instruction
char *ropit_x86_disasm (uint8_t *bytes, int len, int *sz_dis)
{
    int sz_inst, len_disasm;    /* size of instruction */
    x86_insn_t insn;            /* instruction */
    char line[1024];
    int sz_line = 1024;
    char *disasm;

    if (!bytes || len <= 0 || !sz_dis) {
        debug_printf (MESSAGE_ERROR, stderr, "error: ropit_x86_disasm(): Bad parameter(s)\n");
        return NULL;
    }

    len_disasm = 0;

    x86_init(opt_none, NULL, NULL);
    sz_inst = x86_disasm (bytes, len, 0, 0, &insn);
    if (sz_inst > 0) {
        len_disasm = x86_format_insn (&insn, line, sz_line, intel_syntax);
        *sz_dis = sz_inst;
    }
    x86_oplist_free (&insn);
    x86_cleanup();

    if (len_disasm == 0)
        return NULL;

    // allocate one extra zeroed byte so the copied string stays NUL-terminated
    disasm = calloc (len_disasm + 1, sizeof(*disasm));
    if (!disasm) {
        debug_printf (MESSAGE_ERROR, stderr, "error: ropit_x86_disasm(): Failed disasm alloc\n");
        return NULL;
    }
    memcpy (disasm, line, len_disasm);

    return disasm;
}
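The single-instruction variant reports how many bytes it consumed through `sz_dis`, so a caller can step through a buffer instruction by instruction. The sketch below is hypothetical (the `print_all` helper and the forward declaration are not part of the original code).

/* Hypothetical usage sketch: walk a buffer one instruction at a time,
 * advancing by the size reported through sz_dis. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

extern char *ropit_x86_disasm(uint8_t *bytes, int len, int *sz_dis);

static void print_all(uint8_t *bytes, int len)
{
    int pos = 0, sz_dis = 0;

    while (pos < len) {
        char *text = ropit_x86_disasm(bytes + pos, len - pos, &sz_dis);
        if (!text)
            break;              /* undecodable byte or formatting failure */
        printf("%4d: %s\n", pos, text);
        free(text);             /* each call returns a fresh allocation */
        pos += sz_dis;          /* advance by the decoded instruction size */
    }
}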
/**
 * @brief Architecture dependent Initialization
 *
 * call other init fns.
 */
void arch_init()
{
    x86_init();
}
void *decode_instructions(
    void *buf, void *buf_limit,
    void *(*putxml_fptr)(void *putxml_data, const char *element_format, void *element_data),
    void *putxml_data,
    void *(*printf_fptr)(void *printf_data, const char *format, ...),
    void *printf_data,
    const char *config_string)
{
#define PUTXML(efmt, edata) \
    (*putxml_fptr)(putxml_data, (const char*)(efmt), (void*)(edata))
#define PRINTF(fmt, x) \
    (*printf_fptr)(printf_data, (const char*)(fmt), x)
#define PRINTF2(fmt, x, y) \
    (*printf_fptr)(printf_data, (const char*)(fmt), x, y)

    size_t buf_size = (char*)buf_limit - (char*)buf;
    unsigned char *uc_buf = (unsigned char*) buf;
    int have_opnd, which;
    x86_insn_t insn;
    x86_op_t *opnd;
    intptr_t addr;
    char sbuf[MAX_OP_STRING * 2];
    size_t pos, size;

    if (!putxml_fptr) {
        putxml_fptr = empty_putxml;
    }
    if (!printf_fptr) {
        intptr_t stdio_fprintf_addr = (intptr_t) &fprintf;
        printf_fptr = (void * (*)(void *, const char *, ...)) stdio_fprintf_addr;
        if (!printf_data)
            printf_data = stdout;
    }

    x86_init(0, 0, 0);
    PUTXML("insns", buf);
    PUTXML("mach name='%s'/", "i386(base-hsdis)");
    PUTXML("format bytes-per-line='%p'/", (intptr_t)FORMAT_BYTES_PER_LINE);

    for (pos = 0; pos < buf_size; pos += size) {
        size = x86_disasm(uc_buf, buf_size, (intptr_t) buf, pos, &insn);
        PUTXML("insn", (char*)buf + pos);
        if (size == 0) {
            PRINTF("invalid\t0x%02x", uc_buf[pos]);
            size = 1;  /* to make progress */
        } else {
            PRINTF2("%s%s", insn.prefix_string, insn.mnemonic);
            have_opnd = 0;
            for (which = 0; which < 3; which++) {
                /* dest comes first, then src and/or imm */
                switch (which) {
                case 0:  opnd = x86_operand_1st(&insn); break;
                case 1:  opnd = x86_operand_2nd(&insn); break;
                default: opnd = x86_operand_3rd(&insn); break;
                }
                if (!opnd)
                    continue;
                if (opnd->flags & op_implied)
                    continue;
                if (!have_opnd++)
                    PRINTF("\t", 0);
                else
                    PRINTF(", ", 0);
                addr = addr_from_operand(opnd);
                if (!(addr && PUTXML("addr/", addr))) {
                    x86_format_operand(opnd, sbuf, sizeof(sbuf), intel_syntax);
                    PRINTF("%s", sbuf);
                }
            }
        }
        PUTXML("/insn", (char*)buf + pos + size);
        PRINTF("\n", 0);
    }
    PUTXML("/insns", (char*)buf + pos);

    x86_cleanup();
    return (char*)buf + pos;
}
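As the example above shows, both callbacks may be NULL: the putxml callback then falls back to `empty_putxml` and the printf callback to `fprintf` on `stdout`. The sketch below exercises that default path; the `demo_decode` wrapper and sample bytes are illustrative, and the prototype is repeated only for self-containment.

/* Sketch only: drive the decoder above with NULL callbacks so output goes
 * through the fprintf/stdout fallback. */
#include <stdint.h>
#include <stddef.h>

extern void *decode_instructions(
    void *buf, void *buf_limit,
    void *(*putxml_fptr)(void *putxml_data, const char *element_format, void *element_data),
    void *putxml_data,
    void *(*printf_fptr)(void *printf_data, const char *format, ...),
    void *printf_data,
    const char *config_string);

static void demo_decode(void)
{
    uint8_t code[] = { 0x55, 0x89, 0xe5, 0x5d, 0xc3 };

    decode_instructions(code, code + sizeof(code),
                        NULL, NULL,     /* putxml callback + data: defaults */
                        NULL, NULL,     /* printf callback + data: fprintf/stdout */
                        NULL);          /* config string unused here */
}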
int main(int argc, char **argv)
{
    if (argc != 2) {
        print_usage();
        return 1;
    }

    if (strcmp(argv[1], "demo") == 0) {
        test();
        return 0;
    }

    /* Code below adapted from 'libelf by Example' */
    Elf *e;
    GElf_Phdr phdr;
    int fd;
    size_t n, i;

    if (elf_version(EV_CURRENT) == EV_NONE) {
        printf("Error with elf library [%s]\n", elf_errmsg(-1));
        exit(1);
    }
    if ((fd = open(argv[1], O_RDONLY, 0)) < 0) {
        printf("Error reading file.\n");
        exit(1);
    }
    if ((e = elf_begin(fd, ELF_C_READ, NULL)) == NULL) {
        printf("elf_begin() failed [%s]\n", elf_errmsg(-1));
        exit(1);
    }
    if (elf_kind(e) != ELF_K_ELF) {
        printf("File is not an ELF object.\n");
        exit(1);
    }
    if (elf_getphdrnum(e, &n) != 0) {
        printf("elf_getphdrnum() failed [%s]\n", elf_errmsg(-1));
        exit(1);
    }

    /* Initialize libdis */
    x86_init(opt_none, NULL, NULL);

    gadget_t gadgets;
    gadget_init(&gadgets);

    /* Loop over each program header (segment) */
    for (i = 0; i < n; i++) {
        if (gelf_getphdr(e, i, &phdr) != &phdr) {
            printf("gelf_getphdr() failed [%s]\n", elf_errmsg(-1));
            exit(1);
        }

        /* If the segment is loaded into memory AND is executable */
        if (phdr.p_type == PT_LOAD && phdr.p_flags & PF_X) {
            /* Load the segment from the ELF file */
            unsigned char *buffer = malloc(phdr.p_filesz);
            lseek(fd, phdr.p_offset, SEEK_SET);
            ssize_t count = read(fd, buffer, phdr.p_filesz);
            if (count != phdr.p_filesz) {
                printf("read sucks.\n");  // FIXME: EINTR etc.
                exit(1);
            }

            /* Add all gadgets in the segment to our gadget tree */
            gadget_add(&gadgets, buffer, phdr.p_filesz, phdr.p_vaddr);
            free(buffer);
        }
    }

    elf_end(e);
    close(fd);

    gadget_print(&gadgets);
    gadget_destroy(&gadgets);

    return 0;
}
int main(int argc, char *argv[])
{
    char line[128];
    struct stat sbuf;
    char *input = NULL;
    char *buf = NULL;
    FILE *fd = NULL;
    int ret = 0;
    int offset = 0;
    int size = 0;
    x86_insn_t x86dis;

    printf("\n[*] simple x86 shellcode disassembler\n");
    printf("[*] trivially implemented thanks 2 libdisasm!\n");
    printf("[*] written by [email protected]\n");

    input = argv[1];
    if (!argv[1]) {
        printf("[*] usage: %s <shellcode as binary.file>\n", argv[0]);
        ret = 1;
        goto die;
    }

    memset((char *)&sbuf, 0, sizeof(sbuf));
    if (stat(input, &sbuf) == -1) {
        printf("[!] unable to stat %s!\n", input);
        ret = 1;
        goto die;
    }

    buf = malloc(sbuf.st_size);
    if (!buf) {
        printf("[!] failed to allocate %u bytes!\n", (unsigned int)sbuf.st_size);
        ret = 1;
        goto die;
    }

    fd = fopen(input, "r");
    if (!fd) {
        printf("[!] unable to open %s\n", input);
        ret = 1;
        goto die;
    }

    if (fread(buf, 1, sbuf.st_size, fd) != sbuf.st_size) {
        printf("[!] failed to read in %u bytes!\n", (unsigned int)sbuf.st_size);
        ret = 1;
        goto die;
    }
    fclose(fd);
    fd = NULL;

    printf("\n");

    x86_init(opt_none, NULL, NULL);
    while (offset < sbuf.st_size) {
        size = x86_disasm(buf, sbuf.st_size, 0, offset, &x86dis);
        if (!size) {
            printf("[!] invalid op at %i!\n", offset);
            break;
        }
        memset(line, 0, sizeof(line));
        x86_format_insn(&x86dis, line, sizeof(line), intel_syntax);
        printf("%08X: %s\n", offset, line);
        x86_oplist_free(&x86dis);  // release the operand list allocated by x86_disasm
        offset += size;
    }
    x86_cleanup();

die:
    printf("\n");
    if (buf) {
        free(buf);
    }
    if (fd) {
        fclose(fd);
    }
    return ret;
}
int ethr_init_common__(ethr_init_data *id)
{
    int res;
#if defined(ETHR_X86_RUNTIME_CONF__)
    x86_init();
#endif
    if (id) {
        ethr_thr_prepare_func__ = id->thread_create_prepare_func;
        ethr_thr_parent_func__ = id->thread_create_parent_func;
        ethr_thr_child_func__ = id->thread_create_child_func;
    }

    ethr_cpu_info__ = erts_cpu_info_create();
    if (!ethr_cpu_info__)
        return ENOMEM;

#ifdef _SC_PAGESIZE
    ethr_pagesize__ = (size_t) sysconf(_SC_PAGESIZE);
#elif defined(HAVE_GETPAGESIZE)
    ethr_pagesize__ = (size_t) getpagesize();
#else
    ethr_pagesize__ = (size_t) 4*1024; /* Guess 4 KB */
#endif

    /* User needs at least 4 KB */
    ethr_min_stack_size__ = 4*1024;
#if SIZEOF_VOID_P == 8
    /* Double that on 64-bit archs */
    ethr_min_stack_size__ *= 2;
#endif
    /* On some systems as much as about 4 KB is used by the system */
    ethr_min_stack_size__ += 4*1024;
    /* There should be room for signal handlers */
#ifdef SIGSTKSZ
    ethr_min_stack_size__ += SIGSTKSZ;
#else
    ethr_min_stack_size__ += ethr_pagesize__;
#endif

    /* The system may think that we need more stack */
#if defined(PTHREAD_STACK_MIN)
    if (ethr_min_stack_size__ < PTHREAD_STACK_MIN)
        ethr_min_stack_size__ = PTHREAD_STACK_MIN;
#elif defined(_SC_THREAD_STACK_MIN)
    {
        size_t thr_min_stk_sz = (size_t) sysconf(_SC_THREAD_STACK_MIN);
        if (ethr_min_stack_size__ < thr_min_stk_sz)
            ethr_min_stack_size__ = thr_min_stk_sz;
    }
#endif

    /* The guard is at least on some platforms included in the stack size
       passed when creating threads */
#ifdef ETHR_STACK_GUARD_SIZE
    ethr_min_stack_size__ += ETHR_STACK_GUARD_SIZE;
#endif

    ethr_min_stack_size__ = ETHR_PAGE_ALIGN(ethr_min_stack_size__);
    ethr_min_stack_size__ = ETHR_B2KW(ethr_min_stack_size__);

    ethr_max_stack_size__ = 32*1024*1024;
#if SIZEOF_VOID_P == 8
    ethr_max_stack_size__ *= 2;
#endif
    ethr_max_stack_size__ = ETHR_B2KW(ethr_max_stack_size__);

    res = ethr_init_atomics();
    if (res != 0)
        return res;

    res = ethr_mutex_lib_init(erts_get_cpu_configured(ethr_cpu_info__));
    if (res != 0)
        return res;

    xhndl_list = NULL;

    return 0;
}
// find valid instructions offsets before ret
int _ropit_x86_find_gadgets (uint8_t *bytes, int len, int64_t *rets, int n_rets)
{
    int sz_inst;                /* size of instruction */
    x86_insn_t insn;            /* instruction */
    int idx_ret, sz_ret;
    int valid_gadget;
    // back track instruction count
    int n_backtrack_inst, n_backtrack_bytes;
    // start for rop search
    uint8_t *start, *gadget_start;
    // disassemble
    int len_disasm;
    int sz_dst;
    char disassembled[DISASSEMBLED_SIZE_MAX] = {0};
    // cache
    FILE *fp_cache;
    struct cache_t *caches;
    int idx_caches, n_caches;
    struct gadget_t *gadgets;
    int idx_gadgets, n_gadgets;
    // cache queue
    struct gadget_cache_queue_t *cache_queue;
    // count
    int count_gadgets;

    // check params
    if (!bytes || len <= 0) {
        debug_printf (MESSAGE_ERROR, stderr, "error: _ropit_x86_find_gadgets(): Bytes null or len <= 0\n");
        return -1;
    }

    // search rets
    if (!rets || n_rets <= 0) {
        debug_printf (MESSAGE_ERROR, stderr, "error: _ropit_x86_find_gadgets(): No rets\n");
        return -1;
    }

    // init gadget_cache
    fp_cache = fopen("tmp/gadget_cache", "w");
    if (!fp_cache) {
        debug_printf (MESSAGE_ERROR, stderr, "error: _ropit_x86_find_gadgets(): Failed open (w)\n");
        return -1;
    }

    // init cache_queue
    cache_queue = NULL;
    if (!gadget_cache_queue_init(&cache_queue)) {
        debug_printf (MESSAGE_ERROR, stderr, "error: _ropit_x86_find_gadgets(): Cache queue allocation failed\n");
        return -1;
    }
    gadget_cache_queue_set_file (cache_queue, fp_cache);

    // init gadgets
    n_gadgets = 1024;
    gadgets = calloc(sizeof(struct gadget_t), n_gadgets);
    if (!gadgets) {
        debug_printf (MESSAGE_ERROR, stderr, "error: _ropit_x86_find_gadgets(): Failed allocating gadgets\n");
        return -1;
    }
    for (idx_gadgets = 0; idx_gadgets < n_gadgets; idx_gadgets++)
        gadget_init(&(gadgets[idx_gadgets]), DISASSEMBLED_SIZE_MAX);

    // init caches
    n_caches = 1;
    caches = calloc(sizeof(struct cache_t), n_caches);
    if (!caches) {
        debug_printf (MESSAGE_ERROR, stderr, "error: _ropit_x86_find_gadgets(): Failed allocating caches\n");
        return -1;
    }
    for (idx_caches = 0; idx_caches < n_caches; idx_caches++) {
        cache_init(&(caches[idx_caches]), 1024);
    }

    // init disasm
    x86_init(opt_none, NULL, NULL);

    idx_caches = 0;
    idx_gadgets = 0;
    count_gadgets = 0;
    for (idx_ret = 0; idx_ret < n_rets; idx_ret++) {
        start = bytes + rets[idx_ret];
        n_backtrack_inst = 0;
        n_backtrack_bytes = 0;
        while ( bytes <= start && start <= bytes + len ) {
            /* disassemble address */
            sz_inst = x86_disasm(start, len - (start - bytes), 0, 0, &insn);
            x86_oplist_free(&insn);
            if (sz_inst <= 0) {
                // printf("not found inst\n");
                n_backtrack_bytes++;
            }
            else {
                // printf("found inst\n");
                n_backtrack_bytes = 0;
                valid_gadget = 0;
                gadget_start = start;
                if (gadget_start == bytes + rets[idx_ret])
                    valid_gadget = 1;
                else {
                    n_backtrack_inst = 0;
                    // check gadget validity
                    while (bytes <= gadget_start && gadget_start <= bytes + rets[idx_ret]) {
                        /* disassemble address */
                        sz_inst = x86_disasm(gadget_start, gadget_start - bytes, 0, 0, &insn);
                        x86_oplist_free(&insn);
                        if (sz_inst <= 0)
                            break;
                        else {
                            n_backtrack_inst++;
                            gadget_start += sz_inst;
                            if (gadget_start == bytes + rets[idx_ret]) {
                                valid_gadget = 1;
                                break;
                            }
                        }
                    }
                }

                if (valid_gadget == 1) {
                    ++count_gadgets;

                    // get ret size
                    sz_ret = x86_disasm(bytes + rets[idx_ret], 10, 0, 0, &insn);
                    x86_oplist_free(&insn);

                    // fill gadget structure
                    gadgets[idx_gadgets].ret_addr = rets[idx_ret];
                    gadgets[idx_gadgets].ret_bytes = rets[idx_ret] + bytes;
                    gadgets[idx_gadgets].address = start - bytes;
                    gadgets[idx_gadgets].len_bytes = (rets[idx_ret] - (start - bytes)) + sz_ret;
                    if (gadgets[idx_gadgets].sz_bytes < gadgets[idx_gadgets].len_bytes) {
                        gadgets[idx_gadgets].bytes = realloc (gadgets[idx_gadgets].bytes, gadgets[idx_gadgets].len_bytes);
                        gadgets[idx_gadgets].sz_bytes = gadgets[idx_gadgets].len_bytes;
                    }
                    memcpy(gadgets[idx_gadgets].bytes, start, gadgets[idx_gadgets].len_bytes);

                    if (cache_add (&(caches[idx_caches]), &(gadgets[idx_gadgets])) == -ERR_CACHE_FULL) {
                        gadget_cache_queue_add (cache_queue, &(caches[idx_caches]));
                        gadget_cache_queue_fwrite_worker (cache_queue);
                        cache_reset (&(caches[idx_caches]));
                    }
                    idx_gadgets = (idx_gadgets + 1) % n_gadgets;
                }
            }
            --start;

            // maximum intel instruction size is 15
            // maximum instructions in a gadget is hardcoded to 8 here
            /* TODO : Get more gadgets with n_backtrack_inst instructions
             * Effectively, we stop at the first gadget which has
             * n_backtrack_inst instructions while there might be multiple
             * possibilities.
             */
            if (n_backtrack_bytes >= 15 || n_backtrack_inst == 8)
                break;
        }
    }
    x86_cleanup();

    // write remaining gadgets
    gadget_cache_queue_add (cache_queue, &(caches[idx_caches]));
    gadget_cache_queue_fwrite_worker (cache_queue);
    cache_reset (&(caches[idx_caches]));

    // clean up
    for (idx_caches = 0; idx_caches < n_caches; idx_caches++)
        free(caches[idx_caches].objects);
    free(caches);

    for (idx_gadgets = 0; idx_gadgets < n_gadgets; idx_gadgets++) {
        // gadget_free(&(gadgets[idx_gadgets]));
        // free (gadgets[idx_gadgets].repr);
        free (gadgets[idx_gadgets].bytes);
    }
    free(gadgets);

    gadget_cache_queue_destroy (&cache_queue);
    fclose (fp_cache);

    return count_gadgets;
}
// Disasm.cpp : Defines the entry point for the DLL application.
//
#include "stdafx.h"

BOOL APIENTRY DllMain( HANDLE hModule,
                       DWORD  ul_reason_for_call,
                       LPVOID lpReserved
                     )
{
    return TRUE;
}

extern "C" {
#include "disasm/libdis.h"

// run x86_init() once during static initialization of the DLL;
// the comma expression keeps __init a plain char initializer
static char __init = (char)(x86_init(opt_none, NULL), 1);
};
int ethr_init_common__(ethr_init_data *id)
{
    int res;

    ethr_init_event__();

#if defined(ETHR_X86_RUNTIME_CONF__)
    x86_init();
#endif
    if (id) {
        ethr_thr_prepare_func__ = id->thread_create_prepare_func;
        ethr_thr_parent_func__ = id->thread_create_parent_func;
        ethr_thr_child_func__ = id->thread_create_child_func;
    }

    ethr_cpu_info__ = erts_cpu_info_create();
    if (!ethr_cpu_info__)
        return ENOMEM;

#ifdef _SC_PAGESIZE
    ethr_pagesize__ = (size_t) sysconf(_SC_PAGESIZE);
#elif defined(HAVE_GETPAGESIZE)
    ethr_pagesize__ = (size_t) getpagesize();
#else
    ethr_pagesize__ = (size_t) 4*1024; /* Guess 4 KB */
#endif

    /* User needs at least 4 KB */
    ethr_min_stack_size__ = 4*1024;
#if SIZEOF_VOID_P == 8
    /* Double that on 64-bit archs */
    ethr_min_stack_size__ *= 2;
#endif
    /* On some systems as much as about 4 KB is used by the system */
    ethr_min_stack_size__ += 4*1024;
    /* There should be room for signal handlers */
#ifdef SIGSTKSZ
    ethr_min_stack_size__ += SIGSTKSZ;
#else
    ethr_min_stack_size__ += ethr_pagesize__;
#endif

    /* The system may think that we need more stack */
#if defined(PTHREAD_STACK_MIN)
    if (ethr_min_stack_size__ < PTHREAD_STACK_MIN)
        ethr_min_stack_size__ = PTHREAD_STACK_MIN;
#elif defined(_SC_THREAD_STACK_MIN)
    {
        size_t thr_min_stk_sz = (size_t) sysconf(_SC_THREAD_STACK_MIN);
        if (ethr_min_stack_size__ < thr_min_stk_sz)
            ethr_min_stack_size__ = thr_min_stk_sz;
    }
#endif

    /* The guard is at least on some platforms included in the stack size
       passed when creating threads */
#ifdef ETHR_STACK_GUARD_SIZE
    ethr_min_stack_size__ += ETHR_STACK_GUARD_SIZE;
#endif

    ethr_min_stack_size__ = ETHR_PAGE_ALIGN(ethr_min_stack_size__);
    ethr_min_stack_size__ = ETHR_B2KW(ethr_min_stack_size__);

#ifdef __OSE__
    /* For supervisor processes, OSE adds a number of bytes to the requested stack. With this
     * addition, the resulting size must not exceed the largest available stack size. The number
     * of bytes that will be added is configured in the monolith and can therefore not be
     * specified here. We simply assume that it is less than 0x1000. The available stack sizes
     * are configured in the .lmconf file and the largest one is usually 65536 bytes.
     * Consequently, the requested stack size is limited to 0xF000. */
    ethr_max_stack_size__ = 0xF000;
#else
    ethr_max_stack_size__ = 32*1024*1024;
#endif
#if SIZEOF_VOID_P == 8
    ethr_max_stack_size__ *= 2;
#endif
    ethr_max_stack_size__ = ETHR_B2KW(ethr_max_stack_size__);

    res = ethr_init_atomics();
    if (res != 0)
        return res;

    res = ethr_mutex_lib_init(erts_get_cpu_configured(ethr_cpu_info__));
    if (res != 0)
        return res;

    xhndl_list = NULL;

    return 0;
}
void ApiHook_init()
{
    x86_init(opt_none, 0, 0);
}
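The init-only example above never tears the library down. Assuming the surrounding project wants a shutdown path, a minimal counterpart could mirror the x86_cleanup() calls used in the other examples on this page; ApiHook_cleanup is hypothetical and not part of the original code.

/* Hypothetical counterpart: release libdisasm state when the hook layer
 * shuts down (assumes the same libdisasm header as ApiHook_init). */
void ApiHook_cleanup()
{
    x86_cleanup();
}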