/* Basic-block event for the funcwrap pass: if the first instruction of this bb
 * matches a recorded function start offset for a tracked module, insert a clean
 * call marking the function as entered and remember the wrapping thread.
 * Always returns DR_EMIT_DEFAULT.
 */
dr_emit_flags_t funcwrap_bb_instrumentation(void *drcontext, void *tag, instrlist_t *bb,
                                            instr_t *instr, bool for_trace,
                                            bool translating, void *user_data)
{
    instr_t *first = instrlist_first(bb);
    app_pc pc = instr_get_app_pc(first);
    module_data_t *module_data;
    /* fix: fetch the TLS field once, using the drcontext this event was handed;
     * the original fetched it twice (first via dr_get_current_drcontext()) */
    per_thread_t *data = drmgr_get_tls_field(drcontext, tls_index);
    module_t *md;
    app_pc offset;

    /* only act on the first instruction of the bb, and only when we are not
     * already nested inside a wrapped function */
    if (instr != first || data->nesting != 0) {
        return DR_EMIT_DEFAULT;
    }

    module_data = dr_lookup_module(pc);
    if (module_data != NULL) {
        md = md_lookup_module(head, module_data->full_path);
        if (md != NULL) {
            offset = pc - module_data->start;
            /* bbs[0].start_addr appears to hold the number of recorded
             * entries — TODO confirm against the md table producer */
            for (int i = 1; i <= md->bbs[0].start_addr; i++) {
                if (offset == md->bbs[i].start_addr) {
                    DEBUG_PRINT("bb instrumenting function\n");
                    data->filter_func = true;
                    dr_insert_clean_call(drcontext, bb, instr, clean_call, false, 1,
                                         OPND_CREATE_INTPTR(instr_get_app_pc(instr)));
                    wrap_thread_id = dr_get_thread_id(drcontext);
                    DEBUG_PRINT("done bb instrumenting function\n");
                }
            }
        }
        /* fix: free inside the NULL check; the original unconditionally passed
         * a possibly-NULL pointer to dr_free_module_data */
        dr_free_module_data(module_data);
    }
    return DR_EMIT_DEFAULT;
}
/* Thread-exit event: close this thread's log stream and release its TLS data. */
static void
event_thread_exit(void *drcontext)
{
    per_thread_t *tdata;

    tdata = drmgr_get_tls_field(drcontext, tls_idx);
    log_stream_close(tdata->logf);
    dr_thread_free(drcontext, tdata, sizeof(per_thread_t));
}
/* Thread-exit event: free the per-thread data stored in our TLS slot. */
static void
event_thread_exit(void *drcontext)
{
    per_thread_t *tls_data = (per_thread_t *)drmgr_get_tls_field(drcontext, tls_idx);

    ASSERT(tls_data != NULL, "data must not be NULL");
    dr_thread_free(drcontext, tls_data, sizeof(*tls_data));
}
/* Clean call used in instruction-trace mode to record one memory operand into
 * the current per-thread trace slot.
 * regvalue    - computed memory address for the operand
 * pos         - operand position within the instruction
 * dest_or_src - marker distinguishing destination from source operands
 */
static void clean_call_populate_mem(reg_t regvalue, uint pos, uint dest_or_src)
{
    /* fix: removed unused local `char string_ins[MAX_STRING_LENGTH]` */
    per_thread_t *data;
    void *drcontext = dr_get_current_drcontext();
    instr_trace_t *trace;

    data = drmgr_get_tls_field(drcontext, tls_index);
    trace = (instr_trace_t *)data->buf_ptr;
    trace->mem_opnds[trace->num_mem] = regvalue;
    trace->pos[trace->num_mem] = pos;

    /* assuming the thread init gives out stack bounds properly we can select
     * the memory type as follows */
    if (regvalue <= data->stack_base && regvalue >= data->deallocation_stack) {
        trace->mem_type[trace->num_mem] = MEM_STACK_TYPE;
    } else {
        trace->mem_type[trace->num_mem] = MEM_HEAP_TYPE;
    }
    trace->dst_or_src[trace->num_mem++] = dest_or_src;
}
/* Per-bb counting event: bumps the global bb counter and, when a fuzz target is
 * live on this thread, that target's own bb counter. Racy by design. */
static dr_emit_flags_t
bb_event(void *drcontext, void *tag, instrlist_t *bb, bool for_trace, bool translating)
{
    fuzz_pass_context_t *cxt;
    pass_target_t *target;

    if (for_trace || translating)
        return DR_EMIT_DEFAULT;

    /* It is ok to be racy, so hold no locks for updating. */
    /* update global num_bbs */
    num_total_bbs++;

    /* update num_bbs for each live target */
    cxt = (fuzz_pass_context_t *)drmgr_get_tls_field(drcontext, tls_idx_fuzzer);
    target = cxt->live_targets;
    if (target == NULL)
        return DR_EMIT_DEFAULT;

    /* XXX: the function entry basic block is not counted because the live target
     * is only added on its first execution after bb_event. */
    target->target->num_bbs++;
    DRFUZZ_LOG(3, "basic block "UINT64_FORMAT_STRING" @"PFX" during fuzzing "PFX"\n",
               target->target->num_bbs, tag, target->target->func_pc);
    return DR_EMIT_DEFAULT;
}
/* Clean-call target: verifies that the TLS value read from the code cache
 * matches what drmgr reports for the current thread. */
static void
check_tls_from_cache(void *tls_val)
{
    void *drcontext = dr_get_current_drcontext();
    void *expected = drmgr_get_tls_field(drcontext, tls_idx);

    CHECK(tls_val == expected, "tls read from cache incorrect");
    checked_tls_from_cache = true;
}
static void memtrace(void *drcontext) { per_thread_t *data; mem_ref_t *mem_ref, *buf_ptr; data = drmgr_get_tls_field(drcontext, tls_idx); buf_ptr = BUF_PTR(data->seg_base); /* Example of dumpped file content: * 0x00007f59c2d002d3: 5, call * 0x00007ffeacab0ec8: 8, w */ /* We use libc's fprintf as it is buffered and much faster than dr_fprintf * for repeated printing that dominates performance, as the printing does here. */ for (mem_ref = (mem_ref_t *)data->buf_base; mem_ref < buf_ptr; mem_ref++) { /* We use PIFX to avoid leading zeroes and shrink the resulting file. */ fprintf(data->logf, "" PIFX ": %2d, %s\n", (ptr_uint_t)mem_ref->addr, mem_ref->size, (mem_ref->type > REF_TYPE_WRITE) ? decode_opcode_name(mem_ref->type) /* opcode for instr */ : (mem_ref->type == REF_TYPE_WRITE ? "w" : "r")); data->num_refs++; } BUF_PTR(data->seg_base) = data->buf_base; }
void instrace_thread_exit(void *drcontext) { per_thread_t *data; int i; if (client_arg->instrace_mode == INS_TRACE){ ins_trace(drcontext); } data = drmgr_get_tls_field(drcontext, tls_index); dr_mutex_lock(mutex); num_refs += data->num_refs; dr_mutex_unlock(mutex); dr_close_file(data->outfile); if (log_mode){ dr_close_file(data->logfile); } dr_thread_free(drcontext, data->buf_base, INSTR_BUF_SIZE); dr_thread_free(drcontext, data->output_array, OUTPUT_BUF_SIZE); DEBUG_PRINT("%s - thread id : %d, cloned instructions freeing now - %d\n",ins_pass_name, dr_get_thread_id(drcontext),data->static_ptr); for(i=0 ; i<data->static_ptr; i++){ instr_destroy(dr_get_current_drcontext(),data->static_array[i]); } dr_thread_free(drcontext, data->static_array, sizeof(instr_t *)*client_arg->static_info_size); dr_thread_free(drcontext, data, sizeof(per_thread_t)); DEBUG_PRINT("%s - exiting thread done %d\n", ins_pass_name, dr_get_thread_id(drcontext)); }
static bool event_pre_syscall(void *drcontext, int sysnum) { per_thread_t *data = drmgr_get_tls_field(drcontext, tls_index); //dr_fprintf(STDERR, "pre sysnum %d\n", sysnum); switch (sysnum) { case BRK_SYSCALL : data->param[0] = dr_syscall_get_param(drcontext, 0); // dr_fprintf(STDERR, " brk %u\n", nm); break; case MMAP_SYSCALL : data->param[1] = dr_syscall_get_param(drcontext, 1); // dr_fprintf(STDERR, " mmap %u\n", nm); break; case MUNMAP_SYSCALL : data->param[0] = dr_syscall_get_param(drcontext, 0); data->param[1] = dr_syscall_get_param(drcontext, 1); // dr_fprintf(STDERR, " mmap %u\n", nm); break; } return true; }
static void event_thread_exit(void *drcontext) { CHECK(drmgr_get_tls_field(drcontext, tls_idx) == (void *)(ptr_int_t)dr_get_thread_id(drcontext), "tls not preserved"); }
/* Flushes this thread's instruction-trace buffer: in OUTPUT_TEXT builds each
 * entry is printed as "pc,opcode-name"; otherwise the raw buffer is written to
 * the binary log. The buffer is then zeroed and its fill pointer reset. */
static void instrace(void *drcontext)
{
    per_thread_t *data;
    int num_refs;    /* number of entries currently buffered */
    ins_ref_t *ins_ref;
#ifdef OUTPUT_TEXT
    int i;
#endif

    data = drmgr_get_tls_field(drcontext, tls_index);
    ins_ref = (ins_ref_t *)data->buf_base;
    /* entry count = distance between the fill pointer and the buffer base */
    num_refs = (int)((ins_ref_t *)data->buf_ptr - ins_ref);

#ifdef OUTPUT_TEXT
    /* We use libc's fprintf as it is buffered and much faster than dr_fprintf
     * for repeated printing that dominates performance, as the printing does here. */
    for (i = 0; i < num_refs; i++) {
        /* We use PIFX to avoid leading zeroes and shrink the resulting file. */
        fprintf(data->logf, PIFX",%s\n", (ptr_uint_t)ins_ref->pc,
                decode_opcode_name(ins_ref->opcode));
        ++ins_ref;
    }
#else
    /* binary mode: dump the filled portion of the buffer verbatim */
    dr_write_file(data->log, data->buf_base,
                  (size_t)(data->buf_ptr - data->buf_base));
#endif

    memset(data->buf_base, 0, MEM_BUF_SIZE);
    data->num_refs += num_refs;
    data->buf_ptr = data->buf_base;
}
/* Template thread-exit handler (the <client_name> token is substituted when a
 * concrete client is generated from this template): frees the per-thread state.
 * NOTE(review): no return type is written here — presumably the generator emits
 * one along with the substituted name; confirm against the template expansion. */
<client_name>_thread_exit(void *drcontext){
    per_thread_t * data;
    data = drmgr_get_tls_field(drcontext, tls_index);
    dr_thread_free(drcontext, data, sizeof(per_thread_t));
    DEBUG_PRINT("%s - exiting thread done %d\n", ins_pass_name, dr_get_thread_id(drcontext));
}
static void post_pwrite64(void *drcontext) { int fd; struct per_thread_journal_state *jstate; jstate = (struct per_thread_journal_state *) drmgr_get_tls_field(drcontext, tls_idx); if (jstate->state != THREAD_STATE_WRITING_JFILE) return; fd = jstate->using_fd; DR_ASSERT(fd == jfile_fds[0] || fd == jfile_fds[1]); switch (sid) { case SID_DEATH_AFTER_STORE: if (jstate->pwrite_state != PWRITE_WRITING_STORE) return; fi_printf("scenario is death after writing normal store," " exiting\n"); exit(1); break; default: die("invalid SID: %d\n", sid); break; } }
static void pre_pwrite(void *drcontext) { int fd; struct per_thread_journal_state *jstate; struct journal_descriptor *jd; fd = (int)dr_syscall_get_param(drcontext, 0); if (fd != jfile_fds[0] && fd != jfile_fds[1]) return; jstate = (struct per_thread_journal_state *) drmgr_get_tls_field(drcontext, tls_idx); fi_printf("writing journal\n"); jstate->using_fd = fd; jstate->state = THREAD_STATE_WRITING_JFILE; jd = (struct journal_descriptor *)dr_syscall_get_param(drcontext, 1); DR_ASSERT(jd->magic == JOURNAL_DESC_MAGIC); if (jd->flag == JF_STORE) jstate->pwrite_state = PWRITE_WRITING_STORE; else if (jd->flag == JF_REMOVE_OBJ) fi_printf("FIXME: testing object removal is not supported yet"); else die("unknown journal flag: %d\n", jd->flag); }
/* this is only called when the instrace mode is operand trace (this happens at
 * the instrumentation time) */
/* Emits a textual trace for one instruction: in OPERAND_TRACE mode one line per
 * dst/src operand (with special base+disp formatting for lea), plus the module
 * offset; in INS_DISASM_TRACE mode a single "module-pos,offset,disasm" line. */
static void operand_trace(instr_t * instr, void * drcontext){
    int i;
    char stringop[MAX_STRING_LENGTH];
    /* offset of the instruction from its module base; 0 when no module found.
     * NOTE(review): declared int although it holds a pointer difference —
     * could truncate for very large modules on 64-bit; confirm acceptable. */
    int pc = 0;
    per_thread_t * data = drmgr_get_tls_field(drcontext, tls_index);
    module_data_t * module_data = dr_lookup_module(instr_get_app_pc(instr));

    if (module_data != NULL){
        pc = instr_get_app_pc(instr) - module_data->start;
    }
    instr_disassemble_to_buffer(drcontext, instr, stringop, MAX_STRING_LENGTH);

    if (client_arg->instrace_mode == OPERAND_TRACE){
        dr_fprintf(data->outfile, "%s\n", stringop);
        /* one line per destination operand */
        for (i = 0; i < instr_num_dsts(instr); i++){
            opnd_disassemble_to_buffer(drcontext, instr_get_dst(instr, i), stringop, MAX_STRING_LENGTH);
            /* lea base+disp operands get an expanded multi-line form */
            if ((instr_get_opcode(instr) == OP_lea) && opnd_is_base_disp(instr_get_dst(instr,i))){
                dr_fprintf(data->outfile, "dst-\n");
                print_base_disp_for_lea(data->outfile, instr_get_dst(instr, i));
            }
            else{
                dr_fprintf(data->outfile, "dst-%d-%s\n", i, stringop);
            }
        }
        /* one line per source operand */
        for (i = 0; i < instr_num_srcs(instr); i++){
            opnd_disassemble_to_buffer(drcontext, instr_get_src(instr, i), stringop, MAX_STRING_LENGTH);
            if ((instr_get_opcode(instr) == OP_lea) && opnd_is_base_disp(instr_get_src(instr, i))){
                dr_fprintf(data->outfile, "src-\n");
                print_base_disp_for_lea(data->outfile, instr_get_src(instr, i));
            }
            else{
                dr_fprintf(data->outfile, "src-%d-%s\n", i, stringop);
            }
        }
        if (module_data != NULL){
            dr_fprintf(data->outfile, "app_pc-%d\n", pc);
        }
    }
    else if (client_arg->instrace_mode == INS_DISASM_TRACE){
        if (module_data != NULL){
            /* register the module on first sight so it has a stable position */
            if (md_get_module_position(instrace_head, module_data->full_path) == -1){
                md_add_module(instrace_head, module_data->full_path, MAX_BBS_PER_MODULE);
            }
            dr_fprintf(data->outfile, "%d,%d,%s\n", md_get_module_position(instrace_head, module_data->full_path), pc, stringop);
        }
        else{
            dr_fprintf(data->outfile, "%d,%d,%s\n",0, 0, stringop);
        }
    }
    dr_free_module_data(module_data);
}
/* Thread-exit event: release this thread's journal state. */
static void
thread_exit_event(void *drcontext)
{
    struct per_thread_journal_state *jstate =
        (struct per_thread_journal_state *)drmgr_get_tls_field(drcontext, tls_idx);

    xfree(jstate);
}
/* Returns the fuzz context stored in the current thread's TLS slot. */
DR_EXPORT void *
drfuzz_get_fuzzcxt(void)
{
    void *drcontext = dr_get_current_drcontext();
    /* XXX i#1734: might prefer to return a status code, because this may fail,
     * e.g. during startup the client may call this before any thread init events,
     * in which case the fuzzcxt will not have been initialized into our TLS slot. */
    return drmgr_get_tls_field(drcontext, tls_idx_fuzzer);
}
/* Returns whether the current thread should have its instructions filtered:
 * true unconditionally when no filter file is registered, otherwise the
 * per-thread filter_func flag set by the bb instrumentation. */
bool should_filter_func(void)
{
    per_thread_t *data;

    /* fix: check file_registered before touching TLS; the original fetched the
     * TLS field even on the early-return path where it was never used */
    if (!file_registered) {
        return true;
    }
    data = drmgr_get_tls_field(dr_get_current_drcontext(), tls_index);
    return data->filter_func;
}
/* Clean-call target: verifies that the TLS write performed from the code cache
 * took effect (slot holds the magic number), then restores the thread-id
 * sentinel value. */
static void check_tls_write_from_cache(void)
{
    void *drcontext = dr_get_current_drcontext();

    /* fix: message said "cls write" — copy-paste typo; this checks the TLS
     * write path (cf. the function name and drmgr_get_tls_field) */
    CHECK(drmgr_get_tls_field(drcontext, tls_idx) == (void *)MAGIC_NUMBER_FROM_CACHE,
          "tls write from cache incorrect");
    /* now restore */
    drmgr_set_tls_field(drcontext, tls_idx,
                        (void *)(ptr_int_t)dr_get_thread_id(drcontext));
    checked_tls_write_from_cache = true;
}
/* drwrap pre-call for the unwind test: for every wrapped function except
 * longdone, print the current TLS depth counter and then increment it. */
static void
wrap_unwindtest_pre(void *wrapcxt, OUT void **user_data)
{
    if (drwrap_get_func(wrapcxt) == addr_longdone)
        return;

    void *drcontext = dr_get_current_drcontext();
    ptr_uint_t depth = (ptr_uint_t)drmgr_get_tls_field(drcontext, tls_idx);

    dr_fprintf(STDERR, " <pre-long%d>\n", depth);
    /* increment per level of regular calls on way up */
    drmgr_set_tls_field(drcontext, tls_idx, (void *)(depth + 1));
}
/* Thread-exit for the funcwrap pass: closes the log file (when logging is on)
 * and frees the per-thread state. */
void funcwrap_thread_exit(void *drcontext)
{
    per_thread_t *tdata = drmgr_get_tls_field(drcontext, tls_index);

    if (log_mode) {
        dr_close_file(tdata->logfile);
    }
    dr_thread_free(drcontext, tdata, sizeof(per_thread_t));
    DEBUG_PRINT("%s - exiting thread done %d\n", ins_pass_name,
                dr_get_thread_id(drcontext));
}
/* We collect the basic block information including offset from module base,
 * size, and num of instructions, and add it into a basic block table without
 * instrumentation.
 */
static dr_emit_flags_t
event_basic_block_analysis(void *drcontext, void *tag, instrlist_t *bb, bool for_trace,
                           bool translating, OUT void **user_data)
{
    per_thread_t *data;
    instr_t *instr;
    app_pc tag_pc, start_pc, end_pc;

    /* do nothing for translation */
    if (translating)
        return DR_EMIT_DEFAULT;

    data = (per_thread_t *)drmgr_get_tls_field(drcontext, tls_idx);
    /* Collect the number of instructions and the basic block size,
     * assuming the basic block does not have any elision on control
     * transfer instructions, which is true for default options passed
     * to DR but not for -opt_speed.
     */
    /* We separate the tag from the instr pc ranges to handle displaced code
     * such as for the vsyscall hook.
     */
    tag_pc = dr_fragment_app_pc(tag);
    start_pc = instr_get_app_pc(instrlist_first_app(bb));
    end_pc = start_pc; /* for finding the size */
    /* walk the app instrs, extending end_pc to cover the furthest byte seen */
    for (instr = instrlist_first_app(bb); instr != NULL;
         instr = instr_get_next_app(instr)) {
        app_pc pc = instr_get_app_pc(instr);
        int len = instr_length(drcontext, instr);
        /* -opt_speed (elision) is not supported */
        /* For rep str expansion pc may be one back from start pc but equal to the tag. */
        ASSERT(pc != NULL && (pc >= start_pc || pc == tag_pc),
               "-opt_speed is not supported");
        if (pc + len > end_pc)
            end_pc = pc + len;
    }
    /* We allow duplicated basic blocks for the following reasons:
     * 1. Avoids handling issues like code cache consistency, e.g.,
     *    module load/unload, self-modifying code, etc.
     * 2. Avoids the overhead on duplication check.
     * 3. Stores more information on code cache events, e.g., trace building,
     *    repeated bb building, etc.
     * 4. The duplication can be easily handled in a post-processing step,
     *    which is required anyway.
     */
    bb_table_entry_add(drcontext, data, tag_pc, (uint)(end_pc - start_pc));

    if (go_native)
        return DR_EMIT_GO_NATIVE;
    else
        return DR_EMIT_DEFAULT;
}
/* Fork event: in whole-process mode, just open a fresh log file for the child;
 * in per-thread mode, tear down the per-thread data inherited from the parent
 * and re-run thread init for the child. */
static void
event_fork(void *drcontext)
{
    per_thread_t *data;

    if (!drcov_per_thread) {
        log_file_create(NULL, global_data);
        return;
    }
    data = drmgr_get_tls_field(drcontext, tls_idx);
    if (data != NULL)
        thread_data_destroy(drcontext, data);
    event_thread_init(drcontext);
}
static void event_thread_exit(void *drcontext) { per_thread_t *data; memtrace(drcontext); /* dump any remaining buffer entries */ data = drmgr_get_tls_field(drcontext, tls_idx); dr_mutex_lock(mutex); num_refs += data->num_refs; dr_mutex_unlock(mutex); log_stream_close(data->logf); /* closes fd too */ dr_raw_mem_free(data->buf_base, MEM_BUF_SIZE); dr_thread_free(drcontext, data, sizeof(per_thread_t)); }
static void post_func_cb(void * wrapcxt, void ** user_data){ DEBUG_PRINT("funcwrap - post_func_cb\n"); per_thread_t * data = drmgr_get_tls_field(dr_get_current_drcontext(), tls_index); data->nesting--; //dr_unlink_flush_region(0, ~((ptr_uint_t)0)); DR_ASSERT(data->nesting >= 0); if (data->nesting == 0){ data->filter_func = false; } DEBUG_PRINT("funcwrap - post_func_cb done \n"); }
/* Post-syscall event: using the parameters saved in event_pre_syscall, updates
 * the tracked heap/global ranges and the allocation map after brk/mmap/munmap
 * complete. */
static void event_post_syscall(void *drcontext, int sysnum)
{
    unsigned long ret;
    per_thread_t *data = drmgr_get_tls_field(drcontext, tls_index);
    // dr_fprintf(STDERR, "post sysnum %d\n", sysnum);
    switch (sysnum) {
    case BRK_SYSCALL :
        ret = dr_syscall_get_result(drcontext);
        /* param[0] was the requested break address; a zero request is treated
         * as the initial query that establishes the heap's lower bound */
        if (!data->param[0]) {
            global_range.upper = ret;
            global_range.upper_addr = (void *)ret;
            heap_range.lower = ret;
            heap_range.lower_addr = (void *)ret;
        } else {
            /* a real brk grew/moved the break: record the new upper bound and
             * mark the whole heap span as allocated */
            heap_range.upper = ret;
            heap_range.upper_addr = (void *)ret;
            markAlloc(heap_range.lower, heap_range.upper - heap_range.lower);
        }
        // dr_printf("brk %u ret %x\n", data->param[0], ret);
        // dr_fprintf(STDERR, " ret %x\n", ret);
        break;
    case MMAP_SYSCALL :
        ret = dr_syscall_get_result(drcontext);
        // dr_printf("mmap %u %x\n", data->param[1], ret);
        // dr_fprintf(STDERR, " ret %x\n", ret);
        /* param[1] was the mapping length; mark [ret, ret+len) allocated */
        markAlloc(ret, data->param[1]);
        break;
    case MUNMAP_SYSCALL :
        // dr_printf("unmap %x %u\n", data->param[0], data->param[1]);
        ret = dr_syscall_get_result(drcontext);
        // dr_fprintf(STDERR, "ret %u\n", ret);
        /* param[0]/param[1] were the unmapped address and length.
         * NOTE(review): ret is fetched but not checked — an munmap failure
         * still unmarks the range; confirm intentional. */
        unmarkAlloc(data->param[0], data->param[1]);
        break;
    }
}
/* Analysis event: passes the address of the per-thread reg_addr slot to the
 * insertion phase through user_data. */
static dr_emit_flags_t
event_app_analysis(void *drcontext, void *tag, instrlist_t *bb, bool for_trace,
                   bool translating, void **user_data)
{
    per_thread_t *tls = drmgr_get_tls_field(drcontext, tls_idx);

    /* If we have an outstanding write, that means we did not correctly handle a case
     * where there was a write but no fall-through NOP or terminating instruction in
     * the previous basic block. */
    DR_ASSERT(tls->reg_addr == DR_REG_NULL);
    *user_data = (void *)&tls->reg_addr;
    return DR_EMIT_DEFAULT;
}
/* Records canonical static information for an instruction, depending on the
 * instrace mode:
 *  - OPCODE_TRACE: just mark the opcode as seen; no instrumentation.
 *  - OPERAND_TRACE / INS_DISASM_TRACE: emit a trace line via operand_trace;
 *    no instrumentation.
 *  - otherwise: clone the instruction into the per-thread static array and
 *    return the clone so the caller can instrument against it.
 * Returns NULL when the instruction needs no further instrumentation.
 */
static instr_t *static_info_instrumentation(void *drcontext, instr_t *instr)
{
    per_thread_t *data = drmgr_get_tls_field(drcontext, tls_index);
    int opcode = instr_get_opcode(instr);

    if (client_arg->instrace_mode == OPCODE_TRACE) {
        opcodes_visited[opcode] = true;
        return NULL;
    }
    if ((client_arg->instrace_mode == OPERAND_TRACE) ||
        (client_arg->instrace_mode == INS_DISASM_TRACE)) {
        operand_trace(instr, drcontext);
        return NULL;
    }

    /* check whether this instr needs instrumentation - check for ones to skip
     * and skip if */
    /*switch(opcode){
    case OP_jecxz:
        return NULL;
    }*/

    /* fix: bounds-check BEFORE storing; the original wrote to
     * static_array[static_ptr] first and only asserted afterwards, so an
     * overflow had already written out of bounds by the time the assert fired */
    DR_ASSERT(data->static_ptr < data->static_array_size);
    data->static_array[data->static_ptr++] = instr_clone(drcontext, instr);
    return data->static_array[data->static_ptr - 1];
}
/* Thread-exit event: confirm the TLS slot still holds this thread's id, and
 * print a one-time notice guarded by a double-checked flag under threadlock. */
static void
event_thread_exit(void *drcontext)
{
    void *tls_val = drmgr_get_tls_field(drcontext, tls_idx);

    CHECK(tls_val == (void *)(ptr_int_t)dr_get_thread_id(drcontext),
          "tls not preserved");

    if (in_event_thread_exit)
        return;
    dr_mutex_lock(threadlock);
    if (!in_event_thread_exit) {
        dr_fprintf(STDERR, "in event_thread_exit\n");
        in_event_thread_exit = true;
    }
    dr_mutex_unlock(threadlock);
}
/* Resolves a pc to its containing module's index and base address, checking
 * three tiers in order of cost: the per-thread cache (lock-free), the global
 * direct-map cache (lock-free), and finally the full module vector (locked).
 * On a vector hit, both caches are warmed.
 * Returns DRCOVLIB_SUCCESS with *mod_index/*mod_base filled, or
 * DRCOVLIB_ERROR_NOT_FOUND when no module contains pc. */
drcovlib_status_t drmodtrack_lookup(void *drcontext, app_pc pc, OUT uint *mod_index,
                                    OUT app_pc *mod_base)
{
    per_thread_t *data = (per_thread_t *)drmgr_get_tls_field(drcontext, tls_idx);
    module_entry_t *entry;
    int i;
    /* We assume we never change an entry's data field, even on unload,
     * and thus it is ok to check its value without a lock.
     */
    /* lookup thread module cache */
    for (i = 0; i < NUM_THREAD_MODULE_CACHE; i++) {
        entry = data->cache[i];
        if (pc_is_in_module(entry, pc)) {
            /* promote the hit toward the front of the thread cache */
            if (i > 0) {
                thread_module_cache_adjust(data->cache, entry, i,
                                           NUM_THREAD_MODULE_CACHE);
            }
            lookup_helper_set_fields(entry, mod_index, mod_base);
            return DRCOVLIB_SUCCESS;
        }
    }
    /* lookup global module cache */
    /* we use a direct map cache, so it is ok to access it without lock */
    for (i = 0; i < NUM_GLOBAL_MODULE_CACHE; i++) {
        entry = module_table.cache[i];
        if (pc_is_in_module(entry, pc)) {
            lookup_helper_set_fields(entry, mod_index, mod_base);
            return DRCOVLIB_SUCCESS;
        }
    }
    /* lookup module table */
    entry = NULL;
    drvector_lock(&module_table.vector);
    /* scan newest-first so recently loaded modules are found quickly */
    for (i = module_table.vector.entries - 1; i >= 0; i--) {
        entry = drvector_get_entry(&module_table.vector, i);
        ASSERT(entry != NULL, "fail to get module entry");
        if (pc_is_in_module(entry, pc)) {
            /* warm both caches for subsequent lookups of this module */
            global_module_cache_add(module_table.cache, entry);
            thread_module_cache_add(data->cache, NUM_THREAD_MODULE_CACHE, entry);
            break;
        }
        entry = NULL;
    }
    if (entry != NULL)
        lookup_helper_set_fields(entry, mod_index, mod_base);
    drvector_unlock(&module_table.vector);
    return entry == NULL ? DRCOVLIB_ERROR_NOT_FOUND : DRCOVLIB_SUCCESS;
}