void instrace_thread_exit(void *drcontext) { per_thread_t *data; int i; if (client_arg->instrace_mode == INS_TRACE){ ins_trace(drcontext); } data = drmgr_get_tls_field(drcontext, tls_index); dr_mutex_lock(mutex); num_refs += data->num_refs; dr_mutex_unlock(mutex); dr_close_file(data->outfile); if (log_mode){ dr_close_file(data->logfile); } dr_thread_free(drcontext, data->buf_base, INSTR_BUF_SIZE); dr_thread_free(drcontext, data->output_array, OUTPUT_BUF_SIZE); DEBUG_PRINT("%s - thread id : %d, cloned instructions freeing now - %d\n",ins_pass_name, dr_get_thread_id(drcontext),data->static_ptr); for(i=0 ; i<data->static_ptr; i++){ instr_destroy(dr_get_current_drcontext(),data->static_array[i]); } dr_thread_free(drcontext, data->static_array, sizeof(instr_t *)*client_arg->static_info_size); dr_thread_free(drcontext, data, sizeof(per_thread_t)); DEBUG_PRINT("%s - exiting thread done %d\n", ins_pass_name, dr_get_thread_id(drcontext)); }
/* Thread-exit template: <client_name> is a code-generator placeholder, so
 * this snippet is not compilable C until it is expanded.
 * NOTE(review): the return type is missing as well -- presumably the
 * generator emits "void"; confirm against the template expansion.
 * Frees the per-thread TLS struct allocated at thread init.
 */
<client_name>_thread_exit(void *drcontext){
    per_thread_t * data;
    data = drmgr_get_tls_field(drcontext, tls_index);
    /* only the struct itself is freed here; any buffers it owns are
     * presumably released elsewhere or absent in this client -- verify */
    dr_thread_free(drcontext, data, sizeof(per_thread_t));
    DEBUG_PRINT("%s - exiting thread done %d\n", ins_pass_name, dr_get_thread_id(drcontext));
}
/* Release the umbra client's per-thread TLS data on thread exit. */
static void
umbra_client_thread_exit(void *drcontext, umbra_info_t *umbra_info)
{
    dr_thread_free(drcontext, umbra_info->client_tls_data,
                   sizeof(client_tls_data_t));
}
/* Thread-exit event: reclaim the per-thread struct stored in TLS. */
static void
event_thread_exit(void *drcontext)
{
    per_thread_t *tls_data =
        (per_thread_t *)drmgr_get_tls_field(drcontext, tls_idx);
    ASSERT(tls_data != NULL, "data must not be NULL");
    dr_thread_free(drcontext, tls_data, sizeof(*tls_data));
}
/* Thread exit: free the raw trace buffer first, then the TLS struct. */
static void
event_thread_exit(void *drcontext)
{
    per_thread_t *tdata = dr_get_tls_field(drcontext);
    dr_raw_mem_free(tdata->buf_base, TLS_BUF_SIZE);
    dr_thread_free(drcontext, tdata, sizeof(*tdata));
}
/* Thread exit: close this thread's log stream and free its TLS record. */
static void
event_thread_exit(void *drcontext)
{
    per_thread_t *tdata = drmgr_get_tls_field(drcontext, tls_idx);
    log_stream_close(tdata->logf);
    dr_thread_free(drcontext, tdata, sizeof(per_thread_t));
}
/* Free the fixed-size bytes table owned by this thread's umbra info. */
static void
fini_bytes_table(void *drcontext, umbra_info_t *info)
{
    void *table = info->table.bytes_table;
    dr_thread_free(drcontext, table, MAX_BYTES_TABLE_SIZE);
}
/* This event is called separately for each individual instruction in the
 * bb.  All per-bb counter updates are emitted once, attached to the first
 * instruction; the per-bb user data is released after the last instruction
 * has been processed.
 */
static dr_emit_flags_t
event_insert_instrumentation(void *drcontext, void *tag, instrlist_t *bb,
                             instr_t *instr, bool for_trace, bool translating,
                             void *user_data)
{
    per_bb_data_t *counts = (per_bb_data_t *)user_data;

    if (drmgr_is_first_instr(drcontext, instr)) {
        /* drx will analyze whether to save the flags for us. */
        uint cflags = DRX_COUNTER_LOCK;
        if (counts->num_instrs > 0) {
            drx_insert_counter_update(drcontext, bb, instr, SPILL_SLOT_MAX + 1,
                                      &stats->num_instrs, counts->num_instrs,
                                      cflags);
        }
        if (counts->num_flops > 0) {
            drx_insert_counter_update(drcontext, bb, instr, SPILL_SLOT_MAX + 1,
                                      &stats->num_flops, counts->num_flops,
                                      cflags);
        }
        if (counts->num_syscalls > 0) {
            drx_insert_counter_update(drcontext, bb, instr, SPILL_SLOT_MAX + 1,
                                      &stats->num_syscalls, counts->num_syscalls,
                                      cflags);
        }
    }
    if (drmgr_is_last_instr(drcontext, instr)) {
        dr_thread_free(drcontext, counts, sizeof(*counts));
    }
    return DR_EMIT_DEFAULT;
}
/* Context-exit callback: the CLS struct is reclaimed only on true thread
 * exit; on an ordinary context switch it is kept for re-use.
 */
static void
event_thread_context_exit(void *drcontext, bool thread_exit)
{
    per_thread_t *cls_data;
    if (!thread_exit)
        return; /* leave the struct for re-use on next context */
    cls_data = (per_thread_t *)drmgr_get_cls_field(drcontext, cls_idx);
    dr_thread_free(drcontext, cls_data, sizeof(per_thread_t));
}
void memtrace_thread_exit(void *drcontext) { per_thread_t *data; memtrace(drcontext); data = drmgr_get_tls_field(drcontext, tls_index); dr_mutex_lock(mutex); num_refs += data->num_refs; dr_mutex_unlock(mutex); if (log_mode){ dr_close_file(data->logfile); } dr_close_file(data->outfile); dr_thread_free(drcontext, data->buf_base, MEM_BUF_SIZE); dr_thread_free(drcontext, data, sizeof(per_thread_t)); DEBUG_PRINT("%s - exiting thread done %d\n", ins_pass_name, dr_get_thread_id(drcontext)); }
static void event_thread_exit(void *drcontext) { per_thread_t *data; instrace(drcontext); data = drmgr_get_tls_field(drcontext, tls_index); dr_mutex_lock(mutex); num_refs += data->num_refs; dr_mutex_unlock(mutex); #ifdef OUTPUT_TEXT log_stream_close(data->logf); /* closes fd too */ #else log_file_close(data->log); #endif dr_thread_free(drcontext, data->buf_base, MEM_BUF_SIZE); dr_thread_free(drcontext, data, sizeof(per_thread_t)); }
/* funcwrap thread exit: close the per-thread log (when logging is on) and
 * free the TLS record.
 */
void
funcwrap_thread_exit(void *drcontext)
{
    per_thread_t *tdata = drmgr_get_tls_field(drcontext, tls_index);

    if (log_mode) {
        dr_close_file(tdata->logfile);
    }
    dr_thread_free(drcontext, tdata, sizeof(per_thread_t));
    DEBUG_PRINT("%s - exiting thread done %d\n", ins_pass_name,
                dr_get_thread_id(drcontext));
}
/* Tear down the code hash table: free every chained entry in each bucket,
 * then the bucket array itself.
 */
static void
fini_code_hash(void *drcontext, umbra_info_t *info)
{
    code_hash_t **buckets = info->table.code_hash_table;
    int b;

    /* free all code entries, bucket by bucket */
    for (b = 0; b < info->table.code_hash_size; b++) {
        code_hash_t *entry = buckets[b];
        while (entry != NULL) {
            code_hash_t *next = entry->next;
            dr_thread_free(drcontext, entry, sizeof(code_hash_t));
            entry = next;
        }
    }
    /* free the bucket array */
    dr_thread_free(drcontext, buckets,
                   info->table.code_hash_size * sizeof(code_hash_t *));
}
static void event_thread_exit(void *drcontext) { per_thread_t *data; memtrace(drcontext); /* dump any remaining buffer entries */ data = drmgr_get_tls_field(drcontext, tls_idx); dr_mutex_lock(mutex); num_refs += data->num_refs; dr_mutex_unlock(mutex); log_stream_close(data->logf); /* closes fd too */ dr_raw_mem_free(data->buf_base, MEM_BUF_SIZE); dr_thread_free(drcontext, data, sizeof(per_thread_t)); }
/* CLS exit: report the context resume (diagnostic builds only) and free
 * the CLS struct only when the whole thread is exiting.
 */
static void
event_thread_context_exit(void *drcontext, bool thread_exit)
{
#ifdef SHOW_RESULTS
    dr_fprintf(STDERR, "resuming prior thread context id="TIDFMT"\n",
               dr_get_thread_id(drcontext));
#endif
    if (thread_exit) {
        per_thread_t *cls_data =
            (per_thread_t *)drmgr_get_cls_field(drcontext, tcls_idx);
        dr_thread_free(drcontext, cls_data, sizeof(per_thread_t));
    }
    /* else: leave the struct in place for re-use on the next context */
}
/* Free the chained ref-cache tables.  Tables are allocated in chunks of
 * INIT_REF_CACHE_SIZE entries; the last entry of each chunk stores, in its
 * offset[0] slot, the pointer to the next chunk.
 */
static void
fini_ref_cache_table(void *drcontext, umbra_info_t *info)
{
    ref_cache_t *cache, *next;

    for (cache = info->table.ref_cache_table;
         info->table.max_num_ref_cache > 0;
         info->table.max_num_ref_cache -= INIT_REF_CACHE_SIZE) {
        /* BUGFIX: the link to the next chunk lives in the last slot of this
         * chunk, i.e. index INIT_REF_CACHE_SIZE - 1.  The original indexed
         * with INIT_REF_TABLE_SIZE - 1 (apparent copy-paste from
         * fini_ref_table); every sibling fini_* routine here uses the same
         * constant for both the index and the chunk size, and that would
         * read the wrong element whenever the two sizes differ.
         */
        next = (ref_cache_t *)cache[INIT_REF_CACHE_SIZE - 1].offset[0];
        dr_thread_free(drcontext, cache,
                       INIT_REF_CACHE_SIZE * sizeof(ref_cache_t));
        cache = next;
    }
}
/* Walk the chain of function-table chunks and free each one.  The final
 * slot of each chunk re-uses its pc field to hold the next chunk's address.
 */
static void
fini_func_table(void *drcontext, umbra_info_t *info)
{
    func_t *chunk = info->table.func_table;

    while (info->table.max_num_funcs > 0) {
        func_t *next = (func_t *)chunk[INIT_FUNC_TABLE_SIZE - 1].pc;
        dr_thread_free(drcontext, chunk, INIT_FUNC_TABLE_SIZE * sizeof(func_t));
        chunk = next;
        info->table.max_num_funcs -= INIT_FUNC_TABLE_SIZE;
    }
}
/* Walk the chain of mem-ref table chunks and free each one.  The final
 * slot of each chunk re-uses its pc field to hold the next chunk's address.
 */
static void
fini_ref_table(void *drcontext, umbra_info_t *info)
{
    mem_ref_t *chunk = info->table.ref_table;

    while (info->table.max_num_refs > 0) {
        mem_ref_t *next = (mem_ref_t *)chunk[INIT_REF_TABLE_SIZE - 1].pc;
        dr_thread_free(drcontext, chunk, INIT_REF_TABLE_SIZE * sizeof(mem_ref_t));
        chunk = next;
        info->table.max_num_refs -= INIT_REF_TABLE_SIZE;
    }
}
/* Walk the chain of edge-table chunks and free each one.  The final slot
 * of each chunk re-uses its dst_tag field to hold the next chunk's address.
 */
static void
fini_edge_table(void *drcontext, umbra_info_t *info)
{
    link_edge_t *chunk = info->table.edge_table;

    while (info->table.max_num_edges > 0) {
        link_edge_t *next = (link_edge_t *)chunk[INIT_EDGE_TABLE_SIZE - 1].dst_tag;
        dr_thread_free(drcontext, chunk,
                       INIT_EDGE_TABLE_SIZE * sizeof(link_edge_t));
        chunk = next;
        info->table.max_num_edges -= INIT_EDGE_TABLE_SIZE;
    }
}
/* Destroy a per-thread data record: tear down its bb table, close its log,
 * then free the struct from the matching heap -- global when drcontext is
 * NULL (whole-process mode), thread-private otherwise (per-thread mode).
 */
static void
thread_data_destroy(void *drcontext, per_thread_t *data)
{
    bb_table_destroy(data->bb_table, data);
    dr_close_file(data->log);
    /* free thread data from whichever heap it was allocated on */
    if (drcontext != NULL) {
        ASSERT(drcov_per_thread, "drcov_per_thread is not set");
        dr_thread_free(drcontext, data, sizeof(*data));
    } else {
        ASSERT(!drcov_per_thread, "drcov_per_thread should not be set");
        dr_global_free(data, sizeof(*data));
    }
}
/* Walk the chain of basic-block table chunks and free each one.  The final
 * slot of each chunk re-uses its tag field to hold the next chunk's address.
 */
static void
fini_bb_table(void *drcontext, umbra_info_t *info)
{
    basic_block_t *chunk = info->table.bb_table;

    while (info->table.max_num_bbs > 0) {
        basic_block_t *next = (basic_block_t *)chunk[INIT_BB_TABLE_SIZE - 1].tag;
        dr_thread_free(drcontext, chunk,
                       INIT_BB_TABLE_SIZE * sizeof(basic_block_t));
        chunk = next;
        info->table.max_num_bbs -= INIT_BB_TABLE_SIZE;
    }
}
/* drcov thread exit: in per-thread mode dump and destroy this thread's
 * coverage data; otherwise the TLS struct is just a copy of the global
 * data, so only the struct itself is freed.
 */
static void
event_thread_exit(void *drcontext)
{
    per_thread_t *tdata =
        (per_thread_t *)drmgr_get_tls_field(drcontext, tls_idx);
    ASSERT(tdata != NULL, "data must not be NULL");
    if (!drcov_per_thread) {
        /* the per-thread data is a copy of global data */
        dr_thread_free(drcontext, tdata, sizeof(*tdata));
        return;
    }
    dump_drcov_data(drcontext, tdata);
    thread_data_destroy(drcontext, tdata);
}
/* Thread exit: print this thread's private counters with an identifying
 * prefix, then free the TLS struct.
 */
static void
event_thread_exit(void *drcontext)
{
    per_thread_t *tdata = (per_thread_t *)dr_get_tls_field(drcontext);
    char prefix[512];
    int written;

    written = dr_snprintf(prefix, sizeof(prefix) / sizeof(prefix[0]),
                          "Thread %d exited - ", dr_get_thread_id(drcontext));
    DR_ASSERT(written > 0);
    NULL_TERMINATE(prefix);

    /* display thread private counts data */
    display_results(tdata, prefix);

    /* clean up memory */
    dr_thread_free(drcontext, tdata, sizeof(per_thread_t));
}
/* test unregistering from inside an event: after unhooking itself, this
 * callback stress-tests repeated registration/unregistration of app2app
 * events with distinct priority names, then frees the name storage.
 */
static dr_emit_flags_t
one_time_bb_event(void *drcontext, void *tag, instrlist_t *bb, bool for_trace,
                  bool translating)
{
# define STRESS_REGISTER_ITERS 64
# define NAME_SZ 32
    char *names[STRESS_REGISTER_ITERS];
    drmgr_priority_t pri = { sizeof(pri), };
    int iter;

    one_time_exec++;
    if (!drmgr_unregister_bb_app2app_event(one_time_bb_event))
        CHECK(false, "drmgr unregistration failed");

    /* stress-test adding and removing */
    for (iter = 0; iter < STRESS_REGISTER_ITERS; iter++) {
        /* force sorted insertion on each add */
        pri.priority = STRESS_REGISTER_ITERS - iter;
        names[iter] = dr_thread_alloc(drcontext, NAME_SZ);
        dr_snprintf(names[iter], NAME_SZ, "%d", pri.priority);
        pri.name = names[iter];
        if (!drmgr_register_bb_app2app_event(one_time_bb_event, &pri))
            CHECK(false, "drmgr app2app registration failed");
    }
    /* XXX: drmgr lets us add multiple instances of the same callback
     * so long as they have different priority names (or use default
     * priority) -- but on removal it only asks for callback and
     * removes the first it finds.  Thus we cannot free any memory
     * tied up in a priority until we remove *all* of them.
     * Normally priorities use string literals, so seems ok.
     */
    for (iter = 0; iter < STRESS_REGISTER_ITERS; iter++) {
        if (!drmgr_unregister_bb_app2app_event(one_time_bb_event))
            CHECK(false, "drmgr app2app unregistration failed");
    }
    for (iter = 0; iter < STRESS_REGISTER_ITERS; iter++)
        dr_thread_free(drcontext, names[iter], NAME_SZ);
    return DR_EMIT_DEFAULT;
}
/* Called when the trace buffer has filled up, and needs to be flushed to disk. */ static void trace_fault(void *drcontext, void *buf_base, size_t size) { per_thread_t *data = drmgr_get_tls_field(drcontext, tls_idx); mem_ref_t *trace_base = (mem_ref_t *)(char *)buf_base; mem_ref_t *trace_ptr = (mem_ref_t *)((char *)buf_base + size); byte *write_base = drx_buf_get_buffer_base(drcontext, write_buffer); byte *write_ptr = drx_buf_get_buffer_ptr(drcontext, write_buffer); int largest_size = 0; mem_ref_t *mem_ref; char *hex_buf; /* find the largest necessary buffer so we only perform a single allocation */ for (mem_ref = trace_base; mem_ref < trace_ptr; mem_ref++) { if (mem_ref->size > largest_size) largest_size = mem_ref->size; } hex_buf = dr_thread_alloc(drcontext, 2 * largest_size + 1); /* write the memrefs to disk */ for (mem_ref = trace_base; mem_ref < trace_ptr; mem_ref++) { /* Each memref in the trace buffer has an "associated" write in the write buffer. * We pull mem_ref->size bytes from the write buffer, and assert we haven't yet * gone too far. */ /* We use libc's fprintf as it is buffered and much faster than dr_fprintf for * repeated printing that dominates performance, as the printing does here. Note * that a binary dump is *much* faster than fprintf still. */ fprintf(data->logf, "" PFX ": %s %2d %s\n", (ptr_uint_t)mem_ref->addr, decode_opcode_name(mem_ref->type), mem_ref->size, write_hexdump(hex_buf, write_base, mem_ref)); write_base += mem_ref->size; DR_ASSERT(write_base <= write_ptr); } dr_thread_free(drcontext, hex_buf, 2 * largest_size + 1); /* reset the write buffer (note: the trace buffer gets reset automatically) */ drx_buf_set_buffer_ptr(drcontext, write_buffer, drx_buf_get_buffer_base(drcontext, write_buffer)); }
/* Thread-exit teardown of the CLS (callback-local-storage) context stack.
 * Rewinds to the oldest frame, then frees every frame from oldest to
 * newest; for each frame, all registered cls-exit callbacks are invoked
 * with that frame installed as the current TLS field so clients can query
 * it.  Returns false if this thread never had a CLS stack.
 */
static bool
drmgr_cls_stack_exit(void *drcontext)
{
    tls_array_t *tls = (tls_array_t *) dr_get_tls_field(drcontext);
    tls_array_t *nxt, *tmp;
    generic_event_entry_t *e;
    if (tls == NULL)
        return false;
    /* rewind to the bottom-most (oldest) frame of the context stack */
    for (nxt = tls; nxt->prev != NULL; nxt = nxt->prev)
        ; /* nothing */
    /* hold the read lock so the cls-exit callback list cannot change
     * while we iterate it for each frame */
    dr_rwlock_read_lock(cls_event_lock);
    while (nxt != NULL) {
        tmp = nxt;
        nxt = nxt->next; /* advance before tmp is freed below */
        /* set the field in case client queries */
        dr_set_tls_field(drcontext, (void *)tmp);
        for (e = cblist_cls_exit; e != NULL; e = e->next)
            (*e->cb.cls_cb)(drcontext, true/*thread_exit*/);
        dr_thread_free(drcontext, tmp, sizeof(*tmp));
    }
    dr_rwlock_read_unlock(cls_event_lock);
    /* clear the TLS field: every frame it could point at is now freed */
    dr_set_tls_field(drcontext, NULL);
    return true;
}
/* Master basic-block event: runs the registered client callbacks in five
 * ordered passes over the block (app2app, analysis, per-instruction
 * insertion, instru2instru, and a private cti-fixup pass), OR-merging each
 * callback's emit flags into the returned result.
 *
 * "pair" callbacks share user data between their analysis and insertion
 * halves; "quartet" callbacks additionally thread user data from app2app
 * through to instru2instru.  The per-invocation user-data arrays are
 * allocated from this thread's heap and freed before returning.
 */
static dr_emit_flags_t
drmgr_bb_event(void *drcontext, void *tag, instrlist_t *bb,
               bool for_trace, bool translating)
{
    cb_entry_t *e;
    dr_emit_flags_t res = DR_EMIT_DEFAULT;
    instr_t *inst, *next_inst;
    void **pair_data = NULL, **quartet_data = NULL;
    uint pair_idx, quartet_idx;

    /* hold the cb-list read lock across all passes so the lists are stable */
    dr_rwlock_read_lock(bb_cb_lock);

    /* We need per-thread user_data */
    if (pair_count > 0)
        pair_data = (void **) dr_thread_alloc(drcontext, sizeof(void*)*pair_count);
    if (quartet_count > 0)
        quartet_data = (void **)
            dr_thread_alloc(drcontext, sizeof(void*)*quartet_count);

    /* Pass 1: app2app */
    for (quartet_idx = 0, e = cblist_app2app; e != NULL; e = e->next) {
        if (e->has_quartet) {
            /* quartet cb deposits user data in its slot for later passes */
            res |= (*e->cb.app2app_ex_cb)
                (drcontext, tag, bb, for_trace, translating,
                 &quartet_data[quartet_idx]);
            quartet_idx++;
        } else
            res |= (*e->cb.xform_cb)(drcontext, tag, bb, for_trace, translating);
    }

    /* Pass 2: analysis */
    for (quartet_idx = 0, pair_idx = 0, e = cblist_instrumentation;
         e != NULL; e = e->next) {
        if (e->has_quartet) {
            /* quartet cb receives the value it stored in pass 1 */
            res |= (*e->cb.pair_ex.analysis_ex_cb)
                (drcontext, tag, bb, for_trace, translating,
                 quartet_data[quartet_idx]);
            quartet_idx++;
        } else {
            /* pair cb deposits user data in its slot for pass 3 */
            res |= (*e->cb.pair.analysis_cb)
                (drcontext, tag, bb, for_trace, translating,
                 &pair_data[pair_idx]);
            pair_idx++;
        }
        /* XXX: add checks that cb followed the rules */
    }

    /* Pass 3: instru, per instr */
    for (inst = instrlist_first(bb); inst != NULL; inst = next_inst) {
        /* capture next first: insertion cbs may insert around inst */
        next_inst = instr_get_next(inst);
        /* index counters restart per instruction so each cb reads the
         * same slot it was assigned in pass 2 / pass 1 */
        for (quartet_idx = 0, pair_idx = 0, e = cblist_instrumentation;
             e != NULL; e = e->next) {
            if (e->has_quartet) {
                res |= (*e->cb.pair_ex.insertion_ex_cb)
                    (drcontext, tag, bb, inst, for_trace, translating,
                     quartet_data[quartet_idx]);
                quartet_idx++;
            } else {
                res |= (*e->cb.pair.insertion_cb)
                    (drcontext, tag, bb, inst, for_trace, translating,
                     pair_data[pair_idx]);
                pair_idx++;
            }
            /* XXX: add checks that cb followed the rules */
        }
    }

    /* Pass 4: final */
    for (quartet_idx = 0, e = cblist_instru2instru; e != NULL; e = e->next) {
        if (e->has_quartet) {
            res |= (*e->cb.instru2instru_ex_cb)
                (drcontext, tag, bb, for_trace, translating,
                 quartet_data[quartet_idx]);
            quartet_idx++;
        } else
            res |= (*e->cb.xform_cb)(drcontext, tag, bb, for_trace, translating);
    }

    /* Pass 5: our private pass to support multiple non-meta ctis in app2app phase */
    drmgr_fix_app_ctis(drcontext, bb);

    if (pair_count > 0)
        dr_thread_free(drcontext, pair_data, sizeof(void*)*pair_count);
    if (quartet_count > 0)
        dr_thread_free(drcontext, quartet_data, sizeof(void*)*quartet_count);

    dr_rwlock_read_unlock(bb_cb_lock);
    return res;
}