static void event_thread_init(void *drcontext) { per_thread_t *data; /* allocate thread private data */ data = dr_thread_alloc(drcontext, sizeof(per_thread_t)); drmgr_set_tls_field(drcontext, tls_index, data); data->buf_base = dr_thread_alloc(drcontext, MEM_BUF_SIZE); data->buf_ptr = data->buf_base; /* set buf_end to be negative of address of buffer end for the lea later */ data->buf_end = -(ptr_int_t)(data->buf_base + MEM_BUF_SIZE); data->num_refs = 0; /* We're going to dump our data to a per-thread file. * On Windows we need an absolute path so we place it in * the same directory as our library. We could also pass * in a path as a client argument. */ data->log = log_file_open(client_id, drcontext, NULL /* using client lib path */, "instrace", #ifndef WINDOWS DR_FILE_CLOSE_ON_FORK | #endif DR_FILE_ALLOW_LARGE); #ifdef OUTPUT_TEXT data->logf = log_stream_from_file(data->log); fprintf(data->logf, "Format: <instr address>,<opcode>\n"); #endif }
static void event_thread_init(void *drcontext) { per_thread_t *data = dr_thread_alloc(drcontext, sizeof(per_thread_t)); DR_ASSERT(data != NULL); drmgr_set_tls_field(drcontext, tls_idx, data); /* Keep seg_base in a per-thread data structure so we can get the TLS * slot and find where the pointer points to in the buffer. */ data->seg_base = dr_get_dr_segment_base(tls_seg); data->buf_base = dr_raw_mem_alloc(MEM_BUF_SIZE, DR_MEMPROT_READ | DR_MEMPROT_WRITE, NULL); DR_ASSERT(data->seg_base != NULL && data->buf_base != NULL); /* put buf_base to TLS as starting buf_ptr */ BUF_PTR(data->seg_base) = data->buf_base; data->num_refs = 0; /* We're going to dump our data to a per-thread file. * On Windows we need an absolute path so we place it in * the same directory as our library. We could also pass * in a path as a client argument. */ data->log = log_file_open(client_id, drcontext, NULL /* using client lib path */, "memtrace", #ifndef WINDOWS DR_FILE_CLOSE_ON_FORK | #endif DR_FILE_ALLOW_LARGE); data->logf = log_stream_from_file(data->log); fprintf(data->logf, "Format: <data address>: <data size>, <(r)ead/(w)rite/opcode>\n"); }
static void event_thread_init(void *drcontext) { per_thread_t *data = dr_thread_alloc(drcontext, sizeof(*data)); DR_ASSERT(data != NULL); dr_set_tls_field(drcontext, data); /* Keep seg_base in a per-thread data structure so we can get the TLS * slot and find where the pointer points to in the buffer. * It is mainly for users using a debugger to get the execution history. */ data->seg_base = dr_get_dr_segment_base(tls_seg); /* We allocate a 128KB buffer to make sure we have a 64KB buffer with * 64KB-aligned starting address, so that we can fill the buffer * cyclically by incrementing the bottom 16 bits of the pointer. */ data->buf_base = dr_raw_mem_alloc(TLS_BUF_SIZE, DR_MEMPROT_READ | DR_MEMPROT_WRITE, NULL); DR_ASSERT(data->seg_base != NULL && data->buf_base != NULL); memset(data->buf_base, 0, TLS_BUF_SIZE); /* put the 64KB-aligned address into TLS slot as the pointer pointing * to the 64KB cyclic buffer */ *(void **)((byte *)(data->seg_base) + tls_offs) = (void *) ALIGN_FORWARD(data->buf_base, BUF_64K_BYTE); }
/* Return a pointer to a fresh basic_block_t slot in info's bb table and
 * assign it the next sequential id.
 * The table is a chain of fixed-size chunks of INIT_BB_TABLE_SIZE entries;
 * the last entry of each chunk is not a real block but a link whose .tag
 * field stores the pointer to the next chunk. */
basic_block_t *
table_alloc_bb(void *drcontext, umbra_info_t *info)
{
    int i, num_bbs;
    basic_block_t *bb_table;
    basic_block_t *bb;

    /* claim the next id */
    num_bbs = info->table.num_bbs++;
    bb_table = info->table.bb_table;
    /* walk the chunk chain to the chunk that contains index num_bbs */
    for (i = 1; true; i++) {
        if (num_bbs < (i * INIT_BB_TABLE_SIZE))
            break;
        bb_table = (basic_block_t *) bb_table[INIT_BB_TABLE_SIZE - 1].tag;
    }
    /* if num_bbs landed on the link slot, grow the chain and skip past it */
    if((num_bbs % INIT_BB_TABLE_SIZE) == (INIT_BB_TABLE_SIZE - 1)) {
        bb_table[INIT_BB_TABLE_SIZE - 1].id = num_bbs;
        /* the link slot's .tag records the newly allocated chunk's address */
        bb_table[INIT_BB_TABLE_SIZE - 1].tag = (app_pc)
            dr_thread_alloc(drcontext, INIT_BB_TABLE_SIZE * sizeof(basic_block_t));
        /* the link slot itself is consumed: advance both counters past it */
        ++info->table.num_bbs;
        ++num_bbs;
        info->table.max_num_bbs += INIT_BB_TABLE_SIZE;
        bb_table = (basic_block_t *)bb_table[INIT_BB_TABLE_SIZE - 1].tag;
        memset(bb_table, 0, INIT_BB_TABLE_SIZE * sizeof(basic_block_t));
    }
    bb = &bb_table[num_bbs % INIT_BB_TABLE_SIZE];
    bb->id = num_bbs;
    return bb;
}
/* Allocate this thread's private data and publish it via the drmgr TLS slot. */
static void
event_thread_init(void *drcontext)
{
    per_thread_t *per_thr = dr_thread_alloc(drcontext, sizeof(*per_thr));
    /* start with an empty cache */
    memset(per_thr->cache, 0, sizeof(per_thr->cache));
    drmgr_set_tls_field(drcontext, tls_idx, per_thr);
}
/* Return a pointer to a fresh link_edge_t slot in info's edge table and
 * assign it the next sequential id.
 * The table is a chain of fixed-size chunks of INIT_EDGE_TABLE_SIZE entries;
 * the last entry of each chunk is a link whose .dst_tag field stores the
 * pointer to the next chunk. */
link_edge_t *
table_alloc_edge(void *drcontext, umbra_info_t *info)
{
    int i, num_edges;
    link_edge_t *edge_table;
    link_edge_t *edge;

    /* claim the next id */
    num_edges = info->table.num_edges++;
    edge_table = info->table.edge_table;
    /* walk the chunk chain to the chunk that contains index num_edges */
    for(i = 1; true; i++) {
        if(num_edges < (i * INIT_EDGE_TABLE_SIZE))
            break;
        edge_table = (link_edge_t *)edge_table[INIT_EDGE_TABLE_SIZE - 1].dst_tag;
    }
    /* if num_edges landed on the link slot, grow the chain and skip past it */
    if((num_edges % INIT_EDGE_TABLE_SIZE) == (INIT_EDGE_TABLE_SIZE - 1)) {
        edge_table[INIT_EDGE_TABLE_SIZE - 1].id = num_edges;
        /* the link slot's .dst_tag records the newly allocated chunk */
        edge_table[INIT_EDGE_TABLE_SIZE - 1].dst_tag = (app_pc)
            dr_thread_alloc(drcontext, INIT_EDGE_TABLE_SIZE * sizeof(link_edge_t));
        /* the link slot itself is consumed: advance both counters past it */
        ++num_edges;
        ++info->table.num_edges;
        info->table.max_num_edges += INIT_EDGE_TABLE_SIZE;
        edge_table = (link_edge_t *)edge_table[INIT_EDGE_TABLE_SIZE - 1].dst_tag;
        memset(edge_table, 0, INIT_EDGE_TABLE_SIZE * sizeof(link_edge_t));
    }
    edge = &edge_table[num_edges % INIT_EDGE_TABLE_SIZE];
    edge->id = num_edges;
    return edge;
}
/* Enter a new callback (CLS) context: push one level onto this thread's
 * context stack, copy the parent's TLS slot values into the child, and make
 * the child the current context.  Returns false on internal error (no
 * current context installed for this thread). */
static bool
drmgr_cls_stack_push(void)
{
    void *drcontext = dr_get_current_drcontext();
    tls_array_t *tls_parent = (tls_array_t *) dr_get_tls_field(drcontext);
    tls_array_t *tls_child;
    bool new_depth = false;
    if (tls_parent == NULL) {
        ASSERT(false, "internal error");
        return false;
    }

    tls_child = tls_parent->next;
    /* we re-use to avoid churn: a level popped earlier is kept linked and
     * re-activated here instead of being freed and re-allocated */
    if (tls_child == NULL) {
        tls_child = dr_thread_alloc(drcontext, sizeof(*tls_child));
        memset(tls_child, 0, sizeof(*tls_child));
        tls_parent->next = tls_child;
        tls_child->prev = tls_parent;
        tls_child->next = NULL;
        /* first time this thread reaches this depth; reported to the event */
        new_depth = true;
    } else
        ASSERT(tls_child->prev == tls_parent, "cls stack corrupted");

    /* share the tls slots: the child starts with a copy of the parent's values */
    memcpy(tls_child->tls, tls_parent->tls, sizeof(*tls_child->tls)*MAX_NUM_TLS);
    /* swap in as the current structure */
    dr_set_tls_field(drcontext, (void *)tls_child);
    return drmgr_cls_stack_push_event(drcontext, new_depth);
}
/* Return a pointer to a fresh mem_ref_t slot in info's ref table, assigning
 * it the next sequential id and a zero count.
 * The table is a chain of fixed-size chunks of INIT_REF_TABLE_SIZE entries;
 * the last entry of each chunk is a link whose .pc field stores the pointer
 * to the next chunk. */
mem_ref_t *
table_alloc_ref(void *drcontext, umbra_info_t *info)
{
    int i, num_refs;
    mem_ref_t *ref_table;
    mem_ref_t *ref;

    /* claim the next id */
    num_refs = info->table.num_refs++;
    ref_table = info->table.ref_table;
    /* walk the chunk chain to the chunk that contains index num_refs */
    for(i = 1; true; i++) {
        if(num_refs < (i * INIT_REF_TABLE_SIZE))
            break;
        ref_table = (mem_ref_t *)ref_table[INIT_REF_TABLE_SIZE - 1].pc;
    }
    /* if num_refs landed on the link slot, grow the chain and skip past it */
    if((num_refs % INIT_REF_TABLE_SIZE) == (INIT_REF_TABLE_SIZE - 1)) {
        ref_table[INIT_REF_TABLE_SIZE - 1].id = num_refs;
        /* the link slot's .pc records the newly allocated chunk's address */
        ref_table[INIT_REF_TABLE_SIZE - 1].pc = (app_pc)
            dr_thread_alloc(drcontext, INIT_REF_TABLE_SIZE * sizeof(mem_ref_t));
        /* the link slot itself is consumed: advance both counters past it */
        info->table.num_refs++;
        num_refs++;
        info->table.max_num_refs += INIT_REF_TABLE_SIZE;
        ref_table = (mem_ref_t *)ref_table[INIT_REF_TABLE_SIZE - 1].pc;
        memset(ref_table, 0, INIT_REF_TABLE_SIZE * sizeof(mem_ref_t));
    }
    ref = &ref_table[num_refs % INIT_REF_TABLE_SIZE];
    ref->id = num_refs;
    ref->count = 0;
    /* optional statistics bookkeeping */
    if (proc_info.options.stat == true) {
        info->num_app_refs++;
    }
    return ref;
}
/* Return a pointer to a fresh func_t slot in info's func table and assign
 * it the next sequential id.
 * The table is a chain of fixed-size chunks of INIT_FUNC_TABLE_SIZE entries;
 * the last entry of each chunk is a link whose .pc field stores the pointer
 * to the next chunk. */
func_t *
table_alloc_func(void *drcontext, umbra_info_t *info)
{
    int i, num_funcs;
    func_t *func_table;
    func_t *func;

    /* claim the next id */
    num_funcs = info->table.num_funcs++;
    func_table = info->table.func_table;
    /* walk the chunk chain to the chunk that contains index num_funcs */
    for(i = 1; true; i++) {
        if(num_funcs < (i * INIT_FUNC_TABLE_SIZE))
            break;
        func_table = (func_t *)func_table[INIT_FUNC_TABLE_SIZE - 1].pc;
    }
    /* if num_funcs landed on the link slot, grow the chain and skip past it */
    if((num_funcs % INIT_FUNC_TABLE_SIZE) == (INIT_FUNC_TABLE_SIZE - 1)) {
        func_table[INIT_FUNC_TABLE_SIZE - 1].id = num_funcs;
        /* the link slot's .pc records the newly allocated chunk's address */
        func_table[INIT_FUNC_TABLE_SIZE - 1].pc = (app_pc)
            dr_thread_alloc(drcontext, INIT_FUNC_TABLE_SIZE * sizeof(func_t));
        /* the link slot itself is consumed: advance both counters past it */
        ++info->table.num_funcs;
        ++num_funcs;
        info->table.max_num_funcs += INIT_FUNC_TABLE_SIZE;
        func_table = (func_t *)func_table[INIT_FUNC_TABLE_SIZE - 1].pc;
        memset(func_table, 0, INIT_FUNC_TABLE_SIZE * sizeof(func_t));
    }
    func = &func_table[num_funcs % INIT_FUNC_TABLE_SIZE];
    func->id = num_funcs;
    return func;
}
/* Return a pointer to a fresh ref_cache_t slot in info's ref-cache table.
 * The table is a chain of fixed-size chunks of INIT_REF_CACHE_SIZE entries;
 * the last entry of each chunk is a link whose offset[0] field stores the
 * pointer to the next chunk.  Unlike the other table_alloc_* routines the
 * returned slot carries no id field (it is left zeroed from the chunk's
 * memset). */
ref_cache_t *
table_alloc_ref_cache(void *drcontext, umbra_info_t *info)
{
    int i, num_refs;
    ref_cache_t *ref_table;
    ref_cache_t *ref;

    /* claim the next index */
    num_refs = info->table.num_ref_cache++;
    ref_table = info->table.ref_cache_table;
    /* walk the chunk chain to the chunk that contains index num_refs */
    for(i = 1; true; i++) {
        if(num_refs < (i * INIT_REF_CACHE_SIZE))
            break;
        ref_table = (ref_cache_t *)ref_table[INIT_REF_CACHE_SIZE - 1].offset[0];
    }
    /* if num_refs landed on the link slot, grow the chain and skip past it */
    if((num_refs % INIT_REF_CACHE_SIZE) == (INIT_REF_CACHE_SIZE - 1)) {
        /* the link slot's offset[0] records the newly allocated chunk */
        ref_table[INIT_REF_CACHE_SIZE - 1].offset[0] = (reg_t)
            dr_thread_alloc(drcontext, INIT_REF_CACHE_SIZE * sizeof(ref_cache_t));
        /* the link slot itself is consumed: advance both counters past it */
        ++info->table.num_ref_cache;
        ++num_refs;
        info->table.max_num_ref_cache += INIT_REF_CACHE_SIZE;
        ref_table = (ref_cache_t *)ref_table[INIT_REF_CACHE_SIZE - 1].offset[0];
        memset(ref_table, 0, INIT_REF_CACHE_SIZE * sizeof(ref_cache_t));
    }
    ref = &ref_table[num_refs % INIT_REF_CACHE_SIZE];
    /* optional statistics bookkeeping */
    if (proc_info.options.stat == true) {
        info->num_ref_caches++;
    }
    return ref;
}
/* callbacks for threads */
/* Thread-init template: allocate this thread's per_thread_t and store it in
 * the drmgr TLS slot.
 * NOTE(review): "<client_name>" is a template placeholder, not a valid C
 * identifier — it must be substituted with the real client name before this
 * file compiles. */
void <client_name>_thread_init(void *drcontext){
    per_thread_t * data;

    DEBUG_PRINT("%s - initializing thread %d\n", ins_pass_name,
                dr_get_thread_id(drcontext));
    /* allocate and publish the per-thread data */
    data = dr_thread_alloc(drcontext, sizeof(per_thread_t));
    drmgr_set_tls_field(drcontext, tls_index, data);
}
/* Snapshot the global data into a freshly allocated per-thread copy
 * (used as a per-thread cache of the global state). */
static per_thread_t *
thread_data_copy(void *drcontext)
{
    per_thread_t *copy;
    ASSERT(drcontext != NULL, "drcontext must not be NULL");
    copy = dr_thread_alloc(drcontext, sizeof(*copy));
    /* structure assignment duplicates every field of the global template */
    *copy = *global_data;
    return copy;
}
/* Set up the per-thread bytes table at its maximum capacity, with the
 * write cursor at the start. */
static void
init_bytes_table(void *drcontext, umbra_info_t *info)
{
    info->table.bytes_table = dr_thread_alloc(drcontext, MAX_BYTES_TABLE_SIZE);
    /* the cursor begins at the start of the freshly allocated table */
    info->table.bytes_ptr = info->table.bytes_table;
    info->table.bytes_size = MAX_BYTES_TABLE_SIZE;
    info->table.max_bytes_size = MAX_BYTES_TABLE_SIZE;
}
/* Allocate the client's TLS record for this thread and record its id. */
static void
umbra_client_thread_init(void *drcontext, umbra_info_t *umbra_info)
{
    client_tls_data_t *tls = dr_thread_alloc(drcontext, sizeof(client_tls_data_t));
    tls->tid = dr_get_thread_id(drcontext);
    umbra_info->client_tls_data = tls;
}
/* Create the first (zeroed) chunk of the per-thread func table. */
static void
init_func_table(void *drcontext, umbra_info_t *info)
{
    func_t *chunk =
        dr_thread_alloc(drcontext, INIT_FUNC_TABLE_SIZE * sizeof(func_t));
    memset(chunk, 0, INIT_FUNC_TABLE_SIZE * sizeof(func_t));
    info->table.func_table = chunk;
    /* counting starts at 1 */
    info->table.num_funcs = 1;
    info->table.max_num_funcs = INIT_FUNC_TABLE_SIZE;
}
/* Create the first (zeroed) chunk of the per-thread memory-reference table. */
static void
init_ref_table(void *drcontext, umbra_info_t *info)
{
    mem_ref_t *chunk =
        dr_thread_alloc(drcontext, INIT_REF_TABLE_SIZE * sizeof(mem_ref_t));
    memset(chunk, 0, INIT_REF_TABLE_SIZE * sizeof(mem_ref_t));
    info->table.ref_table = chunk;
    /* counting starts at 1 */
    info->table.num_refs = 1;
    info->table.max_num_refs = INIT_REF_TABLE_SIZE;
}
/* Create the per-thread code hash table with all buckets empty. */
static void
init_code_hash(void *drcontext, umbra_info_t *info)
{
    info->table.code_hash_table =
        dr_thread_alloc(drcontext, INIT_HASH_TABLE_SIZE * sizeof(code_hash_t *));
    /* every bucket starts out as an empty (NULL) chain */
    memset(info->table.code_hash_table, 0,
           INIT_HASH_TABLE_SIZE * sizeof(code_hash_t *));
    /* NOTE(review): mask = size-1 assumes INIT_HASH_TABLE_SIZE is a power
     * of two — confirm against its definition */
    info->table.code_hash_mask = (INIT_HASH_TABLE_SIZE - 1);
    info->table.code_hash_size = INIT_HASH_TABLE_SIZE;
}
/* Create the first (zeroed) chunk of the per-thread basic-block table. */
static void
init_bb_table(void *drcontext, umbra_info_t *info)
{
    basic_block_t *chunk =
        dr_thread_alloc(drcontext, INIT_BB_TABLE_SIZE * sizeof(basic_block_t));
    memset(chunk, 0, INIT_BB_TABLE_SIZE * sizeof(basic_block_t));
    info->table.bb_table = chunk;
    /* counting starts at 1 */
    info->table.num_bbs = 1;
    info->table.max_num_bbs = INIT_BB_TABLE_SIZE;
}
/* Create the first (zeroed) chunk of the per-thread link-edge table. */
static void
init_edge_table(void *drcontext, umbra_info_t *info)
{
    link_edge_t *chunk =
        dr_thread_alloc(drcontext, INIT_EDGE_TABLE_SIZE * sizeof(link_edge_t));
    memset(chunk, 0, INIT_EDGE_TABLE_SIZE * sizeof(link_edge_t));
    info->table.edge_table = chunk;
    /* counting starts at 1 */
    info->table.num_edges = 1;
    info->table.max_num_edges = INIT_EDGE_TABLE_SIZE;
}
static void event_thread_init(void *drcontext) { /* create an instance of our data structure for this thread */ per_thread_t *data = (per_thread_t *)dr_thread_alloc(drcontext, sizeof(per_thread_t)); /* store it in the slot provided in the drcontext */ drmgr_set_tls_field(drcontext, tls_idx, data); data->num_direct_calls = 0; data->num_indirect_calls = 0; data->num_returns = 0; dr_log(drcontext, DR_LOG_ALL, 1, "countcalls: set up for thread " TIDFMT "\n", dr_get_thread_id(drcontext)); }
/* Attach a fresh client TLS record to this thread and bump the process-wide
 * thread count. */
static void
eddi_client_thread_init(void *drcontext, eddi_info_t *eddi_info)
{
    eddi_info->client_tls_data =
        dr_thread_alloc(drcontext, sizeof(client_tls_data_t));
    /* the shared counter is protected by the process-level lock */
    dr_mutex_lock(client_proc_data.lock);
    client_proc_data.num_threads++;
    dr_mutex_unlock(client_proc_data.lock);
}
/* Install this thread's zeroed TLS array, then deliver the thread-init
 * event to every registered callback and set up the CLS stack. */
static void
drmgr_thread_init_event(void *drcontext)
{
    generic_event_entry_t *iter;
    tls_array_t *tls = dr_thread_alloc(drcontext, sizeof(*tls));
    memset(tls, 0, sizeof(*tls));
    dr_set_tls_field(drcontext, (void *)tls);

    /* walk the callback list under the read lock */
    dr_rwlock_read_lock(thread_event_lock);
    for (iter = cblist_thread_init; iter != NULL; iter = iter->next) {
        (*iter->cb.thread_cb)(drcontext);
    }
    dr_rwlock_read_unlock(thread_event_lock);

    drmgr_cls_stack_init(drcontext);
}
/* Reset (and on first entry at a new depth, allocate) the per-context data
 * for this thread's current callback depth. */
static void
event_thread_context_init(void *drcontext, bool new_depth)
{
    per_thread_t *ctx;
#ifdef SHOW_RESULTS
    dr_fprintf(STDERR, "new thread context id="TIDFMT"%s\n",
               dr_get_thread_id(drcontext), new_depth ? " new depth" : "");
#endif
    if (!new_depth) {
        /* re-use the structure cached at this depth on a prior entry */
        ctx = (per_thread_t *) drmgr_get_cls_field(drcontext, tcls_idx);
    } else {
        /* first time at this depth: allocate and cache a structure */
        ctx = (per_thread_t *) dr_thread_alloc(drcontext, sizeof(per_thread_t));
        drmgr_set_cls_field(drcontext, tcls_idx, ctx);
    }
    memset(ctx, 0, sizeof(*ctx));
}
/* Record bb in the code hash table by prepending a new entry to the chain
 * of the bucket selected by its tag. */
void
table_bb_add_to_hashtable(void *drcontext, umbra_info_t *info, basic_block_t *bb)
{
    code_hash_t *entry = dr_thread_alloc(drcontext, sizeof(code_hash_t));
    uint bucket = CODE_HASH_FUNC((reg_t)bb->tag, info->table.code_hash_mask);
    entry->tag = bb->tag;
    entry->length = bb->length;
    entry->bb = bb->id;
    /* push onto the head of the bucket's singly linked list */
    entry->next = info->table.code_hash_table[bucket];
    info->table.code_hash_table[bucket] = entry;
}
static per_thread_t * thread_data_create(void *drcontext) { per_thread_t *data; if (drcontext == NULL) { ASSERT(!drcov_per_thread, "drcov_per_thread should not be set"); data = dr_global_alloc(sizeof(*data)); } else { ASSERT(drcov_per_thread, "drcov_per_thread should be set"); data = dr_thread_alloc(drcontext, sizeof(*data)); } /* XXX: can we assume bb create event is serialized, * if so, no lock is required for bb_table operation. */ data->bb_table = bb_table_create(drcontext == NULL ? true : false); log_file_create(drcontext, data); return data; }
/* Reset (and on first entry at a new depth, allocate) the per-context data
 * for this thread, then fire a one-time self-nudge. */
static void
event_thread_context_init(void *drcontext, bool new_depth)
{
    per_thread_t *ctx;
    if (!new_depth) {
        /* re-use the structure cached at this depth on a prior entry */
        ctx = (per_thread_t *) drmgr_get_cls_field(drcontext, cls_idx);
    } else {
        /* first time at this depth: allocate and cache a structure */
        ctx = (per_thread_t *) dr_thread_alloc(drcontext, sizeof(per_thread_t));
        drmgr_set_cls_field(drcontext, cls_idx, ctx);
    }
    memset(ctx, 0, sizeof(*ctx));
    /* test self-nudge to make up for lack of nudge_test on windows (waiting
     * for runall support (i#120) */
    if (!sent_self) {
        sent_self = true;
        if (!dr_nudge_client(client_id, NUDGE_ARG_SELF))
            dr_fprintf(STDERR, "self nudge failed");
    }
}
/* test unregistering from inside an event */
/* Unregisters itself on first invocation, then stress-tests repeated
 * registration/unregistration of the same callback under distinct priority
 * names, freeing the name storage only after all instances are removed. */
static dr_emit_flags_t
one_time_bb_event(void *drcontext, void *tag, instrlist_t *bb, bool for_trace,
                  bool translating)
{
    int i;
# define STRESS_REGISTER_ITERS 64
# define NAME_SZ 32
    char *names[STRESS_REGISTER_ITERS];
    drmgr_priority_t pri = {
        sizeof(pri),
    };
    one_time_exec++;
    /* unregister from inside the event itself: the case under test */
    if (!drmgr_unregister_bb_app2app_event(one_time_bb_event))
        CHECK(false, "drmgr unregistration failed");

    /* stress-test adding and removing */
    for (i = 0; i < STRESS_REGISTER_ITERS; i++) {
        /* force sorted insertion on each add (descending priority values) */
        pri.priority = STRESS_REGISTER_ITERS - i;
        /* each instance needs a distinct priority name */
        names[i] = dr_thread_alloc(drcontext, NAME_SZ);
        dr_snprintf(names[i], NAME_SZ, "%d", pri.priority);
        pri.name = names[i];
        if (!drmgr_register_bb_app2app_event(one_time_bb_event, &pri))
            CHECK(false, "drmgr app2app registration failed");
    }
    /* XXX: drmgr lets us add multiple instances of the same callback
     * so long as they have different priority names (or use default
     * priority) -- but on removal it only asks for callback and
     * removes the first it finds.  Thus we cannot free any memory
     * tied up in a priority until we remove *all* of them.
     * Normally priorities use string literals, so seems ok.
     */
    for (i = 0; i < STRESS_REGISTER_ITERS; i++) {
        if (!drmgr_unregister_bb_app2app_event(one_time_bb_event))
            CHECK(false, "drmgr app2app unregistration failed");
    }
    /* only now is it safe to release the name strings (see XXX above) */
    for (i = 0; i < STRESS_REGISTER_ITERS; i++) {
        dr_thread_free(drcontext, names[i], NAME_SZ);
    }
    return DR_EMIT_DEFAULT;
}
static void event_thread_init(void *drcontext) { per_thread_t *data = dr_thread_alloc(drcontext, sizeof(per_thread_t)); DR_ASSERT(data != NULL); data->reg_addr = DR_REG_NULL; drmgr_set_tls_field(drcontext, tls_idx, data); /* We're going to dump our data to a per-thread file. * On Windows we need an absolute path so we place it in * the same directory as our library. We could also pass * in a path as a client argument. */ data->log = log_file_open(client_id, drcontext, NULL /* using client lib path */, "memval", #ifndef WINDOWS DR_FILE_CLOSE_ON_FORK | #endif DR_FILE_ALLOW_LARGE); data->logf = log_stream_from_file(data->log); }
/* Called when the trace buffer has filled up, and needs to be flushed to disk.
 * Walks the filled trace buffer in lockstep with the write-value buffer,
 * printing each memref plus a hexdump of the bytes it wrote, then resets the
 * write buffer. */
static void
trace_fault(void *drcontext, void *buf_base, size_t size)
{
    per_thread_t *data = drmgr_get_tls_field(drcontext, tls_idx);
    /* the filled region of the trace buffer is [trace_base, trace_ptr) */
    mem_ref_t *trace_base = (mem_ref_t *)(char *)buf_base;
    mem_ref_t *trace_ptr = (mem_ref_t *)((char *)buf_base + size);
    byte *write_base = drx_buf_get_buffer_base(drcontext, write_buffer);
    byte *write_ptr = drx_buf_get_buffer_ptr(drcontext, write_buffer);
    int largest_size = 0;
    mem_ref_t *mem_ref;
    char *hex_buf;

    /* find the largest necessary buffer so we only perform a single allocation */
    for (mem_ref = trace_base; mem_ref < trace_ptr; mem_ref++) {
        if (mem_ref->size > largest_size)
            largest_size = mem_ref->size;
    }
    /* sized for two hex chars per byte plus a terminator */
    hex_buf = dr_thread_alloc(drcontext, 2 * largest_size + 1);
    /* write the memrefs to disk */
    for (mem_ref = trace_base; mem_ref < trace_ptr; mem_ref++) {
        /* Each memref in the trace buffer has an "associated" write in the write
         * buffer.  We pull mem_ref->size bytes from the write buffer, and assert
         * we haven't yet gone too far. */
        /* We use libc's fprintf as it is buffered and much faster than dr_fprintf
         * for repeated printing that dominates performance, as the printing does
         * here.  Note that a binary dump is *much* faster than fprintf still. */
        fprintf(data->logf, "" PFX ": %s %2d %s\n", (ptr_uint_t)mem_ref->addr,
                decode_opcode_name(mem_ref->type), mem_ref->size,
                write_hexdump(hex_buf, write_base, mem_ref));
        write_base += mem_ref->size;
        DR_ASSERT(write_base <= write_ptr);
    }
    dr_thread_free(drcontext, hex_buf, 2 * largest_size + 1);

    /* reset the write buffer (note: the trace buffer gets reset automatically) */
    drx_buf_set_buffer_ptr(drcontext, write_buffer,
                           drx_buf_get_buffer_base(drcontext, write_buffer));
}
/* callbacks for threads */ void funcwrap_thread_init(void *drcontext){ per_thread_t * data; char logfilename[MAX_STRING_LENGTH]; char thread_id[MAX_STRING_LENGTH]; DEBUG_PRINT("%s - initializing thread %d\n", ins_pass_name, dr_get_thread_id(drcontext)); data = dr_thread_alloc(drcontext, sizeof(per_thread_t)); if (log_mode){ dr_snprintf(thread_id, MAX_STRING_LENGTH, "%d", dr_get_thread_id(drcontext)); populate_conv_filename(logfilename, logdir, ins_pass_name, thread_id); data->logfile = dr_open_file(logfilename, DR_FILE_WRITE_OVERWRITE); } data->filter_func = false; data->nesting = 0; drmgr_set_tls_field(drcontext, tls_index, data); DEBUG_PRINT("%s - initializing thread done %d\n", ins_pass_name, dr_get_thread_id(drcontext)); }