/* Sets up per-thread kstats for the calling thread: allocates the
 * bookkeeping struct, opens the per-thread output target, publishes it on
 * the dcontext, and starts the thread_measured timer.  No-op when the
 * kstats option is off.
 */
void
kstat_thread_init(dcontext_t *dcontext)
{
    thread_kstats_t *tks;
    if (!DYNAMO_OPTION(kstats))
        return; /* dcontext->thread_kstats stays NULL */
    /* allocated on thread heap - use global if timing initialization matters */
    tks = HEAP_TYPE_ALLOC(dcontext, thread_kstats_t, ACCT_STATS, UNPROTECTED);
    LOG(THREAD, LOG_STATS, 2, "thread_kstats="PFX" size=%d\n", tks,
        sizeof(thread_kstats_t));
    /* finish all bookkeeping initialization before publishing via dcontext */
    kstat_init_variables(&tks->vars_kstats);
    /* a dummy node saves one branch in UPDATE_CURRENT_COUNTER */
    tks->stack_kstats.depth = 1;
    tks->thread_id = get_thread_id();
#ifdef DEBUG
    tks->outfile_kstats = THREAD;
#else
    tks->outfile_kstats = open_log_file(kstats_thread_logfile_name(), NULL, 0);
#endif
    dcontext->thread_kstats = tks;
    /* calibration must run in a thread after it's initialized */
    kstat_calibrate();
    KSTART_DC(dcontext, thread_measured);
    LOG(THREAD, LOG_STATS, 2, "threads_started\n");
}
static void * allmem_info_dup(void *data) { allmem_info_t *src = (allmem_info_t *) data; allmem_info_t *dst = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, allmem_info_t, ACCT_MEM_MGT, PROTECTED); ASSERT(src != NULL); *dst = *src; return dst; }
/* Allocates a callee_info_t, runs the default initializer, and records the
 * callee's entry point and argument count.  Caller owns the result.
 */
static callee_info_t *
callee_info_create(app_pc start, uint num_args)
{
    callee_info_t *ci = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, callee_info_t,
                                        ACCT_CLEANCALL, PROTECTED);
    callee_info_init(ci);
    ci->start = start;
    ci->num_args = num_args;
    return ci;
}
static fragment_tree_t * fragment_tree_create() { fragment_tree_t *tree = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, fragment_tree_t, ACCT_OTHER, UNPROTECTED); memset(tree, 0, sizeof(fragment_tree_t)); tree->node_heap = special_heap_init(sizeof(bb_node_t), true /* lock */, false /* -x */, true /* persistent */); tree->trace_heap = special_heap_init(sizeof(bb_node_t), true /* lock */, false /* -x */, true /* persistent */); tree->nil = special_heap_alloc(tree->node_heap); memset(tree->nil, 0, sizeof(bb_node_t)); tree->root = tree->nil; return tree; }
/* NOTE(review): this chunk opens mid-function -- the signature of this
 * entry-free helper precedes the visible region.  The body frees one
 * hashtable entry, first running the table's payload destructor if set.
 */
{
    if (htable->free_payload_func != NULL)
        (*htable->free_payload_func)(dcontext, entry->payload);
    HEAP_TYPE_FREE(dcontext, entry, generic_entry_t, ACCT_OTHER, PROTECTED);
}

/* Wrapper routines to implement our generic_entry_t and free-func layer */

/* Allocates and initializes a generic hash table on dcontext's heap.
 * free_payload_func (may be NULL) is recorded on the table and, per the
 * free helper above, is invoked with (dcontext, payload) when an entry is
 * freed.  Caller owns the returned table.
 */
generic_table_t *
generic_hash_create(dcontext_t *dcontext, uint bits, uint load_factor_percent,
                    uint table_flags,
                    void (*free_payload_func)(dcontext_t *, void *)
                    _IF_DEBUG(const char *table_name))
{
    generic_table_t *table =
        HEAP_TYPE_ALLOC(dcontext, generic_table_t, ACCT_OTHER, PROTECTED);
    hashtable_generic_init(dcontext, table, bits, load_factor_percent,
                           (hash_function_t)INTERNAL_OPTION(alt_hash_func),
                           0 /* hash_mask_offset */,
                           table_flags _IF_DEBUG(table_name));
    table->free_payload_func = free_payload_func;
    return table;
}

/* Removes all entries from htable; thin wrapper over the generated routine. */
void
generic_hash_clear(dcontext_t *dcontext, generic_table_t *htable)
{
    hashtable_generic_clear(dcontext, htable);
}

/* NOTE(review): signature only -- the body of generic_hash_destroy lies
 * beyond this chunk.
 */
void
generic_hash_destroy(dcontext_t *dcontext, generic_table_t *htable)
/* Frees one hashtable entry, first running the table's payload destructor
 * if one was registered.  NOTE(review): the function's return type is on a
 * line above this chunk.
 */
hashtable_generic_free_entry(dcontext_t *dcontext, generic_table_t *htable,
                             generic_entry_t *entry)
{
    if (htable->free_payload_func != NULL)
        (*htable->free_payload_func)(entry->payload);
    HEAP_TYPE_FREE(dcontext, entry, generic_entry_t, ACCT_OTHER, PROTECTED);
}

/* Wrapper routines to implement our generic_entry_t and free-func layer */

/* Allocates and initializes a generic hash table.  free_payload_func (may
 * be NULL) is invoked with just the payload when entries are freed (see
 * helper above).  NOTE(review): storage comes from GLOBAL_DCONTEXT even
 * though a dcontext is passed in -- confirm this asymmetry is intentional
 * and that the destroy path frees from GLOBAL_DCONTEXT as well.
 */
generic_table_t *
generic_hash_create(dcontext_t *dcontext, uint bits, uint load_factor_percent,
                    uint table_flags, void (*free_payload_func)(void*)
                    _IF_DEBUG(const char *table_name))
{
    generic_table_t *table =
        HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, generic_table_t, ACCT_OTHER, PROTECTED);
    hashtable_generic_init(GLOBAL_DCONTEXT, table, bits, load_factor_percent,
                           (hash_function_t)INTERNAL_OPTION(alt_hash_func),
                           0 /* hash_mask_offset */,
                           table_flags _IF_DEBUG(table_name));
    table->free_payload_func = free_payload_func;
    return table;
}

/* Removes all entries from htable; thin wrapper over the generated routine. */
void
generic_hash_clear(dcontext_t *dcontext, generic_table_t *htable)
{
    hashtable_generic_clear(dcontext, htable);
}

/* NOTE(review): dangling return type -- the next definition continues past
 * this chunk.
 */
void
/* NOTE(review): this chunk opens mid-function with two consecutive returns;
 * presumably the first is under conditional compilation upstream (the tail
 * of a lock-ownership query) -- confirm against the lines above this chunk.
 */
    return self_owns_write_lock(&module_data_lock);
    return false;
}

/**************** module_area routines *****************/

/* view_size can be the size of the first mapping, to handle non-contiguous
 * modules -- we'll update the module's size in os_module_area_init()
 */
static module_area_t *
module_area_create(app_pc base, size_t view_size, bool at_map,
                   const char *filepath _IF_UNIX(uint64 inode))
{
    module_area_t *ma =
        HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, module_area_t, ACCT_VMAREAS, PROTECTED);
    memset(ma, 0, sizeof(*ma));
    ma->start = base;
    ma->end = base + view_size; /* updated in os_module_area_init () */
    os_module_area_init(ma, base, view_size, at_map, filepath _IF_UNIX(inode)
                        HEAPACCT(ACCT_VMAREAS));
    return ma;
}

/* Tears down a module_area_t created by module_area_create(): releases the
 * OS-specific state and the module name strings, then frees the struct.
 */
static void
module_area_delete(module_area_t *ma)
{
    os_module_area_reset(ma HEAPACCT(ACCT_VMAREAS));
    free_module_names(&ma->names HEAPACCT(ACCT_VMAREAS));
    HEAP_TYPE_FREE(GLOBAL_DCONTEXT, ma, module_area_t, ACCT_VMAREAS, PROTECTED);
}