/* Frees an instrlist_t container previously allocated by instrlist_create().
 * The list must already be empty (both first and last NULL): only the list
 * object itself is released here, never the instructions it once held.
 */
void
instrlist_destroy(dcontext_t *dcontext, instrlist_t *ilist)
{
    CLIENT_ASSERT(ilist->first == NULL && ilist->last == NULL,
                  "instrlist_destroy: list not empty");
    heap_free(dcontext, ilist, sizeof(instrlist_t) HEAPACCT(ACCT_IR));
}
/* Allocates a new, empty instrlist_t on the dcontext heap and initializes it.
 * The caller owns the returned list and must release it with
 * instrlist_destroy() once it has been emptied.
 */
instrlist_t *
instrlist_create(dcontext_t *dcontext)
{
    instrlist_t *list =
        (instrlist_t *)heap_alloc(dcontext, sizeof(instrlist_t) HEAPACCT(ACCT_IR));
    CLIENT_ASSERT(list != NULL, "instrlist_create: allocation error");
    instrlist_init(list);
    return list;
}
static void our_memcpy_vs_libc(void) { /* Compare our memcpy with libc memcpy. * XXX: Should compare on more sizes, especially small ones. */ size_t alloc_size = 20 * 1024; int loop_count = 100 * 1000; void *src = global_heap_alloc(alloc_size HEAPACCT(ACCT_OTHER)); void *dst = global_heap_alloc(alloc_size HEAPACCT(ACCT_OTHER)); int i; memcpy_t glibc_memcpy = (memcpy_t) dlsym(RTLD_NEXT, "memcpy"); uint64 our_memcpy_start, our_memcpy_end, our_memcpy_time; uint64 libc_memcpy_start, libc_memcpy_end, libc_memcpy_time; memset(src, -1, alloc_size); memset(dst, 0, alloc_size); our_memcpy_start = query_time_millis(); for (i = 0; i < loop_count; i++) { memcpy(src, dst, alloc_size); } our_memcpy_end = query_time_millis(); libc_memcpy_start = query_time_millis(); for (i = 0; i < loop_count; i++) { glibc_memcpy(src, dst, alloc_size); } libc_memcpy_end = query_time_millis(); global_heap_free(src, alloc_size HEAPACCT(ACCT_OTHER)); global_heap_free(dst, alloc_size HEAPACCT(ACCT_OTHER)); our_memcpy_time = our_memcpy_end - our_memcpy_start; libc_memcpy_time = libc_memcpy_end - libc_memcpy_start; print_file(STDERR, "our_memcpy_time: "UINT64_FORMAT_STRING"\n", our_memcpy_time); print_file(STDERR, "libc_memcpy_time: "UINT64_FORMAT_STRING"\n", libc_memcpy_time); /* We could assert that we're not too much slower, but that's a recipe for * flaky failures when the suite is run on shared VMs or in parallel. */ }
/* Allocates and zero-initializes this thread's trace-monitor state,
 * storing it in dcontext->monitor_field.
 */
void
monitor_thread_init(dcontext_t *dcontext)
{
    monitor_data_t *md = (monitor_data_t *)
        heap_alloc(dcontext, sizeof(monitor_data_t) HEAPACCT(ACCT_TRACE));
    /* need to be filled up */
    memset(md, 0, sizeof(*md));
    dcontext->monitor_field = (void *)md;
}
static void print_all_ldt(void) { int i, bytes; /* can't fit 64K on our stack */ raw_ldt_entry_t *ldt = global_heap_alloc(sizeof(raw_ldt_entry_t) * LDT_ENTRIES HEAPACCT(ACCT_OTHER)); /* make sure our struct size jives w/ ldt.h */ ASSERT(sizeof(raw_ldt_entry_t) == LDT_ENTRY_SIZE); memset(ldt, 0, sizeof(*ldt)); bytes = modify_ldt_syscall(0, (void *)ldt, sizeof(raw_ldt_entry_t) * LDT_ENTRIES); LOG(GLOBAL, LOG_ALL, 3, "read %d bytes, should == %d * %d\n", bytes, sizeof(raw_ldt_entry_t), LDT_ENTRIES); ASSERT(bytes == 0 /* no ldt entries */ || bytes == sizeof(raw_ldt_entry_t) * LDT_ENTRIES); for (i = 0; i < bytes/sizeof(raw_ldt_entry_t); i++) { if (((ldt[i].base3124<<24) | (ldt[i].base2316<<16) | ldt[i].base1500) != 0) { LOG(GLOBAL, LOG_ALL, 1, "ldt at index %d:\n", i); print_raw_ldt(&ldt[i]); } } global_heap_free(ldt, sizeof(raw_ldt_entry_t) * LDT_ENTRIES HEAPACCT(ACCT_OTHER)); }
/* Redirected RtlDestroyHeap: heaps we nop-ed in redirect_RtlCreateHeap are
 * torn down by freeing their token block; anything else is forwarded to the
 * real RtlDestroyHeap.
 */
BOOL WINAPI
redirect_RtlDestroyHeap(HANDLE base)
{
    if (!redirect_heap_call(base))
        return RtlDestroyHeap(base);
    /* XXX i#: need to iterate over all blocks in the heap and free them:
     * would have to keep a list of blocks.
     * For now assume all private heaps practice individual dealloc
     * instead of whole-pool-free.
     */
    LOG(GLOBAL, LOG_LOADER, 2, "%s "PFX"\n", __FUNCTION__, base);
    global_heap_free((byte *)base, 1 HEAPACCT(ACCT_LIBDUP));
    return TRUE;
}
/* Redirected RtlCreateHeap: when private-lib heap redirection is on we do not
 * create a real Heap at all; we hand back a one-byte token allocation so that
 * redirect_RtlDestroyHeap can tell our nop-ed heaps from real ones.
 */
HANDLE WINAPI
redirect_RtlCreateHeap(ULONG flags, void *base, size_t reserve_sz, size_t commit_sz,
                       void *lock, void *params)
{
    if (!IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(privlib_privheap), true))
        return RtlCreateHeap(flags, base, reserve_sz, commit_sz, lock, params);
    /* We don't want to waste space by letting a Heap be created
     * and not used so we nop this. We need to return something
     * here, and distinguish a nop-ed from real in Destroy, so we
     * allocate a token block.
     */
    LOG(GLOBAL, LOG_LOADER, 2, "%s "PFX"\n", __FUNCTION__, base);
    return (HANDLE) global_heap_alloc(1 HEAPACCT(ACCT_LIBDUP));
}
/* Only touches thread-private data and acquires no lock */
/* Queues a nudge argument onto this thread's pending-nudge list
 * (dcontext->nudge_pending), copying *nudge_arg into a freshly
 * heap-allocated pending_nudge_t.
 * NOTE(review): the function body continues beyond this chunk (the list
 * append using `prev` is not visible here).
 */
void nudge_add_pending(dcontext_t *dcontext, nudge_arg_t *nudge_arg) {
    pending_nudge_t *pending = (pending_nudge_t *)
        heap_alloc(dcontext, sizeof(*pending) HEAPACCT(ACCT_OTHER));
    pending_nudge_t *prev;
    /* Take a private copy of the caller's argument struct. */
    pending->arg = *nudge_arg;
    pending->next = NULL;
    /* Simpler to prepend, but we want FIFO. Should be rare to have multiple
     * so not worth storing an end pointer.
     */
    DOSTATS({
        if (dcontext->nudge_pending != NULL)
            STATS_INC(num_pending_nudges);
    });
/* Heap-redirection allocator: allocates size bytes plus a size_t header in
 * which the total allocation size is stashed (read back by wrapped_dr_free),
 * and returns the address just past that header.  Honors HEAP_ZERO_MEMORY.
 * Returns NULL on failure (HEAP_GENERATE_EXCEPTIONS not yet supported).
 */
void *
wrapped_dr_alloc(ULONG flags, SIZE_T size)
{
    byte *mem;
    /* Header must not break the heap's alignment guarantee. */
    ASSERT(sizeof(size_t) >= HEAP_ALIGNMENT);
    /* Guard against wrap-around when adding the header: a near-SIZE_MAX
     * request would otherwise yield a tiny block and an OOB header write. */
    if (size > (SIZE_T)-1 - sizeof(size_t)) {
        ASSERT_NOT_REACHED();
        return NULL;
    }
    size += sizeof(size_t);
    mem = global_heap_alloc(size HEAPACCT(ACCT_LIBDUP));
    if (mem == NULL) {
        /* FIXME: support HEAP_GENERATE_EXCEPTIONS (xref PR 406742) */
        ASSERT_NOT_REACHED();
        return NULL;
    }
    /* Stash the total size so wrapped_dr_free knows how much to release. */
    *((size_t *)mem) = size;
    if (TEST(HEAP_ZERO_MEMORY, flags))
        memset(mem + sizeof(size_t), 0, size - sizeof(size_t));
    return (void *) (mem + sizeof(size_t));
}
/* Allocates this thread's fragment bookkeeping state (per_thread_t) and runs
 * the shared reset-time initialization.  No-op under configurations that run
 * without a code cache.
 */
void
fragment_thread_init(dcontext_t *dcontext)
{
    /* we allocate per_thread_t in the global heap solely for self-protection,
     * even when turned off, since even with a lot of threads this isn't a lot of
     * pressure on the global heap
     */
    per_thread_t *pt;
    /* don't initialize un-needed data for hotp_only & thin_client.
     * FIXME: could set htable initial sizes to 0 for all configurations, instead.
     * per_thread_t is pretty big, so we avoid it, though it costs us checks for
     * hotp_only in the islinking-related routines.
     */
    if (RUNNING_WITHOUT_CODE_CACHE())
        return;
    pt = (per_thread_t *)global_heap_alloc(sizeof(per_thread_t) HEAPACCT(ACCT_OTHER));
    dcontext->fragment_field = (void *) pt;
    /* Fixed typo: was "framgment_reset_init". */
    fragment_reset_init(dcontext);
}
/* Counterpart of wrapped_dr_alloc: backs up over the size_t header stored
 * immediately before ptr, reads the total allocation size from it, and frees
 * the whole block.
 */
void
wrapped_dr_free(byte *ptr)
{
    byte *base = ptr - sizeof(size_t);
    global_heap_free(base, *((size_t *)base) HEAPACCT(ACCT_LIBDUP));
}
/**************** module_area routines *****************/

/* view_size can be the size of the first mapping, to handle non-contiguous
 * modules -- we'll update the module's size in os_module_area_init()
 */
/* Allocates and initializes a module_area_t for a module mapped at [base,
 * base+view_size).  Caller owns the result and frees it with
 * module_area_delete().
 */
static module_area_t *
module_area_create(app_pc base, size_t view_size, bool at_map, const char *filepath
                   _IF_UNIX(uint64 inode))
{
    module_area_t *ma =
        HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, module_area_t, ACCT_VMAREAS, PROTECTED);
    memset(ma, 0, sizeof(*ma));
    ma->start = base;
    ma->end = base + view_size; /* updated in os_module_area_init () */
    os_module_area_init(ma, base, view_size, at_map, filepath _IF_UNIX(inode)
                        HEAPACCT(ACCT_VMAREAS));
    return ma;
}

/* Releases a module_area_t created by module_area_create, including its
 * OS-specific state and name strings.
 */
static void
module_area_delete(module_area_t *ma)
{
    os_module_area_reset(ma HEAPACCT(ACCT_VMAREAS));
    free_module_names(&ma->names HEAPACCT(ACCT_VMAREAS));
    HEAP_TYPE_FREE(GLOBAL_DCONTEXT, ma, module_area_t, ACCT_VMAREAS, PROTECTED);
}

/**************** init/exit routines *****************/

void