/* umock value-copier for WSIO_CONFIG*: deep-copies *source into a freshly
 * allocated WSIO_CONFIG so the mock framework can retain call arguments.
 * Returns 0 on success, a non-zero line number on failure.
 * A NULL source pointer is copied as a NULL destination. */
static int umocktypes_copy_WSIO_CONFIG_ptr(WSIO_CONFIG** destination, const WSIO_CONFIG** source)
{
    int result;
    if (*source == NULL)
    {
        *destination = NULL;
        result = 0;
    }
    else
    {
        *destination = (WSIO_CONFIG*)real_malloc(sizeof(WSIO_CONFIG));
        if (*destination == NULL)
        {
            result = __LINE__;
        }
        else
        {
            /* Copy each string member in turn; every failure path unwinds
             * exactly what was allocated so far, in reverse order. */
            if (copy_string((char**)&((*destination)->hostname), (*source)->hostname) != 0)
            {
                real_free(*destination);
                result = __LINE__;
            }
            else if (copy_string((char**)&((*destination)->resource_name), (*source)->resource_name) != 0)
            {
                real_free((char*)(*destination)->hostname);
                real_free(*destination);
                result = __LINE__;
            }
            else if (copy_string((char**)&((*destination)->protocol), (*source)->protocol) != 0)
            {
                real_free((char*)(*destination)->resource_name);
                real_free((char*)(*destination)->hostname);
                real_free(*destination);
                result = __LINE__;
            }
            else
            {
                /* Scalar members are copied directly; underlying_io_parameters
                 * is itself a registered umock type, so delegate to its copier. */
                (*destination)->port = (*source)->port;
                (*destination)->underlying_io_interface = (*source)->underlying_io_interface;
                if (umocktypes_copy("TLSIO_CONFIG*", &((*destination)->underlying_io_parameters), &((*source)->underlying_io_parameters)) != 0)
                {
                    real_free((char*)(*destination)->resource_name);
                    real_free((char*)(*destination)->hostname);
                    real_free((char*)(*destination)->protocol);
                    real_free(*destination);
                    result = __LINE__;
                }
                else
                {
                    result = 0;
                }
            }
        }
    }
    return result;
}
/* umock value-copier for HTTP_PROXY_IO_CONFIG*: deep-copies *source into a
 * freshly allocated struct so mock call arguments outlive the caller.
 * Returns 0 on success, a non-zero line number on failure; a NULL source is
 * copied as a NULL destination. */
static int umocktypes_copy_HTTP_PROXY_IO_CONFIG_ptr(HTTP_PROXY_IO_CONFIG** destination, const HTTP_PROXY_IO_CONFIG** source)
{
    int result;
    if (*source == NULL)
    {
        *destination = NULL;
        result = 0;
    }
    else
    {
        *destination = (HTTP_PROXY_IO_CONFIG*)real_malloc(sizeof(HTTP_PROXY_IO_CONFIG));
        if (*destination == NULL)
        {
            result = __LINE__;
        }
        else
        {
            /* Copy the four string members one by one; each failure path frees
             * everything allocated so far, in reverse order. */
            if (copy_string((char**)&((*destination)->hostname), (*source)->hostname) != 0)
            {
                real_free(*destination);
                result = __LINE__;
            }
            else if (copy_string((char**)&((*destination)->proxy_hostname), (*source)->proxy_hostname) != 0)
            {
                real_free((char*)((*destination)->hostname));
                real_free(*destination);
                result = __LINE__;
            }
            else if (copy_string((char**)&((*destination)->username), (*source)->username) != 0)
            {
                real_free((char*)((*destination)->proxy_hostname));
                real_free((char*)((*destination)->hostname));
                real_free(*destination);
                result = __LINE__;
            }
            else if (copy_string((char**)&((*destination)->password), (*source)->password) != 0)
            {
                real_free((char*)((*destination)->username));
                real_free((char*)((*destination)->proxy_hostname));
                real_free((char*)((*destination)->hostname));
                real_free(*destination);
                result = __LINE__;
            }
            else
            {
                /* Scalar members are copied directly. */
                (*destination)->port = (*source)->port;
                (*destination)->proxy_port = (*source)->proxy_port;
                result = 0;
            }
        }
    }
    return result;
}
static void umocktypes_free_HTTP_PROXY_IO_CONFIG_ptr(HTTP_PROXY_IO_CONFIG** value)
{
    /* umock value-destructor: releases a HTTP_PROXY_IO_CONFIG produced by the
       matching copy function. A NULL config is a no-op. */
    HTTP_PROXY_IO_CONFIG* config = *value;
    if (config == NULL)
    {
        return;
    }
    /* Each string member was allocated by the copy helper; const is cast away
       because the struct exposes the fields as const char*. */
    real_free((void*)config->hostname);
    real_free((void*)config->proxy_hostname);
    real_free((void*)config->username);
    real_free((void*)config->password);
    real_free(config);
}
static void umocktypes_free_WSIO_CONFIG_ptr(WSIO_CONFIG** value)
{
    /* umock value-destructor for WSIO_CONFIG*: frees the deep copy made by
       the matching copy function. A NULL config is a no-op. */
    WSIO_CONFIG* config = *value;
    if (config == NULL)
    {
        return;
    }
    /* underlying_io_parameters was duplicated via umocktypes_copy, so it has
       to be released through the registered destructor for its type. */
    umocktypes_free("TLSIO_CONFIG*", &(config->underlying_io_parameters));
    real_free((void*)config->hostname);
    real_free((void*)config->resource_name);
    real_free((void*)config->protocol);
    real_free(config);
}
static void umocktypes_free_TLSIO_CONFIG_ptr(TLSIO_CONFIG** value)
{
    /* umock value-destructor for TLSIO_CONFIG*. A NULL config is a no-op. */
    TLSIO_CONFIG* config = *value;
    if (config == NULL)
    {
        return;
    }
    /* underlying_io_parameters is only a deep copy when an underlying
       interface is present (mirrors the matching copy function). */
    if (config->underlying_io_interface != NULL)
    {
        umocktypes_free("HTTP_PROXY_IO_CONFIG*", &(config->underlying_io_parameters));
    }
    real_free((void*)config->hostname);
    real_free(config);
}
static void myth_malloc_wrapper_free(void *ptr) { #ifdef MYTH_WRAP_MALLOC_RUNTIME /* before wrapping completed, we simply forget about it. (real_free not available yet, so we cannot call it. the problem may be deeper. the ptr may have been allocated by yet another function (not the original system malloc), so even passing it to real_free may not be the right action */ if (!g_wrap_malloc_completed) { /* leak */ return; } if (!g_wrap_malloc) { /* we call real_free, except for region we have allocated before wrapping is complete */ if (!sys_alloc_region(ptr)) { return real_free(ptr); } } #endif if (!ptr)return; if (!real_free){ real_free=dlsym(RTLD_NEXT,"free"); assert(real_free); } #ifdef MYTH_WRAP_MALLOC_DLSYM_ENABLED //do nothing if in dlsym region intptr_t s,e; s=(intptr_t)s_malloc_dlsym_region; e=s+MYTH_WRAP_MALLOC_DLSYM_SIZE; if (s<=((intptr_t)ptr) && ((intptr_t)ptr)<e)return; #endif malloc_wrapper_header_t rptr=(malloc_wrapper_header_t)ptr; rptr--; uint64_t idx=rptr->s.fl_index; if (idx>=FREE_LIST_NUM){ //fprintf(stderr,"free A,%p,%d\n",rptr->s.org_ptr,(int)idx); real_free(rptr->s.org_ptr); return; } if (g_worker_thread_num && (g_alloc_hook_ok==g_worker_thread_num)){ myth_running_env_t env; env=myth_get_current_env(); int rank=env->rank; myth_freelist_push(g_myth_malloc_wrapper_fl[rank][idx],(void**)rptr); return ; } //fprintf(stderr,"free B,%p,%d\n",rptr->s.org_ptr,(int)idx); real_free(rptr->s.org_ptr); }
/* Interposed free(): dispatches between the mmalloc heap, the real libc
 * free, and the static bootstrap "junkareas". */
void free(void *p)
{
    if (p==NULL)
        return;
    /* NOTE(review): junkareas[MAX_JUNK_AREAS] indexes one slot past the array
     * to form an end address — valid only if junkareas is a 2-D array whose
     * row decays to a pointer; confirm its declaration. */
    if (p<(void*)junkareas || p>=(void*)(junkareas[MAX_JUNK_AREAS]) ) {
        // main use case
        xbt_mheap_t mdp = __mmalloc_current_heap;
        if (mdp) {
            /* A heap is configured: free under its lock. */
            LOCK(mdp);
            mfree(mdp, p);
            UNLOCK(mdp);
        } else {
            real_free(p);
        }
    } else {
        // We are in the junkarea.
        // This area is used to allocate memory at initilization time.
        if(allocated_junk && p==junkareas[allocated_junk-1]) {
            // Last junkarea. We can reuse it.
            allocated_junk--;
        } else {
            // We currently cannot reuse freed junkareas in the general case.
        }
    }
}
void __terminal_hook_free(void *ptr, const void *caller)
{
    /* Resolve the underlying free implementation once and cache it in a
       function-local static, preferring the explicit __real_free symbol. */
    static void (*real_free)(void*);

    if (real_free == NULL)
        real_free = fake_dlsym(RTLD_DEFAULT, "__real_free");
    if (real_free == NULL)
        real_free = fake_dlsym(RTLD_DEFAULT, "free"); // probably infinite regress...
    if (real_free == NULL)
        abort();

    real_free(ptr);
}
void _free(void* p) { if (p) { RegisterFree(p); real_free(p); } }
/* Tear down the malloc-wrapper bookkeeping allocated at init time. */
void myth_malloc_wrapper_fini()
{
#ifdef MYTH_WRAP_MALLOC_RUNTIME
  /* is it possible to come here before myth_malloc_wrapper_init is called? */
  if (!g_wrap_malloc) return;
#endif
  /* Release the top-level array of per-worker freelists. */
  real_free(g_myth_malloc_wrapper_fl);
}
/* Release memory obtained from the bypass aligned-allocation path, updating
 * the memory profiler's counters.  In debug builds the (ptr, size) pair is
 * first validated against the bypass checker table. */
ATTRIBUTE_NO_SANITIZE
void bypass_aligned_free(void* ptr, size_t size) noexcept {
#if !defined(NDEBUG) && BYPASS_CHECKER
    {
        std::unique_lock<std::mutex> lock(s_bypass_mutex);
        size_t i;
        for (i = 0; i < kBypassCheckerSize; ++i) {
            if (s_bypass_checker[i].first != ptr) continue;
            if (s_bypass_checker[i].second == size) {
                /* Matching entry found: clear the slot and stop searching. */
                s_bypass_checker[i].first = nullptr;
                break;
            }
            /* Pointer is recorded, but with a different size than the caller
             * passed — report the mismatch and abort. */
            printf(PPREFIX "bypass_aligned_free() checker: "
                   "ptr %p size %zu mismatches allocation of %zu\n",
                   ptr, size, s_bypass_checker[i].second);
            abort();
        }
        if (i == kBypassCheckerSize) {
            /* Pointer was never recorded (or already freed). */
            printf(PPREFIX "bypass_aligned_free() checker: "
                   "ptr = %p size %zu was not found\n", ptr, size);
            abort();
        }
    }
#endif
    /* Account for the released bytes and the allocation count. */
    ssize_t mycurr = sync_sub_and_fetch(base_curr, size);
    sync_sub_and_fetch(current_allocs, 1);
    update_memprofile(get(float_curr), mycurr);
#if defined(_MSC_VER)
    return _aligned_free(ptr);
#else
    if (real_aligned_alloc) {
        /* Block came directly from aligned_alloc: free it as-is. */
        return real_free(ptr);
    }
    else {
        /* Manual-alignment fallback: the original malloc pointer was stashed
         * in the slot immediately before the aligned address. */
        real_free((reinterpret_cast<void**>(ptr))[-1]);
    }
#endif
}
/* umock value-copier for TLSIO_CONFIG*: deep-copies *source into a freshly
 * allocated TLSIO_CONFIG.  Returns 0 on success, a non-zero line number on
 * failure; a NULL source is copied as a NULL destination.
 * underlying_io_parameters is copied shallowly first and only deep-copied
 * (as an HTTP_PROXY_IO_CONFIG*) when an underlying interface is set. */
static int umocktypes_copy_TLSIO_CONFIG_ptr(TLSIO_CONFIG** destination, const TLSIO_CONFIG** source)
{
    int result;
    if (*source == NULL)
    {
        *destination = NULL;
        result = 0;
    }
    else
    {
        *destination = (TLSIO_CONFIG*)real_malloc(sizeof(TLSIO_CONFIG));
        if (*destination == NULL)
        {
            result = __LINE__;
        }
        else
        {
            if (copy_string((char**)&((*destination)->hostname), (*source)->hostname) != 0)
            {
                real_free(*destination);
                result = __LINE__;
            }
            else
            {
                (*destination)->port = (*source)->port;
                (*destination)->underlying_io_interface = (*source)->underlying_io_interface;
                /* Shallow copy stands when no underlying interface exists. */
                (*destination)->underlying_io_parameters = (*source)->underlying_io_parameters;
                if (((*destination)->underlying_io_interface != NULL) &&
                    (umocktypes_copy("HTTP_PROXY_IO_CONFIG*", &((*destination)->underlying_io_parameters), &((*source)->underlying_io_parameters)) != 0))
                {
                    /* Deep copy of the nested config failed: unwind. */
                    real_free((char*)((*destination)->hostname));
                    real_free(*destination);
                    result = __LINE__;
                }
                else
                {
                    result = 0;
                }
            }
        }
    }
    return result;
}
void free(void *ptr) { size_t usable,size,cnt = 0u; unsigned int i; char *p; /* If ptr is NULL, no operation is performed.*/ if (ptr == NULL){ return real_free(ptr); } usable = malloc_usable_size(ptr); /* At the first, check fixed redzone. If overwritten, following size info is maybe invalid */ for (p = (char *)P_F_RZ(ptr, usable); p < (char *)P_F_RZ(ptr, usable) + SIZEOF_F_RZ; p++){ if(*p != MAGIC_BYTE) cnt++; } if (cnt == SIZEOF_F_RZ){ ofc_count = cnt; /* for testing */ /* Maybe size info was broken */ OFC_DUMP_COUNT_MAYBE(cnt); ofc_bt(); real_free(ptr); return; } size = *(size_t *)P_SIZE(ptr, usable); OFC_DUMP(ptr, usable, size); p = P_RZ(ptr, usable, size); for (i = 0; i < SIZEOF_RZ(usable ,size) - SIZEOF_F_RZ; i++) { if(*(p + i) != MAGIC_BYTE) cnt++; } if (cnt){ ofc_count = cnt; /* for testing */ OFC_DUMP_COUNT(cnt); OFC_DUMP_INFO(ptr,size); ofc_bt(); } real_free(ptr); return; }
void free(void *ptr) { char *p = ptr; if (buf <= p && p < &buf[MALLOC_BUFSIZE]) return; if (real_free) real_free(ptr); }
/* Interposed free(): counts frees and prints the malloc/free delta.
   The `block` flags guard against recursion, since printf itself may
   allocate and free memory. */
void free(void *ptr)
{
    if (real_free == NULL)
        real_free = dlsym(RTLD_NEXT, "free");

    /* Re-entered from our own logging (or from library code): pass straight
       through without instrumenting. */
    if (block || lib_block) {
        real_free(ptr);
        return;
    }

    if (ptr != NULL)
        free_count++;

    block = true;
    printf("free =>\t%p\n", ptr);
    printf("Delta = %ld\n", malloc_count - free_count);
    real_free(ptr);
    block = false;
}
/* Per-worker teardown: release this worker's freelist array and note that
 * one fewer worker participates in the allocation hook. */
void myth_malloc_wrapper_fini_worker(int rank)
{
#ifdef MYTH_WRAP_MALLOC_RUNTIME
  /* is it possible to come here before myth_malloc_wrapper_init is called? */
  if (!g_wrap_malloc) return;
#endif
  //Release freelist contents
  /*for (i=0;i<FREE_LIST_NUM;i++){
  }*/
  //Release the array
  real_free(g_myth_malloc_wrapper_fl[rank]);
  /* Decrement the ready-worker count so the hook stops using freelists once
     workers start going away. */
  __sync_fetch_and_sub(&g_alloc_hook_ok,1);
}
/* Zone-aware free hook: release the object through the underlying zone
   allocator, then drop our bookkeeping entry for it. */
static void track_free(malloc_zone_t *zone, void *p)
{
    real_free(zone, p);
    ExternalObjects.remove(p);
}
/* Interposed free(): updates memory-usage accounting and writes a trace
 * record before forwarding to the real free. */
void free(void *ptr)
{
    int foreign;
    sig_atomic_t memuse;
    sig_atomic_t memruse = 0;
    size_t rsize = 0;
    /* NOTE(review): MEM_HEAD is evaluated before the NULL check below —
     * presumably it is pure pointer arithmetic so nothing is dereferenced
     * for ptr == NULL; confirm the macro's definition. */
    struct log_malloc_s *mem = MEM_HEAD(ptr);

    if(!DL_RESOLVE_CHECK(free) || ptr == NULL)
        return;

    /* check if we allocated it: our headers satisfy size == ~cb */
    foreign = (mem->size != ~mem->cb);
    memuse = __sync_sub_and_fetch(&g_ctx.mem_used, (foreign) ? 0: mem->size);
#ifdef HAVE_MALLOC_USABLE_SIZE
    memruse = __sync_sub_and_fetch(&g_ctx.mem_rused, (foreign) ? 0 : mem->rsize);
    /* For blocks we did not allocate, ask the allocator for the size. */
    if(foreign)
        rsize = malloc_usable_size(ptr);
#endif
#ifndef DISABLE_CALL_COUNTS
    (void)__sync_fetch_and_add(&g_ctx.stat.free, 1);
    g_ctx.stat.unrel_sum++;
#endif
    if(!g_ctx.memlog_disabled)
    {
        int s;
        char buf[LOG_BUFSIZE];
        //getrusage(RUSAGE_SELF, &ruse);
        if(!foreign)
        {
            s = snprintf(buf, sizeof(buf), "+ free -%zu %p [%u:%u]\n",
                mem->size, MEM_PTR(mem), memuse, memruse);
        }
        else
        {
            /* "!f" marks a block that was not allocated through us. */
            s = snprintf(buf, sizeof(buf), "+ free -%zu %p [%u:%u] !f\n",
                rsize, ptr, memuse, memruse);
        }
        log_trace(buf, s, sizeof(buf), foreign);
    }
    /* For our own blocks, free the header (the real allocation start). */
    real_free((foreign) ? ptr : mem);
    return;
}
/* Interposed free(): forwards to the real free and, when a log stream is
 * open, records the freed pointer together with a stack trace.
 *
 * Fixes:
 *  - the nested `if(!real_free){ if(!real_free) return; }` collapsed to a
 *    single guard (the inner test could never differ from the outer one);
 *  - `fprintf(..., "free 0x%08x ", ptr)` passed a pointer where %x expects
 *    an unsigned int — undefined behavior and truncation on 64-bit; the
 *    correct conversion for pointers is %p. */
void free(void *ptr)
{
    /* Without a resolved real free we cannot release anything. */
    if(!real_free)
        return;

    real_free(ptr);

    if(memlog) {
        LOCK;
        fprintf(memlog, "free %p ", ptr);
        print_stack();
        fprintf(memlog, "\n");
        UNLOCK;
    }
}
/* Test hook for free(): removes the first occurrence of ptr from the
 * saved_malloc_returns bookkeeping array, then frees the block.
 *
 * Fix: the original dual-index compaction loop read
 * saved_malloc_returns[j] with j == saved_malloc_returns_count (one past the
 * live range) whenever ptr was the last recorded entry, and its [i]-based
 * match test could mis-handle duplicates after shifting.  Rewritten as an
 * explicit find-then-shift, which removes exactly one entry and never reads
 * past the live range. */
static void TEST_free(void* ptr)
{
    int i;

    /* Locate ptr among the recorded malloc returns. */
    for (i = 0; i < saved_malloc_returns_count; i++) {
        if (saved_malloc_returns[i] == ptr)
            break;
    }
    if (i < saved_malloc_returns_count) {
        /* Shift the tail down over the removed slot. */
        for (; i < saved_malloc_returns_count - 1; i++)
            saved_malloc_returns[i] = saved_malloc_returns[i + 1];
        saved_malloc_returns_count--;
    }

    real_free(ptr);
}
/* Free a right-canary allocation: the user pointer was placed so the buffer
 * ends exactly at the guard page.  Restore the guard page to RW, then free
 * the real (page-aligned) allocation start.
 *
 * Fix: the original did arithmetic on void* (`ptr - ...`, `start + ...`),
 * which is a GNU extension, not ISO C; use char* for the arithmetic. */
void tst_free_canary_right(void *ptr, size_t size)
{
    size_t pagesize = sysconf(_SC_PAGESIZE);
    /* Number of pages the payload occupies, rounding partial pages up. */
    size_t pages = size/pagesize + !!(size%pagesize);
    /* The payload was right-aligned inside its pages, so the allocation
       start sits (pagesize - size%pagesize) bytes before ptr. */
    char *start = size%pagesize ? (char*)ptr - (pagesize - size%pagesize)
                                : (char*)ptr;

    /* Reset the memory protection back to RW */
    if (mprotect(start + pagesize * pages, pagesize, PROT_READ | PROT_WRITE))
        perror("mprotect");

    if (!real_free)
        real_free = dlsym(RTLD_NEXT, "free");

    real_free(start);
}
/* Free a left-canary allocation: the guard page sits immediately below the
 * returned pointer.  Restore it to RW, then free the real allocation start.
 *
 * Fixes:
 *  - arithmetic on void* (`ptr - pagesize`) is a GNU extension, not ISO C;
 *    use char* for the arithmetic;
 *  - `size` was tagged __attribute__((unused)) yet is used in the printf
 *    below — the misleading attribute is removed. */
void tst_free_canary_left(void *ptr, size_t size)
{
    size_t pagesize = sysconf(_SC_PAGESIZE);
    char *start = (char*)ptr - pagesize;

    /* Reset the memory protection back to RW */
    if (mprotect(start, pagesize, PROT_READ | PROT_WRITE)) {
        printf("%p size %zu\n", ptr, size);
        perror("mprotect");
    }

    if (!real_free)
        real_free = dlsym(RTLD_NEXT, "free");

    real_free(start);
}
/**
 * Interposed free(): zero the allocated memory (using the header information)
 * before calling the real free function, so freed contents cannot leak.
 *
 * Fix: `(ptr - store_ptr->ptr)` subtracted two void* values, which is a GNU
 * extension, not ISO C; the subtraction is now done on char*.
 *
 * NOTE(review): a plain memset immediately before free can be elided by an
 * optimizing compiler; if this wrapper is meant to scrub secrets, consider
 * explicit_bzero/memset_explicit — confirm the project's target compilers.
 */
void free(void *ptr)
{
    if (ptr) {
        /* The bookkeeping header sits immediately before the user pointer. */
        struct alloc_header *store_ptr = (struct alloc_header *)ptr;
        store_ptr--;
#ifdef CHECK_COOKIE
        if (store_ptr->cookie != ALLOC_COOKIE) {
            fprintf(stderr, "%s: Invalid pointer\n", __func__);
            return;
        }
#endif
        /* Zero from the real allocation start through the end of the
           requested payload (header padding + user data). */
        memset(store_ptr->ptr, 0,
               (size_t)((char *)ptr - (char *)store_ptr->ptr) + store_ptr->requested_size);
        real_free(store_ptr->ptr);
    }
}
/**
 * Default free used until symbol resolution succeeds: force the constructor,
 * then forward to the real free if it was resolved.
 */
static void default_free(void *ptr)
{
    /*
     * free() may be called before the constructor has run; trigger the
     * resolution explicitly.
     */
    init_malloc();

    /* Resolution failed: real_free still points at us, so bail out. */
    if (real_free == default_free) {
        debug("Failed to resolve 'free'\n");
        return;
    }

    real_free(ptr);
}
void Buffer::Resize(unsigned int newSize) { if (Data == NULL) { Data = (char*)real_malloc(newSize); assert(Data); } else { // Allocate the new size, copy the data, and free the old memory char* newData = (char*)real_malloc(newSize); assert(newData); unsigned int numBytesToCopy = MIN(newSize, CurrentSize); memcpy(newData, Data, numBytesToCopy); real_free(Data); Data = newData; } CurrentSize = newSize; }
/* Interposed free(): updates allocation statistics for blocks we recorded in
 * the hash table, then releases both the bookkeeping record and the block. */
void free(void* devPtr)
{
    FUNC_ENTER_LOG;
    cpu_cma_init();
    /* Look up the record created by the matching malloc wrapper. */
    mem_region_t* mr = (mem_region_t*)g_hash_table_lookup(hash_table, devPtr);
    if(mr == NULL){
        /* Not one of ours (or already released): silently ignore. */
        //printf("ERROR: memory region not found in hash table\n");
        //exit(1);
        return;
    }
    mem_stat.num_free ++;
    mem_stat.tot_mem_alloc -= mr->size;
    g_hash_table_remove(hash_table, devPtr);
    /* NOTE(review): if hash_table was created with a value-destroy function,
       g_hash_table_remove above already freed mr and this real_free(mr) is a
       double free — confirm how the table is constructed. */
    real_free(mr);
    (*real_free)(devPtr);
    FUNC_EXIT_LOG;
    return;
}
/* Interposed free(): releases the block via the real allocator, then drops
 * the allocation record stored when the block was handed out.  Only the
 * pointer VALUE is used after the free, so no freed memory is touched. */
void free(void *ptr)
{
    START_CALL();
    real_free(ptr);
    remove_message_by_ptr(&alloc_msg_store, (uintptr_t)ptr);
}
void* malloc(size_t size) { FUNC_ENTER_LOG; static int depth = 0; depth ++; cpu_cma_init(); void* ptr= real_malloc(size); if(ptr == NULL){ printf("Error: real_malloc failed in %s\n", __FUNCTION__); depth--; exit(1); } if(depth > 1){ //we are in a loop of ourselves, get out now depth--; return ptr; } //get the stack back trace int buflen = MAX_NUM_STACKS ; void* buffer[buflen]; int n = backtrace(buffer, buflen); if(n <2){ printf("Error: backtrace call failed(n=%d)\n",n); depth--; exit(1); } char** s = backtrace_symbols(buffer, n); if(s == NULL){ printf("Error: bactrace_symbols call failed\n"); depth--; exit(1); } if(!strncmp(s[1], __progname_full, strlen(__progname_full)) == 0){ //only record cals from the exeutible, not from libs real_free(s); depth--; return ptr; } mem_region_t * mr = real_malloc(sizeof(mem_region_t)); if(mr == NULL){ printf("ERROR: malloc faied for mem_region_t\n"); depth--; exit(1); } mr->ptr = ptr; mr->size= size; mr->num_stacks= n; int i; for(i=0;i < n; i++){ strncpy(mr->backtrace[i], s[i], MAX_NAME_LEN); } g_hash_table_insert(hash_table, mr->ptr, mr); mem_stat.num_alloc ++; mem_stat.tot_mem_alloc += mr->size; if(mem_stat.tot_mem_alloc > mem_stat.peak_mem_alloc){ mem_stat.peak_mem_alloc = mem_stat.tot_mem_alloc; } real_free(s); depth -- ; FUNC_EXIT_LOG; return ptr; }
/* Test driver: builds a device tree with a memory node and a set of test
 * reservations, then verifies that mem_region_add_dt_reserved() publishes
 * matching reserved-names / reserved-ranges properties. */
int main(void)
{
    const struct dt_property *names, *ranges;
    struct mem_region *r;
    unsigned int i, l, c;
    uint64_t *rangep;
    const char *name;
    void *buf;

    /* Use malloc for the heap, so valgrind can find issues. */
    skiboot_heap.start = (long)real_malloc(TEST_HEAP_SIZE);
    skiboot_heap.len = TEST_HEAP_SIZE;
    skiboot_os_reserve.len = skiboot_heap.start;

    dt_root = dt_new_root("");
    dt_add_property_cells(dt_root, "#address-cells", 2);
    dt_add_property_cells(dt_root, "#size-cells", 2);

    /* Back the device tree's memory node with a real allocation. */
    buf = real_malloc(1024*1024);
    add_mem_node((unsigned long)buf, 1024*1024);

    /* Now convert. */
    mem_region_init();

    /* create our reservations */
    for (i = 0; i < ARRAY_SIZE(test_regions); i++)
        mem_reserve(test_regions[i].name, test_regions[i].addr, 0x1000);

    /* release unused */
    mem_region_release_unused();

    /* and create reservations */
    mem_region_add_dt_reserved();

    /* ensure we can't create further reservations */
    r = new_region("test.4", 0x5000, 0x1000, NULL, REGION_RESERVED);
    assert(!add_region(r));

    /* check dt properties */
    names = dt_find_property(dt_root, "reserved-names");
    ranges = dt_find_property(dt_root, "reserved-ranges");
    assert(names && ranges);

    /* walk through names & ranges properies, ensuring that the test
     * regions are all present */
    for (name = names->prop, rangep = (uint64_t *)ranges->prop, c = 0;
            name < names->prop + names->len;
            name += l, rangep += 2) {
        uint64_t addr;

        addr = dt_get_number(rangep, 2);
        /* names are packed NUL-terminated strings; step past this one */
        l = strlen(name) + 1;

        for (i = 0; i < ARRAY_SIZE(test_regions); i++) {
            if (strcmp(test_regions[i].name, name))
                continue;
            assert(test_regions[i].addr == addr);
            assert(!test_regions[i].found);
            test_regions[i].found = true;
            c++;
        }
    }
    /* every test region must have been matched exactly once */
    assert(c == ARRAY_SIZE(test_regions));

    dt_free(dt_root);
    real_free(buf);
    real_free((void *)(long)skiboot_heap.start);
    return 0;
}
/* Interposed free(): forwards to the host C library's free, bracketed by
 * _native_syscall_enter/_native_syscall_leave (presumably marking entry into
 * host-OS code for the native runtime — confirm against their definitions). */
void free(void *ptr)
{
    _native_syscall_enter();
    real_free(ptr);
    _native_syscall_leave();
}