static void heaptracker_free_leaked_memory(void)
{
    struct hdr *del;

    if (num)
        log_message("+++ THERE ARE %d LEAKED ALLOCATIONS\n", num);

    while (head) {
        int safe;
        del = head;
        log_message("+++ DELETING %d BYTES OF LEAKED MEMORY AT %p (%d REMAINING)\n",
                del->size, user(del), num);
        if (del_leak(del, &safe)) {
            /* safe == 1, because the allocation is valid */
            log_message("+++ ALLOCATION %p SIZE %d ALLOCATED HERE:\n",
                        user(del), del->size);
            print_backtrace(del->bt, del->bt_depth);
        }
        dlfree(del);
    }

//  log_message("+++ DELETING %d BACKLOGGED ALLOCATIONS\n", backlog_num);
    while (backlog_head) {
        del = backlog_tail;
        del_from_backlog(del);
        dlfree(del);
    }
}
void free_symbol_table(symbol_table_t* table)
{
    size_t i;
    if (table) {
        for (i = 0; i < table->num_symbols; i++) {
            dlfree(table->symbols[i].name);
        }
        dlfree(table->symbols);
        dlfree(table);
    }
}
static Dyndev*
dlload(char *path, Dynsym *tab, int ntab)
{
    Fd f;
    Dyndev *l;

    f.fd = kopen(path, OREAD);
    if(f.fd < 0)
        error("cannot open");
    if(waserror()){
        kclose(f.fd);
        nexterror();
    }
    l = mallocz(sizeof(Dyndev), 1);
    if(l == nil)
        error(Enomem);
    if(waserror()){
        dlfree(l);
        nexterror();
    }
    l->path = strdup(path);
    if(l->path == nil)
        error(Enomem);
    l->o = dynloadgen(&f, readfd, seekfd, errfd, tab, ntab, 0);
    if(l->o == nil)
        error(up->env->errstr);
    poperror();
    poperror();
    kclose(f.fd);
    return l;
}
static void
devunload(char *path)
{
    int i, dc;
    Dyndev *l, **ll;

    dc = 0;
    if(strlen(path) == 1)
        dc = path[0];
    for(ll = &loaded; *ll != nil; ll = &(*ll)->next){
        if(path != nil && strcmp(path, (*ll)->path) == 0)
            break;
        if(dc != 0 && (*ll)->dev && dc == (*ll)->dev->dc)
            break;
    }
    if((l = *ll) != nil){
        for(i = 0; i < ndevs; i++)
            if(l->dev == devtab[i]){
                devtab[i] = nil;
                break;
            }
        /* if(l->dev) l->dev->shutdown(); */
        *ll = l->next;
        dlfree(l);
    }
}
static void
devload(char *path)
{
    int i;
    Dyndev *l;
    Dev *dev;
    char devname[32];

    l = dlload(path, _exporttab, dyntabsize(_exporttab));
    if(waserror()){
        dlfree(l);
        nexterror();
    }
    snprint(devname, sizeof(devname), "%sdevtab", "XXX");	/* TO DO */
    dev = dynimport(l->o, devname, signof(*dev));
    if(dev == nil)
        error("no devtab");
    if(devno(dev->dc, 1) >= 0)
        error("device loaded");
    for(i = 0; devtab[i] != nil; i++)
        ;
    if(i >= ndevs || devtab[i+1] != nil)
        error("device table full");
    l->dev = devtab[i] = dev;
    dev->init();
    l->next = loaded;
    loaded = l;
    poperror();
}
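/*
 * dlload and devload above use Inferno's waserror()/poperror() discipline:
 * each waserror() pushes a setjmp-style handler, error() unwinds to the most
 * recently pushed one, and poperror() discards a handler once the guarded
 * resource is safely owned. A rough user-space sketch of the same idiom with
 * setjmp/longjmp -- NOT the kernel implementation: the real code keeps the
 * handler stack per process, while this single global stack is not reentrant.
 */
#include <setjmp.h>
#include <stdio.h>

#define MAXERR 8
static jmp_buf err_stack[MAXERR];
static int     err_depth;

/* Push a handler; evaluates nonzero when unwinding back through it. */
#define waserror() setjmp(err_stack[err_depth++])
/* Discard the most recent handler once cleanup is no longer needed. */
#define poperror() (err_depth--)

/* Propagate to the next handler up the stack (Inferno's nexterror). */
static void nexterror(void)
{
    longjmp(err_stack[--err_depth], 1);
}

/* Report an error and unwind to the most recent handler. */
static void error(const char *msg)
{
    fprintf(stderr, "error: %s\n", msg);
    nexterror();
}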
void* dlrealloc(void* m, size_t bytes) //maks
{
    void* newMem = NULL;

    if(bytes == 0 && m) {
        /* realloc(p, 0) acts as a free */
        dlfree(m);
        return NULL;
    }

    do {
#ifdef DISABLE_DLMALLOC
        newMem = realloc(m, bytes);
#else
        newMem = nedrealloc(m, bytes);
#endif
        if(!newMem && bytes > 0) {
            /* Try to allocate again after the error handler frees memory */
            if(!MemoryAllocationError(bytes)) {
                /* failed */
                break;
            }
        }
    } while(!newMem);

    return newMem;
}
/** Frees a portion of memory allocated with orxMemory_Allocate
 * @param[in] _pMem  Pointer on the memory allocated by orx
 */
void orxFASTCALL orxMemory_Free(void *_pMem)
{
    /* Checks */
    orxASSERT((sstMemory.u32Flags & orxMEMORY_KU32_STATIC_FLAG_READY) == orxMEMORY_KU32_STATIC_FLAG_READY);

#ifdef __orxPROFILER__

    /* Valid? */
    if(_pMem != NULL)
    {
        orxMEMORY_TYPE eMemType;
        size_t         uMemoryChunkSize;

        /* Updates pointer */
        _pMem = (orxU8 *)_pMem - sizeof(orxMEMORY_TYPE);

        /* Gets memory type from memory chunk tag */
        eMemType = *(orxMEMORY_TYPE *)_pMem;

        /* Gets memory chunk size */
        uMemoryChunkSize = dlmalloc_usable_size(_pMem);

        /* Updates memory tracker */
        orxMemory_Track(eMemType, (orxU32)(uMemoryChunkSize - sizeof(orxMEMORY_TYPE)), orxFALSE);
    }

#endif /* __orxPROFILER__ */

    /* System call to free memory */
    dlfree(_pMem);

    return;
}
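/*
 * orxMemory_Free relies on a tag prefix: the allocator over-allocates by
 * sizeof(tag), stores the tag at the start of the chunk, and hands the caller
 * the address just past it; free() walks the pointer back to recover the tag.
 * A minimal self-contained sketch of that pattern follows -- the names
 * (tag_malloc, tag_free, mem_type_t) are hypothetical, not part of orx.
 */
#include <stdlib.h>

typedef int mem_type_t;

static void *tag_malloc(size_t bytes, mem_type_t type)
{
    /* Over-allocate so the tag can live in front of the user area. */
    mem_type_t *chunk = malloc(sizeof(mem_type_t) + bytes);
    if (chunk == NULL)
        return NULL;
    *chunk = type;          /* stash the tag */
    return chunk + 1;       /* user pointer starts just past the tag */
}

static void tag_free(void *user_ptr)
{
    if (user_ptr == NULL)
        return;
    /* Walk back to the real chunk start, where the tag was stored. */
    mem_type_t *chunk = (mem_type_t *)user_ptr - 1;
    free(chunk);            /* the tag in *chunk could be logged here */
}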
static void
free_chunklist (CodeChunk *chunk)
{
    CodeChunk *dead;

#if defined(HAVE_VALGRIND_MEMCHECK_H) && defined (VALGRIND_JIT_UNREGISTER_MAP)
    int valgrind_unregister = 0;
    if (RUNNING_ON_VALGRIND)
        valgrind_unregister = 1;
#define valgrind_unregister(x) do { if (valgrind_unregister) { VALGRIND_JIT_UNREGISTER_MAP(NULL,x); } } while (0)
#else
#define valgrind_unregister(x)
#endif

    for (; chunk; ) {
        dead = chunk;
        mono_profiler_code_chunk_destroy ((gpointer) dead->data);
        if (code_manager_callbacks.chunk_destroy)
            code_manager_callbacks.chunk_destroy ((gpointer)dead->data);
        chunk = chunk->next;
        if (dead->flags == CODE_FLAG_MMAP) {
            codechunk_vfree (dead->data, dead->size);
            /* valgrind_unregister(dead->data); */
        } else if (dead->flags == CODE_FLAG_MALLOC) {
            dlfree (dead->data);
        }
        code_memory_used -= dead->size;
        g_free (dead);
    }
}
/* This routine serves as entry point for 'malloc'.
 * Primary responsibility of this routine is to allocate requested number of
 * bytes (plus prefix, and suffix guards), and report allocation to the
 * emulator.
 */
void* qemu_instrumented_malloc(size_t bytes) {
    MallocDesc desc;

    /* Initialize block descriptor and allocate memory. Note that dlmalloc
     * returns a valid pointer on zero allocation. Let's mimic this behavior. */
    desc.prefix_size = DEFAULT_PREFIX_SIZE;
    desc.requested_bytes = bytes;
    desc.suffix_size = DEFAULT_SUFFIX_SIZE;
    desc.ptr = dlmalloc(mallocdesc_alloc_size(&desc));
    if (desc.ptr == NULL) {
        qemu_error_log("<libc_pid=%03u, pid=%03u> malloc(%u): dlmalloc(%u) failed.",
                       malloc_pid, getpid(), bytes, mallocdesc_alloc_size(&desc));
        return NULL;
    }

    // Fire up event in the emulator.
    if (notify_qemu_malloc(&desc)) {
        log_mdesc(error, &desc, "<libc_pid=%03u, pid=%03u>: malloc: notify_malloc failed for ",
                  malloc_pid, getpid());
        dlfree(desc.ptr);
        return NULL;
    } else {
#if TEST_ACCESS_VIOLATIONS
        test_access_violation(&desc);
#endif  // TEST_ACCESS_VIOLATIONS
        log_mdesc(info, &desc, "+++ <libc_pid=%03u, pid=%03u> malloc(%u) -> ",
                  malloc_pid, getpid(), bytes);
        return mallocdesc_user_ptr(&desc);
    }
}
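/*
 * The descriptor arithmetic above follows a common guard-zone layout:
 * [ prefix guard | user bytes | suffix guard ], where the instrumented
 * allocator asks dlmalloc for the full span and hands out a pointer just
 * past the prefix. A minimal sketch of the two helper computations, with
 * guard_desc_t / guard_alloc_size / guard_user_ptr as illustrative stand-ins
 * for MallocDesc / mallocdesc_alloc_size / mallocdesc_user_ptr (the real
 * descriptor has more fields):
 */
#include <stddef.h>
#include <stdint.h>

typedef struct {
    void    *ptr;             /* base of the whole chunk */
    uint32_t prefix_size;     /* guard bytes before the user area */
    size_t   requested_bytes; /* what the caller asked for */
    uint32_t suffix_size;     /* guard bytes after the user area */
} guard_desc_t;

/* Total bytes to request from the underlying allocator. */
static size_t guard_alloc_size(const guard_desc_t *desc)
{
    return desc->prefix_size + desc->requested_bytes + desc->suffix_size;
}

/* Pointer handed back to the caller: just past the prefix guard. */
static void *guard_user_ptr(const guard_desc_t *desc)
{
    return (uint8_t *)desc->ptr + desc->prefix_size;
}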
Plugin::~Plugin()
{
    if (_module) {
        if (_pluginDelete)
            _pluginDelete();
        dlfree((void*)_module);
    }
}
__LIBC_HIDDEN__ void deinit_mapinfo(mapinfo *mi) {
    mapinfo *del;
    while (mi) {
        del = mi;
        mi = mi->next;
        dlfree(del);
    }
}
void DLAllocator::TFree(void* ptr)
{
    if(!ptr)
        return;

    /* Step back over the bookkeeping header to the real chunk start. */
    void* realPtr = (void*)((char*)ptr - sizeof(MemoryHead));
    MemoryHead* pHead = (MemoryHead*)(realPtr);
    pHead->MemInfo &= 0x7fffffff;   /* clear the top status bit before releasing */
    dlfree(realPtr);
}
void shmem_free(void *ptr)
{
    SHMEM_ERR_CHECK_INITIALIZED();

    SHMEM_MUTEX_LOCK(shmem_internal_mutex_alloc);
    dlfree(ptr);
    SHMEM_MUTEX_UNLOCK(shmem_internal_mutex_alloc);

    shmem_internal_barrier_all();
}
void mbed_ufree(void * ptr)
{
    void * caller = (void*) caller_addr();
    ualloc_debug(UALLOC_DEBUG_LOG, "uf c:%p m:%p\n", caller, ptr);
    uintptr_t ptr_tmp = (uintptr_t) ptr;
    if ((ptr_tmp < (uintptr_t) mbed_sbrk_ptr) && (ptr_tmp >= (uintptr_t)&__mbed_sbrk_start)) {
        dlfree(ptr);
    } else {
        ualloc_debug(UALLOC_DEBUG_LOG, "uf c:%p m:%p non-heap free\n", caller, ptr);
    }
}
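/*
 * mbed_ufree guards dlfree with a range check: only pointers inside
 * [heap start, current sbrk break) are heap blocks; anything else is
 * logged and ignored rather than handed to the allocator. A minimal
 * sketch of the same idea with hypothetical bounds -- heap_start and
 * heap_end stand in for __mbed_sbrk_start and mbed_sbrk_ptr:
 */
#include <stdint.h>
#include <stdlib.h>

extern char heap_start[];      /* first byte of the heap region */
extern char *heap_end;         /* current program break */

static int is_heap_ptr(const void *p)
{
    uintptr_t u = (uintptr_t)p;
    return u >= (uintptr_t)heap_start && u < (uintptr_t)heap_end;
}

static void safe_free(void *p)
{
    if (is_heap_ptr(p))
        free(p);               /* genuine heap block: release it */
    /* else: statically allocated or foreign pointer, silently skip */
}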
void FC_SHPDEALLOC(void **addr, fortran_integer_t *errcode, fortran_integer_t *want_abort)
{
    SHMEM_ERR_CHECK_INITIALIZED();

    SHMEM_MUTEX_LOCK(shmem_internal_mutex_alloc);
    dlfree(*addr);
    SHMEM_MUTEX_UNLOCK(shmem_internal_mutex_alloc);

    *errcode = 0;
    shmem_internal_barrier_all();
}
void substring::set( char k, bool icase )
{
    _icase = icase;
    _from = _to = _icase ? ::tolower(k) : k;
    if(_shf)
        dlfree(_shf);
    _shf = 0;
    _subs = &_from;     //for comparison in find_onechar
    _len = 1;
}
/* Release a chunk of memory allocated with ffi_closure_alloc.  If
   FFI_CLOSURE_FREE_CODE is nonzero, the given address can be the writable
   or the executable address given.  Otherwise, only the writable address
   can be provided here.  */
void
ffi_closure_free (void *ptr)
{
#if FFI_CLOSURE_FREE_CODE
  msegmentptr seg = segment_holding_code (gm, ptr);

  if (seg)
    ptr = sub_segment_exec_offset (ptr, seg);
#endif

  dlfree (ptr);
}
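/*
 * The #if branch above handles W^X dual mapping: the same pages are mapped
 * twice, once writable and once executable, at a fixed offset from each
 * other, so an executable address converts to its writable twin by
 * subtracting that offset. A simplified sketch of that translation,
 * assuming per-segment bookkeeping -- code_segment_t and its fields are
 * hypothetical stand-ins for libffi's internal segment records:
 */
#include <stddef.h>

typedef struct {
    char     *writable_base;  /* start of the writable mapping */
    size_t    size;           /* length of the mapping */
    ptrdiff_t exec_offset;    /* exec view = writable view + exec_offset */
} code_segment_t;

/* Does p fall inside this segment's executable view? */
static int in_exec_view(const code_segment_t *seg, const void *p)
{
    const char *exec_base = seg->writable_base + seg->exec_offset;
    return (const char *)p >= exec_base && (const char *)p < exec_base + seg->size;
}

/* Map an address from the executable view back to the writable view. */
static void *to_writable(const code_segment_t *seg, void *exec_ptr)
{
    return (char *)exec_ptr - seg->exec_offset;
}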
static inline void add_to_backlog(hdr_t* hdr) {
    ScopedPthreadMutexLocker locker(&backlog_lock);

    hdr->tag = BACKLOG_TAG;
    backlog_num++;
    add_locked(hdr, &backlog_tail, &backlog_head);
    poison(hdr);
    /* If we've exceeded the maximum backlog, clear it up */
    while (backlog_num > gMallocDebugBacklog) {
        hdr_t* gone = backlog_tail;
        del_from_backlog_locked(gone);
        dlfree(gone->base);
    }
}
/* This routine serves as entry point for 'free'.
 * Primary responsibility of this routine is to free requested memory, and
 * report free block to the emulator.
 */
void qemu_instrumented_free(void* mem) {
    MallocDesc desc;

    if (mem == NULL) {
        // Just let go NULL free
        dlfree(mem);
        return;
    }

    // Query emulator for the freeing block information.
    if (query_qemu_malloc_info(mem, &desc, 1)) {
        error_log("<libc_pid=%03u, pid=%03u>: free(%p) query_info failed.",
                  malloc_pid, getpid(), mem);
        return;
    }

#if TEST_ACCESS_VIOLATIONS
    test_access_violation(&desc);
#endif  // TEST_ACCESS_VIOLATIONS

    /* Make sure that pointer that's being freed matches what we expect
     * for this memory block. Note that this violation should be already
     * caught in the emulator. */
    if (mem != mallocdesc_user_ptr(&desc)) {
        log_mdesc(error, &desc, "<libc_pid=%03u, pid=%03u>: free(%p) is invalid for ",
                  malloc_pid, getpid(), mem);
        return;
    }

    // Fire up event in the emulator and free block that was actually allocated.
    if (notify_qemu_free(mem)) {
        log_mdesc(error, &desc, "<libc_pid=%03u, pid=%03u>: free(%p) notify_free failed for ",
                  malloc_pid, getpid(), mem);
    } else {
        log_mdesc(info, &desc, "--- <libc_pid=%03u, pid=%03u> free(%p) -> ",
                  malloc_pid, getpid(), mem);
        dlfree(desc.ptr);
    }
}
void shmem_free(void *ptr)
{
    SHMEM_ERR_CHECK_INITIALIZED();
    if (ptr != NULL) {
        SHMEM_ERR_CHECK_SYMMETRIC_HEAP(ptr);
    }

    shmem_internal_barrier_all();

    SHMEM_MUTEX_LOCK(shmem_internal_mutex_alloc);
    dlfree(ptr);
    SHMEM_MUTEX_UNLOCK(shmem_internal_mutex_alloc);
}
static inline void add_to_backlog(struct hdr *hdr) {
    pthread_mutex_lock(&backlog_lock);

    hdr->tag = BACKLOG_TAG;
    backlog_num++;
    add_locked(hdr, &backlog_tail, &backlog_head);
    poison(hdr);
    /* If we've exceeded the maximum backlog, clear it up */
    while (backlog_num > malloc_double_free_backlog) {
        struct hdr *gone = backlog_tail;
        del_from_backlog_locked(gone);
        dlfree(gone);
    }

    pthread_mutex_unlock(&backlog_lock);
}
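/*
 * Both add_to_backlog variants above implement a free quarantine: a freed
 * block is poisoned and parked in a FIFO instead of being returned to the
 * allocator, so a later double free or use-after-free can still be detected;
 * only when the queue overflows is the oldest entry really dlfree'd. A
 * self-contained sketch of that policy -- quarantine_push and the fixed-size
 * ring below are illustrative, not the original data structures:
 */
#include <stdlib.h>
#include <string.h>

#define QUARANTINE_CAP 64
#define POISON_BYTE    0xAA

static void  *quarantine[QUARANTINE_CAP];
static size_t q_head, q_count;   /* FIFO ring state */

static void quarantine_push(void *block, size_t size)
{
    /* Poison the block so any use-after-free reads stand out. */
    memset(block, POISON_BYTE, size);

    if (q_count == QUARANTINE_CAP) {
        /* Queue full: really release the oldest entry. */
        free(quarantine[q_head]);
        q_head = (q_head + 1) % QUARANTINE_CAP;
        q_count--;
    }
    quarantine[(q_head + q_count) % QUARANTINE_CAP] = block;
    q_count++;
}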
void free(void* ptr)
{
    if(IS_OUR_PTR(ptr)) {
        if(!pthread_equal(pthread_self(), main_thread)) {
            //this will queue up the free to be performed later in the
            //main thread when it wants more memory.
            free_memory_from_other_thread(ptr);
            return;
        }

        free_memory(ptr);
        return;
    }

    pthread_mutex_lock(&dlmalloc_mutex);
    dlfree(ptr);
    pthread_mutex_unlock(&dlmalloc_mutex);
}
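/*
 * The free() override above routes cross-thread frees through a deferred
 * queue: only the main thread touches the custom pool, so other threads
 * just record the pointer and let the main thread drain it later. A minimal
 * sketch of such a deferred-free queue, assuming a mutex-protected
 * singly-linked list -- defer_free and drain_deferred_frees are hypothetical
 * names, not the functions used above:
 */
#include <pthread.h>
#include <stdlib.h>

typedef struct deferred_node {
    void *ptr;
    struct deferred_node *next;
} deferred_node;

static deferred_node  *deferred_head;
static pthread_mutex_t deferred_lock = PTHREAD_MUTEX_INITIALIZER;

/* Called from any non-main thread instead of freeing directly. */
static void defer_free(void *ptr)
{
    deferred_node *n = malloc(sizeof *n);
    if (n == NULL)
        return;                 /* out of memory: leak rather than block */
    n->ptr = ptr;
    pthread_mutex_lock(&deferred_lock);
    n->next = deferred_head;
    deferred_head = n;
    pthread_mutex_unlock(&deferred_lock);
}

/* Called periodically from the main thread to perform the real frees. */
static void drain_deferred_frees(void)
{
    pthread_mutex_lock(&deferred_lock);
    deferred_node *n = deferred_head;
    deferred_head = NULL;
    pthread_mutex_unlock(&deferred_lock);
    while (n) {
        deferred_node *next = n->next;
        free(n->ptr);           /* main thread owns the allocator here */
        free(n);
        n = next;
    }
}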
void starFree( void * ptr )
{
#if USE_AST_MALLOC
    int ast_status = 0;
    int *old_ast_status = NULL;
#endif

#if STARMEM_DEBUG
    if (STARMEM_PRINT_MALLOC)
        printf(__FILE__": Free pointer %p\n", ptr );
#endif

    switch ( STARMEM_MALLOC ) {
    case STARMEM__SYSTEM:
        free( ptr );
        break;

    case STARMEM__AST:
#if USE_AST_MALLOC
        old_ast_status = astWatch( &ast_status );
        astFree( ptr );
        astWatch( old_ast_status );
#else
        starMemFatalAST;
#endif
        break;

    case STARMEM__DL:
        dlfree( ptr );
        break;

    case STARMEM__GC:
        /* Nothing to do if garbage collector selected */
        break;

    default:
        starMemFatalNone;
    }

    return;
}
/* This routine serves as entry point for 'memalign'.
 * This routine behaves similarly to qemu_instrumented_malloc.
 */
void* qemu_instrumented_memalign(size_t alignment, size_t bytes) {
    MallocDesc desc;

    if (bytes == 0) {
        // Just let go zero bytes allocation.
        qemu_info_log("::: <libc_pid=%03u, pid=%03u>: memalign(%X, %u) redir to malloc",
                      malloc_pid, getpid(), alignment, bytes);
        return qemu_instrumented_malloc(0);
    }

    /* Prefix size for aligned allocation must be equal to the alignment used
     * for allocation in order to ensure proper alignment of the returned
     * pointer, in case that alignment requirement is greater than prefix
     * size. */
    desc.prefix_size = alignment > DEFAULT_PREFIX_SIZE ? alignment : DEFAULT_PREFIX_SIZE;
    desc.requested_bytes = bytes;
    desc.suffix_size = DEFAULT_SUFFIX_SIZE;
    desc.ptr = dlmemalign(desc.prefix_size, mallocdesc_alloc_size(&desc));
    if (desc.ptr == NULL) {
        error_log("<libc_pid=%03u, pid=%03u> memalign(%X, %u): dlmemalign(%u) failed.",
                  malloc_pid, getpid(), alignment, bytes, mallocdesc_alloc_size(&desc));
        return NULL;
    }
    if (notify_qemu_malloc(&desc)) {
        log_mdesc(error, &desc, "<libc_pid=%03u, pid=%03u>: memalign(%X, %u): notify_malloc failed for ",
                  malloc_pid, getpid(), alignment, bytes);
        dlfree(desc.ptr);
        return NULL;
    }

#if TEST_ACCESS_VIOLATIONS
    test_access_violation(&desc);
#endif  // TEST_ACCESS_VIOLATIONS

    log_mdesc(info, &desc, "@@@ <libc_pid=%03u, pid=%03u> memalign(%X, %u) -> ",
              malloc_pid, getpid(), alignment, bytes);
    return mallocdesc_user_ptr(&desc);
}
// O(1) for small objects
void deallocate( pointer block, size_type count ) const throw()
{
    assert(ms_pool);
    assert( block && "null pointer argument" );
    POOL_MUTEX_LOCK;
    if (ms_pool->contains(block)) {
        ms_pool->deallocate(block);
    } else {
#ifdef USE_DLMALLOC
        dlfree((char*)block);
#else
        free((void*)block);
#endif
    }
    ms_allocCount--;
    POOL_MUTEX_UNLOCK;
}
void PoolAllocatorEx::TFree(void* ptr)
{
    LOCK(&m_lock);

    if(!ptr)
        return;

    PushMemory(ptr);

    if((int32)m_count > m_maxSize) {
        // free more memory
        for (int32 i = m_maxSize; i < m_count && m_head; ++i) {
            void* getPtr = PopMemory();
            if(!getPtr)
                return;

            void* realPtr = (void*)((char*)getPtr - sizeof(MemoryHead));
            dlfree(realPtr);
            m_memUsage -= m_cellSize;
            m_count--;
        }
    }
}
void _free_r( struct _reent* r, void* ptr )
{
    dlfree( ptr );
}
static CodeChunk*
new_codechunk (CodeChunk *last, int dynamic, int size)
{
    int minsize, flags = CODE_FLAG_MMAP;
    int chunk_size, bsize = 0;
    int pagesize, valloc_granule;
    CodeChunk *chunk;
    void *ptr;

#ifdef FORCE_MALLOC
    flags = CODE_FLAG_MALLOC;
#endif

    pagesize = mono_pagesize ();
    valloc_granule = mono_valloc_granule ();

    if (dynamic) {
        chunk_size = size;
        flags = CODE_FLAG_MALLOC;
    } else {
        minsize = MAX (pagesize * MIN_PAGES, valloc_granule);
        if (size < minsize)
            chunk_size = minsize;
        else {
            /* Allocate MIN_ALIGN-1 more than we need so we can still */
            /* guarantee MIN_ALIGN alignment for individual allocs    */
            /* from mono_code_manager_reserve_align.                  */
            size += MIN_ALIGN - 1;
            size &= ~(MIN_ALIGN - 1);
            chunk_size = size;
            chunk_size += valloc_granule - 1;
            chunk_size &= ~ (valloc_granule - 1);
        }
    }
#ifdef BIND_ROOM
    if (dynamic)
        /* Reserve more space since there are no other chunks we might use if this one gets full */
        bsize = (chunk_size * 2) / BIND_ROOM;
    else
        bsize = chunk_size / BIND_ROOM;
    if (bsize < MIN_BSIZE)
        bsize = MIN_BSIZE;
    bsize += MIN_ALIGN - 1;
    bsize &= ~ (MIN_ALIGN - 1);
    if (chunk_size - size < bsize) {
        chunk_size = size + bsize;
        if (!dynamic) {
            chunk_size += valloc_granule - 1;
            chunk_size &= ~ (valloc_granule - 1);
        }
    }
#endif

    if (flags == CODE_FLAG_MALLOC) {
        ptr = dlmemalign (MIN_ALIGN, chunk_size + MIN_ALIGN - 1);
        if (!ptr)
            return NULL;
    } else {
        /* Try to allocate code chunks next to each other to help the VM */
        ptr = NULL;
        if (last)
            ptr = codechunk_valloc ((guint8*)last->data + last->size, chunk_size);
        if (!ptr)
            ptr = codechunk_valloc (NULL, chunk_size);
        if (!ptr)
            return NULL;
    }

    if (flags == CODE_FLAG_MALLOC) {
#ifdef BIND_ROOM
        /* Make sure the thunks area is zeroed */
        memset (ptr, 0, bsize);
#endif
    }

    chunk = (CodeChunk *) g_malloc (sizeof (CodeChunk));
    if (!chunk) {
        if (flags == CODE_FLAG_MALLOC)
            dlfree (ptr);
        else
            mono_vfree (ptr, chunk_size, MONO_MEM_ACCOUNT_CODE);
        return NULL;
    }

    chunk->next = NULL;
    chunk->size = chunk_size;
    chunk->data = (char *) ptr;
    chunk->flags = flags;
    chunk->pos = bsize;
    chunk->bsize = bsize;
    if (code_manager_callbacks.chunk_new)
        code_manager_callbacks.chunk_new ((gpointer)chunk->data, chunk->size);
    mono_profiler_code_chunk_new ((gpointer) chunk->data, chunk->size);

    code_memory_used += chunk_size;
    mono_runtime_resource_check_limit (MONO_RESOURCE_JIT_CODE, code_memory_used);
    /*printf ("code chunk at: %p\n", ptr);*/
    return chunk;
}
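/*
 * new_codechunk rounds sizes up with the classic power-of-two alignment
 * idiom, (x + a - 1) & ~(a - 1), applied once for MIN_ALIGN and once for
 * the valloc granule. A small sketch of the idiom in isolation, with an
 * assertion documenting the power-of-two precondition:
 */
#include <assert.h>
#include <stddef.h>

/* Round x up to the next multiple of align (align must be a power of two). */
static size_t align_up(size_t x, size_t align)
{
    assert(align != 0 && (align & (align - 1)) == 0);
    return (x + align - 1) & ~(align - 1);
}

/* Example: align_up(5000, 4096) == 8192; align_up(4096, 4096) == 4096. */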
extern "C" void* chk_realloc(void* ptr, size_t size) { // log_message("%s: %s\n", __FILE__, __FUNCTION__); if (!ptr) { return chk_malloc(size); } #ifdef REALLOC_ZERO_BYTES_FREE if (!size) { chk_free(ptr); return NULL; } #endif hdr_t* hdr = meta(ptr); if (del(hdr) < 0) { uintptr_t bt[MAX_BACKTRACE_DEPTH]; int depth = get_backtrace(bt, MAX_BACKTRACE_DEPTH); if (hdr->tag == BACKLOG_TAG) { log_message("+++ REALLOCATION %p SIZE %d OF FREED MEMORY!\n", user(hdr), size, hdr->size); log_message("+++ ALLOCATION %p SIZE %d ALLOCATED HERE:\n", user(hdr), hdr->size); log_backtrace(hdr->bt, hdr->bt_depth); /* hdr->freed_bt_depth should be nonzero here */ log_message("+++ ALLOCATION %p SIZE %d FIRST FREED HERE:\n", user(hdr), hdr->size); log_backtrace(hdr->freed_bt, hdr->freed_bt_depth); log_message("+++ ALLOCATION %p SIZE %d NOW BEING REALLOCATED HERE:\n", user(hdr), hdr->size); log_backtrace(bt, depth); /* We take the memory out of the backlog and fall through so the * reallocation below succeeds. Since we didn't really free it, we * can default to this behavior. */ del_from_backlog(hdr); } else { log_message("+++ REALLOCATION %p SIZE %d IS CORRUPTED OR NOT ALLOCATED VIA TRACKER!\n", user(hdr), size); log_backtrace(bt, depth); // just get a whole new allocation and leak the old one return dlrealloc(0, size); // return dlrealloc(user(hdr), size); // assuming it was allocated externally } } if (hdr->base != hdr) { // An allocation from memalign, so create another allocation and // copy the data out. void* newMem = dlmalloc(sizeof(hdr_t) + size + sizeof(ftr_t)); if (newMem) { memcpy(newMem, hdr, sizeof(hdr_t) + hdr->size); dlfree(hdr->base); hdr = static_cast<hdr_t*>(newMem); } else { dlfree(hdr->base); hdr = NULL; } } else { hdr = static_cast<hdr_t*>(dlrealloc(hdr, sizeof(hdr_t) + size + sizeof(ftr_t))); } if (hdr) { hdr->base = hdr; hdr->bt_depth = get_backtrace(hdr->bt, MAX_BACKTRACE_DEPTH); add(hdr, size); return user(hdr); } return NULL; }
void free_malloc_leak_info(uint8_t* info) {
    dlfree(info);
}