void*
public_rEALLOc(void* oldmem, size_t bytes)
{
  struct malloc_arena* ar_ptr;
  mchunkptr oldp;             /* chunk corresponding to oldmem */
  void* newp;                 /* chunk to return */

  void * (*hook) (void *, size_t, const void *) = __realloc_hook;
  if (hook != NULL)
    return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));

#if REALLOC_ZERO_BYTES_FREES
  if (bytes == 0 && oldmem != NULL) {
    public_fREe(oldmem);
    return 0;
  }
#endif

  /* realloc of null is supposed to be same as malloc */
  if (oldmem == 0)
    return public_mALLOc(bytes);

  oldp = mem2chunk(oldmem);
  if (is_mmapped(oldp))
    ar_ptr = arena_for_mmap_chunk(oldp); /* FIXME: use mmap_resize */
  else
    ar_ptr = arena_for_chunk(oldp);

#if THREAD_STATS
  if (!mutex_trylock(&ar_ptr->mutex))
    ++(ar_ptr->stat_lock_direct);
  else {
    (void)mutex_lock(&ar_ptr->mutex);
    ++(ar_ptr->stat_lock_wait);
  }
#else
  (void)mutex_lock(&ar_ptr->mutex);
#endif

#ifndef NO_THREADS
  /* As in malloc(), remember this arena for the next allocation. */
  tsd_setspecific(arena_key, (void *)ar_ptr);
#endif

  if (ar_ptr != &main_arena)
    bytes += FOOTER_OVERHEAD;
  newp = mspace_realloc(arena_to_mspace(ar_ptr), oldmem, bytes);
  if (newp && ar_ptr != &main_arena)
    set_non_main_arena(newp, ar_ptr);
  (void)mutex_unlock(&ar_ptr->mutex);

  assert(!newp || is_mmapped(mem2chunk(newp)) ||
         ar_ptr == arena_for_chunk(mem2chunk(newp)));

  return newp;
}
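/* A minimal sketch of the mmap_resize path the FIXME above alludes to,
   assuming a Linux host: resize the mapping in place (or let the kernel
   move it) with mremap(2) instead of allocate+copy+free.
   mmap_resize_sketch and its handling of the chunk header/offset are
   illustrative guesses, not the original implementation; _GNU_SOURCE
   must be defined before any header for mremap to be declared. */
#ifdef __linux__
#include <sys/mman.h>

static mchunkptr
mmap_resize_sketch(mchunkptr p, size_t new_size)
{
  size_t old_size = chunksize(p);
  char *cp = (char*)mremap((char*)p, old_size, new_size, MREMAP_MAYMOVE);

  if (cp == (char*)MAP_FAILED)
    return 0;                 /* caller falls back to malloc+copy+free */
  return (mchunkptr)cp;       /* chunk header travels with the mapping */
}
#endif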
void*
public_mEMALIGn(size_t alignment, size_t bytes)
{
  struct malloc_arena* ar_ptr;
  void *p;

  void * (*hook) (size_t, size_t, const void *) = __memalign_hook;
  if (hook != NULL)
    return (*hook)(alignment, bytes, RETURN_ADDRESS (0));

  /* If we need less alignment than we provide anyway, just relay to malloc. */
  if (alignment <= MALLOC_ALIGNMENT)
    return public_mALLOc(bytes);

  /* Otherwise, ensure that it is at least a minimum chunk size. */
  if (alignment < MIN_CHUNK_SIZE)
    alignment = MIN_CHUNK_SIZE;

  arena_get(ar_ptr, bytes + FOOTER_OVERHEAD + alignment + MIN_CHUNK_SIZE);
  if (!ar_ptr)
    return 0;

  if (ar_ptr != &main_arena)
    bytes += FOOTER_OVERHEAD;
  p = mspace_memalign(arena_to_mspace(ar_ptr), alignment, bytes);
  if (p && ar_ptr != &main_arena)
    set_non_main_arena(p, ar_ptr);
  (void)mutex_unlock(&ar_ptr->mutex);

  assert(!p || is_mmapped(mem2chunk(p)) ||
         ar_ptr == arena_for_chunk(mem2chunk(p)));

  return p;
}
static void
free_atfork(void* mem, const void *caller)
{
  void *vptr = NULL;
  struct malloc_arena *ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  if (mem == 0)                         /* free(0) has no effect */
    return;

  p = mem2chunk(mem);

  if (is_mmapped(p)) {                  /* release mmapped memory. */
    ar_ptr = arena_for_mmap_chunk(p);
    munmap_chunk(arena_to_mspace(ar_ptr), p);
    return;
  }

  ar_ptr = arena_for_chunk(p);
  tsd_getspecific(arena_key, vptr);
  if (vptr != ATFORK_ARENA_PTR)
    (void)mutex_lock(&ar_ptr->mutex);
  mspace_free(arena_to_mspace(ar_ptr), mem);
  if (vptr != ATFORK_ARENA_PTR)
    (void)mutex_unlock(&ar_ptr->mutex);
}
/* Temporary __free_hook used while the allocator is still initializing;
   only the main arena exists at that point, so no locking is needed. */
static void
free_starter(void* mem, const void *caller)
{
  if (mem) {
    mchunkptr p = mem2chunk(mem);
    void *msp = arena_to_mspace(&main_arena);

    if (is_mmapped(p))
      munmap_chunk(msp, p);
    else
      mspace_free(msp, mem);
  }
  THREAD_STAT(++main_arena.stat_starter);
}
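/* Hypothetical wiring for free_atfork above, modeled on the classic
   ptmalloc atfork scheme; ptmalloc_lock_all(), save_free_hook and the
   ar_ptr->next link are assumptions for this sketch, not taken from the
   code above.  Before fork(), the forking thread grabs every arena lock,
   marks itself through the arena_key TSD slot, and redirects __free_hook
   to free_atfork, so its own frees skip locks it already holds while
   other threads still block on them. */
static void (*save_free_hook)(void*, const void*);

static void
ptmalloc_lock_all(void)
{
  struct malloc_arena *ar_ptr;

  for (ar_ptr = &main_arena; ar_ptr; ar_ptr = ar_ptr->next)
    (void)mutex_lock(&ar_ptr->mutex);
  save_free_hook = __free_hook;
  __free_hook = free_atfork;
  /* frees from this thread now see ATFORK_ARENA_PTR and skip locking */
  tsd_setspecific(arena_key, ATFORK_ARENA_PTR);
}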
void*
public_cALLOc(size_t n_elements, size_t elem_size)
{
  struct malloc_arena* ar_ptr;
  size_t bytes, sz;
  void* mem;
  void * (*hook) (size_t, const void *) = __malloc_hook;

  /* size_t is unsigned so the behavior on overflow is defined. */
  bytes = n_elements * elem_size;
#define HALF_INTERNAL_SIZE_T \
  (((size_t) 1) << (8 * sizeof (size_t) / 2))
  if (__builtin_expect((n_elements | elem_size) >= HALF_INTERNAL_SIZE_T, 0)) {
    if (elem_size != 0 && bytes / elem_size != n_elements) {
      /*MALLOC_FAILURE_ACTION;*/
      return 0;
    }
  }

  if (hook != NULL) {
    sz = bytes;
    mem = (*hook)(sz, RETURN_ADDRESS (0));
    if (mem == 0)
      return 0;
#ifdef HAVE_MEMCPY
    return memset(mem, 0, sz);
#else
    while (sz > 0)
      ((char*)mem)[--sz] = 0;           /* rather inefficient */
    return mem;
#endif
  }

  arena_get(ar_ptr, bytes + FOOTER_OVERHEAD);
  if (!ar_ptr)
    return 0;

  if (ar_ptr != &main_arena)
    bytes += FOOTER_OVERHEAD;
  mem = mspace_calloc(arena_to_mspace(ar_ptr), bytes, 1);
  if (mem && ar_ptr != &main_arena)
    set_non_main_arena(mem, ar_ptr);
  (void)mutex_unlock(&ar_ptr->mutex);

  assert(!mem || is_mmapped(mem2chunk(mem)) ||
         ar_ptr == arena_for_chunk(mem2chunk(mem)));

  return mem;
}
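/* Stand-alone version of the overflow guard above, as a worked example.
   If both operands are below 2^(bits/2), i.e. their OR is below
   HALF_INTERNAL_SIZE_T, the product cannot exceed SIZE_MAX and the
   division test is skipped entirely; otherwise bytes / elem_size !=
   n_elements detects a wrapped product (e.g. on 64-bit, 2^33 * 2^31
   wraps to 0, and 0 / 2^31 != 2^33).  mul_ok() is an illustrative
   helper, not part of the original source. */
static int
mul_ok(size_t n_elements, size_t elem_size, size_t *bytes)
{
  *bytes = n_elements * elem_size;      /* unsigned: wraps, never UB */
  if ((n_elements | elem_size) >= HALF_INTERNAL_SIZE_T
      && elem_size != 0
      && *bytes / elem_size != n_elements)
    return 0;                           /* product overflowed SIZE_MAX */
  return 1;
}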
void*
public_mALLOc(size_t bytes)
{
  struct malloc_arena* ar_ptr;
  void *victim;

  void * (*hook) (size_t, const void *) = __malloc_hook;
  if (hook != NULL)
    return (*hook)(bytes, RETURN_ADDRESS (0));

  arena_get(ar_ptr, bytes + FOOTER_OVERHEAD);
  if (!ar_ptr)
    return 0;

  if (ar_ptr != &main_arena)
    bytes += FOOTER_OVERHEAD;
  victim = mspace_malloc(arena_to_mspace(ar_ptr), bytes);
  if (victim && ar_ptr != &main_arena)
    set_non_main_arena(victim, ar_ptr);
  (void)mutex_unlock(&ar_ptr->mutex);

  assert(!victim || is_mmapped(mem2chunk(victim)) ||
         ar_ptr == arena_for_chunk(mem2chunk(victim)));

  return victim;
}
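/* Illustrative sketch of the footer scheme the wrappers above rely on;
   the real set_non_main_arena()/arena_for_chunk() definitions live
   elsewhere in this source and may differ.  chunk_non_main_arena() and
   the footer layout are assumptions for this sketch: non-main-arena
   requests are grown by FOOTER_OVERHEAD and the owning arena's address
   is stashed at the end of the chunk, so free()/realloc() can recover
   the owner from a bare pointer. */
static struct malloc_arena*
arena_for_chunk_sketch(mchunkptr p)
{
  if (!chunk_non_main_arena(p))         /* untagged => main arena */
    return &main_arena;
  /* assumed layout: arena pointer in the last pointer-sized footer word */
  return *(struct malloc_arena**)
    ((char*)p + chunksize(p) - sizeof(struct malloc_arena*));
}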
void
public_fREe(void* mem)
{
  struct malloc_arena* ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  void (*hook) (void *, const void *) = __free_hook;
  if (hook != NULL) {
    (*hook)(mem, RETURN_ADDRESS (0));
    return;
  }

  if (mem == 0)                         /* free(0) has no effect */
    return;

  p = mem2chunk(mem);

  if (is_mmapped(p)) {                  /* release mmapped memory. */
    ar_ptr = arena_for_mmap_chunk(p);
    munmap_chunk(arena_to_mspace(ar_ptr), p);
    return;
  }

  ar_ptr = arena_for_chunk(p);
#if THREAD_STATS
  if (!mutex_trylock(&ar_ptr->mutex))
    ++(ar_ptr->stat_lock_direct);
  else {
    (void)mutex_lock(&ar_ptr->mutex);
    ++(ar_ptr->stat_lock_wait);
  }
#else
  (void)mutex_lock(&ar_ptr->mutex);
#endif
  mspace_free(arena_to_mspace(ar_ptr), mem);
  (void)mutex_unlock(&ar_ptr->mutex);
}
/* This function is identical to mspace_free, except that PREACTION is
   replaced with 0 (no locking) and POSTACTION with nothing; the caller
   is expected to already hold the arena lock. */
static void
mspace_free_lockless(mspace msp, void* mem)
{
  if (mem != 0) {
    mchunkptr p = mem2chunk(mem);
#if FOOTERS
    mstate fm = get_mstate_for(p);
    msp = msp;                          /* placate people compiling -Wunused */
#else /* FOOTERS */
    mstate fm = (mstate)msp;
#endif /* FOOTERS */
    if (!ok_magic(fm)) {
      USAGE_ERROR_ACTION(fm, p);
      return;
    }
    if (!0) {                           /* was: if (!PREACTION(fm)) */
      check_inuse_chunk(fm, p);
      if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
        size_t psize = chunksize(p);
        mchunkptr next = chunk_plus_offset(p, psize);
        s_allocated_memory -= psize;
        if (!pinuse(p)) {
          size_t prevsize = p->prev_foot;
          if (is_mmapped(p)) {
            psize += prevsize + MMAP_FOOT_PAD;
            if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
              fm->footprint -= psize;
            goto postaction;
          }
          else {
            mchunkptr prev = chunk_minus_offset(p, prevsize);
            psize += prevsize;
            p = prev;
            if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
              if (p != fm->dv) {
                unlink_chunk(fm, p, prevsize);
              }
              else if ((next->head & INUSE_BITS) == INUSE_BITS) {
                fm->dvsize = psize;
                set_free_with_pinuse(p, psize, next);
                goto postaction;
              }
            }
            else
              goto erroraction;
          }
        }

        if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
          if (!cinuse(next)) {          /* consolidate forward */
            if (next == fm->top) {
              size_t tsize = fm->topsize += psize;
              fm->top = p;
              p->head = tsize | PINUSE_BIT;
              if (p == fm->dv) {
                fm->dv = 0;
                fm->dvsize = 0;
              }
              if (should_trim(fm, tsize))
                sys_trim(fm, 0);
              goto postaction;
            }
            else if (next == fm->dv) {
              size_t dsize = fm->dvsize += psize;
              fm->dv = p;
              set_size_and_pinuse_of_free_chunk(p, dsize);
              goto postaction;
            }
            else {
              size_t nsize = chunksize(next);
              psize += nsize;
              unlink_chunk(fm, next, nsize);
              set_size_and_pinuse_of_free_chunk(p, psize);
              if (p == fm->dv) {
                fm->dvsize = psize;
                goto postaction;
              }
            }
          }
          else
            set_free_with_pinuse(p, psize, next);

          if (is_small(psize)) {
            insert_small_chunk(fm, p, psize);
            check_free_chunk(fm, p);
          }
          else {
            tchunkptr tp = (tchunkptr)p;
            insert_large_chunk(fm, tp, psize);
            check_free_chunk(fm, p);
            if (--fm->release_checks == 0)
              release_unused_segments(fm);
          }
          goto postaction;
        }
      }
    erroraction:
      USAGE_ERROR_ACTION(fm, p);
    postaction:
      ;                                 /* was: POSTACTION(fm) */
    }
  }
}
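/* Hypothetical caller for the lockless variant: free a whole batch of
   blocks belonging to one arena while paying for a single lock/unlock
   pair instead of one per mspace_free() call.  batch_free() is an
   illustrative helper, not part of the original source. */
static void
batch_free(struct malloc_arena *ar_ptr, void **blocks, size_t count)
{
  size_t i;

  (void)mutex_lock(&ar_ptr->mutex);     /* PREACTION, done once by hand */
  for (i = 0; i < count; ++i)
    mspace_free_lockless(arena_to_mspace(ar_ptr), blocks[i]);
  (void)mutex_unlock(&ar_ptr->mutex);   /* POSTACTION, done once */
}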