void
huge_dalloc(void *ptr, bool unmap)
{
    extent_node_t *node, key;

    malloc_mutex_lock(&huge_mtx);

    /* Extract from tree of huge allocations. */
    key.addr = ptr;
    node = extent_tree_ad_search(&huge, &key);
    assert(node != NULL);
    assert(node->addr == ptr);
    extent_tree_ad_remove(&huge, node);

#ifdef JEMALLOC_STATS
    huge_ndalloc++;
    huge_allocated -= node->size;
#endif

    malloc_mutex_unlock(&huge_mtx);

    if (unmap) {
        /* Unmap chunk. */
#ifdef JEMALLOC_FILL
#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
        if (opt_junk)
            memset(node->addr, 0x5a, node->size);
#endif
#endif
        chunk_dealloc(node->addr, node->size);
    }

    base_node_dealloc(node);
}
void
huge_dalloc(void *ptr, bool unmap)
{
    extent_node_t *node, key;

    malloc_mutex_lock(&huge_mtx);

    /* Extract from tree of huge allocations. */
    key.addr = ptr;
    node = extent_tree_ad_search(&huge, &key);
    assert(node != NULL);
    assert(node->addr == ptr);
    extent_tree_ad_remove(&huge, node);

    if (config_stats) {
        stats_cactive_sub(node->size);
        huge_ndalloc++;
        huge_allocated -= node->size;
    }

    malloc_mutex_unlock(&huge_mtx);

    if (unmap && config_fill && config_dss && opt_junk)
        memset(node->addr, 0x5a, node->size);

    chunk_dealloc(node->addr, node->size, unmap);

    base_node_dealloc(node);
}
// TODO: improve, caching
void
allocator_dealloc(allocator_t *allocator, void *p)
{
    pthread_mutex_lock(&allocator->lock);

    /* Linear scan of the chunk list to find the chunk that owns p. */
    dllist_link *l = allocator->chunks.head;
    for (; l; l = l->next) {
        chunk_t *tmp = DLLIST_ELEMENT(l, chunk_t, link);
        if (chunk_exists(tmp, allocator->block_size, p)) {
            /* Return the block to its owning chunk's free list. */
            chunk_dealloc(tmp, p, allocator->block_size);
            break;
        }
    }

    pthread_mutex_unlock(&allocator->lock);
}
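/*
 * Usage sketch for allocator_dealloc() (hypothetical: allocator_create()
 * and allocator_alloc() are assumed counterparts with these signatures;
 * only allocator_dealloc() above comes from the source).
 */
static void
allocator_example(void)
{
    allocator_t *a = allocator_create(/* block_size */ 64);
    void *p = allocator_alloc(a);

    /* ... use the 64-byte block ... */

    allocator_dealloc(a, p);    /* scans a->chunks for the owning chunk */
}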
void
huge_dalloc(void *ptr, bool unmap)
{
    extent_node_t *node, key;

    malloc_mutex_lock(&huge_mtx);

    /* Extract from tree of huge allocations. */
    key.addr = ptr;
#ifdef JEMALLOC_ENABLE_MEMKIND
    key.partition = -1;
    do {
        key.partition++;
#endif
        node = extent_tree_ad_search(&huge, &key);
#ifdef JEMALLOC_ENABLE_MEMKIND
    } while ((node == NULL || node->partition != key.partition) &&
        key.partition < 256); /* FIXME hard coding partition max to 256 */
#endif
    assert(node != NULL);
    assert(node->addr == ptr);
    extent_tree_ad_remove(&huge, node);

    if (config_stats) {
        stats_cactive_sub(node->size);
        huge_ndalloc++;
        huge_allocated -= node->size;
    }

    malloc_mutex_unlock(&huge_mtx);

    if (unmap)
        huge_dalloc_junk(node->addr, node->size);

    chunk_dealloc(node->addr, node->size, unmap
#ifdef JEMALLOC_ENABLE_MEMKIND
        , key.partition
#endif
        );

    base_node_dealloc(node);
}
void *
chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
{
    void *ret;

    cassert(config_dss);
    assert(size > 0 && (size & chunksize_mask) == 0);
    assert(alignment > 0 && (alignment & chunksize_mask) == 0);

    /*
     * sbrk() uses a signed increment argument, so take care not to
     * interpret a huge allocation request as a negative increment.
     */
    if ((intptr_t)size < 0)
        return (NULL);

    malloc_mutex_lock(&dss_mtx);
    if (dss_prev != (void *)-1) {
        size_t gap_size, cpad_size;
        void *cpad, *dss_next;
        intptr_t incr;

        /*
         * The loop is necessary to recover from races with other
         * threads that are using the DSS for something other than
         * malloc.
         */
        do {
            /* Get the current end of the DSS. */
            dss_max = sbrk(0);

            /*
             * Calculate how much padding is necessary to
             * chunk-align the end of the DSS.
             */
            gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) &
                chunksize_mask;

            /*
             * Compute how much chunk-aligned pad space (if any) is
             * necessary to satisfy alignment.  This space can be
             * recycled for later use.
             */
            cpad = (void *)((uintptr_t)dss_max + gap_size);
            ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max,
                alignment);
            cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
            dss_next = (void *)((uintptr_t)ret + size);
            if ((uintptr_t)ret < (uintptr_t)dss_max ||
                (uintptr_t)dss_next < (uintptr_t)dss_max) {
                /* Wrap-around. */
                malloc_mutex_unlock(&dss_mtx);
                return (NULL);
            }
            incr = gap_size + cpad_size + size;
            dss_prev = sbrk(incr);
            if (dss_prev == dss_max) {
                /* Success. */
                dss_max = dss_next;
                malloc_mutex_unlock(&dss_mtx);
                if (cpad_size != 0)
                    chunk_dealloc(cpad, cpad_size, true);
                if (*zero) {
                    VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
                    memset(ret, 0, size);
                }
                return (ret);
            }
        } while (dss_prev != (void *)-1);
    }
    malloc_mutex_unlock(&dss_mtx);

    return (NULL);
}
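/*
 * Worked example of the gap/pad arithmetic above (hypothetical numbers,
 * assuming chunksize == 4 MiB == 0x400000): if sbrk(0) returns
 * dss_max == 0x1003000, then
 * gap_size == (0x400000 - 0x3000) & 0x3fffff == 0x3fd000, so
 * cpad == 0x1003000 + 0x3fd000 == 0x1400000, which is chunk-aligned.
 * With alignment == chunksize, ret == cpad, cpad_size == 0, and
 * incr == gap_size + size, i.e. sbrk() extends the DSS just far enough
 * to chunk-align its end and satisfy the request.
 */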
void
drop_chunk(Chunk *chunk)
{
    chunk->nb = 0;
    /* Keep up to 20 empty chunks cached for reuse; release the rest. */
    if (nb_recyclable_chunks == 20)
        chunk_dealloc(chunk);
    else
        chunks[nb_recyclable_chunks++] = chunk;
}
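/*
 * Sketch of the allocation-side counterpart implied by the recycling
 * cache above (hypothetical; only drop_chunk() appears in the source,
 * and chunk_alloc() is an assumed fresh-chunk allocator): pop a cached
 * chunk when one is available, otherwise allocate a new one.
 */
static Chunk *
take_chunk(void)
{
    if (nb_recyclable_chunks > 0)
        return chunks[--nb_recyclable_chunks];
    return chunk_alloc();
}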
/* Only handles large allocations that require more than chunk alignment. */
void *
huge_palloc(size_t size, size_t alignment, bool zero)
{
    void *ret;
    size_t alloc_size, chunk_size, offset;
    extent_node_t *node;

    /*
     * This allocation requires alignment that is even larger than chunk
     * alignment.  This means that huge_malloc() isn't good enough.
     *
     * Allocate almost twice as many chunks as are demanded by the size or
     * alignment, in order to assure the alignment can be achieved, then
     * unmap leading and trailing chunks.
     */
    assert(alignment >= chunksize);

    chunk_size = CHUNK_CEILING(size);

    if (size >= alignment)
        alloc_size = chunk_size + alignment - chunksize;
    else
        alloc_size = (alignment << 1) - chunksize;

    /* Allocate an extent node with which to track the chunk. */
    node = base_node_alloc();
    if (node == NULL)
        return (NULL);

    ret = chunk_alloc(alloc_size, false, &zero);
    if (ret == NULL) {
        base_node_dealloc(node);
        return (NULL);
    }

    offset = (uintptr_t)ret & (alignment - 1);
    assert((offset & chunksize_mask) == 0);
    assert(offset < alloc_size);
    if (offset == 0) {
        /* Trim trailing space. */
        chunk_dealloc((void *)((uintptr_t)ret + chunk_size),
            alloc_size - chunk_size);
    } else {
        size_t trailsize;

        /* Trim leading space. */
        chunk_dealloc(ret, alignment - offset);

        ret = (void *)((uintptr_t)ret + (alignment - offset));

        trailsize = alloc_size - (alignment - offset) - chunk_size;
        if (trailsize != 0) {
            /* Trim trailing space. */
            assert(trailsize < alloc_size);
            chunk_dealloc((void *)((uintptr_t)ret + chunk_size),
                trailsize);
        }
    }

    /* Insert node into huge. */
    node->addr = ret;
    node->size = chunk_size;
    malloc_mutex_lock(&huge_mtx);
    extent_tree_ad_insert(&huge, node);
#ifdef JEMALLOC_STATS
    huge_nmalloc++;
    huge_allocated += chunk_size;
#endif
    malloc_mutex_unlock(&huge_mtx);

#ifdef JEMALLOC_FILL
    if (zero == false) {
        if (opt_junk)
            memset(ret, 0xa5, chunk_size);
        else if (opt_zero)
            memset(ret, 0, chunk_size);
    }
#endif

    return (ret);
}
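/*
 * Worked example of the over-allocate-and-trim arithmetic above
 * (hypothetical numbers, assuming chunksize == 4 MiB): for size == 4 MiB
 * and alignment == 16 MiB, size < alignment, so
 * alloc_size == (16 MiB << 1) - 4 MiB == 28 MiB.  Any chunk-aligned
 * 28 MiB region contains a 16 MiB-aligned 4 MiB sub-range: the leading
 * pad is at most alignment - chunksize == 12 MiB, leaving at least
 * 16 MiB, so the leading and trailing chunks can always be returned via
 * chunk_dealloc().
 */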