void *
chunk_alloc_swap(size_t size, bool *zero)
{
    void *ret;

    assert(swap_enabled);

    ret = chunk_recycle_swap(size, zero);
    if (ret != NULL)
        return (ret);

    malloc_mutex_lock(&swap_mtx);
    if ((uintptr_t)swap_end + size <= (uintptr_t)swap_max) {
        ret = swap_end;
        swap_end = (void *)((uintptr_t)swap_end + size);
#ifdef JEMALLOC_STATS
        swap_avail -= size;
#endif
        malloc_mutex_unlock(&swap_mtx);

        if (swap_prezeroed)
            *zero = true;
        else if (*zero)
            memset(ret, 0, size);
    } else {
        malloc_mutex_unlock(&swap_mtx);
        return (NULL);
    }

    return (ret);
}
void *
base_alloc(size_t size)
{
    void *ret;
    size_t csize;

    /* Round size up to nearest multiple of the cacheline size. */
    csize = CACHELINE_CEILING(size);

    malloc_mutex_lock(&base_mtx);
    /* Make sure there's enough space for the allocation. */
    if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
        if (base_pages_alloc(csize)) {
            malloc_mutex_unlock(&base_mtx);
            return (NULL);
        }
    }
    /* Allocate. */
    ret = base_next_addr;
    base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
    malloc_mutex_unlock(&base_mtx);
    VALGRIND_MAKE_MEM_UNDEFINED(ret, csize);

    return (ret);
}
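/*
 * For reference only: a minimal sketch of the rounding used by
 * CACHELINE_CEILING() above, assuming CACHELINE is a power of two (commonly
 * 64) and CACHELINE_MASK is (CACHELINE - 1), as in jemalloc's internal
 * headers; the exact values are platform-dependent.
 *
 *   #define CACHELINE            64
 *   #define CACHELINE_MASK       (CACHELINE - 1)
 *   #define CACHELINE_CEILING(s) (((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
 */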
static rtree_leaf_elm_t *
rtree_leaf_init(tsdn_t *tsdn, rtree_t *rtree, atomic_p_t *elmp) {
    malloc_mutex_lock(tsdn, &rtree->init_lock);
    /*
     * If *elmp is non-null, then it was initialized with the init lock
     * held, so we can get by with 'relaxed' here.
     */
    rtree_leaf_elm_t *leaf = atomic_load_p(elmp, ATOMIC_RELAXED);
    if (leaf == NULL) {
        leaf = rtree_leaf_alloc(tsdn, rtree,
            ZU(1) << rtree_levels[RTREE_HEIGHT-1].bits);
        if (leaf == NULL) {
            malloc_mutex_unlock(tsdn, &rtree->init_lock);
            return NULL;
        }
        /*
         * Even though we hold the lock, a later reader might not; we
         * need release semantics.
         */
        atomic_store_p(elmp, leaf, ATOMIC_RELEASE);
    }
    malloc_mutex_unlock(tsdn, &rtree->init_lock);

    return leaf;
}
static rtree_node_elm_t *
rtree_node_init(tsdn_t *tsdn, rtree_t *rtree, unsigned level,
    atomic_p_t *elmp) {
    malloc_mutex_lock(tsdn, &rtree->init_lock);
    /*
     * If *elmp is non-null, then it was initialized with the init lock
     * held, so we can get by with 'relaxed' here.
     */
    rtree_node_elm_t *node = atomic_load_p(elmp, ATOMIC_RELAXED);
    if (node == NULL) {
        node = rtree_node_alloc(tsdn, rtree,
            ZU(1) << rtree_levels[level].bits);
        if (node == NULL) {
            malloc_mutex_unlock(tsdn, &rtree->init_lock);
            return NULL;
        }
        /*
         * Even though we hold the lock, a later reader might not; we
         * need release semantics.
         */
        atomic_store_p(elmp, node, ATOMIC_RELEASE);
    }
    malloc_mutex_unlock(tsdn, &rtree->init_lock);

    return node;
}
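/*
 * Illustrative sketch (not part of the sources above): a lock-free reader
 * that pairs with the ATOMIC_RELEASE stores in rtree_{node,leaf}_init. The
 * acquire load guarantees that if a non-NULL pointer is observed, the
 * initialization of the pointed-to subtree is also visible; a NULL result
 * means the caller must fall back to the locked init path above. The helper
 * name is hypothetical.
 */
static inline rtree_leaf_elm_t *
rtree_leaf_tryread_sketch(atomic_p_t *elmp) {
    /* Pairs with atomic_store_p(elmp, leaf, ATOMIC_RELEASE). */
    return (rtree_leaf_elm_t *)atomic_load_p(elmp, ATOMIC_ACQUIRE);
}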
static void
huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize,
    size_t size, size_t extra, bool zero)
{
    size_t usize_next;
    extent_node_t *node;
    arena_t *arena;
    chunk_purge_t *chunk_purge;
    bool zeroed;

    /* Increase usize to incorporate extra. */
    while (usize < s2u(size+extra) && (usize_next = s2u(usize+1)) < oldsize)
        usize = usize_next;

    if (oldsize == usize)
        return;

    node = huge_node_get(ptr);
    arena = extent_node_arena_get(node);

    malloc_mutex_lock(&arena->lock);
    chunk_purge = arena->chunk_purge;
    malloc_mutex_unlock(&arena->lock);

    /* Fill if necessary (shrinking). */
    if (oldsize > usize) {
        size_t sdiff = oldsize - usize;
        zeroed = !chunk_purge_wrapper(arena, chunk_purge, ptr, usize,
            sdiff);
        if (config_fill && unlikely(opt_junk_free)) {
            memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff);
            zeroed = false;
        }
    } else
        zeroed = true;

    malloc_mutex_lock(&arena->huge_mtx);
    /* Update the size of the huge allocation. */
    assert(extent_node_size_get(node) != usize);
    extent_node_size_set(node, usize);
    /* Clear node's zeroed field if zeroing failed above. */
    extent_node_zeroed_set(node, extent_node_zeroed_get(node) && zeroed);
    malloc_mutex_unlock(&arena->huge_mtx);

    arena_chunk_ralloc_huge_similar(arena, ptr, oldsize, usize);

    /* Fill if necessary (growing). */
    if (oldsize < usize) {
        if (zero || (config_fill && unlikely(opt_zero))) {
            if (!zeroed) {
                memset((void *)((uintptr_t)ptr + oldsize), 0,
                    usize - oldsize);
            }
        } else if (config_fill && unlikely(opt_junk_alloc)) {
            memset((void *)((uintptr_t)ptr + oldsize), 0xa5,
                usize - oldsize);
        }
    }
}
static void
ctl_refresh(void)
{
    unsigned i;
    VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);

    if (config_stats) {
        malloc_mutex_lock(&chunks_mtx);
        ctl_stats.chunks.current = stats_chunks.curchunks;
        ctl_stats.chunks.total = stats_chunks.nchunks;
        ctl_stats.chunks.high = stats_chunks.highchunks;
        malloc_mutex_unlock(&chunks_mtx);

        malloc_mutex_lock(&huge_mtx);
        ctl_stats.huge.allocated = huge_allocated;
        ctl_stats.huge.nmalloc = huge_nmalloc;
        ctl_stats.huge.ndalloc = huge_ndalloc;
        malloc_mutex_unlock(&huge_mtx);
    }

    /*
     * Clear sum stats, since they will be merged into by
     * ctl_arena_refresh().
     */
    ctl_stats.arenas[ctl_stats.narenas].nthreads = 0;
    ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]);

    malloc_mutex_lock(&arenas_lock);
    memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas);
    for (i = 0; i < ctl_stats.narenas; i++) {
        if (arenas[i] != NULL)
            ctl_stats.arenas[i].nthreads = arenas[i]->nthreads;
        else
            ctl_stats.arenas[i].nthreads = 0;
    }
    malloc_mutex_unlock(&arenas_lock);
    for (i = 0; i < ctl_stats.narenas; i++) {
        bool initialized = (tarenas[i] != NULL);

        ctl_stats.arenas[i].initialized = initialized;
        if (initialized)
            ctl_arena_refresh(tarenas[i], i);
    }

    if (config_stats) {
        ctl_stats.allocated =
            ctl_stats.arenas[ctl_stats.narenas].allocated_small +
            ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large +
            ctl_stats.huge.allocated;
        ctl_stats.active =
            (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE) +
            ctl_stats.huge.allocated;
        ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);
    }

    ctl_epoch++;
}
void *
chunk_alloc_dss(size_t size, bool *zero)
{
    void *ret;

    ret = chunk_recycle_dss(size, zero);
    if (ret != NULL)
        return (ret);

    /*
     * sbrk() uses a signed increment argument, so take care not to
     * interpret a huge allocation request as a negative increment.
     */
    if ((intptr_t)size < 0)
        return (NULL);

    malloc_mutex_lock(&dss_mtx);
    if (dss_prev != (void *)-1) {
        intptr_t incr;

        /*
         * The loop is necessary to recover from races with other
         * threads that are using the DSS for something other than
         * malloc.
         */
        do {
            /* Get the current end of the DSS. */
            dss_max = sbrk(0);

            /*
             * Calculate how much padding is necessary to
             * chunk-align the end of the DSS.
             */
            incr = (intptr_t)size -
                (intptr_t)CHUNK_ADDR2OFFSET(dss_max);
            if (incr == (intptr_t)size)
                ret = dss_max;
            else {
                ret = (void *)((intptr_t)dss_max + incr);
                incr += size;
            }

            dss_prev = sbrk(incr);
            if (dss_prev == dss_max) {
                /* Success. */
                dss_max = (void *)((intptr_t)dss_prev + incr);
                malloc_mutex_unlock(&dss_mtx);
                *zero = true;
                return (ret);
            }
        } while (dss_prev != (void *)-1);
    }
    malloc_mutex_unlock(&dss_mtx);

    return (NULL);
}
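/*
 * For reference only: a sketch of the chunk address arithmetic used above,
 * assuming chunksize is a power of two and chunksize_mask is (chunksize - 1)
 * as in jemalloc's chunk header. CHUNK_ADDR2OFFSET() yields an address's
 * offset within its chunk, so a zero result means the DSS end is already
 * chunk-aligned.
 *
 *   #define CHUNK_ADDR2BASE(a)   ((void *)((uintptr_t)(a) & ~chunksize_mask))
 *   #define CHUNK_ADDR2OFFSET(a) ((size_t)((uintptr_t)(a) & chunksize_mask))
 */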
static bool
huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t size, bool zero)
{
    size_t usize;
    extent_node_t *node;
    arena_t *arena;
    bool is_zeroed_subchunk, is_zeroed_chunk;

    usize = s2u(size);
    if (usize == 0) {
        /* size_t overflow. */
        return (true);
    }

    node = huge_node_get(ptr);
    arena = extent_node_arena_get(node);
    malloc_mutex_lock(&arena->huge_mtx);
    is_zeroed_subchunk = extent_node_zeroed_get(node);
    malloc_mutex_unlock(&arena->huge_mtx);

    /*
     * Copy zero into is_zeroed_chunk and pass the copy to chunk_alloc(), so
     * that it is possible to make correct junk/zero fill decisions below.
     */
    is_zeroed_chunk = zero;

    if (arena_chunk_ralloc_huge_expand(arena, ptr, oldsize, usize,
        &is_zeroed_chunk))
        return (true);

    malloc_mutex_lock(&arena->huge_mtx);
    /* Update the size of the huge allocation. */
    extent_node_size_set(node, usize);
    malloc_mutex_unlock(&arena->huge_mtx);

    if (zero || (config_fill && unlikely(opt_zero))) {
        if (!is_zeroed_subchunk) {
            memset((void *)((uintptr_t)ptr + oldsize), 0,
                CHUNK_CEILING(oldsize) - oldsize);
        }
        if (!is_zeroed_chunk) {
            memset((void *)((uintptr_t)ptr + CHUNK_CEILING(oldsize)), 0,
                usize - CHUNK_CEILING(oldsize));
        }
    } else if (config_fill && unlikely(opt_junk_alloc)) {
        memset((void *)((uintptr_t)ptr + oldsize), 0xa5,
            usize - oldsize);
    }

    return (false);
}
static int
thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
    int ret;
    unsigned newind, oldind;

    malloc_mutex_lock(&ctl_mtx);
    newind = oldind = choose_arena(NULL)->ind;
    WRITE(newind, unsigned);
    READ(oldind, unsigned);
    if (newind != oldind) {
        arena_t *arena;

        if (newind >= ctl_stats.narenas) {
            /* New arena index is out of range. */
            ret = EFAULT;
            goto label_return;
        }

        /* Initialize arena if necessary. */
        malloc_mutex_lock(&arenas_lock);
        if ((arena = arenas[newind]) == NULL &&
            (arena = arenas_extend(newind)) == NULL) {
            malloc_mutex_unlock(&arenas_lock);
            ret = EAGAIN;
            goto label_return;
        }
        assert(arena == arenas[newind]);
        arenas[oldind]->nthreads--;
        arenas[newind]->nthreads++;
        malloc_mutex_unlock(&arenas_lock);

        /* Set new arena association. */
        if (config_tcache) {
            tcache_t *tcache;
            if ((uintptr_t)(tcache = *tcache_tsd_get()) >
                (uintptr_t)TCACHE_STATE_MAX) {
                tcache_arena_dissociate(tcache);
                tcache_arena_associate(tcache, arena);
            }
        }

        arenas_tsd_set(&arena);
    }

    ret = 0;
label_return:
    malloc_mutex_unlock(&ctl_mtx);
    return (ret);
}
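/*
 * Illustrative caller-side sketch (not part of the sources above): the ctl
 * handler is reached through the public mallctl() API under the name
 * "thread.arena". Reading returns the calling thread's current arena index;
 * writing rebinds the thread to another arena. The helper name is
 * hypothetical.
 */
#include <jemalloc/jemalloc.h>

static int
rebind_thread_to_arena(unsigned new_arena)
{
    unsigned old_arena;
    size_t sz = sizeof(old_arena);

    /* Read the old binding and install the new one in a single call. */
    return mallctl("thread.arena", &old_arena, &sz, &new_arena,
        sizeof(new_arena));
}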
static int
prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
    int ret;
    bool oldval;

    if (config_prof == false)
        return (ENOENT);

    malloc_mutex_lock(&ctl_mtx); /* Protect opt_prof_active. */
    oldval = opt_prof_active;
    if (newp != NULL) {
        /*
         * The memory barriers will tend to make opt_prof_active
         * propagate faster on systems with weak memory ordering.
         */
        mb_write();
        WRITE(opt_prof_active, bool);
        mb_write();
    }
    READ(oldval, bool);

    ret = 0;
label_return:
    malloc_mutex_unlock(&ctl_mtx);
    return (ret);
}
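/*
 * Illustrative caller-side sketch (not part of the sources above): the
 * handler is exposed as the "prof.active" mallctl, which reads and/or writes
 * a bool. It requires a build with profiling enabled; otherwise the handler
 * above returns ENOENT. The helper name is hypothetical.
 */
#include <stdbool.h>
#include <jemalloc/jemalloc.h>

static int
set_prof_active(bool active, bool *was_active)
{
    size_t sz = sizeof(*was_active);

    /* Fetch the previous value and write the new one atomically w.r.t. ctl. */
    return mallctl("prof.active", was_active, &sz, &active, sizeof(active));
}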
static int
swap_prezeroed_ctl(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
    int ret;

    malloc_mutex_lock(&ctl_mtx);
    if (swap_enabled) {
        READONLY();
    } else {
        /*
         * swap_prezeroed isn't actually used by the swap code until it
         * is set during a successful chunk_swap_enable() call.  We
         * use it here to store the value that we'll pass to
         * chunk_swap_enable() in a swap.fds mallctl().  This is not
         * very clean, but the obvious alternatives are even worse.
         */
        WRITE(swap_prezeroed, bool);
    }

    READ(swap_prezeroed, bool);

    ret = 0;
RETURN:
    malloc_mutex_unlock(&ctl_mtx);
    return (ret);
}
size_t
huge_salloc(const void *ptr
#ifdef JEMALLOC_ENABLE_MEMKIND
, unsigned partition
#endif
)
{
    size_t ret;
    extent_node_t *node, key;

    malloc_mutex_lock(&huge_mtx);

    /* Extract from tree of huge allocations. */
    key.addr = __DECONST(void *, ptr);
#ifdef JEMALLOC_ENABLE_MEMKIND
    key.partition = partition - 1;
    do {
        key.partition++;
#endif
        node = extent_tree_ad_search(&huge, &key);
#ifdef JEMALLOC_ENABLE_MEMKIND
    } while ((node == NULL || node->partition != key.partition) &&
        key.partition < 256); /* FIXME hard coding partition max to 256 */
#endif
    assert(node != NULL);
    ret = node->size;

    malloc_mutex_unlock(&huge_mtx);

    return (ret);
}
prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
    prof_ctx_t *ret = NULL;
    int i;
    extent_node_t *node, key;

    for (i = 0; i < POOLS_MAX; ++i) {
        pool_t *pool = pools[i];
        if (pool == NULL)
            continue;
        malloc_mutex_lock(&pool->huge_mtx);

        /* Extract from tree of huge allocations. */
        key.addr = __DECONST(void *, ptr);
        node = extent_tree_ad_search(&pool->huge, &key);
        if (node != NULL)
            ret = node->prof_ctx;

        malloc_mutex_unlock(&pool->huge_mtx);
        if (ret != NULL)
            break;
    }

    return (ret);
}
void *
huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
    bool zero, tcache_t *tcache)
{
    void *ret;
    extent_node_t *node;
    bool is_zeroed;

    /* Allocate one or more contiguous chunks for this request. */

    /* Allocate an extent node with which to track the chunk. */
    node = ipallocztm(tsd, CACHELINE_CEILING(sizeof(extent_node_t)),
        CACHELINE, false, tcache, true, arena);
    if (node == NULL)
        return (NULL);

    /*
     * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
     * it is possible to make correct junk/zero fill decisions below.
     */
    is_zeroed = zero;
    /* ANDROID change */
#if !defined(__LP64__)
    /* On 32 bit systems, using a per arena cache can exhaust
     * virtual address space. Force all huge allocations to
     * always take place in the first arena.
     */
    arena = a0get();
#else
    arena = arena_choose(tsd, arena);
#endif
    /* End ANDROID change */
    if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(arena,
        usize, alignment, &is_zeroed)) == NULL) {
        idalloctm(tsd, node, tcache, true);
        return (NULL);
    }

    extent_node_init(node, arena, ret, usize, is_zeroed);

    if (huge_node_set(ret, node)) {
        arena_chunk_dalloc_huge(arena, ret, usize);
        idalloctm(tsd, node, tcache, true);
        return (NULL);
    }

    /* Insert node into huge. */
    malloc_mutex_lock(&arena->huge_mtx);
    ql_elm_new(node, ql_link);
    ql_tail_insert(&arena->huge, node, ql_link);
    malloc_mutex_unlock(&arena->huge_mtx);

    if (zero || (config_fill && unlikely(opt_zero))) {
        if (!is_zeroed)
            memset(ret, 0, usize);
    } else if (config_fill && unlikely(opt_junk_alloc))
        memset(ret, 0xa5, usize);

    return (ret);
}
static int
arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
    int ret;
    unsigned nread, i;

    malloc_mutex_lock(&ctl_mtx);
    READONLY();
    if (*oldlenp != narenas * sizeof(bool)) {
        ret = EINVAL;
        nread = (*oldlenp < narenas * sizeof(bool))
            ? (*oldlenp / sizeof(bool)) : narenas;
    } else {
        ret = 0;
        nread = narenas;
    }

    for (i = 0; i < nread; i++)
        ((bool *)oldp)[i] = ctl_stats.arenas[i].initialized;

label_return:
    malloc_mutex_unlock(&ctl_mtx);
    return (ret);
}
void
huge_dalloc(void *ptr, bool unmap)
{
    extent_node_t *node, key;

    malloc_mutex_lock(&huge_mtx);

    /* Extract from tree of huge allocations. */
    key.addr = ptr;
    node = extent_tree_ad_search(&huge, &key);
    assert(node != NULL);
    assert(node->addr == ptr);
    extent_tree_ad_remove(&huge, node);

#ifdef JEMALLOC_STATS
    huge_ndalloc++;
    huge_allocated -= node->size;
#endif

    malloc_mutex_unlock(&huge_mtx);

    if (unmap) {
        /* Unmap chunk. */
#ifdef JEMALLOC_FILL
#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
        if (opt_junk)
            memset(node->addr, 0x5a, node->size);
#endif
#endif
        chunk_dealloc(node->addr, node->size);
    }

    base_node_dealloc(node);
}
void
huge_dalloc(void *ptr, bool unmap)
{
    extent_node_t *node, key;

    malloc_mutex_lock(&huge_mtx);

    /* Extract from tree of huge allocations. */
    key.addr = ptr;
    node = extent_tree_ad_search(&huge, &key);
    assert(node != NULL);
    assert(node->addr == ptr);
    extent_tree_ad_remove(&huge, node);

    if (config_stats) {
        stats_cactive_sub(node->size);
        huge_ndalloc++;
        huge_allocated -= node->size;
    }

    malloc_mutex_unlock(&huge_mtx);

    if (unmap && config_fill && config_dss && opt_junk)
        memset(node->addr, 0x5a, node->size);

    chunk_dealloc(node->addr, node->size, unmap);

    base_node_dealloc(node);
}
void
arena_extent_cache_dalloc(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent)
{
    /* Serialize on the arena lock, then defer to the locked variant. */
    malloc_mutex_lock(tsdn, &arena->lock);
    arena_extent_cache_dalloc_locked(tsdn, arena, r_extent_hooks, extent);
    malloc_mutex_unlock(tsdn, &arena->lock);
}
extent_node_t *
base_node_alloc(void)
{
    extent_node_t *ret;

    malloc_mutex_lock(&base_mtx);
    if (base_nodes != NULL) {
        /* Pop a previously freed node off the free list. */
        ret = base_nodes;
        base_nodes = *(extent_node_t **)ret;
        malloc_mutex_unlock(&base_mtx);
    } else {
        /* Free list is empty; carve a new node out of base pages. */
        malloc_mutex_unlock(&base_mtx);
        ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
    }

    return (ret);
}
static void
huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min,
    size_t usize_max, bool zero)
{
    size_t usize, usize_next;
    extent_node_t *node;
    arena_t *arena;
    chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
    bool pre_zeroed, post_zeroed;

    /* Increase usize to incorporate extra. */
    for (usize = usize_min; usize < usize_max &&
        (usize_next = s2u(usize+1)) <= oldsize; usize = usize_next)
        ; /* Do nothing. */

    if (oldsize == usize)
        return;

    node = huge_node_get(ptr);
    arena = extent_node_arena_get(node);
    pre_zeroed = extent_node_zeroed_get(node);

    /* Fill if necessary (shrinking). */
    if (oldsize > usize) {
        size_t sdiff = oldsize - usize;
        if (config_fill && unlikely(opt_junk_free)) {
            memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff);
            post_zeroed = false;
        } else {
            post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
                ptr, CHUNK_CEILING(oldsize), usize, sdiff);
        }
    } else
        post_zeroed = pre_zeroed;

    malloc_mutex_lock(&arena->huge_mtx);
    /* Update the size of the huge allocation. */
    assert(extent_node_size_get(node) != usize);
    extent_node_size_set(node, usize);
    /* Update zeroed. */
    extent_node_zeroed_set(node, post_zeroed);
    malloc_mutex_unlock(&arena->huge_mtx);

    arena_chunk_ralloc_huge_similar(arena, ptr, oldsize, usize);

    /* Fill if necessary (growing). */
    if (oldsize < usize) {
        if (zero || (config_fill && unlikely(opt_zero))) {
            if (!pre_zeroed) {
                memset((void *)((uintptr_t)ptr + oldsize), 0,
                    usize - oldsize);
            }
        } else if (config_fill && unlikely(opt_junk_alloc)) {
            memset((void *)((uintptr_t)ptr + oldsize), 0xa5,
                usize - oldsize);
        }
    }
}
void
base_node_dealloc(extent_node_t *node)
{

    /* Push the node onto the base_nodes free list for later reuse. */
    malloc_mutex_lock(&base_mtx);
    *(extent_node_t **)node = base_nodes;
    base_nodes = node;
    malloc_mutex_unlock(&base_mtx);
}
void
base_node_dealloc(extent_node_t *node)
{

    /* Tell Valgrind the node's contents are no longer meaningful. */
    VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
    /* Push the node onto the base_nodes free list for later reuse. */
    malloc_mutex_lock(&base_mtx);
    *(extent_node_t **)node = base_nodes;
    base_nodes = node;
    malloc_mutex_unlock(&base_mtx);
}
void *
huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
    bool zero, tcache_t *tcache)
{
    void *ret;
    size_t usize;
    extent_node_t *node;
    bool is_zeroed;

    /* Allocate one or more contiguous chunks for this request. */

    usize = sa2u(size, alignment);
    if (unlikely(usize == 0))
        return (NULL);
    assert(usize >= chunksize);

    /* Allocate an extent node with which to track the chunk. */
    node = ipallocztm(tsd, CACHELINE_CEILING(sizeof(extent_node_t)),
        CACHELINE, false, tcache, true, arena);
    if (node == NULL)
        return (NULL);

    /*
     * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
     * it is possible to make correct junk/zero fill decisions below.
     */
    is_zeroed = zero;
    arena = arena_choose(tsd, arena);
    if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(arena,
        size, alignment, &is_zeroed)) == NULL) {
        idalloctm(tsd, node, tcache, true, true);
        return (NULL);
    }

    extent_node_init(node, arena, ret, size, is_zeroed, true);

    if (huge_node_set(ret, node)) {
        arena_chunk_dalloc_huge(arena, ret, size);
        idalloctm(tsd, node, tcache, true, true);
        return (NULL);
    }

    /* Insert node into huge. */
    malloc_mutex_lock(&arena->huge_mtx);
    ql_elm_new(node, ql_link);
    ql_tail_insert(&arena->huge, node, ql_link);
    malloc_mutex_unlock(&arena->huge_mtx);

    if (zero || (config_fill && unlikely(opt_zero))) {
        if (!is_zeroed)
            memset(ret, 0, size);
    } else if (config_fill && unlikely(opt_junk_alloc))
        memset(ret, 0xa5, size);

    return (ret);
}
extent_node_t *
base_node_alloc(void)
{
    extent_node_t *ret;

    malloc_mutex_lock(&base_mtx);
    if (base_nodes != NULL) {
        /* Pop a previously freed node off the free list. */
        ret = base_nodes;
        base_nodes = *(extent_node_t **)ret;
        malloc_mutex_unlock(&base_mtx);
        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret,
            sizeof(extent_node_t));
    } else {
        /* Free list is empty; carve a new node out of base pages. */
        malloc_mutex_unlock(&base_mtx);
        ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
    }

    return (ret);
}
void *
huge_palloc(size_t size, size_t alignment, bool zero)
{
    void *ret;
    size_t csize;
    extent_node_t *node;
    bool is_zeroed;

    /* Allocate one or more contiguous chunks for this request. */

    csize = CHUNK_CEILING(size);
    if (csize == 0) {
        /* size is large enough to cause size_t wrap-around. */
        return (NULL);
    }

    /* Allocate an extent node with which to track the chunk. */
    node = base_node_alloc();
    if (node == NULL)
        return (NULL);

    /*
     * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
     * it is possible to make correct junk/zero fill decisions below.
     */
    is_zeroed = zero;
    ret = chunk_alloc(csize, alignment, false, &is_zeroed,
        chunk_dss_prec_get());
    if (ret == NULL) {
        base_node_dealloc(node);
        return (NULL);
    }

    /* Insert node into huge. */
    node->addr = ret;
    node->size = csize;

    malloc_mutex_lock(&huge_mtx);
    extent_tree_ad_insert(&huge, node);
    if (config_stats) {
        stats_cactive_add(csize);
        huge_nmalloc++;
        huge_allocated += csize;
    }
    malloc_mutex_unlock(&huge_mtx);

    if (config_fill && zero == false) {
        if (opt_junk)
            memset(ret, 0xa5, csize);
        else if (opt_zero && is_zeroed == false)
            memset(ret, 0, csize);
    }

    return (ret);
}
bool
chunk_dss_prec_set(dss_prec_t dss_prec)
{

    /* DSS support is compiled out; report failure. */
    if (config_dss == false)
        return (true);
    /* Update the default precedence under the DSS lock. */
    malloc_mutex_lock(&dss_mtx);
    dss_prec_default = dss_prec;
    malloc_mutex_unlock(&dss_mtx);
    return (false);
}
bool
chunk_dss_prec_set(dss_prec_t dss_prec)
{

    /*
     * Without DSS support, only an explicit "disabled" setting succeeds;
     * any other precedence is reported as an error.
     */
    if (!have_dss)
        return (dss_prec != dss_prec_disabled);
    /* Update the default precedence under the DSS lock. */
    malloc_mutex_lock(&dss_mtx);
    dss_prec_default = dss_prec;
    malloc_mutex_unlock(&dss_mtx);
    return (false);
}
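/*
 * For reference only, and from memory rather than the sources above:
 * dss_prec_t enumerates the DSS precedence settings (dss_prec_disabled,
 * dss_prec_primary, dss_prec_secondary). At the public API level the same
 * setting is usually exercised as a string through the "opt.dss" and
 * "arena.<i>.dss" mallctls, e.g. a hypothetical caller might do:
 *
 *   const char *dss = "primary";
 *   mallctl("arena.0.dss", NULL, NULL, &dss, sizeof(dss));
 */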
void *
huge_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
{
    void *ret;
    size_t csize;
    extent_node_t *node;
    bool is_zeroed;
    pool_t *pool;

    /* Allocate one or more contiguous chunks for this request. */

    csize = CHUNK_CEILING(size);
    if (csize == 0) {
        /* size is large enough to cause size_t wrap-around. */
        return (NULL);
    }

    /*
     * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
     * it is possible to make correct junk/zero fill decisions below.
     */
    is_zeroed = zero;
    arena = choose_arena(arena);
    pool = arena->pool;

    /* Allocate an extent node with which to track the chunk. */
    node = base_node_alloc(pool);
    if (node == NULL)
        return (NULL);

    ret = arena_chunk_alloc_huge(arena, csize, alignment, &is_zeroed);
    if (ret == NULL) {
        base_node_dalloc(pool, node);
        return (NULL);
    }

    /* Insert node into huge. */
    node->addr = ret;
    node->size = csize;
    node->arena = arena;

    malloc_mutex_lock(&pool->huge_mtx);
    extent_tree_ad_insert(&pool->huge, node);
    malloc_mutex_unlock(&pool->huge_mtx);

    if (config_fill && zero == false) {
        if (opt_junk)
            memset(ret, 0xa5, csize);
        else if (opt_zero && is_zeroed == false)
            memset(ret, 0, csize);
    }

    return (ret);
}
static void *
chunk_recycle_swap(size_t size, bool *zero)
{
    extent_node_t *node, key;

    key.addr = NULL;
    key.size = size;
    malloc_mutex_lock(&swap_mtx);
    node = extent_tree_szad_nsearch(&swap_chunks_szad, &key);
    if (node != NULL) {
        void *ret = node->addr;

        /* Remove node from the tree. */
        extent_tree_szad_remove(&swap_chunks_szad, node);
        if (node->size == size) {
            extent_tree_ad_remove(&swap_chunks_ad, node);
            base_node_dealloc(node);
        } else {
            /*
             * Insert the remainder of node's address range as a
             * smaller chunk.  Its position within swap_chunks_ad
             * does not change.
             */
            assert(node->size > size);
            node->addr = (void *)((uintptr_t)node->addr + size);
            node->size -= size;
            extent_tree_szad_insert(&swap_chunks_szad, node);
        }
#ifdef JEMALLOC_STATS
        swap_avail -= size;
#endif
        malloc_mutex_unlock(&swap_mtx);

        if (*zero)
            memset(ret, 0, size);
        return (ret);
    }

    malloc_mutex_unlock(&swap_mtx);
    return (NULL);
}
void
huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
{
    extent_node_t *node;
    arena_t *arena;

    node = huge_node_get(ptr);
    arena = extent_node_arena_get(node);

    /* Update the node's profiling tctx under the owning arena's lock. */
    malloc_mutex_lock(&arena->huge_mtx);
    extent_node_prof_tctx_set(node, tctx);
    malloc_mutex_unlock(&arena->huge_mtx);
}
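/*
 * Sketch of the symmetric read path (not taken verbatim from the sources
 * above): the same per-arena huge_mtx protects the node while its profiling
 * tctx is read back. Assumes extent_node_prof_tctx_get() is the getter
 * counterpart of the setter used in huge_prof_tctx_set(); the function name
 * here is hypothetical.
 */
prof_tctx_t *
huge_prof_tctx_get_sketch(const void *ptr)
{
    prof_tctx_t *tctx;
    extent_node_t *node;
    arena_t *arena;

    node = huge_node_get(ptr);
    arena = extent_node_arena_get(node);

    malloc_mutex_lock(&arena->huge_mtx);
    tctx = extent_node_prof_tctx_get(node);
    malloc_mutex_unlock(&arena->huge_mtx);

    return (tctx);
}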