size_t
huge_salloc(const void *ptr
#ifdef JEMALLOC_ENABLE_MEMKIND
, unsigned partition
#endif
)
{
	size_t ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
#ifdef JEMALLOC_ENABLE_MEMKIND
	key.partition = partition - 1;
	do {
		key.partition++;
#endif
		node = extent_tree_ad_search(&huge, &key);
#ifdef JEMALLOC_ENABLE_MEMKIND
	} while ((node == NULL || node->partition != key.partition) &&
	    key.partition < 256); /* FIXME hard coding partition max to 256 */
#endif
	assert(node != NULL);

	ret = node->size;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}
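/*
 * Illustrative sketch, not jemalloc code: the lookups above search an
 * address-ordered tree with a stack-allocated key node whose only meaningful
 * field is addr.  The same pattern is shown standalone below with a sorted
 * array and bsearch(); demo_node_t and demo_addr_cmp are hypothetical names
 * used only for this example.
 */
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

typedef struct {
	void	*addr;	/* extent start address (the search key) */
	size_t	size;	/* extent size in bytes */
} demo_node_t;

static int
demo_addr_cmp(const void *a, const void *b)
{
	uintptr_t aa = (uintptr_t)((const demo_node_t *)a)->addr;
	uintptr_t ba = (uintptr_t)((const demo_node_t *)b)->addr;

	return ((aa > ba) - (aa < ba));
}

int
main(void)
{
	demo_node_t nodes[] = {
		{(void *)0x200000, 2 << 20},
		{(void *)0x400000, 4 << 20},
		{(void *)0x800000, 8 << 20},
	};
	/* Stack key: only addr is filled in, as in huge_salloc() above. */
	demo_node_t key = {(void *)0x400000, 0};
	demo_node_t *node = bsearch(&key, nodes, 3, sizeof(key),
	    demo_addr_cmp);

	assert(node != NULL && node->size == (size_t)(4 << 20));
	return (0);
}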
void
huge_dalloc(void *ptr, bool unmap)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = ptr;
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);
	assert(node->addr == ptr);
	extent_tree_ad_remove(&huge, node);

	if (config_stats) {
		stats_cactive_sub(node->size);
		huge_ndalloc++;
		huge_allocated -= node->size;
	}

	malloc_mutex_unlock(&huge_mtx);

	if (unmap && config_fill && config_dss && opt_junk)
		memset(node->addr, 0x5a, node->size);

	chunk_dealloc(node->addr, node->size, unmap);

	base_node_dealloc(node);
}
prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
	prof_ctx_t *ret = NULL;
	int i;
	extent_node_t *node, key;

	for (i = 0; i < POOLS_MAX; ++i) {
		pool_t *pool = pools[i];
		if (pool == NULL)
			continue;
		malloc_mutex_lock(&pool->huge_mtx);

		/* Extract from tree of huge allocations. */
		key.addr = __DECONST(void *, ptr);
		node = extent_tree_ad_search(&pool->huge, &key);
		if (node != NULL)
			ret = node->prof_ctx;

		malloc_mutex_unlock(&pool->huge_mtx);

		if (ret != NULL)
			break;
	}

	return (ret);
}
void
huge_dalloc(void *ptr, bool unmap)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = ptr;
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);
	assert(node->addr == ptr);
	extent_tree_ad_remove(&huge, node);

#ifdef JEMALLOC_STATS
	huge_ndalloc++;
	huge_allocated -= node->size;
#endif

	malloc_mutex_unlock(&huge_mtx);

	if (unmap) {
		/* Unmap chunk. */
#ifdef JEMALLOC_FILL
#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
		if (opt_junk)
			memset(node->addr, 0x5a, node->size);
#endif
#endif
		chunk_dealloc(node->addr, node->size);
	}

	base_node_dealloc(node);
}
static void huge_update_size(struct arena *arena, void *ptr, size_t new_size) {
    struct extent_node key;
    key.addr = ptr;

    extent_tree *huge = acquire_huge(arena);
    struct extent_node *node = extent_tree_ad_search(huge, &key);
    assert(node);
    node->size = new_size;
    release_huge(arena);
}
void
huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	node->prof_ctx = ctx;

	malloc_mutex_unlock(&huge_mtx);
}
size_t
huge_pool_salloc(pool_t *pool, const void *ptr)
{
	size_t ret = 0;
	extent_node_t *node, key;

	malloc_mutex_lock(&pool->huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&pool->huge, &key);
	if (node != NULL)
		ret = node->size;

	malloc_mutex_unlock(&pool->huge_mtx);

	return (ret);
}
size_t huge_alloc_size(void *ptr) {
    struct extent_node key;
    key.addr = ptr;

    struct arena *arena = get_huge_arena(ptr);
    maybe_lock_arena(arena);

    extent_tree *huge = acquire_huge(arena);
    struct extent_node *node = extent_tree_ad_search(huge, &key);
    assert(node);
    size_t size = node->size;
    release_huge(arena);

    maybe_unlock_arena(arena);
    return size;
}
void
huge_dalloc(void *ptr, bool unmap)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = ptr;
#ifdef JEMALLOC_ENABLE_MEMKIND
	key.partition = -1;
	do {
		key.partition++;
#endif
		node = extent_tree_ad_search(&huge, &key);
#ifdef JEMALLOC_ENABLE_MEMKIND
	} while ((node == NULL || node->partition != key.partition) &&
	    key.partition < 256); /* FIXME hard coding partition max to 256 */
#endif
	assert(node != NULL);
	assert(node->addr == ptr);
	extent_tree_ad_remove(&huge, node);

	if (config_stats) {
		stats_cactive_sub(node->size);
		huge_ndalloc++;
		huge_allocated -= node->size;
	}

	malloc_mutex_unlock(&huge_mtx);

	if (unmap)
		huge_dalloc_junk(node->addr, node->size);

	chunk_dealloc(node->addr, node->size, unmap
#ifdef JEMALLOC_ENABLE_MEMKIND
	, key.partition
#endif
	);

	base_node_dealloc(node);
}
void
huge_dalloc(pool_t *pool, void *ptr)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&pool->huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = ptr;
	node = extent_tree_ad_search(&pool->huge, &key);
	assert(node != NULL);
	assert(node->addr == ptr);
	extent_tree_ad_remove(&pool->huge, node);

	malloc_mutex_unlock(&pool->huge_mtx);

	huge_dalloc_junk(node->addr, node->size);
	arena_chunk_dalloc_huge(node->arena, node->addr, node->size);
	base_node_dalloc(pool, node);
}
void huge_free(void *ptr) {
    struct extent_node *node, key;
    key.addr = ptr;

    struct arena *arena = get_huge_arena(ptr);
    maybe_lock_arena(arena);

    extent_tree *huge = acquire_huge(arena);
    node = extent_tree_ad_search(huge, &key);
    assert(node);
    size_t size = node->size;
    extent_tree_ad_remove(huge, node);
    node_free(get_huge_nodes(arena), node);
    release_huge(arena);

    if (purge_ratio >= 0) {
        memory_decommit(ptr, size);
    }
    chunk_free(get_recycler(arena), ptr, size);

    maybe_unlock_arena(arena);
}
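/*
 * Sketch only: memory_decommit() above is assumed to drop the physical pages
 * backing a range while keeping the virtual range reserved, which on Linux is
 * typically done with madvise(MADV_DONTNEED).  Minimal standalone
 * illustration, independent of the allocator's own wrappers:
 */
#define _DEFAULT_SOURCE
#include <assert.h>
#include <stddef.h>
#include <sys/mman.h>

int main(void) {
    size_t size = 2 << 20;
    char *chunk = mmap(NULL, size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    assert(chunk != MAP_FAILED);
    chunk[0] = 1; /* fault in at least one page */

    /* Give the pages back; the mapping stays usable and re-faults as
     * zero-filled pages on the next access. */
    assert(madvise(chunk, size, MADV_DONTNEED) == 0);
    assert(chunk[0] == 0);

    munmap(chunk, size);
    return 0;
}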
static void *huge_move_expand(struct thread_cache *cache, void *old_addr, size_t old_size,
                              size_t new_size) {
    struct arena *arena;
    void *new_addr = huge_chunk_alloc(cache, new_size, CHUNK_SIZE, &arena);
    if (unlikely(!new_addr)) {
        return NULL;
    }

    bool gap = true;
    if (unlikely(memory_remap_fixed(old_addr, old_size, new_addr, new_size))) {
        /* Remap failed: fall back to copying into the fresh mapping, then
         * discard the old pages. */
        memcpy(new_addr, old_addr, old_size);
        if (purge_ratio >= 0) {
            memory_decommit(old_addr, old_size);
        }
        gap = false;
    } else {
        // Attempt to fill the virtual memory hole. The kernel should provide a flag for
        // preserving the old mapping to avoid the possibility of this failing and creating
        // fragmentation.
        //
        // https://lkml.org/lkml/2014/10/2/624
        void *extra = memory_map(old_addr, old_size, false);
        if (likely(extra)) {
            if (unlikely(extra != old_addr)) {
                memory_unmap(extra, old_size);
            } else {
                gap = false;
            }
        }
    }

    struct extent_node key;
    key.addr = old_addr;

    struct arena *old_arena = get_huge_arena(old_addr);

    extent_tree *huge = acquire_huge(old_arena);
    struct extent_node *node = extent_tree_ad_search(huge, &key);
    assert(node);
    extent_tree_ad_remove(huge, node);
    node->addr = new_addr;
    node->size = new_size;

    if (arena != old_arena) {
        release_huge(old_arena);
        huge = acquire_huge(arena);
    }

    extent_tree_ad_insert(huge, node);
    release_huge(arena);

    if (!gap) {
        if (arena != old_arena && old_arena) {
            mutex_lock(&old_arena->mutex);
        }
        chunk_free(get_recycler(old_arena), old_addr, old_size);
        if (arena != old_arena && old_arena) {
            mutex_unlock(&old_arena->mutex);
        }
    }
    maybe_unlock_arena(arena);
    return new_addr;
}
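/*
 * Sketch only, under the assumption that memory_remap_fixed() above wraps
 * Linux mremap(2) with MREMAP_MAYMOVE | MREMAP_FIXED, i.e. it moves the old
 * pages onto a destination address chosen by the allocator.  A minimal
 * standalone demonstration of that system call, independent of the
 * allocator's own wrappers:
 */
#define _GNU_SOURCE
#include <assert.h>
#include <string.h>
#include <sys/mman.h>

int main(void) {
    size_t old_size = 2 << 20, new_size = 4 << 20;
    /* An existing mapping holding live data. */
    char *old = mmap(NULL, old_size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    /* Reserve a destination range playing the role of new_addr. */
    char *dst = mmap(NULL, new_size, PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    assert(old != MAP_FAILED && dst != MAP_FAILED);
    memcpy(old, "huge", 5);

    /* Move (and grow) the mapping onto dst; the old range is unmapped and
     * the page contents travel with the mapping. */
    char *moved = mremap(old, old_size, new_size,
                         MREMAP_MAYMOVE | MREMAP_FIXED, dst);
    assert(moved != MAP_FAILED && strcmp(moved, "huge") == 0);

    munmap(moved, new_size);
    return 0;
}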