/*
 * Set up the given pool's huge-allocation bookkeeping: its mutex and
 * its address-ordered extent tree.  Returns true on failure (mutex
 * could not be initialized), false on success.
 */
bool
huge_boot(pool_t *pool)
{
	bool failed;

	failed = malloc_mutex_init(&pool->huge_mtx);
	if (!failed)
		extent_tree_ad_new(&pool->huge);
	return (failed);
}
/*
 * Initialize DSS (sbrk-based) chunk allocation state: the DSS mutex,
 * the cached break pointers, and the size/address extent trees.
 * Returns true on failure, false on success.
 */
bool
chunk_dss_boot(void)
{

	if (malloc_mutex_init(&dss_mtx))
		return (true);
	dss_base = sbrk(0);
	/*
	 * sbrk() reports failure by returning (void *)-1; bail out rather
	 * than cache a bogus break address as the DSS base/limit.
	 */
	if (dss_base == (void *)-1)
		return (true);
	dss_prev = dss_base;
	dss_max = dss_base;
	extent_tree_szad_new(&dss_chunks_szad);
	extent_tree_ad_new(&dss_chunks_ad);
	return (false);
}
/*
 * Initialize huge-allocation state: the protecting mutex, the extent
 * tree of live huge allocations, and (when statistics are enabled) the
 * huge-allocation counters.  Returns true on failure, false on success.
 */
bool
huge_boot(void)
{

	if (malloc_mutex_init(&huge_mtx))
		return (true);
	extent_tree_ad_new(&huge);

	if (config_stats) {
		/* Start all huge-allocation counters from zero. */
		huge_nmalloc = 0;
		huge_ndalloc = 0;
		huge_allocated = 0;
	}

	return (false);
}
bool huge_boot(void) { /* Initialize chunks data. */ if (malloc_mutex_init(&huge_mtx)) return (true); extent_tree_ad_new(&huge); #ifdef JEMALLOC_STATS huge_nmalloc = 0; huge_ndalloc = 0; huge_allocated = 0; #endif return (false); }
bool chunk_swap_boot(void) { if (malloc_mutex_init(&swap_mtx)) return (true); swap_enabled = false; swap_prezeroed = false; /* swap.* mallctl's depend on this. */ swap_nfds = 0; swap_fds = NULL; #ifdef JEMALLOC_STATS swap_avail = 0; #endif swap_base = NULL; swap_end = NULL; swap_max = NULL; extent_tree_szad_new(&swap_chunks_szad); extent_tree_ad_new(&swap_chunks_ad); return (false); }
/* One-time setup of the global huge-allocation extent tree. */
COLD void
huge_init(void)
{

	extent_tree_ad_new(&huge_global);
}