void * huge_palloc(size_t size, size_t alignment, bool zero) { void *ret; size_t csize; extent_node_t *node; bool is_zeroed; /* Allocate one or more contiguous chunks for this request. */ csize = CHUNK_CEILING(size); if (csize == 0) { /* size is large enough to cause size_t wrap-around. */ return (NULL); } /* Allocate an extent node with which to track the chunk. */ node = base_node_alloc(); if (node == NULL) return (NULL); /* * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that * it is possible to make correct junk/zero fill decisions below. */ is_zeroed = zero; ret = chunk_alloc(csize, alignment, false, &is_zeroed, chunk_dss_prec_get()); if (ret == NULL) { base_node_dealloc(node); return (NULL); } /* Insert node into huge. */ node->addr = ret; node->size = csize; malloc_mutex_lock(&huge_mtx); extent_tree_ad_insert(&huge, node); if (config_stats) { stats_cactive_add(csize); huge_nmalloc++; huge_allocated += csize; } malloc_mutex_unlock(&huge_mtx); if (config_fill && zero == false) { if (opt_junk) memset(ret, 0xa5, csize); else if (opt_zero && is_zeroed == false) memset(ret, 0, csize); } return (ret); }
static int arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret, i; bool match, err; const char *dss; unsigned arena_ind = mib[1]; dss_prec_t dss_prec_old = dss_prec_limit; dss_prec_t dss_prec = dss_prec_limit; malloc_mutex_lock(&ctl_mtx); WRITE(dss, const char *); match = false; for (i = 0; i < dss_prec_limit; i++) { if (strcmp(dss_prec_names[i], dss) == 0) { dss_prec = i; match = true; break; } } if (match == false) { ret = EINVAL; goto label_return; } if (arena_ind < ctl_stats.narenas) { arena_t *arena = arenas[arena_ind]; if (arena != NULL) { dss_prec_old = arena_dss_prec_get(arena); arena_dss_prec_set(arena, dss_prec); err = false; } else err = true; } else { dss_prec_old = chunk_dss_prec_get(); err = chunk_dss_prec_set(dss_prec); } dss = dss_prec_names[dss_prec_old]; READ(dss, const char *); if (err) { ret = EFAULT; goto label_return; } ret = 0; label_return: malloc_mutex_unlock(&ctl_mtx); return (ret); }
/*
 * Reserve a chunk-aligned region of at least |minsize| bytes to back the
 * base allocator, initializing the base_pages/base_next_addr/base_past_addr
 * bump-allocation cursors.  Returns true on failure, false on success.
 */
static bool
base_pages_alloc(size_t minsize)
{
	size_t alloc_size;
	bool zeroed;

	assert(minsize != 0);

	/* Base pages are allocated in whole-chunk units. */
	alloc_size = CHUNK_CEILING(minsize);
	zeroed = false;
	base_pages = chunk_alloc(alloc_size, chunksize, true, &zeroed,
	    chunk_dss_prec_get());
	if (base_pages == NULL)
		return (true);

	/* Set up the bump-allocation cursor and its limit. */
	base_next_addr = base_pages;
	base_past_addr = (void *)((uintptr_t)base_pages + alloc_size);

	return (false);
}