static void
base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr,
    size_t size) {
	/*
	 * Cascade through dalloc, decommit, purge_forced, and purge_lazy,
	 * stopping at first success.  This cascade is performed for consistency
	 * with the cascade in extent_dalloc_wrapper() because an application's
	 * custom hooks may not support e.g. dalloc.  This function is only ever
	 * called as a side effect of arena destruction, so although it might
	 * seem pointless to do anything besides dalloc here, the application
	 * may in fact want the end state of all associated virtual memory to be
	 * in some consistent-but-allocated state.
	 */
	if (extent_hooks == &extent_hooks_default) {
		if (!extent_dalloc_mmap(addr, size)) {
			goto label_done;
		}
		if (!pages_decommit(addr, size)) {
			goto label_done;
		}
		if (!pages_purge_forced(addr, size)) {
			goto label_done;
		}
		if (!pages_purge_lazy(addr, size)) {
			goto label_done;
		}
		/* Nothing worked.  This should never happen. */
		not_reached();
	} else {
		tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
		pre_reentrancy(tsd, NULL);
		if (extent_hooks->dalloc != NULL &&
		    !extent_hooks->dalloc(extent_hooks, addr, size, true,
		    ind)) {
			goto label_post_reentrancy;
		}
		if (extent_hooks->decommit != NULL &&
		    !extent_hooks->decommit(extent_hooks, addr, size, 0, size,
		    ind)) {
			goto label_post_reentrancy;
		}
		if (extent_hooks->purge_forced != NULL &&
		    !extent_hooks->purge_forced(extent_hooks, addr, size, 0,
		    size, ind)) {
			goto label_post_reentrancy;
		}
		if (extent_hooks->purge_lazy != NULL &&
		    !extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size,
		    ind)) {
			goto label_post_reentrancy;
		}
		/* Nothing worked.  That's the application's problem. */
	label_post_reentrancy:
		post_reentrancy(tsd);
	}
label_done:
	if (metadata_thp_madvise()) {
		/* Set NOHUGEPAGE after unmap to avoid kernel defrag. */
		assert(((uintptr_t)addr & HUGEPAGE_MASK) == 0 &&
		    (size & HUGEPAGE_MASK) == 0);
		pages_nohuge(addr, size);
	}
}
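For reference, here is a minimal sketch of the kind of custom hook table the non-default branch above has to cope with. The hypothetical hook_dalloc() declines to unmap (returning true signals failure or retained ownership, which is exactly what makes the cascade necessary), so base_unmap() falls through until the hypothetical hook_purge_lazy() reports success, leaving the memory mapped but discardable, i.e. the "consistent-but-allocated" end state described in the comment. This assumes the extent_hooks_t type and hook signatures from jemalloc's public header; only the members the cascade touches are filled in, and a usable hook table would also need alloc and the remaining members.

#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>

#include <jemalloc/jemalloc.h>

/*
 * Decline deallocation: returning true tells jemalloc the hook retains
 * ownership of the mapping, so base_unmap() moves on to the next hook in
 * the cascade.
 */
static bool
hook_dalloc(extent_hooks_t *hooks, void *addr, size_t size, bool committed,
    unsigned arena_ind) {
	(void)hooks; (void)addr; (void)size; (void)committed; (void)arena_ind;
	return true;
}

/*
 * Lazily purge the range instead (madvise(MADV_DONTNEED) is used here as a
 * simple stand-in): the pages stay mapped, but the kernel may reclaim their
 * contents.  Returning false reports success and stops the cascade.
 */
static bool
hook_purge_lazy(extent_hooks_t *hooks, void *addr, size_t size, size_t offset,
    size_t length, unsigned arena_ind) {
	(void)hooks; (void)size; (void)arena_ind;
	return madvise((char *)addr + offset, length, MADV_DONTNEED) != 0;
}

/* Hypothetical hook table; only the members exercised above are shown. */
static extent_hooks_t pool_hooks = {
	.dalloc = hook_dalloc,
	.purge_lazy = hook_purge_lazy,
};

Such a table would typically be installed through the "arena.<i>.extent_hooks" mallctl before the arena is used.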
void *
chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
    bool *zero, bool *commit)
{
	cassert(have_dss);
	assert(size > 0 && (size & chunksize_mask) == 0);
	assert(alignment > 0 && (alignment & chunksize_mask) == 0);

	/*
	 * sbrk() uses a signed increment argument, so take care not to
	 * interpret a huge allocation request as a negative increment.
	 */
	if ((intptr_t)size < 0)
		return (NULL);

	malloc_mutex_lock(&dss_mtx);
	if (dss_prev != (void *)-1) {
		/*
		 * The loop is necessary to recover from races with other
		 * threads that are using the DSS for something other than
		 * malloc.
		 */
		do {
			void *ret, *cpad, *dss_next;
			size_t gap_size, cpad_size;
			intptr_t incr;

			/* Avoid an unnecessary system call. */
			if (new_addr != NULL && dss_max != new_addr)
				break;

			/* Get the current end of the DSS. */
			dss_max = chunk_dss_sbrk(0);

			/* Make sure the earlier condition still holds. */
			if (new_addr != NULL && dss_max != new_addr)
				break;

			/*
			 * Calculate how much padding is necessary to
			 * chunk-align the end of the DSS.
			 */
			gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) &
			    chunksize_mask;
			/*
			 * Compute how much chunk-aligned pad space (if any) is
			 * necessary to satisfy alignment.  This space can be
			 * recycled for later use.
			 */
			cpad = (void *)((uintptr_t)dss_max + gap_size);
			ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max,
			    alignment);
			cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
			dss_next = (void *)((uintptr_t)ret + size);
			if ((uintptr_t)ret < (uintptr_t)dss_max ||
			    (uintptr_t)dss_next < (uintptr_t)dss_max) {
				/* Wrap-around. */
				malloc_mutex_unlock(&dss_mtx);
				return (NULL);
			}
			incr = gap_size + cpad_size + size;
			dss_prev = chunk_dss_sbrk(incr);
			if (dss_prev == dss_max) {
				/* Success. */
				dss_max = dss_next;
				malloc_mutex_unlock(&dss_mtx);
				if (cpad_size != 0) {
					chunk_hooks_t chunk_hooks =
					    CHUNK_HOOKS_INITIALIZER;
					chunk_dalloc_wrapper(arena,
					    &chunk_hooks, cpad, cpad_size,
					    true);
				}
				if (*zero) {
					JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
					    ret, size);
					memset(ret, 0, size);
				}
				if (!*commit)
					*commit = pages_decommit(ret, size);
				return (ret);
			}
		} while (dss_prev != (void *)-1);
	}
	malloc_mutex_unlock(&dss_mtx);

	return (NULL);
}
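To make the padding arithmetic concrete, here is a small self-contained sketch that reproduces the gap_size / cpad / ret / incr computation with made-up numbers: 2 MiB chunks, a 4 MiB alignment request, and a hypothetical non-chunk-aligned break address. ADDR2OFFSET() and ALIGN_CEIL() are simplified stand-ins for jemalloc's CHUNK_ADDR2OFFSET() and ALIGNMENT_CEILING() macros, not the real definitions.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define CHUNKSIZE	((uintptr_t)1 << 21)		/* 2 MiB chunks (assumed). */
#define CHUNKSIZE_MASK	(CHUNKSIZE - 1)
#define ADDR2OFFSET(a)	((uintptr_t)(a) & CHUNKSIZE_MASK)
#define ALIGN_CEIL(s, a) (((s) + (a) - 1) & ~((a) - 1))	/* a must be a power of two. */

int
main(void)
{
	uintptr_t dss_max = 0x1943000;	/* Hypothetical, non-chunk-aligned break. */
	uintptr_t alignment = (uintptr_t)1 << 22;	/* 4 MiB alignment request. */
	uintptr_t size = CHUNKSIZE;	/* One chunk requested. */

	/* Pad up to the next chunk boundary; this part is unusable gap. */
	uintptr_t gap_size = (CHUNKSIZE - ADDR2OFFSET(dss_max)) & CHUNKSIZE_MASK;
	/* Chunk-aligned space between cpad and ret can be recycled later. */
	uintptr_t cpad = dss_max + gap_size;
	uintptr_t ret = ALIGN_CEIL(dss_max, alignment);
	uintptr_t cpad_size = ret - cpad;
	uintptr_t incr = gap_size + cpad_size + size;

	assert(cpad == 0x1a00000);		/* Next chunk boundary. */
	assert(ret == 0x1c00000);		/* Next 4 MiB boundary. */
	assert(cpad_size == CHUNKSIZE);		/* One whole chunk gets recycled. */
	assert(dss_max + incr == ret + size);	/* sbrk(incr) lands exactly at dss_next. */

	printf("gap=%#zx cpad=%#zx ret=%#zx cpad_size=%#zx incr=%#zx\n",
	    (size_t)gap_size, (size_t)cpad, (size_t)ret, (size_t)cpad_size,
	    (size_t)incr);
	return 0;
}

The final assertion mirrors the success case above: when sbrk(incr) returns the old dss_max, the new break is exactly dss_next = ret + size, and the cpad region is handed back through chunk_dalloc_wrapper() for reuse.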