TEST_END

TEST_BEGIN(test_basic) {
#define MAXSZ (((size_t)1) << 26)
    size_t sz;

    for (sz = 1; sz < MAXSZ; sz = nallocx(sz, 0) + 1) {
        size_t nsz, rsz;
        void *p;

        nsz = nallocx(sz, 0);
        assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
        p = mallocx(sz, 0);
        assert_ptr_not_null(p, "Unexpected mallocx() error");
        rsz = sallocx(p, 0);
        assert_zu_ge(rsz, sz, "Real size smaller than expected");
        assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch");
        dallocx(p, 0);

        p = mallocx(sz, 0);
        assert_ptr_not_null(p, "Unexpected mallocx() error");
        dallocx(p, 0);

        nsz = nallocx(sz, MALLOCX_ZERO);
        assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
        p = mallocx(sz, MALLOCX_ZERO);
        assert_ptr_not_null(p, "Unexpected mallocx() error");
        rsz = sallocx(p, 0);
        assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch");
        dallocx(p, 0);
    }
#undef MAXSZ
}
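For reference, a minimal sketch of the nallocx()/sallocx() contract that test_basic exercises: nallocx() predicts, without allocating, the real (usable) size that mallocx() would return for a request, and sallocx() reports the real size of a live allocation; the two must agree. The helper name report_usable_size is illustrative, not part of jemalloc.

#include <jemalloc/jemalloc.h>
#include <stdio.h>

/* Hypothetical helper: show that the predicted and actual sizes agree. */
static void
report_usable_size(size_t request) {
    size_t predicted = nallocx(request, 0);  /* no allocation performed */
    void *p = mallocx(request, 0);
    if (p == NULL) {
        return;
    }
    size_t actual = sallocx(p, 0);           /* real (usable) size of p */
    printf("request=%zu predicted=%zu actual=%zu\n",
        request, predicted, actual);         /* predicted == actual */
    dallocx(p, 0);
}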
TEST_END

TEST_BEGIN(huge_mallocx) {
    unsigned arena1, arena2;
    size_t sz = sizeof(unsigned);

    assert_d_eq(mallctl("arenas.create", &arena1, &sz, NULL, 0), 0,
        "Failed to create arena");
    void *huge = mallocx(HUGE_SZ, MALLOCX_ARENA(arena1));
    assert_ptr_not_null(huge, "Failed to allocate huge size");
    assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &huge,
        sizeof(huge)), 0, "Unexpected mallctl() failure");
    assert_u_eq(arena1, arena2, "Wrong arena used for mallocx");
    dallocx(huge, MALLOCX_ARENA(arena1));

    void *huge2 = mallocx(HUGE_SZ, 0);
    assert_ptr_not_null(huge2, "Failed to allocate huge size");
    assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &huge2,
        sizeof(huge2)), 0, "Unexpected mallctl() failure");
    assert_u_ne(arena1, arena2,
        "Huge allocation should not come from the manual arena");
    assert_u_ne(arena2, 0,
        "Huge allocation should not come from arena 0");
    dallocx(huge2, 0);
}
TEST_END

TEST_BEGIN(huge_allocation) {
    unsigned arena1, arena2;

    void *ptr = mallocx(HUGE_SZ, 0);
    assert_ptr_not_null(ptr, "Failed to allocate huge size");
    size_t sz = sizeof(unsigned);
    assert_d_eq(mallctl("arenas.lookup", &arena1, &sz, &ptr, sizeof(ptr)),
        0, "Unexpected mallctl() failure");
    assert_u_gt(arena1, 0, "Huge allocation should not come from arena 0");
    dallocx(ptr, 0);

    ptr = mallocx(HUGE_SZ >> 1, 0);
    assert_ptr_not_null(ptr, "Failed to allocate half huge size");
    assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr, sizeof(ptr)),
        0, "Unexpected mallctl() failure");
    assert_u_ne(arena1, arena2, "Wrong arena used for half huge");
    dallocx(ptr, 0);

    ptr = mallocx(SMALL_SZ, MALLOCX_TCACHE_NONE);
    assert_ptr_not_null(ptr, "Failed to allocate small size");
    assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &ptr, sizeof(ptr)),
        0, "Unexpected mallctl() failure");
    assert_u_ne(arena1, arena2,
        "Huge and small should be from different arenas");
    dallocx(ptr, 0);
}
TEST_END

TEST_BEGIN(test_stats_arenas_summary) {
    unsigned arena;
    void *little, *large, *huge;
    uint64_t epoch;
    size_t sz;
    int expected = config_stats ? 0 : ENOENT;
    size_t mapped;
    uint64_t npurge, nmadvise, purged;

    arena = 0;
    assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena,
        sizeof(arena)), 0, "Unexpected mallctl() failure");

    little = mallocx(SMALL_MAXCLASS, 0);
    assert_ptr_not_null(little, "Unexpected mallocx() failure");
    large = mallocx(large_maxclass, 0);
    assert_ptr_not_null(large, "Unexpected mallocx() failure");
    huge = mallocx(chunksize, 0);
    assert_ptr_not_null(huge, "Unexpected mallocx() failure");

    dallocx(little, 0);
    dallocx(large, 0);
    dallocx(huge, 0);

    assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
        "Unexpected mallctl() failure");

    assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
        0, "Unexpected mallctl() failure");

    sz = sizeof(size_t);
    assert_d_eq(mallctl("stats.arenas.0.mapped", (void *)&mapped, &sz,
        NULL, 0), expected, "Unexpected mallctl() result");
    sz = sizeof(uint64_t);
    assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge, &sz,
        NULL, 0), expected, "Unexpected mallctl() result");
    assert_d_eq(mallctl("stats.arenas.0.nmadvise", (void *)&nmadvise, &sz,
        NULL, 0), expected, "Unexpected mallctl() result");
    assert_d_eq(mallctl("stats.arenas.0.purged", (void *)&purged, &sz,
        NULL, 0), expected, "Unexpected mallctl() result");

    if (config_stats) {
        assert_u64_gt(npurge, 0,
            "At least one purge should have occurred");
        assert_u64_le(nmadvise, purged,
            "nmadvise should be no greater than purged");
    }
}
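The stats tests above rely on jemalloc's mallctl() protocol: statistics are snapshotted only when the "epoch" control is written, after which the individual counters can be read. A minimal sketch of that pattern outside the test harness; the helper name read_stat_zu is illustrative, and "stats.allocated" stands in for any size_t-valued stat.

#include <jemalloc/jemalloc.h>
#include <stdbool.h>
#include <stdint.h>

/* Refresh jemalloc's stats snapshot, then read one size_t-valued counter. */
static bool
read_stat_zu(const char *name, size_t *val) {
    uint64_t epoch = 1;
    size_t sz = sizeof(epoch);

    /* Writing "epoch" makes subsequent stats reads reflect current state. */
    if (mallctl("epoch", NULL, NULL, &epoch, sz) != 0) {
        return false;
    }
    sz = sizeof(*val);
    return mallctl(name, val, &sz, NULL, 0) == 0;
}

/* Usage: size_t allocated; read_stat_zu("stats.allocated", &allocated); */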
TEST_END

TEST_BEGIN(test_extra_small) {
    size_t small0, small1, hugemax;
    void *p;

    /* Get size classes. */
    small0 = get_small_size(0);
    small1 = get_small_size(1);
    hugemax = get_huge_size(get_nhuge()-1);

    p = mallocx(small0, 0);
    assert_ptr_not_null(p, "Unexpected mallocx() error");

    assert_zu_eq(xallocx(p, small1, 0, 0), small0,
        "Unexpected xallocx() behavior");

    assert_zu_eq(xallocx(p, small1, 0, 0), small0,
        "Unexpected xallocx() behavior");

    assert_zu_eq(xallocx(p, small0, small1 - small0, 0), small0,
        "Unexpected xallocx() behavior");

    /* Test size+extra overflow. */
    assert_zu_eq(xallocx(p, small0, hugemax - small0 + 1, 0), small0,
        "Unexpected xallocx() behavior");
    assert_zu_eq(xallocx(p, small0, SIZE_T_MAX - small0, 0), small0,
        "Unexpected xallocx() behavior");

    dallocx(p, 0);
}
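xallocx(p, size, extra, flags) tries to resize p in place to at least size bytes (opportunistically up to size+extra) and returns the resulting real size; it never moves the allocation, which is why the asserts above expect the size to stay at small0. A hedged sketch of the common idiom built on that contract — grow in place when possible, otherwise fall back to rallocx(); the helper name grow_pref_inplace is illustrative.

#include <jemalloc/jemalloc.h>

/*
 * Illustrative helper (not a jemalloc API): grow *pp to at least min_size
 * bytes (min_size must be nonzero), preferring in-place expansion. Returns
 * the new real size, or 0 on failure (in which case *pp is untouched).
 */
static size_t
grow_pref_inplace(void **pp, size_t min_size) {
    size_t usable = xallocx(*pp, min_size, 0, 0); /* in-place attempt */
    if (usable >= min_size) {
        return usable;                            /* grew without moving */
    }
    void *q = rallocx(*pp, min_size, 0);          /* may move the data */
    if (q == NULL) {
        return 0;
    }
    *pp = q;
    return sallocx(q, 0);
}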
void
data_cleanup(int *data) {
    if (data_cleanup_count == 0) {
        assert_x_eq(*data, MALLOC_TSD_TEST_DATA_INIT,
            "Argument passed into cleanup function should match tsd "
            "value");
    }
    ++data_cleanup_count;

    /*
     * Allocate during cleanup for two rounds, to ensure that jemalloc's
     * internal tsd reinitialization happens.
     */
    bool reincarnate = false;
    switch (*data) {
    case MALLOC_TSD_TEST_DATA_INIT:
        *data = 1;
        reincarnate = true;
        break;
    case 1:
        *data = 2;
        reincarnate = true;
        break;
    case 2:
        return;
    default:
        not_reached();
    }

    if (reincarnate) {
        void *p = mallocx(1, 0);
        assert_ptr_not_null(p, "Unexpected mallocx() failure");
        dallocx(p, 0);
    }
}
static void *
thd_start(void *varg) {
    unsigned thd_ind = *(unsigned *)varg;
    size_t bt_count_prev, bt_count;
    unsigned i_prev, i;

    i_prev = 0;
    bt_count_prev = 0;
    for (i = 0; i < NALLOCS_PER_THREAD; i++) {
        void *p = alloc_from_permuted_backtrace(thd_ind, i);
        dallocx(p, 0);
        if (i % DUMP_INTERVAL == 0) {
            assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
                0, "Unexpected error while dumping heap profile");
        }

        if (i % BT_COUNT_CHECK_INTERVAL == 0 ||
            i+1 == NALLOCS_PER_THREAD) {
            bt_count = prof_bt_count();
            assert_zu_le(bt_count_prev+(i-i_prev), bt_count,
                "Expected larger backtrace count increase");
            i_prev = i;
            bt_count_prev = bt_count;
        }
    }

    return (NULL);
}
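The "prof.dump" control, triggered above with a NULL write value (which uses jemalloc's default opt.prof_prefix-based file naming), can also be pointed at an explicit path. A hedged sketch; this requires jemalloc built with profiling and run with it enabled (e.g. MALLOC_CONF=prof:true), and the helper name dump_heap_profile is illustrative.

#include <jemalloc/jemalloc.h>
#include <stdbool.h>

/* Dump the current heap profile to a caller-chosen path. */
static bool
dump_heap_profile(const char *path) {
    /* The write value for "prof.dump" is a const char * filename. */
    return mallctl("prof.dump", NULL, NULL, (void *)&path,
        sizeof(path)) == 0;
}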
TEST_END

TEST_BEGIN(test_align) {
    void *p, *q;
    size_t align;
#define MAX_ALIGN (ZU(1) << 29)

    align = ZU(1);
    p = mallocx(1, MALLOCX_ALIGN(align));
    assert_ptr_not_null(p, "Unexpected mallocx() error");

    for (align <<= 1; align <= MAX_ALIGN; align <<= 1) {
        q = rallocx(p, 1, MALLOCX_ALIGN(align));
        assert_ptr_not_null(q,
            "Unexpected rallocx() error for align=%zu", align);
        assert_ptr_null(
            (void *)((uintptr_t)q & (align-1)),
            "%p inadequately aligned for align=%zu", q, align);
        p = q;
    }
    dallocx(p, 0);
#undef MAX_ALIGN
}
TEST_END

TEST_BEGIN(test_oom) {
    size_t hugemax, size, alignment;

    hugemax = get_huge_size(get_nhuge()-1);

    /*
     * It should be impossible to allocate two objects that each consume
     * more than half the virtual address space.
     */
    {
        void *p;

        p = mallocx(hugemax, 0);
        if (p != NULL) {
            assert_ptr_null(mallocx(hugemax, 0),
                "Expected OOM for mallocx(size=%#zx, 0)", hugemax);
            dallocx(p, 0);
        }
    }

#if LG_SIZEOF_PTR == 3
    size = ZU(0x8000000000000000);
    alignment = ZU(0x8000000000000000);
#else
    size = ZU(0x80000000);
    alignment = ZU(0x80000000);
#endif
    assert_ptr_null(mallocx(size, MALLOCX_ALIGN(alignment)),
        "Expected OOM for mallocx(size=%#zx, MALLOCX_ALIGN(%#zx))",
        size, alignment);
}
TEST_END

TEST_BEGIN(test_overflow) {
    size_t largemax;
    void *p;

    largemax = get_large_size(get_nlarge()-1);

    p = mallocx(1, 0);
    assert_ptr_not_null(p, "Unexpected mallocx() failure");

    assert_ptr_null(rallocx(p, largemax+1, 0),
        "Expected OOM for rallocx(p, size=%#zx, 0)", largemax+1);

    assert_ptr_null(rallocx(p, ZU(PTRDIFF_MAX)+1, 0),
        "Expected OOM for rallocx(p, size=%#zx, 0)", ZU(PTRDIFF_MAX)+1);

    assert_ptr_null(rallocx(p, SIZE_T_MAX, 0),
        "Expected OOM for rallocx(p, size=%#zx, 0)", SIZE_T_MAX);

    assert_ptr_null(rallocx(p, 1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)),
        "Expected OOM for rallocx(p, size=1, MALLOCX_ALIGN(%#zx))",
        ZU(PTRDIFF_MAX)+1);

    dallocx(p, 0);
}
TEST_END

TEST_BEGIN(test_size_extra_overflow) {
    size_t small0, hugemax;
    void *p;

    /* Get size classes. */
    small0 = get_small_size(0);
    hugemax = get_huge_size(get_nhuge()-1);

    p = mallocx(small0, 0);
    assert_ptr_not_null(p, "Unexpected mallocx() error");

    /* Test overflows that can be resolved by clamping extra. */
    assert_zu_le(xallocx(p, hugemax-1, 2, 0), hugemax,
        "Unexpected xallocx() behavior");
    assert_zu_le(xallocx(p, hugemax, 1, 0), hugemax,
        "Unexpected xallocx() behavior");

    /* Test overflow such that hugemax-size underflows. */
    assert_zu_le(xallocx(p, hugemax+1, 2, 0), hugemax,
        "Unexpected xallocx() behavior");
    assert_zu_le(xallocx(p, hugemax+2, 3, 0), hugemax,
        "Unexpected xallocx() behavior");
    assert_zu_le(xallocx(p, SIZE_T_MAX-2, 2, 0), hugemax,
        "Unexpected xallocx() behavior");
    assert_zu_le(xallocx(p, SIZE_T_MAX-1, 1, 0), hugemax,
        "Unexpected xallocx() behavior");

    dallocx(p, 0);
}
static void
test_junk(size_t sz_min, size_t sz_max) {
    char *s;
    size_t sz_prev, sz, i;

    arena_dalloc_junk_small_orig = arena_dalloc_junk_small;
    arena_dalloc_junk_small = arena_dalloc_junk_small_intercept;
    arena_dalloc_junk_large_orig = arena_dalloc_junk_large;
    arena_dalloc_junk_large = arena_dalloc_junk_large_intercept;
    huge_dalloc_junk_orig = huge_dalloc_junk;
    huge_dalloc_junk = huge_dalloc_junk_intercept;

    sz_prev = 0;
    s = (char *)mallocx(sz_min, 0);
    assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");

    for (sz = sallocx(s, 0); sz <= sz_max;
        sz_prev = sz, sz = sallocx(s, 0)) {
        if (sz_prev > 0) {
            assert_c_eq(s[0], 'a',
                "Previously allocated byte %zu/%zu is corrupted",
                ZU(0), sz_prev);
            assert_c_eq(s[sz_prev-1], 'a',
                "Previously allocated byte %zu/%zu is corrupted",
                sz_prev-1, sz_prev);
        }

        for (i = sz_prev; i < sz; i++) {
            assert_c_eq(s[i], 0xa5,
                "Newly allocated byte %zu/%zu isn't junk-filled",
                i, sz);
            s[i] = 'a';
        }

        if (xallocx(s, sz+1, 0, 0) == sz) {
            void *junked = (void *)s;

            s = (char *)rallocx(s, sz+1, 0);
            assert_ptr_not_null((void *)s,
                "Unexpected rallocx() failure");
            if (!config_mremap || sz+1 <= arena_maxclass) {
                assert_ptr_eq(most_recently_junked, junked,
                    "Expected region of size %zu to be "
                    "junk-filled", sz);
            }
        }
    }

    dallocx(s, 0);
    assert_ptr_eq(most_recently_junked, (void *)s,
        "Expected region of size %zu to be junk-filled", sz);

    arena_dalloc_junk_small = arena_dalloc_junk_small_orig;
    arena_dalloc_junk_large = arena_dalloc_junk_large_orig;
    huge_dalloc_junk = huge_dalloc_junk_orig;
}
TEST_END

TEST_BEGIN(test_alignment_and_size) {
#define MAXALIGN (((size_t)1) << 25)
#define NITER 4
    size_t nsz, rsz, sz, alignment, total;
    unsigned i;
    void *ps[NITER];

    for (i = 0; i < NITER; i++)
        ps[i] = NULL;

    for (alignment = 8; alignment <= MAXALIGN; alignment <<= 1) {
        total = 0;
        for (sz = 1; sz < 3 * alignment && sz < (1U << 31);
            sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
            for (i = 0; i < NITER; i++) {
                nsz = nallocx(sz, MALLOCX_ALIGN(alignment) |
                    MALLOCX_ZERO);
                assert_zu_ne(nsz, 0,
                    "nallocx() error for alignment=%zu, "
                    "size=%zu (%#zx)", alignment, sz, sz);
                ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) |
                    MALLOCX_ZERO);
                assert_ptr_not_null(ps[i],
                    "mallocx() error for alignment=%zu, "
                    "size=%zu (%#zx)", alignment, sz, sz);
                rsz = sallocx(ps[i], 0);
                assert_zu_ge(rsz, sz,
                    "Real size smaller than expected for "
                    "alignment=%zu, size=%zu", alignment, sz);
                assert_zu_eq(nsz, rsz,
                    "nallocx()/sallocx() size mismatch for "
                    "alignment=%zu, size=%zu", alignment, sz);
                assert_ptr_null(
                    (void *)((uintptr_t)ps[i] & (alignment-1)),
                    "%p inadequately aligned for"
                    " alignment=%zu, size=%zu", ps[i],
                    alignment, sz);
                total += rsz;
                if (total >= (MAXALIGN << 1))
                    break;
            }
            for (i = 0; i < NITER; i++) {
                if (ps[i] != NULL) {
                    dallocx(ps[i], 0);
                    ps[i] = NULL;
                }
            }
        }
    }
#undef MAXALIGN
#undef NITER
}
static void
alloc_free_size(size_t sz) {
    /* Use the requested size; the parameter was previously ignored. */
    void *ptr = mallocx(sz, 0);
    free(ptr);
    ptr = mallocx(sz, 0);
    free(ptr);
    ptr = mallocx(sz, MALLOCX_TCACHE_NONE);
    dallocx(ptr, MALLOCX_TCACHE_NONE);
}
TEST_END

static void
malloc_dallocx(void) {
    void *p = malloc(1);
    if (p == NULL) {
        test_fail("Unexpected malloc() failure");
        return;
    }
    dallocx(p, 0);
}
static void
prof_sampling_probe_impl(bool expect_sample, const char *func, int line) {
    void *p;
    size_t expected_backtraces = expect_sample ? 1 : 0;

    assert_zu_eq(prof_bt_count(), 0, "%s():%d: Expected 0 backtraces",
        func, line);
    p = mallocx(1, 0);
    assert_ptr_not_null(p, "Unexpected mallocx() failure");
    assert_zu_eq(prof_bt_count(), expected_backtraces,
        "%s():%d: Unexpected backtrace count", func, line);
    dallocx(p, 0);
}
TEST_END

TEST_BEGIN(test_tcache_none) {
    test_skip_if(!opt_tcache);

    /* Allocate p and q. */
    void *p0 = mallocx(42, 0);
    assert_ptr_not_null(p0, "Unexpected mallocx() failure");
    void *q = mallocx(42, 0);
    assert_ptr_not_null(q, "Unexpected mallocx() failure");

    /* Deallocate p and q, but bypass the tcache for q. */
    dallocx(p0, 0);
    dallocx(q, MALLOCX_TCACHE_NONE);

    /* Make sure that tcache-based allocation returns p, not q. */
    void *p1 = mallocx(42, 0);
    assert_ptr_not_null(p1, "Unexpected mallocx() failure");
    assert_ptr_eq(p0, p1, "Expected tcache to allocate cached region");

    /* Clean up. */
    dallocx(p1, MALLOCX_TCACHE_NONE);
}
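The MALLOCX_TCACHE_NONE flag used above routes an allocation or deallocation around the calling thread's cache, going straight to the arena — which is why the cached p0 is returned by the next tcache-backed mallocx() while q is not. A hedged sketch of where that can matter in application code, freeing a large one-off buffer without letting it linger in the tcache; the helper name process_once is illustrative.

#include <jemalloc/jemalloc.h>
#include <string.h>

/*
 * Illustrative pattern (not a jemalloc API): allocate and release a
 * one-shot staging buffer while bypassing the thread cache, so the
 * memory is returned to the arena immediately rather than cached.
 */
static void
process_once(const void *src, size_t len) {
    void *buf = mallocx(len, MALLOCX_TCACHE_NONE);
    if (buf == NULL) {
        return;
    }
    memcpy(buf, src, len);
    /* ... use buf ... */
    dallocx(buf, MALLOCX_TCACHE_NONE);
}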
TEST_END

static void *
thd_receiver_start(void *arg) {
    mq_t *mq = (mq_t *)arg;
    unsigned i;

    for (i = 0; i < (NSENDERS * NMSGS); i++) {
        mq_msg_t *msg = mq_get(mq);
        assert_ptr_not_null(msg, "mq_get() should never return NULL");
        dallocx(msg, 0);
    }
    return NULL;
}
TEST_END

TEST_BEGIN(test_arenas_lookup) {
    unsigned arena, arena1;
    void *ptr;
    size_t sz = sizeof(unsigned);

    assert_d_eq(mallctl("arenas.create", (void *)&arena, &sz, NULL, 0), 0,
        "Unexpected mallctl() failure");
    ptr = mallocx(42, MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE);
    assert_ptr_not_null(ptr, "Unexpected mallocx() failure");
    assert_d_eq(mallctl("arenas.lookup", &arena1, &sz, &ptr, sizeof(ptr)),
        0, "Unexpected mallctl() failure");
    assert_u_eq(arena, arena1, "Unexpected arena index");
    dallocx(ptr, 0);
}
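The test above combines two controls: "arenas.create" mints a fresh arena index, and MALLOCX_ARENA() pins an allocation to it (with MALLOCX_TCACHE_NONE ensuring the request is not satisfied from the thread cache with another arena's memory). A minimal sketch of dedicating an arena to a subsystem, assuming the arena is created once at startup; the names subsystem_arena, subsystem_arena_init, and subsystem_alloc are illustrative.

#include <jemalloc/jemalloc.h>
#include <stdbool.h>

static unsigned subsystem_arena;   /* illustrative global, set once */

/* Create a dedicated arena; returns false on failure. */
static bool
subsystem_arena_init(void) {
    size_t sz = sizeof(subsystem_arena);
    return mallctl("arenas.create", &subsystem_arena, &sz, NULL, 0) == 0;
}

/* All of this subsystem's allocations come from the dedicated arena. */
static void *
subsystem_alloc(size_t size) {
    return mallocx(size, MALLOCX_ARENA(subsystem_arena) |
        MALLOCX_TCACHE_NONE);
}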
TEST_END

TEST_BEGIN(test_no_move_fail) {
    void *p;
    size_t sz, tsz;

    p = mallocx(42, 0);
    assert_ptr_not_null(p, "Unexpected mallocx() error");
    sz = sallocx(p, 0);

    tsz = xallocx(p, sz + 5, 0, 0);
    assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);

    dallocx(p, 0);
}
TEST_END

TEST_BEGIN(test_hooks_dalloc_simple) {
    /* "Simple" in the sense that we're not in a realloc variant. */
    hooks_t hooks = {NULL, &test_dalloc_hook, NULL, (void *)123};
    void *handle = hook_install(TSDN_NULL, &hooks);
    assert_ptr_ne(handle, NULL, "Hook installation failed");

    void *volatile ptr;

    /* free() */
    reset();
    ptr = malloc(1);
    free(ptr);
    assert_d_eq(call_count, 1, "Hook not called");
    assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
    assert_d_eq(arg_type, (int)hook_dalloc_free, "Wrong hook type");
    assert_ptr_eq(ptr, arg_address, "Wrong pointer freed");
    assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg");

    /* dallocx() */
    reset();
    ptr = malloc(1);
    dallocx(ptr, MALLOCX_TCACHE_NONE);
    assert_d_eq(call_count, 1, "Hook not called");
    assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
    assert_d_eq(arg_type, (int)hook_dalloc_dallocx, "Wrong hook type");
    assert_ptr_eq(ptr, arg_address, "Wrong pointer freed");
    assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg");
    assert_u64_eq((uintptr_t)MALLOCX_TCACHE_NONE, arg_args_raw[1],
        "Wrong raw arg");

    /* sdallocx() */
    reset();
    ptr = malloc(1);
    sdallocx(ptr, 1, MALLOCX_TCACHE_NONE);
    assert_d_eq(call_count, 1, "Hook not called");
    assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
    assert_d_eq(arg_type, (int)hook_dalloc_sdallocx, "Wrong hook type");
    assert_ptr_eq(ptr, arg_address, "Wrong pointer freed");
    assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg");
    assert_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong raw arg");
    assert_u64_eq((uintptr_t)MALLOCX_TCACHE_NONE, arg_args_raw[2],
        "Wrong raw arg");

    hook_remove(TSDN_NULL, handle);
}
TEST_END

TEST_BEGIN(test_stats_arenas_large) {
    unsigned arena;
    void *p;
    size_t sz, allocated;
    uint64_t epoch, nmalloc, ndalloc, nrequests;
    int expected = config_stats ? 0 : ENOENT;

    arena = 0;
    assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena,
        sizeof(arena)), 0, "Unexpected mallctl() failure");

    p = mallocx(large_maxclass, 0);
    assert_ptr_not_null(p, "Unexpected mallocx() failure");

    assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
        0, "Unexpected mallctl() failure");

    sz = sizeof(size_t);
    assert_d_eq(mallctl("stats.arenas.0.large.allocated",
        (void *)&allocated, &sz, NULL, 0), expected,
        "Unexpected mallctl() result");
    sz = sizeof(uint64_t);
    assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", (void *)&nmalloc,
        &sz, NULL, 0), expected, "Unexpected mallctl() result");
    assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", (void *)&ndalloc,
        &sz, NULL, 0), expected, "Unexpected mallctl() result");
    assert_d_eq(mallctl("stats.arenas.0.large.nrequests",
        (void *)&nrequests, &sz, NULL, 0), expected,
        "Unexpected mallctl() result");

    if (config_stats) {
        assert_zu_gt(allocated, 0,
            "allocated should be greater than zero");
        assert_u64_gt(nmalloc, 0,
            "nmalloc should be greater than zero");
        assert_u64_ge(nmalloc, ndalloc,
            "nmalloc should be at least as large as ndalloc");
        assert_u64_gt(nrequests, 0,
            "nrequests should be greater than zero");
    }

    dallocx(p, 0);
}
void SparseHeap::freeBig(void* ptr, MemoryUsageStats& stats) {
  // Since we account for these direct allocations in our usage and adjust
  // for them on allocation, we also need to adjust for them negatively on
  // free.
  auto cap = m_bigs.erase(ptr);
  stats.mm_freed += cap;
  stats.malloc_cap -= cap;
#ifdef USE_JEMALLOC
  assertx(nallocx(cap, 0) == sallocx(ptr, 0));
#if JEMALLOC_VERSION_MAJOR >= 4
  sdallocx(ptr, cap, 0);
#else
  dallocx(ptr, 0);
#endif
#else
  free(ptr);
#endif
}
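sdallocx(), used above when jemalloc 4+ is available, is the sized variant of dallocx(): the caller supplies the size it originally requested, letting jemalloc skip the metadata lookup that dallocx() must perform (the assertx above checks exactly the required invariant, that the stored size maps to the same size class as the live allocation). A hedged sketch of the same guard pattern as a standalone C helper; the name sized_free is illustrative.

#include <jemalloc/jemalloc.h>

/*
 * Illustrative sized-free wrapper (not part of either codebase): `size`
 * must be the value originally passed to mallocx(), since sdallocx()
 * requires the caller-supplied size to match the allocation.
 */
static void
sized_free(void *ptr, size_t size) {
#if JEMALLOC_VERSION_MAJOR >= 4
    sdallocx(ptr, size, 0);  /* avoids an internal size lookup */
#else
    (void)size;
    dallocx(ptr, 0);         /* older jemalloc: unsized deallocation */
#endif
}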
TEST_END

TEST_BEGIN(test_stats_arenas_lruns) {
    unsigned arena;
    void *p;
    uint64_t epoch, nmalloc, ndalloc, nrequests;
    size_t curruns, sz;
    int expected = config_stats ? 0 : ENOENT;

    arena = 0;
    assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena,
        sizeof(arena)), 0, "Unexpected mallctl() failure");

    p = mallocx(LARGE_MINCLASS, 0);
    assert_ptr_not_null(p, "Unexpected mallocx() failure");

    assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
        0, "Unexpected mallctl() failure");

    sz = sizeof(uint64_t);
    assert_d_eq(mallctl("stats.arenas.0.lruns.0.nmalloc", (void *)&nmalloc,
        &sz, NULL, 0), expected, "Unexpected mallctl() result");
    assert_d_eq(mallctl("stats.arenas.0.lruns.0.ndalloc", (void *)&ndalloc,
        &sz, NULL, 0), expected, "Unexpected mallctl() result");
    assert_d_eq(mallctl("stats.arenas.0.lruns.0.nrequests",
        (void *)&nrequests, &sz, NULL, 0), expected,
        "Unexpected mallctl() result");
    sz = sizeof(size_t);
    assert_d_eq(mallctl("stats.arenas.0.lruns.0.curruns", (void *)&curruns,
        &sz, NULL, 0), expected, "Unexpected mallctl() result");

    if (config_stats) {
        assert_u64_gt(nmalloc, 0,
            "nmalloc should be greater than zero");
        assert_u64_ge(nmalloc, ndalloc,
            "nmalloc should be at least as large as ndalloc");
        assert_u64_gt(nrequests, 0,
            "nrequests should be greater than zero");
        assert_u64_gt(curruns, 0,
            "At least one run should be currently allocated");
    }

    dallocx(p, 0);
}
static void
test_zero(size_t szmin, size_t szmax) {
    int flags = MALLOCX_ARENA(arena_ind()) | MALLOCX_ZERO;
    size_t sz, nsz;
    void *p;
#define FILL_BYTE 0x7aU

    sz = szmax;
    p = mallocx(sz, flags);
    assert_ptr_not_null(p, "Unexpected mallocx() error");
    assert_false(validate_fill(p, 0x00, 0, sz),
        "Memory not filled: sz=%zu", sz);

    /*
     * Fill with non-zero so that non-debug builds are more likely to
     * detect errors.
     */
    memset(p, FILL_BYTE, sz);
    assert_false(validate_fill(p, FILL_BYTE, 0, sz),
        "Memory not filled: sz=%zu", sz);

    /* Shrink in place so that we can expect growing in place to succeed. */
    sz = szmin;
    assert_zu_eq(xallocx(p, sz, 0, flags), sz,
        "Unexpected xallocx() error");
    assert_false(validate_fill(p, FILL_BYTE, 0, sz),
        "Memory not filled: sz=%zu", sz);

    for (sz = szmin; sz < szmax; sz = nsz) {
        nsz = nallocx(sz+1, flags);
        assert_zu_eq(xallocx(p, sz+1, 0, flags), nsz,
            "Unexpected xallocx() failure");
        assert_false(validate_fill(p, FILL_BYTE, 0, sz),
            "Memory not filled: sz=%zu", sz);
        assert_false(validate_fill(p, 0x00, sz, nsz-sz),
            "Memory not filled: sz=%zu, nsz-sz=%zu", sz, nsz-sz);
        memset((void *)((uintptr_t)p + sz), FILL_BYTE, nsz-sz);
        assert_false(validate_fill(p, FILL_BYTE, 0, nsz),
            "Memory not filled: nsz=%zu", nsz);
    }

    dallocx(p, flags);
}
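test_zero() above depends on the MALLOCX_ZERO guarantee: the returned memory is zeroed out to the full usable size (not just the requested size), and bytes gained when xallocx() grows the allocation are zeroed as well. A minimal calloc-style sketch on top of that guarantee; the helper name zalloc_array is illustrative.

#include <jemalloc/jemalloc.h>
#include <stdint.h>

/* calloc-like helper: overflow-checked, zero-filled array allocation. */
static void *
zalloc_array(size_t nmemb, size_t elem_size) {
    if (elem_size != 0 && nmemb > SIZE_MAX / elem_size) {
        return NULL;  /* nmemb * elem_size would overflow */
    }
    size_t total = nmemb * elem_size;
    /* mallocx() does not accept a size of 0, so round up to 1 byte. */
    return mallocx(total != 0 ? total : 1, MALLOCX_ZERO);
}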
void SparseHeap::reset() {
  TRACE(1, "heap-id %lu SparseHeap-reset: pooled_slabs %lu bigs %lu\n",
        tl_heap_id, m_pooled_slabs.size(), m_bigs.countBlocks());
#if !FOLLY_SANITIZE
  // trash fill is redundant with ASAN
  if (RuntimeOption::EvalTrashFillOnRequestExit) {
    m_bigs.iterate([&](HeapObject* h, size_t size) {
      memset(h, kSmallFreeFill, size);
    });
  }
#endif
  auto const do_free = [](void* ptr, size_t size) {
#ifdef USE_JEMALLOC
#if JEMALLOC_VERSION_MAJOR >= 4
    sdallocx(ptr, size, 0);
#else
    dallocx(ptr, 0);
#endif
#else
    free(ptr);
#endif
  };
  TaggedSlabList pooledSlabs;
  void* pooledSlabTail = nullptr;
  for (auto& slab : m_pooled_slabs) {
    if (!pooledSlabTail) pooledSlabTail = slab.ptr;
    pooledSlabs.push_front<true>(slab.ptr, slab.version);
    m_bigs.erase(slab.ptr);
  }
  if (pooledSlabTail) {
    m_slabManager->merge(pooledSlabs.head(), pooledSlabTail);
  }
  m_pooled_slabs.clear();
  m_hugeBytes = 0;
  m_bigs.iterate([&](HeapObject* h, size_t size) {
    do_free(h, size);
  });
  m_bigs.clear();
  m_slab_range = {nullptr, 0};
}
TEST_END

TEST_BEGIN(test_oom) {
    size_t hugemax;
    bool oom;
    void *ptrs[3];
    unsigned i;

    /*
     * It should be impossible to allocate three objects that each consume
     * nearly half the virtual address space.
     */
    hugemax = get_huge_size(get_nhuge()-1);
    oom = false;
    for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
        ptrs[i] = mallocx(hugemax, 0);
        if (ptrs[i] == NULL)
            oom = true;
    }
    assert_true(oom,
        "Expected OOM during series of calls to mallocx(size=%zu, 0)",
        hugemax);
    for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) {
        if (ptrs[i] != NULL)
            dallocx(ptrs[i], 0);
    }

#if LG_SIZEOF_PTR == 3
    assert_ptr_null(mallocx(0x8000000000000000ULL,
        MALLOCX_ALIGN(0x8000000000000000ULL)),
        "Expected OOM for mallocx()");
    assert_ptr_null(mallocx(0x8000000000000000ULL,
        MALLOCX_ALIGN(0x80000000)),
        "Expected OOM for mallocx()");
#else
    assert_ptr_null(mallocx(0x80000000UL, MALLOCX_ALIGN(0x80000000UL)),
        "Expected OOM for mallocx()");
#endif
}
static void
test_zero(size_t sz_min, size_t sz_max) {
    uint8_t *s;
    size_t sz_prev, sz, i;
#define MAGIC ((uint8_t)0x61)

    sz_prev = 0;
    s = (uint8_t *)mallocx(sz_min, 0);
    assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");

    for (sz = sallocx(s, 0); sz <= sz_max;
        sz_prev = sz, sz = sallocx(s, 0)) {
        if (sz_prev > 0) {
            assert_u_eq(s[0], MAGIC,
                "Previously allocated byte %zu/%zu is corrupted",
                ZU(0), sz_prev);
            assert_u_eq(s[sz_prev-1], MAGIC,
                "Previously allocated byte %zu/%zu is corrupted",
                sz_prev-1, sz_prev);
        }

        for (i = sz_prev; i < sz; i++) {
            assert_u_eq(s[i], 0x0,
                "Newly allocated byte %zu/%zu isn't zero-filled",
                i, sz);
            s[i] = MAGIC;
        }

        if (xallocx(s, sz+1, 0, 0) == sz) {
            s = (uint8_t *)rallocx(s, sz+1, 0);
            assert_ptr_not_null((void *)s,
                "Unexpected rallocx() failure");
        }
    }

    dallocx(s, 0);
#undef MAGIC
}
TEST_END

TEST_BEGIN(test_stats_huge) {
    void *p;
    uint64_t epoch;
    size_t allocated;
    uint64_t nmalloc, ndalloc, nrequests;
    size_t sz;
    int expected = config_stats ? 0 : ENOENT;

    p = mallocx(large_maxclass+1, 0);
    assert_ptr_not_null(p, "Unexpected mallocx() failure");

    assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
        0, "Unexpected mallctl() failure");

    sz = sizeof(size_t);
    assert_d_eq(mallctl("stats.arenas.0.huge.allocated",
        (void *)&allocated, &sz, NULL, 0), expected,
        "Unexpected mallctl() result");
    sz = sizeof(uint64_t);
    assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", (void *)&nmalloc,
        &sz, NULL, 0), expected, "Unexpected mallctl() result");
    assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", (void *)&ndalloc,
        &sz, NULL, 0), expected, "Unexpected mallctl() result");
    assert_d_eq(mallctl("stats.arenas.0.huge.nrequests",
        (void *)&nrequests, &sz, NULL, 0), expected,
        "Unexpected mallctl() result");

    if (config_stats) {
        assert_zu_gt(allocated, 0,
            "allocated should be greater than zero");
        assert_u64_ge(nmalloc, ndalloc,
            "nmalloc should be at least as large as ndalloc");
        assert_u64_le(nmalloc, nrequests,
            "nmalloc should be no larger than nrequests");
    }

    dallocx(p, 0);
}
void
data_cleanup(void *arg) {
    data_t *data = (data_t *)arg;

    if (!data_cleanup_executed) {
        assert_x_eq(*data, THREAD_DATA,
            "Argument passed into cleanup function should match tsd "
            "value");
    }
    data_cleanup_executed = true;

    /*
     * Allocate during cleanup for two rounds, to ensure that jemalloc's
     * internal tsd reinitialization happens.
     */
    switch (*data) {
    case THREAD_DATA:
        *data = 1;
        data_tsd_set(data);
        break;
    case 1:
        *data = 2;
        data_tsd_set(data);
        break;
    case 2:
        return;
    default:
        not_reached();
    }

    {
        void *p = mallocx(1, 0);
        assert_ptr_not_null(p, "Unexpected mallocx() failure");
        dallocx(p, 0);
    }
}