/*
 * Allocate a single byte with malloc() and release it with sized
 * deallocation (sdallocx), reporting a test failure if the allocation
 * itself fails.
 */
static void
malloc_sdallocx(void) {
	void *ptr;

	ptr = malloc(1);
	if (ptr == NULL) {
		test_fail("Unexpected malloc() failure");
		return;
	}
	sdallocx(ptr, 1, 0);
}
TEST_END

/*
 * Verify that an installed deallocation hook observes each of the three
 * plain (non-realloc) deallocation entry points -- free(), dallocx(), and
 * sdallocx() -- exactly once per call, and receives the expected hook type,
 * extra argument, freed address, and raw argument array.
 */
TEST_BEGIN(test_hooks_dalloc_simple) {
	/* "Simple" in the sense that we're not in a realloc variant. */
	hooks_t hooks = {NULL, &test_dalloc_hook, NULL, (void *)123};
	void *handle = hook_install(TSDN_NULL, &hooks);
	assert_ptr_ne(handle, NULL, "Hook installation failed");

	/*
	 * volatile -- presumably so the compiler cannot treat ptr as dead
	 * after the free and elide the comparisons against arg_address below.
	 */
	void *volatile ptr;

	/* free() */
	reset();
	ptr = malloc(1);
	free(ptr);
	assert_d_eq(call_count, 1, "Hook not called");
	assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
	assert_d_eq(arg_type, (int)hook_dalloc_free, "Wrong hook type");
	assert_ptr_eq(ptr, arg_address, "Wrong pointer freed");
	assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg");

	/* dallocx() */
	reset();
	ptr = malloc(1);
	dallocx(ptr, MALLOCX_TCACHE_NONE);
	assert_d_eq(call_count, 1, "Hook not called");
	assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
	assert_d_eq(arg_type, (int)hook_dalloc_dallocx, "Wrong hook type");
	assert_ptr_eq(ptr, arg_address, "Wrong pointer freed");
	/* The raw args carry the original call's arguments in order. */
	assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg");
	assert_u64_eq((uintptr_t)MALLOCX_TCACHE_NONE, arg_args_raw[1],
	    "Wrong raw arg");

	/* sdallocx() */
	reset();
	ptr = malloc(1);
	sdallocx(ptr, 1, MALLOCX_TCACHE_NONE);
	assert_d_eq(call_count, 1, "Hook not called");
	assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
	assert_d_eq(arg_type, (int)hook_dalloc_sdallocx, "Wrong hook type");
	assert_ptr_eq(ptr, arg_address, "Wrong pointer freed");
	assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong raw arg");
	assert_u64_eq((uintptr_t)1, arg_args_raw[1], "Wrong raw arg");
	assert_u64_eq((uintptr_t)MALLOCX_TCACHE_NONE, arg_args_raw[2],
	    "Wrong raw arg");

	hook_remove(TSDN_NULL, handle);
}
void SparseHeap::freeBig(void* ptr, MemoryUsageStats& stats) { // Since we account for these direct allocations in our usage and adjust for // them on allocation, we also need to adjust for them negatively on free. auto cap = m_bigs.erase(ptr); stats.mm_freed += cap; stats.malloc_cap -= cap; #ifdef USE_JEMALLOC assertx(nallocx(cap, 0) == sallocx(ptr, 0)); #if JEMALLOC_VERSION_MAJOR >= 4 sdallocx(ptr, cap, 0); #else dallocx(ptr, 0); #endif #else free(ptr); #endif }
/*
 * Tear the heap down: hand the pooled slabs back to the shared slab
 * manager in one batch, free every remaining tracked block back to the
 * allocator, and clear all bookkeeping.
 */
void SparseHeap::reset() {
  TRACE(1, "heap-id %lu SparseHeap-reset: pooled_slabs %lu bigs %lu\n",
        tl_heap_id, m_pooled_slabs.size(), m_bigs.countBlocks());
#if !FOLLY_SANITIZE
  // trash fill is redundant with ASAN
  if (RuntimeOption::EvalTrashFillOnRequestExit) {
    // Scribble over every tracked block so stale pointers fail fast.
    m_bigs.iterate([&](HeapObject* h, size_t size) {
      memset(h, kSmallFreeFill, size);
    });
  }
#endif
  // Free a single block back to the underlying allocator, using jemalloc's
  // sized deallocation (sdallocx) when jemalloc >= 4 is available.
  auto const do_free = [](void* ptr, size_t size) {
#ifdef USE_JEMALLOC
#if JEMALLOC_VERSION_MAJOR >= 4
    sdallocx(ptr, size, 0);
#else
    dallocx(ptr, 0);
#endif
#else
    free(ptr);
#endif
  };
  // Thread the pooled slabs into one list (remembering its tail) and remove
  // them from m_bigs so the free loop below skips them; they are returned to
  // the slab manager in a single merge() call instead of being freed.
  TaggedSlabList pooledSlabs;
  void* pooledSlabTail = nullptr;
  for (auto& slab : m_pooled_slabs) {
    if (!pooledSlabTail) pooledSlabTail = slab.ptr;
    pooledSlabs.push_front<true>(slab.ptr, slab.version);
    m_bigs.erase(slab.ptr);
  }
  if (pooledSlabTail) {
    m_slabManager->merge(pooledSlabs.head(), pooledSlabTail);
  }
  m_pooled_slabs.clear();
  m_hugeBytes = 0;
  // Everything still in m_bigs is owned directly by this heap; free it.
  m_bigs.iterate([&](HeapObject* h, size_t size) {
    do_free(h, size);
  });
  m_bigs.clear();
  m_slab_range = {nullptr, 0};
}
TEST_END TEST_BEGIN(test_alignment_and_size) { size_t nsz, sz, alignment, total; unsigned i; void *ps[NITER]; for (i = 0; i < NITER; i++) { ps[i] = NULL; } for (alignment = 8; alignment <= MAXALIGN; alignment <<= 1) { total = 0; for (sz = 1; sz < 3 * alignment && sz < (1U << 31); sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { for (i = 0; i < NITER; i++) { nsz = nallocx(sz, MALLOCX_ALIGN(alignment) | MALLOCX_ZERO); ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) | MALLOCX_ZERO); total += nsz; if (total >= (MAXALIGN << 1)) { break; } } for (i = 0; i < NITER; i++) { if (ps[i] != NULL) { sdallocx(ps[i], sz, MALLOCX_ALIGN(alignment)); ps[i] = NULL; } } } } }