TEST_END TEST_BEGIN(test_align) { void *p, *q; size_t align; #define MAX_ALIGN (ZU(1) << 29) align = ZU(1); p = mallocx(1, MALLOCX_ALIGN(align)); assert_ptr_not_null(p, "Unexpected mallocx() error"); for (align <<= 1; align <= MAX_ALIGN; align <<= 1) { q = rallocx(p, 1, MALLOCX_ALIGN(align)); assert_ptr_not_null(q, "Unexpected rallocx() error for align=%zu", align); assert_ptr_null( (void *)((uintptr_t)q & (align-1)), "%p inadequately aligned for align=%zu", q, align); p = q; } dallocx(p, 0); #undef MAX_ALIGN }
TEST_END

/*
 * Sweep a grid of (alignment, size) pairs with MALLOCX_ALIGN|MALLOCX_ZERO,
 * checking that nallocx() predictions match sallocx() reality, that real
 * sizes are never smaller than requested, and that results are aligned.
 */
TEST_BEGIN(test_alignment_and_size) {
#define MAXALIGN (((size_t)1) << 25)
#define NITER 4
	size_t nsz, rsz, sz, alignment, total;
	unsigned i;
	void *ps[NITER];

	/* NULL-initialize so the cleanup loop can skip unused slots. */
	for (i = 0; i < NITER; i++)
		ps[i] = NULL;

	for (alignment = 8; alignment <= MAXALIGN; alignment <<= 1) {
		total = 0;
		/*
		 * Step size by a stride derived from the alignment so the
		 * sweep covers sizes below, at, and above each alignment.
		 */
		for (sz = 1; sz < 3 * alignment && sz < (1U << 31);
		    sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
			for (i = 0; i < NITER; i++) {
				nsz = nallocx(sz, MALLOCX_ALIGN(alignment) |
				    MALLOCX_ZERO);
				assert_zu_ne(nsz, 0,
				    "nallocx() error for alignment=%zu, "
				    "size=%zu (%#zx)", alignment, sz, sz);
				ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) |
				    MALLOCX_ZERO);
				assert_ptr_not_null(ps[i],
				    "mallocx() error for alignment=%zu, "
				    "size=%zu (%#zx)", alignment, sz, sz);
				rsz = sallocx(ps[i], 0);
				assert_zu_ge(rsz, sz,
				    "Real size smaller than expected for "
				    "alignment=%zu, size=%zu", alignment, sz);
				/* nallocx() must predict sallocx() exactly. */
				assert_zu_eq(nsz, rsz,
				    "nallocx()/sallocx() size mismatch for "
				    "alignment=%zu, size=%zu", alignment, sz);
				assert_ptr_null(
				    (void *)((uintptr_t)ps[i] & (alignment-1)),
				    "%p inadequately aligned for"
				    " alignment=%zu, size=%zu",
				    ps[i], alignment, sz);
				/* Cap memory held live per (alignment, sz). */
				total += rsz;
				if (total >= (MAXALIGN << 1))
					break;
			}
			for (i = 0; i < NITER; i++) {
				if (ps[i] != NULL) {
					dallocx(ps[i], 0);
					ps[i] = NULL;
				}
			}
		}
	}
#undef MAXALIGN
#undef NITER
}
TEST_END

/*
 * Verify that mallocx() reports OOM rather than succeeding for impossible
 * requests: two objects that would each consume more than half the virtual
 * address space, and a size/alignment pair that overflows the address space.
 */
TEST_BEGIN(test_oom) {
	size_t hugemax, size, alignment;

	hugemax = get_huge_size(get_nhuge()-1);

	/*
	 * It should be impossible to allocate two objects that each consume
	 * more than half the virtual address space.
	 */
	{
		void *p;

		p = mallocx(hugemax, 0);
		if (p != NULL) {
			assert_ptr_null(mallocx(hugemax, 0),
			    "Expected OOM for mallocx(size=%#zx, 0)",
			    hugemax);
			dallocx(p, 0);
		}
	}

	/* Pick size and alignment that together cannot fit in the AS. */
#if LG_SIZEOF_PTR == 3
	size = ZU(0x8000000000000000);
	alignment = ZU(0x8000000000000000);
#else
	size = ZU(0x80000000);
	alignment = ZU(0x80000000);
#endif
	/*
	 * Fix: the diagnostic string was missing the closing parenthesis of
	 * "mallocx(...)".
	 */
	assert_ptr_null(mallocx(size, MALLOCX_ALIGN(alignment)),
	    "Expected OOM for mallocx(size=%#zx, MALLOCX_ALIGN(%#zx))",
	    size, alignment);
}
TEST_END TEST_BEGIN(test_overflow) { size_t largemax; void *p; largemax = get_large_size(get_nlarge()-1); p = mallocx(1, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_ptr_null(rallocx(p, largemax+1, 0), "Expected OOM for rallocx(p, size=%#zx, 0)", largemax+1); assert_ptr_null(rallocx(p, ZU(PTRDIFF_MAX)+1, 0), "Expected OOM for rallocx(p, size=%#zx, 0)", ZU(PTRDIFF_MAX)+1); assert_ptr_null(rallocx(p, SIZE_T_MAX, 0), "Expected OOM for rallocx(p, size=%#zx, 0)", SIZE_T_MAX); assert_ptr_null(rallocx(p, 1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)), "Expected OOM for rallocx(p, size=1, MALLOCX_ALIGN(%#zx))", ZU(PTRDIFF_MAX)+1); dallocx(p, 0); }
TEST_END TEST_BEGIN(test_oom) { size_t hugemax; bool oom; void *ptrs[3]; unsigned i; /* * It should be impossible to allocate three objects that each consume * nearly half the virtual address space. */ hugemax = get_huge_size(get_nhuge()-1); oom = false; for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) { ptrs[i] = mallocx(hugemax, 0); if (ptrs[i] == NULL) oom = true; } assert_true(oom, "Expected OOM during series of calls to mallocx(size=%zu, 0)", hugemax); for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) { if (ptrs[i] != NULL) dallocx(ptrs[i], 0); } #if LG_SIZEOF_PTR == 3 assert_ptr_null(mallocx(0x8000000000000000ULL, MALLOCX_ALIGN(0x8000000000000000ULL)), "Expected OOM for mallocx()"); assert_ptr_null(mallocx(0x8000000000000000ULL, MALLOCX_ALIGN(0x80000000)), "Expected OOM for mallocx()"); #else assert_ptr_null(mallocx(0x80000000UL, MALLOCX_ALIGN(0x80000000UL)), "Expected OOM for mallocx()"); #endif }
TEST_END

/*
 * Sweep (alignment, size) pairs with MALLOCX_ALIGN|MALLOCX_ZERO, freeing via
 * sdallocx() so the size+flags-aware deallocation path is exercised.
 * NOTE(review): MAXALIGN and NITER are assumed to be defined earlier in this
 * file (not visible in this chunk) -- confirm.
 */
TEST_BEGIN(test_alignment_and_size) {
	size_t nsz, sz, alignment, total;
	unsigned i;
	void *ps[NITER];

	/* NULL-initialize so the cleanup loop can skip unused slots. */
	for (i = 0; i < NITER; i++) {
		ps[i] = NULL;
	}

	for (alignment = 8; alignment <= MAXALIGN; alignment <<= 1) {
		total = 0;
		/* Stride covers sizes below, at, and above each alignment. */
		for (sz = 1; sz < 3 * alignment && sz < (1U << 31);
		    sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
			for (i = 0; i < NITER; i++) {
				nsz = nallocx(sz, MALLOCX_ALIGN(alignment) |
				    MALLOCX_ZERO);
				ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) |
				    MALLOCX_ZERO);
				/* Cap memory held live per (alignment, sz). */
				total += nsz;
				if (total >= (MAXALIGN << 1)) {
					break;
				}
			}
			for (i = 0; i < NITER; i++) {
				if (ps[i] != NULL) {
					/* Size+align must match the alloc. */
					sdallocx(ps[i], sz,
					    MALLOCX_ALIGN(alignment));
					ps[i] = NULL;
				}
			}
		}
	}
}
/*
 * posix_memalign()-style entry point for arena-backed kinds: validate the
 * alignment, pick an arena for this kind/size, and allocate through jemalloc.
 * Returns 0 on success, or an errno-style code (e.g. ENOMEM) on failure;
 * *memptr is NULL unless the allocation succeeds.
 */
int memkind_arena_posix_memalign(struct memkind *kind, void **memptr,
                                 size_t alignment, size_t size)
{
    unsigned int arena;
    int err;

    *memptr = NULL;
    err = kind->ops->get_arena(kind, &arena, size);
    if (!err) {
        err = memkind_posix_check_alignment(kind, alignment);
    }
    if (!err) {
        /*
         * posix_memalign must not change errno; restore its value after
         * the jemalloc call.
         */
        int saved_errno = errno;
        *memptr = jemk_mallocx_check(size,
                                     MALLOCX_ALIGN(alignment) |
                                     MALLOCX_ARENA(arena));
        errno = saved_errno;
        err = *memptr ? 0 : ENOMEM;
    }
    return err;
}
// Allocate one kSlabSize slab for this heap, preferring the pooled slab
// manager (huge pages) while under the request's huge-page byte budget, and
// falling back to jemalloc/aligned malloc. Updates `stats` accounting and
// expands m_slab_range to cover the new slab.
HeapObject* SparseHeap::allocSlab(MemoryUsageStats& stats) {
  // Common tail: grow m_slab_range to include the new slab, then return it.
  auto finish = [&](void* p) {
    // expand m_slab_range to include this new slab
    if (!m_slab_range.size) {
      m_slab_range = {p, kSlabSize};
    } else {
      auto min = std::min(m_slab_range.ptr, p);
      auto max = std::max((char*)p + kSlabSize,
                          (char*)m_slab_range.ptr + m_slab_range.size);
      m_slab_range = {min, size_t((char*)max - (char*)min)};
    }
    return static_cast<HeapObject*>(p);
  };

  // Fast path: take a pooled slab while under the per-request huge budget.
  if (m_slabManager && m_hugeBytes < RuntimeOption::RequestHugeMaxBytes) {
    if (auto slab = m_slabManager->tryAlloc()) {
      // Pooled slabs are accounted as mmap capacity, not malloc capacity.
      stats.mmap_volume += kSlabSize;
      stats.mmap_cap += kSlabSize;
      stats.peakCap = std::max(stats.peakCap, stats.capacity());
      m_pooled_slabs.emplace_back(slab.ptr(), kSlabSize, slab.tag());
      m_bigs.insert((HeapObject*)slab.ptr(), kSlabSize);
      m_hugeBytes += kSlabSize;
      return finish(slab.ptr());
    }
  }
#ifdef USE_JEMALLOC
  void* slab = mallocx(kSlabSize, MALLOCX_ALIGN(kSlabAlign));
  // sallocx() reports the real usable size, which may exceed kSlabSize.
  auto usable = sallocx(slab, 0);
#else
  auto slab = safe_aligned_alloc(kSlabAlign, kSlabSize);
  auto usable = kSlabSize;
#endif
  m_bigs.insert((HeapObject*)slab, kSlabSize);
  stats.malloc_cap += usable;
  stats.peakCap = std::max(stats.peakCap, stats.capacity());
  return finish(slab);
}
void *as_memalign(int id, size_t boundary, size_t size) { int flags = as_flags[id] | MALLOCX_ALIGN(boundary); return je_mallocx(size, flags); }