Example 1
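Walks jemalloc's size classes (stepping by nallocx(sz, 0) + 1) and asserts that the usable size reported by sallocx() is at least the request and exactly matches the nallocx() prediction, with and without MALLOCX_ZERO. Apparently jemalloc's own mallocx integration test.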

TEST_BEGIN(test_basic)
{
#define	MAXSZ (((size_t)1) << 26)
	size_t sz;

	for (sz = 1; sz < MAXSZ; sz = nallocx(sz, 0) + 1) {
		size_t nsz, rsz;
		void *p;
		nsz = nallocx(sz, 0);
		assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
		p = mallocx(sz, 0);
		assert_ptr_not_null(p, "Unexpected mallocx() error");
		rsz = sallocx(p, 0);
		assert_zu_ge(rsz, sz, "Real size smaller than expected");
		assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch");
		dallocx(p, 0);

		p = mallocx(sz, 0);
		assert_ptr_not_null(p, "Unexpected mallocx() error");
		dallocx(p, 0);

		nsz = nallocx(sz, MALLOCX_ZERO);
		assert_zu_ne(nsz, 0, "Unexpected nallocx() error");
		p = mallocx(sz, MALLOCX_ZERO);
		assert_ptr_not_null(p, "Unexpected mallocx() error");
		rsz = sallocx(p, 0);
		assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch");
		dallocx(p, 0);
	}
#undef MAXSZ
}
TEST_END
Example 2
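An older jemalloc junk-fill test (note config_mremap and arena_maxclass): sallocx() drives a size-class walk, newly exposed bytes must carry the 0xa5 junk pattern, and intercepted dalloc-junk hooks verify that regions freed by rallocx() get junk-filled.
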
static void
test_junk(size_t sz_min, size_t sz_max)
{
	char *s;
	size_t sz_prev, sz, i;

	arena_dalloc_junk_small_orig = arena_dalloc_junk_small;
	arena_dalloc_junk_small = arena_dalloc_junk_small_intercept;
	arena_dalloc_junk_large_orig = arena_dalloc_junk_large;
	arena_dalloc_junk_large = arena_dalloc_junk_large_intercept;
	huge_dalloc_junk_orig = huge_dalloc_junk;
	huge_dalloc_junk = huge_dalloc_junk_intercept;

	sz_prev = 0;
	s = (char *)mallocx(sz_min, 0);
	assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");

	for (sz = sallocx(s, 0); sz <= sz_max;
	    sz_prev = sz, sz = sallocx(s, 0)) {
		if (sz_prev > 0) {
			assert_c_eq(s[0], 'a',
			    "Previously allocated byte %zu/%zu is corrupted",
			    ZU(0), sz_prev);
			assert_c_eq(s[sz_prev-1], 'a',
			    "Previously allocated byte %zu/%zu is corrupted",
			    sz_prev-1, sz_prev);
		}

		for (i = sz_prev; i < sz; i++) {
			assert_c_eq(s[i], 0xa5,
			    "Newly allocated byte %zu/%zu isn't junk-filled",
			    i, sz);
			s[i] = 'a';
		}

		if (xallocx(s, sz+1, 0, 0) == sz) {
			void *junked = (void *)s;

			s = (char *)rallocx(s, sz+1, 0);
			assert_ptr_not_null((void *)s,
			    "Unexpected rallocx() failure");
			if (!config_mremap || sz+1 <= arena_maxclass) {
				assert_ptr_eq(most_recently_junked, junked,
				    "Expected region of size %zu to be "
				    "junk-filled",
				    sz);
			}
		}
	}

	dallocx(s, 0);
	assert_ptr_eq(most_recently_junked, (void *)s,
	    "Expected region of size %zu to be junk-filled", sz);

	arena_dalloc_junk_small = arena_dalloc_junk_small_orig;
	arena_dalloc_junk_large = arena_dalloc_junk_large_orig;
	huge_dalloc_junk = huge_dalloc_junk_orig;
}
Example 3
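jemalloc 5.x hook test: after xallocx() on a one-byte allocation, the expand hook's recorded old and new usable sizes are cross-checked against nallocx(1, 0) and sallocx(ptr, 0).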

TEST_BEGIN(test_hooks_expand_simple) {
	/* "Simple" in the sense that we're not in a realloc variant. */
	hooks_t hooks = {NULL, NULL, &test_expand_hook, (void *)123};
	void *handle = hook_install(TSDN_NULL, &hooks);
	assert_ptr_ne(handle, NULL, "Hook installation failed");

	void *volatile ptr;

	/* xallocx() */
	reset();
	ptr = malloc(1);
	size_t new_usize = xallocx(ptr, 100, 200, MALLOCX_TCACHE_NONE);
	assert_d_eq(call_count, 1, "Hook not called");
	assert_ptr_eq(arg_extra, (void *)123, "Wrong extra");
	assert_d_eq(arg_type, (int)hook_expand_xallocx, "Wrong hook type");
	assert_ptr_eq(ptr, arg_address, "Wrong pointer expanded");
	assert_u64_eq(arg_old_usize, nallocx(1, 0), "Wrong old usize");
	assert_u64_eq(arg_new_usize, sallocx(ptr, 0), "Wrong new usize");
	assert_u64_eq(new_usize, arg_result_raw, "Wrong result");
	assert_u64_eq((uintptr_t)ptr, arg_args_raw[0], "Wrong arg");
	assert_u64_eq(100, arg_args_raw[1], "Wrong arg");
	assert_u64_eq(200, arg_args_raw[2], "Wrong arg");
	assert_u64_eq(MALLOCX_TCACHE_NONE, arg_args_raw[3], "Wrong arg");

	hook_remove(TSDN_NULL, handle);
}
TEST_END
Example 4
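HHVM's SparseHeap::resizeBig(): after rallocx() resizes (and possibly moves) a big block, sallocx() supplies the new capacity used to update the m_bigs map and the memory-usage stats.
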
void* SparseHeap::resizeBig(void* ptr, size_t new_size,
                            MemoryUsageStats& stats) {
  auto old = static_cast<HeapObject*>(ptr);
  auto old_cap = m_bigs.get(old);
#ifdef USE_JEMALLOC
  auto const newNode = static_cast<HeapObject*>(
    rallocx(ptr, new_size, 0)
  );
  auto new_cap = sallocx(newNode, 0);
#else
  auto const newNode = static_cast<HeapObject*>(
    safe_realloc(ptr, new_size)
  );
  auto new_cap = malloc_usable_size(newNode);
  if (new_cap % kSmallSizeAlign != 0) {
    // adjust to satisfy RadixMap (see justification in allocBig())
    new_cap += kSmallSizeAlign - new_cap % kSmallSizeAlign;
  }
#endif
  if (newNode != old || new_cap != old_cap) {
    m_bigs.erase(old);
    m_bigs.insert(newNode, new_cap);
  }
  stats.mm_udebt -= new_cap - old_cap;
  stats.malloc_cap += new_cap - old_cap;
  return newNode;
}
Example 5
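Alignment variant of Example 1: for each power-of-two alignment, nallocx() and sallocx() must agree under MALLOCX_ALIGN | MALLOCX_ZERO, and each returned pointer must actually satisfy the alignment.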

TEST_BEGIN(test_alignment_and_size)
{
#define	MAXALIGN (((size_t)1) << 25)
#define	NITER 4
	size_t nsz, rsz, sz, alignment, total;
	unsigned i;
	void *ps[NITER];

	for (i = 0; i < NITER; i++)
		ps[i] = NULL;

	for (alignment = 8;
	    alignment <= MAXALIGN;
	    alignment <<= 1) {
		total = 0;
		for (sz = 1;
		    sz < 3 * alignment && sz < (1U << 31);
		    sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) {
			for (i = 0; i < NITER; i++) {
				nsz = nallocx(sz, MALLOCX_ALIGN(alignment) |
				    MALLOCX_ZERO);
				assert_zu_ne(nsz, 0,
				    "nallocx() error for alignment=%zu, "
				    "size=%zu (%#zx)", alignment, sz, sz);
				ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) |
				    MALLOCX_ZERO);
				assert_ptr_not_null(ps[i],
				    "mallocx() error for alignment=%zu, "
				    "size=%zu (%#zx)", alignment, sz, sz);
				rsz = sallocx(ps[i], 0);
				assert_zu_ge(rsz, sz,
				    "Real size smaller than expected for "
				    "alignment=%zu, size=%zu", alignment, sz);
				assert_zu_eq(nsz, rsz,
				    "nallocx()/sallocx() size mismatch for "
				    "alignment=%zu, size=%zu", alignment, sz);
				assert_ptr_null(
				    (void *)((uintptr_t)ps[i] & (alignment-1)),
				    "%p inadequately aligned for"
				    " alignment=%zu, size=%zu", ps[i],
				    alignment, sz);
				total += rsz;
				if (total >= (MAXALIGN << 1))
					break;
			}
			for (i = 0; i < NITER; i++) {
				if (ps[i] != NULL) {
					dallocx(ps[i], 0);
					ps[i] = NULL;
				}
			}
		}
	}
#undef MAXALIGN
#undef NITER
}
TEST_END
Example 6
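Benchmark body pairing malloc()/free() with a sallocx() probe; the reported size of a one-byte allocation must be at least 1. Looks like a helper from jemalloc's stress microbenchmark.
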
static void
malloc_sallocx_free(void)
{
	void *p;

	p = malloc(1);
	if (sallocx(p, 0) < 1)
		test_fail("Unexpected sallocx() failure");
	free(p);
}
Example 7
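Older HHVM BigHeap::allocBig(): under jemalloc, sallocx() reports the capacity actually granted for the block plus its BigNode header, which is recorded and returned to the caller.
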
MemBlock BigHeap::allocBig(size_t bytes, HeaderKind kind) {
#ifdef USE_JEMALLOC
  auto n = static_cast<BigNode*>(mallocx(bytes + sizeof(BigNode), 0));
  auto cap = sallocx(n, 0);
#else
  auto cap = bytes + sizeof(BigNode);
  auto n = static_cast<BigNode*>(safe_malloc(cap));
#endif
  enlist(n, kind, cap);
  return {n + 1, cap - sizeof(BigNode)};
}
Example 8
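jemalloc zero-fill test (uint8_t variant): sallocx() drives the size-class walk, every newly exposed byte must be zero, and previously written MAGIC bytes must survive in-place growth via xallocx()/rallocx().
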
static void
test_zero(size_t sz_min, size_t sz_max)
{
    uint8_t *s;
    size_t sz_prev, sz, i;
#define	MAGIC	((uint8_t)0x61)

    sz_prev = 0;
    s = (uint8_t *)mallocx(sz_min, 0);
    assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");

    for (sz = sallocx(s, 0); sz <= sz_max;
            sz_prev = sz, sz = sallocx(s, 0)) {
        if (sz_prev > 0) {
            assert_u_eq(s[0], MAGIC,
                        "Previously allocated byte %zu/%zu is corrupted",
                        ZU(0), sz_prev);
            assert_u_eq(s[sz_prev-1], MAGIC,
                        "Previously allocated byte %zu/%zu is corrupted",
                        sz_prev-1, sz_prev);
        }

        for (i = sz_prev; i < sz; i++) {
            assert_u_eq(s[i], 0x0,
                        "Newly allocated byte %zu/%zu isn't zero-filled",
                        i, sz);
            s[i] = MAGIC;
        }

        if (xallocx(s, sz+1, 0, 0) == sz) {
            s = (uint8_t *)rallocx(s, sz+1, 0);
            assert_ptr_not_null((void *)s,
                                "Unexpected rallocx() failure");
        }
    }

    dallocx(s, 0);
#undef MAGIC
}
Example 9
File: zero.c Project: Agobin/chapel
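The same zero-fill test with a char buffer, as vendored in the Chapel project's bundled jemalloc.
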
static void
test_zero(size_t sz_min, size_t sz_max)
{
	char *s;
	size_t sz_prev, sz, i;

	sz_prev = 0;
	s = (char *)mallocx(sz_min, 0);
	assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");

	for (sz = sallocx(s, 0); sz <= sz_max;
	    sz_prev = sz, sz = sallocx(s, 0)) {
		if (sz_prev > 0) {
			assert_c_eq(s[0], 'a',
			    "Previously allocated byte %zu/%zu is corrupted",
			    ZU(0), sz_prev);
			assert_c_eq(s[sz_prev-1], 'a',
			    "Previously allocated byte %zu/%zu is corrupted",
			    sz_prev-1, sz_prev);
		}

		for (i = sz_prev; i < sz; i++) {
			assert_c_eq(s[i], 0x0,
			    "Newly allocated byte %zu/%zu isn't zero-filled",
			    i, sz);
			s[i] = 'a';
		}

		if (xallocx(s, sz+1, 0, 0) == sz) {
			s = (char *)rallocx(s, sz+1, 0);
			assert_ptr_not_null((void *)s,
			    "Unexpected rallocx() failure");
		}
	}

	dallocx(s, 0);
}
Example 10
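Checks that xallocx() never moves a block: asking for sz + 5 with no room to grow in place must return the unchanged sallocx() size.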

TEST_BEGIN(test_no_move_fail)
{
	void *p;
	size_t sz, tsz;

	p = mallocx(42, 0);
	assert_ptr_not_null(p, "Unexpected mallocx() error");
	sz = sallocx(p, 0);

	tsz = xallocx(p, sz + 5, 0, 0);
	assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz);

	dallocx(p, 0);
}
TEST_END
Example 11
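HHVM's SparseHeap::freeBig(): the capacity tracked in m_bigs is sanity-checked against sallocx() before the block is released with sdallocx() (jemalloc 4+) or dallocx().
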
void SparseHeap::freeBig(void* ptr, MemoryUsageStats& stats) {
  // Since we account for these direct allocations in our usage and adjust for
  // them on allocation, we also need to adjust for them negatively on free.
  auto cap = m_bigs.erase(ptr);
  stats.mm_freed += cap;
  stats.malloc_cap -= cap;
#ifdef USE_JEMALLOC
  assertx(nallocx(cap, 0) == sallocx(ptr, 0));
#if JEMALLOC_VERSION_MAJOR >= 4
  sdallocx(ptr, cap, 0);
#else
  dallocx(ptr, 0);
#endif
#else
  free(ptr);
#endif
}
Example 12
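Older HHVM smart-allocator helper (templated on callerSavesActualSize, like the smartMallocSizeBig() in Example 16; the template line appears to have been cut by the snippet): sallocx() recovers the usable size of a freshly mallocx'd big block so the stats charge actual capacity rather than the request.
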
template<bool callerSavesActualSize> NEVER_INLINE
void* MemoryManager::smartMallocSizeBigHelper(void*& ptr,
                                              size_t& szOut,
                                              size_t bytes) {
  ptr = mallocx(debugAddExtra(bytes + sizeof(BigNode)), 0);
  szOut = debugRemoveExtra(sallocx(ptr, 0) - sizeof(BigNode));

  // NB: We don't report the SweepNode size in the stats.
  auto const delta = callerSavesActualSize ? szOut : bytes;
  m_stats.usage += int64_t(delta);
  // Adjust jemalloc otherwise we'll double count the direct allocation.
  JEMALLOC_STATS_ADJUST(&m_stats, delta);

  return debugPostAllocate(
    smartEnlist(static_cast<BigNode*>(ptr)),
    bytes,
    szOut
  );
}
Example 13
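HHVM's SparseHeap::allocBig(): under jemalloc the capacity comes straight from sallocx(); the fallback path rounds malloc_usable_size() up to kSmallSizeAlign for the RadixMap.
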
void* SparseHeap::allocBig(size_t bytes, bool zero, MemoryUsageStats& stats) {
#ifdef USE_JEMALLOC
  int flags = zero ? MALLOCX_ZERO : 0;
  auto n = static_cast<HeapObject*>(mallocx(bytes, flags));
  auto cap = sallocx(n, 0);
#else
  auto n = static_cast<MallocNode*>(
    zero ? safe_calloc(1, bytes) : safe_malloc(bytes)
  );
  auto cap = malloc_usable_size(n);
  if (cap % kSmallSizeAlign != 0) {
    // cap==bytes is legal, but RadixMap requires at least kSmallSizeAlign
    // for tracking purposes. In this mode we just call free(), so it's safe
    // to slightly overestimate the block size
    cap += kSmallSizeAlign - cap % kSmallSizeAlign;
  }
#endif
  m_bigs.insert(n, cap);
  stats.mm_udebt -= cap;
  stats.malloc_cap += cap;
  return n;
}
Example 14
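HHVM's SparseHeap::allocSlab(): when the pooled slab manager can't supply a slab, it falls back to mallocx(kSlabSize, MALLOCX_ALIGN(kSlabAlign)) and lets sallocx() report the usable capacity for the stats.
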
HeapObject* SparseHeap::allocSlab(MemoryUsageStats& stats) {
  auto finish = [&](void* p) {
    // expand m_slab_range to include this new slab
    if (!m_slab_range.size) {
      m_slab_range = {p, kSlabSize};
    } else {
      auto min = std::min(m_slab_range.ptr, p);
      auto max = std::max((char*)p + kSlabSize,
                          (char*)m_slab_range.ptr + m_slab_range.size);
      m_slab_range = {min, size_t((char*)max - (char*)min)};
    }
    return static_cast<HeapObject*>(p);
  };

  if (m_slabManager && m_hugeBytes < RuntimeOption::RequestHugeMaxBytes) {
    if (auto slab = m_slabManager->tryAlloc()) {
      stats.mmap_volume += kSlabSize;
      stats.mmap_cap += kSlabSize;
      stats.peakCap = std::max(stats.peakCap, stats.capacity());
      m_pooled_slabs.emplace_back(slab.ptr(), kSlabSize, slab.tag());
      m_bigs.insert((HeapObject*)slab.ptr(), kSlabSize);
      m_hugeBytes += kSlabSize;
      return finish(slab.ptr());
    }
  }
#ifdef USE_JEMALLOC
  void* slab = mallocx(kSlabSize, MALLOCX_ALIGN(kSlabAlign));
  auto usable = sallocx(slab, 0);
#else
  auto slab = safe_aligned_alloc(kSlabAlign, kSlabSize);
  auto usable = kSlabSize;
#endif
  m_bigs.insert((HeapObject*)slab, kSlabSize);
  stats.malloc_cap += usable;
  stats.peakCap = std::max(stats.peakCap, stats.capacity());
  return finish(slab);
}
Example 15
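jemalloc rallocx() test: reallocates under growing MALLOCX_LG_ALIGN | MALLOCX_ZERO, checks the resulting alignment, and validates that the sallocx()-reported region (or a bounded prefix and suffix of it) is zeroed.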

TEST_BEGIN(test_lg_align_and_zero)
{
	void *p, *q;
	size_t lg_align, sz;
#define	MAX_LG_ALIGN 29
#define	MAX_VALIDATE (ZU(1) << 22)

	lg_align = ZU(0);
	p = mallocx(1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO);
	assert_ptr_not_null(p, "Unexpected mallocx() error");

	for (lg_align++; lg_align <= MAX_LG_ALIGN; lg_align++) {
		q = rallocx(p, 1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO);
		assert_ptr_not_null(q,
		    "Unexpected rallocx() error for lg_align=%zu", lg_align);
		assert_ptr_null(
		    (void *)((uintptr_t)q & ((ZU(1) << lg_align)-1)),
		    "%p inadequately aligned for lg_align=%zu",
		    q, lg_align);
		sz = sallocx(q, 0);
		if ((sz << 1) <= MAX_VALIDATE) {
			assert_false(validate_fill(q, 0, 0, sz),
			    "Expected zeroed memory");
		} else {
			assert_false(validate_fill(q, 0, 0, MAX_VALIDATE),
			    "Expected zeroed memory");
			assert_false(validate_fill(
			    (void *)((uintptr_t)q+sz-MAX_VALIDATE), 0, 0,
			    MAX_VALIDATE), "Expected zeroed memory");
		}
		p = q;
	}
	dallocx(p, 0);
#undef MAX_VALIDATE
#undef MAX_LG_ALIGN
}
TEST_END
Example 16
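HHVM's MemoryManager::smartMallocSizeBig(), the template counterpart of Example 12's helper, here with the non-jemalloc fallback path that sizes the block via safe_malloc().
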
template<bool callerSavesActualSize> NEVER_INLINE
MemBlock MemoryManager::smartMallocSizeBig(size_t bytes) {
#ifdef USE_JEMALLOC
  auto const n = static_cast<BigNode*>(
    mallocx(debugAddExtra(bytes + sizeof(BigNode)), 0)
  );
  auto szOut = debugRemoveExtra(sallocx(n, 0) - sizeof(BigNode));
  // NB: We don't report the SweepNode size in the stats.
  auto const delta = callerSavesActualSize ? szOut : bytes;
  m_stats.usage += int64_t(delta);
  // Adjust jemalloc otherwise we'll double count the direct allocation.
  JEMALLOC_STATS_ADJUST(&m_stats, delta);
#else
  m_stats.usage += bytes;
  auto const n = static_cast<BigNode*>(
    safe_malloc(debugAddExtra(bytes + sizeof(BigNode)))
  );
  auto szOut = bytes;
#endif
  auto ptrOut = debugPostAllocate(smartEnlist(n), bytes, szOut);
  FTRACE(3, "smartMallocSizeBig: {} ({} requested, {} usable)\n",
         ptrOut, bytes, szOut);
  return {ptrOut, szOut};
}
Example 17
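Modern (5.x) counterpart of Example 2: junk checks are gated on opt_junk_alloc/opt_junk_free and use JEMALLOC_ALLOC_JUNK, while sallocx() still drives the size-class walk and verifies rallocx() growth.
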
static void
test_junk(size_t sz_min, size_t sz_max) {
	uint8_t *s;
	size_t sz_prev, sz, i;

	if (opt_junk_free) {
		arena_dalloc_junk_small_orig = arena_dalloc_junk_small;
		arena_dalloc_junk_small = arena_dalloc_junk_small_intercept;
		large_dalloc_junk_orig = large_dalloc_junk;
		large_dalloc_junk = large_dalloc_junk_intercept;
		large_dalloc_maybe_junk_orig = large_dalloc_maybe_junk;
		large_dalloc_maybe_junk = large_dalloc_maybe_junk_intercept;
	}

	sz_prev = 0;
	s = (uint8_t *)mallocx(sz_min, 0);
	assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");

	for (sz = sallocx(s, 0); sz <= sz_max;
	    sz_prev = sz, sz = sallocx(s, 0)) {
		if (sz_prev > 0) {
			assert_u_eq(s[0], 'a',
			    "Previously allocated byte %zu/%zu is corrupted",
			    ZU(0), sz_prev);
			assert_u_eq(s[sz_prev-1], 'a',
			    "Previously allocated byte %zu/%zu is corrupted",
			    sz_prev-1, sz_prev);
		}

		for (i = sz_prev; i < sz; i++) {
			if (opt_junk_alloc) {
				assert_u_eq(s[i], JEMALLOC_ALLOC_JUNK,
				    "Newly allocated byte %zu/%zu isn't "
				    "junk-filled", i, sz);
			}
			s[i] = 'a';
		}

		if (xallocx(s, sz+1, 0, 0) == sz) {
			uint8_t *t;
			watch_junking(s);
			t = (uint8_t *)rallocx(s, sz+1, 0);
			assert_ptr_not_null((void *)t,
			    "Unexpected rallocx() failure");
			assert_zu_ge(sallocx(t, 0), sz+1,
			    "Unexpectedly small rallocx() result");
			if (!background_thread_enabled()) {
				assert_ptr_ne(s, t,
				    "Unexpected in-place rallocx()");
				assert_true(!opt_junk_free || saw_junking,
				    "Expected region of size %zu to be "
				    "junk-filled", sz);
			}
			s = t;
		}
	}

	watch_junking(s);
	dallocx(s, 0);
	assert_true(!opt_junk_free || saw_junking,
	    "Expected region of size %zu to be junk-filled", sz);

	if (opt_junk_free) {
		arena_dalloc_junk_small = arena_dalloc_junk_small_orig;
		large_dalloc_junk = large_dalloc_junk_orig;
		large_dalloc_maybe_junk = large_dalloc_maybe_junk_orig;
	}
}