Example #1
0
TEST_END

TEST_BEGIN(test_rtree_bits)
{
	unsigned i, j, k;

	/* Exercise trees keyed on every bit count short of a full word. */
	for (i = 1; i < (sizeof(uintptr_t) << 3); i++) {
		/*
		 * Keys that differ only in insignificant low-order bits for
		 * an i-bit tree: 0, 1, and the all-low-bits-set value.  Per
		 * the assertion below, rtree lookups must treat them as the
		 * same key.
		 */
		uintptr_t keys[] = {0, 1,
		    (((uintptr_t)1) << (sizeof(uintptr_t)*8-i)) - 1};
		rtree_t *rtree = rtree_new(i, malloc, free);

		for (j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) {
			rtree_set(rtree, keys[j], 1);
			for (k = 0; k < sizeof(keys)/sizeof(uintptr_t); k++) {
				assert_u_eq(rtree_get(rtree, keys[k]), 1,
				    "rtree_get() should return previously set "
				    "value and ignore insignificant key bits; "
				    "i=%u, j=%u, k=%u, set key=%#"PRIxPTR", "
				    "get key=%#"PRIxPTR, i, j, k, keys[j],
				    keys[k]);
			}
			/*
			 * A key with a significant (high-order) bit set maps
			 * to a different leaf and must still read as 0.
			 */
			assert_u_eq(rtree_get(rtree,
			    (((uintptr_t)1) << (sizeof(uintptr_t)*8-i))), 0,
			    "Only leftmost rtree leaf should be set; "
			    "i=%u, j=%u", i, j);
			/* Reset so the next iteration starts from empty. */
			rtree_set(rtree, keys[j], 0);
		}

		rtree_delete(rtree);
	}
}
Example #2
0
TEST_END

/*
 * Thread body: force a TSD cleanup while the thread is still running, then
 * verify the purgatory -> reincarnated state transitions and that the TSD
 * arena binding stays cleared across reincarnation.
 */
static void *
thd_start_reincarnated(void *arg) {
	tsd_t *tsd = tsd_fetch();
	assert(tsd);

	void *p = malloc(1);
	assert_ptr_not_null(p, "Unexpected malloc() failure");

	/* Manually trigger reincarnation. */
	assert_ptr_not_null(tsd_arena_get(tsd),
	    "Should have tsd arena set.");
	tsd_cleanup((void *)tsd);
	assert_ptr_null(*tsd_arenap_get_unsafe(tsd),
	    "TSD arena should have been cleared.");
	assert_u_eq(tsd->state, tsd_state_purgatory,
	    "TSD state should be purgatory\n");

	/* Using the allocator from purgatory reincarnates the TSD. */
	free(p);
	assert_u_eq(tsd->state, tsd_state_reincarnated,
	    "TSD state should be reincarnated\n");
	/* Fixed message: this call is mallocx(), not malloc(). */
	p = mallocx(1, MALLOCX_TCACHE_NONE);
	assert_ptr_not_null(p, "Unexpected mallocx() failure");
	assert_ptr_null(*tsd_arenap_get_unsafe(tsd),
	    "Should not have tsd arena set after reincarnation.");

	/* A second cleanup must also leave the arena pointer cleared. */
	free(p);
	tsd_cleanup((void *)tsd);
	assert_ptr_null(*tsd_arenap_get_unsafe(tsd),
	    "TSD arena should have been cleared after 2nd cleanup.");

	return NULL;
}
Example #3
0
TEST_END

TEST_BEGIN(test_psize_classes)
{
	size_t size_class, max_size_class;
	pszind_t pind, max_pind;

	max_size_class = get_max_size_class();
	max_pind = psz2ind(max_size_class);

	/*
	 * Walk every psize class, checking that psz2ind(), pind2sz(), and
	 * psz2u() are mutually consistent at and around each class boundary.
	 */
	for (pind = 0, size_class = pind2sz(pind); pind < max_pind ||
	    size_class < max_size_class; pind++, size_class =
	    pind2sz(pind)) {
		/* Index bound and size bound must be reached in lockstep. */
		assert_true(pind < max_pind,
		    "Loop conditionals should be equivalent; pind=%u, "
		    "size_class=%zu (%#zx)", pind, size_class, size_class);
		assert_true(size_class < max_size_class,
		    "Loop conditionals should be equivalent; pind=%u, "
		    "size_class=%zu (%#zx)", pind, size_class, size_class);

		/* psz2ind() and pind2sz() must be inverses of each other. */
		assert_u_eq(pind, psz2ind(size_class),
		    "psz2ind() does not reverse pind2sz(): pind=%u -->"
		    " size_class=%zu --> pind=%u --> size_class=%zu", pind,
		    size_class, psz2ind(size_class),
		    pind2sz(psz2ind(size_class)));
		assert_zu_eq(size_class, pind2sz(psz2ind(size_class)),
		    "pind2sz() does not reverse psz2ind(): pind=%u -->"
		    " size_class=%zu --> pind=%u --> size_class=%zu", pind,
		    size_class, psz2ind(size_class),
		    pind2sz(psz2ind(size_class)));

		/* One byte past a class boundary belongs to the next class. */
		assert_u_eq(pind+1, psz2ind(size_class+1),
		    "Next size_class does not round up properly");

		/* psz2u() rounds up to exactly this class from just above
		 * the previous class, from one byte below, and from the
		 * class size itself. */
		assert_zu_eq(size_class, (pind > 0) ?
		    psz2u(pind2sz(pind-1)+1) : psz2u(1),
		    "psz2u() does not round up to size class");
		assert_zu_eq(size_class, psz2u(size_class-1),
		    "psz2u() does not round up to size class");
		assert_zu_eq(size_class, psz2u(size_class),
		    "psz2u() does not compute same size class");
		assert_zu_eq(psz2u(size_class+1), pind2sz(pind+1),
		    "psz2u() does not round up to next size class");
	}

	/* Repeat the boundary checks for the final (maximum) class. */
	assert_u_eq(pind, psz2ind(pind2sz(pind)),
	    "psz2ind() does not reverse pind2sz()");
	assert_zu_eq(max_size_class, pind2sz(psz2ind(max_size_class)),
	    "pind2sz() does not reverse psz2ind()");

	assert_zu_eq(size_class, psz2u(pind2sz(pind-1)+1),
	    "psz2u() does not round up to size class");
	assert_zu_eq(size_class, psz2u(size_class-1),
	    "psz2u() does not round up to size class");
	assert_zu_eq(size_class, psz2u(size_class),
	    "psz2u() does not compute same size class");
}
Example #4
0
TEST_END

TEST_BEGIN(huge_mallocx) {
	unsigned arena1, arena2;
	size_t sz = sizeof(unsigned);

	/* A huge allocation pinned to a manual arena must come from it. */
	assert_d_eq(mallctl("arenas.create", &arena1, &sz, NULL, 0), 0,
	    "Failed to create arena");
	void *huge = mallocx(HUGE_SZ, MALLOCX_ARENA(arena1));
	assert_ptr_not_null(huge, "Fail to allocate huge size");
	assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &huge,
	    sizeof(huge)), 0, "Unexpected mallctl() failure");
	assert_u_eq(arena1, arena2, "Wrong arena used for mallocx");
	dallocx(huge, MALLOCX_ARENA(arena1));

	/*
	 * A default huge allocation must avoid both arena 0 and the manual
	 * arena created above.
	 */
	void *huge2 = mallocx(HUGE_SZ, 0);
	/* Bug fix: check the pointer just allocated (huge2, not huge). */
	assert_ptr_not_null(huge2, "Fail to allocate huge size");
	assert_d_eq(mallctl("arenas.lookup", &arena2, &sz, &huge2,
	    sizeof(huge2)), 0, "Unexpected mallctl() failure");
	assert_u_ne(arena1, arena2,
	    "Huge allocation should not come from the manual arena.");
	assert_u_ne(arena2, 0,
	    "Huge allocation should not come from the arena 0.");
	dallocx(huge2, 0);
}
Example #5
0
/*
 * Thread body: verify that "thread.arena" both accepts a new binding (the
 * main thread's arena, passed via arg) and reads the same value back.
 */
void *
thd_start(void *arg)
{
	unsigned main_arena_ind = *(unsigned *)arg;
	unsigned arena_ind;
	size_t sz = sizeof(arena_ind);
	void *p;
	int ret;

	/* Sanity-check that allocation works on this thread at all. */
	p = malloc(1);
	assert_ptr_not_null(p, "Error in malloc()");
	free(p);

	/* Bind this thread to the main thread's arena. */
	ret = mallctl("thread.arena", &arena_ind, &sz, &main_arena_ind,
	    sizeof(main_arena_ind));
	if (ret != 0) {
		char buf[BUFERROR_BUF];

		buferror(ret, buf, sizeof(buf));
		test_fail("Error in mallctl(): %s", buf);
	}

	/* Read the binding back. */
	sz = sizeof(arena_ind);
	ret = mallctl("thread.arena", &arena_ind, &sz, NULL, 0);
	if (ret != 0) {
		char buf[BUFERROR_BUF];

		buferror(ret, buf, sizeof(buf));
		test_fail("Error in mallctl(): %s", buf);
	}
	assert_u_eq(arena_ind, main_arena_ind,
	    "Arena index should be same as for main thread");

	return (NULL);
}
Example #6
0
/*
 * Verify that a statically initialized bitmap_info_t (binfo) matches one
 * built dynamically via bitmap_info_init() for the same nbits.
 */
static void
test_bitmap_initializer_body(const bitmap_info_t *binfo, size_t nbits)
{
	bitmap_info_t binfo_dyn;
	bitmap_info_init(&binfo_dyn, nbits);

	assert_zu_eq(bitmap_size(binfo), bitmap_size(&binfo_dyn),
	    "Unexpected difference between static and dynamic initialization, "
	    "nbits=%zu", nbits);
	assert_zu_eq(binfo->nbits, binfo_dyn.nbits,
	    "Unexpected difference between static and dynamic initialization, "
	    "nbits=%zu", nbits);
#ifdef BITMAP_USE_TREE
	/* Tree layout: compare level count and each level's group offset. */
	assert_u_eq(binfo->nlevels, binfo_dyn.nlevels,
	    "Unexpected difference between static and dynamic initialization, "
	    "nbits=%zu", nbits);
	{
		unsigned i;

		for (i = 0; i < binfo->nlevels; i++) {
			assert_zu_eq(binfo->levels[i].group_offset,
			    binfo_dyn.levels[i].group_offset,
			    "Unexpected difference between static and dynamic "
			    "initialization, nbits=%zu, level=%u", nbits, i);
		}
	}
#else
	/* Flat layout: the group count is the only extra field. */
	assert_zu_eq(binfo->ngroups, binfo_dyn.ngroups,
	    "Unexpected difference between static and dynamic initialization");
#endif
}
Example #7
0
TEST_END

TEST_BEGIN(test_arenas_create) {
	unsigned n_before, new_ind, n_after;
	size_t len = sizeof(unsigned);

	/* Record the arena count, extend it by one, then re-read it. */
	assert_d_eq(mallctl("arenas.narenas", (void *)&n_before, &len, NULL,
	    0), 0, "Unexpected mallctl() failure");
	assert_d_eq(mallctl("arenas.create", (void *)&new_ind, &len, NULL, 0),
	    0, "Unexpected mallctl() failure");
	assert_d_eq(mallctl("arenas.narenas", (void *)&n_after, &len, NULL,
	    0), 0, "Unexpected mallctl() failure");

	/* Exactly one arena was added, and it got the last index. */
	assert_u_eq(n_before+1, n_after,
	    "Unexpected number of arenas before versus after extension");
	assert_u_eq(new_ind, n_after-1, "Unexpected arena index");
}
Example #8
0
TEST_END

TEST_BEGIN(test_rtree_random)
{
#define	NSET 100
#define	SEED 42
	unsigned bits, n;
	sfmt_t *prng = init_gen_rand(SEED);

	/* Repeat for every key width from 1 bit up to a full word. */
	for (bits = 1; bits <= (sizeof(uintptr_t) << 3); bits++) {
		rtree_t *rtree = rtree_new(bits, malloc, free);
		uintptr_t keys[NSET];

		/* Insert random keys, checking each insertion immediately. */
		for (n = 0; n < NSET; n++) {
			keys[n] = (uintptr_t)gen_rand64(prng);
			rtree_set(rtree, keys[n], 1);
			assert_u_eq(rtree_get(rtree, keys[n]), 1,
			    "rtree_get() should return previously set value");
		}
		/* All keys must still be present after the full insert pass. */
		for (n = 0; n < NSET; n++) {
			assert_u_eq(rtree_get(rtree, keys[n]), 1,
			    "rtree_get() should return previously set value");
		}

		/* Clear the keys, checking each clear immediately. */
		for (n = 0; n < NSET; n++) {
			rtree_set(rtree, keys[n], 0);
			assert_u_eq(rtree_get(rtree, keys[n]), 0,
			    "rtree_get() should return previously set value");
		}
		/* All keys must remain cleared after the full clear pass. */
		for (n = 0; n < NSET; n++) {
			assert_u_eq(rtree_get(rtree, keys[n]), 0,
			    "rtree_get() should return previously set value");
		}

		rtree_delete(rtree);
	}
	fini_gen_rand(prng);
#undef NSET
#undef SEED
}
Example #9
0
TEST_END

TEST_BEGIN(test_rtree_extrema)
{
	unsigned nbits;

	/* Check the smallest and largest possible keys at every width. */
	for (nbits = 1; nbits <= (sizeof(uintptr_t) << 3); nbits++) {
		rtree_t *rtree = rtree_new(nbits, malloc, free);
		uintptr_t key_min = 0;
		uintptr_t key_max = ~((uintptr_t)0);

		rtree_set(rtree, key_min, 1);
		assert_u_eq(rtree_get(rtree, key_min), 1,
		    "rtree_get() should return previously set value");

		rtree_set(rtree, key_max, 1);
		assert_u_eq(rtree_get(rtree, key_max), 1,
		    "rtree_get() should return previously set value");

		rtree_delete(rtree);
	}
}
Example #10
0
/* Assert that an empty ql list looks empty from every accessor. */
static void
test_empty_list(list_head_t *head)
{
	list_t *elm;
	unsigned count;

	/* Both end accessors must return no element. */
	assert_ptr_null(ql_first(head), "Unexpected element for empty list");
	assert_ptr_null(ql_last(head, link),
	    "Unexpected element for empty list");

	/* Forward traversal must visit nothing. */
	count = 0;
	ql_foreach(elm, head, link) {
		count++;
	}
	assert_u_eq(count, 0, "Unexpected element for empty list");

	/* Reverse traversal must visit nothing. */
	count = 0;
	ql_reverse_foreach(elm, head, link) {
		count++;
	}
	assert_u_eq(count, 0, "Unexpected element for empty list");
}
Example #11
0
/*
 * Grow an allocation from sz_min toward sz_max, verifying at each step that
 * newly exposed bytes are zero-filled and previously written sentinel bytes
 * survive reallocation.  (Reindented with tabs for consistency with the
 * rest of the file; logic unchanged.)
 */
static void
test_zero(size_t sz_min, size_t sz_max)
{
	uint8_t *s;
	size_t sz_prev, sz, i;
#define	MAGIC	((uint8_t)0x61)

	sz_prev = 0;
	s = (uint8_t *)mallocx(sz_min, 0);
	assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");

	for (sz = sallocx(s, 0); sz <= sz_max;
	    sz_prev = sz, sz = sallocx(s, 0)) {
		if (sz_prev > 0) {
			/* Sentinels written last round must be intact. */
			assert_u_eq(s[0], MAGIC,
			    "Previously allocated byte %zu/%zu is corrupted",
			    ZU(0), sz_prev);
			assert_u_eq(s[sz_prev-1], MAGIC,
			    "Previously allocated byte %zu/%zu is corrupted",
			    sz_prev-1, sz_prev);
		}

		/* New bytes must be zero; mark them with the sentinel. */
		for (i = sz_prev; i < sz; i++) {
			assert_u_eq(s[i], 0x0,
			    "Newly allocated byte %zu/%zu isn't zero-filled",
			    i, sz);
			s[i] = MAGIC;
		}

		/*
		 * xallocx() returning sz means in-place growth failed, so
		 * force a (possibly moving) reallocation to the next size.
		 */
		if (xallocx(s, sz+1, 0, 0) == sz) {
			s = (uint8_t *)rallocx(s, sz+1, 0);
			assert_ptr_not_null((void *)s,
			    "Unexpected rallocx() failure");
		}
	}

	dallocx(s, 0);
#undef MAGIC
}
Example #12
0
/* Interceptor: run the real small-dalloc junk hook, then verify the fill. */
static void
arena_dalloc_junk_small_intercept(void *ptr, const arena_bin_info_t *bin_info) {
	uint8_t *bytes = (uint8_t *)ptr;
	size_t off;

	arena_dalloc_junk_small_orig(ptr, bin_info);
	/* Every byte of the region must carry the free-junk pattern. */
	for (off = 0; off < bin_info->reg_size; off++) {
		assert_u_eq(bytes[off], JEMALLOC_FREE_JUNK,
		    "Missing junk fill for byte %zu/%zu of deallocated region",
		    off, bin_info->reg_size);
	}
	/* Record that the region under observation was junked. */
	if (ptr == watch_for_junking) {
		saw_junking = true;
	}
}
Example #13
0
/* Interceptor: run the real large-dalloc junk hook, then verify the fill. */
static void
large_dalloc_junk_intercept(void *ptr, size_t usize) {
	uint8_t *bytes = (uint8_t *)ptr;
	size_t off;

	large_dalloc_junk_orig(ptr, usize);
	/* Every byte of the region must carry the free-junk pattern. */
	for (off = 0; off < usize; off++) {
		assert_u_eq(bytes[off], JEMALLOC_FREE_JUNK,
		    "Missing junk fill for byte %zu/%zu of deallocated region",
		    off, usize);
	}
	/* Record that the region under observation was junked. */
	if (ptr == watch_for_junking) {
		saw_junking = true;
	}
}
Example #14
0
TEST_END

TEST_BEGIN(test_arenas_lookup) {
	unsigned created, looked_up;
	size_t len = sizeof(unsigned);
	void *ptr;

	/* Allocate from a fresh arena, bypassing the thread cache. */
	assert_d_eq(mallctl("arenas.create", (void *)&created, &len, NULL, 0),
	    0, "Unexpected mallctl() failure");
	ptr = mallocx(42, MALLOCX_ARENA(created) | MALLOCX_TCACHE_NONE);
	assert_ptr_not_null(ptr, "Unexpected mallocx() failure");

	/* arenas.lookup must report the arena the pointer came from. */
	assert_d_eq(mallctl("arenas.lookup", &looked_up, &len, &ptr,
	    sizeof(ptr)), 0, "Unexpected mallctl() failure");
	assert_u_eq(created, looked_up, "Unexpected arena index");
	dallocx(ptr, 0);
}
Example #15
0
TEST_END

TEST_BEGIN(test_thread_arena)
{
	unsigned prev_ind, next_ind, narenas;
	size_t len = sizeof(unsigned);

	assert_d_eq(mallctl("arenas.narenas", &narenas, &len, NULL, 0), 0,
	    "Unexpected mallctl() failure");
	assert_u_eq(narenas, opt_narenas, "Number of arenas incorrect");

	/* Rebind this thread to the last arena, then back to arena 0. */
	next_ind = narenas - 1;
	assert_d_eq(mallctl("thread.arena", &prev_ind, &len, &next_ind,
	    sizeof(unsigned)), 0, "Unexpected mallctl() failure");
	next_ind = 0;
	assert_d_eq(mallctl("thread.arena", &prev_ind, &len, &next_ind,
	    sizeof(unsigned)), 0, "Unexpected mallctl() failure");
}
Example #16
0
TEST_END

TEST_BEGIN(test_thread_arena) {
	unsigned old_arena_ind, new_arena_ind, narenas;

	/* Read opt.percpu_arena to decide which behavior to expect below. */
	const char *opa;
	size_t sz = sizeof(opa);
	assert_d_eq(mallctl("opt.percpu_arena", &opa, &sz, NULL, 0), 0,
	    "Unexpected mallctl() failure");

	sz = sizeof(unsigned);
	assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0),
	    0, "Unexpected mallctl() failure");
	assert_u_eq(narenas, opt_narenas, "Number of arenas incorrect");

	if (strcmp(opa, "disabled") == 0) {
		/* percpu arenas off: rebinding the thread must succeed. */
		new_arena_ind = narenas - 1;
		assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
		    (void *)&new_arena_ind, sizeof(unsigned)), 0,
		    "Unexpected mallctl() failure");
		new_arena_ind = 0;
		assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
		    (void *)&new_arena_ind, sizeof(unsigned)), 0,
		    "Unexpected mallctl() failure");
	} else {
		/* percpu arenas on: reading is fine, rebinding is EPERM. */
		assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind, &sz,
		    NULL, 0), 0, "Unexpected mallctl() failure");
		new_arena_ind = percpu_arena_ind_limit(opt_percpu_arena) - 1;
		if (old_arena_ind != new_arena_ind) {
			assert_d_eq(mallctl("thread.arena",
			    (void *)&old_arena_ind, &sz, (void *)&new_arena_ind,
			    sizeof(unsigned)), EPERM, "thread.arena ctl "
			    "should not be allowed with percpu arena");
		}
	}
}
Example #17
0
TEST_END

TEST_BEGIN(test_overflow)
{
	size_t max_sc = get_max_size_class();

	/* Sizes past the maximum class map to the sentinel index NSIZES. */
	assert_u_eq(size2index(max_sc+1), NSIZES,
	    "size2index() should return NSIZES on overflow");
	assert_u_eq(size2index(ZU(PTRDIFF_MAX)+1), NSIZES,
	    "size2index() should return NSIZES on overflow");
	assert_u_eq(size2index(SIZE_T_MAX), NSIZES,
	    "size2index() should return NSIZES on overflow");

	/* s2u() signals unsupported sizes by returning 0. */
	assert_zu_eq(s2u(max_sc+1), 0,
	    "s2u() should return 0 for unsupported size");
	assert_zu_eq(s2u(ZU(PTRDIFF_MAX)+1), 0,
	    "s2u() should return 0 for unsupported size");
	assert_zu_eq(s2u(SIZE_T_MAX), 0,
	    "s2u() should return 0 on overflow");

	/* The psize variants must behave analogously. */
	assert_u_eq(psz2ind(max_sc+1), NPSIZES,
	    "psz2ind() should return NPSIZES on overflow");
	assert_u_eq(psz2ind(ZU(PTRDIFF_MAX)+1), NPSIZES,
	    "psz2ind() should return NPSIZES on overflow");
	assert_u_eq(psz2ind(SIZE_T_MAX), NPSIZES,
	    "psz2ind() should return NPSIZES on overflow");

	assert_zu_eq(psz2u(max_sc+1), 0,
	    "psz2u() should return 0 for unsupported size");
	assert_zu_eq(psz2u(ZU(PTRDIFF_MAX)+1), 0,
	    "psz2u() should return 0 for unsupported size");
	assert_zu_eq(psz2u(SIZE_T_MAX), 0,
	    "psz2u() should return 0 on overflow");
}
Example #18
0
/*
 * Grow an allocation from sz_min toward sz_max, verifying junk filling:
 * allocation junk (if opt_junk_alloc) on newly exposed bytes, and free junk
 * (if opt_junk_free, via the interceptors installed below) when regions are
 * deallocated or moved by rallocx().
 */
static void
test_junk(size_t sz_min, size_t sz_max) {
	uint8_t *s;
	size_t sz_prev, sz, i;

	/* Install interceptors that verify junk fill on deallocation. */
	if (opt_junk_free) {
		arena_dalloc_junk_small_orig = arena_dalloc_junk_small;
		arena_dalloc_junk_small = arena_dalloc_junk_small_intercept;
		large_dalloc_junk_orig = large_dalloc_junk;
		large_dalloc_junk = large_dalloc_junk_intercept;
		large_dalloc_maybe_junk_orig = large_dalloc_maybe_junk;
		large_dalloc_maybe_junk = large_dalloc_maybe_junk_intercept;
	}

	sz_prev = 0;
	s = (uint8_t *)mallocx(sz_min, 0);
	assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");

	for (sz = sallocx(s, 0); sz <= sz_max;
	    sz_prev = sz, sz = sallocx(s, 0)) {
		if (sz_prev > 0) {
			/* Sentinel bytes from the last round must survive. */
			assert_u_eq(s[0], 'a',
			    "Previously allocated byte %zu/%zu is corrupted",
			    ZU(0), sz_prev);
			assert_u_eq(s[sz_prev-1], 'a',
			    "Previously allocated byte %zu/%zu is corrupted",
			    sz_prev-1, sz_prev);
		}

		/* New bytes must be junk-filled; overwrite with sentinel. */
		for (i = sz_prev; i < sz; i++) {
			if (opt_junk_alloc) {
				assert_u_eq(s[i], JEMALLOC_ALLOC_JUNK,
				    "Newly allocated byte %zu/%zu isn't "
				    "junk-filled", i, sz);
			}
			s[i] = 'a';
		}

		/*
		 * xallocx() returning sz means in-place growth failed;
		 * rallocx() then moves the region, which must junk-fill the
		 * old one (observed via the interceptors).
		 */
		if (xallocx(s, sz+1, 0, 0) == sz) {
			uint8_t *t;
			watch_junking(s);
			t = (uint8_t *)rallocx(s, sz+1, 0);
			assert_ptr_not_null((void *)t,
			    "Unexpected rallocx() failure");
			assert_zu_ge(sallocx(t, 0), sz+1,
			    "Unexpectedly small rallocx() result");
			if (!background_thread_enabled()) {
				assert_ptr_ne(s, t,
				    "Unexpected in-place rallocx()");
				assert_true(!opt_junk_free || saw_junking,
				    "Expected region of size %zu to be "
				    "junk-filled", sz);
			}
			s = t;
		}
	}

	/* The final free must also junk-fill the watched region. */
	watch_junking(s);
	dallocx(s, 0);
	assert_true(!opt_junk_free || saw_junking,
	    "Expected region of size %zu to be junk-filled", sz);

	/* Restore the original hooks. */
	if (opt_junk_free) {
		arena_dalloc_junk_small = arena_dalloc_junk_small_orig;
		large_dalloc_junk = large_dalloc_junk_orig;
		large_dalloc_maybe_junk = large_dalloc_maybe_junk_orig;
	}
}