Example #1
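/*
 * huge_palloc(), as patched in Android's jemalloc fork: allocates one or
 * more contiguous chunks for a huge allocation. idalloctm() releases the
 * tracking extent_node_t on the failure paths.
 */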
void *
huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
    bool zero, tcache_t *tcache)
{
	void *ret;
	extent_node_t *node;
	bool is_zeroed;

	/* Allocate one or more contiguous chunks for this request. */

	/* Allocate an extent node with which to track the chunk. */
	node = ipallocztm(tsd, CACHELINE_CEILING(sizeof(extent_node_t)),
	    CACHELINE, false, tcache, true, arena);
	if (node == NULL)
		return (NULL);

	/*
	 * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
	 * it is possible to make correct junk/zero fill decisions below.
	 */
	is_zeroed = zero;
	/* ANDROID change */
#if !defined(__LP64__)
	/* On 32 bit systems, using a per arena cache can exhaust
	 * virtual address space. Force all huge allocations to
	 * always take place in the first arena.
	 */
	arena = a0get();
#else
	arena = arena_choose(tsd, arena);
#endif
	/* End ANDROID change */
	if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(arena,
	    usize, alignment, &is_zeroed)) == NULL) {
		idalloctm(tsd, node, tcache, true);
		return (NULL);
	}

	extent_node_init(node, arena, ret, usize, is_zeroed);

	if (huge_node_set(ret, node)) {
		arena_chunk_dalloc_huge(arena, ret, usize);
		idalloctm(tsd, node, tcache, true);
		return (NULL);
	}

	/* Insert node into huge. */
	malloc_mutex_lock(&arena->huge_mtx);
	ql_elm_new(node, ql_link);
	ql_tail_insert(&arena->huge, node, ql_link);
	malloc_mutex_unlock(&arena->huge_mtx);

	if (zero || (config_fill && unlikely(opt_zero))) {
		if (!is_zeroed)
			memset(ret, 0, usize);
	} else if (config_fill && unlikely(opt_junk_alloc))
		memset(ret, 0xa5, usize);

	return (ret);
}
Example #2
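/*
 * huge_palloc() from an unpatched jemalloc revision: computes the usable
 * size itself via sa2u(), and frees the extent node with idalloctm() when
 * chunk allocation or huge_node_set() fails.
 */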
void *
huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
    bool zero, tcache_t *tcache)
{
	void *ret;
	size_t usize;
	extent_node_t *node;
	bool is_zeroed;

	/* Allocate one or more contiguous chunks for this request. */

	usize = sa2u(size, alignment);
	if (unlikely(usize == 0))
		return (NULL);
	assert(usize >= chunksize);

	/* Allocate an extent node with which to track the chunk. */
	node = ipallocztm(tsd, CACHELINE_CEILING(sizeof(extent_node_t)),
	    CACHELINE, false, tcache, true, arena);
	if (node == NULL)
		return (NULL);

	/*
	 * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
	 * it is possible to make correct junk/zero fill decisions below.
	 */
	is_zeroed = zero;
	arena = arena_choose(tsd, arena);
	if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(arena,
	    size, alignment, &is_zeroed)) == NULL) {
		idalloctm(tsd, node, tcache, true, true);
		return (NULL);
	}

	extent_node_init(node, arena, ret, size, is_zeroed, true);

	if (huge_node_set(ret, node)) {
		arena_chunk_dalloc_huge(arena, ret, size);
		idalloctm(tsd, node, tcache, true, true);
		return (NULL);
	}

	/* Insert node into huge. */
	malloc_mutex_lock(&arena->huge_mtx);
	ql_elm_new(node, ql_link);
	ql_tail_insert(&arena->huge, node, ql_link);
	malloc_mutex_unlock(&arena->huge_mtx);

	if (zero || (config_fill && unlikely(opt_zero))) {
		if (!is_zeroed)
			memset(ret, 0, size);
	} else if (config_fill && unlikely(opt_junk_alloc))
		memset(ret, 0xa5, size);

	return (ret);
}
Example #3
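/*
 * ckh_grow() from jemalloc's cuckoo hash (ckh): repeatedly doubles the
 * cell count until ckh_rebuild() succeeds. idalloctm() frees whichever
 * table is being discarded (the old one on success, the new one on
 * rebuild failure).
 */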
static bool
ckh_grow(tsd_t *tsd, ckh_t *ckh)
{
	bool ret;
	ckhc_t *tab, *ttab;
	size_t lg_curcells;
	unsigned lg_prevbuckets;

#ifdef CKH_COUNT
	ckh->ngrows++;
#endif

	/*
	 * It is possible (though unlikely, given well behaved hashes) that the
	 * table will have to be doubled more than once in order to create a
	 * usable table.
	 */
	lg_prevbuckets = ckh->lg_curbuckets;
	lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS;
	while (true) {
		size_t usize;

		lg_curcells++;
		usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
		if (usize == 0) {
			ret = true;
			goto label_return;
		}
		tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL,
		    true, NULL);
		if (tab == NULL) {
			ret = true;
			goto label_return;
		}
		/* Swap in new table. */
		ttab = ckh->tab;
		ckh->tab = tab;
		tab = ttab;
		ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;

		if (!ckh_rebuild(ckh, tab)) {
			idalloctm(tsd, tab, tcache_get(tsd, false), true);
			break;
		}

		/* Rebuilding failed, so back out partially rebuilt table. */
		idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true);
		ckh->tab = tab;
		ckh->lg_curbuckets = lg_prevbuckets;
	}

	ret = false;
label_return:
	return (ret);
}
Example #4
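/*
 * quarantine() delays the release of a freed object by appending it to a
 * per-thread ring buffer; idalloctm() is the immediate-free fallback when
 * no quarantine exists or the object exceeds the quarantine size limit.
 */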
void
quarantine(tsd_t *tsd, void *ptr)
{
	quarantine_t *quarantine;
	size_t usize = isalloc(ptr, config_prof);

	cassert(config_fill);
	assert(opt_quarantine);

	if ((quarantine = tsd_quarantine_get(tsd)) == NULL) {
		idalloctm(tsd, ptr, NULL, false);
		return;
	}
	/*
	 * Drain one or more objects if the quarantine size limit would be
	 * exceeded by appending ptr.
	 */
	if (quarantine->curbytes + usize > opt_quarantine) {
		size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine
		    - usize : 0;
		quarantine_drain(tsd, quarantine, upper_bound);
	}
	/* Grow the quarantine ring buffer if it's full. */
	if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs))
		quarantine = quarantine_grow(tsd, quarantine);
	/* quarantine_grow() must free a slot if it fails to grow. */
	assert(quarantine->curobjs < (ZU(1) << quarantine->lg_maxobjs));
	/* Append ptr if its size doesn't exceed the quarantine size. */
	if (quarantine->curbytes + usize <= opt_quarantine) {
		size_t offset = (quarantine->first + quarantine->curobjs) &
		    ((ZU(1) << quarantine->lg_maxobjs) - 1);
		quarantine_obj_t *obj = &quarantine->objs[offset];
		obj->ptr = ptr;
		obj->usize = usize;
		quarantine->curbytes += usize;
		quarantine->curobjs++;
		if (config_fill && unlikely(opt_junk_free)) {
			/*
			 * Only do redzone validation if Valgrind isn't in
			 * operation.
			 */
			if ((!config_valgrind || likely(!in_valgrind))
			    && usize <= SMALL_MAXCLASS)
				arena_quarantine_junk_small(ptr, usize);
			else
				memset(ptr, 0x5a, usize);
		}
	} else {
		assert(quarantine->curbytes == 0);
		idalloctm(tsd, ptr, NULL, false);
	}
}
Example #5
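/*
 * quarantine_grow() doubles the quarantine ring buffer, copying the live
 * entries (in one or two pieces, depending on wrap-around) before freeing
 * the old quarantine_t with idalloctm().
 */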
static quarantine_t *
quarantine_grow(tsd_t *tsd, quarantine_t *quarantine)
{
	quarantine_t *ret;

	ret = quarantine_init(tsd, quarantine->lg_maxobjs + 1);
	if (ret == NULL) {
		quarantine_drain_one(tsd, quarantine);
		return (quarantine);
	}

	ret->curbytes = quarantine->curbytes;
	ret->curobjs = quarantine->curobjs;
	if (quarantine->first + quarantine->curobjs <= (ZU(1) <<
	    quarantine->lg_maxobjs)) {
		/* objs ring buffer data are contiguous. */
		memcpy(ret->objs, &quarantine->objs[quarantine->first],
		    quarantine->curobjs * sizeof(quarantine_obj_t));
	} else {
		/* objs ring buffer data wrap around. */
		size_t ncopy_a = (ZU(1) << quarantine->lg_maxobjs) -
		    quarantine->first;
		size_t ncopy_b = quarantine->curobjs - ncopy_a;

		memcpy(ret->objs, &quarantine->objs[quarantine->first], ncopy_a
		    * sizeof(quarantine_obj_t));
		memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b *
		    sizeof(quarantine_obj_t));
	}
	idalloctm(tsd, quarantine, tcache_get(tsd, false), true);

	tsd_quarantine_set(tsd, ret);
	return (ret);
}
Example #6
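/*
 * ckh_shrink() is the counterpart to ckh_grow(): it attempts a single
 * rebuild into a half-sized table and quietly keeps the old table on OOM
 * or rebuild failure, freeing the discarded table with idalloctm().
 */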
static void
ckh_shrink(tsd_t *tsd, ckh_t *ckh)
{
	ckhc_t *tab, *ttab;
	size_t lg_curcells, usize;
	unsigned lg_prevbuckets;

	/*
	 * It is possible (though unlikely, given well behaved hashes) that the
	 * table rebuild will fail.
	 */
	lg_prevbuckets = ckh->lg_curbuckets;
	lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
	usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
	if (usize == 0)
		return;
	tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true,
	    NULL);
	if (tab == NULL) {
		/*
		 * An OOM error isn't worth propagating, since it doesn't
		 * prevent this or future operations from proceeding.
		 */
		return;
	}
	/* Swap in new table. */
	ttab = ckh->tab;
	ckh->tab = tab;
	tab = ttab;
	ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;

	if (!ckh_rebuild(ckh, tab)) {
		idalloctm(tsd, tab, tcache_get(tsd, false), true);
#ifdef CKH_COUNT
		ckh->nshrinks++;
#endif
		return;
	}

	/* Rebuilding failed, so back out partially rebuilt table. */
	idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true);
	ckh->tab = tab;
	ckh->lg_curbuckets = lg_prevbuckets;
#ifdef CKH_COUNT
	ckh->nshrinkfails++;
#endif
}
Example #7
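/*
 * quarantine_drain_one() releases the oldest quarantined object with
 * idalloctm() and advances the ring buffer head.
 */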
static void
quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine)
{
	quarantine_obj_t *obj = &quarantine->objs[quarantine->first];
	assert(obj->usize == isalloc(obj->ptr, config_prof));
	idalloctm(tsd, obj->ptr, NULL, false);
	quarantine->curbytes -= obj->usize;
	quarantine->curobjs--;
	quarantine->first = (quarantine->first + 1) & ((ZU(1) <<
	    quarantine->lg_maxobjs) - 1);
}
Example #8
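/*
 * quarantine_cleanup() drains a thread's entire quarantine and then frees
 * the quarantine_t itself with idalloctm().
 */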
void
quarantine_cleanup(tsd_t *tsd)
{
	quarantine_t *quarantine;

	if (!config_fill)
		return;

	quarantine = tsd_quarantine_get(tsd);
	if (quarantine != NULL) {
		quarantine_drain(tsd, quarantine, 0);
		idalloctm(tsd, quarantine, tcache_get(tsd, false), true);
		tsd_quarantine_set(tsd, NULL);
	}
}
Example #9
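/*
 * quarantine_alloc_hook_work() lazily creates the per-thread quarantine;
 * if quarantine_init() triggered recursive initialization, the redundant
 * instance is freed with idalloctm().
 */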
void
quarantine_alloc_hook_work(tsd_t *tsd)
{
	quarantine_t *quarantine;

	if (!tsd_nominal(tsd))
		return;

	quarantine = quarantine_init(tsd, LG_MAXOBJS_INIT);
	/*
	 * Check again whether quarantine has been initialized, because
	 * quarantine_init() may have triggered recursive initialization.
	 */
	if (tsd_quarantine_get(tsd) == NULL)
		tsd_quarantine_set(tsd, quarantine);
	else
		idalloctm(tsd, quarantine, tcache_get(tsd, false), true);
}
Example #10
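/*
 * huge_dalloc() unlinks a huge allocation from its arena's huge list,
 * junk-fills and returns the chunks, and frees the tracking extent node
 * with idalloctm().
 */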
void
huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
{
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	huge_node_unset(ptr, node);
	malloc_mutex_lock(&arena->huge_mtx);
	ql_remove(&arena->huge, node, ql_link);
	malloc_mutex_unlock(&arena->huge_mtx);

	huge_dalloc_junk(extent_node_addr_get(node),
	    extent_node_size_get(node));
	arena_chunk_dalloc_huge(extent_node_arena_get(node),
	    extent_node_addr_get(node), extent_node_size_get(node));
	idalloctm(tsd, node, tcache, true, true);
}
Example #11
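/*
 * ckh_delete() from a later jemalloc revision (note the tsd_tsdn() wrapper
 * and the six-argument idalloctm()): frees the bucket table and junk-fills
 * the ckh_t in debug builds.
 */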
void
ckh_delete(tsd_t *tsd, ckh_t *ckh) {
	assert(ckh != NULL);

#ifdef CKH_VERBOSE
	malloc_printf(
	    "%s(%p): ngrows: %"FMTu64", nshrinks: %"FMTu64","
	    " nshrinkfails: %"FMTu64", ninserts: %"FMTu64","
	    " nrelocs: %"FMTu64"\n", __func__, ckh,
	    (unsigned long long)ckh->ngrows,
	    (unsigned long long)ckh->nshrinks,
	    (unsigned long long)ckh->nshrinkfails,
	    (unsigned long long)ckh->ninserts,
	    (unsigned long long)ckh->nrelocs);
#endif

	idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
	if (config_debug) {
		memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t));
	}
}
Example #12
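/*
 * ckh_delete() again, from an earlier revision of the same function, here
 * with the four-argument idalloctm() and an explicit tcache_get() lookup.
 */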
void
ckh_delete(tsd_t *tsd, ckh_t *ckh)
{

	assert(ckh != NULL);

#ifdef CKH_VERBOSE
	malloc_printf(
	    "%s(%p): ngrows: %"PRIu64", nshrinks: %"PRIu64","
	    " nshrinkfails: %"PRIu64", ninserts: %"PRIu64","
	    " nrelocs: %"PRIu64"\n", __func__, ckh,
	    (unsigned long long)ckh->ngrows,
	    (unsigned long long)ckh->nshrinks,
	    (unsigned long long)ckh->nshrinkfails,
	    (unsigned long long)ckh->ninserts,
	    (unsigned long long)ckh->nrelocs);
#endif

	idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true);
	if (config_debug)
		memset(ckh, 0x5a, sizeof(ckh_t));
}