Example #1
static extent_node_t *
base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
{
	extent_node_t *node;
	size_t csize, nsize;
	void *addr;

	malloc_mutex_assert_owner(tsdn, &base_mtx);
	assert(minsize != 0);
	node = base_node_try_alloc(tsdn);
	/* Allocate enough space to also carve a node out if necessary. */
	nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0;
	csize = CHUNK_CEILING(minsize + nsize);
	addr = chunk_alloc_base(csize);
	if (addr == NULL) {
		if (node != NULL)
			base_node_dalloc(tsdn, node);
		return (NULL);
	}
	base_mapped += csize;
	if (node == NULL) {
		node = (extent_node_t *)addr;
		addr = (void *)((uintptr_t)addr + nsize);
		csize -= nsize;
		if (config_stats) {
			base_allocated += nsize;
			base_resident += PAGE_CEILING(nsize);
		}
	}
	base_extent_node_init(node, addr, csize);
	return (node);
}
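The notable trick in base_chunk_alloc is that, when the node free list is empty, it over-allocates the chunk by one cacheline-aligned node header and carves that node out of the front of the new mapping. Below is a minimal, self-contained sketch of the same pattern; the names (carve_alloc, node_t) and the plain mmap backing are my own simplifications, not jemalloc's internals.

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

typedef struct node_s {
	void	*addr;	/* usable region handed back to the caller */
	size_t	size;	/* size of that region */
} node_t;

#define ALIGN_UP(x, a)	(((x) + ((a) - 1)) & ~((size_t)(a) - 1))

/*
 * Map at least minsize usable bytes, placing the bookkeeping node at the
 * start of the mapping so no separate allocation is needed.  Returns NULL
 * if the mapping fails.
 */
static node_t *
carve_alloc(size_t minsize)
{
	size_t nsize = ALIGN_UP(sizeof(node_t), 64);	/* cacheline-aligned header */
	size_t csize = ALIGN_UP(minsize + nsize, 4096);	/* round total up to pages */
	void *addr = mmap(NULL, csize, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED)
		return NULL;
	node_t *node = (node_t *)addr;	/* carve the node from the front */
	node->addr = (void *)((uintptr_t)addr + nsize);
	node->size = csize - nsize;
	return node;
}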
Example #2
static void
base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
	assert(opt_metadata_thp == metadata_thp_auto);
	malloc_mutex_assert_owner(tsdn, &base->mtx);
	if (base->auto_thp_switched) {
		return;
	}
	/* Called when adding a new block. */
	bool should_switch;
	if (base_ind_get(base) != 0) {
		should_switch = (base_get_num_blocks(base, true) ==
		    BASE_AUTO_THP_THRESHOLD);
	} else {
		should_switch = (base_get_num_blocks(base, true) ==
		    BASE_AUTO_THP_THRESHOLD_A0);
	}
	if (!should_switch) {
		return;
	}

	base->auto_thp_switched = true;
	assert(!config_stats || base->n_thp == 0);
	/* Make the initial blocks THP lazily. */
	base_block_t *block = base->blocks;
	while (block != NULL) {
		assert((block->size & HUGEPAGE_MASK) == 0);
		pages_huge(block, block->size);
		if (config_stats) {
			base->n_thp += HUGEPAGE_CEILING(block->size -
			    extent_bsize_get(&block->extent)) >> LG_HUGEPAGE;
		}
		block = block->next;
		assert(block == NULL || (base_ind_get(base) == 0));
	}
}
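pages_huge is jemalloc's request that the kernel back a range with transparent huge pages; on Linux that ultimately comes down to madvise(..., MADV_HUGEPAGE). A hedged sketch of such a wrapper is below — pages_huge_hint is an invented name, and this is a simplification of what jemalloc's pages.c actually does.

#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>

/*
 * Hint that [addr, addr + size) should be backed by transparent huge
 * pages.  Both are expected to be hugepage-aligned, as the assert on
 * block->size in the loop above suggests.  Returns true on failure.
 */
static bool
pages_huge_hint(void *addr, size_t size)
{
#ifdef MADV_HUGEPAGE
	return (madvise(addr, size, MADV_HUGEPAGE) != 0);
#else
	(void)addr; (void)size;
	return true;	/* THP hints unsupported on this platform */
#endif
}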
Example #3
static void
arena_extent_cache_dalloc_locked(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent)
{
	malloc_mutex_assert_owner(tsdn, &arena->lock);

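	/*
	 * Return the extent to the arena's cache, then let the arena purge
	 * dirty pages if it decides it should.
	 */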
	extent_dalloc_cache(tsdn, arena, r_extent_hooks, extent);
	arena_maybe_purge(tsdn, arena);
}
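Every example here starts with malloc_mutex_assert_owner, which in debug builds checks that the calling thread really holds the lock guarding the state about to be touched — the _locked suffix on functions like this one signals that the caller must already own arena->lock. A simplified, self-contained analogue of that idea on top of pthreads (the owner-tracking struct and names are mine, not jemalloc's mutex/witness machinery):

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

typedef struct {
	pthread_mutex_t	lock;
	pthread_t	owner;	/* valid only while held */
	bool		held;
} owned_mutex_t;

static void
owned_mutex_lock(owned_mutex_t *m)
{
	pthread_mutex_lock(&m->lock);
	m->owner = pthread_self();
	m->held = true;
}

static void
owned_mutex_unlock(owned_mutex_t *m)
{
	m->held = false;
	pthread_mutex_unlock(&m->lock);
}

/* Debug-only check: abort if the caller does not currently hold the mutex. */
static void
owned_mutex_assert_owner(owned_mutex_t *m)
{
	assert(m->held && pthread_equal(m->owner, pthread_self()));
}

A _locked helper would call owned_mutex_assert_owner on entry instead of taking the lock itself, catching call paths that forgot to lock without paying for a second acquisition.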
Example #4
static void
base_node_dalloc(tsdn_t *tsdn, extent_node_t *node)
{

	malloc_mutex_assert_owner(tsdn, &base_mtx);

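	/* Push onto the free list; the first word stores the old head. */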
	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
	*(extent_node_t **)node = base_nodes;
	base_nodes = node;
}
Example #5
void
arena_extent_cache_maybe_insert(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    bool cache)
{
	malloc_mutex_assert_owner(tsdn, &arena->extents_mtx);

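	/* Cached extents go onto the dirty ring and count toward ndirty. */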
	if (cache) {
		extent_ring_insert(&arena->extents_dirty, extent);
		arena->ndirty += arena_extent_dirty_npages(extent);
	}
}
Example #6
static extent_t *
arena_extent_cache_alloc_locked(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
    size_t alignment, bool *zero, bool slab)
{
	bool commit = true;

	malloc_mutex_assert_owner(tsdn, &arena->lock);

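	/* Try to satisfy the allocation from the arena's cached extents. */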
	return (extent_alloc_cache(tsdn, arena, r_extent_hooks, new_addr, usize,
	    pad, alignment, zero, &commit, slab));
}
Example #7
void
arena_extent_cache_maybe_remove(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    bool dirty)
{
	malloc_mutex_assert_owner(tsdn, &arena->extents_mtx);

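	/* Unlink dirty extents from the dirty ring and update ndirty. */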
	if (dirty) {
		extent_ring_remove(extent);
		assert(arena->ndirty >= arena_extent_dirty_npages(extent));
		arena->ndirty -= arena_extent_dirty_npages(extent);
	}
}
Example #8
static extent_node_t *
base_node_try_alloc(tsdn_t *tsdn)
{
	extent_node_t *node;

	malloc_mutex_assert_owner(tsdn, &base_mtx);

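	/* Pop the head; a free node's first word holds the next pointer. */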
	if (base_nodes == NULL)
		return (NULL);
	node = base_nodes;
	base_nodes = *(extent_node_t **)node;
	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
	return (node);
}
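Examples #4 and #8 are the two halves of a tiny intrusive free list: a freed node's own first word is reused to hold the next pointer, so the list needs no extra memory and no separate allocation. A stripped-down sketch of the same push/pop pair follows; the names and the reduced node layout are illustrative only, and the tsdn/Valgrind plumbing is omitted.

#include <stddef.h>

/* Minimal stand-in for extent_node_t; only the first word matters while free. */
typedef struct node_s {
	void	*addr;	/* first field: its storage doubles as the free-list link */
	size_t	size;
} node_t;

static node_t *free_nodes = NULL;	/* head of the free list */

/* Push: stash the old head in the node's first word, then make it the head. */
static void
node_dalloc(node_t *node)
{
	*(node_t **)node = free_nodes;
	free_nodes = node;
}

/* Pop: take the head and follow the link stored in its first word. */
static node_t *
node_try_alloc(void)
{
	if (free_nodes == NULL)
		return NULL;
	node_t *node = free_nodes;
	free_nodes = *(node_t **)node;
	return node;
}

The JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED calls in the originals simply tell Valgrind that the node's contents are undefined across this recycling.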