Example no. 1
void
huge_dalloc(void *ptr, bool unmap)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = ptr;
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);
	assert(node->addr == ptr);
	extent_tree_ad_remove(&huge, node);

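	/* Update huge allocation statistics while the lock is still held. */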
	if (config_stats) {
		stats_cactive_sub(node->size);
		huge_ndalloc++;
		huge_allocated -= node->size;
	}

	malloc_mutex_unlock(&huge_mtx);

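	/* Junk-fill the memory before it is unmapped. */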
	if (unmap)
		huge_dalloc_junk(node->addr, node->size);

	chunk_dealloc(node->addr, node->size, unmap);

	base_node_dealloc(node);
}
Example no. 2
static bool
huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
{
	extent_node_t *node;
	arena_t *arena;
	chunk_hooks_t chunk_hooks;
	size_t cdiff;
	bool pre_zeroed, post_zeroed;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	pre_zeroed = extent_node_zeroed_get(node);
	chunk_hooks = chunk_hooks_get(arena);

	assert(oldsize > usize);

	/* Split excess chunks. */
	cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
	if (cdiff != 0 && chunk_hooks.split(ptr, CHUNK_CEILING(oldsize),
	    CHUNK_CEILING(usize), cdiff, true, arena->ind))
		return (true);

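	/* Junk-fill or purge the bytes between the new and old sizes. */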
	if (oldsize > usize) {
		size_t sdiff = oldsize - usize;
		if (config_fill && unlikely(opt_junk_free)) {
			huge_dalloc_junk((void *)((uintptr_t)ptr + usize),
			    sdiff);
			post_zeroed = false;
		} else {
			post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
			    CHUNK_ADDR2BASE((uintptr_t)ptr + usize),
			    CHUNK_CEILING(oldsize),
			    CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
		}
	} else
		post_zeroed = pre_zeroed;

	malloc_mutex_lock(&arena->huge_mtx);
	/* Update the size of the huge allocation. */
	extent_node_size_set(node, usize);
	/* Update zeroed. */
	extent_node_zeroed_set(node, post_zeroed);
	malloc_mutex_unlock(&arena->huge_mtx);

	/* Zap the excess chunks. */
	arena_chunk_ralloc_huge_shrink(arena, ptr, oldsize, usize);

	return (false);
}
Example no. 3
void
huge_dalloc(void *ptr, bool unmap)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = ptr;
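	/*
	 * With memkind enabled, the address alone does not identify the node;
	 * probe successive partition ids until a matching node is found.
	 */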
#ifdef JEMALLOC_ENABLE_MEMKIND
	key.partition = -1;
	do {
		key.partition++;
#endif
		node = extent_tree_ad_search(&huge, &key);
#ifdef JEMALLOC_ENABLE_MEMKIND
	} while ((node == NULL || node->partition != key.partition) &&
		 key.partition < 256); /* FIXME hard coding partition max to 256 */
#endif

	assert(node != NULL);
	assert(node->addr == ptr);

	extent_tree_ad_remove(&huge, node);

	if (config_stats) {
		stats_cactive_sub(node->size);
		huge_ndalloc++;
		huge_allocated -= node->size;
	}

	malloc_mutex_unlock(&huge_mtx);

	if (unmap)
		huge_dalloc_junk(node->addr, node->size);

	chunk_dealloc(node->addr, node->size, unmap
#ifdef JEMALLOC_ENABLE_MEMKIND
	    , key.partition
#endif
	    );

	base_node_dealloc(node);
}
Example no. 4
void
huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
{
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	huge_node_unset(ptr, node);
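	/* Remove the node from the arena's list of huge allocations. */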
	malloc_mutex_lock(&arena->huge_mtx);
	ql_remove(&arena->huge, node, ql_link);
	malloc_mutex_unlock(&arena->huge_mtx);

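	/* Junk-fill, return the chunks to the arena, then free the node itself. */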
	huge_dalloc_junk(extent_node_addr_get(node),
	    extent_node_size_get(node));
	arena_chunk_dalloc_huge(extent_node_arena_get(node),
	    extent_node_addr_get(node), extent_node_size_get(node));
	idalloctm(tsd, node, tcache, true, true);
}
Example no. 5
void
huge_dalloc(pool_t *pool, void *ptr)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&pool->huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = ptr;
	node = extent_tree_ad_search(&pool->huge, &key);
	assert(node != NULL);
	assert(node->addr == ptr);
	extent_tree_ad_remove(&pool->huge, node);

	malloc_mutex_unlock(&pool->huge_mtx);

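	/* Junk-fill, return the chunks to the node's arena, then free the node. */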
	huge_dalloc_junk(node->addr, node->size);
	arena_chunk_dalloc_huge(node->arena, node->addr, node->size);
	base_node_dalloc(pool, node);
}
Example no. 6
static void
huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
{
	extent_node_t *node;
	arena_t *arena;
	chunk_purge_t *chunk_purge;
	bool zeroed;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);

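	/* Read the arena's purge hook under the arena lock. */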
	malloc_mutex_lock(&arena->lock);
	chunk_purge = arena->chunk_purge;
	malloc_mutex_unlock(&arena->lock);

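	/* Purge, and optionally junk-fill, the trailing bytes being shrunk away. */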
	if (oldsize > usize) {
		size_t sdiff = oldsize - usize;
		zeroed = !chunk_purge_wrapper(arena, chunk_purge,
		    CHUNK_ADDR2BASE((uintptr_t)ptr + usize),
		    CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
		if (config_fill && unlikely(opt_junk_free)) {
			huge_dalloc_junk((void *)((uintptr_t)ptr + usize),
			    sdiff);
			zeroed = false;
		}
	} else
		zeroed = true;

	malloc_mutex_lock(&arena->huge_mtx);
	/* Update the size of the huge allocation. */
	extent_node_size_set(node, usize);
	/* Clear node's zeroed field if zeroing failed above. */
	extent_node_zeroed_set(node, extent_node_zeroed_get(node) && zeroed);
	malloc_mutex_unlock(&arena->huge_mtx);

	/* Zap the excess chunks. */
	arena_chunk_ralloc_huge_shrink(arena, ptr, oldsize, usize);
}