Example #1
int
slab_arena_create(struct slab_arena *arena, struct quota *quota,
		  size_t prealloc, uint32_t slab_size, int flags)
{
	assert(flags & (MAP_PRIVATE | MAP_SHARED));
	lf_lifo_init(&arena->cache);
	/*
	 * Round up the user-supplied data; it can come in
	 * directly from the configuration file. Allow a
	 * zero-size arena for testing purposes.
	 */
	arena->slab_size = small_round(MAX(slab_size, SLAB_MIN_SIZE));

	arena->quota = quota;
	/** Prealloc cannot be greater than the quota. */
	prealloc = MIN(prealloc, quota_total(quota));
	/** Extremely large sizes cannot be aligned properly. */
	prealloc = MIN(prealloc, SIZE_MAX - arena->slab_size);
	/* Align prealloc around a fixed number of slabs. */
	arena->prealloc = small_align(prealloc, arena->slab_size);

	arena->used = 0;

	arena->flags = flags;

	if (arena->prealloc) {
		arena->arena = mmap_checked(arena->prealloc,
					    arena->slab_size,
					    arena->flags);
	} else {
		arena->arena = NULL;
	}
	return arena->prealloc && !arena->arena ? -1 : 0;
}
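A minimal usage sketch of the quota-aware variant above. The header paths, the 16 MB quota, 4 MB preallocation and 64 KB slab size are assumptions, and quota_init(), slab_map(), slab_unmap() and slab_arena_destroy() are companion calls from the same library; MAP_PRIVATE satisfies the assert on the flags.

#include <sys/mman.h>
#include <stdio.h>

#include "small/quota.h"	/* assumed header paths */
#include "small/slab_arena.h"

int
main(void)
{
	struct quota quota;
	struct slab_arena arena;

	quota_init(&quota, 16 * 1024 * 1024);	/* 16 MB hard limit */
	if (slab_arena_create(&arena, &quota, 4 * 1024 * 1024,
			      64 * 1024, MAP_PRIVATE) != 0) {
		fprintf(stderr, "arena preallocation failed\n");
		return 1;
	}
	void *slab = slab_map(&arena);	/* take one slab from the arena */
	if (slab != NULL)
		slab_unmap(&arena, slab);
	slab_arena_destroy(&arena);
	return 0;
}

Note that a prealloc larger than the quota would not fail here; as the code above shows, it is simply clamped to quota_total().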
Example #2
/** Initialize the small allocator. */
void
small_alloc_create(struct small_alloc *alloc, struct slab_cache *cache,
		   uint32_t objsize_min, float alloc_factor)
{
	alloc->cache = cache;
	/* Align sizes. */
	objsize_min = small_align(objsize_min, sizeof(intptr_t));
	alloc->slab_order = cache->order_max;
	/* Make sure at least 4 of the largest objects fit in a slab. */
	alloc->objsize_max =
		mempool_objsize_max(slab_order_size(cache, alloc->slab_order));
	assert(alloc->objsize_max > objsize_min + STEP_POOL_MAX * STEP_SIZE);

	struct mempool *step_pool;
	for (step_pool = alloc->step_pools;
	     step_pool < alloc->step_pools + STEP_POOL_MAX;
	     step_pool++) {

		mempool_create_with_order(step_pool, alloc->cache,
					  objsize_min, alloc->slab_order);
		objsize_min += STEP_SIZE;
	}
	alloc->step_pool_objsize_max = (step_pool - 1)->objsize;
	if (alloc_factor > 2.0)
		alloc_factor = 2.0;
	/*
	 * Correct the user-supplied alloc_factor to ensure that
	 * it actually produces growing object sizes.
	 */
	if (alloc->step_pool_objsize_max * alloc_factor <
	    alloc->step_pool_objsize_max + STEP_SIZE) {

		alloc_factor =
			(alloc->step_pool_objsize_max + STEP_SIZE + 0.5) /
			alloc->step_pool_objsize_max;
	}
	alloc->factor = alloc_factor;

	/* Initialize the factored pool cache. */
	struct factor_pool *factor_pool = alloc->factor_pool_cache;
	do {
		factor_pool->next = factor_pool + 1;
		factor_pool++;
	} while (factor_pool !=
		 alloc->factor_pool_cache + FACTOR_POOL_MAX - 1);
	factor_pool->next = NULL;
	alloc->factor_pool_next = alloc->factor_pool_cache;
	factor_tree_new(&alloc->factor_pools);
	(void) factor_pool_create(alloc, NULL, alloc->objsize_max);

	alloc->is_delayed_free_mode = false;
}
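To see why the alloc_factor correction matters, here is a standalone sketch (no library calls) of the size ladder it produces: a too-small user factor is bumped so that every multiplication, truncated and aligned the same way factor_pool_create() aligns with small_align(), still clears at least one STEP_SIZE. The 512-byte starting size, the 8-byte STEP_SIZE stand-in and the 1.001 factor are assumed values.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Same rounding as the library's small_align(size, alignment). */
static size_t
align_up(size_t size, size_t alignment)
{
	return (size + alignment - 1) & ~(alignment - 1);
}

int
main(void)
{
	const size_t step_size = 8;	/* stands in for STEP_SIZE */
	size_t objsize = 512;		/* assumed step_pool_objsize_max */
	float factor = 1.001f;		/* a too-small user factor */

	/* The correction from small_alloc_create(): make sure one
	 * multiplication always clears at least one STEP_SIZE. */
	if (objsize * factor < objsize + step_size)
		factor = (float)((objsize + step_size + 0.5) / objsize);

	for (int i = 0; i < 8; i++) {
		objsize = align_up((size_t)(objsize * factor),
				   sizeof(intptr_t));
		printf("factored pool %d: objsize %zu\n", i, objsize);
	}
	return 0;
}

With the corrected factor, the printed sizes grow by at least STEP_SIZE per pool, which keeps the assert after each multiplication in factor_pool_create() (Example #4) satisfied.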
Example #3
int
slab_arena_create(struct slab_arena *arena,
		  size_t prealloc, size_t maxalloc,
		  uint32_t slab_size, int flags)
{
	assert(flags & (MAP_PRIVATE | MAP_SHARED));
	lf_lifo_init(&arena->cache);
	/*
	 * Round up the user-supplied data; it can come in
	 * directly from the configuration file. Allow a
	 * zero-size arena for testing purposes.
	 */
	arena->slab_size = small_round(MAX(slab_size, SLAB_MIN_SIZE));

	if (maxalloc) {
		arena->maxalloc = small_round(MAX(maxalloc,
						  arena->slab_size));
	} else {
		arena->maxalloc = 0;
	}

	/* Align arena around a fixed number of slabs. */
	arena->prealloc = small_align(small_round(prealloc),
				      arena->slab_size);
	if (arena->maxalloc < arena->prealloc)
		arena->prealloc = arena->maxalloc;

	arena->used = 0;

	arena->flags = flags;

	if (arena->prealloc) {
		arena->arena = mmap_checked(arena->prealloc,
					    arena->slab_size,
					    arena->flags);
	} else {
		arena->arena = NULL;
	}
	return arena->prealloc && !arena->arena ? -1 : 0;
}
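For contrast with the quota-aware Example #1, a call sketch of this quota-less signature; the header path and the 1 MB / 128 MB / 64 KB figures are made-up values, and MAP_PRIVATE again satisfies the flags assertion.

#include <sys/mman.h>

#include "small/slab_arena.h"	/* assumed header path */

static int
arena_setup(struct slab_arena *arena)
{
	/* 1 MB preallocated out of a 128 MB maxalloc, 64 KB slabs;
	 * prealloc is rounded and aligned, then clamped by maxalloc. */
	return slab_arena_create(arena, 1024 * 1024, 128 * 1024 * 1024,
				 64 * 1024, MAP_PRIVATE);
}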
Example #4
static inline struct factor_pool *
factor_pool_create(struct small_alloc *alloc,
		   struct factor_pool *upper_bound,
		   size_t size)
{
	assert(size > alloc->step_pool_objsize_max);
	assert(size <= alloc->objsize_max);

	if (alloc->factor_pool_next == NULL) {
		/**
		 * Too many factored pools already, fall back
		 * to an imperfect one.
		 */
		return upper_bound;
	}
	size_t objsize = alloc->step_pool_objsize_max;
	size_t prevsize;
	do {
		prevsize = objsize;
		/*
		 * Align objsize after each multiplication to
		 * ensure that the distance between objsizes of
		 * factored pools is a multiple of STEP_SIZE.
		 */
		objsize = small_align(objsize * alloc->factor,
				      sizeof(intptr_t));
		assert(objsize > alloc->step_pool_objsize_max);
	} while (objsize < size);
	if (objsize > alloc->objsize_max)
		objsize = alloc->objsize_max;
	struct factor_pool *pool = alloc->factor_pool_next;
	alloc->factor_pool_next = pool->next;
	mempool_create_with_order(&pool->pool, alloc->cache,
				  objsize, alloc->slab_order);
	pool->objsize_min = prevsize + 1;
	factor_tree_insert(&alloc->factor_pools, pool);
	return pool;
}
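The factor_pool_cache this function pops from is a fixed array whose elements were chained through their next pointers by the do/while loop in Example #2, so running out of pools shows up as a NULL factor_pool_next. Below is a standalone sketch of that intrusive free list, with purely local names and a stand-in of 4 for FACTOR_POOL_MAX.

#include <stdio.h>
#include <stddef.h>

enum { POOL_MAX = 4 };	/* stands in for FACTOR_POOL_MAX */

struct node {
	struct node *next;
	size_t objsize;
};

int
main(void)
{
	struct node cache[POOL_MAX];
	struct node *free_list;

	/* Link the array into a NULL-terminated list, as in Example #2. */
	for (int i = 0; i < POOL_MAX - 1; i++)
		cache[i].next = &cache[i + 1];
	cache[POOL_MAX - 1].next = NULL;
	free_list = cache;

	/* Pop nodes until the cache is exhausted, mirroring the two lines
	 * in factor_pool_create() that read and advance factor_pool_next;
	 * a NULL head is the "too many factored pools" case. */
	while (free_list != NULL) {
		struct node *node = free_list;
		free_list = node->next;
		printf("took node %td\n", node - cache);
	}
	printf("cache exhausted, would fall back to upper_bound\n");
	return 0;
}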