/*
 * blk_mq_init_tags() - allocate and initialize a blk-mq tag set
 * @total_tags: total tag depth (normal + reserved tags)
 * @reserved_tags: how many of @total_tags are set aside as reserved
 * @node: NUMA node to allocate the structure on
 *
 * Returns the new tag set on success, or NULL on invalid arguments or
 * allocation failure.  The caller releases it with blk_mq_free_tags().
 */
struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags, int node)
{
	unsigned int nr_tags, nr_cache;
	struct blk_mq_tags *tags;
	int ret;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	/*
	 * "total_tags - reserved_tags" below is unsigned arithmetic; if
	 * reserved_tags exceeded total_tags it would wrap around to a huge
	 * pool size.  Reject such input up front.
	 */
	if (reserved_tags > total_tags) {
		pr_err("blk-mq: more reserved tags than tag depth\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	nr_tags = total_tags - reserved_tags;

	/*
	 * Size the per-cpu cache from an even split across all possible
	 * CPUs, clamped to [BLK_MQ_TAG_CACHE_MIN, BLK_MQ_TAG_CACHE_MAX].
	 */
	nr_cache = nr_tags / num_possible_cpus();
	if (nr_cache < BLK_MQ_TAG_CACHE_MIN)
		nr_cache = BLK_MQ_TAG_CACHE_MIN;
	else if (nr_cache > BLK_MQ_TAG_CACHE_MAX)
		nr_cache = BLK_MQ_TAG_CACHE_MAX;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;
	tags->nr_max_cache = nr_cache;
	/* Move half a cache's worth at a time, but never zero. */
	tags->nr_batch_move = max(1u, nr_cache / 2);

	ret = __percpu_ida_init(&tags->free_tags, tags->nr_tags -
				tags->nr_reserved_tags,
				tags->nr_max_cache,
				tags->nr_batch_move);
	if (ret)
		goto err_free_tags;

	if (reserved_tags) {
		/*
		 * With max_cache and batch set to 1, the allocator falls
		 * back to the uncached path.  Reserved-tag allocation is
		 * rare, so it is fine for it to be slow.
		 */
		ret = __percpu_ida_init(&tags->reserved_tags, reserved_tags,
				1, 1);
		if (ret)
			goto err_reserved_tags;
	}

	return tags;

err_reserved_tags:
	percpu_ida_destroy(&tags->free_tags);
err_free_tags:
	kfree(tags);
	return NULL;
}
/*
 * Example #2 — from percpu_ida.c (project: krzk/linux)
 */
/**
 * __percpu_ida_init - initialize a percpu tag pool
 * @pool: pool to initialize
 * @nr_tags: number of tags that will be available for allocation
 * @max_size: capacity of each per-cpu tag cache (sizes the per-cpu
 *	array allocated below)
 * @batch_size: stored as pool->percpu_batch_size; presumably the number
 *	of tags moved between the global and per-cpu freelists at a time —
 *	confirm against the alloc/free paths
 *
 * Initializes @pool so that it can be used to allocate tags - integers in the
 * range [0, nr_tags). Typically, they'll be used by driver code to refer to a
 * preallocated array of tag structures.
 *
 * Allocation is percpu, but sharding is limited by nr_tags - for best
 * performance, the workload should not span more cpus than nr_tags / 128.
 *
 * Returns 0 on success, -EINVAL if @nr_tags is out of range, -ENOMEM on
 * allocation failure.
 */
int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags,
	unsigned long max_size, unsigned long batch_size)
{
	unsigned i, cpu, order;

	memset(pool, 0, sizeof(*pool));

	init_waitqueue_head(&pool->wait);
	spin_lock_init(&pool->lock);
	pool->nr_tags = nr_tags;
	pool->percpu_max_size = max_size;
	pool->percpu_batch_size = batch_size;

	/* Guard against overflow */
	if (nr_tags > (unsigned) INT_MAX + 1) {
		pr_err("percpu_ida_init(): nr_tags too large\n");
		return -EINVAL;
	}

	/* Global freelist: one unsigned slot per tag, rounded up to pages. */
	order = get_order(nr_tags * sizeof(unsigned));
	pool->freelist = (void *) __get_free_pages(GFP_KERNEL, order);
	if (!pool->freelist)
		return -ENOMEM;

	/* Initially every tag in [0, nr_tags) is free. */
	for (i = 0; i < nr_tags; i++)
		pool->freelist[i] = i;

	pool->nr_free = nr_tags;

	/*
	 * Per-cpu state: a struct percpu_ida_cpu followed by max_size tag
	 * slots, aligned for unsigned.
	 */
	pool->tag_cpu = __alloc_percpu(sizeof(struct percpu_ida_cpu) +
				       pool->percpu_max_size * sizeof(unsigned),
				       sizeof(unsigned));
	if (!pool->tag_cpu)
		goto err;

	for_each_possible_cpu(cpu)
		spin_lock_init(&per_cpu_ptr(pool->tag_cpu, cpu)->lock);

	return 0;
err:
	/* Presumably releases the freelist pages allocated above — confirm. */
	percpu_ida_destroy(pool);
	return -ENOMEM;
}
/*
 * blk_mq_free_tags() - tear down a tag set created by blk_mq_init_tags().
 *
 * NOTE(review): the reserved pool is only initialized when
 * reserved_tags != 0; this unconditionally destroys it, so
 * percpu_ida_destroy() is presumably safe on the zeroed pool embedded
 * in the kzalloc'd struct — confirm.
 */
void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	percpu_ida_destroy(&tags->free_tags);
	percpu_ida_destroy(&tags->reserved_tags);
	kfree(tags);
}