Example #1
void dm_mq_cleanup_mapped_device(struct mapped_device *md)
{
	if (md->tag_set) {
		blk_mq_free_tag_set(md->tag_set);
		kfree(md->tag_set);
	}
}
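This dm-mq teardown pairs with the setup in Example #3: the tag set was allocated separately with kzalloc_node(), so blk_mq_free_tag_set() releases the tags and kfree() releases the set itself.

The pattern shared by all the examples on this page — fill in the blk_mq_tag_set, allocate it before the queue, and unwind in reverse order — distills to the following minimal sketch. It assumes the pre-5.x API used throughout these examples; the driver name mydrv and its fields are hypothetical, not taken from any example:

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/err.h>

struct mydrv {
	struct blk_mq_tag_set tag_set;	/* embedded, as in null_blk and bsg */
	struct request_queue *q;
};

static int mydrv_init_queue(struct mydrv *d, const struct blk_mq_ops *ops)
{
	int err;

	d->tag_set.ops = ops;
	d->tag_set.nr_hw_queues = 1;
	d->tag_set.queue_depth = 128;
	d->tag_set.numa_node = NUMA_NO_NODE;
	d->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	d->tag_set.driver_data = d;

	/* allocate the tags first ... */
	err = blk_mq_alloc_tag_set(&d->tag_set);
	if (err)
		return err;

	/* ... then the queue that uses them */
	d->q = blk_mq_init_queue(&d->tag_set);
	if (IS_ERR(d->q)) {
		blk_mq_free_tag_set(&d->tag_set);	/* undo in reverse */
		return PTR_ERR(d->q);
	}
	return 0;
}

static void mydrv_cleanup_queue(struct mydrv *d)
{
	blk_cleanup_queue(d->q);		/* release the queue ... */
	blk_mq_free_tag_set(&d->tag_set);	/* ... before freeing its tags */
}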
Example #2
void nbdx_unregister_block_device(struct nbdx_file *nbdx_file)
{
	del_gendisk(nbdx_file->disk);
	blk_cleanup_queue(nbdx_file->queue);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
	blk_mq_free_tag_set(&nbdx_file->tag_set);
#endif
	put_disk(nbdx_file->disk);
}
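The matching teardown for Example #9's out-of-tree nbdx driver. The tag set API only exists from kernel 3.16 on (older kernels used a blk_mq_reg, as the other branch in Example #9 shows), so blk_mq_free_tag_set() is compiled in conditionally; the order is the usual del_gendisk(), blk_cleanup_queue(), blk_mq_free_tag_set(), put_disk().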
Example #3
int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
	struct request_queue *q;
	struct dm_target *immutable_tgt;
	int err;

	if (!dm_table_all_blk_mq_devices(t)) {
		DMERR("request-based dm-mq may only be stacked on blk-mq device(s)");
		return -EINVAL;
	}

	md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
	if (!md->tag_set)
		return -ENOMEM;

	md->tag_set->ops = &dm_mq_ops;
	md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
	md->tag_set->numa_node = md->numa_node_id;
	md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
	md->tag_set->driver_data = md;

	md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
	immutable_tgt = dm_table_get_immutable_target(t);
	if (immutable_tgt && immutable_tgt->per_io_data_size) {
		/* any target-specific per-io data is immediately after the tio */
		md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
		md->init_tio_pdu = true;
	}

	err = blk_mq_alloc_tag_set(md->tag_set);
	if (err)
		goto out_kfree_tag_set;

	q = blk_mq_init_allocated_queue(md->tag_set, md->queue);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_tag_set;
	}
	dm_init_md_queue(md);

	/* backfill 'mq' sysfs registration normally done in blk_register_queue */
	err = blk_mq_register_dev(disk_to_dev(md->disk), q);
	if (err)
		goto out_cleanup_queue;

	return 0;

out_cleanup_queue:
	blk_cleanup_queue(q);
out_tag_set:
	blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
	kfree(md->tag_set);

	return err;
}
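Here dm-mq allocates the tag set on the device's NUMA node and grows cmd_size so that a target's per-io data sits directly after the struct dm_rq_target_io. The error ladder unwinds setup in reverse — clean up the queue, free the tag set, kfree() the set — which is exactly what Example #1 repeats at normal teardown.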
Example #4
static void null_del_dev(struct nullb *nullb)
{
	list_del_init(&nullb->list);

	del_gendisk(nullb->disk);
	blk_cleanup_queue(nullb->q);
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
	put_disk(nullb->disk);
	kfree(nullb);
}
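In null_blk the tag set is embedded in struct nullb, so no kfree() of the set is needed; blk_mq_free_tag_set() is called only when the device was created in NULL_Q_MQ mode, since the other queue modes never allocate it.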
Example #5
void bsg_remove_queue(struct request_queue *q)
{
	if (q) {
		struct bsg_set *bset =
			container_of(q->tag_set, struct bsg_set, tag_set);

		bsg_unregister_queue(q);
		blk_cleanup_queue(q);
		blk_mq_free_tag_set(&bset->tag_set);
		kfree(bset);
	}
}
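The bsg teardown recovers its struct bsg_set from the queue's tag_set pointer with container_of(), then unwinds Example #6's setup in reverse: unregister the bsg device, clean up the queue, free the tag set, and free the containing allocation.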
Example #6
/**
 * bsg_setup_queue - Create and add the bsg hooks so we can receive requests
 * @dev: device to attach bsg device to
 * @name: device to give bsg device
 * @job_fn: bsg job handler
 * @timeout: timeout handler for requests on this queue
 * @dd_job_size: size of LLD data needed for each job
 */
struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
		bsg_job_fn *job_fn, bsg_timeout_fn *timeout, int dd_job_size)
{
	struct bsg_set *bset;
	struct blk_mq_tag_set *set;
	struct request_queue *q;
	int ret = -ENOMEM;

	bset = kzalloc(sizeof(*bset), GFP_KERNEL);
	if (!bset)
		return ERR_PTR(-ENOMEM);

	bset->job_fn = job_fn;
	bset->timeout_fn = timeout;

	set = &bset->tag_set;
	set->ops = &bsg_mq_ops;
	set->nr_hw_queues = 1;
	set->queue_depth = 128;
	set->numa_node = NUMA_NO_NODE;
	set->cmd_size = sizeof(struct bsg_job) + dd_job_size;
	set->flags = BLK_MQ_F_NO_SCHED | BLK_MQ_F_BLOCKING;
	if (blk_mq_alloc_tag_set(set))
		goto out_tag_set;

	q = blk_mq_init_queue(set);
	if (IS_ERR(q)) {
		ret = PTR_ERR(q);
		goto out_queue;
	}

	q->queuedata = dev;
	blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);

	ret = bsg_register_queue(q, dev, name, &bsg_transport_ops);
	if (ret) {
		printk(KERN_ERR "%s: bsg interface failed to "
		       "initialize - register queue\n", dev->kobj.name);
		goto out_cleanup_queue;
	}

	return q;
out_cleanup_queue:
	blk_cleanup_queue(q);
out_queue:
	blk_mq_free_tag_set(set);
out_tag_set:
	kfree(bset);
	return ERR_PTR(ret);
}
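Note that the error labels here are named after the step that failed, not the resource being freed: a blk_mq_alloc_tag_set() failure jumps to out_tag_set, which only frees bset, while a blk_mq_init_queue() failure jumps to out_queue, which frees the tag set first.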
Example #7
static void null_del_dev(struct nullb *nullb)
{
	list_del_init(&nullb->list);

	if (use_lightnvm)
		null_nvm_unregister(nullb);
	else
		del_gendisk(nullb->disk);
	blk_cleanup_queue(nullb->q);
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
	if (!use_lightnvm)
		put_disk(nullb->disk);
	cleanup_queues(nullb);
	kfree(nullb);
}
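A lightnvm-aware variant of Example #4: the gendisk exists only when lightnvm is not in use, so del_gendisk()/put_disk() become conditional, while the tag set is still freed only in NULL_Q_MQ mode.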
Example #8
static int null_add_dev(void)
{
	struct gendisk *disk;
	struct nullb *nullb;
	sector_t size;
	int rv;

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}

	spin_lock_init(&nullb->lock);

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
		submit_queues = nr_online_nodes;

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	if (queue_mode == NULL_Q_MQ) {
		nullb->tag_set.ops = &null_mq_ops;
		nullb->tag_set.nr_hw_queues = submit_queues;
		nullb->tag_set.queue_depth = hw_queue_depth;
		nullb->tag_set.numa_node = home_node;
		nullb->tag_set.cmd_size	= sizeof(struct nullb_cmd);
		nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
		nullb->tag_set.driver_data = nullb;

		rv = blk_mq_alloc_tag_set(&nullb->tag_set);
		if (rv)
			goto out_cleanup_queues;

		nullb->q = blk_mq_init_queue(&nullb->tag_set);
		if (IS_ERR(nullb->q)) {
			rv = -ENOMEM;
			goto out_cleanup_tags;
		}
	} else if (queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_make_request(nullb->q, null_queue_bio);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	} else {
		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
		blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	}

	nullb->q->queuedata = nullb;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);

	disk = nullb->disk = alloc_disk_node(1, home_node);
	if (!disk) {
		rv = -ENOMEM;
		goto out_cleanup_blk_queue;
	}

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	nullb->index = nullb_indexes++;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, bs);
	blk_queue_physical_block_size(nullb->q, bs);

	size = gb * 1024 * 1024 * 1024ULL;
	sector_div(size, bs);
	set_capacity(disk, size);

	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
	disk->major		= null_major;
	disk->first_minor	= nullb->index;
	disk->fops		= &null_fops;
	disk->private_data	= nullb;
	disk->queue		= nullb->q;
	sprintf(disk->disk_name, "nullb%d", nullb->index);
	add_disk(disk);
	return 0;

out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
out:
	return rv;
}
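The unwind ladder mirrors the three setup paths; out_cleanup_tags re-checks queue_mode because only the NULL_Q_MQ branch allocated the tag set. One quirk worth noting: a blk_mq_init_queue() failure is reported as a blanket -ENOMEM rather than propagating PTR_ERR(nullb->q).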
Example #9
int nbdx_register_block_device(struct nbdx_file *nbdx_file)
{
	sector_t size = nbdx_file->stbuf.st_size;
	int page_size = PAGE_SIZE;
	int err = 0;

	pr_debug("%s called\n", __func__);

	nbdx_file->major = nbdx_major;

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)
	nbdx_mq_reg.nr_hw_queues = submit_queues;

	nbdx_file->queue = blk_mq_init_queue(&nbdx_mq_reg, nbdx_file);
#else
	nbdx_file->tag_set.ops = &nbdx_mq_ops;
	nbdx_file->tag_set.nr_hw_queues = submit_queues;
	nbdx_file->tag_set.queue_depth = NBDX_QUEUE_DEPTH;
	nbdx_file->tag_set.numa_node = NUMA_NO_NODE;
	nbdx_file->tag_set.cmd_size	= sizeof(struct raio_io_u);
	nbdx_file->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	nbdx_file->tag_set.driver_data = nbdx_file;

	err = blk_mq_alloc_tag_set(&nbdx_file->tag_set);
	if (err)
		goto out;

	nbdx_file->queue = blk_mq_init_queue(&nbdx_file->tag_set);
#endif
	if (IS_ERR(nbdx_file->queue)) {
		pr_err("%s: Failed to allocate blk queue ret=%ld\n",
		       __func__, PTR_ERR(nbdx_file->queue));
		err = PTR_ERR(nbdx_file->queue);
		goto blk_mq_init;
	}

	nbdx_file->queue->queuedata = nbdx_file;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nbdx_file->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nbdx_file->queue);

	nbdx_file->disk = alloc_disk_node(1, NUMA_NO_NODE);
	if (!nbdx_file->disk) {
		pr_err("%s: Failed to allocate disk node\n", __func__);
		err = -ENOMEM;
		goto alloc_disk;
	}

	nbdx_file->disk->major = nbdx_file->major;
	nbdx_file->disk->first_minor = nbdx_file->index;
	nbdx_file->disk->fops = &nbdx_ops;
	nbdx_file->disk->queue = nbdx_file->queue;
	nbdx_file->disk->private_data = nbdx_file;
	blk_queue_logical_block_size(nbdx_file->queue, NBDX_SECT_SIZE);
	blk_queue_physical_block_size(nbdx_file->queue, NBDX_SECT_SIZE);
	sector_div(page_size, NBDX_SECT_SIZE);
	blk_queue_max_hw_sectors(nbdx_file->queue, page_size * MAX_SGL_LEN);
	sector_div(size, NBDX_SECT_SIZE);
	set_capacity(nbdx_file->disk, size);
	sscanf(nbdx_file->dev_name, "%s", nbdx_file->disk->disk_name);
	add_disk(nbdx_file->disk);
	goto out;

alloc_disk:
	blk_cleanup_queue(nbdx_file->queue);
blk_mq_init:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
	blk_mq_free_tag_set(&nbdx_file->tag_set);
#endif
out:
	return err;
}
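The registration side of Example #2. Pre-3.16 kernels build the queue from the static nbdx_mq_reg, while newer kernels allocate a per-device tag set that the blk_mq_init error label frees under the same version guard; the success path simply falls through to out via the goto after add_disk().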
Example #10
static int null_add_dev(void)
{
	struct nullb *nullb;
	int rv;

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}

	spin_lock_init(&nullb->lock);

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
		submit_queues = nr_online_nodes;

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	if (queue_mode == NULL_Q_MQ) {
		nullb->tag_set.ops = &null_mq_ops;
		nullb->tag_set.nr_hw_queues = submit_queues;
		nullb->tag_set.queue_depth = hw_queue_depth;
		nullb->tag_set.numa_node = home_node;
		nullb->tag_set.cmd_size	= sizeof(struct nullb_cmd);
		nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
		nullb->tag_set.driver_data = nullb;

		rv = blk_mq_alloc_tag_set(&nullb->tag_set);
		if (rv)
			goto out_cleanup_queues;

		nullb->q = blk_mq_init_queue(&nullb->tag_set);
		if (IS_ERR(nullb->q)) {
			rv = -ENOMEM;
			goto out_cleanup_tags;
		}
	} else if (queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_make_request(nullb->q, null_queue_bio);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	} else {
		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
		blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	}

	nullb->q->queuedata = nullb;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);

	mutex_lock(&lock);
	nullb->index = nullb_indexes++;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, bs);
	blk_queue_physical_block_size(nullb->q, bs);

	sprintf(nullb->disk_name, "nullb%d", nullb->index);

	if (use_lightnvm)
		rv = null_nvm_register(nullb);
	else
		rv = null_gendisk_register(nullb);

	if (rv)
		goto out_cleanup_blk_queue;

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	mutex_unlock(&lock);

	return 0;
out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
out:
	return rv;
}
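A later revision of Example #8: registration is split into null_gendisk_register()/null_nvm_register(), and the device is added to nullb_list only once registration has succeeded. The unwind path is unchanged, with the tag set freed only in NULL_Q_MQ mode.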