Example #1
File: dm-rq.c  Project: harlanstars/linux
int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
	struct request_queue *q;
	struct dm_target *immutable_tgt;
	int err;

	if (!dm_table_all_blk_mq_devices(t)) {
		DMERR("request-based dm-mq may only be stacked on blk-mq device(s)");
		return -EINVAL;
	}

	md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
	if (!md->tag_set)
		return -ENOMEM;

	md->tag_set->ops = &dm_mq_ops;
	md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
	md->tag_set->numa_node = md->numa_node_id;
	md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
	md->tag_set->driver_data = md;

	md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
	immutable_tgt = dm_table_get_immutable_target(t);
	if (immutable_tgt && immutable_tgt->per_io_data_size) {
		/* any target-specific per-io data is immediately after the tio */
		md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
		md->init_tio_pdu = true;
	}

	err = blk_mq_alloc_tag_set(md->tag_set);
	if (err)
		goto out_kfree_tag_set;

	q = blk_mq_init_allocated_queue(md->tag_set, md->queue);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_tag_set;
	}
	dm_init_md_queue(md);

	/* backfill 'mq' sysfs registration normally done in blk_register_queue */
	err = blk_mq_register_dev(disk_to_dev(md->disk), q);
	if (err)
		goto out_cleanup_queue;

	return 0;

out_cleanup_queue:
	blk_cleanup_queue(q);
out_tag_set:
	blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
	kfree(md->tag_set);

	return err;
}
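
Example #1 converts a device-mapper request queue to blk-mq: it allocates a per-device tag set, sizes cmd_size so that an immutable target's per-io data sits directly after the struct dm_rq_target_io, initializes the already-allocated queue, and then backfills the 'mq' sysfs registration that blk_register_queue() (Example #2) would normally perform. Below is a minimal sketch of how a caller might dispatch to this function; the caller name and error message are illustrative assumptions modelled on dm_setup_md_queue() in drivers/md/dm.c, not code taken from this file.

/* Illustrative caller (assumed shape, not verbatim kernel code). */
static int setup_md_queue_sketch(struct mapped_device *md, struct dm_table *t)
{
	int r = 0;

	switch (dm_table_get_type(t)) {
	case DM_TYPE_MQ_REQUEST_BASED:
		/* request-based dm-mq: build the tag set and queue as in Example #1 */
		r = dm_mq_init_request_queue(md, t);
		if (r)
			DMERR("Cannot initialize queue for request-based dm-mq mapped device");
		break;
	default:
		/* bio-based and legacy request-based setup omitted in this sketch */
		break;
	}

	return r;
}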
Example #2
int blk_register_queue(struct gendisk *disk)
{
    int ret;
    struct device *dev = disk_to_dev(disk);
    struct request_queue *q = disk->queue;

    if (WARN_ON(!q))
        return -ENXIO;

    /*
     * SCSI probing may synchronously create and destroy a lot of
     * request_queues for non-existent devices.  Shutting down a fully
     * functional queue takes measurable wallclock time as RCU grace
     * periods are involved.  To avoid excessive latency in these
     * cases, a request_queue starts out in a degraded mode which is
     * faster to shut down and is made fully functional here as
     * request_queues for non-existent devices never get registered.
     */
    if (!blk_queue_init_done(q)) {
        queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
        percpu_ref_switch_to_percpu(&q->q_usage_counter);
        blk_queue_bypass_end(q);
    }

    ret = blk_trace_init_sysfs(dev);
    if (ret)
        return ret;

    ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
    if (ret < 0) {
        blk_trace_remove_sysfs(dev);
        return ret;
    }

    kobject_uevent(&q->kobj, KOBJ_ADD);

    if (q->mq_ops)
        blk_mq_register_dev(dev, q);

    if (!q->request_fn)
        return 0;

    ret = elv_register_queue(q);
    if (ret) {
        kobject_uevent(&q->kobj, KOBJ_REMOVE);
        kobject_del(&q->kobj);
        blk_trace_remove_sysfs(dev);
        kobject_put(&dev->kobj);
        return ret;
    }

    return 0;
}
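
Example #2 is the registration path that Example #1 has to backfill: blk_register_queue() runs when a gendisk is registered, and it only sets up the blk-mq sysfs entries if q->mq_ops is already populated at that point. As the "backfill" comment in Example #1 indicates, device-mapper registers its disk before the queue becomes blk-mq, so dm_mq_init_request_queue() later calls blk_mq_register_dev() itself. The fragment below is a hedged sketch of the ordinary driver-side sequence that ends up invoking blk_register_queue(); the function name is made up for illustration.

/* Hedged sketch of a typical driver registration sequence. add_disk()
 * ends up calling blk_register_queue() internally; drivers do not
 * normally call it directly. */
static void sketch_attach_and_register(struct gendisk *disk,
				       struct request_queue *q)
{
	disk->queue = q;	/* blk_register_queue() reads disk->queue */
	set_capacity(disk, 0);	/* real capacity is filled in by the driver later */
	add_disk(disk);		/* registers the disk and, through it, the queue */
}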