Example #1
0
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices.  Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved.  To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
		blk_queue_bypass_end(q);
		if (q->mq_ops)
			blk_mq_finish_init(q);
	}

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		return ret;
	}

	kobject_uevent(&q->kobj, KOBJ_ADD);

	if (q->mq_ops)
		blk_mq_register_disk(disk);

	if (!q->request_fn)
		return 0;

	ret = elv_register_queue(q);
	if (ret) {
		kobject_uevent(&q->kobj, KOBJ_REMOVE);
		kobject_del(&q->kobj);
		blk_trace_remove_sysfs(dev);
		kobject_put(&dev->kobj);
		return ret;
	}

	return 0;
}
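The error path at the bottom mirrors the teardown the block layer performs when a disk goes away: every registration step has a matching undo, executed in reverse order. For context, here is a sketch of the companion unregister path, close to what block/blk-sysfs.c of the same era does (illustrative, not authoritative):

void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Undo the steps of blk_register_queue() in reverse order. */
	if (q->mq_ops)
		blk_mq_unregister_disk(disk);

	if (q->request_fn)
		elv_unregister_queue(q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));
	kobject_put(&disk_to_dev(disk)->kobj);	/* drop the ref taken by kobject_get() */
}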
Example #2
0
int dm_mq_init_request_queue(struct mapped_device *md, struct dm_target *immutable_tgt)
{
	struct request_queue *q;
	int err;

	if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) {
		DMERR("request-based dm-mq may only be stacked on blk-mq device(s)");
		return -EINVAL;
	}

	md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
	if (!md->tag_set)
		return -ENOMEM;

	md->tag_set->ops = &dm_mq_ops;
	md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
	md->tag_set->numa_node = md->numa_node_id;
	md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
	md->tag_set->driver_data = md;

	md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
	if (immutable_tgt && immutable_tgt->per_io_data_size) {
		/* any target-specific per-io data is immediately after the tio */
		md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
		md->init_tio_pdu = true;
	}

	err = blk_mq_alloc_tag_set(md->tag_set);
	if (err)
		goto out_kfree_tag_set;

	q = blk_mq_init_allocated_queue(md->tag_set, md->queue);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_tag_set;
	}
	dm_init_md_queue(md);

	/* backfill 'mq' sysfs registration normally done in blk_register_queue */
	blk_mq_register_disk(md->disk);

	return 0;

out_tag_set:
	blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
	kfree(md->tag_set);

	return err;
}
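The two goto labels unwind the tag-set setup in reverse order of allocation, and the same pairing has to happen when the mapped device is destroyed. A sketch of the matching cleanup helper, close to the one in drivers/md/dm-rq.c (illustrative):

void dm_mq_cleanup_mapped_device(struct mapped_device *md)
{
	/* Release the tag set and its container, reversing the setup order. */
	if (md->tag_set) {
		blk_mq_free_tag_set(md->tag_set);
		kfree(md->tag_set);
	}
}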
Example #3
0
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	/*
	 * Initialization must be complete by now.  Finish the initial
	 * bypass from queue allocation.
	 */
	blk_queue_bypass_end(q);
	queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		return ret;
	}

	kobject_uevent(&q->kobj, KOBJ_ADD);

	if (q->mq_ops)
		blk_mq_register_disk(disk);

	if (!q->request_fn)
		return 0;

	ret = elv_register_queue(q);
	if (ret) {
		kobject_uevent(&q->kobj, KOBJ_REMOVE);
		kobject_del(&q->kobj);
		blk_trace_remove_sysfs(dev);
		kobject_put(&dev->kobj);
		return ret;
	}

	return 0;
}
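Drivers do not call blk_register_queue() directly; it runs from the add_disk() path once the gendisk and its request_queue are wired together. A minimal, hypothetical call sequence for a blk-mq driver of this era (the mydrv_* identifiers are made up for illustration; most setup and error handling is trimmed):

/* Hypothetical driver probe; assumes mydrv_tag_set was populated elsewhere. */
static struct blk_mq_tag_set mydrv_tag_set;

static int mydrv_probe(void)
{
	struct request_queue *q;
	struct gendisk *disk;

	q = blk_mq_init_queue(&mydrv_tag_set);
	if (IS_ERR(q))
		return PTR_ERR(q);

	disk = alloc_disk(1);
	if (!disk) {
		blk_cleanup_queue(q);
		return -ENOMEM;
	}

	disk->queue = q;
	/* major, first_minor, fops, disk_name setup omitted for brevity */

	add_disk(disk);		/* ends up calling blk_register_queue(disk) */
	return 0;
}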