int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

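	/* Add the blktrace attribute group to the disk's device in sysfs. */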
	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

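	/*
	 * Create the "queue" directory under the disk's sysfs directory;
	 * kobject_get() pins the parent device while the queue kobject exists.
	 */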
	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		return ret;
	}

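	/* Notify userspace (udev) that the queue directory now exists. */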
	kobject_uevent(&q->kobj, KOBJ_ADD);

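	/* Bio-based drivers have no request_fn, hence no elevator to register. */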
	if (!q->request_fn)
		return 0;

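	/* elv_register_queue() adds the elevator's "iosched" directory under the queue kobject. */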
	ret = elv_register_queue(q);
	if (ret) {
		kobject_uevent(&q->kobj, KOBJ_REMOVE);
		kobject_del(&q->kobj);
		blk_trace_remove_sysfs(dev);
		kobject_put(&dev->kobj);
		return ret;
	}

	return 0;
}
Example #2
int blk_register_queue(struct gendisk *disk)
{
	int ret;

	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

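	/* Nothing to register for bio-based queues (no request_fn): skip sysfs setup entirely. */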
	if (!q->request_fn)
		return 0;

	ret = kobject_add(&q->kobj, kobject_get(&disk_to_dev(disk)->kobj),
			  "%s", "queue");
	if (ret < 0)
		return ret;

	kobject_uevent(&q->kobj, KOBJ_ADD);

	ret = elv_register_queue(q);
	if (ret) {
		kobject_uevent(&q->kobj, KOBJ_REMOVE);
		kobject_del(&q->kobj);
		return ret;
	}

	return 0;
}
Example #3
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	add_my_disk(disk);

	if (WARN_ON(!q))
		return -ENXIO;

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices.  Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved.  To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
		blk_queue_bypass_end(q);
		if (q->mq_ops)
			blk_mq_finish_init(q);
	}

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		return ret;
	}

	kobject_uevent(&q->kobj, KOBJ_ADD);

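	/* blk-mq queues additionally register their "mq" sysfs hierarchy for the disk. */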
	if (q->mq_ops)
		blk_mq_register_disk(disk);

	if (!q->request_fn)
		return 0;

	ret = elv_register_queue(q);
	if (ret) {
		kobject_uevent(&q->kobj, KOBJ_REMOVE);
		kobject_del(&q->kobj);
		blk_trace_remove_sysfs(dev);
		kobject_put(&dev->kobj);
		return ret;
	}

	return 0;
}
Example #4
/**
 * add_disk - add partitioning information to kernel list
 * @disk: per-device partitioning information
 *
 * This function registers the partitioning information in @disk
 * with the kernel.
 */
void add_disk(struct gendisk *disk)
{
	disk->flags |= GENHD_FL_UP;
	blk_register_region(MKDEV(disk->major, disk->first_minor),
			    disk->minors, NULL, exact_match, exact_lock, disk);
	register_disk(disk);
	elv_register_queue(disk);
#ifdef UCSB_IO
	ucsb_debug("Added disk %15s, diskk major:minor=%d:%d\n", 
			disk->disk_name, disk->major, disk->first_minor);
#endif /* ucsb_io */
}
Example #5
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	/*
	 * Initialization must be complete by now.  Finish the initial
	 * bypass from queue allocation.
	 */
	blk_queue_bypass_end(q);
	queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		return ret;
	}

	kobject_uevent(&q->kobj, KOBJ_ADD);

	if (q->mq_ops)
		blk_mq_register_disk(disk);

	if (!q->request_fn)
		return 0;

	ret = elv_register_queue(q);
	if (ret) {
		kobject_uevent(&q->kobj, KOBJ_REMOVE);
		kobject_del(&q->kobj);
		blk_trace_remove_sysfs(dev);
		kobject_put(&dev->kobj);
		return ret;
	}

	return 0;
}
Example #6
/*
 * Fully initialize a .request_fn request-based queue.
 */
int dm_old_init_request_queue(struct mapped_device *md)
{
	/* Fully initialize the queue */
	if (!blk_init_allocated_queue(md->queue, dm_old_request_fn, NULL))
		return -EINVAL;

	/* disable dm_old_request_fn's merge heuristic by default */
	md->seq_rq_merge_deadline_usecs = 0;

	dm_init_normal_md_queue(md);
	blk_queue_softirq_done(md->queue, dm_softirq_done);
	blk_queue_prep_rq(md->queue, dm_old_prep_fn);

	/* Initialize the request-based DM worker thread */
	init_kthread_worker(&md->kworker);
	md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
				       "kdmwork-%s", dm_device_name(md));

	elv_register_queue(md->queue);

	return 0;
}
Example #7
/*
 * Fully initialize a .request_fn request-based queue.
 */
int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
	struct dm_target *immutable_tgt;

	/* Fully initialize the queue */
	md->queue->cmd_size = sizeof(struct dm_rq_target_io);
	md->queue->rq_alloc_data = md;
	md->queue->request_fn = dm_old_request_fn;
	md->queue->init_rq_fn = dm_rq_init_rq;

	immutable_tgt = dm_table_get_immutable_target(t);
	if (immutable_tgt && immutable_tgt->per_io_data_size) {
		/* any target-specific per-io data is immediately after the tio */
		md->queue->cmd_size += immutable_tgt->per_io_data_size;
		md->init_tio_pdu = true;
	}
	if (blk_init_allocated_queue(md->queue) < 0)
		return -EINVAL;

	/* disable dm_old_request_fn's merge heuristic by default */
	md->seq_rq_merge_deadline_usecs = 0;

	dm_init_normal_md_queue(md);
	blk_queue_softirq_done(md->queue, dm_softirq_done);

	/* Initialize the request-based DM worker thread */
	kthread_init_worker(&md->kworker);
	md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
				       "kdmwork-%s", dm_device_name(md));
	if (IS_ERR(md->kworker_task)) {
		int error = PTR_ERR(md->kworker_task);
		md->kworker_task = NULL;
		return error;
	}

	elv_register_queue(md->queue);

	return 0;
}
Example #8
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	WARN_ONCE(test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags),
		  "%s is registering an already registered queue\n",
		  kobject_name(&dev->kobj));
	queue_flag_set_unlocked(QUEUE_FLAG_REGISTERED, q);

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices.  Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved.  To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
		blk_queue_bypass_end(q);
	}

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	/* Prevent changes through sysfs until registration is completed. */
	mutex_lock(&q->sysfs_lock);

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		goto unlock;
	}

	if (q->mq_ops) {
		__blk_mq_register_dev(dev, q);
		blk_mq_debugfs_register(q);
	}

	kobject_uevent(&q->kobj, KOBJ_ADD);

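	/* Enable writeback throttling with its defaults and hook the queue into blk-throttle. */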
	wbt_enable_default(q);

	blk_throtl_register_queue(q);

	if (q->request_fn || (q->mq_ops && q->elevator)) {
		ret = elv_register_queue(q);
		if (ret) {
			kobject_uevent(&q->kobj, KOBJ_REMOVE);
			kobject_del(&q->kobj);
			blk_trace_remove_sysfs(dev);
			kobject_put(&dev->kobj);
			goto unlock;
		}
	}
	ret = 0;
unlock:
	mutex_unlock(&q->sysfs_lock);
	return ret;
}