/*
 * Fully initialize a .request_fn request-based queue.
 */
int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
	struct dm_target *immutable_tgt;

	/* Fully initialize the queue */
	md->queue->cmd_size = sizeof(struct dm_rq_target_io);
	md->queue->rq_alloc_data = md;
	md->queue->request_fn = dm_old_request_fn;
	md->queue->init_rq_fn = dm_rq_init_rq;

	immutable_tgt = dm_table_get_immutable_target(t);
	if (immutable_tgt && immutable_tgt->per_io_data_size) {
		/* any target-specific per-io data is immediately after the tio */
		md->queue->cmd_size += immutable_tgt->per_io_data_size;
		md->init_tio_pdu = true;
	}

	if (blk_init_allocated_queue(md->queue) < 0)
		return -EINVAL;

	/* disable dm_old_request_fn's merge heuristic by default */
	md->seq_rq_merge_deadline_usecs = 0;

	blk_queue_softirq_done(md->queue, dm_softirq_done);

	/* Initialize the request-based DM worker thread */
	kthread_init_worker(&md->kworker);
	md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
				       "kdmwork-%s", dm_device_name(md));
	if (IS_ERR(md->kworker_task)) {
		int error = PTR_ERR(md->kworker_task);

		md->kworker_task = NULL;
		return error;
	}

	return 0;
}
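/*
 * Illustrative sketch, not part of the source above: when init_tio_pdu
 * is set, the extra per_io_data_size bytes folded into cmd_size sit in
 * the same allocation directly behind the tio, so target per-io data is
 * simply the first byte past struct dm_rq_target_io. The helper name is
 * hypothetical.
 */
static inline void *dm_rq_per_io_data(struct dm_rq_target_io *tio)
{
	return tio + 1;	/* per-io data starts immediately after the tio */
}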
static int null_add_dev(void)
{
	struct gendisk *disk;
	struct nullb *nullb;
	sector_t size;
	int rv;

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}

	spin_lock_init(&nullb->lock);

	/* with per-node hardware contexts, use one submit queue per node */
	if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
		submit_queues = nr_online_nodes;

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	if (queue_mode == NULL_Q_MQ) {
		/* blk-mq mode: allocate a tag set and build the queue from it */
		nullb->tag_set.ops = &null_mq_ops;
		nullb->tag_set.nr_hw_queues = submit_queues;
		nullb->tag_set.queue_depth = hw_queue_depth;
		nullb->tag_set.numa_node = home_node;
		nullb->tag_set.cmd_size = sizeof(struct nullb_cmd);
		nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
		nullb->tag_set.driver_data = nullb;

		rv = blk_mq_alloc_tag_set(&nullb->tag_set);
		if (rv)
			goto out_cleanup_queues;

		nullb->q = blk_mq_init_queue(&nullb->tag_set);
		if (IS_ERR(nullb->q)) {
			rv = -ENOMEM;
			goto out_cleanup_tags;
		}
	} else if (queue_mode == NULL_Q_BIO) {
		/* bio mode: bypass the request layer via a make_request fn */
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_make_request(nullb->q, null_queue_bio);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	} else {
		/* legacy single-queue request_fn mode */
		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
		blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	}

	nullb->q->queuedata = nullb;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);

	disk = nullb->disk = alloc_disk_node(1, home_node);
	if (!disk) {
		rv = -ENOMEM;
		goto out_cleanup_blk_queue;
	}

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	nullb->index = nullb_indexes++;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, bs);
	blk_queue_physical_block_size(nullb->q, bs);

	size = gb * 1024 * 1024 * 1024ULL;
	sector_div(size, bs);
	set_capacity(disk, size);

	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
	disk->major		= null_major;
	disk->first_minor	= nullb->index;
	disk->fops		= &null_fops;
	disk->private_data	= nullb;
	disk->queue		= nullb->q;
	sprintf(disk->disk_name, "nullb%d", nullb->index);
	add_disk(disk);
	return 0;

out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
out:
	return rv;
}
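/*
 * Worked example of the capacity math above, as standalone userspace C
 * with illustrative values (gb = 250, bs = 512). In the driver,
 * sector_div() performs the same 64-bit division on a sector_t.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t gb = 250, bs = 512;			/* assumed values */
	uint64_t size = gb * 1024 * 1024 * 1024ULL;	/* 268435456000 bytes */

	size /= bs;	/* what sector_div(size, bs) computes */
	printf("set_capacity() argument: %llu\n",
	       (unsigned long long)size);		/* prints 524288000 */
	return 0;
}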
static int null_add_dev(void)
{
	struct nullb *nullb;
	int rv;

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}

	spin_lock_init(&nullb->lock);

	/* with per-node hardware contexts, use one submit queue per node */
	if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
		submit_queues = nr_online_nodes;

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	if (queue_mode == NULL_Q_MQ) {
		/* blk-mq mode: allocate a tag set and build the queue from it */
		nullb->tag_set.ops = &null_mq_ops;
		nullb->tag_set.nr_hw_queues = submit_queues;
		nullb->tag_set.queue_depth = hw_queue_depth;
		nullb->tag_set.numa_node = home_node;
		nullb->tag_set.cmd_size = sizeof(struct nullb_cmd);
		nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
		nullb->tag_set.driver_data = nullb;

		rv = blk_mq_alloc_tag_set(&nullb->tag_set);
		if (rv)
			goto out_cleanup_queues;

		nullb->q = blk_mq_init_queue(&nullb->tag_set);
		if (IS_ERR(nullb->q)) {
			rv = -ENOMEM;
			goto out_cleanup_tags;
		}
	} else if (queue_mode == NULL_Q_BIO) {
		/* bio mode: bypass the request layer via a make_request fn */
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_make_request(nullb->q, null_queue_bio);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	} else {
		/* legacy single-queue request_fn mode */
		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
		blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	}

	nullb->q->queuedata = nullb;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);

	mutex_lock(&lock);
	nullb->index = nullb_indexes++;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, bs);
	blk_queue_physical_block_size(nullb->q, bs);

	sprintf(nullb->disk_name, "nullb%d", nullb->index);

	/* register either as a LightNVM device or as a regular gendisk */
	if (use_lightnvm)
		rv = null_nvm_register(nullb);
	else
		rv = null_gendisk_register(nullb);

	if (rv)
		goto out_cleanup_blk_queue;

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	mutex_unlock(&lock);

	return 0;

out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
out:
	return rv;
}
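/*
 * This revision moves gendisk setup out of null_add_dev() into
 * null_gendisk_register(), whose body is not part of this excerpt. A
 * plausible sketch, assembled from the inline gendisk code in the
 * earlier revision above (not necessarily the exact upstream body):
 */
static int null_gendisk_register(struct nullb *nullb)
{
	struct gendisk *disk;
	sector_t size;

	disk = nullb->disk = alloc_disk_node(1, home_node);
	if (!disk)
		return -ENOMEM;

	size = gb * 1024 * 1024 * 1024ULL;
	sector_div(size, bs);
	set_capacity(disk, size);

	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
	disk->major		= null_major;
	disk->first_minor	= nullb->index;
	disk->fops		= &null_fops;
	disk->private_data	= nullb;
	disk->queue		= nullb->q;
	strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);

	add_disk(disk);
	return 0;
}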
static int null_add_dev(void)
{
	struct gendisk *disk;
	struct nullb *nullb;
	sector_t size;

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
	if (!nullb)
		return -ENOMEM;

	spin_lock_init(&nullb->lock);

	if (setup_queues(nullb))
		goto err;

	if (queue_mode == NULL_Q_MQ) {
		/* early blk-mq API: registration parameters live in a static
		 * struct blk_mq_reg rather than a per-device tag set */
		null_mq_reg.numa_node = home_node;
		null_mq_reg.queue_depth = hw_queue_depth;

		if (use_per_node_hctx) {
			null_mq_reg.ops->alloc_hctx = null_alloc_hctx;
			null_mq_reg.ops->free_hctx = null_free_hctx;

			null_mq_reg.nr_hw_queues = nr_online_nodes;
		} else {
			null_mq_reg.ops->alloc_hctx = blk_mq_alloc_single_hw_queue;
			null_mq_reg.ops->free_hctx = blk_mq_free_single_hw_queue;

			null_mq_reg.nr_hw_queues = submit_queues;
		}

		nullb->q = blk_mq_init_queue(&null_mq_reg, nullb);
	} else if (queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
		blk_queue_make_request(nullb->q, null_queue_bio);
	} else {
		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
		if (nullb->q)
			blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
	}

	if (!nullb->q)
		goto queue_fail;

	nullb->q->queuedata = nullb;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);

	disk = nullb->disk = alloc_disk_node(1, home_node);
	if (!disk) {
		/* error labels are nested inside this branch; all failure
		 * paths above jump in here */
queue_fail:
		if (queue_mode == NULL_Q_MQ)
			blk_mq_free_queue(nullb->q);
		else
			blk_cleanup_queue(nullb->q);
		cleanup_queues(nullb);
err:
		kfree(nullb);
		return -ENOMEM;
	}

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	nullb->index = nullb_indexes++;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, bs);
	blk_queue_physical_block_size(nullb->q, bs);

	size = gb * 1024 * 1024 * 1024ULL;
	sector_div(size, bs);
	set_capacity(disk, size);

	disk->flags |= GENHD_FL_EXT_DEVT;
	disk->major		= null_major;
	disk->first_minor	= nullb->index;
	disk->fops		= &null_fops;
	disk->private_data	= nullb;
	disk->queue		= nullb->q;
	sprintf(disk->disk_name, "nullb%d", nullb->index);
	add_disk(disk);

	return 0;
}