/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise a MMC card request queue.
 *
 * Returns 0 on success or a negative errno; on failure the allocated
 * request queue has been cleaned up.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	int ret = -ENOMEM;

	mq->card = card;
	mq->queue = blk_alloc_queue(GFP_KERNEL);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queue_lock = lock;
	mq->queue->request_fn = mmc_request_fn;
	mq->queue->init_rq_fn = mmc_init_request;
	mq->queue->exit_rq_fn = mmc_exit_request;
	/* Per-request driver data allocated immediately after struct request */
	mq->queue->cmd_size = sizeof(struct mmc_queue_req);
	mq->queue->queuedata = mq;
	mq->qcnt = 0;

	ret = blk_init_allocated_queue(mq->queue);
	if (ret)
		goto cleanup_queue;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	mmc_setup_queue(mq, card);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
				 host->index, subname ? subname : "");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto cleanup_queue;
	}

	return 0;

cleanup_queue:
	blk_cleanup_queue(mq->queue);
	return ret;
}
/* * Fully initialize a .request_fn request-based queue. */ int dm_old_init_request_queue(struct mapped_device *md) { /* Fully initialize the queue */ if (!blk_init_allocated_queue(md->queue, dm_old_request_fn, NULL)) return -EINVAL; /* disable dm_old_request_fn's merge heuristic by default */ md->seq_rq_merge_deadline_usecs = 0; dm_init_normal_md_queue(md); blk_queue_softirq_done(md->queue, dm_softirq_done); blk_queue_prep_rq(md->queue, dm_old_prep_fn); /* Initialize the request-based DM worker thread */ init_kthread_worker(&md->kworker); md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker, "kdmwork-%s", dm_device_name(md)); elv_register_queue(md->queue); return 0; }
/*
 * Fully initialize a .request_fn request-based queue.
 *
 * Sets up @md->queue as a legacy (non-blk-mq) request_fn queue whose
 * requests embed a struct dm_rq_target_io (plus any target-specific
 * per-io data), then starts the per-device kworker thread used for
 * request-based work.
 *
 * Returns 0 on success or a negative errno.
 */
int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
	struct dm_target *immutable_tgt;

	/* Fully initialize the queue */
	md->queue->cmd_size = sizeof(struct dm_rq_target_io);
	md->queue->rq_alloc_data = md;
	md->queue->request_fn = dm_old_request_fn;
	md->queue->init_rq_fn = dm_rq_init_rq;

	immutable_tgt = dm_table_get_immutable_target(t);
	if (immutable_tgt && immutable_tgt->per_io_data_size) {
		/* any target-specific per-io data is immediately after the tio */
		md->queue->cmd_size += immutable_tgt->per_io_data_size;
		md->init_tio_pdu = true;
	}
	if (blk_init_allocated_queue(md->queue) < 0)
		return -EINVAL; /* NOTE(review): actual errno from blk_init_allocated_queue is discarded — confirm intentional */

	/* disable dm_old_request_fn's merge heuristic by default */
	md->seq_rq_merge_deadline_usecs = 0;

	dm_init_normal_md_queue(md);
	blk_queue_softirq_done(md->queue, dm_softirq_done);

	/* Initialize the request-based DM worker thread */
	kthread_init_worker(&md->kworker);
	md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
				       "kdmwork-%s", dm_device_name(md));
	/* kthread_run() returns ERR_PTR on failure; clear the field so teardown won't touch it */
	if (IS_ERR(md->kworker_task)) {
		int error = PTR_ERR(md->kworker_task);

		md->kworker_task = NULL;
		return error;
	}

	elv_register_queue(md->queue); /* NOTE(review): return value ignored — confirm intentional */

	return 0;
}