int dm_mq_init_request_queue(struct mapped_device *md,
			     struct dm_target *immutable_tgt)
{
	struct request_queue *q;
	int err;

	if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) {
		DMERR("request-based dm-mq may only be stacked on blk-mq device(s)");
		return -EINVAL;
	}

	/* allocate the tag set on the device's preferred NUMA node */
	md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL,
				   md->numa_node_id);
	if (!md->tag_set)
		return -ENOMEM;

	md->tag_set->ops = &dm_mq_ops;
	md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
	md->tag_set->numa_node = md->numa_node_id;
	md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
	md->tag_set->driver_data = md;

	md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
	if (immutable_tgt && immutable_tgt->per_io_data_size) {
		/* any target-specific per-io data is immediately after the tio */
		md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
		md->init_tio_pdu = true;
	}

	err = blk_mq_alloc_tag_set(md->tag_set);
	if (err)
		goto out_kfree_tag_set;

	/* initialize the pre-allocated md->queue for blk-mq use */
	q = blk_mq_init_allocated_queue(md->tag_set, md->queue);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_tag_set;
	}
	dm_init_md_queue(md);

	/* backfill 'mq' sysfs registration normally done in blk_register_queue */
	blk_mq_register_disk(md->disk);

	return 0;

out_tag_set:
	blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
	kfree(md->tag_set);
	return err;
}
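/*
 * For illustration only: a minimal sketch of the teardown an owner of
 * md->tag_set would pair with the init above, mirroring its unwind
 * labels in reverse order (free the tag set, then the allocation).
 * The name dm_mq_cleanup_queue_sketch() is hypothetical, not an
 * existing kernel API.
 */
static void dm_mq_cleanup_queue_sketch(struct mapped_device *md)
{
	if (md->tag_set) {
		/* undo blk_mq_alloc_tag_set() */
		blk_mq_free_tag_set(md->tag_set);
		/* undo kzalloc_node() */
		kfree(md->tag_set);
		md->tag_set = NULL;
	}
}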
ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
						     const char *buf, size_t count)
{
	unsigned deadline;

	if (dm_get_md_type(md) != DM_TYPE_REQUEST_BASED)
		return count;

	if (kstrtouint(buf, 10, &deadline))
		return -EINVAL;

	if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS)
		deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS;

	md->seq_rq_merge_deadline_usecs = deadline;

	return count;
}
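/*
 * For illustration only: a standalone userspace sketch of the same
 * parse-and-clamp behavior the store routine gets from kstrtouint().
 * All names and the 100000 limit stand in for the kernel's
 * MAX_SEQ_RQ_MERGE_DEADLINE_USECS; build with any C compiler and run
 * e.g. ./a.out 250000.
 */
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_DEADLINE_USECS 100000ul	/* stand-in upper bound */

int main(int argc, char **argv)
{
	char *end;
	unsigned long val;

	if (argc != 2)
		return EXIT_FAILURE;

	errno = 0;
	val = strtoul(argv[1], &end, 10);	/* base 10, as kstrtouint(buf, 10, ...) */
	if (errno || *end != '\0' || val > UINT_MAX)
		return EXIT_FAILURE;		/* the kernel routine returns -EINVAL */

	/* silently clamp to the maximum rather than rejecting, as above */
	if (val > MAX_DEADLINE_USECS)
		val = MAX_DEADLINE_USECS;

	printf("deadline = %lu usecs\n", val);
	return EXIT_SUCCESS;
}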