static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
		goto no_merge;

	if (blk_integrity_merge_bio(q, req, bio) == false)
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump both
	 * counters.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
	return 0;
}
static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct request *rq;
	struct bio *bio = rqd->bio;

	/* Allocate a request matching the bio's data direction. */
	rq = blk_mq_alloc_request(q, bio_data_dir(bio), 0);
	if (IS_ERR(rq))
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_DRV_PRIV;
	rq->__sector = bio->bi_iter.bi_sector;
	rq->ioprio = bio_prio(bio);

	/* Carry the bio's segment count over so the request is accounted correctly. */
	if (bio_has_data(bio))
		rq->nr_phys_segments = bio_phys_segments(q, bio);

	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io);

	return 0;
}
static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct nvme_ns *ns = q->queuedata;
	struct request *rq;
	struct bio *bio = rqd->bio;
	struct nvme_nvm_command *cmd;

	rq = blk_mq_alloc_request(q, bio_data_dir(bio), 0);
	if (IS_ERR(rq))
		return -ENOMEM;

	/* Command and completion share one allocation; ->special points at the completion. */
	cmd = kzalloc(sizeof(struct nvme_nvm_command) +
		      sizeof(struct nvme_nvm_completion), GFP_KERNEL);
	if (!cmd) {
		blk_mq_free_request(rq);
		return -ENOMEM;
	}

	rq->cmd_type = REQ_TYPE_DRV_PRIV;
	rq->ioprio = bio_prio(bio);

	/* Carry the bio's segment count over so the request is accounted correctly. */
	if (bio_has_data(bio))
		rq->nr_phys_segments = bio_phys_segments(q, bio);

	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;

	nvme_nvm_rqtocmd(rq, rqd, ns, cmd);

	rq->cmd = (unsigned char *)cmd;
	rq->cmd_len = sizeof(struct nvme_nvm_command);
	rq->special = cmd + 1;

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);

	return 0;
}