static int nvme_nvm_submit_io_sync(struct nvm_dev *dev, struct nvm_rq *rqd) { struct request_queue *q = dev->q; struct request *rq; struct nvme_nvm_command cmd; int ret = 0; memset(&cmd, 0, sizeof(struct nvme_nvm_command)); rq = nvme_nvm_alloc_request(q, rqd, &cmd); if (IS_ERR(rq)) return PTR_ERR(rq); /* I/Os can fail and the error is signaled through rqd. Callers must * handle the error accordingly. */ blk_execute_rq(q, NULL, rq, 0); if (nvme_req(rq)->flags & NVME_REQ_CANCELLED) ret = -EINTR; rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64); rqd->error = nvme_req(rq)->status; blk_mq_free_request(rq); return ret; }
/*
 * Completion handler for asynchronous lightnvm I/O.
 *
 * Propagates the NVMe result and status into the lightnvm request, notifies
 * the lightnvm core, and releases the command buffer and block-layer request
 * allocated at submission time.
 */
static void nvme_nvm_end_io(struct request *rq, blk_status_t status)
{
	struct nvm_rq *rqd = rq->end_io_data;
	struct nvme_request *nreq = nvme_req(rq);

	/* Hand the device result back to the lightnvm core. */
	rqd->ppa_status = le64_to_cpu(nreq->result.u64);
	rqd->error = nreq->status;
	nvm_end_io(rqd);

	/* The command was allocated at submission; free it with the request. */
	kfree(nreq->cmd);
	blk_mq_free_request(rq);
}
static void nvme_nvm_end_io(struct request *rq, int error) { struct nvm_rq *rqd = rq->end_io_data; struct nvme_nvm_completion *cqe = rq->special; if (cqe) rqd->ppa_status = le64_to_cpu(cqe->result); nvm_end_io(rqd, error); kfree(rq->cmd); blk_mq_free_request(rq); }
static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd) { struct request_queue *q = dev->q; struct nvme_ns *ns = q->queuedata; struct request *rq; struct bio *bio = rqd->bio; struct nvme_nvm_command *cmd; rq = blk_mq_alloc_request(q, bio_data_dir(bio), 0); if (IS_ERR(rq)) return -ENOMEM; cmd = kzalloc(sizeof(struct nvme_nvm_command) + sizeof(struct nvme_nvm_completion), GFP_KERNEL); if (!cmd) { blk_mq_free_request(rq); return -ENOMEM; } rq->cmd_type = REQ_TYPE_DRV_PRIV; rq->ioprio = bio_prio(bio); if (bio_has_data(bio)) rq->nr_phys_segments = bio_phys_segments(q, bio); rq->__data_len = bio->bi_iter.bi_size; rq->bio = rq->biotail = bio; nvme_nvm_rqtocmd(rq, rqd, ns, cmd); rq->cmd = (unsigned char *)cmd; rq->cmd_len = sizeof(struct nvme_nvm_command); rq->special = cmd + 1; rq->end_io_data = rqd; blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io); return 0; }
/*
 * Issue a user-supplied lightnvm vector command synchronously.
 *
 * Maps the user data buffer (if any) into the request, copies in the PPA
 * list and, for writes, the out-of-band metadata through DMA pool buffers,
 * executes the command, and copies results/metadata back to user space.
 *
 * @result/@status: optional out-parameters for the NVMe status code (low 11
 * bits) and the 64-bit command result.
 * @timeout: in jiffies; 0 selects ADMIN_TIMEOUT.
 *
 * Returns 0 on success, -EINTR if the command was cancelled, -EIO on a
 * device status error, or a negative errno for setup failures.
 *
 * Fixes vs. previous version: propagate the real nvme_alloc_request() error
 * instead of -ENOMEM; guard cleanup on the pointers actually allocated so
 * dma_pool_free() is never called with a NULL buffer and an uninitialized
 * dma_addr_t (possible when meta_buf was given without a data buffer);
 * dropped the unused on-stack completion.
 */
static int nvme_nvm_submit_user_cmd(struct request_queue *q,
				struct nvme_ns *ns,
				struct nvme_nvm_command *vcmd,
				void __user *ubuf, unsigned int bufflen,
				void __user *meta_buf, unsigned int meta_len,
				void __user *ppa_buf, unsigned int ppa_len,
				u32 *result, u64 *status, unsigned int timeout)
{
	bool write = nvme_is_write((struct nvme_command *)vcmd);
	struct nvm_dev *dev = ns->ndev;
	struct gendisk *disk = ns->disk;
	struct request *rq;
	struct bio *bio = NULL;
	__le64 *ppa_list = NULL;
	dma_addr_t ppa_dma;
	__le64 *metadata = NULL;
	dma_addr_t metadata_dma;
	int ret = 0;

	rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0,
			NVME_QID_ANY);
	if (IS_ERR(rq)) {
		ret = PTR_ERR(rq);	/* keep the real reason, not -ENOMEM */
		goto err_cmd;
	}

	rq->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	if (ppa_buf && ppa_len) {
		ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma);
		if (!ppa_list) {
			ret = -ENOMEM;
			goto err_rq;
		}
		/* The ioctl ABI encodes the PPA count as ppa_len + 1. */
		if (copy_from_user(ppa_list, (void __user *)ppa_buf,
						sizeof(u64) * (ppa_len + 1))) {
			ret = -EFAULT;
			goto err_ppa;
		}
		vcmd->ph_rw.spba = cpu_to_le64(ppa_dma);
	} else {
		/* Single PPA: it is carried inline in the pointer value. */
		vcmd->ph_rw.spba = cpu_to_le64((uintptr_t)ppa_buf);
	}

	if (ubuf && bufflen) {
		ret = blk_rq_map_user(q, rq, NULL, ubuf, bufflen, GFP_KERNEL);
		if (ret)
			goto err_ppa;
		bio = rq->bio;

		if (meta_buf && meta_len) {
			metadata = dma_pool_alloc(dev->dma_pool, GFP_KERNEL,
								&metadata_dma);
			if (!metadata) {
				ret = -ENOMEM;
				goto err_map;
			}

			/* Metadata travels host->device only on writes. */
			if (write) {
				if (copy_from_user(metadata,
						(void __user *)meta_buf,
						meta_len)) {
					ret = -EFAULT;
					goto err_meta;
				}
			}
			vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma);
		}

		bio->bi_disk = disk;
	}

	blk_execute_rq(q, NULL, rq, 0);

	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else if (nvme_req(rq)->status & 0x7ff)
		ret = -EIO;
	if (result)
		*result = nvme_req(rq)->status & 0x7ff;
	if (status)
		*status = le64_to_cpu(nvme_req(rq)->result.u64);

	/* On a successful read, hand the metadata back to user space. */
	if (metadata && !ret && !write) {
		if (copy_to_user(meta_buf, (void *)metadata, meta_len))
			ret = -EFAULT;
	}
err_meta:
	/*
	 * Guard on the pointer, not on meta_buf/meta_len: metadata is only
	 * allocated when a data buffer was mapped, so the old condition could
	 * free a NULL buffer with an uninitialized dma address.
	 */
	if (metadata)
		dma_pool_free(dev->dma_pool, metadata, metadata_dma);
err_map:
	if (bio)
		blk_rq_unmap_user(bio);
err_ppa:
	if (ppa_list)
		dma_pool_free(dev->dma_pool, ppa_list, ppa_dma);
err_rq:
	blk_mq_free_request(rq);
err_cmd:
	return ret;
}