/*
 * bsg-lib: copy the transport request (CDB or equivalent) in from user
 * space and, for bidirectional commands, allocate and map a second request
 * for the read (din) side.
 */
static int bsg_transport_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
		fmode_t mode)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(rq);
	int ret;

	job->request_len = hdr->request_len;
	job->request = memdup_user(uptr64(hdr->request), hdr->request_len);
	if (IS_ERR(job->request))
		return PTR_ERR(job->request);

	if (hdr->dout_xfer_len && hdr->din_xfer_len) {
		/* bidi command: second request carries the data-in side */
		job->bidi_rq = blk_get_request(rq->q, REQ_OP_SCSI_IN, 0);
		if (IS_ERR(job->bidi_rq)) {
			ret = PTR_ERR(job->bidi_rq);
			goto out;
		}

		ret = blk_rq_map_user(rq->q, job->bidi_rq, NULL,
				uptr64(hdr->din_xferp), hdr->din_xfer_len,
				GFP_KERNEL);
		if (ret)
			goto out_free_bidi_rq;

		job->bidi_bio = job->bidi_rq->bio;
	} else {
		job->bidi_rq = NULL;
		job->bidi_bio = NULL;
	}

	return 0;

out_free_bidi_rq:
	if (job->bidi_rq)
		blk_put_request(job->bidi_rq);
out:
	kfree(job->request);
	return ret;
}
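A minimal userspace sketch of how this path is driven, assuming a bsg node such as /dev/bsg/0:0:0:0 and an illustrative INQUIRY CDB: the SG_IO ioctl is issued with a v4 header (guard 'Q'), whose request/request_len and din_xferp/din_xfer_len fields are exactly what bsg_transport_fill_hdr() consumes. Only the read side is filled in here, so the function takes its else branch; setting both dout_* and din_* lengths would exercise the bidirectional path.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/bsg.h>
#include <scsi/sg.h>		/* SG_IO */

int main(void)
{
	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 }; /* INQUIRY, illustrative */
	unsigned char din[96];
	unsigned char sense[32];
	struct sg_io_v4 hdr;
	int fd = open("/dev/bsg/0:0:0:0", O_RDWR);	/* assumed node */

	if (fd < 0)
		return 1;

	memset(&hdr, 0, sizeof(hdr));
	hdr.guard = 'Q';			/* marks a v4 header */
	hdr.protocol = BSG_PROTOCOL_SCSI;
	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
	hdr.request_len = sizeof(cdb);		/* copied by memdup_user() */
	hdr.request = (uintptr_t)cdb;
	hdr.din_xfer_len = sizeof(din);		/* mapped by blk_rq_map_user() */
	hdr.din_xferp = (uintptr_t)din;
	hdr.max_response_len = sizeof(sense);
	hdr.response = (uintptr_t)sense;
	hdr.timeout = 5000;			/* ms */

	if (ioctl(fd, SG_IO, &hdr) < 0)
		perror("SG_IO");
	else
		printf("device_status=%u\n", hdr.device_status);
	close(fd);
	return 0;
}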
/*
 * block/scsi_ioctl.c-style SG_IO handler: validates the v3 header, builds
 * the request via blk_fill_sghdr_rq(), maps the user buffer (flat or iovec,
 * with an explicit overflow check on the iovec lengths), executes
 * synchronously, and reports back through blk_complete_sghdr_rq().
 */
static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
		struct sg_io_hdr *hdr, fmode_t mode)
{
	unsigned long start_time;
	int writing = 0, ret = 0;
	struct request *rq;
	char sense[SCSI_SENSE_BUFFERSIZE];
	struct bio *bio;

	if (hdr->interface_id != 'S')
		return -EINVAL;
	if (hdr->cmd_len > BLK_MAX_CDB)
		return -EINVAL;
	if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
		return -EIO;

	if (hdr->dxfer_len)
		switch (hdr->dxfer_direction) {
		default:
			return -EINVAL;
		case SG_DXFER_TO_DEV:
			writing = 1;
			break;
		case SG_DXFER_TO_FROM_DEV:
		case SG_DXFER_FROM_DEV:
			break;
		}

	rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	if (blk_fill_sghdr_rq(q, rq, hdr, mode)) {
		blk_put_request(rq);
		return -EFAULT;
	}

	if (hdr->iovec_count) {
		const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
		size_t iov_data_len;
		struct sg_iovec *sg_iov;
		struct iovec *iov;
		int i;

		sg_iov = kmalloc(size, GFP_KERNEL);
		if (!sg_iov) {
			ret = -ENOMEM;
			goto out;
		}

		if (copy_from_user(sg_iov, hdr->dxferp, size)) {
			kfree(sg_iov);
			ret = -EFAULT;
			goto out;
		}

		/*
		 * Sum up the vecs, making sure they don't overflow
		 */
		iov = (struct iovec *) sg_iov;
		iov_data_len = 0;
		for (i = 0; i < hdr->iovec_count; i++) {
			if (iov_data_len + iov[i].iov_len < iov_data_len) {
				kfree(sg_iov);
				ret = -EINVAL;
				goto out;
			}
			iov_data_len += iov[i].iov_len;
		}

		/* SG_IO howto says that the shorter of the two wins */
		if (hdr->dxfer_len < iov_data_len) {
			hdr->iovec_count = iov_shorten(iov,
						       hdr->iovec_count,
						       hdr->dxfer_len);
			iov_data_len = hdr->dxfer_len;
		}

		ret = blk_rq_map_user_iov(q, rq, NULL, sg_iov,
					  hdr->iovec_count,
					  iov_data_len, GFP_KERNEL);
		kfree(sg_iov);
	} else if (hdr->dxfer_len)
		ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
				      GFP_KERNEL);

	if (ret)
		goto out;

	bio = rq->bio;
	memset(sense, 0, sizeof(sense));
	rq->sense = sense;
	rq->sense_len = 0;
	rq->retries = 0;

	start_time = jiffies;

	/* ignore return value. All information is passed back to caller
	 * (if he doesn't check that is his problem).
	 * N.B. a non-zero SCSI status is _not_ necessarily an error.
	 */
	blk_execute_rq(q, bd_disk, rq, 0);

	hdr->duration = jiffies_to_msecs(jiffies - start_time);

	return blk_complete_sghdr_rq(rq, hdr, bio);
out:
	blk_put_request(rq);
	return ret;
}
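For contrast, the same handler seen from user space: SG_IO with the classic v3 sg_io_hdr. A minimal sketch, assuming a block device node such as /dev/sda whose SG_IO ioctl is serviced by a sg_io() of this shape; the INQUIRY CDB and buffer sizes are illustrative. The interface_id, cmd_len, dxfer_* and sense fields below map one-to-one onto the checks at the top of the function.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <scsi/sg.h>

int main(void)
{
	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };	/* INQUIRY */
	unsigned char buf[96];
	unsigned char sense[32];
	struct sg_io_hdr io;
	int fd = open("/dev/sda", O_RDONLY);	/* assumed device node */

	if (fd < 0)
		return 1;

	memset(&io, 0, sizeof(io));
	io.interface_id = 'S';			/* checked first by sg_io() */
	io.cmd_len = sizeof(cdb);		/* must be <= BLK_MAX_CDB */
	io.cmdp = cdb;
	io.dxfer_direction = SG_DXFER_FROM_DEV;
	io.dxfer_len = sizeof(buf);		/* flat mapping: iovec_count == 0 */
	io.dxferp = buf;
	io.mx_sb_len = sizeof(sense);
	io.sbp = sense;
	io.timeout = 5000;			/* ms */

	if (ioctl(fd, SG_IO, &io) < 0)
		perror("SG_IO");
	else
		printf("status=0x%x resid=%d duration=%ums\n",
		       io.status, io.resid, io.duration);
	close(fd);
	return 0;
}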
/*
 * Later revision of the same handler: error pointers from blk_get_request(),
 * CDBs larger than BLK_MAX_CDB allocated out of line, the iovec array
 * imported via import_iovec()/iov_iter, and SG_FLAG_Q_AT_HEAD honoured.
 */
static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
		struct sg_io_hdr *hdr, fmode_t mode)
{
	unsigned long start_time;
	ssize_t ret = 0;
	int writing = 0;
	int at_head = 0;
	struct request *rq;
	char sense[SCSI_SENSE_BUFFERSIZE];
	struct bio *bio;

	if (hdr->interface_id != 'S')
		return -EINVAL;

	if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
		return -EIO;

	if (hdr->dxfer_len)
		switch (hdr->dxfer_direction) {
		default:
			return -EINVAL;
		case SG_DXFER_TO_DEV:
			writing = 1;
			break;
		case SG_DXFER_TO_FROM_DEV:
		case SG_DXFER_FROM_DEV:
			break;
		}
	if (hdr->flags & SG_FLAG_Q_AT_HEAD)
		at_head = 1;

	ret = -ENOMEM;
	rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	blk_rq_set_block_pc(rq);

	if (hdr->cmd_len > BLK_MAX_CDB) {
		rq->cmd = kzalloc(hdr->cmd_len, GFP_KERNEL);
		if (!rq->cmd)
			goto out_put_request;
	}

	ret = blk_fill_sghdr_rq(q, rq, hdr, mode);
	if (ret < 0)
		goto out_free_cdb;

	ret = 0;
	if (hdr->iovec_count) {
		struct iov_iter i;
		struct iovec *iov = NULL;

		ret = import_iovec(rq_data_dir(rq), hdr->dxferp,
				   hdr->iovec_count, 0, &iov, &i);
		if (ret < 0)
			goto out_free_cdb;

		/* SG_IO howto says that the shorter of the two wins */
		iov_iter_truncate(&i, hdr->dxfer_len);

		ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);
		kfree(iov);
	} else if (hdr->dxfer_len)
		ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
				      GFP_KERNEL);

	if (ret)
		goto out_free_cdb;

	bio = rq->bio;
	memset(sense, 0, sizeof(sense));
	rq->sense = sense;
	rq->sense_len = 0;
	rq->retries = 0;

	start_time = jiffies;

	/* ignore return value. All information is passed back to caller
	 * (if he doesn't check that is his problem).
	 * N.B. a non-zero SCSI status is _not_ necessarily an error.
	 */
	blk_execute_rq(q, bd_disk, rq, at_head);

	hdr->duration = jiffies_to_msecs(jiffies - start_time);
	ret = blk_complete_sghdr_rq(rq, hdr, bio);

out_free_cdb:
	if (rq->cmd != rq->__cmd)
		kfree(rq->cmd);
out_put_request:
	blk_put_request(rq);
	return ret;
}
/*
 * Older variant: takes the opening struct file for permission checking via
 * verify_command(), fills the request inline, and writes status, sense and
 * residual back into the sg_io_hdr itself after execution.
 */
static int sg_io(struct file *file, request_queue_t *q,
		struct gendisk *bd_disk, struct sg_io_hdr *hdr)
{
	unsigned long start_time, timeout;
	int writing = 0, ret = 0;
	struct request *rq;
	char sense[SCSI_SENSE_BUFFERSIZE];
	unsigned char cmd[BLK_MAX_CDB];
	struct bio *bio;

	if (hdr->interface_id != 'S')
		return -EINVAL;
	if (hdr->cmd_len > BLK_MAX_CDB)
		return -EINVAL;
	if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len))
		return -EFAULT;
	if (verify_command(file, cmd))
		return -EPERM;
	if (hdr->dxfer_len > (q->max_hw_sectors << 9))
		return -EIO;

	if (hdr->dxfer_len)
		switch (hdr->dxfer_direction) {
		default:
			return -EINVAL;
		case SG_DXFER_TO_DEV:
			writing = 1;
			break;
		case SG_DXFER_TO_FROM_DEV:
		case SG_DXFER_FROM_DEV:
			break;
		}

	rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	/*
	 * fill in request structure
	 */
	rq->cmd_len = hdr->cmd_len;
	memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
	memcpy(rq->cmd, cmd, hdr->cmd_len);

	memset(sense, 0, sizeof(sense));
	rq->sense = sense;
	rq->sense_len = 0;

	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	timeout = msecs_to_jiffies(hdr->timeout);
	rq->timeout = (timeout < INT_MAX) ? timeout : INT_MAX;
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_TIMEOUT;

	if (hdr->iovec_count) {
		const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
		struct sg_iovec *iov;

		iov = kmalloc(size, GFP_KERNEL);
		if (!iov) {
			ret = -ENOMEM;
			goto out;
		}

		if (copy_from_user(iov, hdr->dxferp, size)) {
			kfree(iov);
			ret = -EFAULT;
			goto out;
		}

		ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count,
					  hdr->dxfer_len);
		kfree(iov);
	} else if (hdr->dxfer_len)
		ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len);

	if (ret)
		goto out;

	bio = rq->bio;
	rq->retries = 0;

	start_time = jiffies;

	/* ignore return value. All information is passed back to caller
	 * (if he doesn't check that is his problem).
	 * N.B. a non-zero SCSI status is _not_ necessarily an error.
	 */
	blk_execute_rq(q, bd_disk, rq, 0);

	/* write to all output members */
	hdr->status = 0xff & rq->errors;
	hdr->masked_status = status_byte(rq->errors);
	hdr->msg_status = msg_byte(rq->errors);
	hdr->host_status = host_byte(rq->errors);
	hdr->driver_status = driver_byte(rq->errors);
	hdr->info = 0;
	if (hdr->masked_status || hdr->host_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->resid = rq->data_len;
	hdr->duration = ((jiffies - start_time) * 1000) / HZ;
	hdr->sb_len_wr = 0;

	if (rq->sense_len && hdr->sbp) {
		int len = min((unsigned int) hdr->mx_sb_len, rq->sense_len);

		if (!copy_to_user(hdr->sbp, rq->sense, len))
			hdr->sb_len_wr = len;
	}

	if (blk_rq_unmap_user(bio))
		ret = -EFAULT;

	/* may not have succeeded, but output values written to control
	 * structure (struct sg_io_hdr). */
out:
	blk_put_request(rq);
	return ret;
}
/*
 * Earliest variant shown here: no iovec support yet (-EOPNOTSUPP), the
 * request is returned by blk_rq_map_user() itself, REQ_BLOCK_PC is a
 * request flag, and blk_execute_rq() still takes three arguments.
 */
static int sg_io(struct file *file, request_queue_t *q,
		struct gendisk *bd_disk, struct sg_io_hdr *hdr)
{
	unsigned long start_time;
	int reading, writing;
	struct request *rq;
	struct bio *bio;
	char sense[SCSI_SENSE_BUFFERSIZE];
	unsigned char cmd[BLK_MAX_CDB];

	if (hdr->interface_id != 'S')
		return -EINVAL;
	if (hdr->cmd_len > BLK_MAX_CDB)
		return -EINVAL;
	if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len))
		return -EFAULT;
	if (verify_command(file, cmd))
		return -EPERM;

	/*
	 * we'll do that later
	 */
	if (hdr->iovec_count)
		return -EOPNOTSUPP;

	if (hdr->dxfer_len > (q->max_sectors << 9))
		return -EIO;

	reading = writing = 0;
	if (hdr->dxfer_len) {
		switch (hdr->dxfer_direction) {
		default:
			return -EINVAL;
		case SG_DXFER_TO_FROM_DEV:
			reading = 1;
			/* fall through */
		case SG_DXFER_TO_DEV:
			writing = 1;
			break;
		case SG_DXFER_FROM_DEV:
			reading = 1;
			break;
		}

		rq = blk_rq_map_user(q, writing ? WRITE : READ, hdr->dxferp,
				     hdr->dxfer_len);
		if (IS_ERR(rq))
			return PTR_ERR(rq);
	} else
		rq = blk_get_request(q, READ, __GFP_WAIT);

	/*
	 * fill in request structure
	 */
	rq->cmd_len = hdr->cmd_len;
	memcpy(rq->cmd, cmd, hdr->cmd_len);
	if (sizeof(rq->cmd) != hdr->cmd_len)
		memset(rq->cmd + hdr->cmd_len, 0,
		       sizeof(rq->cmd) - hdr->cmd_len);

	memset(sense, 0, sizeof(sense));
	rq->sense = sense;
	rq->sense_len = 0;

	rq->flags |= REQ_BLOCK_PC;
	bio = rq->bio;

	/*
	 * bounce this after holding a reference to the original bio, it's
	 * needed for proper unmapping
	 */
	if (rq->bio)
		blk_queue_bounce(q, &rq->bio);

	rq->timeout = (hdr->timeout * HZ) / 1000;
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_TIMEOUT;

	start_time = jiffies;

	/* ignore return value. All information is passed back to caller
	 * (if he doesn't check that is his problem).
	 * N.B. a non-zero SCSI status is _not_ necessarily an error.
	 */
	blk_execute_rq(q, bd_disk, rq);

	/* write to all output members */
	hdr->status = rq->errors;
	hdr->masked_status = (hdr->status >> 1) & 0x1f;
	hdr->msg_status = 0;
	hdr->host_status = 0;
	hdr->driver_status = 0;
	hdr->info = 0;
	if (hdr->masked_status || hdr->host_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->resid = rq->data_len;
	hdr->duration = ((jiffies - start_time) * 1000) / HZ;
	hdr->sb_len_wr = 0;

	if (rq->sense_len && hdr->sbp) {
		int len = min((unsigned int) hdr->mx_sb_len, rq->sense_len);

		if (!copy_to_user(hdr->sbp, rq->sense, len))
			hdr->sb_len_wr = len;
	}

	if (blk_rq_unmap_user(rq, bio, hdr->dxfer_len))
		return -EFAULT;

	/* may not have succeeded, but output values written to control
	 * structure (struct sg_io_hdr). */
	return 0;
}
/*
 * Intermediate variant: derives write permission from the opening struct
 * file and delegates header filling and completion to blk_fill_sghdr_rq()
 * and blk_complete_sghdr_rq().
 */
static int sg_io(struct file *file, struct request_queue *q,
		struct gendisk *bd_disk, struct sg_io_hdr *hdr)
{
	unsigned long start_time;
	int writing = 0, ret = 0, has_write_perm = 0;
	struct request *rq;
	char sense[SCSI_SENSE_BUFFERSIZE];
	struct bio *bio;

	if (hdr->interface_id != 'S')
		return -EINVAL;
	if (hdr->cmd_len > BLK_MAX_CDB)
		return -EINVAL;
	if (hdr->dxfer_len > (q->max_hw_sectors << 9))
		return -EIO;

	if (hdr->dxfer_len)
		switch (hdr->dxfer_direction) {
		default:
			return -EINVAL;
		case SG_DXFER_TO_DEV:
			writing = 1;
			break;
		case SG_DXFER_TO_FROM_DEV:
		case SG_DXFER_FROM_DEV:
			break;
		}

	rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	if (file)
		has_write_perm = file->f_mode & FMODE_WRITE;

	if (blk_fill_sghdr_rq(q, rq, hdr, has_write_perm)) {
		blk_put_request(rq);
		return -EFAULT;
	}

	if (hdr->iovec_count) {
		const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
		struct sg_iovec *iov;

		iov = kmalloc(size, GFP_KERNEL);
		if (!iov) {
			ret = -ENOMEM;
			goto out;
		}

		if (copy_from_user(iov, hdr->dxferp, size)) {
			kfree(iov);
			ret = -EFAULT;
			goto out;
		}

		ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count,
					  hdr->dxfer_len);
		kfree(iov);
	} else if (hdr->dxfer_len)
		ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len);

	if (ret)
		goto out;

	bio = rq->bio;
	memset(sense, 0, sizeof(sense));
	rq->sense = sense;
	rq->sense_len = 0;
	rq->retries = 0;

	start_time = jiffies;

	/* ignore return value. All information is passed back to caller
	 * (if he doesn't check that is his problem).
	 * N.B. a non-zero SCSI status is _not_ necessarily an error.
	 */
	blk_execute_rq(q, bd_disk, rq, 0);

	hdr->duration = jiffies_to_msecs(jiffies - start_time);

	return blk_complete_sghdr_rq(rq, hdr, bio);
out:
	blk_put_request(rq);
	return ret;
}
	/*
	 * Fragment: the iovec-mapping tail of a sg_io() from the period when
	 * blk_rq_map_user_iov() had switched to taking an iov_iter but the
	 * iovec array was still pulled in with rw_copy_check_uvector().
	 * The function prologue and epilogue are elided in the source.
	 */
		ret = rw_copy_check_uvector(-1, hdr->dxferp, hdr->iovec_count,
					    0, NULL, &iov);
		if (ret < 0) {
			kfree(iov);
			goto out_free_cdb;
		}

		/* SG_IO howto says that the shorter of the two wins */
		iov_iter_init(&i, rq_data_dir(rq), iov, hdr->iovec_count,
			      min_t(unsigned, ret, hdr->dxfer_len));

		ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);
		kfree(iov);
	} else if (hdr->dxfer_len)
		ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
				      GFP_KERNEL);

	if (ret)
		goto out_free_cdb;

	bio = rq->bio;
	memset(sense, 0, sizeof(sense));
	rq->sense = sense;
	rq->sense_len = 0;
	rq->retries = 0;

	start_time = jiffies;

	/* ignore return value. All information is passed back to caller
	 * (if he doesn't check that is his problem).
	 * N.B. a non-zero SCSI status is _not_ necessarily an error.
	 */
/*
 * osst tape driver: issue a SCSI command asynchronously.  Driver-owned
 * scatter-gather pages are handed to the block layer through rq_map_data
 * (null_mapped, so no user copy), kernel buffers go via blk_rq_map_kern(),
 * and completion is reported through osst_end_async().
 */
static int osst_execute(struct osst_request *SRpnt, const unsigned char *cmd,
			int cmd_len, int data_direction, void *buffer,
			unsigned bufflen, int use_sg, int timeout, int retries)
{
	struct request *req;
	struct page **pages = NULL;
	struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data;
	int err = 0;
	int write = (data_direction == DMA_TO_DEVICE);

	req = blk_get_request(SRpnt->stp->device->request_queue, write,
			      GFP_KERNEL);
	if (!req)
		return DRIVER_ERROR << 24;

	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->cmd_flags |= REQ_QUIET;

	SRpnt->bio = NULL;

	if (use_sg) {
		struct scatterlist *sg, *sgl = (struct scatterlist *)buffer;
		int i;

		pages = kzalloc(use_sg * sizeof(struct page *), GFP_KERNEL);
		if (!pages)
			goto free_req;

		for_each_sg(sgl, sg, use_sg, i)
			pages[i] = sg_page(sg);

		mdata->null_mapped = 1;

		mdata->page_order = get_order(sgl[0].length);
		mdata->nr_entries =
			DIV_ROUND_UP(bufflen, PAGE_SIZE << mdata->page_order);
		mdata->offset = 0;

		err = blk_rq_map_user(req->q, req, mdata, NULL, bufflen,
				      GFP_KERNEL);
		if (err) {
			kfree(pages);
			goto free_req;
		}
		SRpnt->bio = req->bio;
		mdata->pages = pages;

	} else if (bufflen) {
		err = blk_rq_map_kern(req->q, req, buffer, bufflen,
				      GFP_KERNEL);
		if (err)
			goto free_req;
	}

	req->cmd_len = cmd_len;
	memset(req->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = SRpnt->sense;
	req->sense_len = 0;
	req->timeout = timeout;
	req->retries = retries;
	req->end_io_data = SRpnt;

	blk_execute_rq_nowait(req->q, NULL, req, 1, osst_end_async);
	return 0;

free_req:
	blk_put_request(req);
	return DRIVER_ERROR << 24;
}
/*
 * Vendor-patched (Broadcom MIPS NDVD) variant of the same handler: under
 * CONFIG_MIPS_BCM_NDVD it cross-checks the transfer length implied by the
 * CDB against the length supplied in the sg_io_hdr before issuing.
 */
static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
		struct sg_io_hdr *hdr, fmode_t mode)
{
	unsigned long start_time;
	int writing = 0, ret = 0;
	struct request *rq;
	char sense[SCSI_SENSE_BUFFERSIZE];
	struct bio *bio;
#if defined (CONFIG_MIPS_BCM_NDVD)
	int cdb_data_xfer_len;
	unsigned char cmd[BLK_MAX_CDB];
#endif

	if (hdr->interface_id != 'S')
		return -EINVAL;
	if (hdr->cmd_len > BLK_MAX_CDB)
		return -EINVAL;
	if (hdr->dxfer_len > (q->max_hw_sectors << 9))
		return -EIO;

#if defined (CONFIG_MIPS_BCM_NDVD)
	if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len))
		return -EFAULT;

	if (hdr->dxfer_len && bd_disk->sector_size) {
		cdb_data_xfer_len = cdb_xfer_len(cmd, bd_disk->sector_size);
		if (hdr->dxfer_len < cdb_data_xfer_len) {
			printk(KERN_WARNING
			       "\n!!! %s - CMD 0x%02x: CDB Xfer Len 0x%08x GT SG Resource Len 0x%08x !!!\n\n",
			       __FUNCTION__, cmd[0], cdb_data_xfer_len,
			       hdr->dxfer_len);
			BUG();
			return -EIO;
		}
	} else if (!bd_disk->sector_size)
		printk(KERN_WARNING
		       "\n!!! %s - DISK SECTOR SIZE UNINITIALIZED !!!",
		       __FUNCTION__);
#endif

	if (hdr->dxfer_len)
		switch (hdr->dxfer_direction) {
		default:
			return -EINVAL;
		case SG_DXFER_TO_DEV:
			writing = 1;
			break;
		case SG_DXFER_TO_FROM_DEV:
		case SG_DXFER_FROM_DEV:
			break;
		}

	rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	if (blk_fill_sghdr_rq(q, rq, hdr, mode)) {
		blk_put_request(rq);
		return -EFAULT;
	}

	if (hdr->iovec_count) {
		const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
		struct sg_iovec *iov;

		iov = kmalloc(size, GFP_KERNEL);
		if (!iov) {
			ret = -ENOMEM;
			goto out;
		}

		if (copy_from_user(iov, hdr->dxferp, size)) {
			kfree(iov);
			ret = -EFAULT;
			goto out;
		}

		ret = blk_rq_map_user_iov(q, rq, NULL, iov, hdr->iovec_count,
					  hdr->dxfer_len, GFP_KERNEL);
		kfree(iov);
	} else if (hdr->dxfer_len)
		ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
				      GFP_KERNEL);

	if (ret)
		goto out;

	bio = rq->bio;
	memset(sense, 0, sizeof(sense));
	rq->sense = sense;
	rq->sense_len = 0;
	rq->retries = 0;

	start_time = jiffies;

	/* ignore return value. All information is passed back to caller
	 * (if he doesn't check that is his problem).
	 * N.B. a non-zero SCSI status is _not_ necessarily an error.
	 */
	blk_execute_rq(q, bd_disk, rq, 0);

	hdr->duration = jiffies_to_msecs(jiffies - start_time);

	return blk_complete_sghdr_rq(rq, hdr, bio);
out:
	blk_put_request(rq);
	return ret;
}
/*
 * lightnvm (Open-Channel SSD) passthrough: builds an NVMe command from user
 * space, DMA-maps the optional PPA list and metadata through the device's
 * DMA pool, maps the data buffer with blk_rq_map_user(), executes the
 * request synchronously, and copies results and read metadata back out.
 */
static int nvme_nvm_submit_user_cmd(struct request_queue *q,
				struct nvme_ns *ns,
				struct nvme_nvm_command *vcmd,
				void __user *ubuf, unsigned int bufflen,
				void __user *meta_buf, unsigned int meta_len,
				void __user *ppa_buf, unsigned int ppa_len,
				u32 *result, u64 *status, unsigned int timeout)
{
	bool write = nvme_is_write((struct nvme_command *)vcmd);
	struct nvm_dev *dev = ns->ndev;
	struct gendisk *disk = ns->disk;
	struct request *rq;
	struct bio *bio = NULL;
	__le64 *ppa_list = NULL;
	dma_addr_t ppa_dma;
	__le64 *metadata = NULL;
	dma_addr_t metadata_dma;
	DECLARE_COMPLETION_ONSTACK(wait);
	int ret = 0;

	rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0,
			NVME_QID_ANY);
	if (IS_ERR(rq)) {
		ret = -ENOMEM;
		goto err_cmd;
	}

	rq->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	if (ppa_buf && ppa_len) {
		ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma);
		if (!ppa_list) {
			ret = -ENOMEM;
			goto err_rq;
		}
		if (copy_from_user(ppa_list, (void __user *)ppa_buf,
						sizeof(u64) * (ppa_len + 1))) {
			ret = -EFAULT;
			goto err_ppa;
		}
		vcmd->ph_rw.spba = cpu_to_le64(ppa_dma);
	} else {
		vcmd->ph_rw.spba = cpu_to_le64((uintptr_t)ppa_buf);
	}

	if (ubuf && bufflen) {
		ret = blk_rq_map_user(q, rq, NULL, ubuf, bufflen, GFP_KERNEL);
		if (ret)
			goto err_ppa;
		bio = rq->bio;

		if (meta_buf && meta_len) {
			metadata = dma_pool_alloc(dev->dma_pool, GFP_KERNEL,
								&metadata_dma);
			if (!metadata) {
				ret = -ENOMEM;
				goto err_map;
			}

			if (write) {
				if (copy_from_user(metadata,
						   (void __user *)meta_buf,
						   meta_len)) {
					ret = -EFAULT;
					goto err_meta;
				}
			}
			vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma);
		}

		bio->bi_disk = disk;
	}

	blk_execute_rq(q, NULL, rq, 0);

	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else if (nvme_req(rq)->status & 0x7ff)
		ret = -EIO;
	if (result)
		*result = nvme_req(rq)->status & 0x7ff;
	if (status)
		*status = le64_to_cpu(nvme_req(rq)->result.u64);

	if (metadata && !ret && !write) {
		if (copy_to_user(meta_buf, (void *)metadata, meta_len))
			ret = -EFAULT;
	}
err_meta:
	if (meta_buf && meta_len)
		dma_pool_free(dev->dma_pool, metadata, metadata_dma);
err_map:
	if (bio)
		blk_rq_unmap_user(bio);
err_ppa:
	if (ppa_buf && ppa_len)
		dma_pool_free(dev->dma_pool, ppa_list, ppa_dma);
err_rq:
	blk_mq_free_request(rq);
err_cmd:
	return ret;
}