static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
				 struct bio *bio)
{
	int r, ret = 0;

	/*
	 * fill in all the output members
	 */
	hdr->status = rq->errors & 0xff;
	hdr->masked_status = status_byte(rq->errors);
	hdr->msg_status = msg_byte(rq->errors);
	hdr->host_status = host_byte(rq->errors);
	hdr->driver_status = driver_byte(rq->errors);
	hdr->info = 0;
	if (hdr->masked_status || hdr->host_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->resid = rq->resid_len;
	hdr->sb_len_wr = 0;

	if (rq->sense_len && hdr->sbp) {
		int len = min((unsigned int) hdr->mx_sb_len, rq->sense_len);

		if (!copy_to_user(hdr->sbp, rq->sense, len))
			hdr->sb_len_wr = len;
		else
			ret = -EFAULT;
	}

	r = blk_rq_unmap_user(bio);
	if (!ret)
		ret = r;
	blk_put_request(rq);

	return ret;
}
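For reference, the byte helpers used above unpack the 32-bit SCSI result word kept in rq->errors. Below is a sketch of the classic accessors from include/scsi/scsi.h of this kernel era; the my_ prefix marks them as illustrative copies, not new API:

/*
 * Illustrative copies of the classic <scsi/scsi.h> helpers used above.
 * The result word packs, from high byte to low byte:
 * driver_byte | host_byte | msg_byte | status.
 */
#define my_status_byte(result)	(((result) >> 1) & 0x7f)	/* SAM status, stored shifted */
#define my_msg_byte(result)	(((result) >> 8) & 0xff)
#define my_host_byte(result)	(((result) >> 16) & 0xff)
#define my_driver_byte(result)	(((result) >> 24) & 0xff)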
Example #2
/**
 * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request structure to fill
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 * @ubuf:	the user buffer
 * @len:	length of user data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 * reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	unsigned long bytes_read = 0;
	struct bio *bio = NULL;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len)
		return -EINVAL;

	if (!ubuf && (!map_data || !map_data->null_mapped))
		return -EINVAL;

	while (bytes_read != len) {
		unsigned long map_len, end, start;

		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
		start = (unsigned long)ubuf >> PAGE_SHIFT;

		/*
		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
		 * pages. If this happens we just lower the requested
		 * mapping len by a page so that we can fit
		 */
		if (end - start > BIO_MAX_PAGES)
			map_len -= PAGE_SIZE;

		ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
					gfp_mask);
		if (ret < 0)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
		bytes_read += ret;
		ubuf += ret;

		if (map_data)
			map_data->offset += ret;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	rq->buffer = NULL;
	return 0;
unmap_rq:
	blk_rq_unmap_user(bio);
	rq->bio = NULL;
	return ret;
}
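Putting the kernel-doc contract together, here is a minimal sketch of the whole map/execute/unmap cycle, assuming the same REQ_TYPE_BLOCK_PC-era API used throughout these examples; sketch_passthrough_read and the omitted CDB setup are placeholders, not kernel code:

static int sketch_passthrough_read(struct request_queue *q,
				   struct gendisk *disk,
				   void __user *ubuf, unsigned long len)
{
	struct request *rq;
	struct bio *bio;
	int ret;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	/* a real caller would also fill rq->cmd, rq->cmd_len and rq->timeout */

	/* zero-copy mapping if possible, kernel bounce buffer otherwise */
	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		goto out;

	/* save the original bio: it must be passed back for unmapping */
	bio = rq->bio;

	blk_execute_rq(q, disk, rq, 0);

	/* the matching unmap, still in process context as required */
	ret = blk_rq_unmap_user(bio);
out:
	blk_put_request(rq);
	return ret;
}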
Example #3
static void bsg_transport_free_rq(struct request *rq)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(rq);

	if (job->bidi_rq) {
		blk_rq_unmap_user(job->bidi_bio);
		blk_put_request(job->bidi_rq);
	}

	kfree(job->request);
}
Example #4
/* Wakeup from interrupt */
static void osst_end_async(struct request *req, int update)
{
	struct osst_request *SRpnt = req->end_io_data;
	struct osst_tape *STp = SRpnt->stp;
	struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data;

	STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors;
#if DEBUG
	STp->write_pending = 0;
#endif
	if (SRpnt->waiting)
		complete(SRpnt->waiting);

	if (SRpnt->bio) {
		kfree(mdata->pages);
		blk_rq_unmap_user(SRpnt->bio);
	}

	__blk_put_request(req->q, req);
}
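For context, here is a sketch of the submission side that would pair with this completion handler, loosely modeled on the osst driver's request path of the same era; sketch_submit_async and its parameters are assumptions, not the driver's exact code:

static int sketch_submit_async(struct request_queue *q,
			       struct osst_request *SRpnt,
			       unsigned char *cmd, int cmd_len, int timeout)
{
	struct request *req;

	req = blk_get_request(q, WRITE, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->cmd_len = cmd_len;
	memcpy(req->cmd, cmd, cmd_len);
	req->timeout = timeout;
	req->end_io_data = SRpnt;	/* retrieved by osst_end_async() above */

	/* completion invokes osst_end_async(), which unmaps and frees */
	blk_execute_rq_nowait(q, NULL, req, 1, osst_end_async);
	return 0;
}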
Example #5
static void scsi_unmap_user_pages(struct scsi_tgt_cmd *tcmd)
{
	blk_rq_unmap_user(tcmd->bio);
}
Example #6
static int sg_io(struct file *file, request_queue_t *q,
		struct gendisk *bd_disk, struct sg_io_hdr *hdr)
{
	unsigned long start_time, timeout;
	int writing = 0, ret = 0;
	struct request *rq;
	char sense[SCSI_SENSE_BUFFERSIZE];
	unsigned char cmd[BLK_MAX_CDB];
	struct bio *bio;

	if (hdr->interface_id != 'S')
		return -EINVAL;
	if (hdr->cmd_len > BLK_MAX_CDB)
		return -EINVAL;
	if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len))
		return -EFAULT;
	if (verify_command(file, cmd))
		return -EPERM;

	if (hdr->dxfer_len > (q->max_hw_sectors << 9))
		return -EIO;

	if (hdr->dxfer_len)
		switch (hdr->dxfer_direction) {
		default:
			return -EINVAL;
		case SG_DXFER_TO_DEV:
			writing = 1;
			break;
		case SG_DXFER_TO_FROM_DEV:
		case SG_DXFER_FROM_DEV:
			break;
		}

	rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	/*
	 * fill in request structure
	 */
	rq->cmd_len = hdr->cmd_len;
	memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
	memcpy(rq->cmd, cmd, hdr->cmd_len);

	memset(sense, 0, sizeof(sense));
	rq->sense = sense;
	rq->sense_len = 0;

	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	timeout = msecs_to_jiffies(hdr->timeout);
	rq->timeout = (timeout < INT_MAX) ? timeout : INT_MAX;
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_TIMEOUT;

	if (hdr->iovec_count) {
		const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
		struct sg_iovec *iov;

		iov = kmalloc(size, GFP_KERNEL);
		if (!iov) {
			ret = -ENOMEM;
			goto out;
		}

		if (copy_from_user(iov, hdr->dxferp, size)) {
			kfree(iov);
			ret = -EFAULT;
			goto out;
		}

		ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count,
					  hdr->dxfer_len);
		kfree(iov);
	} else if (hdr->dxfer_len)
		ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len);

	if (ret)
		goto out;

	bio = rq->bio;
	rq->retries = 0;

	start_time = jiffies;

	/* ignore return value. All information is passed back to the caller
	 * (if he doesn't check, that is his problem).
	 * N.B. a non-zero SCSI status is _not_ necessarily an error.
	 */
	blk_execute_rq(q, bd_disk, rq, 0);

	/* write to all output members */
	hdr->status = 0xff & rq->errors;
	hdr->masked_status = status_byte(rq->errors);
	hdr->msg_status = msg_byte(rq->errors);
	hdr->host_status = host_byte(rq->errors);
	hdr->driver_status = driver_byte(rq->errors);
	hdr->info = 0;
	if (hdr->masked_status || hdr->host_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->resid = rq->data_len;
	hdr->duration = ((jiffies - start_time) * 1000) / HZ;
	hdr->sb_len_wr = 0;

	if (rq->sense_len && hdr->sbp) {
		int len = min((unsigned int) hdr->mx_sb_len, rq->sense_len);

		if (!copy_to_user(hdr->sbp, rq->sense, len))
			hdr->sb_len_wr = len;
	}

	if (blk_rq_unmap_user(bio))
		ret = -EFAULT;

	/* may not have succeeded, but output values written to control
	 * structure (struct sg_io_hdr).  */
out:
	blk_put_request(rq);
	return ret;
}
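For the userspace side of this path, here is a sketch (my addition, not one of the scraped examples) that issues a TEST UNIT READY through the SG_IO ioctl serviced by sg_io() above; fd is assumed to be open on an sg or block device node:

#include <string.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

/* returns the masked SCSI status, or -1 on ioctl failure */
static int sketch_test_unit_ready(int fd)
{
	unsigned char cdb[6] = { 0x00, 0, 0, 0, 0, 0 };	/* TEST UNIT READY */
	unsigned char sense[32];
	struct sg_io_hdr hdr;

	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';			/* checked first by sg_io() */
	hdr.cmd_len = sizeof(cdb);
	hdr.cmdp = cdb;
	hdr.dxfer_direction = SG_DXFER_NONE;	/* no data transfer */
	hdr.sbp = sense;
	hdr.mx_sb_len = sizeof(sense);
	hdr.timeout = 5000;			/* milliseconds */

	if (ioctl(fd, SG_IO, &hdr) < 0)
		return -1;
	/* status, masked_status etc. were filled in by the kernel as above */
	return hdr.masked_status;
}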
Example #7
static int sg_io(struct file *file, request_queue_t *q,
                 struct gendisk *bd_disk, struct sg_io_hdr *hdr)
{
    unsigned long start_time;
    int reading, writing;
    struct request *rq;
    struct bio *bio;
    char sense[SCSI_SENSE_BUFFERSIZE];
    unsigned char cmd[BLK_MAX_CDB];

    if (hdr->interface_id != 'S')
        return -EINVAL;
    if (hdr->cmd_len > BLK_MAX_CDB)
        return -EINVAL;
    if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len))
        return -EFAULT;
    if (verify_command(file, cmd))
        return -EPERM;

    /*
     * we'll do that later
     */
    if (hdr->iovec_count)
        return -EOPNOTSUPP;

    if (hdr->dxfer_len > (q->max_sectors << 9))
        return -EIO;

    reading = writing = 0;
    if (hdr->dxfer_len) {
        switch (hdr->dxfer_direction) {
        default:
            return -EINVAL;
        case SG_DXFER_TO_FROM_DEV:
            reading = 1;
        /* fall through */
        case SG_DXFER_TO_DEV:
            writing = 1;
            break;
        case SG_DXFER_FROM_DEV:
            reading = 1;
            break;
        }

        rq = blk_rq_map_user(q, writing ? WRITE : READ, hdr->dxferp,
                             hdr->dxfer_len);

        if (IS_ERR(rq))
            return PTR_ERR(rq);
    } else
        rq = blk_get_request(q, READ, __GFP_WAIT);

    /*
     * fill in request structure
     */
    rq->cmd_len = hdr->cmd_len;
    memcpy(rq->cmd, cmd, hdr->cmd_len);
    if (sizeof(rq->cmd) != hdr->cmd_len)
        memset(rq->cmd + hdr->cmd_len, 0, sizeof(rq->cmd) - hdr->cmd_len);

    memset(sense, 0, sizeof(sense));
    rq->sense = sense;
    rq->sense_len = 0;

    rq->flags |= REQ_BLOCK_PC;
    bio = rq->bio;

    /*
     * bounce this after holding a reference to the original bio, it's
     * needed for proper unmapping
     */
    if (rq->bio)
        blk_queue_bounce(q, &rq->bio);

    rq->timeout = (hdr->timeout * HZ) / 1000;
    if (!rq->timeout)
        rq->timeout = q->sg_timeout;
    if (!rq->timeout)
        rq->timeout = BLK_DEFAULT_TIMEOUT;

    start_time = jiffies;

    /* ignore return value. All information is passed back to the caller
     * (if he doesn't check, that is his problem).
     * N.B. a non-zero SCSI status is _not_ necessarily an error.
     */
    blk_execute_rq(q, bd_disk, rq);

    /* write to all output members */
    hdr->status = rq->errors;
    hdr->masked_status = (hdr->status >> 1) & 0x1f;
    hdr->msg_status = 0;
    hdr->host_status = 0;
    hdr->driver_status = 0;
    hdr->info = 0;
    if (hdr->masked_status || hdr->host_status || hdr->driver_status)
        hdr->info |= SG_INFO_CHECK;
    hdr->resid = rq->data_len;
    hdr->duration = ((jiffies - start_time) * 1000) / HZ;
    hdr->sb_len_wr = 0;

    if (rq->sense_len && hdr->sbp) {
        int len = min((unsigned int) hdr->mx_sb_len, rq->sense_len);

        if (!copy_to_user(hdr->sbp, rq->sense, len))
            hdr->sb_len_wr = len;
    }

    if (blk_rq_unmap_user(rq, bio, hdr->dxfer_len))
        return -EFAULT;

    /* may not have succeeded, but output values written to control
     * structure (struct sg_io_hdr).  */
    return 0;
}
Example #8
/*
 * unmap a request that was previously mapped to this sg_io_hdr. handles
 * both sg and non-sg sg_io_hdr.
 */
static int blk_unmap_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr)
{
	blk_rq_unmap_user(rq->bio);
	blk_put_request(rq);
	return 0;
}
Example #9
static int nvme_nvm_submit_user_cmd(struct request_queue *q,
				struct nvme_ns *ns,
				struct nvme_nvm_command *vcmd,
				void __user *ubuf, unsigned int bufflen,
				void __user *meta_buf, unsigned int meta_len,
				void __user *ppa_buf, unsigned int ppa_len,
				u32 *result, u64 *status, unsigned int timeout)
{
	bool write = nvme_is_write((struct nvme_command *)vcmd);
	struct nvm_dev *dev = ns->ndev;
	struct gendisk *disk = ns->disk;
	struct request *rq;
	struct bio *bio = NULL;
	__le64 *ppa_list = NULL;
	dma_addr_t ppa_dma;
	__le64 *metadata = NULL;
	dma_addr_t metadata_dma;
	DECLARE_COMPLETION_ONSTACK(wait);
	int ret = 0;

	rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0,
			NVME_QID_ANY);
	if (IS_ERR(rq)) {
		ret = -ENOMEM;
		goto err_cmd;
	}

	rq->timeout = timeout ? timeout : ADMIN_TIMEOUT;

	if (ppa_buf && ppa_len) {
		ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma);
		if (!ppa_list) {
			ret = -ENOMEM;
			goto err_rq;
		}
		if (copy_from_user(ppa_list, (void __user *)ppa_buf,
						sizeof(u64) * (ppa_len + 1))) {
			ret = -EFAULT;
			goto err_ppa;
		}
		vcmd->ph_rw.spba = cpu_to_le64(ppa_dma);
	} else {
		vcmd->ph_rw.spba = cpu_to_le64((uintptr_t)ppa_buf);
	}

	if (ubuf && bufflen) {
		ret = blk_rq_map_user(q, rq, NULL, ubuf, bufflen, GFP_KERNEL);
		if (ret)
			goto err_ppa;
		bio = rq->bio;

		if (meta_buf && meta_len) {
			metadata = dma_pool_alloc(dev->dma_pool, GFP_KERNEL,
								&metadata_dma);
			if (!metadata) {
				ret = -ENOMEM;
				goto err_map;
			}

			if (write) {
				if (copy_from_user(metadata,
						(void __user *)meta_buf,
						meta_len)) {
					ret = -EFAULT;
					goto err_meta;
				}
			}
			vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma);
		}

		bio->bi_disk = disk;
	}

	blk_execute_rq(q, NULL, rq, 0);

	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else if (nvme_req(rq)->status & 0x7ff)
		ret = -EIO;
	if (result)
		*result = nvme_req(rq)->status & 0x7ff;
	if (status)
		*status = le64_to_cpu(nvme_req(rq)->result.u64);

	if (metadata && !ret && !write) {
		if (copy_to_user(meta_buf, (void *)metadata, meta_len))
			ret = -EFAULT;
	}
err_meta:
	if (meta_buf && meta_len)
		dma_pool_free(dev->dma_pool, metadata, metadata_dma);
err_map:
	if (bio)
		blk_rq_unmap_user(bio);
err_ppa:
	if (ppa_buf && ppa_len)
		dma_pool_free(dev->dma_pool, ppa_list, ppa_dma);
err_rq:
	blk_mq_free_request(rq);
err_cmd:
	return ret;
}