Пример #1
0
/*
 * Drain the Xen shared I/O ring: pull guest requests up to the producer
 * index, parse each one and submit the I/O either synchronously or via
 * aio depending on the global 'use_aio' flag.  Work that could not be
 * started (no free ioreq slot) is retried later through the bottom half.
 */
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    if (use_aio)
        blk_send_response_all(blkdev);
    while ((rc != rp)) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc))
            break;
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            /* No free ioreq slot: remember there is pending work and stop. */
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {
            /* Malformed request: respond immediately and recycle the slot. */
            if (blk_send_response_one(ioreq))
                xen_be_send_notify(&blkdev->xendev);
            ioreq_release(ioreq);
            continue;
        }

        if (use_aio) {
            /* run i/o in aio mode */
            ioreq_runio_qemu_aio(ioreq);
        } else {
            /* run i/o in sync mode */
            ioreq_runio_qemu_sync(ioreq);
        }
    }
    if (!use_aio)
        blk_send_response_all(blkdev);

    /* Deferred work remains and we still have capacity: reschedule ourselves. */
    if (blkdev->more_work && blkdev->requests_inflight < max_requests)
        qemu_bh_schedule(blkdev->bh);
}
/* Send basic block requests */
/*
 * Send a basic 6-byte packet command (opcode + one data byte) down the
 * queue and wait for completion.  Returns the blk_execute_rq() result.
 */
static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
			      int cmd, int data)
{
	struct request *rq;
	int ret;

	/* __GFP_WAIT: sleep until a free request is available. */
	rq = blk_get_request(q, WRITE, __GFP_WAIT);

	/* Build the 6-byte CDB: opcode in byte 0, payload byte in byte 4. */
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->cmd[0] = cmd;
	rq->cmd[4] = data;
	rq->cmd_len = 6;
	rq->timeout = BLK_DEFAULT_SG_TIMEOUT;

	ret = blk_execute_rq(q, bd_disk, rq, 0);
	blk_put_request(rq);

	return ret;
}
Пример #3
0
/* Send basic block requests */
/*
 * Send a basic 6-byte packet command with no data payload and wait for
 * completion.  Returns the blk_execute_rq() result.
 */
static int __blk_send_generic(request_queue_t *q, struct gendisk *bd_disk, int cmd, int data)
{
	int ret;
	struct request *rq;

	/* __GFP_WAIT: sleep until a free request is available. */
	rq = blk_get_request(q, WRITE, __GFP_WAIT);

	/* Packet-command request, no data transfer. */
	rq->flags |= REQ_BLOCK_PC;
	rq->data = NULL;
	rq->data_len = 0;
	rq->timeout = BLK_DEFAULT_TIMEOUT;

	/* 6-byte CDB: opcode in byte 0, payload byte in byte 4, rest zeroed. */
	memset(rq->cmd, 0, sizeof(rq->cmd));
	rq->cmd[0] = cmd;
	rq->cmd[4] = data;
	rq->cmd_len = 6;

	ret = blk_execute_rq(q, bd_disk, rq, 0);
	blk_put_request(rq);

	return ret;
}
Пример #4
0
/* Send basic block requests */
/*
 * Send a basic 6-byte SCSI command (opcode + one data byte) and wait for
 * completion.  Returns 0 on success, -EIO on any non-zero SCSI result, or
 * the error from request allocation.
 */
static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
			      int cmd, int data)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(q, REQ_OP_SCSI_OUT, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
	/* 6-byte CDB: opcode in byte 0, payload byte in byte 4. */
	scsi_req(rq)->cmd_len = 6;
	scsi_req(rq)->cmd[0] = cmd;
	scsi_req(rq)->cmd[4] = data;

	blk_execute_rq(q, bd_disk, rq, 0);
	/* Collapse any non-zero SCSI result to -EIO. */
	ret = scsi_req(rq)->result ? -EIO : 0;
	blk_put_request(rq);

	return ret;
}
Пример #5
0
/* Send basic block requests */
/*
 * Send a basic 6-byte packet command (opcode + one data byte) and wait
 * for completion.  Returns the blk_execute_rq() result, or the error
 * from request allocation.
 */
static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
			      int cmd, int data)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(q, WRITE, __GFP_RECLAIM);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	blk_rq_set_block_pc(rq);
	rq->timeout = BLK_DEFAULT_SG_TIMEOUT;

	/* 6-byte CDB: opcode in byte 0, payload byte in byte 4. */
	rq->cmd_len = 6;
	rq->cmd[0] = cmd;
	rq->cmd[4] = data;

	ret = blk_execute_rq(q, bd_disk, rq, 0);
	blk_put_request(rq);

	return ret;
}
Пример #6
0
/*
 * Queue a mailbox message on the tx request queue and kick the tx worker.
 * Returns 0 on success or -ENOMEM if no request could be allocated.
 */
int omap_mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg, void* arg)
{
    struct request_queue *q = mbox->txq->queue;
    struct request *rq = blk_get_request(q, WRITE, GFP_ATOMIC);

    if (unlikely(!rq))
        return -ENOMEM;

    /* Stash the message in the request and insert it for the tx worker. */
    rq->data = (void *)msg;
    blk_insert_request(q, rq, 0, arg);

    schedule_work(&mbox->txq->work);
    return 0;
}
Пример #7
0
/* return id (s/n) string for *disk to *id_str
 */
/*
 * Return the id (serial number) string for *disk into *id_str by issuing
 * a driver-internal request.  Returns 0 on success or a negative errno.
 */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
	struct virtio_blk *vblk = disk->private_data;
	struct request_queue *q = vblk->disk->queue;
	struct request *req;
	int err;

	req = blk_get_request(q, REQ_OP_DRV_IN, GFP_KERNEL);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* Map the caller's buffer, then execute synchronously. */
	err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
	if (!err)
		err = blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);

	blk_put_request(req);
	return err;
}
Пример #8
0
/*
 * Queue a REQUEST SENSE packet command ahead of the current request after
 * a command failed with a check condition.  Allocates a packet-command
 * struct, a block request and a sense buffer; all three are freed again
 * if any allocation fails.  Returns 0 on success or -ENOMEM.
 */
static int idescsi_check_condition(ide_drive_t *drive,
		struct request *failed_cmd)
{
	idescsi_scsi_t *scsi = drive_to_idescsi(drive);
	struct ide_atapi_pc   *pc;
	struct request *rq;
	u8             *buf;

	/* stuff a sense request in front of our current request */
	pc = kzalloc(sizeof(struct ide_atapi_pc), GFP_ATOMIC);
	rq = blk_get_request(drive->queue, READ, GFP_ATOMIC);
	buf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_ATOMIC);
	if (!pc || !rq || !buf) {
		/* kfree(NULL) is a no-op, so only rq needs a guard. */
		kfree(buf);
		if (rq)
			blk_put_request(rq);
		kfree(pc);
		return -ENOMEM;
	}
	rq->special = (char *) pc;
	pc->rq = rq;
	pc->buf = buf;
	/* REQUEST SENSE CDB: transfer a full sense buffer. */
	pc->c[0] = REQUEST_SENSE;
	pc->c[4] = pc->req_xfer = pc->buf_size = SCSI_SENSE_BUFFERSIZE;
	rq->cmd_type = REQ_TYPE_SENSE;
	rq->cmd_flags |= REQ_PREEMPT;
	pc->timeout = jiffies + WAIT_READY;
	/* NOTE! Save the failed packet command in "rq->buffer" */
	rq->buffer = (void *) failed_cmd->special;
	pc->scsi_cmd = ((struct ide_atapi_pc *) failed_cmd->special)->scsi_cmd;
	if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) {
		printk ("ide-scsi: %s: queue cmd = ", drive->name);
		ide_scsi_hex_dump(pc->c, 6);
	}
	rq->rq_disk = scsi->disk;
	rq->ref_count++;
	memcpy(rq->cmd, pc->c, 12);
	ide_do_drive_cmd(drive, rq);
	return 0;
}
Пример #9
0
/*
 * Copy the transport request payload from user space into the bsg job
 * and, for bidirectional commands (both dout and din lengths set),
 * allocate and user-map the second (din) request.  Returns 0 on success
 * or a negative errno; on failure the copied request buffer and any bidi
 * request are released again.
 */
static int bsg_transport_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
		fmode_t mode)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(rq);
	int ret;

	job->request_len = hdr->request_len;
	job->request = memdup_user(uptr64(hdr->request), hdr->request_len);
	if (IS_ERR(job->request))
		return PTR_ERR(job->request);

	if (hdr->dout_xfer_len && hdr->din_xfer_len) {
		job->bidi_rq = blk_get_request(rq->q, REQ_OP_SCSI_IN, 0);
		if (IS_ERR(job->bidi_rq)) {
			ret = PTR_ERR(job->bidi_rq);
			goto out;
		}

		ret = blk_rq_map_user(rq->q, job->bidi_rq, NULL,
				uptr64(hdr->din_xferp), hdr->din_xfer_len,
				GFP_KERNEL);
		if (ret)
			goto out_free_bidi_rq;

		/* Remember the bio so completion can unmap the user pages. */
		job->bidi_bio = job->bidi_rq->bio;
	} else {
		job->bidi_rq = NULL;
		job->bidi_bio = NULL;
	}

	return 0;

out_free_bidi_rq:
	if (job->bidi_rq)
		blk_put_request(job->bidi_rq);
out:
	kfree(job->request);
	return ret;
}
Пример #10
0
/*
 * Resume an IDE drive: restore ACPI state/timings where applicable, issue
 * a PM-resume request through the drive's queue, then call the attached
 * driver's resume hook.  Returns the blk_execute_rq() result.
 */
int generic_ide_resume(struct device *dev)
{
	ide_drive_t *drive = dev_get_drvdata(dev);
	ide_drive_t *pair = ide_get_pair_dev(drive);
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq;
	struct request_pm_state rqpm;
	int err;

	if (ide_port_acpi(hwif)) {
		/* call ACPI _PS0 / _STM only once */
		if ((drive->dn & 1) == 0 || pair == NULL) {
			ide_acpi_set_state(hwif, 1);
			ide_acpi_push_timing(hwif);
		}

		ide_acpi_exec_tfs(drive);
	}

	/* rqpm lives on the stack; the request is executed synchronously. */
	memset(&rqpm, 0, sizeof(rqpm));
	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
	rq->cmd_type = REQ_TYPE_PM_RESUME;
	/* REQ_PREEMPT: allowed to run while the queue is otherwise stopped. */
	rq->cmd_flags |= REQ_PREEMPT;
	rq->special = &rqpm;
	rqpm.pm_step = IDE_PM_START_RESUME;
	rqpm.pm_state = PM_EVENT_ON;

	/* Insert at the head of the queue (last argument = 1). */
	err = blk_execute_rq(drive->queue, NULL, rq, 1);
	blk_put_request(rq);

	if (err == 0 && dev->driver) {
		struct ide_driver *drv = to_ide_driver(dev->driver);

		if (drv->resume)
			drv->resume(drive);
	}

	return err;
}
Пример #11
0
/*
 * Apply a device setting.  Settings without DS_SYNC are applied directly;
 * synchronous ones are funneled through the request queue as a special
 * command so they execute in queue context.  Returns 0 on success or the
 * request's error value.
 */
int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting,
		       int arg)
{
	struct request_queue *q = drive->queue;
	struct request *rq;
	int ret = 0;

	if (!(setting->flags & DS_SYNC))
		return setting->set(drive, arg);

	rq = blk_get_request(q, READ, __GFP_WAIT);
	rq->cmd_type = REQ_TYPE_SPECIAL;
	/* cmd[0] = opcode, cmd[1..4] = the int argument stored in-place. */
	rq->cmd_len = 5;
	rq->cmd[0] = REQ_DEVSET_EXEC;
	*(int *)&rq->cmd[1] = arg;
	/* The setter callback is carried in ->special for the handler. */
	rq->special = setting->set;

	if (blk_execute_rq(q, NULL, rq, 0))
		ret = rq->errors;
	blk_put_request(rq);

	return ret;
}
Пример #12
0
/*
 * hp_sw_start_stop - Send START STOP UNIT command
 * @sdev: sdev command should be sent to
 *
 * Sending START STOP UNIT activates the SP.
 */
/*
 * Build and fire a START STOP UNIT command asynchronously; completion is
 * handled by start_stop_endio with 'h' as end_io_data.  Returns SCSI_DH_OK
 * when the request was queued, SCSI_DH_RES_TEMP_UNAVAIL if allocation
 * failed.
 */
static int hp_sw_start_stop(struct hp_sw_dh_data *h)
{
	struct request *req;

	req = blk_get_request(h->sdev->request_queue, WRITE, GFP_ATOMIC);
	if (IS_ERR(req))
		return SCSI_DH_RES_TEMP_UNAVAIL;

	blk_rq_set_block_pc(req);
	/* Fail fast on any transport/device problem; no retries here. */
	req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
			  REQ_FAILFAST_DRIVER;
	req->cmd_len = COMMAND_SIZE(START_STOP);
	req->cmd[0] = START_STOP;
	req->cmd[4] = 1;	/* Start spin cycle */
	req->timeout = HP_SW_TIMEOUT;
	/* Sense data lands in the handler context buffer. */
	req->sense = h->sense;
	memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
	req->sense_len = 0;
	req->end_io_data = h;

	/* at_head = 1: queue in front of normal I/O; completes via endio. */
	blk_execute_rq_nowait(req->q, NULL, req, 1, start_stop_endio);
	return SCSI_DH_OK;
}
Пример #13
0
/*
 * Apply a device setting.  Settings without DS_SYNC are applied directly;
 * synchronous ones are funneled through the request queue as a private
 * misc request so they execute in queue context.  Returns the request's
 * result value.
 */
int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting,
		       int arg)
{
	struct request_queue *q = drive->queue;
	struct request *rq;
	int ret = 0;

	if (!(setting->flags & DS_SYNC))
		return setting->set(drive, arg);

	rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
	ide_req(rq)->type = ATA_PRIV_MISC;
	/* cmd[0] = opcode, cmd[1..4] = the int argument stored in-place. */
	scsi_req(rq)->cmd_len = 5;
	scsi_req(rq)->cmd[0] = REQ_DEVSET_EXEC;
	*(int *)&scsi_req(rq)->cmd[1] = arg;
	/* The setter callback is carried in ->special for the handler. */
	ide_req(rq)->special = setting->set;

	blk_execute_rq(q, NULL, rq, 0);
	ret = scsi_req(rq)->result;
	blk_put_request(rq);

	return ret;
}
Пример #14
0
/*
 * Drain the mailbox receive FIFO: wrap each message in a block request on
 * the rx queue and kick the rx worker.  TYPE1 mailboxes only take one
 * message per interrupt.
 */
static void __mbox_rx_interrupt(struct omap_mbox *mbox)
{
	struct request *rq;
	mbox_msg_t msg;
	struct request_queue *q = mbox->rxq->queue;

	while (!mbox_fifo_empty(mbox)) {
		rq = blk_get_request(q, WRITE, GFP_ATOMIC);
		if (unlikely(!rq))
			goto nomem;

		msg = mbox_fifo_read(mbox);


		blk_insert_request(q, rq, 0, (void *)msg);
		if (mbox->ops->type == OMAP_MBOX_TYPE1)
			break;
	}

	/* no more messages in the fifo. clear IRQ source. */
	ack_mbox_irq(mbox, IRQ_RX);
	/*
	 * NOTE(review): on allocation failure the IRQ is deliberately left
	 * unacked (ack_mbox_irq is skipped) — presumably so the interrupt
	 * re-fires once the work handler frees requests; confirm against
	 * the mailbox hardware semantics.
	 */
nomem:
	schedule_work(&mbox->rxq->work);
}
Пример #15
0
/*
 * Suspend an IDE drive: capture ACPI timings, issue a PM-suspend request
 * through the drive's queue, then (on success) set the ACPI power state.
 * Returns 0 on success or -EIO if the suspend request failed.
 */
int generic_ide_suspend(struct device *dev, pm_message_t mesg)
{
	ide_drive_t *drive = to_ide_device(dev);
	ide_drive_t *pair = ide_get_pair_dev(drive);
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq;
	struct ide_pm_state rqpm;
	int ret;

	if (ide_port_acpi(hwif)) {
		/* call ACPI _GTM only once */
		if ((drive->dn & 1) == 0 || pair == NULL)
			ide_acpi_get_timing(hwif);
	}

	/* rqpm lives on the stack; the request is executed synchronously. */
	memset(&rqpm, 0, sizeof(rqpm));
	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
	ide_req(rq)->type = ATA_PRIV_PM_SUSPEND;
	rq->special = &rqpm;
	rqpm.pm_step = IDE_PM_START_SUSPEND;
	if (mesg.event == PM_EVENT_PRETHAW)
		mesg.event = PM_EVENT_FREEZE;
	rqpm.pm_state = mesg.event;

	blk_execute_rq(drive->queue, NULL, rq, 0);
	/* Any non-zero result is collapsed to -EIO. */
	ret = scsi_req(rq)->result ? -EIO : 0;
	blk_put_request(rq);

	if (ret == 0 && ide_port_acpi(hwif)) {
		/* call ACPI _PS3 only after both devices are suspended */
		if ((drive->dn & 1) || pair == NULL)
			ide_acpi_set_state(hwif, 0);
	}

	return ret;
}
Пример #16
0
/**
 * sg_scsi_ioctl  --  handle deprecated SCSI_IOCTL_SEND_COMMAND ioctl
 * @q:		request queue to send scsi commands down
 * @disk:	gendisk to operate on (option)
 * @mode:	mode used to open the file through which the ioctl has been
 *		submitted
 * @sic:	userspace structure describing the command to perform
 *
 * Send down the scsi command described by @sic to the device below
 * the request queue @q.  If @file is non-NULL it's used to perform
 * fine-grained permission checks that allow users to send down
 * non-destructive SCSI commands.  If the caller has a struct gendisk
 * available it should be passed in as @disk to allow the low level
 * driver to use the information contained in it.  A non-NULL @disk
 * is only allowed if the caller knows that the low level driver doesn't
 * need it (e.g. in the scsi subsystem).
 *
 * Notes:
 *   -  This interface is deprecated - users should use the SG_IO
 *      interface instead, as this is a more flexible approach to
 *      performing SCSI commands on a device.
 *   -  The SCSI command length is determined by examining the 1st byte
 *      of the given command. There is no way to override this.
 *   -  Data transfers are limited to PAGE_SIZE
 *   -  The length (x + y) must be at least OMAX_SB_LEN bytes long to
 *      accommodate the sense buffer when an error occurs.
 *      The sense buffer is truncated to OMAX_SB_LEN (16) bytes so that
 *      old code will not be surprised.
 *   -  If a Unix error occurs (e.g. ENOMEM) then the user will receive
 *      a negative return and the Unix error code in 'errno'.
 *      If the SCSI command succeeds then 0 is returned.
 *      Positive numbers returned are the compacted SCSI error codes (4
 *      bytes in one int) where the lowest byte is the SCSI status.
 */
int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
		struct scsi_ioctl_command __user *sic)
{
	enum { OMAX_SB_LEN = 16 };	/* For backward compatibility */
	struct request *rq;
	struct scsi_request *req;
	int err;
	unsigned int in_len, out_len, bytes, opcode, cmdlen;
	char *buffer = NULL;

	if (!sic)
		return -EINVAL;

	/*
	 * get in an out lengths, verify they don't exceed a page worth of data
	 */
	if (get_user(in_len, &sic->inlen))
		return -EFAULT;
	if (get_user(out_len, &sic->outlen))
		return -EFAULT;
	if (in_len > PAGE_SIZE || out_len > PAGE_SIZE)
		return -EINVAL;
	if (get_user(opcode, sic->data))
		return -EFAULT;

	/* One bounce buffer serves both directions. */
	bytes = max(in_len, out_len);
	if (bytes) {
		buffer = kzalloc(bytes, q->bounce_gfp | GFP_USER| __GFP_NOWARN);
		if (!buffer)
			return -ENOMEM;

	}

	/* Direction follows in_len: data to send means an OUT command. */
	rq = blk_get_request(q, in_len ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto error_free_buffer;
	}
	req = scsi_req(rq);

	/* CDB length is derived from the opcode's command group. */
	cmdlen = COMMAND_SIZE(opcode);

	/*
	 * get command and data to send to device, if any
	 */
	err = -EFAULT;
	req->cmd_len = cmdlen;
	if (copy_from_user(req->cmd, sic->data, cmdlen))
		goto error;

	if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
		goto error;

	err = blk_verify_command(req->cmd, mode);
	if (err)
		goto error;

	/* default.  possible overriden later */
	req->retries = 5;

	/* Per-opcode timeouts and retry counts. */
	switch (opcode) {
	case SEND_DIAGNOSTIC:
	case FORMAT_UNIT:
		rq->timeout = FORMAT_UNIT_TIMEOUT;
		req->retries = 1;
		break;
	case START_STOP:
		rq->timeout = START_STOP_TIMEOUT;
		break;
	case MOVE_MEDIUM:
		rq->timeout = MOVE_MEDIUM_TIMEOUT;
		break;
	case READ_ELEMENT_STATUS:
		rq->timeout = READ_ELEMENT_STATUS_TIMEOUT;
		break;
	case READ_DEFECT_DATA:
		rq->timeout = READ_DEFECT_DATA_TIMEOUT;
		req->retries = 1;
		break;
	default:
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
		break;
	}

	if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, GFP_NOIO)) {
		err = DRIVER_ERROR << 24;
		goto error;
	}

	blk_execute_rq(q, disk, rq, 0);

	err = req->result & 0xff;	/* only 8 bit SCSI status */
	if (err) {
		/* On SCSI error, return (truncated) sense data instead. */
		if (req->sense_len && req->sense) {
			bytes = (OMAX_SB_LEN > req->sense_len) ?
				req->sense_len : OMAX_SB_LEN;
			if (copy_to_user(sic->data, req->sense, bytes))
				err = -EFAULT;
		}
	} else {
		if (copy_to_user(sic->data, buffer, out_len))
			err = -EFAULT;
	}
	
error:
	blk_put_request(rq);

error_free_buffer:
	kfree(buffer);

	return err;
}
/*
 * Execute an SG_IO v3 request described by *hdr: validate the header,
 * build a block request, map the user data (iovec or flat buffer),
 * execute synchronously and fill the result fields back into *hdr.
 * Returns 0 / negative errno; SCSI status is reported via the header.
 */
static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
		struct sg_io_hdr *hdr, fmode_t mode)
{
	unsigned long start_time;
	int writing = 0, ret = 0;
	struct request *rq;
	char sense[SCSI_SENSE_BUFFERSIZE];
	struct bio *bio;

	if (hdr->interface_id != 'S')
		return -EINVAL;
	if (hdr->cmd_len > BLK_MAX_CDB)
		return -EINVAL;

	/* Transfer must fit in one request at the hardware limit. */
	if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
		return -EIO;

	if (hdr->dxfer_len)
		switch (hdr->dxfer_direction) {
		default:
			return -EINVAL;
		case SG_DXFER_TO_DEV:
			writing = 1;
			break;
		case SG_DXFER_TO_FROM_DEV:
		case SG_DXFER_FROM_DEV:
			break;
		}

	rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	if (blk_fill_sghdr_rq(q, rq, hdr, mode)) {
		blk_put_request(rq);
		return -EFAULT;
	}

	if (hdr->iovec_count) {
		const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
		size_t iov_data_len;
		struct sg_iovec *sg_iov;
		struct iovec *iov;
		int i;

		sg_iov = kmalloc(size, GFP_KERNEL);
		if (!sg_iov) {
			ret = -ENOMEM;
			goto out;
		}

		if (copy_from_user(sg_iov, hdr->dxferp, size)) {
			kfree(sg_iov);
			ret = -EFAULT;
			goto out;
		}

		/*
		 * Sum up the vecs, making sure they don't overflow
		 */
		iov = (struct iovec *) sg_iov;
		iov_data_len = 0;
		for (i = 0; i < hdr->iovec_count; i++) {
			if (iov_data_len + iov[i].iov_len < iov_data_len) {
				kfree(sg_iov);
				ret = -EINVAL;
				goto out;
			}
			iov_data_len += iov[i].iov_len;
		}

		/* SG_IO howto says that the shorter of the two wins */
		if (hdr->dxfer_len < iov_data_len) {
			hdr->iovec_count = iov_shorten(iov,
						       hdr->iovec_count,
						       hdr->dxfer_len);
			iov_data_len = hdr->dxfer_len;
		}

		ret = blk_rq_map_user_iov(q, rq, NULL, sg_iov, hdr->iovec_count,
					  iov_data_len, GFP_KERNEL);
		kfree(sg_iov);
	} else if (hdr->dxfer_len)
		ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
				      GFP_KERNEL);

	if (ret)
		goto out;

	/* Save the mapped bio; completion needs it to unmap user pages. */
	bio = rq->bio;
	memset(sense, 0, sizeof(sense));
	rq->sense = sense;
	rq->sense_len = 0;
	rq->retries = 0;

	start_time = jiffies;

	/* ignore return value. All information is passed back to caller
	 * (if he doesn't check that is his problem).
	 * N.B. a non-zero SCSI status is _not_ necessarily an error.
	 */
	blk_execute_rq(q, bd_disk, rq, 0);

	hdr->duration = jiffies_to_msecs(jiffies - start_time);

	/* blk_complete_sghdr_rq also releases the request. */
	return blk_complete_sghdr_rq(rq, hdr, bio);
out:
	blk_put_request(rq);
	return ret;
}
Пример #18
0
/*
 * Execute an SG_IO v3 request described by *hdr (older variant taking the
 * opening struct file for permission checks): validate the header, build
 * a block request, map the user data and execute synchronously.  Returns
 * 0 / negative errno; SCSI status is reported via the header.
 */
static int sg_io(struct file *file, struct request_queue *q,
		struct gendisk *bd_disk, struct sg_io_hdr *hdr)
{
	unsigned long start_time;
	int writing = 0, ret = 0, has_write_perm = 0;
	struct request *rq;
	char sense[SCSI_SENSE_BUFFERSIZE];
	struct bio *bio;

	if (hdr->interface_id != 'S')
		return -EINVAL;
	if (hdr->cmd_len > BLK_MAX_CDB)
		return -EINVAL;

	/* Transfer must fit in one request at the hardware limit. */
	if (hdr->dxfer_len > (q->max_hw_sectors << 9))
		return -EIO;

	if (hdr->dxfer_len)
		switch (hdr->dxfer_direction) {
		default:
			return -EINVAL;
		case SG_DXFER_TO_DEV:
			writing = 1;
			break;
		case SG_DXFER_TO_FROM_DEV:
		case SG_DXFER_FROM_DEV:
			break;
		}

	rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	/* Command verification depends on the opener's write permission. */
	if (file)
		has_write_perm = file->f_mode & FMODE_WRITE;

	if (blk_fill_sghdr_rq(q, rq, hdr, has_write_perm)) {
		blk_put_request(rq);
		return -EFAULT;
	}

	if (hdr->iovec_count) {
		const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
		struct sg_iovec *iov;

		iov = kmalloc(size, GFP_KERNEL);
		if (!iov) {
			ret = -ENOMEM;
			goto out;
		}

		if (copy_from_user(iov, hdr->dxferp, size)) {
			kfree(iov);
			ret = -EFAULT;
			goto out;
		}

		ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count,
					  hdr->dxfer_len);
		kfree(iov);
	} else if (hdr->dxfer_len)
		ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len);

	if (ret)
		goto out;

	/* Save the mapped bio; completion needs it to unmap user pages. */
	bio = rq->bio;
	memset(sense, 0, sizeof(sense));
	rq->sense = sense;
	rq->sense_len = 0;
	rq->retries = 0;

	start_time = jiffies;

	/* ignore return value. All information is passed back to caller
	 * (if he doesn't check that is his problem).
	 * N.B. a non-zero SCSI status is _not_ necessarily an error.
	 */
	blk_execute_rq(q, bd_disk, rq, 0);

	hdr->duration = jiffies_to_msecs(jiffies - start_time);

	/* blk_complete_sghdr_rq also releases the request. */
	return blk_complete_sghdr_rq(rq, hdr, bio);
out:
	blk_put_request(rq);
	return ret;
}
Пример #19
0
/*
* Get block request for REQ_BLOCK_PC command issued to path.  Currently
* limited to MODE_SELECT (trespass) and INQUIRY (VPD page 0xC0) commands.
*
* Uses data and sense buffers in hardware handler context structure and
* assumes serial servicing of commands, both issuance and completion.
*/
/*
 * Build a packet-command request for MODE_SELECT (trespass) or INQUIRY
 * (VPD page 0xC0) against *sdev, with data and sense buffers from the
 * handler context.  Returns the request ready for execution, or NULL on
 * allocation/mapping failure.
 */
static struct request *get_req(struct scsi_device *sdev, int cmd)
{
	struct clariion_dh_data *csdev = get_clariion_data(sdev);
	struct request *rq;
	unsigned char *page22;
	int len = 0;

	/* MODE_SELECT writes data to the device; INQUIRY reads from it. */
	rq = blk_get_request(sdev->request_queue,
			(cmd == MODE_SELECT) ? WRITE : READ, GFP_ATOMIC);
	if (!rq) {
		sdev_printk(KERN_INFO, sdev, "get_req: blk_get_request failed");
		return NULL;
	}

	memset(&rq->cmd, 0, BLK_MAX_CDB);
	rq->cmd[0] = cmd;
	rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);

	switch (cmd) {
	case MODE_SELECT:
		/* Pick short/long trespass page, HR variant if enabled. */
		if (csdev->short_trespass) {
			page22 = csdev->hr ? short_trespass_hr : short_trespass;
			len = sizeof(short_trespass);
		} else {
			page22 = csdev->hr ? long_trespass_hr : long_trespass;
			len = sizeof(long_trespass);
		}
		/*
		 * Can't DMA from kernel BSS -- must copy selected trespass
		 * command mode page contents to context buffer which is
		 * allocated by kmalloc.
		 */
		BUG_ON((len > CLARIION_BUFFER_SIZE));
		memcpy(csdev->buffer, page22, len);
		rq->cmd_flags |= REQ_RW;
		rq->cmd[1] = 0x10;
		break;
	case INQUIRY:
		/* EVPD bit set, page code 0xC0. */
		rq->cmd[1] = 0x1;
		rq->cmd[2] = 0xC0;
		len = CLARIION_BUFFER_SIZE;
		memset(csdev->buffer, 0, CLARIION_BUFFER_SIZE);
		break;
	default:
		BUG_ON(1);
		break;
	}

	rq->cmd[4] = len;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->cmd_flags |= REQ_FAILFAST;
	rq->timeout = CLARIION_TIMEOUT;
	rq->retries = CLARIION_RETRIES;

	/* Sense data lands in the handler context buffer. */
	rq->sense = csdev->sense;
	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
	rq->sense_len = 0;

	if (blk_rq_map_kern(sdev->request_queue, rq, csdev->buffer,
							len, GFP_ATOMIC)) {
		__blk_put_request(rq->q, rq);
		return NULL;
	}

	return rq;
}
Пример #20
0
/*
 * Issue a SCSI command for the osst tape driver asynchronously.  Data is
 * either described by a scatterlist (use_sg entries, mapped via
 * rq_map_data) or by a flat kernel buffer.  Completion runs
 * osst_end_async with SRpnt as end_io_data.  Returns 0 when queued, or a
 * DRIVER_ERROR status on setup failure.
 */
static int osst_execute(struct osst_request *SRpnt, const unsigned char *cmd,
			int cmd_len, int data_direction, void *buffer, unsigned bufflen,
			int use_sg, int timeout, int retries)
{
	struct request *req;
	struct page **pages = NULL;
	struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data;

	int err = 0;
	int write = (data_direction == DMA_TO_DEVICE);

	req = blk_get_request(SRpnt->stp->device->request_queue, write, GFP_KERNEL);
	if (!req)
		return DRIVER_ERROR << 24;

	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->cmd_flags |= REQ_QUIET;

	SRpnt->bio = NULL;

	if (use_sg) {
		/* Collect the scatterlist pages for rq_map_data. */
		struct scatterlist *sg, *sgl = (struct scatterlist *)buffer;
		int i;

		pages = kzalloc(use_sg * sizeof(struct page *), GFP_KERNEL);
		if (!pages)
			goto free_req;

		for_each_sg(sgl, sg, use_sg, i)
			pages[i] = sg_page(sg);

		mdata->null_mapped = 1;

		/* Page order taken from the first segment's length. */
		mdata->page_order = get_order(sgl[0].length);
		mdata->nr_entries =
			DIV_ROUND_UP(bufflen, PAGE_SIZE << mdata->page_order);
		mdata->offset = 0;

		err = blk_rq_map_user(req->q, req, mdata, NULL, bufflen, GFP_KERNEL);
		if (err) {
			kfree(pages);
			goto free_req;
		}
		SRpnt->bio = req->bio;
		mdata->pages = pages;

	} else if (bufflen) {
		err = blk_rq_map_kern(req->q, req, buffer, bufflen, GFP_KERNEL);
		if (err)
			goto free_req;
	}

	req->cmd_len = cmd_len;
	memset(req->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = SRpnt->sense;
	req->sense_len = 0;
	req->timeout = timeout;
	req->retries = retries;
	req->end_io_data = SRpnt;

	/* at_head = 1: run ahead of queued I/O; completes via osst_end_async. */
	blk_execute_rq_nowait(req->q, NULL, req, 1, osst_end_async);
	return 0;
free_req:
	blk_put_request(req);
	return DRIVER_ERROR << 24;
}
Пример #21
0
/*
 * Execute an SG_IO v3 request described by *hdr: validate the header,
 * build a block request, map the user data (iovec or flat buffer),
 * execute synchronously and fill the result fields back into *hdr.
 * Returns 0 / negative errno; SCSI status is reported via the header.
 */
static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
		struct sg_io_hdr *hdr, fmode_t mode)
{
	unsigned long start_time;
	int writing = 0, ret = 0;
	struct request *rq;
	char sense[SCSI_SENSE_BUFFERSIZE];
	struct bio *bio;

	if (hdr->interface_id != 'S')
		return -EINVAL;
	if (hdr->cmd_len > BLK_MAX_CDB)
		return -EINVAL;

	/* Transfer must fit in one request at the hardware limit. */
	if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
		return -EIO;

	if (hdr->dxfer_len)
		switch (hdr->dxfer_direction) {
		default:
			return -EINVAL;
		case SG_DXFER_TO_DEV:
			writing = 1;
			break;
		case SG_DXFER_TO_FROM_DEV:
		case SG_DXFER_FROM_DEV:
			break;
		}

	rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	if (blk_fill_sghdr_rq(q, rq, hdr, mode)) {
		blk_put_request(rq);
		return -EFAULT;
	}

	if (hdr->iovec_count) {
		const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
		size_t iov_data_len;
		struct sg_iovec *sg_iov;
		struct iovec *iov;
		int i;

		sg_iov = kmalloc(size, GFP_KERNEL);
		if (!sg_iov) {
			ret = -ENOMEM;
			goto out;
		}

		if (copy_from_user(sg_iov, hdr->dxferp, size)) {
			kfree(sg_iov);
			ret = -EFAULT;
			goto out;
		}

		/* Sum the vectors, rejecting any total-length overflow. */
		iov = (struct iovec *) sg_iov;
		iov_data_len = 0;
		for (i = 0; i < hdr->iovec_count; i++) {
			if (iov_data_len + iov[i].iov_len < iov_data_len) {
				kfree(sg_iov);
				ret = -EINVAL;
				goto out;
			}
			iov_data_len += iov[i].iov_len;
		}

		/* SG_IO howto: the shorter of dxfer_len and the iovec total wins. */
		if (hdr->dxfer_len < iov_data_len) {
			hdr->iovec_count = iov_shorten(iov,
						       hdr->iovec_count,
						       hdr->dxfer_len);
			iov_data_len = hdr->dxfer_len;
		}

		ret = blk_rq_map_user_iov(q, rq, NULL, sg_iov, hdr->iovec_count,
					  iov_data_len, GFP_KERNEL);
		kfree(sg_iov);
	} else if (hdr->dxfer_len)
		ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
				      GFP_KERNEL);

	if (ret)
		goto out;

	/* Save the mapped bio; completion needs it to unmap user pages. */
	bio = rq->bio;
	memset(sense, 0, sizeof(sense));
	rq->sense = sense;
	rq->sense_len = 0;
	rq->retries = 0;

	start_time = jiffies;

	/*
	 * Return value ignored: all information goes back to the caller via
	 * the header, and a non-zero SCSI status is not necessarily an error.
	 */
	blk_execute_rq(q, bd_disk, rq, 0);

	hdr->duration = jiffies_to_msecs(jiffies - start_time);

	/* blk_complete_sghdr_rq also releases the request. */
	return blk_complete_sghdr_rq(rq, hdr, bio);
out:
	blk_put_request(rq);
	return ret;
}
Пример #22
0
/*
 * Execute an SG_IO v3 request (early variant): map the user buffer into a
 * bio when possible, otherwise fall back to a kernel bounce buffer, run
 * the request synchronously via blk_do_rq() and copy status, sense and
 * data back to user space.  Returns 0 once the header has been filled in
 * (even if the command itself failed), or a negative errno on setup or
 * copy failures.
 */
static int sg_io(request_queue_t *q, struct block_device *bdev,
		 struct sg_io_hdr *hdr)
{
	unsigned long start_time;
	int reading, writing;
	struct request *rq;
	struct bio *bio;
	char sense[SCSI_SENSE_BUFFERSIZE];
	void *buffer;

	if (hdr->interface_id != 'S')
		return -EINVAL;
	if (hdr->cmd_len > sizeof(rq->cmd))
		return -EINVAL;

	/*
	 * we'll do that later
	 */
	if (hdr->iovec_count)
		return -EOPNOTSUPP;

	if (hdr->dxfer_len > (q->max_sectors << 9))
		return -EIO;

	reading = writing = 0;
	buffer = NULL;
	bio = NULL;
	if (hdr->dxfer_len) {
		/* Bounce-buffer size rounded up to a 512-byte multiple. */
		unsigned int bytes = (hdr->dxfer_len + 511) & ~511;

		switch (hdr->dxfer_direction) {
		default:
			return -EINVAL;
		case SG_DXFER_TO_FROM_DEV:
			reading = 1;
			/* fall through */
		case SG_DXFER_TO_DEV:
			writing = 1;
			break;
		case SG_DXFER_FROM_DEV:
			reading = 1;
			break;
		}

		/*
		 * first try to map it into a bio. reading from device will
		 * be a write to vm.
		 */
		bio = bio_map_user(bdev, (unsigned long) hdr->dxferp,
				   hdr->dxfer_len, reading);

		/*
		 * if bio setup failed, fall back to slow approach
		 */
		if (!bio) {
			buffer = kmalloc(bytes, q->bounce_gfp | GFP_USER);
			if (!buffer)
				return -ENOMEM;

			if (writing) {
				if (copy_from_user(buffer, hdr->dxferp,
						   hdr->dxfer_len))
					goto out_buffer;
			} else
				memset(buffer, 0, hdr->dxfer_len);
		}
	}

	rq = blk_get_request(q, writing ? WRITE : READ, __GFP_WAIT);

	/*
	 * fill in request structure
	 */
	rq->cmd_len = hdr->cmd_len;
	memcpy(rq->cmd, hdr->cmdp, hdr->cmd_len);
	/* Zero the tail of the CDB beyond the supplied command bytes. */
	if (sizeof(rq->cmd) != hdr->cmd_len)
		memset(rq->cmd + hdr->cmd_len, 0, sizeof(rq->cmd) - hdr->cmd_len);

	memset(sense, 0, sizeof(sense));
	rq->sense = sense;
	rq->sense_len = 0;

	rq->flags |= REQ_BLOCK_PC;

	rq->bio = rq->biotail = NULL;

	if (bio)
		blk_rq_bio_prep(q, rq, bio);

	/* Bounce-buffer path: data carried directly in the request. */
	rq->data = buffer;
	rq->data_len = hdr->dxfer_len;

	/* User timeout is in milliseconds; fall back to queue/global default. */
	rq->timeout = (hdr->timeout * HZ) / 1000;
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_TIMEOUT;

	start_time = jiffies;

	/* ignore return value. All information is passed back to caller
	 * (if he doesn't check that is his problem).
	 * N.B. a non-zero SCSI status is _not_ necessarily an error.
	 */
	blk_do_rq(q, bdev, rq);

	if (bio)
		bio_unmap_user(bio, reading);

	/* write to all output members */
	hdr->status = rq->errors;	
	hdr->masked_status = (hdr->status >> 1) & 0x1f;
	hdr->msg_status = 0;
	hdr->host_status = 0;
	hdr->driver_status = 0;
	hdr->info = 0;
	if (hdr->masked_status || hdr->host_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->resid = rq->data_len;
	hdr->duration = ((jiffies - start_time) * 1000) / HZ;
	hdr->sb_len_wr = 0;

	if (rq->sense_len && hdr->sbp) {
		int len = min((unsigned int) hdr->mx_sb_len, rq->sense_len);

		if (!copy_to_user(hdr->sbp, rq->sense, len))
			hdr->sb_len_wr = len;
	}

	blk_put_request(rq);

	if (buffer) {
		if (reading)
			if (copy_to_user(hdr->dxferp, buffer, hdr->dxfer_len))
				goto out_buffer;

		kfree(buffer);
	}

	/* may not have succeeded, but output values written to control
	 * structure (struct sg_io_hdr).  */
	return 0;
out_buffer:
	kfree(buffer);
	return -EFAULT;
}
Пример #23
0
/*
 * Execute an SG_IO v3 request (iov_iter-era variant): validate the
 * header, build a block request (allocating an oversized CDB if needed),
 * map the user data via import_iovec or a flat mapping, execute
 * synchronously and fill the result fields back into *hdr.  Returns 0 /
 * negative errno; SCSI status is reported via the header.
 */
static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
		struct sg_io_hdr *hdr, fmode_t mode)
{
	unsigned long start_time;
	ssize_t ret = 0;
	int writing = 0;
	int at_head = 0;
	struct request *rq;
	char sense[SCSI_SENSE_BUFFERSIZE];
	struct bio *bio;

	if (hdr->interface_id != 'S')
		return -EINVAL;

	/* Transfer must fit in one request at the hardware limit. */
	if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
		return -EIO;

	if (hdr->dxfer_len)
		switch (hdr->dxfer_direction) {
		default:
			return -EINVAL;
		case SG_DXFER_TO_DEV:
			writing = 1;
			break;
		case SG_DXFER_TO_FROM_DEV:
		case SG_DXFER_FROM_DEV:
			break;
		}
	if (hdr->flags & SG_FLAG_Q_AT_HEAD)
		at_head = 1;

	ret = -ENOMEM;
	rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	blk_rq_set_block_pc(rq);

	/* CDBs longer than the inline buffer need a separate allocation. */
	if (hdr->cmd_len > BLK_MAX_CDB) {
		rq->cmd = kzalloc(hdr->cmd_len, GFP_KERNEL);
		if (!rq->cmd)
			goto out_put_request;
	}

	ret = blk_fill_sghdr_rq(q, rq, hdr, mode);
	if (ret < 0)
		goto out_free_cdb;

	ret = 0;
	if (hdr->iovec_count) {
		struct iov_iter i;
		struct iovec *iov = NULL;

		ret = import_iovec(rq_data_dir(rq),
				   hdr->dxferp, hdr->iovec_count,
				   0, &iov, &i);
		if (ret < 0)
			goto out_free_cdb;

		/* SG_IO howto says that the shorter of the two wins */
		iov_iter_truncate(&i, hdr->dxfer_len);

		ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);
		kfree(iov);
	} else if (hdr->dxfer_len)
		ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
				      GFP_KERNEL);

	if (ret)
		goto out_free_cdb;

	/* Save the mapped bio; completion needs it to unmap user pages. */
	bio = rq->bio;
	memset(sense, 0, sizeof(sense));
	rq->sense = sense;
	rq->sense_len = 0;
	rq->retries = 0;

	start_time = jiffies;

	/* ignore return value. All information is passed back to caller
	 * (if he doesn't check that is his problem).
	 * N.B. a non-zero SCSI status is _not_ necessarily an error.
	 */
	blk_execute_rq(q, bd_disk, rq, at_head);

	hdr->duration = jiffies_to_msecs(jiffies - start_time);

	ret = blk_complete_sghdr_rq(rq, hdr, bio);

out_free_cdb:
	/* Only free the CDB if it was the separately-allocated long one. */
	if (rq->cmd != rq->__cmd)
		kfree(rq->cmd);
out_put_request:
	blk_put_request(rq);
	return ret;
}
Пример #24
0
/*
 * Issue an ATAPI packet command to the CD drive, retrying on failures
 * that look transient (unit attention, drive still loading a disc).
 * The caller's sense buffer is used when provided, otherwise a local one.
 * Returns 0 on success or -EIO if the command ultimately failed.
 */
int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
		    int write, void *buffer, unsigned *bufflen,
		    struct request_sense *sense, int timeout,
		    unsigned int cmd_flags)
{
	struct cdrom_info *info = drive->driver_data;
	struct request_sense local_sense;
	int retries = 10;
	unsigned int flags = 0;

	if (!sense)
		sense = &local_sense;

	ide_debug_log(IDE_DBG_PC, "cmd[0]: 0x%x, write: 0x%x, timeout: %d, "
				  "cmd_flags: 0x%x",
				  cmd[0], write, timeout, cmd_flags);

	/* start of retry loop */
	do {
		struct request *rq;
		int error;

		rq = blk_get_request(drive->queue, write, __GFP_WAIT);

		memcpy(rq->cmd, cmd, BLK_MAX_CDB);
		rq->cmd_type = REQ_TYPE_ATA_PC;
		rq->sense = sense;
		rq->cmd_flags |= cmd_flags;
		rq->timeout = timeout;
		if (buffer) {
			rq->data = buffer;
			rq->data_len = *bufflen;
		}

		error = blk_execute_rq(drive->queue, info->disk, rq, 0);

		/* Report back how much data actually remained/transferred. */
		if (buffer)
			*bufflen = rq->data_len;

		/* Keep the flags: REQ_FAILED drives the retry decision below. */
		flags = rq->cmd_flags;
		blk_put_request(rq);

		/*
		 * FIXME: we should probably abort/retry or something in case of
		 * failure.
		 */
		if (flags & REQ_FAILED) {
			/*
			 * The request failed.  Retry if it was due to a unit
			 * attention status (usually means media was changed).
			 */
			struct request_sense *reqbuf = sense;

			if (reqbuf->sense_key == UNIT_ATTENTION)
				cdrom_saw_media_change(drive);
			else if (reqbuf->sense_key == NOT_READY &&
				 reqbuf->asc == 4 && reqbuf->ascq != 4) {
				/*
				 * The drive is in the process of loading
				 * a disk.  Retry, but wait a little to give
				 * the drive time to complete the load.
				 */
				ssleep(2);
			} else {
				/* otherwise, don't retry */
				retries = 0;
			}
			--retries;
		}

		/* end of retry loop */
	} while ((flags & REQ_FAILED) && retries >= 0);

	/* return an error if the command failed */
	return (flags & REQ_FAILED) ? -EIO : 0;
}
/**
 * sg_scsi_ioctl  --  handle deprecated SCSI_IOCTL_SEND_COMMAND ioctl
 * @file:	file this ioctl operates on (optional)
 * @q:		request queue to send scsi commands down
 * @disk:	gendisk to operate on (option)
 * @sic:	userspace structure describing the command to perform
 *
 * Send down the scsi command described by @sic to the device below
 * the request queue @q.  If @file is non-NULL it's used to perform
 * fine-grained permission checks that allow users to send down
 * non-destructive SCSI commands.  If the caller has a struct gendisk
 * available it should be passed in as @disk to allow the low level
 * driver to use the information contained in it.  A non-NULL @disk
 * is only allowed if the caller knows that the low level driver doesn't
 * need it (e.g. in the scsi subsystem).
 *
 * Notes:
 *   -  This interface is deprecated - users should use the SG_IO
 *      interface instead, as this is a more flexible approach to
 *      performing SCSI commands on a device.
 *   -  The SCSI command length is determined by examining the 1st byte
 *      of the given command. There is no way to override this.
 *   -  Data transfers are limited to PAGE_SIZE
 *   -  The length (x + y) must be at least OMAX_SB_LEN bytes long to
 *      accommodate the sense buffer when an error occurs.
 *      The sense buffer is truncated to OMAX_SB_LEN (16) bytes so that
 *      old code will not be surprised.
 *   -  If a Unix error occurs (e.g. ENOMEM) then the user will receive
 *      a negative return and the Unix error code in 'errno'.
 *      If the SCSI command succeeds then 0 is returned.
 *      Positive numbers returned are the compacted SCSI error codes (4
 *      bytes in one int) where the lowest byte is the SCSI status.
 */
#define OMAX_SB_LEN 16          /* For backward compatibility */
int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
		struct scsi_ioctl_command __user *sic)
{
	struct request *rq;
	int err;
	unsigned int in_len, out_len, bytes, opcode, cmdlen;
	char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];

	if (!sic)
		return -EINVAL;

	/*
	 * get in and out lengths, verify they don't exceed a page worth
	 * of data
	 */
	if (get_user(in_len, &sic->inlen))
		return -EFAULT;
	if (get_user(out_len, &sic->outlen))
		return -EFAULT;
	if (in_len > PAGE_SIZE || out_len > PAGE_SIZE)
		return -EINVAL;
	if (get_user(opcode, sic->data))
		return -EFAULT;

	bytes = max(in_len, out_len);
	if (bytes) {
		buffer = kzalloc(bytes, q->bounce_gfp | GFP_USER| __GFP_NOWARN);
		if (!buffer)
			return -ENOMEM;

	}

	rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
	/*
	 * NOTE(review): this kernel's blk_get_request() is assumed to
	 * return NULL on failure; if this tree returns ERR_PTR instead,
	 * use IS_ERR()/PTR_ERR() here.
	 */
	if (!rq) {
		err = -ENOMEM;
		goto error_free_buffer;
	}

	/* command length is implied by the opcode; userspace can't override */
	cmdlen = COMMAND_SIZE(opcode);

	/*
	 * get command and data to send to device, if any
	 */
	err = -EFAULT;
	rq->cmd_len = cmdlen;
	if (copy_from_user(rq->cmd, sic->data, cmdlen))
		goto error;

	if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
		goto error;

	err = blk_verify_command(rq->cmd, mode & FMODE_WRITE);
	if (err)
		goto error;

	/* default.  possibly overridden later */
	rq->retries = 5;

	switch (opcode) {
	case SEND_DIAGNOSTIC:
	case FORMAT_UNIT:
		rq->timeout = FORMAT_UNIT_TIMEOUT;
		rq->retries = 1;
		break;
	case START_STOP:
		rq->timeout = START_STOP_TIMEOUT;
		break;
	case MOVE_MEDIUM:
		rq->timeout = MOVE_MEDIUM_TIMEOUT;
		break;
	case READ_ELEMENT_STATUS:
		rq->timeout = READ_ELEMENT_STATUS_TIMEOUT;
		break;
	case READ_DEFECT_DATA:
		rq->timeout = READ_DEFECT_DATA_TIMEOUT;
		rq->retries = 1;
		break;
	default:
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
		break;
	}

	if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) {
		/*
		 * Return the mapping failure as a driver error.  Jumping to
		 * the status-extraction code below would clobber it with
		 * rq->errors from a request that was never issued.
		 */
		err = DRIVER_ERROR << 24;
		goto error;
	}

	memset(sense, 0, sizeof(sense));
	rq->sense = sense;
	rq->sense_len = 0;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	blk_execute_rq(q, disk, rq, 0);

	err = rq->errors & 0xff;	/* only 8 bit SCSI status */
	if (err) {
		/* truncate sense data to OMAX_SB_LEN for old userspace */
		if (rq->sense_len && rq->sense) {
			bytes = (OMAX_SB_LEN > rq->sense_len) ?
				rq->sense_len : OMAX_SB_LEN;
			if (copy_to_user(sic->data, rq->sense, bytes))
				err = -EFAULT;
		}
	} else {
		if (copy_to_user(sic->data, buffer, out_len))
			err = -EFAULT;
	}

error:
	blk_put_request(rq);
error_free_buffer:
	kfree(buffer);
	return err;
}
Пример #26
0
/*
 * scsi_cmd_ioctl - dispatch SCSI-related ioctls for a block device.
 * @file:    file the ioctl was issued on (forwarded for permission checks)
 * @bd_disk: gendisk whose request queue the commands are sent down
 * @cmd:     ioctl number
 * @arg:     userspace argument
 *
 * Handles the SG v3 interface (SG_* / SG_IO), CDROM packet commands,
 * the deprecated SCSI_IOCTL_SEND_COMMAND, and tray eject/close.
 * Returns 0 or a negative errno; -ENOTTY for unknown ioctls.
 */
int scsi_cmd_ioctl(struct file *file, struct gendisk *bd_disk, unsigned int cmd, void __user *arg)
{
    request_queue_t *q;
    struct request *rq;
    int close = 0, err;

    q = bd_disk->queue;
    if (!q)
        return -ENXIO;

    /* pin the queue for the duration of the ioctl; dropped at the end */
    if (blk_get_queue(q))
        return -ENXIO;

    switch (cmd) {
    /*
     * new sgv3 interface
     */
    case SG_GET_VERSION_NUM:
        err = sg_get_version(arg);
        break;
    case SCSI_IOCTL_GET_IDLUN:
        err = scsi_get_idlun(q, arg);
        break;
    case SCSI_IOCTL_GET_BUS_NUMBER:
        err = scsi_get_bus(q, arg);
        break;
    case SG_SET_TIMEOUT:
        err = sg_set_timeout(q, arg);
        break;
    case SG_GET_TIMEOUT:
        err = sg_get_timeout(q);
        break;
    case SG_GET_RESERVED_SIZE:
        err = sg_get_reserved_size(q, arg);
        break;
    case SG_SET_RESERVED_SIZE:
        err = sg_set_reserved_size(q, arg);
        break;
    case SG_EMULATED_HOST:
        err = sg_emulated_host(q, arg);
        break;
    case SG_IO: {
        struct sg_io_hdr hdr;

        err = -EFAULT;
        if (copy_from_user(&hdr, arg, sizeof(hdr)))
            break;
        err = sg_io(file, q, bd_disk, &hdr);
        /* -EFAULT from sg_io: header unusable, nothing to copy back */
        if (err == -EFAULT)
            break;

        /* copy the updated header back even if the command itself failed */
        if (copy_to_user(arg, &hdr, sizeof(hdr)))
            err = -EFAULT;
        break;
    }
    case CDROM_SEND_PACKET: {
        /* translate the cdrom generic command into an SG_IO request */
        struct cdrom_generic_command cgc;
        struct sg_io_hdr hdr;

        err = -EFAULT;
        if (copy_from_user(&cgc, arg, sizeof(cgc)))
            break;
        cgc.timeout = clock_t_to_jiffies(cgc.timeout);
        memset(&hdr, 0, sizeof(hdr));
        hdr.interface_id = 'S';
        hdr.cmd_len = sizeof(cgc.cmd);
        hdr.dxfer_len = cgc.buflen;
        err = 0;
        switch (cgc.data_direction) {
        case CGC_DATA_UNKNOWN:
            hdr.dxfer_direction = SG_DXFER_UNKNOWN;
            break;
        case CGC_DATA_WRITE:
            hdr.dxfer_direction = SG_DXFER_TO_DEV;
            break;
        case CGC_DATA_READ:
            hdr.dxfer_direction = SG_DXFER_FROM_DEV;
            break;
        case CGC_DATA_NONE:
            hdr.dxfer_direction = SG_DXFER_NONE;
            break;
        default:
            err = -EINVAL;
        }
        if (err)
            break;

        hdr.dxferp = cgc.buffer;
        hdr.sbp = cgc.sense;
        if (hdr.sbp)
            hdr.mx_sb_len = sizeof(struct request_sense);
        hdr.timeout = cgc.timeout;
        /* point cmdp at the CDB still sitting in the userspace struct */
        hdr.cmdp = ((struct cdrom_generic_command __user*) arg)->cmd;
        hdr.cmd_len = sizeof(cgc.cmd);

        err = sg_io(file, q, bd_disk, &hdr);
        if (err == -EFAULT)
            break;

        /* a non-zero SCSI status is reported to the caller as -EIO */
        if (hdr.status)
            err = -EIO;

        cgc.stat = err;
        cgc.buflen = hdr.resid;
        if (copy_to_user(arg, &cgc, sizeof(cgc)))
            err = -EFAULT;

        break;
    }

    /*
     * old junk scsi send command ioctl
     */
    case SCSI_IOCTL_SEND_COMMAND:
        err = -EINVAL;
        if (!arg)
            break;

        err = sg_scsi_ioctl(file, q, bd_disk, arg);
        break;
    case CDROMCLOSETRAY:
        close = 1;
        /* fall through - CLOSETRAY and EJECT differ only in cmd[4] below */
    case CDROMEJECT:
        rq = blk_get_request(q, WRITE, __GFP_WAIT);
        /*
         * NOTE(review): rq is used without a failure check - confirm
         * blk_get_request() cannot fail here, or add a NULL check.
         */
        rq->flags |= REQ_BLOCK_PC;
        rq->data = NULL;
        rq->data_len = 0;
        rq->timeout = BLK_DEFAULT_TIMEOUT;
        memset(rq->cmd, 0, sizeof(rq->cmd));
        rq->cmd[0] = GPCMD_START_STOP_UNIT;
        rq->cmd[4] = 0x02 + (close != 0);  /* +1 selects close-tray over eject */
        rq->cmd_len = 6;
        err = blk_execute_rq(q, bd_disk, rq);
        blk_put_request(rq);
        break;
    default:
        err = -ENOTTY;
    }

    blk_put_queue(q);
    return err;
}
Пример #27
0
/*
 * sg_scsi_ioctl - handle the deprecated SCSI_IOCTL_SEND_COMMAND ioctl.
 * @file:    file the ioctl was issued on, used for permission checks
 * @q:       request queue to send the command down
 * @bd_disk: gendisk to issue the command against
 * @sic:     userspace structure describing command, in/out lengths and data
 *
 * Returns a negative errno on failure, 0 on success, or the compacted
 * (positive) SCSI error code where the lowest byte is the SCSI status.
 */
static int sg_scsi_ioctl(struct file *file, request_queue_t *q,
                         struct gendisk *bd_disk, Scsi_Ioctl_Command __user *sic)
{
    struct request *rq;
    int err;
    /*
     * lengths are unsigned so a negative value from userspace cannot
     * slip past the PAGE_SIZE bounds checks below
     */
    unsigned int in_len, out_len, bytes, opcode, cmdlen;
    char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];

    /*
     * get in and out lengths, verify they don't exceed a page worth of data
     */
    if (get_user(in_len, &sic->inlen))
        return -EFAULT;
    if (get_user(out_len, &sic->outlen))
        return -EFAULT;
    if (in_len > PAGE_SIZE || out_len > PAGE_SIZE)
        return -EINVAL;
    if (get_user(opcode, sic->data))
        return -EFAULT;

    bytes = max(in_len, out_len);
    if (bytes) {
        buffer = kmalloc(bytes, q->bounce_gfp | GFP_USER);
        if (!buffer)
            return -ENOMEM;

        memset(buffer, 0, bytes);
    }

    rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
    if (!rq) {
        /* don't dereference a failed allocation below */
        kfree(buffer);
        return -ENOMEM;
    }

    /* command length is implied by the opcode */
    cmdlen = COMMAND_SIZE(opcode);

    /*
     * get command and data to send to device, if any
     */
    err = -EFAULT;
    rq->cmd_len = cmdlen;
    if (copy_from_user(rq->cmd, sic->data, cmdlen))
        goto error;

    /* only pull in data when the caller actually supplied some */
    if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
        goto error;

    err = verify_command(file, rq->cmd);
    if (err)
        goto error;

    switch (opcode) {
    case SEND_DIAGNOSTIC:
    case FORMAT_UNIT:
        rq->timeout = FORMAT_UNIT_TIMEOUT;
        break;
    case START_STOP:
        rq->timeout = START_STOP_TIMEOUT;
        break;
    case MOVE_MEDIUM:
        rq->timeout = MOVE_MEDIUM_TIMEOUT;
        break;
    case READ_ELEMENT_STATUS:
        rq->timeout = READ_ELEMENT_STATUS_TIMEOUT;
        break;
    case READ_DEFECT_DATA:
        rq->timeout = READ_DEFECT_DATA_TIMEOUT;
        break;
    default:
        rq->timeout = BLK_DEFAULT_TIMEOUT;
        break;
    }

    /* on-stack sense buffer: execution below is synchronous */
    memset(sense, 0, sizeof(sense));
    rq->sense = sense;
    rq->sense_len = 0;

    rq->data = buffer;
    rq->data_len = bytes;
    rq->flags |= REQ_BLOCK_PC;

    blk_execute_rq(q, bd_disk, rq);
    err = rq->errors & 0xff;	/* only 8 bit SCSI status */
    if (err) {
        /* on failure, hand back (truncated) sense data instead of data */
        if (rq->sense_len && rq->sense) {
            bytes = (OMAX_SB_LEN > rq->sense_len) ?
                    rq->sense_len : OMAX_SB_LEN;
            if (copy_to_user(sic->data, rq->sense, bytes))
                err = -EFAULT;
        }
    } else {
        if (copy_to_user(sic->data, buffer, out_len))
            err = -EFAULT;
    }

error:
    kfree(buffer);
    blk_put_request(rq);
    return err;
}
Пример #28
0
static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
		struct sg_io_hdr *hdr, fmode_t mode)
{
	unsigned long start_time;
	ssize_t ret = 0;
	int writing = 0;
	int at_head = 0;
	struct request *rq;
	char sense[SCSI_SENSE_BUFFERSIZE];
	struct bio *bio;

	if (hdr->interface_id != 'S')
		return -EINVAL;

	if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
		return -EIO;

	if (hdr->dxfer_len)
		switch (hdr->dxfer_direction) {
		default:
			return -EINVAL;
		case SG_DXFER_TO_DEV:
			writing = 1;
			break;
		case SG_DXFER_TO_FROM_DEV:
		case SG_DXFER_FROM_DEV:
			break;
		}
	if (hdr->flags & SG_FLAG_Q_AT_HEAD)
		at_head = 1;

	ret = -ENOMEM;
	rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	blk_rq_set_block_pc(rq);

	if (hdr->cmd_len > BLK_MAX_CDB) {
		rq->cmd = kzalloc(hdr->cmd_len, GFP_KERNEL);
		if (!rq->cmd)
			goto out_put_request;
	}

	ret = -EFAULT;
	if (blk_fill_sghdr_rq(q, rq, hdr, mode))
		goto out_free_cdb;

	ret = 0;
	if (hdr->iovec_count) {
		struct iov_iter i;
		struct iovec *iov = NULL;

		ret = rw_copy_check_uvector(-1, hdr->dxferp, hdr->iovec_count,
					    0, NULL, &iov);
		if (ret < 0) {
			kfree(iov);
			goto out_free_cdb;
		}

		/* SG_IO howto says that the shorter of the two wins */
		iov_iter_init(&i, rq_data_dir(rq), iov, hdr->iovec_count,
			      min_t(unsigned, ret, hdr->dxfer_len));

		ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);
		kfree(iov);
	} else if (hdr->dxfer_len)
Пример #29
0
/*
 * sg_io - execute an SG_IO request described by @hdr on behalf of @file.
 * @file:    file the ioctl was issued on, used for command permission checks
 * @q:       request queue to send the command down
 * @bd_disk: gendisk to issue the command against
 * @hdr:     kernel copy of the sg_io_hdr; output members are written back
 *           into it by this function (caller copies it out to userspace)
 *
 * Returns a negative errno for setup/copy failures; otherwise 0 even if
 * the SCSI command itself failed - status is reported through @hdr.
 */
static int sg_io(struct file *file, request_queue_t *q,
                 struct gendisk *bd_disk, struct sg_io_hdr *hdr)
{
    unsigned long start_time;
    int reading, writing;
    struct request *rq;
    struct bio *bio;
    char sense[SCSI_SENSE_BUFFERSIZE];
    unsigned char cmd[BLK_MAX_CDB];

    if (hdr->interface_id != 'S')
        return -EINVAL;
    if (hdr->cmd_len > BLK_MAX_CDB)
        return -EINVAL;
    if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len))
        return -EFAULT;
    if (verify_command(file, cmd))
        return -EPERM;

    /*
     * we'll do that later
     */
    if (hdr->iovec_count)
        return -EOPNOTSUPP;

    if (hdr->dxfer_len > (q->max_sectors << 9))
        return -EIO;

    /*
     * NOTE(review): 'reading' is computed below but never used after
     * this block - only 'writing' selects the transfer direction.
     */
    reading = writing = 0;
    if (hdr->dxfer_len) {
        switch (hdr->dxfer_direction) {
        default:
            return -EINVAL;
        case SG_DXFER_TO_FROM_DEV:
            reading = 1;
        /* fall through */
        case SG_DXFER_TO_DEV:
            writing = 1;
            break;
        case SG_DXFER_FROM_DEV:
            reading = 1;
            break;
        }

        /*
         * old block-layer API: blk_rq_map_user() allocates and returns
         * the request itself, with the user buffer already mapped
         */
        rq = blk_rq_map_user(q, writing ? WRITE : READ, hdr->dxferp,
                             hdr->dxfer_len);

        if (IS_ERR(rq))
            return PTR_ERR(rq);
    } else
        rq = blk_get_request(q, READ, __GFP_WAIT);
        /*
         * NOTE(review): rq is not checked for NULL on this path -
         * confirm blk_get_request() cannot fail here, or add a check
         */

    /*
     * fill in request structure
     */
    rq->cmd_len = hdr->cmd_len;
    memcpy(rq->cmd, cmd, hdr->cmd_len);
    /* zero-pad the CDB so the device never sees stack garbage */
    if (sizeof(rq->cmd) != hdr->cmd_len)
        memset(rq->cmd + hdr->cmd_len, 0, sizeof(rq->cmd) - hdr->cmd_len);

    /* on-stack sense buffer: blk_execute_rq() below completes synchronously */
    memset(sense, 0, sizeof(sense));
    rq->sense = sense;
    rq->sense_len = 0;

    rq->flags |= REQ_BLOCK_PC;
    bio = rq->bio;

    /*
     * bounce this after holding a reference to the original bio, it's
     * needed for proper unmapping
     */
    if (rq->bio)
        blk_queue_bounce(q, &rq->bio);

    /* ioctl timeout is in milliseconds; fall back to queue/global default */
    rq->timeout = (hdr->timeout * HZ) / 1000;
    if (!rq->timeout)
        rq->timeout = q->sg_timeout;
    if (!rq->timeout)
        rq->timeout = BLK_DEFAULT_TIMEOUT;

    start_time = jiffies;

    /* ignore return value. All information is passed back to caller
     * (if he doesn't check that is his problem).
     * N.B. a non-zero SCSI status is _not_ necessarily an error.
     */
    blk_execute_rq(q, bd_disk, rq);

    /* write to all output members */
    hdr->status = rq->errors;
    hdr->masked_status = (hdr->status >> 1) & 0x1f;
    hdr->msg_status = 0;
    hdr->host_status = 0;
    hdr->driver_status = 0;
    hdr->info = 0;
    if (hdr->masked_status || hdr->host_status || hdr->driver_status)
        hdr->info |= SG_INFO_CHECK;
    hdr->resid = rq->data_len;
    hdr->duration = ((jiffies - start_time) * 1000) / HZ;
    hdr->sb_len_wr = 0;

    if (rq->sense_len && hdr->sbp) {
        int len = min((unsigned int) hdr->mx_sb_len, rq->sense_len);

        if (!copy_to_user(hdr->sbp, rq->sense, len))
            hdr->sb_len_wr = len;
    }

    /*
     * NOTE(review): there is no blk_put_request() in this function -
     * presumably blk_rq_unmap_user() releases the request in this API
     * version; verify against the matching block-layer code.
     */
    if (blk_rq_unmap_user(rq, bio, hdr->dxfer_len))
        return -EFAULT;

    /* may not have succeeded, but output values written to control
     * structure (struct sg_io_hdr).  */
    return 0;
}
Пример #30
0
/*
 * sg_io - execute an SG_IO request described by @hdr on behalf of @file.
 * @file:    file the ioctl was issued on, used for command permission checks
 * @q:       request queue to send the command down
 * @bd_disk: gendisk to issue the command against
 * @hdr:     kernel copy of the sg_io_hdr; output members are written back
 *           into it by this function (caller copies it out to userspace)
 *
 * Returns a negative errno for setup/copy failures; otherwise 0 even if
 * the SCSI command itself failed - status is reported through @hdr.
 */
static int sg_io(struct file *file, request_queue_t *q,
		struct gendisk *bd_disk, struct sg_io_hdr *hdr)
{
	unsigned long start_time, timeout;
	int writing = 0, ret = 0;
	struct request *rq;
	char sense[SCSI_SENSE_BUFFERSIZE];
	unsigned char cmd[BLK_MAX_CDB];
	struct bio *bio;

	if (hdr->interface_id != 'S')
		return -EINVAL;
	if (hdr->cmd_len > BLK_MAX_CDB)
		return -EINVAL;
	if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len))
		return -EFAULT;
	if (verify_command(file, cmd))
		return -EPERM;

	/* reject transfers larger than the hardware can take in one request */
	if (hdr->dxfer_len > (q->max_hw_sectors << 9))
		return -EIO;

	if (hdr->dxfer_len)
		switch (hdr->dxfer_direction) {
		default:
			return -EINVAL;
		case SG_DXFER_TO_DEV:
			writing = 1;
			break;
		case SG_DXFER_TO_FROM_DEV:
		case SG_DXFER_FROM_DEV:
			break;
		}

	rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	/*
	 * fill in request structure
	 */
	rq->cmd_len = hdr->cmd_len;
	memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
	memcpy(rq->cmd, cmd, hdr->cmd_len);

	/* on-stack sense buffer: blk_execute_rq() below waits for completion */
	memset(sense, 0, sizeof(sense));
	rq->sense = sense;
	rq->sense_len = 0;

	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	/* ioctl timeout is in milliseconds; clamp, then fall back to defaults */
	timeout = msecs_to_jiffies(hdr->timeout);
	rq->timeout = (timeout < INT_MAX) ? timeout : INT_MAX;
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_TIMEOUT;

	if (hdr->iovec_count) {
		/* scatter/gather from userspace: copy in the iovec array first */
		const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
		struct sg_iovec *iov;

		iov = kmalloc(size, GFP_KERNEL);
		if (!iov) {
			ret = -ENOMEM;
			goto out;
		}

		if (copy_from_user(iov, hdr->dxferp, size)) {
			kfree(iov);
			ret = -EFAULT;
			goto out;
		}

		ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count,
					  hdr->dxfer_len);
		kfree(iov);
	} else if (hdr->dxfer_len)
		ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len);

	if (ret)
		goto out;

	/* keep the original bio so the mapping can be undone after execution */
	bio = rq->bio;
	rq->retries = 0;

	start_time = jiffies;

	/* ignore return value. All information is passed back to caller
	 * (if he doesn't check that is his problem).
	 * N.B. a non-zero SCSI status is _not_ necessarily an error.
	 */
	blk_execute_rq(q, bd_disk, rq, 0);

	/* write to all output members */
	hdr->status = 0xff & rq->errors;
	hdr->masked_status = status_byte(rq->errors);
	hdr->msg_status = msg_byte(rq->errors);
	hdr->host_status = host_byte(rq->errors);
	hdr->driver_status = driver_byte(rq->errors);
	hdr->info = 0;
	if (hdr->masked_status || hdr->host_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->resid = rq->data_len;
	hdr->duration = ((jiffies - start_time) * 1000) / HZ;
	hdr->sb_len_wr = 0;

	/* copy back as much sense data as the caller's buffer allows */
	if (rq->sense_len && hdr->sbp) {
		int len = min((unsigned int) hdr->mx_sb_len, rq->sense_len);

		if (!copy_to_user(hdr->sbp, rq->sense, len))
			hdr->sb_len_wr = len;
	}

	if (blk_rq_unmap_user(bio))
		ret = -EFAULT;

	/* may not have succeeded, but output values written to control
	 * structure (struct sg_io_hdr).  */
out:
	blk_put_request(rq);
	return ret;
}