Example #1
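ide_do_devset() completes a synchronous REQ_DEVSET_EXEC request: it calls the setting handler stashed in ide_req(rq)->special with the int argument packed into the CDB bytes, and reports any failure through scsi_req(rq)->result (see Example #9 for the submission side).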
ide_startstop_t ide_do_devset(ide_drive_t *drive, struct request *rq)
{
	int err, (*setfunc)(ide_drive_t *, int) = ide_req(rq)->special;

	err = setfunc(drive, *(int *)&scsi_req(rq)->cmd[1]);
	if (err)
		scsi_req(rq)->result = err;
	ide_complete_rq(drive, 0, blk_rq_bytes(rq));
	return ide_stopped;
}
Example #2
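blk_complete_sghdr_rq() copies the outcome of a finished SG_IO request back to userspace: the packed status bytes, the residual byte count, and up to mx_sb_len bytes of sense data, before unmapping the user buffer.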
static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
				 struct bio *bio)
{
	struct scsi_request *req = scsi_req(rq);
	int r, ret = 0;

	/*
	 * fill in all the output members
	 */
	hdr->status = req->result & 0xff;
	hdr->masked_status = status_byte(req->result);
	hdr->msg_status = msg_byte(req->result);
	hdr->host_status = host_byte(req->result);
	hdr->driver_status = driver_byte(req->result);
	hdr->info = 0;
	if (hdr->masked_status || hdr->host_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->resid = req->resid_len;
	hdr->sb_len_wr = 0;

	if (req->sense_len && hdr->sbp) {
		int len = min((unsigned int) hdr->mx_sb_len, req->sense_len);

		if (!copy_to_user(hdr->sbp, req->sense, len))
			hdr->sb_len_wr = len;
		else
			ret = -EFAULT;
	}

	r = blk_rq_unmap_user(bio);
	if (!ret)
		ret = r;

	return ret;
}
Example #3
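execute_drive_cmd() starts a prepared taskfile command; a request with no attached struct ide_cmd is a valid way to wait for the queue to drain, and is completed immediately with a zero result.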
static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
		struct request *rq)
{
	struct ide_cmd *cmd = rq->special;

	if (cmd) {
		if (cmd->protocol == ATA_PROT_PIO) {
			ide_init_sg_cmd(cmd, blk_rq_sectors(rq) << 9);
			ide_map_sg(drive, cmd);
		}

		return do_rw_taskfile(drive, cmd);
	}

	/*
	 * NULL is actually a valid way of waiting for
	 * all current requests to be flushed from the queue.
	 */
#ifdef DEBUG
	printk("%s: DRIVE_CMD (null)\n", drive->name);
#endif
	scsi_req(rq)->result = 0;
	ide_complete_rq(drive, BLK_STS_OK, blk_rq_bytes(rq));

	return ide_stopped;
}
Example #4
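blk_fill_sghdr_rq() initializes a passthrough request from a userspace sg_io_hdr: it copies in the CDB, verifies it against the open mode, and derives the timeout with the queue's SG timeout and BLK_DEFAULT_SG_TIMEOUT as fallbacks and BLK_MIN_SG_TIMEOUT as a floor.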
static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
			     struct sg_io_hdr *hdr, fmode_t mode)
{
	struct scsi_request *req = scsi_req(rq);

	if (copy_from_user(req->cmd, hdr->cmdp, hdr->cmd_len))
		return -EFAULT;
	if (blk_verify_command(req->cmd, mode))
		return -EPERM;

	/*
	 * fill in request structure
	 */
	req->cmd_len = hdr->cmd_len;

	rq->timeout = msecs_to_jiffies(hdr->timeout);
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
	if (rq->timeout < BLK_MIN_SG_TIMEOUT)
		rq->timeout = BLK_MIN_SG_TIMEOUT;

	return 0;
}
Example #5
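scsi_show_rq() is a blk-mq debugfs helper: it recovers the enclosing struct scsi_cmnd from the scsi_request with container_of() and prints the formatted CDB.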
void scsi_show_rq(struct seq_file *m, struct request *rq)
{
	struct scsi_cmnd *cmd = container_of(scsi_req(rq), typeof(*cmd), req);
	char buf[80];

	__scsi_format_command(buf, sizeof(buf), cmd->cmnd, cmd->cmd_len);
	seq_printf(m, ", .cmd=%s", buf);
}
Example #6
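ide_kill_rq() fails a request that cannot be retried: floppy/tape driver requests complete with a zero result, tape requests get IDE_DRV_ERROR_GENERAL, other passthrough requests default to -EIO, and the request is completed with BLK_STS_IOERR.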
void ide_kill_rq(ide_drive_t *drive, struct request *rq)
{
	u8 drv_req = ata_misc_request(rq) && rq->rq_disk;
	u8 media = drive->media;

	drive->failed_pc = NULL;

	if ((media == ide_floppy || media == ide_tape) && drv_req) {
		scsi_req(rq)->result = 0;
	} else {
		if (media == ide_tape)
			scsi_req(rq)->result = IDE_DRV_ERROR_GENERAL;
		else if (blk_rq_is_passthrough(rq) && scsi_req(rq)->result == 0)
			scsi_req(rq)->result = -EIO;
	}

	ide_complete_rq(drive, BLK_STS_IOERR, blk_rq_bytes(rq));
}
Example #7
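__blk_send_generic() builds a 6-byte CDB by hand, runs it synchronously with blk_execute_rq(), and collapses any non-zero result into -EIO.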
/* Send basic block requests */
static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
			      int cmd, int data)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, REQ_OP_SCSI_OUT, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
	scsi_req(rq)->cmd[0] = cmd;
	scsi_req(rq)->cmd[4] = data;
	scsi_req(rq)->cmd_len = 6;
	blk_execute_rq(q, bd_disk, rq, 0);
	err = scsi_req(rq)->result ? -EIO : 0;
	blk_put_request(rq);

	return err;
}
Example #8
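ide_complete_drive_reset() finishes a pending REQ_DRIVE_RESET request once the reset state machine is done, forcing a -EIO result when the reset failed without recording one.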
static inline void ide_complete_drive_reset(ide_drive_t *drive, int err)
{
	struct request *rq = drive->hwif->rq;

	if (rq && ata_misc_request(rq) &&
	    scsi_req(rq)->cmd[0] == REQ_DRIVE_RESET) {
		if (err <= 0 && scsi_req(rq)->result == 0)
			scsi_req(rq)->result = -EIO;
		ide_complete_rq(drive, err ? BLK_STS_IOERR : BLK_STS_OK,
				blk_rq_bytes(rq));
	}
}
Example #9
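ide_devset_execute() is the submission side of Example #1: for settings flagged DS_SYNC it allocates a misc passthrough request, packs the handler pointer and argument into it, and executes it synchronously.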
int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting,
		       int arg)
{
	struct request_queue *q = drive->queue;
	struct request *rq;
	int ret = 0;

	if (!(setting->flags & DS_SYNC))
		return setting->set(drive, arg);

	rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
	ide_req(rq)->type = ATA_PRIV_MISC;
	scsi_req(rq)->cmd_len = 5;
	scsi_req(rq)->cmd[0] = REQ_DEVSET_EXEC;
	*(int *)&scsi_req(rq)->cmd[1] = arg;
	ide_req(rq)->special = setting->set;

	blk_execute_rq(q, NULL, rq, 0);
	ret = scsi_req(rq)->result;
	blk_put_request(rq);

	return ret;
}
Example #10
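ide_pm_execute_rq() inserts a power-management request at the head of the legacy queue and waits on an on-stack completion, bailing out with -ENXIO if the queue is already dying.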
static int ide_pm_execute_rq(struct request *rq)
{
	struct request_queue *q = rq->q;
	DECLARE_COMPLETION_ONSTACK(wait);

	rq->end_io_data = &wait;
	rq->end_io = ide_end_sync_rq;

	spin_lock_irq(q->queue_lock);
	if (unlikely(blk_queue_dying(q))) {
		rq->rq_flags |= RQF_QUIET;
		scsi_req(rq)->result = -ENXIO;
		__blk_end_request_all(rq, BLK_STS_OK);
		spin_unlock_irq(q->queue_lock);
		return -ENXIO;
	}
	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT);
	__blk_run_queue_uncond(q);
	spin_unlock_irq(q->queue_lock);

	wait_for_completion_io(&wait);

	return scsi_req(rq)->result ? -EIO : 0;
}
Example #11
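ide_special_rq() dispatches a misc passthrough request on its first CDB byte to the park/unpark, devset, or drive-reset handlers; any other opcode is a driver bug, hence the BUG().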
static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
{
	u8 cmd = scsi_req(rq)->cmd[0];

	switch (cmd) {
	case REQ_PARK_HEADS:
	case REQ_UNPARK_HEADS:
		return ide_do_park_unpark(drive, rq);
	case REQ_DEVSET_EXEC:
		return ide_do_devset(drive, rq);
	case REQ_DRIVE_RESET:
		return ide_do_reset(drive);
	default:
		BUG();
	}
}
Example #12
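ide_pio_datablock() transfers the next chunk of a PIO command: it clears the result for filesystem requests, honors the drive's 16-bit I/O quirk, and sizes the chunk by the multi-sector count for multi-PIO.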
static void ide_pio_datablock(ide_drive_t *drive, struct ide_cmd *cmd,
			      unsigned int write)
{
	unsigned int nr_bytes;

	u8 saved_io_32bit = drive->io_32bit;

	if (cmd->tf_flags & IDE_TFLAG_FS)
		scsi_req(cmd->rq)->result = 0;

	if (cmd->tf_flags & IDE_TFLAG_IO_16BIT)
		drive->io_32bit = 0;

	touch_softlockup_watchdog();

	if (cmd->tf_flags & IDE_TFLAG_MULTI_PIO)
		nr_bytes = min_t(unsigned, cmd->nleft, drive->mult_count << 9);
	else
		nr_bytes = SECTOR_SIZE;

	ide_pio_bytes(drive, cmd, write, nr_bytes);

	drive->io_32bit = saved_io_32bit;
}
Example #13
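generic_ide_suspend() drives suspend through the request queue: a private ATA_PRIV_PM_SUSPEND request carries the ide_pm_state through the PM state machine, with the ACPI hooks called only once per port pair.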
int generic_ide_suspend(struct device *dev, pm_message_t mesg)
{
	ide_drive_t *drive = to_ide_device(dev);
	ide_drive_t *pair = ide_get_pair_dev(drive);
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq;
	struct ide_pm_state rqpm;
	int ret;

	if (ide_port_acpi(hwif)) {
		/* call ACPI _GTM only once */
		if ((drive->dn & 1) == 0 || pair == NULL)
			ide_acpi_get_timing(hwif);
	}

	memset(&rqpm, 0, sizeof(rqpm));
	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
	ide_req(rq)->type = ATA_PRIV_PM_SUSPEND;
	rq->special = &rqpm;
	rqpm.pm_step = IDE_PM_START_SUSPEND;
	if (mesg.event == PM_EVENT_PRETHAW)
		mesg.event = PM_EVENT_FREEZE;
	rqpm.pm_state = mesg.event;

	blk_execute_rq(drive->queue, NULL, rq, 0);
	ret = scsi_req(rq)->result ? -EIO : 0;
	blk_put_request(rq);

	if (ret == 0 && ide_port_acpi(hwif)) {
		/* call ACPI _PS3 only after both devices are suspended */
		if ((drive->dn & 1) || pair == NULL)
			ide_acpi_set_state(hwif, 0);
	}

	return ret;
}
Example #14
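sg_scsi_ioctl() implements the deprecated SCSI_IOCTL_SEND_COMMAND path: the CDB length is derived from the opcode, per-opcode timeouts and retry counts are applied, and on error the sense buffer is copied back over the data area.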
/**
 * sg_scsi_ioctl  --  handle deprecated SCSI_IOCTL_SEND_COMMAND ioctl
 * @q:		request queue to send scsi commands down
 * @disk:	gendisk to operate on (optional)
 * @mode:	mode used to open the file through which the ioctl has been
 *		submitted
 * @sic:	userspace structure describing the command to perform
 *
 * Send down the scsi command described by @sic to the device below
 * the request queue @q.  @mode is used to perform fine-grained
 * permission checks that allow users to send down non-destructive
 * SCSI commands.  If the caller has a struct gendisk available it
 * should be passed in as @disk to allow the low level driver to use
 * the information contained in it.  A NULL @disk is only allowed if
 * the caller knows that the low level driver doesn't need it (e.g. in
 * the scsi subsystem).
 *
 * Notes:
 *   -  This interface is deprecated - users should use the SG_IO
 *      interface instead, as this is a more flexible approach to
 *      performing SCSI commands on a device.
 *   -  The SCSI command length is determined by examining the 1st byte
 *      of the given command. There is no way to override this.
 *   -  Data transfers are limited to PAGE_SIZE
 *   -  The combined buffer length (inlen + outlen) must be at least
 *      OMAX_SB_LEN bytes long to accommodate the sense buffer when
 *      an error occurs.  The sense buffer is truncated to OMAX_SB_LEN
 *      (16) bytes so that old code will not be surprised.
 *   -  If a Unix error occurs (e.g. ENOMEM) then the user will receive
 *      a negative return and the Unix error code in 'errno'.
 *      If the SCSI command succeeds then 0 is returned.
 *      Positive numbers returned are the compacted SCSI error codes (4
 *      bytes in one int) where the lowest byte is the SCSI status.
 */
int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
		struct scsi_ioctl_command __user *sic)
{
	enum { OMAX_SB_LEN = 16 };	/* For backward compatibility */
	struct request *rq;
	struct scsi_request *req;
	int err;
	unsigned int in_len, out_len, bytes, opcode, cmdlen;
	char *buffer = NULL;

	if (!sic)
		return -EINVAL;

	/*
	 * get the in and out lengths, verify they don't exceed a page worth of data
	 */
	if (get_user(in_len, &sic->inlen))
		return -EFAULT;
	if (get_user(out_len, &sic->outlen))
		return -EFAULT;
	if (in_len > PAGE_SIZE || out_len > PAGE_SIZE)
		return -EINVAL;
	if (get_user(opcode, sic->data))
		return -EFAULT;

	bytes = max(in_len, out_len);
	if (bytes) {
		buffer = kzalloc(bytes, q->bounce_gfp | GFP_USER | __GFP_NOWARN);
		if (!buffer)
			return -ENOMEM;
	}

	rq = blk_get_request(q, in_len ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto error_free_buffer;
	}
	req = scsi_req(rq);

	cmdlen = COMMAND_SIZE(opcode);

	/*
	 * get command and data to send to device, if any
	 */
	err = -EFAULT;
	req->cmd_len = cmdlen;
	if (copy_from_user(req->cmd, sic->data, cmdlen))
		goto error;

	if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
		goto error;

	err = blk_verify_command(req->cmd, mode);
	if (err)
		goto error;

	/* default; possibly overridden later */
	req->retries = 5;

	switch (opcode) {
	case SEND_DIAGNOSTIC:
	case FORMAT_UNIT:
		rq->timeout = FORMAT_UNIT_TIMEOUT;
		req->retries = 1;
		break;
	case START_STOP:
		rq->timeout = START_STOP_TIMEOUT;
		break;
	case MOVE_MEDIUM:
		rq->timeout = MOVE_MEDIUM_TIMEOUT;
		break;
	case READ_ELEMENT_STATUS:
		rq->timeout = READ_ELEMENT_STATUS_TIMEOUT;
		break;
	case READ_DEFECT_DATA:
		rq->timeout = READ_DEFECT_DATA_TIMEOUT;
		req->retries = 1;
		break;
	default:
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
		break;
	}

	if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, GFP_NOIO)) {
		err = DRIVER_ERROR << 24;
		goto error;
	}

	blk_execute_rq(q, disk, rq, 0);

	err = req->result & 0xff;	/* only 8 bit SCSI status */
	if (err) {
		if (req->sense_len && req->sense) {
			bytes = (OMAX_SB_LEN > req->sense_len) ?
				req->sense_len : OMAX_SB_LEN;
			if (copy_to_user(sic->data, req->sense, bytes))
				err = -EFAULT;
		}
	} else {
		if (copy_to_user(sic->data, buffer, out_len))
			err = -EFAULT;
	}

error:
	blk_put_request(rq);

error_free_buffer:
	kfree(buffer);

	return err;
}
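To show the interface from the caller's side, here is a minimal userspace sketch of the deprecated ioctl served by sg_scsi_ioctl(); it is not from the kernel tree, and the device path is an illustrative assumption. The local struct mirrors the layout of struct scsi_ioctl_command: two lengths followed by the data area.

/* Hypothetical userspace sketch: TEST UNIT READY (opcode 0x00) via the
 * deprecated SCSI_IOCTL_SEND_COMMAND ioctl. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <scsi/scsi_ioctl.h>	/* SCSI_IOCTL_SEND_COMMAND */

int main(void)
{
	struct {
		unsigned int inlen;	/* bytes sent to the device after the CDB */
		unsigned int outlen;	/* reply bytes copied back over data[] */
		unsigned char data[16];	/* CDB; >= OMAX_SB_LEN (16) for sense on error */
	} sic;
	int fd = open("/dev/sda", O_RDONLY);	/* assumed device path */
	int ret;

	if (fd < 0)
		return 1;

	memset(&sic, 0, sizeof(sic));
	sic.data[0] = 0x00;	/* TEST UNIT READY; cmd_len = COMMAND_SIZE(0x00) = 6 */

	/* 0 on success, -1/errno on a Unix error; a positive return packs the
	 * SCSI result with the status in the lowest byte, as documented above. */
	ret = ioctl(fd, SCSI_IOCTL_SEND_COMMAND, &sic);
	printf("SCSI_IOCTL_SEND_COMMAND -> %d\n", ret);
	close(fd);
	return ret < 0;
}

The inlen/outlen semantics match the copy_from_user()/copy_to_user() pairs in the function above: inlen bytes follow the CDB on the way in, and outlen bytes of the bounce buffer come back over data[].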
Example #15
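sg_io() implements the SG_IO ioctl on the block layer: it validates the header, maps the user buffer (optionally as an iovec), executes the request, and reports the outcome through blk_complete_sghdr_rq() from Example #2.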
static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
		struct sg_io_hdr *hdr, fmode_t mode)
{
	unsigned long start_time;
	ssize_t ret = 0;
	int writing = 0;
	int at_head = 0;
	struct request *rq;
	struct scsi_request *req;
	struct bio *bio;

	if (hdr->interface_id != 'S')
		return -EINVAL;

	if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
		return -EIO;

	if (hdr->dxfer_len)
		switch (hdr->dxfer_direction) {
		default:
			return -EINVAL;
		case SG_DXFER_TO_DEV:
			writing = 1;
			break;
		case SG_DXFER_TO_FROM_DEV:
		case SG_DXFER_FROM_DEV:
			break;
		}
	if (hdr->flags & SG_FLAG_Q_AT_HEAD)
		at_head = 1;

	ret = -ENOMEM;
	rq = blk_get_request(q, writing ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	req = scsi_req(rq);

	if (hdr->cmd_len > BLK_MAX_CDB) {
		req->cmd = kzalloc(hdr->cmd_len, GFP_KERNEL);
		if (!req->cmd)
			goto out_put_request;
	}

	ret = blk_fill_sghdr_rq(q, rq, hdr, mode);
	if (ret < 0)
		goto out_free_cdb;

	ret = 0;
	if (hdr->iovec_count) {
		struct iov_iter i;
		struct iovec *iov = NULL;

		ret = import_iovec(rq_data_dir(rq),
				   hdr->dxferp, hdr->iovec_count,
				   0, &iov, &i);
		if (ret < 0)
			goto out_free_cdb;

		/* SG_IO howto says that the shorter of the two wins */
		iov_iter_truncate(&i, hdr->dxfer_len);

		ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);
		kfree(iov);
	} else if (hdr->dxfer_len)
		ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
				      GFP_KERNEL);

	if (ret)
		goto out_free_cdb;

	bio = rq->bio;
	req->retries = 0;

	start_time = jiffies;

	/* ignore return value. All information is passed back to the caller
	 * (if the caller doesn't check, that is their problem).
	 * N.B. a non-zero SCSI status is _not_ necessarily an error.
	 */
	blk_execute_rq(q, bd_disk, rq, at_head);

	hdr->duration = jiffies_to_msecs(jiffies - start_time);

	ret = blk_complete_sghdr_rq(rq, hdr, bio);

out_free_cdb:
	scsi_req_free_cmd(req);
out_put_request:
	blk_put_request(rq);
	return ret;
}
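For the caller's side of this path, here is a minimal userspace sketch of the SG_IO ioctl that sg_io() serves; it is not from the kernel tree, and the device path and the 96-byte INQUIRY allocation length are illustrative assumptions.

/* Hypothetical userspace sketch: a 6-byte INQUIRY through SG_IO. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <scsi/sg.h>

int main(void)
{
	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };	/* INQUIRY, 96-byte reply */
	unsigned char buf[96], sense[32];
	struct sg_io_hdr hdr;
	int fd = open("/dev/sg0", O_RDWR);	/* assumed device path */

	if (fd < 0)
		return 1;

	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';			/* checked first by sg_io() */
	hdr.cmd_len = sizeof(cdb);
	hdr.cmdp = cdb;
	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
	hdr.dxfer_len = sizeof(buf);
	hdr.dxferp = buf;
	hdr.mx_sb_len = sizeof(sense);
	hdr.sbp = sense;
	hdr.timeout = 5000;			/* ms, consumed by blk_fill_sghdr_rq() */

	if (ioctl(fd, SG_IO, &hdr) < 0) {
		perror("SG_IO");
		close(fd);
		return 1;
	}

	/* Output members filled in by blk_complete_sghdr_rq() (Example #2) */
	printf("status=0x%x host=0x%x driver=0x%x resid=%d\n",
	       hdr.status, hdr.host_status, hdr.driver_status, hdr.resid);
	close(fd);
	return 0;
}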
Example #16
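do_ide_request() is the legacy IDE request_fn: it juggles the host, port, and queue locks, respects sleeping drives and serialized ports, refuses non-PM requests while the drive is blocked, and requeues with the old 3 ms plug delay when the port or host is busy.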
/*
 * Issue a new request to a device.
 */
void do_ide_request(struct request_queue *q)
{
	ide_drive_t	*drive = q->queuedata;
	ide_hwif_t	*hwif = drive->hwif;
	struct ide_host *host = hwif->host;
	struct request	*rq = NULL;
	ide_startstop_t	startstop;
	unsigned long queue_run_ms = 3; /* old plug delay */

	spin_unlock_irq(q->queue_lock);

	/* HLD do_request() callback might sleep, make sure it's okay */
	might_sleep();

	if (ide_lock_host(host, hwif))
		goto plug_device_2;

	spin_lock_irq(&hwif->lock);

	if (!ide_lock_port(hwif)) {
		ide_hwif_t *prev_port;

		WARN_ON_ONCE(hwif->rq);
repeat:
		prev_port = hwif->host->cur_port;
		if (drive->dev_flags & IDE_DFLAG_SLEEPING &&
		    time_after(drive->sleep, jiffies)) {
			unsigned long left = drive->sleep - jiffies;

			queue_run_ms = jiffies_to_msecs(left + 1);
			ide_unlock_port(hwif);
			goto plug_device;
		}

		if ((hwif->host->host_flags & IDE_HFLAG_SERIALIZE) &&
		    hwif != prev_port) {
			ide_drive_t *cur_dev =
				prev_port ? prev_port->cur_dev : NULL;

			/*
			 * set nIEN for previous port, drives in the
			 * quirk list may not like intr setups/cleanups
			 */
			if (cur_dev &&
			    (cur_dev->dev_flags & IDE_DFLAG_NIEN_QUIRK) == 0)
				prev_port->tp_ops->write_devctl(prev_port,
								ATA_NIEN |
								ATA_DEVCTL_OBS);

			hwif->host->cur_port = hwif;
		}
		hwif->cur_dev = drive;
		drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);

		spin_unlock_irq(&hwif->lock);
		spin_lock_irq(q->queue_lock);
		/*
		 * we know that the queue isn't empty, but blk_fetch_request()
		 * can still return NULL if q->prep_rq_fn() decides to kill
		 * a request
		 */
		if (!rq)
			rq = blk_fetch_request(drive->queue);

		spin_unlock_irq(q->queue_lock);
		spin_lock_irq(&hwif->lock);

		if (!rq) {
			ide_unlock_port(hwif);
			goto out;
		}

		/*
		 * Sanity: don't accept a request that isn't a PM request
		 * if we are currently power managed. This is very important as
		 * blk_stop_queue() doesn't prevent the blk_fetch_request()
		 * above from returning us whatever is in the queue. Since we
		 * call ide_do_request() ourselves, we end up taking requests
		 * while the queue is blocked...
		 *
		 * We let requests forced at the head of the queue with
		 * ide-preempt through, though. Hopefully that doesn't happen
		 * too much, unless the subdriver triggers such a thing in
		 * its own PM state machine.
		 */
		if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
		    ata_pm_request(rq) == 0 &&
		    (rq->rq_flags & RQF_PREEMPT) == 0) {
			/* there should be no pending command at this point */
			ide_unlock_port(hwif);
			goto plug_device;
		}

		scsi_req(rq)->resid_len = blk_rq_bytes(rq);
		hwif->rq = rq;

		spin_unlock_irq(&hwif->lock);
		startstop = start_request(drive, rq);
		spin_lock_irq(&hwif->lock);

		if (startstop == ide_stopped) {
			rq = hwif->rq;
			hwif->rq = NULL;
			goto repeat;
		}
	} else
		goto plug_device;
out:
	spin_unlock_irq(&hwif->lock);
	if (rq == NULL)
		ide_unlock_host(host);
	spin_lock_irq(q->queue_lock);
	return;

plug_device:
	spin_unlock_irq(&hwif->lock);
	ide_unlock_host(host);
plug_device_2:
	spin_lock_irq(q->queue_lock);
	__ide_requeue_and_plug(q, rq);
}