Example 1
int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
{
	unsigned int nr_bytes = nr_sectors << 9;
	struct request *rq = HWGROUP(drive)->rq;
	int rc, error = 0;

	if (!nr_bytes) {
		if (blk_pc_request(rq))
			nr_bytes = rq->data_len;
		else
			nr_bytes = rq->hard_cur_sectors << 9;
	}

	/*
	 * if failfast is set on a request, override number of sectors
	 * and complete the whole request right now
	 */
	if (blk_noretry_request(rq) && uptodate <= 0)
		nr_bytes = rq->hard_nr_sectors << 9;

	if (blk_fs_request(rq) == 0 && uptodate <= 0 && rq->errors == 0)
		rq->errors = -EIO;

	if (uptodate <= 0)
		error = uptodate ? uptodate : -EIO;

	rc = ide_end_rq(drive, rq, error, nr_bytes);
	if (rc == 0)
		drive->hwif->hwgroup->rq = NULL;

	return rc;
}
Example 2
static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq,
				     u8 stat, u8 err)
{
	ide_hwif_t *hwif = drive->hwif;

	if ((stat & ATA_BUSY) ||
	    ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) {
		/* other bits are useless when BUSY */
		rq->errors |= ERROR_RESET;
	} else if (stat & ATA_ERR) {
		/* err has different meaning on cdrom and tape */
		if (err == ATA_ABORTED) {
			if ((drive->dev_flags & IDE_DFLAG_LBA) &&
			    /* some newer drives don't support ATA_CMD_INIT_DEV_PARAMS */
			    hwif->tp_ops->read_status(hwif) == ATA_CMD_INIT_DEV_PARAMS)
				return ide_stopped;
		} else if ((err & BAD_CRC) == BAD_CRC) {
			/* UDMA crc error, just retry the operation */
			drive->crc_count++;
		} else if (err & (ATA_BBK | ATA_UNC)) {
			/* retries won't help these */
			rq->errors = ERROR_MAX;
		} else if (err & ATA_TRK0NF) {
			/* help it find track zero */
			rq->errors |= ERROR_RECAL;
		}
	}

	if ((stat & ATA_DRQ) && rq_data_dir(rq) == READ &&
	    (hwif->host_flags & IDE_HFLAG_ERROR_STOPS_FIFO) == 0) {
		int nsect = drive->mult_count ? drive->mult_count : 1;

		ide_pad_transfer(drive, READ, nsect * SECTOR_SIZE);
	}

	if (rq->errors >= ERROR_MAX || blk_noretry_request(rq)) {
		ide_kill_rq(drive, rq);
		return ide_stopped;
	}

	if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ))
		rq->errors |= ERROR_RESET;

	if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
		++rq->errors;
		return ide_do_reset(drive);
	}

	if ((rq->errors & ERROR_RECAL) == ERROR_RECAL)
		drive->special_flags |= IDE_SFLAG_RECALIBRATE;

	++rq->errors;

	return ide_stopped;
}
Example 3
int ide_complete_rq(ide_drive_t *drive, int error, unsigned int nr_bytes)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = hwif->rq;
	int rc;

	/*
	 * if failfast is set on a request, override number of sectors
	 * and complete the whole request right now
	 */
	if (blk_noretry_request(rq) && error <= 0)
		nr_bytes = blk_rq_sectors(rq) << 9;

	rc = ide_end_rq(drive, rq, error, nr_bytes);
	if (rc == 0)
		hwif->rq = NULL;

	return rc;
}
Example 4
int __ide_end_request(ide_drive_t *drive, struct request *rq, int uptodate,
					  int nr_sectors)
{
	int ret = 1;

	BUG_ON(!(rq->flags & REQ_STARTED));

	/*
	 * if failfast is set on a request, override number of sectors and
	 * complete the whole request right now
	 */
	if (blk_noretry_request(rq) && end_io_error(uptodate))
		nr_sectors = rq->hard_nr_sectors;

	if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors)
		rq->errors = -EIO;
	/*
	 * decide whether to reenable DMA -- 3 is a random magic for now,
	 * if we DMA timeout more than 3 times, just stay in PIO
	 */
	if (drive->state == DMA_PIO_RETRY && drive->retry_pio <= 3) {
		drive->state = 0;
		HWGROUP(drive)->hwif->ide_dma_on(drive);
	}

	if (!end_that_request_first(rq, uptodate, nr_sectors)) {
		add_disk_randomness(rq->rq_disk);

		if (blk_rq_tagged(rq))
			blk_queue_end_tag(drive->queue, rq);

		blkdev_dequeue_request(rq);
		HWGROUP(drive)->rq = NULL;
		end_that_request_last(rq);
		ret = 0;
	}
	return ret;
}
Example 5
static int __ide_end_request(ide_drive_t *drive, struct request *rq,
			     int uptodate, unsigned int nr_bytes, int dequeue)
{
	int ret = 1;
	int error = 0;

	if (uptodate <= 0)
		error = uptodate ? uptodate : -EIO;

	/*
	 * if failfast is set on a request, override number of sectors and
	 * complete the whole request right now
	 */
	if (blk_noretry_request(rq) && error)
		nr_bytes = rq->hard_nr_sectors << 9;

	if (!blk_fs_request(rq) && error && !rq->errors)
		rq->errors = -EIO;

	/*
	 * decide whether to reenable DMA -- 3 is a random magic for now,
	 * if we DMA timeout more than 3 times, just stay in PIO
	 */
	if ((drive->dev_flags & IDE_DFLAG_DMA_PIO_RETRY) &&
	    drive->retry_pio <= 3) {
		drive->dev_flags &= ~IDE_DFLAG_DMA_PIO_RETRY;
		ide_dma_on(drive);
	}

	if (!blk_end_request(rq, error, nr_bytes))
		ret = 0;

	if (ret == 0 && dequeue)
		drive->hwif->rq = NULL;

	return ret;
}
Example 6
/*
 * Returns:
 * 0: if the request should be continued.
 * 1: if the request will be going through error recovery.
 * 2: if the request should be ended.
 */
static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = hwif->rq;
	int stat, err, sense_key;

	/* check for errors */
	stat = hwif->tp_ops->read_status(hwif);

	if (stat_ret)
		*stat_ret = stat;

	if (OK_STAT(stat, good_stat, BAD_R_STAT))
		return 0;

	/* get the IDE error register */
	err = ide_read_error(drive);
	sense_key = err >> 4;

	ide_debug_log(IDE_DBG_RQ, "stat: 0x%x, good_stat: 0x%x, cmd[0]: 0x%x, "
				  "rq->cmd_type: 0x%x, err: 0x%x",
				  stat, good_stat, rq->cmd[0], rq->cmd_type,
				  err);

	if (blk_sense_request(rq)) {
		/*
		 * We got an error trying to get sense info from the drive
		 * (probably while trying to recover from a former error).
		 * Just give up.
		 */
		rq->cmd_flags |= REQ_FAILED;
		return 2;
	} else if (blk_pc_request(rq) || rq->cmd_type == REQ_TYPE_ATA_PC) {
		/* All other functions, except for READ. */

		/*
		 * if we have an error, pass back CHECK_CONDITION as the
		 * scsi status byte
		 */
		if (blk_pc_request(rq) && !rq->errors)
			rq->errors = SAM_STAT_CHECK_CONDITION;

		/* check for tray open */
		if (sense_key == NOT_READY) {
			cdrom_saw_media_change(drive);
		} else if (sense_key == UNIT_ATTENTION) {
			/* check for media change */
			cdrom_saw_media_change(drive);
			return 0;
		} else if (sense_key == ILLEGAL_REQUEST &&
			   rq->cmd[0] == GPCMD_START_STOP_UNIT) {
			/*
			 * Don't print error message for this condition--
			 * SFF8090i indicates that 5/24/00 is the correct
			 * response to a request to close the tray if the
			 * drive doesn't have that capability.
			 * cdrom_log_sense() knows this!
			 */
		} else if (!(rq->cmd_flags & REQ_QUIET)) {
			/* otherwise, print an error */
			ide_dump_status(drive, "packet command error", stat);
		}

		rq->cmd_flags |= REQ_FAILED;

		/*
		 * instead of playing games with moving completions around,
		 * remove failed request completely and end it when the
		 * request sense has completed
		 */
		goto end_request;

	} else if (blk_fs_request(rq)) {
		int do_end_request = 0;

		/* handle errors from READ and WRITE requests */

		if (blk_noretry_request(rq))
			do_end_request = 1;

		if (sense_key == NOT_READY) {
			/* tray open */
			if (rq_data_dir(rq) == READ) {
				cdrom_saw_media_change(drive);

				/* fail the request */
				printk(KERN_ERR PFX "%s: tray open\n",
						drive->name);
				do_end_request = 1;
			} else {
				struct cdrom_info *info = drive->driver_data;

				/*
				 * Allow the drive 5 seconds to recover, some
				 * devices will return this error while flushing
				 * data from cache.
				 */
				if (!rq->errors)
					info->write_timeout = jiffies +
							ATAPI_WAIT_WRITE_BUSY;
				rq->errors = 1;
				if (time_after(jiffies, info->write_timeout))
					do_end_request = 1;
				else {
					struct request_queue *q = drive->queue;
					unsigned long flags;

					/*
					 * take a breather relying on the unplug
					 * timer to kick us again
					 */
					spin_lock_irqsave(q->queue_lock, flags);
					blk_plug_device(q);
					spin_unlock_irqrestore(q->queue_lock, flags);

					return 1;
				}
			}
		} else if (sense_key == UNIT_ATTENTION) {
			/* media change */
			cdrom_saw_media_change(drive);

			/*
			 * Arrange to retry the request but be sure to give up
			 * if we've retried too many times.
			 */
			if (++rq->errors > ERROR_MAX)
				do_end_request = 1;
		} else if (sense_key == ILLEGAL_REQUEST ||
			   sense_key == DATA_PROTECT) {
			/*
			 * No point in retrying after an illegal request or data
			 * protect error.
			 */
			ide_dump_status(drive, "command error", stat);
			do_end_request = 1;
		} else if (sense_key == MEDIUM_ERROR) {
			/*
			 * No point in re-trying a zillion times on a bad
			 * sector. If we got here the error is not correctable.
			 */
			ide_dump_status(drive, "media error (bad sector)",
					stat);
			do_end_request = 1;
		} else if (sense_key == BLANK_CHECK) {
			/* disk appears blank ?? */
			ide_dump_status(drive, "media error (blank)", stat);
			do_end_request = 1;
		} else if ((err & ~ATA_ABORTED) != 0) {
			/* go to the default handler for other errors */
			ide_error(drive, "cdrom_decode_status", stat);
			return 1;
		} else if ((++rq->errors > ERROR_MAX)) {
			/* we've racked up too many retries, abort */
			do_end_request = 1;
		}

		/*
		 * End a request through request sense analysis when we have
		 * sense data. We need this in order to perform end of media
		 * processing.
		 */
		if (do_end_request)
			goto end_request;

		/*
		 * If we got a CHECK_CONDITION status, queue
		 * a request sense command.
		 */
		if (stat & ATA_ERR)
			cdrom_queue_request_sense(drive, NULL, NULL);
		return 1;
	} else {
		blk_dump_rq_flags(rq, PFX "bad rq");
		return 2;
	}

end_request:
	if (stat & ATA_ERR) {
		struct request_queue *q = drive->queue;
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		blkdev_dequeue_request(rq);
		spin_unlock_irqrestore(q->queue_lock, flags);

		hwif->rq = NULL;

		cdrom_queue_request_sense(drive, rq->sense, rq);
		return 1;
	} else
		return 2;
}
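
The return contract documented at the top of Example 6 (0 = continue the request, 1 = error recovery in progress, 2 = end the request) is easiest to read from the caller's side. The function below is a minimal sketch of such a caller and is not taken from the kernel source: the name cdrom_handle_decode() and the continue_transfer()/end_failed_rq() helpers are hypothetical, used only to illustrate how the three return values could be dispatched.

/*
 * Hypothetical caller sketch (not from the kernel): dispatch on the
 * three return values of cdrom_decode_status().  continue_transfer()
 * and end_failed_rq() are illustrative helpers assumed to exist.
 */
static ide_startstop_t cdrom_handle_decode(ide_drive_t *drive, int good_stat)
{
	int stat;

	switch (cdrom_decode_status(drive, good_stat, &stat)) {
	case 0:
		/* no error: keep feeding the current request */
		return continue_transfer(drive, stat);
	case 1:
		/* error recovery (e.g. a request sense) has been queued */
		return ide_stopped;
	case 2:
	default:
		/* retries will not help: terminate the request */
		return end_failed_rq(drive);
	}
}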