Example #1
0
/*
 * Returns:
 * 0: if the request should be continued.
 * 1: if the request will be going through error recovery.
 * 2: if the request should be ended.
 */
static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = hwif->rq;
	int stat, err, sense_key;

	/* check for errors */
	stat = hwif->tp_ops->read_status(hwif);

	if (stat_ret)
		*stat_ret = stat;

	if (OK_STAT(stat, good_stat, BAD_R_STAT))
		return 0;

	/* get the IDE error register */
	err = ide_read_error(drive);
	sense_key = err >> 4;

	ide_debug_log(IDE_DBG_RQ, "stat: 0x%x, good_stat: 0x%x, cmd[0]: 0x%x, "
				  "rq->cmd_type: 0x%x, err: 0x%x",
				  stat, good_stat, rq->cmd[0], rq->cmd_type,
				  err);

	if (blk_sense_request(rq)) {
		/*
		 * We got an error trying to get sense info from the drive
		 * (probably while trying to recover from a former error).
		 * Just give up.
		 */
		rq->cmd_flags |= REQ_FAILED;
		return 2;
	} else if (blk_pc_request(rq) || rq->cmd_type == REQ_TYPE_ATA_PC) {
		/* All other functions, except for READ. */

		/*
		 * if we have an error, pass back CHECK_CONDITION as the
		 * scsi status byte
		 */
		if (blk_pc_request(rq) && !rq->errors)
			rq->errors = SAM_STAT_CHECK_CONDITION;

		/* check for tray open */
		if (sense_key == NOT_READY) {
			cdrom_saw_media_change(drive);
		} else if (sense_key == UNIT_ATTENTION) {
			/* check for media change */
			cdrom_saw_media_change(drive);
			return 0;
		} else if (sense_key == ILLEGAL_REQUEST &&
			   rq->cmd[0] == GPCMD_START_STOP_UNIT) {
			/*
			 * Don't print error message for this condition--
			 * SFF8090i indicates that 5/24/00 is the correct
			 * response to a request to close the tray if the
			 * drive doesn't have that capability.
			 * cdrom_log_sense() knows this!
			 */
		} else if (!(rq->cmd_flags & REQ_QUIET)) {
			/* otherwise, print an error */
			ide_dump_status(drive, "packet command error", stat);
		}

		rq->cmd_flags |= REQ_FAILED;

		/*
		 * instead of playing games with moving completions around,
		 * remove failed request completely and end it when the
		 * request sense has completed
		 */
		goto end_request;

	} else if (blk_fs_request(rq)) {
		int do_end_request = 0;

		/* handle errors from READ and WRITE requests */

		if (blk_noretry_request(rq))
			do_end_request = 1;

		if (sense_key == NOT_READY) {
			/* tray open */
			if (rq_data_dir(rq) == READ) {
				cdrom_saw_media_change(drive);

				/* fail the request */
				printk(KERN_ERR PFX "%s: tray open\n",
						drive->name);
				do_end_request = 1;
			} else {
				struct cdrom_info *info = drive->driver_data;

				/*
				 * Allow the drive 5 seconds to recover, some
				 * devices will return this error while flushing
				 * data from cache.
				 */
				if (!rq->errors)
					info->write_timeout = jiffies +
							ATAPI_WAIT_WRITE_BUSY;
				rq->errors = 1;
				if (time_after(jiffies, info->write_timeout))
					do_end_request = 1;
				else {
					struct request_queue *q = drive->queue;
					unsigned long flags;

					/*
					 * take a breather relying on the unplug
					 * timer to kick us again
					 */
					spin_lock_irqsave(q->queue_lock, flags);
					blk_plug_device(q);
					spin_unlock_irqrestore(q->queue_lock, flags);

					return 1;
				}
			}
		} else if (sense_key == UNIT_ATTENTION) {
			/* media change */
			cdrom_saw_media_change(drive);

			/*
			 * Arrange to retry the request but be sure to give up
			 * if we've retried too many times.
			 */
			if (++rq->errors > ERROR_MAX)
				do_end_request = 1;
		} else if (sense_key == ILLEGAL_REQUEST ||
			   sense_key == DATA_PROTECT) {
			/*
			 * No point in retrying after an illegal request or data
			 * protect error.
			 */
			ide_dump_status(drive, "command error", stat);
			do_end_request = 1;
		} else if (sense_key == MEDIUM_ERROR) {
			/*
			 * No point in re-trying a zillion times on a bad
			 * sector. If we got here the error is not correctable.
			 */
			ide_dump_status(drive, "media error (bad sector)",
					stat);
			do_end_request = 1;
		} else if (sense_key == BLANK_CHECK) {
			/* disk appears blank ?? */
			ide_dump_status(drive, "media error (blank)", stat);
			do_end_request = 1;
		} else if ((err & ~ATA_ABORTED) != 0) {
			/* go to the default handler for other errors */
			ide_error(drive, "cdrom_decode_status", stat);
			return 1;
		} else if (++rq->errors > ERROR_MAX) {
			/* we've racked up too many retries, abort */
			do_end_request = 1;
		}

		/*
		 * End a request through request sense analysis when we have
		 * sense data. We need this in order to perform end of media
		 * processing.
		 */
		if (do_end_request)
			goto end_request;

		/*
		 * If we got a CHECK_CONDITION status, queue
		 * a request sense command.
		 */
		if (stat & ATA_ERR)
			cdrom_queue_request_sense(drive, NULL, NULL);
		return 1;
	} else {
		blk_dump_rq_flags(rq, PFX "bad rq");
		return 2;
	}

end_request:
	if (stat & ATA_ERR) {
		struct request_queue *q = drive->queue;
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		blkdev_dequeue_request(rq);
		spin_unlock_irqrestore(q->queue_lock, flags);

		hwif->rq = NULL;

		cdrom_queue_request_sense(drive, rq->sense, rq);
		return 1;
	} else
		return 2;
}
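
/*
 * Illustrative sketch (not from the original source): how a caller might
 * dispatch on the 0/1/2 contract documented above cdrom_decode_status().
 * The handler name and the surrounding interrupt-handler shape are
 * hypothetical; only the return-value meanings come from the code above.
 */
static ide_startstop_t cdrom_example_intr(ide_drive_t *drive)
{
	int stat;

	switch (cdrom_decode_status(drive, 0, &stat)) {
	case 0:			/* no error -- keep processing the request */
		break;
	case 1:			/* error recovery (request sense) is under way */
		return ide_stopped;
	case 2:			/* the request should be ended now */
		/* ... end the request here ... */
		return ide_stopped;
	}

	/* ... continue the normal data transfer ... */
	return ide_started;
}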
Example #2
0
int ide_ata66_check (ide_drive_t *drive, ide_task_t *args)
{
	if ((args->tfRegister[IDE_COMMAND_OFFSET] == WIN_SETFEATURES) &&
	    (args->tfRegister[IDE_SECTOR_OFFSET] > XFER_UDMA_2) &&
	    (args->tfRegister[IDE_FEATURE_OFFSET] == SETFEATURES_XFER)) {
#ifndef CONFIG_IDEDMA_IVB
		if ((drive->id->hw_config & 0x6000) == 0) {
#else /* !CONFIG_IDEDMA_IVB */
		if (((drive->id->hw_config & 0x2000) == 0) ||
		    ((drive->id->hw_config & 0x4000) == 0)) {
#endif /* CONFIG_IDEDMA_IVB */
			printk("%s: Speed warnings UDMA 3/4/5 is not "
				"functional.\n", drive->name);
			return 1;
		}
		if (!HWIF(drive)->udma_four) {
			printk("%s: Speed warnings UDMA 3/4/5 is not "
				"functional.\n",
				HWIF(drive)->name);
			return 1;
		}
	}
	return 0;
}
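
/*
 * Illustrative sketch (not from the original source): gating a
 * SETFEATURES_XFER taskfile on ide_ata66_check() before it is issued.
 * The wrapper name and error handling are assumptions; only the
 * ide_ata66_check() contract (non-zero == UDMA 3/4/5 not usable)
 * comes from the code above.
 */
static int example_try_set_xfer(ide_drive_t *drive, ide_task_t *args)
{
	if (ide_ata66_check(drive, args)) {
		/* 80-wire cable not detected: refuse modes above UDMA 2 */
		return -EPERM;
	}
	/* ... safe to issue the SETFEATURES_XFER command here ... */
	return 0;
}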

/*
 * Backside of the HDIO_DRIVE_CMD call for SETFEATURES_XFER.
 * Returns:
 * 1 : safe to update the drive->id DMA mode words.
 * 0 : do not update.
 */
int set_transfer (ide_drive_t *drive, ide_task_t *args)
{
	if ((args->tfRegister[IDE_COMMAND_OFFSET] == WIN_SETFEATURES) &&
	    (args->tfRegister[IDE_SECTOR_OFFSET] >= XFER_SW_DMA_0) &&
	    (args->tfRegister[IDE_FEATURE_OFFSET] == SETFEATURES_XFER) &&
	    (drive->id->dma_ultra ||
	     drive->id->dma_mword ||
	     drive->id->dma_1word))
		return 1;

	return 0;
}
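
/*
 * Illustrative sketch (not from the original source): the "backside" use
 * hinted at in the comment above -- after a SETFEATURES_XFER issued via
 * HDIO_DRIVE_CMD completes, refresh the DMA mode words in drive->id.
 * The wrapper name is hypothetical; set_transfer() and
 * ide_driveid_update() are the functions shown in this file.
 */
static void example_post_setfeatures(ide_drive_t *drive, ide_task_t *args)
{
	if (set_transfer(drive, args)) {
		/* re-read IDENTIFY data so dma_ultra/dma_mword/dma_1word
		 * reflect the mode the drive just switched to */
		(void) ide_driveid_update(drive);
	}
}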

#ifdef CONFIG_BLK_DEV_IDEDMA
static u8 ide_auto_reduce_xfer (ide_drive_t *drive)
{
	if (!drive->crc_count)
		return drive->current_speed;
	drive->crc_count = 0;

	switch(drive->current_speed) {
		case XFER_UDMA_7:	return XFER_UDMA_6;
		case XFER_UDMA_6:	return XFER_UDMA_5;
		case XFER_UDMA_5:	return XFER_UDMA_4;
		case XFER_UDMA_4:	return XFER_UDMA_3;
		case XFER_UDMA_3:	return XFER_UDMA_2;
		case XFER_UDMA_2:	return XFER_UDMA_1;
		case XFER_UDMA_1:	return XFER_UDMA_0;
			/*
			 * We do not step down into the non-Ultra DMA modes
			 * here: without UDMA CRC checking available we drop
			 * back to PIO and let the user explicitly select the
			 * older ATA-1/ATA-2 DMA modes.
			 */
		case XFER_UDMA_0:
		default:		return XFER_PIO_4;
	}
}
#endif /* CONFIG_BLK_DEV_IDEDMA */
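
#ifdef CONFIG_BLK_DEV_IDEDMA
/*
 * Illustrative sketch (not from the original source): one way the CRC
 * fallback above might be wired up.  After repeated UDMA CRC errors,
 * step the interface down one UDMA mode.  The helper name, the threshold
 * and the use of the host's speedproc hook are assumptions; only
 * ide_auto_reduce_xfer() comes from the code above.
 */
static void example_dma_crc_fallback(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	u8 new_speed;

	if (drive->crc_count < 4)	/* arbitrary threshold for the sketch */
		return;

	new_speed = ide_auto_reduce_xfer(drive);	/* also clears crc_count */
	if (hwif->speedproc)
		hwif->speedproc(drive, new_speed);
}
#endif /* CONFIG_BLK_DEV_IDEDMA */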

/*
 * Update drive->id by re-reading the IDENTIFY data, so that the DMA mode
 * words reflect a possible transfer mode change.
 */
int ide_driveid_update (ide_drive_t *drive)
{
	ide_hwif_t *hwif	= HWIF(drive);
	struct hd_driveid *id;
#if 0
	id = kmalloc(SECTOR_WORDS*4, GFP_ATOMIC);
	if (!id)
		return 0;

	taskfile_lib_get_identify(drive, (char *)id);

	ide_fix_driveid(id);
	if (id) {
		drive->id->dma_ultra = id->dma_ultra;
		drive->id->dma_mword = id->dma_mword;
		drive->id->dma_1word = id->dma_1word;
		/* anything more ? */
		kfree(id);
	}
	return 1;
#else
	/*
	 * Re-read drive->id for possible DMA mode
	 * change (copied from ide-probe.c)
	 */
	unsigned long timeout, flags;

	SELECT_MASK(drive, 1);
	if (IDE_CONTROL_REG)
		hwif->OUTB(drive->ctl,IDE_CONTROL_REG);
	msleep(50);
	hwif->OUTB(WIN_IDENTIFY, IDE_COMMAND_REG);
	timeout = jiffies + WAIT_WORSTCASE;
	do {
		if (time_after(jiffies, timeout)) {
			SELECT_MASK(drive, 0);
			return 0;	/* drive timed-out */
		}
		msleep(50);	/* give drive a breather */
	} while (hwif->INB(IDE_ALTSTATUS_REG) & BUSY_STAT);
	msleep(50);	/* wait for IRQ and DRQ_STAT */
	if (!OK_STAT(hwif->INB(IDE_STATUS_REG),DRQ_STAT,BAD_R_STAT)) {
		SELECT_MASK(drive, 0);
		printk("%s: CHECK for good STATUS\n", drive->name);
		return 0;
	}
	local_irq_save(flags);
	SELECT_MASK(drive, 0);
	id = kmalloc(SECTOR_WORDS*4, GFP_ATOMIC);
	if (!id) {
		local_irq_restore(flags);
		return 0;
	}
	ata_input_data(drive, id, SECTOR_WORDS);
	(void) hwif->INB(IDE_STATUS_REG);	/* clear drive IRQ */
	local_irq_enable();
	local_irq_restore(flags);
	ide_fix_driveid(id);
	if (id) {
		drive->id->dma_ultra = id->dma_ultra;
		drive->id->dma_mword = id->dma_mword;
		drive->id->dma_1word = id->dma_1word;
		/* anything more ? */
		kfree(id);
	}

	return 1;
#endif
}
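
/*
 * Illustrative sketch (not from the original source): decoding the DMA
 * mode words that ide_driveid_update() just refreshed.  In each word the
 * low byte lists the modes the drive supports and the high byte flags the
 * mode that is currently selected (compare the 0x0101/0x0202/... masks
 * written by ide_config_drive_speed()).  The helper name is hypothetical.
 */
static int example_current_udma_mode(ide_drive_t *drive)
{
	u8 selected = drive->id->dma_ultra >> 8;
	int mode;

	for (mode = 7; mode >= 0; mode--)
		if (selected & (1 << mode))
			return mode;	/* XFER_UDMA_<mode> is active */

	return -1;			/* no UDMA mode currently selected */
}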
Example #3
0
/*
 * Similar to ide_wait_stat(), except it never calls ide_error() internally.
 * This is a kludge to handle the new ide_config_drive_speed() function,
 * and should not otherwise be used anywhere.  Eventually, the tuneprocs
 * should be updated to return ide_startstop_t, in which case we can get
 * rid of this abomination again.  :)   -ml
 *
 * It is gone..........
 *
 * const char *msg -- consider adding for verbose error reporting.
 */
int ide_config_drive_speed (ide_drive_t *drive, u8 speed)
{
	ide_hwif_t *hwif	= HWIF(drive);
	int	i, error	= 1;
	u8 stat;

//	while (HWGROUP(drive)->busy)
//		msleep(50);

#ifdef CONFIG_BLK_DEV_IDEDMA
	if (hwif->ide_dma_check)	 /* check if host supports DMA */
		hwif->ide_dma_host_off(drive);
#endif

	/*
	 * Don't use ide_wait_cmd here - it will
	 * attempt to set_geometry and recalibrate,
	 * but for some reason these don't work at
	 * this point (lost interrupt).
	 */
	/*
	 * Select the drive, and issue the SETFEATURES command
	 */
	disable_irq_nosync(hwif->irq);
	
	/*
	 *	FIXME: we race against the running IRQ here if
	 *	this is called from non IRQ context. If we use
	 *	disable_irq() we hang on the error path. Work
	 *	is needed.
	 */
	 
	udelay(1);
	SELECT_DRIVE(drive);
	SELECT_MASK(drive, 0);
	udelay(1);
	if (IDE_CONTROL_REG)
		hwif->OUTB(drive->ctl | 2, IDE_CONTROL_REG);
	hwif->OUTB(speed, IDE_NSECTOR_REG);
	hwif->OUTB(SETFEATURES_XFER, IDE_FEATURE_REG);
	hwif->OUTB(WIN_SETFEATURES, IDE_COMMAND_REG);
	if ((IDE_CONTROL_REG) && (drive->quirk_list == 2))
		hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
	udelay(1);
	/*
	 * Wait for drive to become non-BUSY
	 */
	if ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) {
		unsigned long flags, timeout;
		local_irq_set(flags);
		timeout = jiffies + WAIT_CMD;
		while ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) {
			if (time_after(jiffies, timeout))
				break;
		}
		local_irq_restore(flags);
	}

	/*
	 * Allow status to settle, then read it again.
	 * A few rare drives vastly violate the 400ns spec here,
	 * so we'll wait up to 10usec for a "good" status
	 * rather than expensively fail things immediately.
	 * This fix courtesy of Matthew Faupel & Niccolo Rigacci.
	 */
	for (i = 0; i < 10; i++) {
		udelay(1);
		if (OK_STAT((stat = hwif->INB(IDE_STATUS_REG)), DRIVE_READY, BUSY_STAT|DRQ_STAT|ERR_STAT)) {
			error = 0;
			break;
		}
	}

	SELECT_MASK(drive, 0);

	enable_irq(hwif->irq);

	if (error) {
		(void) ide_dump_status(drive, "set_drive_speed_status", stat);
		return error;
	}

	drive->id->dma_ultra &= ~0xFF00;
	drive->id->dma_mword &= ~0x0F00;
	drive->id->dma_1word &= ~0x0F00;

#ifdef CONFIG_BLK_DEV_IDEDMA
	if (speed >= XFER_SW_DMA_0)
		hwif->ide_dma_host_on(drive);
	else if (hwif->ide_dma_check)	/* check if host supports DMA */
		hwif->ide_dma_off_quietly(drive);
#endif

	switch(speed) {
		case XFER_UDMA_7:   drive->id->dma_ultra |= 0x8080; break;
		case XFER_UDMA_6:   drive->id->dma_ultra |= 0x4040; break;
		case XFER_UDMA_5:   drive->id->dma_ultra |= 0x2020; break;
		case XFER_UDMA_4:   drive->id->dma_ultra |= 0x1010; break;
		case XFER_UDMA_3:   drive->id->dma_ultra |= 0x0808; break;
		case XFER_UDMA_2:   drive->id->dma_ultra |= 0x0404; break;
		case XFER_UDMA_1:   drive->id->dma_ultra |= 0x0202; break;
		case XFER_UDMA_0:   drive->id->dma_ultra |= 0x0101; break;
		case XFER_MW_DMA_2: drive->id->dma_mword |= 0x0404; break;
		case XFER_MW_DMA_1: drive->id->dma_mword |= 0x0202; break;
		case XFER_MW_DMA_0: drive->id->dma_mword |= 0x0101; break;
		case XFER_SW_DMA_2: drive->id->dma_1word |= 0x0404; break;
		case XFER_SW_DMA_1: drive->id->dma_1word |= 0x0202; break;
		case XFER_SW_DMA_0: drive->id->dma_1word |= 0x0101; break;
		default: break;
	}
	if (!drive->init_speed)
		drive->init_speed = speed;
	drive->current_speed = speed;
	return error;
}
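
/*
 * Illustrative sketch (not from the original source): the switch above
 * writes a "supported + selected" bit pair for the new mode into the
 * relevant drive->id word.  The same masks can be computed arithmetically;
 * this helper is hypothetical and only restates the 0x0101 << n pattern
 * used by ide_config_drive_speed().
 */
static void example_mark_speed(ide_drive_t *drive, u8 speed)
{
	if (speed >= XFER_UDMA_0 && speed <= XFER_UDMA_7)
		drive->id->dma_ultra |= 0x0101 << (speed - XFER_UDMA_0);
	else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
		drive->id->dma_mword |= 0x0101 << (speed - XFER_MW_DMA_0);
	else if (speed >= XFER_SW_DMA_0 && speed <= XFER_SW_DMA_2)
		drive->id->dma_1word |= 0x0101 << (speed - XFER_SW_DMA_0);
}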
Example #4
0
void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
{
	ide_hwif_t *hwif = HWIF(drive);
	unsigned long flags;
	struct request *rq;

	spin_lock_irqsave(&ide_lock, flags);
	rq = HWGROUP(drive)->rq;
	spin_unlock_irqrestore(&ide_lock, flags);

	if (rq->flags & REQ_DRIVE_CMD) {
		u8 *args = (u8 *) rq->buffer;
		if (rq->errors == 0)
			rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);

		if (args) {
			args[0] = stat;
			args[1] = err;
			args[2] = hwif->INB(IDE_NSECTOR_REG);
		}
	} else if (rq->flags & REQ_DRIVE_TASK) {
		u8 *args = (u8 *) rq->buffer;
		if (rq->errors == 0)
			rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);

		if (args) {
			args[0] = stat;
			args[1] = err;
			args[2] = hwif->INB(IDE_NSECTOR_REG);
			args[3] = hwif->INB(IDE_SECTOR_REG);
			args[4] = hwif->INB(IDE_LCYL_REG);
			args[5] = hwif->INB(IDE_HCYL_REG);
			args[6] = hwif->INB(IDE_SELECT_REG);
		}
	} else if (rq->flags & REQ_DRIVE_TASKFILE) {
		ide_task_t *args = (ide_task_t *) rq->special;
		if (rq->errors == 0)
			rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);
			
		if (args) {
			if (args->tf_in_flags.b.data) {
				u16 data				= hwif->INW(IDE_DATA_REG);
				args->tfRegister[IDE_DATA_OFFSET]	= (data) & 0xFF;
				args->hobRegister[IDE_DATA_OFFSET]	= (data >> 8) & 0xFF;
			}
			args->tfRegister[IDE_ERROR_OFFSET]   = err;
			/* be sure we're looking at the low order bits */
			hwif->OUTB(drive->ctl & ~0x80, IDE_CONTROL_REG);
			args->tfRegister[IDE_NSECTOR_OFFSET] = hwif->INB(IDE_NSECTOR_REG);
			args->tfRegister[IDE_SECTOR_OFFSET]  = hwif->INB(IDE_SECTOR_REG);
			args->tfRegister[IDE_LCYL_OFFSET]    = hwif->INB(IDE_LCYL_REG);
			args->tfRegister[IDE_HCYL_OFFSET]    = hwif->INB(IDE_HCYL_REG);
			args->tfRegister[IDE_SELECT_OFFSET]  = hwif->INB(IDE_SELECT_REG);
			args->tfRegister[IDE_STATUS_OFFSET]  = stat;

			if (drive->addressing == 1) {
				hwif->OUTB(drive->ctl|0x80, IDE_CONTROL_REG);
				args->hobRegister[IDE_FEATURE_OFFSET]	= hwif->INB(IDE_FEATURE_REG);
				args->hobRegister[IDE_NSECTOR_OFFSET]	= hwif->INB(IDE_NSECTOR_REG);
				args->hobRegister[IDE_SECTOR_OFFSET]	= hwif->INB(IDE_SECTOR_REG);
				args->hobRegister[IDE_LCYL_OFFSET]	= hwif->INB(IDE_LCYL_REG);
				args->hobRegister[IDE_HCYL_OFFSET]	= hwif->INB(IDE_HCYL_REG);
			}
		}
Example #5
0
/*
 * Update drive->id by re-reading the IDENTIFY data, so that the DMA mode
 * words reflect a possible transfer mode change.
 */
int ide_driveid_update (ide_drive_t *drive)
{
	ide_hwif_t *hwif	= HWIF(drive);
	struct hd_driveid *id;
#if 0
	id = kmalloc(SECTOR_WORDS*4, GFP_ATOMIC);
	if (!id)
		return 0;

	taskfile_lib_get_identify(drive, (char *)id);

	ide_fix_driveid(id);
	if (id) {
		drive->id->dma_ultra = id->dma_ultra;
		drive->id->dma_mword = id->dma_mword;
		drive->id->dma_1word = id->dma_1word;
		/* anything more ? */
		kfree(id);
	}
	return 1;
#else
	/*
	 * Re-read drive->id for possible DMA mode
	 * change (copied from ide-probe.c)
	 */
	unsigned long timeout, flags;

	SELECT_MASK(drive, 1);
	if (IDE_CONTROL_REG)
		hwif->OUTB(drive->ctl,IDE_CONTROL_REG);
	msleep(50);
	hwif->OUTB(WIN_IDENTIFY, IDE_COMMAND_REG);
	timeout = jiffies + WAIT_WORSTCASE;
	do {
		if (time_after(jiffies, timeout)) {
			SELECT_MASK(drive, 0);
			return 0;	/* drive timed-out */
		}
		msleep(50);	/* give drive a breather */
	} while (hwif->INB(IDE_ALTSTATUS_REG) & BUSY_STAT);
	msleep(50);	/* wait for IRQ and DRQ_STAT */
	if (!OK_STAT(hwif->INB(IDE_STATUS_REG),DRQ_STAT,BAD_R_STAT)) {
		SELECT_MASK(drive, 0);
		printk("%s: CHECK for good STATUS\n", drive->name);
		return 0;
	}
	local_irq_save(flags);
	SELECT_MASK(drive, 0);
	id = kmalloc(SECTOR_WORDS*4, GFP_ATOMIC);
	if (!id) {
		local_irq_restore(flags);
		return 0;
	}
	ata_input_data(drive, id, SECTOR_WORDS);
	(void) hwif->INB(IDE_STATUS_REG);	/* clear drive IRQ */
	local_irq_enable();
	local_irq_restore(flags);
	ide_fix_driveid(id);
	if (id) {
		drive->id->dma_ultra = id->dma_ultra;
		drive->id->dma_mword = id->dma_mword;
		drive->id->dma_1word = id->dma_1word;
		/* anything more ? */
		kfree(id);
	}

	return 1;
#endif
}
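
/*
 * Illustrative sketch (not from the original source): the BUSY-polling
 * pattern used above (and in ide_config_drive_speed()) pulled out into a
 * standalone helper -- sleep in 50 ms steps until BSY drops or a jiffies
 * deadline passes.  The helper name is hypothetical.
 */
static int example_wait_not_busy(ide_drive_t *drive, unsigned long timeout)
{
	ide_hwif_t *hwif = HWIF(drive);

	timeout += jiffies;
	while (hwif->INB(IDE_ALTSTATUS_REG) & BUSY_STAT) {
		if (time_after(jiffies, timeout))
			return -EBUSY;	/* device never cleared BSY */
		msleep(50);		/* give the drive a breather */
	}
	return 0;
}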
Example #6
0
void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
{
	ide_hwif_t *hwif = HWIF(drive);
	unsigned long flags;
	struct request *rq;

	spin_lock_irqsave(&io_request_lock, flags);
	rq = HWGROUP(drive)->rq;
	spin_unlock_irqrestore(&io_request_lock, flags);

	switch(rq->cmd) {
		case IDE_DRIVE_CMD:
		{
			u8 *args = (u8 *) rq->buffer;
			if (rq->errors == 0)
				rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);

			if (args) {
				args[0] = stat;
				args[1] = err;
				args[2] = hwif->INB(IDE_NSECTOR_REG);
			}
			break;
		}
		case IDE_DRIVE_TASK:
		{
			u8 *args = (u8 *) rq->buffer;
			if (rq->errors == 0)
				rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);

			if (args) {
				args[0] = stat;
				args[1] = err;
				args[2] = hwif->INB(IDE_NSECTOR_REG);
				args[3] = hwif->INB(IDE_SECTOR_REG);
				args[4] = hwif->INB(IDE_LCYL_REG);
				args[5] = hwif->INB(IDE_HCYL_REG);
				args[6] = hwif->INB(IDE_SELECT_REG);
			}
			break;
		}
		case IDE_DRIVE_TASKFILE:
		{
			ide_task_t *args = (ide_task_t *) rq->special;
			if (rq->errors == 0)
				rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);
				
			if (args) {
				if (args->tf_in_flags.b.data) {
					u16 data			= hwif->INW(IDE_DATA_REG);
					args->tfRegister[IDE_DATA_OFFSET]	= (data) & 0xFF;
					args->hobRegister[IDE_DATA_OFFSET_HOB]	= (data >> 8) & 0xFF;
				}
				args->tfRegister[IDE_ERROR_OFFSET]   = err;
				args->tfRegister[IDE_NSECTOR_OFFSET] = hwif->INB(IDE_NSECTOR_REG);
				args->tfRegister[IDE_SECTOR_OFFSET]  = hwif->INB(IDE_SECTOR_REG);
				args->tfRegister[IDE_LCYL_OFFSET]    = hwif->INB(IDE_LCYL_REG);
				args->tfRegister[IDE_HCYL_OFFSET]    = hwif->INB(IDE_HCYL_REG);
				args->tfRegister[IDE_SELECT_OFFSET]  = hwif->INB(IDE_SELECT_REG);
				args->tfRegister[IDE_STATUS_OFFSET]  = stat;

				if (drive->addressing == 1) {
					hwif->OUTB(drive->ctl|0x80, IDE_CONTROL_REG_HOB);
					args->hobRegister[IDE_FEATURE_OFFSET_HOB] = hwif->INB(IDE_FEATURE_REG);
					args->hobRegister[IDE_NSECTOR_OFFSET_HOB] = hwif->INB(IDE_NSECTOR_REG);
					args->hobRegister[IDE_SECTOR_OFFSET_HOB]  = hwif->INB(IDE_SECTOR_REG);
					args->hobRegister[IDE_LCYL_OFFSET_HOB]    = hwif->INB(IDE_LCYL_REG);
					args->hobRegister[IDE_HCYL_OFFSET_HOB]    = hwif->INB(IDE_HCYL_REG);
				}
			}
			break;
		}
		default:
			break;
	}
Example #7
0
/*
 * reset_pollfunc() gets invoked to poll the interface for completion every 50ms
 * during an ide reset operation. If the drives have not yet responded,
 * and we have not yet hit our maximum waiting time, then the timer is restarted
 * for another 50ms.
 */
static ide_startstop_t reset_pollfunc (ide_drive_t *drive)
{
	ide_hwgroup_t *hwgroup	= HWGROUP(drive);
	ide_hwif_t *hwif	= HWIF(drive);
	u8 tmp;
	/*
	if (hwif->reset_poll != NULL) {
		if (hwif->reset_poll(drive)) {
			printk(KERN_ERR "%s: host reset_poll failure for %s.\n",
				hwif->name, drive->name);
			return ide_stopped;
		}
	}
	*/
	if (!OK_STAT(tmp = hwif->INB(IDE_STATUS_REG), 0, BUSY_STAT)) {
		if (time_before(jiffies, hwgroup->poll_timeout)) {
			if (HWGROUP(drive)->handler != NULL)
				BUG();
			ide_set_handler(drive, &reset_pollfunc, HZ/20, NULL);
			/* continue polling */
			return ide_started;
		}
		printk("%s: reset timed-out, status=0x%02x\n", hwif->name, tmp);
		drive->failures++;
		// Added by Frank(96/08/02)
		hwgroup->polling = 0;	/* done polling */
		return ide_stopped;
		//************************
	} else  {
		printk("%s: reset: ", hwif->name);
		if ((tmp = hwif->INB(IDE_ERROR_REG)) == 1) {
			// Added by Frank(96/08/02)
			//if (hwif->reset_poll(drive)) {
				printk(KERN_ERR "%s: host reset_poll failure for %s.\n",
					hwif->name, drive->name);
				drive->failures++;
				hwgroup->polling = 0;
				return ide_stopped;
			//}//************************
			//printk("success\n");
			//drive->failures = 0;
		} else {
			drive->failures++;
			printk("master: ");
			switch (tmp & 0x7f) {
				case 1: printk("passed");
					break;
				case 2: printk("formatter device error");
					break;
				case 3: printk("sector buffer error");
					break;
				case 4: printk("ECC circuitry error");
					break;
				case 5: printk("controlling MPU error");
					break;
				default:printk("error (0x%02x?)", tmp);
			}
			if (tmp & 0x80)
				printk("; slave: failed");
			printk("\n");
		}
	}
	hwgroup->polling = 0;	/* done polling */
	return ide_stopped;
}
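
/*
 * Illustrative sketch (not from the original source): how a reset path
 * might arm the 50 ms polling shown above -- set the deadline, mark the
 * hwgroup as polling and install reset_pollfunc() as the handler.  The
 * function name and WAIT_WORSTCASE as the overall timeout are assumptions.
 */
static ide_startstop_t example_start_reset_poll(ide_drive_t *drive)
{
	ide_hwgroup_t *hwgroup = HWGROUP(drive);

	hwgroup->polling = 1;
	hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
	ide_set_handler(drive, &reset_pollfunc, HZ/20, NULL);

	return ide_started;
}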
Example #8
0
/*
 * Verify that we are doing an approved SETFEATURES_XFER with respect
 * to the hardware being able to support the request.  Since some hardware
 * can improperly report capabilities, we check to see if the host adapter
 * in combination with the device (usually a disk) properly detects
 * and acknowledges each end of the ribbon.
 */
int ide_ata66_check (ide_drive_t *drive, byte cmd, byte nsect, byte feature)
{
    if ((cmd == WIN_SETFEATURES) &&
            (nsect > XFER_UDMA_2) &&
            (feature == SETFEATURES_XFER))
    {
        if (!HWIF(drive)->udma_four)
        {
            printk("%s: Speed warnings UDMA 3/4/5 is not functional.\n", HWIF(drive)->name);
            return 1;
        }
#ifndef CONFIG_IDEDMA_IVB
        if ((drive->id->hw_config & 0x6000) == 0)
        {
#else /* !CONFIG_IDEDMA_IVB */
        if (((drive->id->hw_config & 0x2000) == 0) ||
                ((drive->id->hw_config & 0x4000) == 0))
        {
#endif /* CONFIG_IDEDMA_IVB */
            printk("%s: Speed warnings UDMA 3/4/5 is not functional.\n", drive->name);
            return 1;
        }
    }
    return 0;
}

/*
 * Backside of the HDIO_DRIVE_CMD call for SETFEATURES_XFER.
 * Returns:
 * 1 : safe to update the drive->id DMA mode words.
 * 0 : do not update.
 */
int set_transfer (ide_drive_t *drive, byte cmd, byte nsect, byte feature)
{
    if ((cmd == WIN_SETFEATURES) &&
            (nsect >= XFER_SW_DMA_0) &&
            (feature == SETFEATURES_XFER) &&
            (drive->id->dma_ultra ||
             drive->id->dma_mword ||
             drive->id->dma_1word))
        return 1;

    return 0;
}

/*
 * All hosts that use the 80-conductor (80c) ribbon must use this check.
 */
byte eighty_ninty_three (ide_drive_t *drive)
{
    return ((byte) ((HWIF(drive)->udma_four) &&
#ifndef CONFIG_IDEDMA_IVB
                    (drive->id->hw_config & 0x4000) &&
#endif /* CONFIG_IDEDMA_IVB */
                    (drive->id->hw_config & 0x6000)) ? 1 : 0);
}
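
/*
 * Illustrative sketch (not from the original source): using
 * eighty_ninty_three() to clamp a requested transfer mode when no
 * 80-conductor cable is detected.  The helper name is hypothetical;
 * real kernels did this kind of clamping in their rate-filter paths.
 */
byte example_filter_xfer_mode (ide_drive_t *drive, byte mode)
{
    if (mode > XFER_UDMA_2 && !eighty_ninty_three(drive))
        mode = XFER_UDMA_2;	/* no 80c ribbon: cap at UDMA 2 (ATA-33) */
    return mode;
}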

/*
 * Similar to ide_wait_stat(), except it never calls ide_error() internally.
 * This is a kludge to handle the new ide_config_drive_speed() function,
 * and should not otherwise be used anywhere.  Eventually, the tuneprocs
 * should be updated to return ide_startstop_t, in which case we can get
 * rid of this abomination again.  :)   -ml
 *
 * It is gone..........
 *
 * const char *msg -- consider adding for verbose error reporting.
 */
int ide_config_drive_speed (ide_drive_t *drive, byte speed)
{
    ide_hwif_t *hwif = HWIF(drive);
    int	i, error = 1;
    byte stat;

#if defined(CONFIG_BLK_DEV_IDEDMA) && !defined(CONFIG_DMA_NONPCI)
    byte unit = (drive->select.b.unit & 0x01);
    outb(inb(hwif->dma_base+2) & ~(1<<(5+unit)), hwif->dma_base+2);
#endif /* (CONFIG_BLK_DEV_IDEDMA) && !(CONFIG_DMA_NONPCI) */

    /*
     * Don't use ide_wait_cmd here - it will
     * attempt to set_geometry and recalibrate,
     * but for some reason these don't work at
     * this point (lost interrupt).
     */
    /*
     * Select the drive, and issue the SETFEATURES command
     */
    disable_irq(hwif->irq);	/* disable_irq_nosync ?? */
    udelay(1);
    SELECT_DRIVE(HWIF(drive), drive);
    SELECT_MASK(HWIF(drive), drive, 0);
    udelay(1);
    if (IDE_CONTROL_REG)
        OUT_BYTE(drive->ctl | 2, IDE_CONTROL_REG);
    OUT_BYTE(speed, IDE_NSECTOR_REG);
    OUT_BYTE(SETFEATURES_XFER, IDE_FEATURE_REG);
    OUT_BYTE(WIN_SETFEATURES, IDE_COMMAND_REG);
    if ((IDE_CONTROL_REG) && (drive->quirk_list == 2))
        OUT_BYTE(drive->ctl, IDE_CONTROL_REG);
    udelay(1);
    /*
     * Wait for drive to become non-BUSY
     */
    if ((stat = GET_STAT()) & BUSY_STAT)
    {
        unsigned long flags, timeout;
        __save_flags(flags);	/* local CPU only */
        ide__sti();		/* local CPU only -- for jiffies */
        timeout = jiffies + WAIT_CMD;
        while ((stat = GET_STAT()) & BUSY_STAT)
        {
            if (0 < (signed long)(jiffies - timeout))
                break;
        }
        __restore_flags(flags); /* local CPU only */
    }

    /*
     * Allow status to settle, then read it again.
     * A few rare drives vastly violate the 400ns spec here,
     * so we'll wait up to 10usec for a "good" status
     * rather than expensively fail things immediately.
     * This fix courtesy of Matthew Faupel & Niccolo Rigacci.
     */
    for (i = 0; i < 10; i++)
    {
        udelay(1);
        if (OK_STAT((stat = GET_STAT()), DRIVE_READY, BUSY_STAT|DRQ_STAT|ERR_STAT))
        {
            error = 0;
            break;
        }
    }

    SELECT_MASK(HWIF(drive), drive, 0);

    enable_irq(hwif->irq);

    if (error)
    {
        (void) ide_dump_status(drive, "set_drive_speed_status", stat);
        return error;
    }

    drive->id->dma_ultra &= ~0xFF00;
    drive->id->dma_mword &= ~0x0F00;
    drive->id->dma_1word &= ~0x0F00;

#if defined(CONFIG_BLK_DEV_IDEDMA) && !defined(CONFIG_DMA_NONPCI)
    if (speed > XFER_PIO_4)
    {
        outb(inb(hwif->dma_base+2)|(1<<(5+unit)), hwif->dma_base+2);
    }
    else
    {
        outb(inb(hwif->dma_base+2) & ~(1<<(5+unit)), hwif->dma_base+2);
    }
#endif /* (CONFIG_BLK_DEV_IDEDMA) && !(CONFIG_DMA_NONPCI) */

    switch(speed)
    {
    case XFER_UDMA_7:
        drive->id->dma_ultra |= 0x8080;
        break;
    case XFER_UDMA_6:
        drive->id->dma_ultra |= 0x4040;
        break;
    case XFER_UDMA_5:
        drive->id->dma_ultra |= 0x2020;
        break;
    case XFER_UDMA_4:
        drive->id->dma_ultra |= 0x1010;
        break;
    case XFER_UDMA_3:
        drive->id->dma_ultra |= 0x0808;
        break;
    case XFER_UDMA_2:
        drive->id->dma_ultra |= 0x0404;
        break;
    case XFER_UDMA_1:
        drive->id->dma_ultra |= 0x0202;
        break;
    case XFER_UDMA_0:
        drive->id->dma_ultra |= 0x0101;
        break;
    case XFER_MW_DMA_2:
        drive->id->dma_mword |= 0x0404;
        break;
    case XFER_MW_DMA_1:
        drive->id->dma_mword |= 0x0202;
        break;
    case XFER_MW_DMA_0:
        drive->id->dma_mword |= 0x0101;
        break;
    case XFER_SW_DMA_2:
        drive->id->dma_1word |= 0x0404;
        break;
    case XFER_SW_DMA_1:
        drive->id->dma_1word |= 0x0202;
        break;
    case XFER_SW_DMA_0:
        drive->id->dma_1word |= 0x0101;
        break;
    default:
        break;
    }
    return error;
}
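
#if defined(CONFIG_BLK_DEV_IDEDMA) && !defined(CONFIG_DMA_NONPCI)
/*
 * Illustrative sketch (not from the original source): the inline
 * outb(inb(...)) fiddling above toggles the per-drive "DMA capable" flag
 * in the bus-master IDE status register (dma_base + 2), where bit 5
 * covers drive 0 and bit 6 covers drive 1.  The helper name is
 * hypothetical; the register layout matches the code above.
 */
static void example_set_host_dma (ide_hwif_t *hwif, byte unit, int on)
{
    byte dma_stat = inb(hwif->dma_base + 2);

    if (on)
        dma_stat |= 1 << (5 + unit);	/* mark drive <unit> DMA capable */
    else
        dma_stat &= ~(1 << (5 + unit));
    outb(dma_stat, hwif->dma_base + 2);
}
#endif /* (CONFIG_BLK_DEV_IDEDMA) && !(CONFIG_DMA_NONPCI) */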