Example 1
/*
 * atapi_reset_pollfunc() gets invoked to poll the interface for completion every 50ms
 * during an atapi drive reset operation. If the drive has not yet responded,
 * and we have not yet hit our maximum waiting time, then the timer is restarted
 * for another 50ms.
 */
static ide_startstop_t atapi_reset_pollfunc (ide_drive_t *drive)
{
	ide_hwgroup_t *hwgroup	= HWGROUP(drive);
	ide_hwif_t *hwif	= HWIF(drive);
	u8 stat;

	SELECT_DRIVE(drive);
	udelay (10);

	if (OK_STAT(stat = hwif->INB(IDE_STATUS_REG), 0, BUSY_STAT)) {
		printk("%s: ATAPI reset complete\n", drive->name);
	} else {
		if (time_before(jiffies, hwgroup->poll_timeout)) {
			if (HWGROUP(drive)->handler != NULL)
				BUG();
			ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20, NULL);
			/* continue polling */
			return ide_started;
		}
		/* end of polling */
		hwgroup->poll_timeout = 0;
		printk("%s: ATAPI reset timed-out, status=0x%02x\n",
				drive->name, stat);
		/* do it the old fashioned way */
		return do_reset1(drive, 1);
	}
	/* done polling */
	hwgroup->poll_timeout = 0;
	return ide_stopped;
}
Example 2
/* Note: We don't use the generic routine here because some of Apple's
 * controllers seem to be very sensitive about how things are done.
 * We should probably set the nIEN bit, but that's an example of something
 * that can cause the controller to hang under some circumstances when
 * done on the media-bay CD-ROM during boot. We do get occasional
 * spurious interrupts because of that.
 * --BenH
 */
static int
pmac_ide_do_setfeature(ide_drive_t *drive, byte command)
{
	unsigned long flags;
	int result = 1;

	save_flags(flags);
	cli();
	udelay(1);
	SELECT_DRIVE(HWIF(drive), drive);
	SELECT_MASK(HWIF(drive), drive, 0);
	udelay(1);
	if (wait_for_ready(drive)) {
		printk(KERN_ERR "pmac_ide_do_setfeature disk not ready before SET_FEATURE!\n");
		goto out;
	}
	OUT_BYTE(SETFEATURES_XFER, IDE_FEATURE_REG);
	OUT_BYTE(command, IDE_NSECTOR_REG);
	OUT_BYTE(WIN_SETFEATURES, IDE_COMMAND_REG);
	udelay(1);
	result = wait_for_ready(drive);
	if (result)
		printk(KERN_ERR "pmac_ide_do_setfeature disk not ready after SET_FEATURE !\n");
out:
	restore_flags(flags);
	
	return result;
}
Example 3
static void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
	struct request_pm_state *pm = rq->data;

	if (blk_pm_suspend_request(rq) &&
	    pm->pm_step == IDE_PM_START_SUSPEND)
		/* Mark drive blocked when starting the suspend sequence. */
		drive->dev_flags |= IDE_DFLAG_BLOCKED;
	else if (blk_pm_resume_request(rq) &&
		 pm->pm_step == IDE_PM_START_RESUME) {
		/* 
		 * The first thing we do on wakeup is to wait for BSY bit to
		 * go away (with a looong timeout) as a drive on this hwif may
		 * just be POSTing itself.
		 * We do that before even selecting as the "other" device on
		 * the bus may be broken enough to walk on our toes at this
		 * point.
		 */
		ide_hwif_t *hwif = drive->hwif;
		int rc;
#ifdef DEBUG_PM
		printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
#endif
		rc = ide_wait_not_busy(hwif, 35000);
		if (rc)
			printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
		SELECT_DRIVE(drive);
		hwif->tp_ops->set_irq(hwif, 1);
		rc = ide_wait_not_busy(hwif, 100000);
		if (rc)
			printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
	}
}
Example 4
/*
 * Similar to ide_wait_stat(), except it never calls ide_error internally.
 * This is a kludge to handle the new ide_config_drive_speed() function,
 * and should not otherwise be used anywhere.  Eventually, the tuneprocs
 * should be updated to return ide_startstop_t, in which case we can get
 * rid of this abomination again.  :)   -ml
 *
 * It is gone..........
 *
 * const char *msg == consider adding for verbose errors.
 */
int ide_config_drive_speed (ide_drive_t *drive, u8 speed)
{
	ide_hwif_t *hwif	= HWIF(drive);
	int	i, error	= 1;
	u8 stat;

//	while (HWGROUP(drive)->busy)
//		ide_delay_50ms();

#if defined(CONFIG_BLK_DEV_IDEDMA) && !defined(CONFIG_DMA_NONPCI)
	hwif->ide_dma_host_off(drive);
#endif /* (CONFIG_BLK_DEV_IDEDMA) && !(CONFIG_DMA_NONPCI) */

	/*
	 * Don't use ide_wait_cmd here - it will
	 * attempt to set_geometry and recalibrate,
	 * but for some reason these don't work at
	 * this point (lost interrupt).
	 */
	/*
	 * Select the drive, and issue the SETFEATURES command
	 */
	disable_irq_nosync(hwif->irq);
	udelay(1);
	SELECT_DRIVE(drive);
	SELECT_MASK(drive, 0);
	udelay(1);
	if (IDE_CONTROL_REG)
		hwif->OUTB(drive->ctl | 2, IDE_CONTROL_REG);
	hwif->OUTB(speed, IDE_NSECTOR_REG);
	hwif->OUTB(SETFEATURES_XFER, IDE_FEATURE_REG);
	hwif->OUTB(WIN_SETFEATURES, IDE_COMMAND_REG);
	if ((IDE_CONTROL_REG) && (drive->quirk_list == 2))
		hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
	udelay(1);
	/*
	 * Wait for drive to become non-BUSY
	 */
	if ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) {
		unsigned long flags, timeout;
		local_irq_set(flags);
		timeout = jiffies + WAIT_CMD;
		while ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) {
			if (time_after(jiffies, timeout))
				break;
		}
		local_irq_restore(flags);
	}

	/*
	 * Allow status to settle, then read it again.
	 * A few rare drives vastly violate the 400ns spec here,
	 * so we'll wait up to 10usec for a "good" status
	 * rather than expensively fail things immediately.
	 * This fix courtesy of Matthew Faupel & Niccolo Rigacci.
	 */
	for (i = 0; i < 10; i++) {
		udelay(1);
		if (OK_STAT((stat = hwif->INB(IDE_STATUS_REG)), DRIVE_READY, BUSY_STAT|DRQ_STAT|ERR_STAT)) {
			error = 0;
			break;
		}
	}

	SELECT_MASK(drive, 0);

	enable_irq(hwif->irq);

	if (error) {
		(void) ide_dump_status(drive, "set_drive_speed_status", stat);
		return error;
	}

	drive->id->dma_ultra &= ~0xFF00;
	drive->id->dma_mword &= ~0x0F00;
	drive->id->dma_1word &= ~0x0F00;

#if defined(CONFIG_BLK_DEV_IDEDMA) && !defined(CONFIG_DMA_NONPCI)
	if (speed >= XFER_SW_DMA_0)
		hwif->ide_dma_host_on(drive);
	else
		hwif->ide_dma_off_quietly(drive);
#endif /* (CONFIG_BLK_DEV_IDEDMA) && !(CONFIG_DMA_NONPCI) */

	switch(speed) {
		case XFER_UDMA_7:   drive->id->dma_ultra |= 0x8080; break;
		case XFER_UDMA_6:   drive->id->dma_ultra |= 0x4040; break;
		case XFER_UDMA_5:   drive->id->dma_ultra |= 0x2020; break;
		case XFER_UDMA_4:   drive->id->dma_ultra |= 0x1010; break;
		case XFER_UDMA_3:   drive->id->dma_ultra |= 0x0808; break;
		case XFER_UDMA_2:   drive->id->dma_ultra |= 0x0404; break;
		case XFER_UDMA_1:   drive->id->dma_ultra |= 0x0202; break;
		case XFER_UDMA_0:   drive->id->dma_ultra |= 0x0101; break;
		case XFER_MW_DMA_2: drive->id->dma_mword |= 0x0404; break;
		case XFER_MW_DMA_1: drive->id->dma_mword |= 0x0202; break;
		case XFER_MW_DMA_0: drive->id->dma_mword |= 0x0101; break;
		case XFER_SW_DMA_2: drive->id->dma_1word |= 0x0404; break;
		case XFER_SW_DMA_1: drive->id->dma_1word |= 0x0202; break;
		case XFER_SW_DMA_0: drive->id->dma_1word |= 0x0101; break;
		default: break;
	}
	if (!drive->init_speed)
		drive->init_speed = speed;
	drive->current_speed = speed;
	return error;
}
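
A usage note: per the header comment, ide_config_drive_speed() is meant to be reached from a chipset driver's speed-setting hook rather than called ad hoc. Below is a minimal sketch of such a caller; the function name mychip_tune_chipset() and the mychip_program_timings() helper are assumptions for illustration, not code from any example above.

/*
 * Sketch only: a hypothetical chipset "speedproc".  It programs the
 * host-side timing registers (placeholder helper) and then asks the
 * drive itself to switch modes with ide_config_drive_speed().
 */
static int mychip_tune_chipset(ide_drive_t *drive, u8 speed)
{
	/* mychip_program_timings() is assumed, not defined anywhere above */
	mychip_program_timings(HWIF(drive), drive, speed);

	/* non-zero means the drive rejected or timed out on SETFEATURES */
	return ide_config_drive_speed(drive, speed);
}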
Example 5
static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
{
	unsigned int unit;
	unsigned long flags;
	ide_hwif_t *hwif;
	ide_hwgroup_t *hwgroup;
	
	spin_lock_irqsave(&io_request_lock, flags);
	
	hwgroup = HWGROUP(drive);
	hwif = HWIF(drive);
	
	/* We must not reset with running handlers */
	if (hwgroup->handler != NULL)
		BUG();

	/* For an ATAPI device, first try an ATAPI SRST. */
	if (drive->media != ide_disk && !do_not_try_atapi) {
		pre_reset(drive);
		SELECT_DRIVE(drive);
		udelay (20);
		hwif->OUTB(WIN_SRST, IDE_COMMAND_REG);
		hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
		__ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20, NULL);
		spin_unlock_irqrestore(&io_request_lock, flags);
		return ide_started;
	}

	/*
	 * First, reset any device state data we were maintaining
	 * for any of the drives on this interface.
	 */
	for (unit = 0; unit < MAX_DRIVES; ++unit)
		pre_reset(&hwif->drives[unit]);

#if OK_TO_RESET_CONTROLLER
	if (!IDE_CONTROL_REG) {
		spin_unlock_irqrestore(&io_request_lock, flags);
		return ide_stopped;
	}

	/*
	 * Note that we also set nIEN while resetting the device,
	 * to mask unwanted interrupts from the interface during the reset.
	 * However, due to the design of PC hardware, this will cause an
	 * immediate interrupt due to the edge transition it produces.
	 * This single interrupt gives us a "fast poll" for drives that
	 * recover from reset very quickly, saving us the first 50ms wait time.
	 */
	/* set SRST and nIEN */
	hwif->OUTBSYNC(drive, drive->ctl|6,IDE_CONTROL_REG);
	/* more than enough time */
	udelay(10);
	if (drive->quirk_list == 2) {
		/* clear SRST and nIEN */
		hwif->OUTBSYNC(drive, drive->ctl, IDE_CONTROL_REG);
	} else {
		/* clear SRST, leave nIEN */
		hwif->OUTBSYNC(drive, drive->ctl|2, IDE_CONTROL_REG);
	}
	/* more than enough time */
	udelay(10);
	
	hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
	__ide_set_handler(drive, &reset_pollfunc, HZ/20, NULL);
	/*
	 * Some weird controllers like to reset themselves to a strange
	 * state when the disks are reset this way. At least, the Winbond
	 * 553 documentation says so.
	 */
	if (hwif->resetproc != NULL) {
		hwif->resetproc(drive);
	}

#endif	/* OK_TO_RESET_CONTROLLER */
	spin_unlock_irqrestore(&io_request_lock, flags);
	return ide_started;
}
Example 6
static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
{
	ide_startstop_t startstop;

	BUG_ON(!blk_rq_started(rq));

#ifdef DEBUG
	printk("%s: start_request: current=0x%08lx\n",
		drive->hwif->name, (unsigned long) rq);
#endif

	/* bail early if we've exceeded max_failures */
	if (drive->max_failures && (drive->failures > drive->max_failures)) {
		rq->cmd_flags |= REQ_FAILED;
		goto kill_rq;
	}

	if (blk_pm_request(rq))
		ide_check_pm_state(drive, rq);

	SELECT_DRIVE(drive);
	if (ide_wait_stat(&startstop, drive, drive->ready_stat,
			  ATA_BUSY | ATA_DRQ, WAIT_READY)) {
		printk(KERN_ERR "%s: drive not ready for command\n", drive->name);
		return startstop;
	}
	if (!drive->special.all) {
		struct ide_driver *drv;

		/*
		 * We reset the drive so we need to issue a SETFEATURES.
		 * Do it _after_ do_special() restored device parameters.
		 */
		if (drive->current_speed == 0xff)
			ide_config_drive_speed(drive, drive->desired_speed);

		if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
			return execute_drive_cmd(drive, rq);
		else if (blk_pm_request(rq)) {
			struct request_pm_state *pm = rq->data;
#ifdef DEBUG_PM
			printk("%s: start_power_step(step: %d)\n",
				drive->name, pm->pm_step);
#endif
			startstop = ide_start_power_step(drive, rq);
			if (startstop == ide_stopped &&
			    pm->pm_step == IDE_PM_COMPLETED)
				ide_complete_pm_request(drive, rq);
			return startstop;
		} else if (!rq->rq_disk && blk_special_request(rq))
			/*
			 * TODO: Once all ULDs have been modified to
			 * check for specific op codes rather than
			 * blindly accepting any special request, the
			 * check for ->rq_disk above may be replaced
			 * by a more suitable mechanism or even
			 * dropped entirely.
			 */
			return ide_special_rq(drive, rq);

		drv = *(struct ide_driver **)rq->rq_disk->private_data;

		return drv->do_request(drive, rq, rq->sector);
	}
	return do_special(drive);
kill_rq:
	ide_kill_rq(drive, rq);
	return ide_stopped;
}
Example 7
/*
 * Set a new transfer mode at the drive
 */
int cs5530_set_xfer_mode (ide_drive_t *drive, byte mode)
{
	int		i, error = 1;
	byte		stat;
	ide_hwif_t	*hwif = HWIF(drive);

	printk("%s: cs5530_set_xfer_mode(%s)\n", drive->name, strmode(mode));
	/*
	 * If this is a DMA mode setting, then turn off all DMA bits.
	 * We will set one of them back on afterwards, if all goes well.
	 *
	 * Not sure why this is needed (it looks very silly),
	 * but other IDE chipset drivers also do this fiddling.  ???? -ml
 	 */
	switch (mode) {
		case XFER_UDMA_4:
		case XFER_UDMA_3:
		case XFER_UDMA_2:
		case XFER_UDMA_1:
		case XFER_UDMA_0:
		case XFER_MW_DMA_2:
		case XFER_MW_DMA_1:
		case XFER_MW_DMA_0:
		case XFER_SW_DMA_2:
		case XFER_SW_DMA_1:
		case XFER_SW_DMA_0:
			drive->id->dma_ultra &= ~0xFF00;
			drive->id->dma_mword &= ~0x0F00;
			drive->id->dma_1word &= ~0x0F00;
	}

	/*
	 * Select the drive, and issue the SETFEATURES command
	 */
	disable_irq(hwif->irq);
	udelay(1);
	SELECT_DRIVE(HWIF(drive), drive);
	udelay(1);
	if (IDE_CONTROL_REG)
		OUT_BYTE(drive->ctl | 2, IDE_CONTROL_REG);
	OUT_BYTE(mode, IDE_NSECTOR_REG);
	OUT_BYTE(SETFEATURES_XFER, IDE_FEATURE_REG);
	OUT_BYTE(WIN_SETFEATURES, IDE_COMMAND_REG);
	udelay(1);	/* spec allows drive 400ns to assert "BUSY" */

	/*
	 * Wait for drive to become non-BUSY
	 */
	if ((stat = GET_STAT()) & BUSY_STAT) {
		unsigned long flags, timeout;
		__save_flags(flags);	/* local CPU only */
		ide__sti();		/* local CPU only -- for jiffies */
		timeout = jiffies + WAIT_CMD;
		while ((stat = GET_STAT()) & BUSY_STAT) {
			if (0 < (signed long)(jiffies - timeout))
				break;
		}
		__restore_flags(flags); /* local CPU only */
	}

	/*
	 * Allow status to settle, then read it again.
	 * A few rare drives vastly violate the 400ns spec here,
	 * so we'll wait up to 10usec for a "good" status
	 * rather than expensively fail things immediately.
	 */
	for (i = 0; i < 10; i++) {
		udelay(1);
		if (OK_STAT((stat = GET_STAT()), DRIVE_READY, BUSY_STAT|DRQ_STAT|ERR_STAT)) {
			error = 0;
			break;
		}
	}
	enable_irq(hwif->irq);

	/*
	 * Turn dma bit on if all is okay
	 */
	if (error) {
		(void) ide_dump_status(drive, "cs5530_set_xfer_mode", stat);
	} else {
		switch (mode) {
			case XFER_UDMA_4:   drive->id->dma_ultra |= 0x1010; break;
			case XFER_UDMA_3:   drive->id->dma_ultra |= 0x0808; break;
			case XFER_UDMA_2:   drive->id->dma_ultra |= 0x0404; break;
			case XFER_UDMA_1:   drive->id->dma_ultra |= 0x0202; break;
			case XFER_UDMA_0:   drive->id->dma_ultra |= 0x0101; break;
			case XFER_MW_DMA_2: drive->id->dma_mword |= 0x0404; break;
			case XFER_MW_DMA_1: drive->id->dma_mword |= 0x0202; break;
			case XFER_MW_DMA_0: drive->id->dma_mword |= 0x0101; break;
			case XFER_SW_DMA_2: drive->id->dma_1word |= 0x0404; break;
			case XFER_SW_DMA_1: drive->id->dma_1word |= 0x0202; break;
			case XFER_SW_DMA_0: drive->id->dma_1word |= 0x0101; break;
		}
	}
	return error;
}
Example 8
/*
 * Similar to ide_wait_stat(), except it never calls ide_error internally.
 * This is a kludge to handle the new ide_config_drive_speed() function,
 * and should not otherwise be used anywhere.  Eventually, the tuneprocs
 * should be updated to return ide_startstop_t, in which case we can get
 * rid of this abomination again.  :)   -ml
 *
 * It is gone..........
 *
 * const char *msg == consider adding for verbose errors.
 */
int ide_config_drive_speed (ide_drive_t *drive, u8 speed)
{
	ide_hwif_t *hwif	= HWIF(drive);
	int	i, error	= 1;
	u8 stat;

//	while (HWGROUP(drive)->busy)
//		msleep(50);

#ifdef CONFIG_BLK_DEV_IDEDMA
	if (hwif->ide_dma_check)	 /* check if host supports DMA */
		hwif->ide_dma_host_off(drive);
#endif

	/*
	 * Don't use ide_wait_cmd here - it will
	 * attempt to set_geometry and recalibrate,
	 * but for some reason these don't work at
	 * this point (lost interrupt).
	 */
	/*
	 * Select the drive, and issue the SETFEATURES command
	 */
	disable_irq_nosync(hwif->irq);
	
	/*
	 *	FIXME: we race against the running IRQ here if
	 *	this is called from non IRQ context. If we use
	 *	disable_irq() we hang on the error path. Work
	 *	is needed.
	 */
	 
	udelay(1);
	SELECT_DRIVE(drive);
	SELECT_MASK(drive, 0);
	udelay(1);
	if (IDE_CONTROL_REG)
		hwif->OUTB(drive->ctl | 2, IDE_CONTROL_REG);
	hwif->OUTB(speed, IDE_NSECTOR_REG);
	hwif->OUTB(SETFEATURES_XFER, IDE_FEATURE_REG);
	hwif->OUTB(WIN_SETFEATURES, IDE_COMMAND_REG);
	if ((IDE_CONTROL_REG) && (drive->quirk_list == 2))
		hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
	udelay(1);
	/*
	 * Wait for drive to become non-BUSY
	 */
	if ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) {
		unsigned long flags, timeout;
		local_irq_set(flags);
		timeout = jiffies + WAIT_CMD;
		while ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) {
			if (time_after(jiffies, timeout))
				break;
		}
		local_irq_restore(flags);
	}

	/*
	 * Allow status to settle, then read it again.
	 * A few rare drives vastly violate the 400ns spec here,
	 * so we'll wait up to 10usec for a "good" status
	 * rather than expensively fail things immediately.
	 * This fix courtesy of Matthew Faupel & Niccolo Rigacci.
	 */
	for (i = 0; i < 10; i++) {
		udelay(1);
		if (OK_STAT((stat = hwif->INB(IDE_STATUS_REG)), DRIVE_READY, BUSY_STAT|DRQ_STAT|ERR_STAT)) {
			error = 0;
			break;
		}
	}
	// Added by Frank Ting 95/8/24
	// due to BTC loader resume bug
	if (i == 10 && OK_STAT(stat, 0, BUSY_STAT|DRQ_STAT|ERR_STAT)) {
		error = 0;
		printk("%s: conditional pass\n", drive->name);
	}

	SELECT_MASK(drive, 0);

	enable_irq(hwif->irq);

	if (error) {
		(void) ide_dump_status(drive, "set_drive_speed_status", stat);
		return error;
	}

	drive->id->dma_ultra &= ~0xFF00;
	drive->id->dma_mword &= ~0x0F00;
	drive->id->dma_1word &= ~0x0F00;

#ifdef CONFIG_BLK_DEV_IDEDMA
	if (speed >= XFER_SW_DMA_0)
		hwif->ide_dma_host_on(drive);
	else if (hwif->ide_dma_check)	/* check if host supports DMA */
		hwif->ide_dma_off_quietly(drive);
#endif

	switch(speed) {
		case XFER_UDMA_7:   drive->id->dma_ultra |= 0x8080; break;
		case XFER_UDMA_6:   drive->id->dma_ultra |= 0x4040; break;
		case XFER_UDMA_5:   drive->id->dma_ultra |= 0x2020; break;
		case XFER_UDMA_4:   drive->id->dma_ultra |= 0x1010; break;
		case XFER_UDMA_3:   drive->id->dma_ultra |= 0x0808; break;
		case XFER_UDMA_2:   drive->id->dma_ultra |= 0x0404; break;
		case XFER_UDMA_1:   drive->id->dma_ultra |= 0x0202; break;
		case XFER_UDMA_0:   drive->id->dma_ultra |= 0x0101; break;
		case XFER_MW_DMA_2: drive->id->dma_mword |= 0x0404; break;
		case XFER_MW_DMA_1: drive->id->dma_mword |= 0x0202; break;
		case XFER_MW_DMA_0: drive->id->dma_mword |= 0x0101; break;
		case XFER_SW_DMA_2: drive->id->dma_1word |= 0x0404; break;
		case XFER_SW_DMA_1: drive->id->dma_1word |= 0x0202; break;
		case XFER_SW_DMA_0: drive->id->dma_1word |= 0x0101; break;
		default: break;
	}
	if (!drive->init_speed)
		drive->init_speed = speed;
	drive->current_speed = speed;
	return error;
}
Example 9
/*
 * do_reset1() attempts to recover a confused drive by resetting it.
 * Unfortunately, resetting a disk drive actually resets all devices on
 * the same interface, so it can really be thought of as resetting the
 * interface rather than resetting the drive.
 *
 * ATAPI devices have their own reset mechanism which allows them to be
 * individually reset without clobbering other devices on the same interface.
 *
 * Unfortunately, the IDE interface does not generate an interrupt to let
 * us know when the reset operation has finished, so we must poll for this.
 * Equally poor, though, is the fact that this may take a very long time to
 * complete (up to 30 seconds worst case).  So, instead of busy-waiting here for it,
 * we set a timer to poll at 50ms intervals.
 */
static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
{
	unsigned int unit;
	unsigned long flags;
	ide_hwif_t *hwif = HWIF(drive);
	ide_hwgroup_t *hwgroup;
	//Added by Frank 96/1/11
	struct venus_state *state = hwif->hwif_data;
	//***************************************
	
	spin_lock_irqsave(&ide_lock, flags);
	//hwif = HWIF(drive);
	hwgroup = HWGROUP(drive);

	/* We must not reset with running handlers */
	if (hwgroup->handler != NULL)
		BUG();

	/* For an ATAPI device, first try an ATAPI SRST. */
	if (drive->media != ide_disk && !do_not_try_atapi) {
		pre_reset(drive);

		SELECT_DRIVE(drive);
		udelay (20);
		// Modified by Frank 96/1/11 for hardware reset
		if (state) {
			hwif->OUTB(0x00, state->rst);
			udelay(25);	// RESET- asserted >25us
			hwif->OUTB(0x01, state->rst);
		} else {
			hwif->OUTB(WIN_SRST, IDE_COMMAND_REG);
		}
		hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE*4;
		// *********************************************
		
		hwgroup->polling = 1;
		__ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20, NULL);
		spin_unlock_irqrestore(&ide_lock, flags);
		return ide_started;
	}

	/*
	 * First, reset any device state data we were maintaining
	 * for any of the drives on this interface.
	 */
	for (unit = 0; unit < MAX_DRIVES; ++unit)
		pre_reset(&hwif->drives[unit]);

#if OK_TO_RESET_CONTROLLER
	if (!IDE_CONTROL_REG) {
		spin_unlock_irqrestore(&ide_lock, flags);
		return ide_stopped;
	}

	/*
	 * Note that we also set nIEN while resetting the device,
	 * to mask unwanted interrupts from the interface during the reset.
	 * However, due to the design of PC hardware, this will cause an
	 * immediate interrupt due to the edge transition it produces.
	 * This single interrupt gives us a "fast poll" for drives that
	 * recover from reset very quickly, saving us the first 50ms wait time.
	 */
	/* set SRST and nIEN */
	//hwif->OUTBSYNC(drive, drive->ctl|6,IDE_CONTROL_REG);
	/* more than enough time */
	printk("%s: ready to reset(x version)\n", drive->name);
	if ((readl((void __iomem *) 0xb801a200) & 0xffff) != 0x1282) {
		unsigned long value = readl((void __iomem *) 0xb801b034);

		if ((strcmp(drive->name, "hda") == 0) || (strcmp(drive->name, "hdb") == 0))
			value |= 0x01;
		else
			value |= 0x04;
		writel(value, (void __iomem *) 0xb801b034);
		udelay(100);
	}

	hwif->OUTB(0x00, state->rst);
	udelay(10);
	hwif->OUTB(0x01, state->rst);
	//if (drive->quirk_list == 2) {
		/* clear SRST and nIEN */
	//	hwif->OUTBSYNC(drive, drive->ctl, IDE_CONTROL_REG);
	//} else {
		/* clear SRST, leave nIEN */
	//	hwif->OUTBSYNC(drive, drive->ctl|2, IDE_CONTROL_REG);
	//}
	/* more than enough time */
	udelay(10);
	hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
	hwgroup->polling = 1;
	__ide_set_handler(drive, &reset_pollfunc, HZ/20, NULL);
	/*
	 * Some weird controllers like to reset themselves to a strange
	 * state when the disks are reset this way. At least, the Winbond
	 * 553 documentation says so.
	 */
	//if (hwif->resetproc != NULL) {
	//	hwif->resetproc(drive);
	//}
	
#endif	/* OK_TO_RESET_CONTROLLER */

	spin_unlock_irqrestore(&ide_lock, flags);
	return ide_started;
}
Example 10
/*
 * Verify that we are doing an approved SETFEATURES_XFER with respect
 * to the hardware being able to support the request.  Since some hardware
 * can improperly report capabilities, we check whether the host adapter,
 * in combination with the device (usually a disk), properly detects
 * and acknowledges each end of the ribbon.
 */
int ide_ata66_check (ide_drive_t *drive, byte cmd, byte nsect, byte feature)
{
    if ((cmd == WIN_SETFEATURES) &&
            (nsect > XFER_UDMA_2) &&
            (feature == SETFEATURES_XFER))
    {
        if (!HWIF(drive)->udma_four)
        {
            printk("%s: Speed warnings UDMA 3/4/5 is not functional.\n", HWIF(drive)->name);
            return 1;
        }
#ifndef CONFIG_IDEDMA_IVB
        if ((drive->id->hw_config & 0x6000) == 0)
        {
#else /* !CONFIG_IDEDMA_IVB */
        if (((drive->id->hw_config & 0x2000) == 0) ||
                ((drive->id->hw_config & 0x4000) == 0))
        {
#endif /* CONFIG_IDEDMA_IVB */
            printk("%s: Speed warnings UDMA 3/4/5 is not functional.\n", drive->name);
            return 1;
        }
    }
    return 0;
}
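
A hedged sketch of how ide_ata66_check() might sit in front of an actual mode change on the HDIO_DRIVE_CMD path; issue_xfer_mode() and its error convention are assumptions, not code from this file.

/*
 * Sketch only: refuse a SETFEATURES_XFER above UDMA 2 when
 * ide_ata66_check() warns, otherwise let the speed change proceed.
 */
static int issue_xfer_mode(ide_drive_t *drive, byte mode)
{
	if (ide_ata66_check(drive, WIN_SETFEATURES, mode, SETFEATURES_XFER))
		return 1;	/* warned: cable/drive cannot support the request */

	return ide_config_drive_speed(drive, mode);
}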

/*
 * Back end of an HDIO_DRIVE_CMD call of SETFEATURES_XFER.
 * 1 : Safe to update the drive->id DMA fields.
 * 0 : Not allowed.
 */
int set_transfer (ide_drive_t *drive, byte cmd, byte nsect, byte feature)
{
    if ((cmd == WIN_SETFEATURES) &&
            (nsect >= XFER_SW_DMA_0) &&
            (feature == SETFEATURES_XFER) &&
            (drive->id->dma_ultra ||
             drive->id->dma_mword ||
             drive->id->dma_1word))
        return 1;

    return 0;
}

/*
 *  All hosts that use the 80-conductor ribbon must use this check.
 */
byte eighty_ninty_three (ide_drive_t *drive)
{
    return ((byte) ((HWIF(drive)->udma_four) &&
#ifndef CONFIG_IDEDMA_IVB
                    (drive->id->hw_config & 0x4000) &&
#endif /* CONFIG_IDEDMA_IVB */
                    (drive->id->hw_config & 0x6000)) ? 1 : 0);
}
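
set_transfer() and eighty_ninty_three() are meant to work together on the back end of the ioctl: the first says whether the completed command was a DMA-mode SETFEATURES_XFER worth recording, the second whether modes above UDMA 2 can be trusted on this cable/drive pair. A minimal sketch under those assumptions follows; the wrapper name record_new_xfer_mode() is hypothetical.

/*
 * Sketch only: after a successful SETFEATURES_XFER, decide whether the
 * drive's bookkeeping may be updated, refusing modes above UDMA 2 when
 * the 80-wire check fails.
 */
static void record_new_xfer_mode(ide_drive_t *drive,
				 byte cmd, byte nsect, byte feature)
{
	if (!set_transfer(drive, cmd, nsect, feature))
		return;		/* not a DMA-capable SETFEATURES_XFER */

	if (nsect > XFER_UDMA_2 && !eighty_ninty_three(drive))
		return;		/* don't trust UDMA 3/4/5 on this setup */

	/* same bookkeeping style as the switch in ide_config_drive_speed() */
	drive->current_speed = nsect;
}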

/*
 * Similar to ide_wait_stat(), except it never calls ide_error internally.
 * This is a kludge to handle the new ide_config_drive_speed() function,
 * and should not otherwise be used anywhere.  Eventually, the tuneprocs
 * should be updated to return ide_startstop_t, in which case we can get
 * rid of this abomination again.  :)   -ml
 *
 * It is gone..........
 *
 * const char *msg == consider adding for verbose errors.
 */
int ide_config_drive_speed (ide_drive_t *drive, byte speed)
{
    ide_hwif_t *hwif = HWIF(drive);
    int	i, error = 1;
    byte stat;

#if defined(CONFIG_BLK_DEV_IDEDMA) && !defined(CONFIG_DMA_NONPCI)
    byte unit = (drive->select.b.unit & 0x01);
    outb(inb(hwif->dma_base+2) & ~(1<<(5+unit)), hwif->dma_base+2);
#endif /* (CONFIG_BLK_DEV_IDEDMA) && !(CONFIG_DMA_NONPCI) */

    /*
     * Don't use ide_wait_cmd here - it will
     * attempt to set_geometry and recalibrate,
     * but for some reason these don't work at
     * this point (lost interrupt).
     */
    /*
     * Select the drive, and issue the SETFEATURES command
     */
    disable_irq(hwif->irq);	/* disable_irq_nosync ?? */
    udelay(1);
    SELECT_DRIVE(HWIF(drive), drive);
    SELECT_MASK(HWIF(drive), drive, 0);
    udelay(1);
    if (IDE_CONTROL_REG)
        OUT_BYTE(drive->ctl | 2, IDE_CONTROL_REG);
    OUT_BYTE(speed, IDE_NSECTOR_REG);
    OUT_BYTE(SETFEATURES_XFER, IDE_FEATURE_REG);
    OUT_BYTE(WIN_SETFEATURES, IDE_COMMAND_REG);
    if ((IDE_CONTROL_REG) && (drive->quirk_list == 2))
        OUT_BYTE(drive->ctl, IDE_CONTROL_REG);
    udelay(1);
    /*
     * Wait for drive to become non-BUSY
     */
    if ((stat = GET_STAT()) & BUSY_STAT)
    {
        unsigned long flags, timeout;
        __save_flags(flags);	/* local CPU only */
        ide__sti();		/* local CPU only -- for jiffies */
        timeout = jiffies + WAIT_CMD;
        while ((stat = GET_STAT()) & BUSY_STAT)
        {
            if (0 < (signed long)(jiffies - timeout))
                break;
        }
        __restore_flags(flags); /* local CPU only */
    }

    /*
     * Allow status to settle, then read it again.
     * A few rare drives vastly violate the 400ns spec here,
     * so we'll wait up to 10usec for a "good" status
     * rather than expensively fail things immediately.
     * This fix courtesy of Matthew Faupel & Niccolo Rigacci.
     */
    for (i = 0; i < 10; i++)
    {
        udelay(1);
        if (OK_STAT((stat = GET_STAT()), DRIVE_READY, BUSY_STAT|DRQ_STAT|ERR_STAT))
        {
            error = 0;
            break;
        }
    }

    SELECT_MASK(HWIF(drive), drive, 0);

    enable_irq(hwif->irq);

    if (error)
    {
        (void) ide_dump_status(drive, "set_drive_speed_status", stat);
        return error;
    }

    drive->id->dma_ultra &= ~0xFF00;
    drive->id->dma_mword &= ~0x0F00;
    drive->id->dma_1word &= ~0x0F00;

#if defined(CONFIG_BLK_DEV_IDEDMA) && !defined(CONFIG_DMA_NONPCI)
    if (speed > XFER_PIO_4)
    {
        outb(inb(hwif->dma_base+2)|(1<<(5+unit)), hwif->dma_base+2);
    }
    else
    {
        outb(inb(hwif->dma_base+2) & ~(1<<(5+unit)), hwif->dma_base+2);
    }
#endif /* (CONFIG_BLK_DEV_IDEDMA) && !(CONFIG_DMA_NONPCI) */

    switch(speed)
    {
    case XFER_UDMA_7:
        drive->id->dma_ultra |= 0x8080;
        break;
    case XFER_UDMA_6:
        drive->id->dma_ultra |= 0x4040;
        break;
    case XFER_UDMA_5:
        drive->id->dma_ultra |= 0x2020;
        break;
    case XFER_UDMA_4:
        drive->id->dma_ultra |= 0x1010;
        break;
    case XFER_UDMA_3:
        drive->id->dma_ultra |= 0x0808;
        break;
    case XFER_UDMA_2:
        drive->id->dma_ultra |= 0x0404;
        break;
    case XFER_UDMA_1:
        drive->id->dma_ultra |= 0x0202;
        break;
    case XFER_UDMA_0:
        drive->id->dma_ultra |= 0x0101;
        break;
    case XFER_MW_DMA_2:
        drive->id->dma_mword |= 0x0404;
        break;
    case XFER_MW_DMA_1:
        drive->id->dma_mword |= 0x0202;
        break;
    case XFER_MW_DMA_0:
        drive->id->dma_mword |= 0x0101;
        break;
    case XFER_SW_DMA_2:
        drive->id->dma_1word |= 0x0404;
        break;
    case XFER_SW_DMA_1:
        drive->id->dma_1word |= 0x0202;
        break;
    case XFER_SW_DMA_0:
        drive->id->dma_1word |= 0x0101;
        break;
    default:
        break;
    }
    return error;
}