Example #1
0
int ide_driveid_update(ide_drive_t *drive)
{
	u16 *id;
	int rc;

	id = kmalloc(SECTOR_SIZE, GFP_ATOMIC);
	if (id == NULL)
		return 0;

	SELECT_MASK(drive, 1);
	rc = ide_dev_read_id(drive, ATA_CMD_ID_ATA, id, 1);
	SELECT_MASK(drive, 0);

	if (rc)
		goto out_err;

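	/* Copy just the transfer-mode words from the fresh IDENTIFY data
	 * into the cached drive->id. */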
	drive->id[ATA_ID_UDMA_MODES]  = id[ATA_ID_UDMA_MODES];
	drive->id[ATA_ID_MWDMA_MODES] = id[ATA_ID_MWDMA_MODES];
	drive->id[ATA_ID_SWDMA_MODES] = id[ATA_ID_SWDMA_MODES];
	drive->id[ATA_ID_CFA_MODES]   = id[ATA_ID_CFA_MODES];
	

	kfree(id);

	return 1;
out_err:
	if (rc == 2)
		printk(KERN_ERR "%s: %s: bad status\n", drive->name, __func__);
	kfree(id);
	return 0;
}
Example #2
0
int ide_driveid_update(ide_drive_t *drive)
{
	u16 *id;
	int rc;

	id = kmalloc(SECTOR_SIZE, GFP_ATOMIC);
	if (id == NULL)
		return 0;

	SELECT_MASK(drive, 1);
	rc = ide_dev_read_id(drive, ATA_CMD_ID_ATA, id, 1);
	SELECT_MASK(drive, 0);

	if (rc)
		goto out_err;

	drive->id[ATA_ID_UDMA_MODES]  = id[ATA_ID_UDMA_MODES];
	drive->id[ATA_ID_MWDMA_MODES] = id[ATA_ID_MWDMA_MODES];
	drive->id[ATA_ID_SWDMA_MODES] = id[ATA_ID_SWDMA_MODES];
	drive->id[ATA_ID_CFA_MODES]   = id[ATA_ID_CFA_MODES];
	/* anything more ? */

	kfree(id);

	return 1;
out_err:
	if (rc == 2)
		printk(KERN_ERR "%s: %s: bad status\n", drive->name, __func__);
	kfree(id);
	return 0;
}
Example #3
0
/*
 * Update the drive's identify data (re-read it for a possible DMA mode change).
 */
int ide_driveid_update (ide_drive_t *drive)
{
    /*
     * Re-read drive->id for possible DMA mode
     * change (copied from ide-probe.c)
     */
    struct hd_driveid *id;
    unsigned long timeout, flags;

    SELECT_MASK(HWIF(drive), drive, 1);
    if (IDE_CONTROL_REG)
        OUT_BYTE(drive->ctl,IDE_CONTROL_REG);
    ide_delay_50ms();
    OUT_BYTE(WIN_IDENTIFY, IDE_COMMAND_REG);
    timeout = jiffies + WAIT_WORSTCASE;
    do
    {
        if (0 < (signed long)(jiffies - timeout))
        {
            SELECT_MASK(HWIF(drive), drive, 0);
            return 0;	/* drive timed-out */
        }
        ide_delay_50ms();	/* give drive a breather */
    }
    while (IN_BYTE(IDE_ALTSTATUS_REG) & BUSY_STAT);
    ide_delay_50ms();	/* wait for IRQ and DRQ_STAT */
    if (!OK_STAT(GET_STAT(),DRQ_STAT,BAD_R_STAT))
    {
        SELECT_MASK(HWIF(drive), drive, 0);
        printk("%s: CHECK for good STATUS\n", drive->name);
        return 0;
    }
    __save_flags(flags);	/* local CPU only */
    __cli();		/* local CPU only; some systems need this */
    SELECT_MASK(HWIF(drive), drive, 0);
    id = kmalloc(SECTOR_WORDS*4, GFP_ATOMIC);
    if (!id)
    {
        __restore_flags(flags);	/* local CPU only */
        return 0;
    }
    ide_input_data(drive, id, SECTOR_WORDS);
    (void) GET_STAT();	/* clear drive IRQ */
    ide__sti();		/* local CPU only */
    __restore_flags(flags);	/* local CPU only */
    ide_fix_driveid(id);
    if (id)
    {
        drive->id->dma_ultra = id->dma_ultra;
        drive->id->dma_mword = id->dma_mword;
        drive->id->dma_1word = id->dma_1word;
        /* anything more ? */
        kfree(id);
    }

    return 1;
}
Example #4
0
/* Note: We don't use the generic routine here because some of Apple's
 * controllers seem to be very sensitive about how things are done.
 * We should probably set the NIEN bit, but that's an example of a thing
 * that can cause the controller to hang under some circumstances when
 * done on the media-bay CD-ROM during boot. We do get occasional
 * spurious interrupts because of that.
 * --BenH
 */
static int
pmac_ide_do_setfeature(ide_drive_t *drive, byte command)
{
	unsigned long flags;
	int result = 1;

	save_flags(flags);
	cli();
	udelay(1);
	SELECT_DRIVE(HWIF(drive), drive);
	SELECT_MASK(HWIF(drive), drive, 0);
	udelay(1);
	if(wait_for_ready(drive)) {
		printk(KERN_ERR "pmac_ide_do_setfeature disk not ready before SET_FEATURE!\n");
		goto out;
	}
	OUT_BYTE(SETFEATURES_XFER, IDE_FEATURE_REG);
	OUT_BYTE(command, IDE_NSECTOR_REG);
	OUT_BYTE(WIN_SETFEATURES, IDE_COMMAND_REG);
	udelay(1);
	result = wait_for_ready(drive);
	if (result)
		printk(KERN_ERR "pmac_ide_do_setfeature disk not ready after SET_FEATURE !\n");
out:
	restore_flags(flags);
	
	return result;
}
Example #5
0
ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
{
	ide_hwif_t *hwif	= HWIF(drive);
	struct ide_taskfile *tf = &task->tf;
	ide_handler_t *handler = NULL;
	const struct ide_tp_ops *tp_ops = hwif->tp_ops;
	const struct ide_dma_ops *dma_ops = hwif->dma_ops;

	if (task->data_phase == TASKFILE_MULTI_IN ||
	    task->data_phase == TASKFILE_MULTI_OUT) {
		if (!drive->mult_count) {
			printk(KERN_ERR "%s: multimode not set!\n",
					drive->name);
			return ide_stopped;
		}
	}

	if (task->tf_flags & IDE_TFLAG_FLAGGED)
		task->tf_flags |= IDE_TFLAG_FLAGGED_SET_IN_FLAGS;

	if ((task->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) {
		ide_tf_dump(drive->name, tf);
		tp_ops->set_irq(hwif, 1);
		SELECT_MASK(drive, 0);
		tp_ops->tf_load(drive, task);
	}

	switch (task->data_phase) {
	case TASKFILE_MULTI_OUT:
	case TASKFILE_OUT:
		tp_ops->exec_command(hwif, tf->command);
		ndelay(400);	/* FIXME */
		return pre_task_out_intr(drive, task->rq);
	case TASKFILE_MULTI_IN:
	case TASKFILE_IN:
		handler = task_in_intr;
		/* fall-through */
	case TASKFILE_NO_DATA:
		if (handler == NULL)
			handler = task_no_data_intr;
		/* WIN_{SPECIFY,RESTORE,SETMULT} use custom handlers */
		if (task->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) {
			switch (tf->command) {
			case WIN_SPECIFY: handler = set_geometry_intr;	break;
			case WIN_RESTORE: handler = recal_intr;		break;
			case WIN_SETMULT: handler = set_multmode_intr;	break;
			}
		}
		ide_execute_command(drive, tf->command, handler,
				    WAIT_WORSTCASE, NULL);
		return ide_started;
	default:
		if (drive->using_dma == 0 || dma_ops->dma_setup(drive))
			return ide_stopped;
		dma_ops->dma_exec_cmd(drive, tf->command);
		dma_ops->dma_start(drive);
		return ide_started;
	}
}
Example #6
0
ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_taskfile *tf = &task->tf;
	ide_handler_t *handler = NULL;
	const struct ide_tp_ops *tp_ops = hwif->tp_ops;
	const struct ide_dma_ops *dma_ops = hwif->dma_ops;

	if (task->data_phase == TASKFILE_MULTI_IN ||
	    task->data_phase == TASKFILE_MULTI_OUT) {
		if (!drive->mult_count) {
			printk(KERN_ERR "%s: multimode not set!\n",
					drive->name);
			return ide_stopped;
		}
	}

	if (task->tf_flags & IDE_TFLAG_FLAGGED)
		task->tf_flags |= IDE_TFLAG_FLAGGED_SET_IN_FLAGS;

	memcpy(&hwif->task, task, sizeof(*task));

	if ((task->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) {
		ide_tf_dump(drive->name, tf);
		tp_ops->set_irq(hwif, 1);
		SELECT_MASK(drive, 0);
		tp_ops->tf_load(drive, task);
	}

	switch (task->data_phase) {
	case TASKFILE_MULTI_OUT:
	case TASKFILE_OUT:
		tp_ops->exec_command(hwif, tf->command);
		ndelay(400);	/* FIXME */
		return pre_task_out_intr(drive, task->rq);
	case TASKFILE_MULTI_IN:
	case TASKFILE_IN:
		handler = task_in_intr;
		/* fall-through */
	case TASKFILE_NO_DATA:
		if (handler == NULL)
			handler = task_no_data_intr;
		ide_execute_command(drive, tf->command, handler,
				    WAIT_WORSTCASE, NULL);
		return ide_started;
	default:
		if ((drive->dev_flags & IDE_DFLAG_USING_DMA) == 0 ||
		    dma_ops->dma_setup(drive))
			return ide_stopped;
		dma_ops->dma_exec_cmd(drive, tf->command);
		dma_ops->dma_start(drive);
		return ide_started;
	}
}
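
For orientation, the simplest way to exercise this function is a no-data command: the caller fills in an ide_task_t, picks TASKFILE_NO_DATA, and lets task_no_data_intr complete it. A minimal sketch under that assumption (hypothetical helper name; a real caller such as ide_raw_taskfile() also sets up the request/queue plumbing that is omitted here):

static ide_startstop_t hypothetical_issue_flush(ide_drive_t *drive)
{
	ide_task_t task;

	/* Build a no-data taskfile command (FLUSH CACHE) by hand. */
	memset(&task, 0, sizeof(task));
	task.tf.command = WIN_FLUSHCACHE;
	task.tf_flags   = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE;
	task.data_phase = TASKFILE_NO_DATA;

	return do_rw_taskfile(drive, &task);
}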
Example #7
0
void ide_tf_load(ide_drive_t *drive, ide_task_t *task)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_taskfile *tf = &task->tf;
	u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;

	if (task->tf_flags & IDE_TFLAG_FLAGGED)
		HIHI = 0xFF;

#ifdef DEBUG
	printk("%s: tf: feat 0x%02x nsect 0x%02x lbal 0x%02x "
		"lbam 0x%02x lbah 0x%02x dev 0x%02x cmd 0x%02x\n",
		drive->name, tf->feature, tf->nsect, tf->lbal,
		tf->lbam, tf->lbah, tf->device, tf->command);
	printk("%s: hob: nsect 0x%02x lbal 0x%02x "
		"lbam 0x%02x lbah 0x%02x\n",
		drive->name, tf->hob_nsect, tf->hob_lbal,
		tf->hob_lbam, tf->hob_lbah);
#endif

	ide_set_irq(drive, 1);

	if ((task->tf_flags & IDE_TFLAG_NO_SELECT_MASK) == 0)
		SELECT_MASK(drive, 0);

	if (task->tf_flags & IDE_TFLAG_OUT_DATA)
		hwif->OUTW((tf->hob_data << 8) | tf->data, IDE_DATA_REG);

	if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
		hwif->OUTB(tf->hob_feature, IDE_FEATURE_REG);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
		hwif->OUTB(tf->hob_nsect, IDE_NSECTOR_REG);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
		hwif->OUTB(tf->hob_lbal, IDE_SECTOR_REG);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
		hwif->OUTB(tf->hob_lbam, IDE_LCYL_REG);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
		hwif->OUTB(tf->hob_lbah, IDE_HCYL_REG);

	if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
		hwif->OUTB(tf->feature, IDE_FEATURE_REG);
	if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
		hwif->OUTB(tf->nsect, IDE_NSECTOR_REG);
	if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
		hwif->OUTB(tf->lbal, IDE_SECTOR_REG);
	if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
		hwif->OUTB(tf->lbam, IDE_LCYL_REG);
	if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
		hwif->OUTB(tf->lbah, IDE_HCYL_REG);

	if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
		hwif->OUTB((tf->device & HIHI) | drive->select.all, IDE_SELECT_REG);
}
Example #8
0
void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma)
{
	ide_hwif_t *hwif = drive->hwif;
	ide_task_t task;

	memset(&task, 0, sizeof(task));
	task.tf_flags = IDE_TFLAG_OUT_LBAH | IDE_TFLAG_OUT_LBAM |
			IDE_TFLAG_OUT_FEATURE | tf_flags;
	task.tf.feature = dma;		/* Use PIO/DMA */
	task.tf.lbam    = bcount & 0xff;
	task.tf.lbah    = (bcount >> 8) & 0xff;

	ide_tf_dump(drive->name, &task.tf);
	hwif->tp_ops->set_irq(hwif, 1);
	SELECT_MASK(drive, 0);
	hwif->tp_ops->tf_load(drive, &task);
}
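
This helper only programs the byte-count registers (LBA mid/high) and the PIO/DMA feature bit; issuing the PACKET command itself is left to the caller. A heavily simplified sketch of that pairing (hypothetical function name; real ATAPI callers also wait for DRQ and then transfer the command packet):

static void hypothetical_start_packet_cmd(ide_drive_t *drive, u16 bcount, u8 dma)
{
	ide_hwif_t *hwif = drive->hwif;

	/* Advertise the per-DRQ byte count and the DMA bit, then send the
	 * ATA PACKET opcode; the packet bytes follow once DRQ is asserted. */
	ide_pktcmd_tf_load(drive, IDE_TFLAG_OUT_DEVICE, bcount, dma);
	hwif->tp_ops->exec_command(hwif, ATA_CMD_PACKET);
}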
Example #9
0
/*
 * Similar to ide_wait_stat(), except it never calls ide_error internally.
 * This is a kludge to handle the new ide_config_drive_speed() function,
 * and should not otherwise be used anywhere.  Eventually, the tuneprocs
 * should be updated to return ide_startstop_t, in which case we can get
 * rid of this abomination again.  :)   -ml
 *
 * It is gone..........
 *
 * const char *msg == consider adding for verbose errors.
 */
int ide_config_drive_speed (ide_drive_t *drive, u8 speed)
{
	ide_hwif_t *hwif	= HWIF(drive);
	int	i, error	= 1;
	u8 stat;

//	while (HWGROUP(drive)->busy)
//		ide_delay_50ms();

#if defined(CONFIG_BLK_DEV_IDEDMA) && !defined(CONFIG_DMA_NONPCI)
	hwif->ide_dma_host_off(drive);
#endif /* (CONFIG_BLK_DEV_IDEDMA) && !(CONFIG_DMA_NONPCI) */

	/*
	 * Don't use ide_wait_cmd here - it will
	 * attempt to set_geometry and recalibrate,
	 * but for some reason these don't work at
	 * this point (lost interrupt).
	 */
        /*
         * Select the drive, and issue the SETFEATURES command
         */
	disable_irq_nosync(hwif->irq);
	udelay(1);
	SELECT_DRIVE(drive);
	SELECT_MASK(drive, 0);
	udelay(1);
	if (IDE_CONTROL_REG)
		hwif->OUTB(drive->ctl | 2, IDE_CONTROL_REG);
	hwif->OUTB(speed, IDE_NSECTOR_REG);
	hwif->OUTB(SETFEATURES_XFER, IDE_FEATURE_REG);
	hwif->OUTB(WIN_SETFEATURES, IDE_COMMAND_REG);
	if ((IDE_CONTROL_REG) && (drive->quirk_list == 2))
		hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
	udelay(1);
	/*
	 * Wait for drive to become non-BUSY
	 */
	if ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) {
		unsigned long flags, timeout;
		local_irq_set(flags);
		timeout = jiffies + WAIT_CMD;
		while ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) {
			if (time_after(jiffies, timeout))
				break;
		}
		local_irq_restore(flags);
	}

	/*
	 * Allow status to settle, then read it again.
	 * A few rare drives vastly violate the 400ns spec here,
	 * so we'll wait up to 10usec for a "good" status
	 * rather than expensively fail things immediately.
	 * This fix courtesy of Matthew Faupel & Niccolo Rigacci.
	 */
	for (i = 0; i < 10; i++) {
		udelay(1);
		if (OK_STAT((stat = hwif->INB(IDE_STATUS_REG)), DRIVE_READY, BUSY_STAT|DRQ_STAT|ERR_STAT)) {
			error = 0;
			break;
		}
	}

	SELECT_MASK(drive, 0);

	enable_irq(hwif->irq);

	if (error) {
		(void) ide_dump_status(drive, "set_drive_speed_status", stat);
		return error;
	}

	drive->id->dma_ultra &= ~0xFF00;
	drive->id->dma_mword &= ~0x0F00;
	drive->id->dma_1word &= ~0x0F00;

#if defined(CONFIG_BLK_DEV_IDEDMA) && !defined(CONFIG_DMA_NONPCI)
	if (speed >= XFER_SW_DMA_0)
		hwif->ide_dma_host_on(drive);
	else
		hwif->ide_dma_off_quietly(drive);
#endif /* (CONFIG_BLK_DEV_IDEDMA) && !(CONFIG_DMA_NONPCI) */

	switch(speed) {
		case XFER_UDMA_7:   drive->id->dma_ultra |= 0x8080; break;
		case XFER_UDMA_6:   drive->id->dma_ultra |= 0x4040; break;
		case XFER_UDMA_5:   drive->id->dma_ultra |= 0x2020; break;
		case XFER_UDMA_4:   drive->id->dma_ultra |= 0x1010; break;
		case XFER_UDMA_3:   drive->id->dma_ultra |= 0x0808; break;
		case XFER_UDMA_2:   drive->id->dma_ultra |= 0x0404; break;
		case XFER_UDMA_1:   drive->id->dma_ultra |= 0x0202; break;
		case XFER_UDMA_0:   drive->id->dma_ultra |= 0x0101; break;
		case XFER_MW_DMA_2: drive->id->dma_mword |= 0x0404; break;
		case XFER_MW_DMA_1: drive->id->dma_mword |= 0x0202; break;
		case XFER_MW_DMA_0: drive->id->dma_mword |= 0x0101; break;
		case XFER_SW_DMA_2: drive->id->dma_1word |= 0x0404; break;
		case XFER_SW_DMA_1: drive->id->dma_1word |= 0x0202; break;
		case XFER_SW_DMA_0: drive->id->dma_1word |= 0x0101; break;
		default: break;
	}
	if (!drive->init_speed)
		drive->init_speed = speed;
	drive->current_speed = speed;
	return error;
}
Example #10
0
int ide_ata66_check (ide_drive_t *drive, ide_task_t *args)
{
	/* SATA has no cable restrictions */
	if (HWIF(drive)->sata)
		return 0;
		
	if ((args->tfRegister[IDE_COMMAND_OFFSET] == WIN_SETFEATURES) &&
	    (args->tfRegister[IDE_SECTOR_OFFSET] > XFER_UDMA_2) &&
	    (args->tfRegister[IDE_FEATURE_OFFSET] == SETFEATURES_XFER)) {
#ifndef CONFIG_IDEDMA_IVB
		if ((drive->id->hw_config & 0x6000) == 0) {
#else /* !CONFIG_IDEDMA_IVB */
		if (((drive->id->hw_config & 0x2000) == 0) ||
		    ((drive->id->hw_config & 0x4000) == 0)) {
#endif /* CONFIG_IDEDMA_IVB */
			printk("%s: Speed warnings UDMA 3/4/5 is not "
				"functional.\n", drive->name);
			return 1;
		}
		if (!HWIF(drive)->udma_four) {
			printk("%s: Speed warnings UDMA 3/4/5 is not "
				"functional.\n",
				HWIF(drive)->name);
			return 1;
		}
	}
	return 0;
}

EXPORT_SYMBOL(ide_ata66_check);

/*
 * Backside of HDIO_DRIVE_CMD call of SETFEATURES_XFER.
 * 1 : Safe to update drive->id DMA registers.
 * 0 : OOPs not allowed.
 */
int set_transfer (ide_drive_t *drive, ide_task_t *args)
{
	if ((args->tfRegister[IDE_COMMAND_OFFSET] == WIN_SETFEATURES) &&
	    (args->tfRegister[IDE_SECTOR_OFFSET] >= XFER_SW_DMA_0) &&
	    (args->tfRegister[IDE_FEATURE_OFFSET] == SETFEATURES_XFER) &&
	    (drive->id->dma_ultra ||
	     drive->id->dma_mword ||
	     drive->id->dma_1word))
		return 1;

	return 0;
}

EXPORT_SYMBOL(set_transfer);

u8 ide_auto_reduce_xfer (ide_drive_t *drive)
{
	if (!drive->crc_count)
		return drive->current_speed;
	drive->crc_count = 0;

	switch(drive->current_speed) {
		case XFER_UDMA_7:	return XFER_UDMA_6;
		case XFER_UDMA_6:	return XFER_UDMA_5;
		case XFER_UDMA_5:	return XFER_UDMA_4;
		case XFER_UDMA_4:	return XFER_UDMA_3;
		case XFER_UDMA_3:	return XFER_UDMA_2;
		case XFER_UDMA_2:	return XFER_UDMA_1;
		case XFER_UDMA_1:	return XFER_UDMA_0;
			/*
			 * Note: we do not step down to the non-Ultra DMA modes;
			 * without CRC checking available we force the system to
			 * PIO and make the user explicitly select the ATA-1/ATA-2
			 * DMA modes.
			 */
		case XFER_UDMA_0:
		default:		return XFER_PIO_4;
	}
}

EXPORT_SYMBOL(ide_auto_reduce_xfer);
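
ide_auto_reduce_xfer() only computes the next lower mode; it does not reprogram the interface. A minimal sketch of how an error path could use it, assuming a generic rate-setting helper along the lines of ide_set_xfer_rate() is available (hypothetical wiring):

static void hypothetical_handle_udma_crc(ide_drive_t *drive)
{
	/* After repeated interface CRC errors, drop one UDMA step
	 * (or fall back to PIO4) and program the new rate. */
	u8 new_speed = ide_auto_reduce_xfer(drive);

	if (new_speed != drive->current_speed)
		ide_set_xfer_rate(drive, new_speed);
}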

/*
 * Update the drive's identify data (re-read it for a possible DMA mode change).
 */
int ide_driveid_update (ide_drive_t *drive)
{
	ide_hwif_t *hwif	= HWIF(drive);
	struct hd_driveid *id;
#if 0
	id = kmalloc(SECTOR_WORDS*4, GFP_ATOMIC);
	if (!id)
		return 0;

	taskfile_lib_get_identify(drive, (char *)&id);

	ide_fix_driveid(id);
	if (id) {
		drive->id->dma_ultra = id->dma_ultra;
		drive->id->dma_mword = id->dma_mword;
		drive->id->dma_1word = id->dma_1word;
		/* anything more ? */
		kfree(id);
	}
	return 1;
#else
	/*
	 * Re-read drive->id for possible DMA mode
	 * change (copied from ide-probe.c)
	 */
	unsigned long timeout, flags;

	SELECT_MASK(drive, 1);
	if (IDE_CONTROL_REG)
		hwif->OUTB(drive->ctl,IDE_CONTROL_REG);
	ide_delay_50ms();
	hwif->OUTB(WIN_IDENTIFY, IDE_COMMAND_REG);
	timeout = jiffies + WAIT_WORSTCASE;
	do {
		if (time_after(jiffies, timeout)) {
			SELECT_MASK(drive, 0);
			return 0;	/* drive timed-out */
		}
		ide_delay_50ms();	/* give drive a breather */
	} while (hwif->INB(IDE_ALTSTATUS_REG) & BUSY_STAT);
	ide_delay_50ms();	/* wait for IRQ and DRQ_STAT */
	if (!OK_STAT(hwif->INB(IDE_STATUS_REG),DRQ_STAT,BAD_R_STAT)) {
		SELECT_MASK(drive, 0);
		printk("%s: CHECK for good STATUS\n", drive->name);
		return 0;
	}
	local_irq_save(flags);
	SELECT_MASK(drive, 0);
	id = kmalloc(SECTOR_WORDS*4, GFP_ATOMIC);
	if (!id) {
		local_irq_restore(flags);
		return 0;
	}
	ata_input_data(drive, id, SECTOR_WORDS);
	(void) hwif->INB(IDE_STATUS_REG);	/* clear drive IRQ */
	local_irq_enable();
	local_irq_restore(flags);
	ide_fix_driveid(id);

	drive->id->dma_ultra = id->dma_ultra;
	drive->id->dma_mword = id->dma_mword;
	drive->id->dma_1word = id->dma_1word;
	/* anything more ? */
	kfree(id);

	return 1;
#endif
}
Example #11
0
int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
{
	ide_hwif_t *hwif = drive->hwif;
	const struct ide_tp_ops *tp_ops = hwif->tp_ops;
	struct ide_taskfile tf;
	u16 *id = drive->id, i;
	int error = 0;
	u8 stat;

#ifdef CONFIG_BLK_DEV_IDEDMA
	if (hwif->dma_ops)
		hwif->dma_ops->dma_host_set(drive, 0);
#endif

	if ((speed & 0xf8) == XFER_PIO_0 && ata_id_has_iordy(drive->id) == 0)
		goto skip;

	udelay(1);
	tp_ops->dev_select(drive);
	SELECT_MASK(drive, 1);
	udelay(1);
	tp_ops->write_devctl(hwif, ATA_NIEN | ATA_DEVCTL_OBS);

	memset(&tf, 0, sizeof(tf));
	tf.feature = SETFEATURES_XFER;
	tf.nsect   = speed;

	tp_ops->tf_load(drive, &tf, IDE_VALID_FEATURE | IDE_VALID_NSECT);

	tp_ops->exec_command(hwif, ATA_CMD_SET_FEATURES);

	if (drive->dev_flags & IDE_DFLAG_NIEN_QUIRK)
		tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);

	error = __ide_wait_stat(drive, drive->ready_stat,
				ATA_BUSY | ATA_DRQ | ATA_ERR,
				WAIT_CMD, &stat);

	SELECT_MASK(drive, 0);

	if (error) {
		(void) ide_dump_status(drive, "set_drive_speed_status", stat);
		return error;
	}

	if (speed >= XFER_SW_DMA_0) {
		id[ATA_ID_UDMA_MODES]  &= ~0xFF00;
		id[ATA_ID_MWDMA_MODES] &= ~0x0700;
		id[ATA_ID_SWDMA_MODES] &= ~0x0700;
		if (ata_id_is_cfa(id))
			id[ATA_ID_CFA_MODES] &= ~0x0E00;
	} else	if (ata_id_is_cfa(id))
		id[ATA_ID_CFA_MODES] &= ~0x01C0;

 skip:
#ifdef CONFIG_BLK_DEV_IDEDMA
	if (speed >= XFER_SW_DMA_0 && (drive->dev_flags & IDE_DFLAG_USING_DMA))
		hwif->dma_ops->dma_host_set(drive, 1);
	else if (hwif->dma_ops)	
		ide_dma_off_quietly(drive);
#endif

	if (speed >= XFER_UDMA_0) {
		i = 1 << (speed - XFER_UDMA_0);
		id[ATA_ID_UDMA_MODES] |= (i << 8 | i);
	} else if (ata_id_is_cfa(id) && speed >= XFER_MW_DMA_3) {
		i = speed - XFER_MW_DMA_2;
		id[ATA_ID_CFA_MODES] |= i << 9;
	} else if (speed >= XFER_MW_DMA_0) {
		i = 1 << (speed - XFER_MW_DMA_0);
		id[ATA_ID_MWDMA_MODES] |= (i << 8 | i);
	} else if (speed >= XFER_SW_DMA_0) {
		i = 1 << (speed - XFER_SW_DMA_0);
		id[ATA_ID_SWDMA_MODES] |= (i << 8 | i);
	} else if (ata_id_is_cfa(id) && speed >= XFER_PIO_5) {
		i = speed - XFER_PIO_4;
		id[ATA_ID_CFA_MODES] |= i << 6;
	}

	if (!drive->init_speed)
		drive->init_speed = speed;
	drive->current_speed = speed;
	return error;
}
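
The identify-word updates above rely on the layout of the transfer-mode words: the low byte records which modes the device supports, the high byte which mode is currently selected, which is why a single mode bit is OR-ed into both halves. A standalone illustration of the UDMA case (user-space snippet, values chosen only for the example):

#include <stdio.h>

int main(void)
{
	unsigned short udma_word = 0x003f;	/* device supports UDMA 0-5 */
	unsigned short bit = 1u << 5;		/* speed - XFER_UDMA_0 == 5 */

	udma_word &= ~0xFF00;			/* clear the "selected mode" byte */
	udma_word |= (bit << 8) | bit;		/* select UDMA5: word becomes 0x203f */

	printf("UDMA word: 0x%04x\n", udma_word);
	return 0;
}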
Example #12
0
int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
{
	ide_hwif_t *hwif = drive->hwif;
	const struct ide_tp_ops *tp_ops = hwif->tp_ops;
	struct ide_taskfile tf;
	u16 *id = drive->id, i;
	int error = 0;
	u8 stat;

#ifdef CONFIG_BLK_DEV_IDEDMA
	if (hwif->dma_ops)	/* check if host supports DMA */
		hwif->dma_ops->dma_host_set(drive, 0);
#endif

	/* Skip setting PIO flow-control modes on pre-EIDE drives */
	if ((speed & 0xf8) == XFER_PIO_0 && ata_id_has_iordy(drive->id) == 0)
		goto skip;

	/*
	 * Don't use ide_wait_cmd here - it will
	 * attempt to set_geometry and recalibrate,
	 * but for some reason these don't work at
	 * this point (lost interrupt).
	 */

	/*
	 *	FIXME: we race against the running IRQ here if
	 *	this is called from non IRQ context. If we use
	 *	disable_irq() we hang on the error path. Work
	 *	is needed.
	 */
	disable_irq_nosync(hwif->irq);

	udelay(1);
	tp_ops->dev_select(drive);
	SELECT_MASK(drive, 1);
	udelay(1);
	tp_ops->write_devctl(hwif, ATA_NIEN | ATA_DEVCTL_OBS);

	memset(&tf, 0, sizeof(tf));
	tf.feature = SETFEATURES_XFER;
	tf.nsect   = speed;

	tp_ops->tf_load(drive, &tf, IDE_VALID_FEATURE | IDE_VALID_NSECT);

	tp_ops->exec_command(hwif, ATA_CMD_SET_FEATURES);

	if (drive->dev_flags & IDE_DFLAG_NIEN_QUIRK)
		tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);

	error = __ide_wait_stat(drive, drive->ready_stat,
				ATA_BUSY | ATA_DRQ | ATA_ERR,
				WAIT_CMD, &stat);

	SELECT_MASK(drive, 0);

	enable_irq(hwif->irq);

	if (error) {
		(void) ide_dump_status(drive, "set_drive_speed_status", stat);
		return error;
	}

	if (speed >= XFER_SW_DMA_0) {
		id[ATA_ID_UDMA_MODES]  &= ~0xFF00;
		id[ATA_ID_MWDMA_MODES] &= ~0x0700;
		id[ATA_ID_SWDMA_MODES] &= ~0x0700;
		if (ata_id_is_cfa(id))
			id[ATA_ID_CFA_MODES] &= ~0x0E00;
	} else	if (ata_id_is_cfa(id))
		id[ATA_ID_CFA_MODES] &= ~0x01C0;

 skip:
#ifdef CONFIG_BLK_DEV_IDEDMA
	if (speed >= XFER_SW_DMA_0 && (drive->dev_flags & IDE_DFLAG_USING_DMA))
		hwif->dma_ops->dma_host_set(drive, 1);
	else if (hwif->dma_ops)	/* check if host supports DMA */
		ide_dma_off_quietly(drive);
#endif

	if (speed >= XFER_UDMA_0) {
		i = 1 << (speed - XFER_UDMA_0);
		id[ATA_ID_UDMA_MODES] |= (i << 8 | i);
	} else if (ata_id_is_cfa(id) && speed >= XFER_MW_DMA_3) {
		i = speed - XFER_MW_DMA_2;
		id[ATA_ID_CFA_MODES] |= i << 9;
	} else if (speed >= XFER_MW_DMA_0) {
		i = 1 << (speed - XFER_MW_DMA_0);
		id[ATA_ID_MWDMA_MODES] |= (i << 8 | i);
	} else if (speed >= XFER_SW_DMA_0) {
		i = 1 << (speed - XFER_SW_DMA_0);
		id[ATA_ID_SWDMA_MODES] |= (i << 8 | i);
	} else if (ata_id_is_cfa(id) && speed >= XFER_PIO_5) {
		i = speed - XFER_PIO_4;
		id[ATA_ID_CFA_MODES] |= i << 6;
	}

	if (!drive->init_speed)
		drive->init_speed = speed;
	drive->current_speed = speed;
	return error;
}
Example #13
0
/*
 * Update the drive's identify data (re-read it for a possible DMA mode change).
 */
int ide_driveid_update (ide_drive_t *drive)
{
	ide_hwif_t *hwif	= HWIF(drive);
	struct hd_driveid *id;
#if 0
	id = kmalloc(SECTOR_WORDS*4, GFP_ATOMIC);
	if (!id)
		return 0;

	taskfile_lib_get_identify(drive, (char *)&id);

	ide_fix_driveid(id);
	if (id) {
		drive->id->dma_ultra = id->dma_ultra;
		drive->id->dma_mword = id->dma_mword;
		drive->id->dma_1word = id->dma_1word;
		/* anything more ? */
		kfree(id);
	}
	return 1;
#else
	/*
	 * Re-read drive->id for possible DMA mode
	 * change (copied from ide-probe.c)
	 */
	unsigned long timeout, flags;

	SELECT_MASK(drive, 1);
	if (IDE_CONTROL_REG)
		hwif->OUTB(drive->ctl,IDE_CONTROL_REG);
	msleep(50);
	hwif->OUTB(WIN_IDENTIFY, IDE_COMMAND_REG);
	timeout = jiffies + WAIT_WORSTCASE;
	do {
		if (time_after(jiffies, timeout)) {
			SELECT_MASK(drive, 0);
			return 0;	/* drive timed-out */
		}
		msleep(50);	/* give drive a breather */
	} while (hwif->INB(IDE_ALTSTATUS_REG) & BUSY_STAT);
	msleep(50);	/* wait for IRQ and DRQ_STAT */
	if (!OK_STAT(hwif->INB(IDE_STATUS_REG),DRQ_STAT,BAD_R_STAT)) {
		SELECT_MASK(drive, 0);
		printk("%s: CHECK for good STATUS\n", drive->name);
		return 0;
	}
	local_irq_save(flags);
	SELECT_MASK(drive, 0);
	id = kmalloc(SECTOR_WORDS*4, GFP_ATOMIC);
	if (!id) {
		local_irq_restore(flags);
		return 0;
	}
	ata_input_data(drive, id, SECTOR_WORDS);
	(void) hwif->INB(IDE_STATUS_REG);	/* clear drive IRQ */
	local_irq_enable();
	local_irq_restore(flags);
	ide_fix_driveid(id);
	if (id) {
		drive->id->dma_ultra = id->dma_ultra;
		drive->id->dma_mword = id->dma_mword;
		drive->id->dma_1word = id->dma_1word;
		/* anything more ? */
		kfree(id);
	}

	return 1;
#endif
}
Example #14
0
/*
 * Similar to ide_wait_stat(), except it never calls ide_error internally.
 * This is a kludge to handle the new ide_config_drive_speed() function,
 * and should not otherwise be used anywhere.  Eventually, the tuneprocs
 * should be updated to return ide_startstop_t, in which case we can get
 * rid of this abomination again.  :)   -ml
 *
 * It is gone..........
 *
 * const char *msg == consider adding for verbose errors.
 */
int ide_config_drive_speed (ide_drive_t *drive, u8 speed)
{
	ide_hwif_t *hwif	= HWIF(drive);
	int	i, error	= 1;
	u8 stat;

//	while (HWGROUP(drive)->busy)
//		msleep(50);

#ifdef CONFIG_BLK_DEV_IDEDMA
	if (hwif->ide_dma_check)	 /* check if host supports DMA */
		hwif->ide_dma_host_off(drive);
#endif

	/*
	 * Don't use ide_wait_cmd here - it will
	 * attempt to set_geometry and recalibrate,
	 * but for some reason these don't work at
	 * this point (lost interrupt).
	 */
        /*
         * Select the drive, and issue the SETFEATURES command
         */
	disable_irq_nosync(hwif->irq);
	
	/*
	 *	FIXME: we race against the running IRQ here if
	 *	this is called from non IRQ context. If we use
	 *	disable_irq() we hang on the error path. Work
	 *	is needed.
	 */
	 
	udelay(1);
	SELECT_DRIVE(drive);
	SELECT_MASK(drive, 0);
	udelay(1);
	if (IDE_CONTROL_REG)
		hwif->OUTB(drive->ctl | 2, IDE_CONTROL_REG);
	hwif->OUTB(speed, IDE_NSECTOR_REG);
	hwif->OUTB(SETFEATURES_XFER, IDE_FEATURE_REG);
	hwif->OUTB(WIN_SETFEATURES, IDE_COMMAND_REG);
	if ((IDE_CONTROL_REG) && (drive->quirk_list == 2))
		hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
	udelay(1);
	/*
	 * Wait for drive to become non-BUSY
	 */
	if ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) {
		unsigned long flags, timeout;
		local_irq_set(flags);
		timeout = jiffies + WAIT_CMD;
		while ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) {
			if (time_after(jiffies, timeout))
				break;
		}
		local_irq_restore(flags);
	}

	/*
	 * Allow status to settle, then read it again.
	 * A few rare drives vastly violate the 400ns spec here,
	 * so we'll wait up to 10usec for a "good" status
	 * rather than expensively fail things immediately.
	 * This fix courtesy of Matthew Faupel & Niccolo Rigacci.
	 */
	for (i = 0; i < 10; i++) {
		udelay(1);
		if (OK_STAT((stat = hwif->INB(IDE_STATUS_REG)), DRIVE_READY, BUSY_STAT|DRQ_STAT|ERR_STAT)) {
			error = 0;
			break;
		}
	}
	/* Added by Frank Ting 95/8/24 due to BTC loader resume bug */
	if (i == 10 && OK_STAT(stat, 0, BUSY_STAT | DRQ_STAT | ERR_STAT)) {
		error = 0;
		printk("%s: conditional pass\n", drive->name);
	}

	SELECT_MASK(drive, 0);

	enable_irq(hwif->irq);

	if (error) {
		(void) ide_dump_status(drive, "set_drive_speed_status", stat);
		return error;
	}

	drive->id->dma_ultra &= ~0xFF00;
	drive->id->dma_mword &= ~0x0F00;
	drive->id->dma_1word &= ~0x0F00;

#ifdef CONFIG_BLK_DEV_IDEDMA
	if (speed >= XFER_SW_DMA_0)
		hwif->ide_dma_host_on(drive);
	else if (hwif->ide_dma_check)	/* check if host supports DMA */
		hwif->ide_dma_off_quietly(drive);
#endif

	switch(speed) {
		case XFER_UDMA_7:   drive->id->dma_ultra |= 0x8080; break;
		case XFER_UDMA_6:   drive->id->dma_ultra |= 0x4040; break;
		case XFER_UDMA_5:   drive->id->dma_ultra |= 0x2020; break;
		case XFER_UDMA_4:   drive->id->dma_ultra |= 0x1010; break;
		case XFER_UDMA_3:   drive->id->dma_ultra |= 0x0808; break;
		case XFER_UDMA_2:   drive->id->dma_ultra |= 0x0404; break;
		case XFER_UDMA_1:   drive->id->dma_ultra |= 0x0202; break;
		case XFER_UDMA_0:   drive->id->dma_ultra |= 0x0101; break;
		case XFER_MW_DMA_2: drive->id->dma_mword |= 0x0404; break;
		case XFER_MW_DMA_1: drive->id->dma_mword |= 0x0202; break;
		case XFER_MW_DMA_0: drive->id->dma_mword |= 0x0101; break;
		case XFER_SW_DMA_2: drive->id->dma_1word |= 0x0404; break;
		case XFER_SW_DMA_1: drive->id->dma_1word |= 0x0202; break;
		case XFER_SW_DMA_0: drive->id->dma_1word |= 0x0101; break;
		default: break;
	}
	if (!drive->init_speed)
		drive->init_speed = speed;
	drive->current_speed = speed;
	return error;
}
Example #15
0
/*
 * Verify that we are doing an approved SETFEATURES_XFER with respect
 * to the hardware being able to support the request.  Since some hardware
 * can improperly report capabilities, we check to see if the host adapter
 * in combination with the device (usually a disk) properly detects
 * and acknowledges each end of the ribbon.
 */
int ide_ata66_check (ide_drive_t *drive, byte cmd, byte nsect, byte feature)
{
    if ((cmd == WIN_SETFEATURES) &&
            (nsect > XFER_UDMA_2) &&
            (feature == SETFEATURES_XFER))
    {
        if (!HWIF(drive)->udma_four)
        {
            printk("%s: Speed warnings UDMA 3/4/5 is not functional.\n", HWIF(drive)->name);
            return 1;
        }
#ifndef CONFIG_IDEDMA_IVB
        if ((drive->id->hw_config & 0x6000) == 0)
        {
#else /* !CONFIG_IDEDMA_IVB */
        if (((drive->id->hw_config & 0x2000) == 0) ||
                ((drive->id->hw_config & 0x4000) == 0))
        {
#endif /* CONFIG_IDEDMA_IVB */
            printk("%s: Speed warnings UDMA 3/4/5 is not functional.\n", drive->name);
            return 1;
        }
    }
    return 0;
}

/*
 * Backside of HDIO_DRIVE_CMD call of SETFEATURES_XFER.
 * 1 : Safe to update drive->id DMA registers.
 * 0 : OOPs not allowed.
 */
int set_transfer (ide_drive_t *drive, byte cmd, byte nsect, byte feature)
{
    if ((cmd == WIN_SETFEATURES) &&
            (nsect >= XFER_SW_DMA_0) &&
            (feature == SETFEATURES_XFER) &&
            (drive->id->dma_ultra ||
             drive->id->dma_mword ||
             drive->id->dma_1word))
        return 1;

    return 0;
}

/*
 *  All hosts that use the 80c (80-conductor) ribbon must use this check.
 */
byte eighty_ninty_three (ide_drive_t *drive)
{
    return ((byte) ((HWIF(drive)->udma_four) &&
#ifndef CONFIG_IDEDMA_IVB
                    (drive->id->hw_config & 0x4000) &&
#endif /* CONFIG_IDEDMA_IVB */
                    (drive->id->hw_config & 0x6000)) ? 1 : 0);
}
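
In practice the result of eighty_ninty_three() gates the higher UDMA modes; a minimal sketch of how a tuning routine might use it (hypothetical helper name):

static byte hypothetical_clamp_speed(ide_drive_t *drive, byte speed)
{
    /* Modes above UDMA2 require an 80-conductor cable detected on both
     * ends; fall back to UDMA2 when the check fails. */
    if (speed > XFER_UDMA_2 && !eighty_ninty_three(drive))
        return XFER_UDMA_2;
    return speed;
}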

/*
 * Similar to ide_wait_stat(), except it never calls ide_error internally.
 * This is a kludge to handle the new ide_config_drive_speed() function,
 * and should not otherwise be used anywhere.  Eventually, the tuneprocs
 * should be updated to return ide_startstop_t, in which case we can get
 * rid of this abomination again.  :)   -ml
 *
 * It is gone..........
 *
 * const char *msg == consider adding for verbose errors.
 */
int ide_config_drive_speed (ide_drive_t *drive, byte speed)
{
    ide_hwif_t *hwif = HWIF(drive);
    int	i, error = 1;
    byte stat;

#if defined(CONFIG_BLK_DEV_IDEDMA) && !defined(CONFIG_DMA_NONPCI)
    byte unit = (drive->select.b.unit & 0x01);
    outb(inb(hwif->dma_base+2) & ~(1<<(5+unit)), hwif->dma_base+2);
#endif /* (CONFIG_BLK_DEV_IDEDMA) && !(CONFIG_DMA_NONPCI) */

    /*
     * Don't use ide_wait_cmd here - it will
     * attempt to set_geometry and recalibrate,
     * but for some reason these don't work at
     * this point (lost interrupt).
     */
    /*
     * Select the drive, and issue the SETFEATURES command
     */
    disable_irq(hwif->irq);	/* disable_irq_nosync ?? */
    udelay(1);
    SELECT_DRIVE(HWIF(drive), drive);
    SELECT_MASK(HWIF(drive), drive, 0);
    udelay(1);
    if (IDE_CONTROL_REG)
        OUT_BYTE(drive->ctl | 2, IDE_CONTROL_REG);
    OUT_BYTE(speed, IDE_NSECTOR_REG);
    OUT_BYTE(SETFEATURES_XFER, IDE_FEATURE_REG);
    OUT_BYTE(WIN_SETFEATURES, IDE_COMMAND_REG);
    if ((IDE_CONTROL_REG) && (drive->quirk_list == 2))
        OUT_BYTE(drive->ctl, IDE_CONTROL_REG);
    udelay(1);
    /*
     * Wait for drive to become non-BUSY
     */
    if ((stat = GET_STAT()) & BUSY_STAT)
    {
        unsigned long flags, timeout;
        __save_flags(flags);	/* local CPU only */
        ide__sti();		/* local CPU only -- for jiffies */
        timeout = jiffies + WAIT_CMD;
        while ((stat = GET_STAT()) & BUSY_STAT)
        {
            if (0 < (signed long)(jiffies - timeout))
                break;
        }
        __restore_flags(flags); /* local CPU only */
    }

    /*
     * Allow status to settle, then read it again.
     * A few rare drives vastly violate the 400ns spec here,
     * so we'll wait up to 10usec for a "good" status
     * rather than expensively fail things immediately.
     * This fix courtesy of Matthew Faupel & Niccolo Rigacci.
     */
    for (i = 0; i < 10; i++)
    {
        udelay(1);
        if (OK_STAT((stat = GET_STAT()), DRIVE_READY, BUSY_STAT|DRQ_STAT|ERR_STAT))
        {
            error = 0;
            break;
        }
    }

    SELECT_MASK(HWIF(drive), drive, 0);

    enable_irq(hwif->irq);

    if (error)
    {
        (void) ide_dump_status(drive, "set_drive_speed_status", stat);
        return error;
    }

    drive->id->dma_ultra &= ~0xFF00;
    drive->id->dma_mword &= ~0x0F00;
    drive->id->dma_1word &= ~0x0F00;

#if defined(CONFIG_BLK_DEV_IDEDMA) && !defined(CONFIG_DMA_NONPCI)
    if (speed > XFER_PIO_4)
    {
        outb(inb(hwif->dma_base+2)|(1<<(5+unit)), hwif->dma_base+2);
    }
    else
    {
        outb(inb(hwif->dma_base+2) & ~(1<<(5+unit)), hwif->dma_base+2);
    }
#endif /* (CONFIG_BLK_DEV_IDEDMA) && !(CONFIG_DMA_NONPCI) */

    switch(speed)
    {
    case XFER_UDMA_7:
        drive->id->dma_ultra |= 0x8080;
        break;
    case XFER_UDMA_6:
        drive->id->dma_ultra |= 0x4040;
        break;
    case XFER_UDMA_5:
        drive->id->dma_ultra |= 0x2020;
        break;
    case XFER_UDMA_4:
        drive->id->dma_ultra |= 0x1010;
        break;
    case XFER_UDMA_3:
        drive->id->dma_ultra |= 0x0808;
        break;
    case XFER_UDMA_2:
        drive->id->dma_ultra |= 0x0404;
        break;
    case XFER_UDMA_1:
        drive->id->dma_ultra |= 0x0202;
        break;
    case XFER_UDMA_0:
        drive->id->dma_ultra |= 0x0101;
        break;
    case XFER_MW_DMA_2:
        drive->id->dma_mword |= 0x0404;
        break;
    case XFER_MW_DMA_1:
        drive->id->dma_mword |= 0x0202;
        break;
    case XFER_MW_DMA_0:
        drive->id->dma_mword |= 0x0101;
        break;
    case XFER_SW_DMA_2:
        drive->id->dma_1word |= 0x0404;
        break;
    case XFER_SW_DMA_1:
        drive->id->dma_1word |= 0x0202;
        break;
    case XFER_SW_DMA_0:
        drive->id->dma_1word |= 0x0101;
        break;
    default:
        break;
    }
    return error;
}