static int aec62xx_irq_timeout (ide_drive_t *drive) { ide_hwif_t *hwif = HWIF(drive); struct pci_dev *dev = hwif->pci_dev; switch(dev->device) { case PCI_DEVICE_ID_ARTOP_ATP860: case PCI_DEVICE_ID_ARTOP_ATP860R: case PCI_DEVICE_ID_ARTOP_ATP865: case PCI_DEVICE_ID_ARTOP_ATP865R: printk(" AEC62XX time out "); #if 0 { int i = 0; u8 reg49h = 0; pci_read_config_byte(HWIF(drive)->pci_dev, 0x49, &reg49h); for (i=0;i<256;i++) pci_write_config_byte(HWIF(drive)->pci_dev, 0x49, reg49h|0x10); pci_write_config_byte(HWIF(drive)->pci_dev, 0x49, reg49h & ~0x10); } return 0; #endif default: break; } #if 0 { ide_hwif_t *hwif = HWIF(drive); struct pci_dev *dev = hwif->pci_dev; u8 tmp1 = 0, tmp2 = 0, mode6 = 0; pci_read_config_byte(dev, 0x44, &tmp1); pci_read_config_byte(dev, 0x45, &tmp2); printk(" AEC6280 r44=%x r45=%x ",tmp1,tmp2); mode6 = HWIF(drive)->INB(((hwif->channel) ? hwif->mate->dma_status : hwif->dma_status)); printk(" AEC6280 133=%x ", (mode6 & 0x10)); } #endif return 0; }
static int slc90e66_tune_chipset (ide_drive_t *drive, u8 xferspeed) { ide_hwif_t *hwif = HWIF(drive); struct pci_dev *dev = hwif->pci_dev; u8 maslave = hwif->channel ? 0x42 : 0x40; u8 speed = ide_rate_filter(slc90e66_ratemask(drive), xferspeed); int sitre = 0, a_speed = 7 << (drive->dn * 4); int u_speed = 0, u_flag = 1 << drive->dn; u16 reg4042, reg44, reg48, reg4a; pci_read_config_word(dev, maslave, &reg4042); sitre = (reg4042 & 0x4000) ? 1 : 0; pci_read_config_word(dev, 0x44, &reg44); pci_read_config_word(dev, 0x48, &reg48); pci_read_config_word(dev, 0x4a, &reg4a); switch(speed) { #ifdef CONFIG_BLK_DEV_IDEDMA case XFER_UDMA_4: u_speed = 4 << (drive->dn * 4); break; case XFER_UDMA_3: u_speed = 3 << (drive->dn * 4); break; case XFER_UDMA_2: u_speed = 2 << (drive->dn * 4); break; case XFER_UDMA_1: u_speed = 1 << (drive->dn * 4); break; case XFER_UDMA_0: u_speed = 0 << (drive->dn * 4); break; case XFER_MW_DMA_2: case XFER_MW_DMA_1: case XFER_SW_DMA_2: break; #endif /* CONFIG_BLK_DEV_IDEDMA */ case XFER_PIO_4: case XFER_PIO_3: case XFER_PIO_2: case XFER_PIO_0: break; default: return -1; } if (speed >= XFER_UDMA_0) { if (!(reg48 & u_flag)) pci_write_config_word(dev, 0x48, reg48|u_flag); if ((reg4a & u_speed) != u_speed) { pci_write_config_word(dev, 0x4a, reg4a & ~a_speed); pci_read_config_word(dev, 0x4a, &reg4a); pci_write_config_word(dev, 0x4a, reg4a|u_speed); } } else { if (reg48 & u_flag) pci_write_config_word(dev, 0x48, reg48 & ~u_flag); if (reg4a & a_speed) pci_write_config_word(dev, 0x4a, reg4a & ~a_speed); } slc90e66_tune_drive(drive, slc90e66_dma_2_pio(speed)); return (ide_config_drive_speed(drive, speed)); }
/* * Similar to ide_wait_stat(), except it never calls ide_error internally. * This is a kludge to handle the new ide_config_drive_speed() function, * and should not otherwise be used anywhere. Eventually, the tuneproc's * should be updated to return ide_startstop_t, in which case we can get * rid of this abomination again. :) -ml * * It is gone.......... * * const char *msg == consider adding for verbose errors. */ int ide_config_drive_speed (ide_drive_t *drive, u8 speed) { ide_hwif_t *hwif = HWIF(drive); int i, error = 1; u8 stat; // while (HWGROUP(drive)->busy) // ide_delay_50ms(); #if defined(CONFIG_BLK_DEV_IDEDMA) && !defined(CONFIG_DMA_NONPCI) hwif->ide_dma_host_off(drive); #endif /* (CONFIG_BLK_DEV_IDEDMA) && !(CONFIG_DMA_NONPCI) */ /* * Don't use ide_wait_cmd here - it will * attempt to set_geometry and recalibrate, * but for some reason these don't work at * this point (lost interrupt). */ /* * Select the drive, and issue the SETFEATURES command */ disable_irq_nosync(hwif->irq); udelay(1); SELECT_DRIVE(drive); SELECT_MASK(drive, 0); udelay(1); if (IDE_CONTROL_REG) hwif->OUTB(drive->ctl | 2, IDE_CONTROL_REG); hwif->OUTB(speed, IDE_NSECTOR_REG); hwif->OUTB(SETFEATURES_XFER, IDE_FEATURE_REG); hwif->OUTB(WIN_SETFEATURES, IDE_COMMAND_REG); if ((IDE_CONTROL_REG) && (drive->quirk_list == 2)) hwif->OUTB(drive->ctl, IDE_CONTROL_REG); udelay(1); /* * Wait for drive to become non-BUSY */ if ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) { unsigned long flags, timeout; local_irq_set(flags); timeout = jiffies + WAIT_CMD; while ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) { if (time_after(jiffies, timeout)) break; } local_irq_restore(flags); } /* * Allow status to settle, then read it again. * A few rare drives vastly violate the 400ns spec here, * so we'll wait up to 10usec for a "good" status * rather than expensively fail things immediately. * This fix courtesy of Matthew Faupel & Niccolo Rigacci. */ for (i = 0; i < 10; i++) { udelay(1); if (OK_STAT((stat = hwif->INB(IDE_STATUS_REG)), DRIVE_READY, BUSY_STAT|DRQ_STAT|ERR_STAT)) { error = 0; break; } } SELECT_MASK(drive, 0); enable_irq(hwif->irq); if (error) { (void) ide_dump_status(drive, "set_drive_speed_status", stat); return error; } drive->id->dma_ultra &= ~0xFF00; drive->id->dma_mword &= ~0x0F00; drive->id->dma_1word &= ~0x0F00; #if defined(CONFIG_BLK_DEV_IDEDMA) && !defined(CONFIG_DMA_NONPCI) if (speed >= XFER_SW_DMA_0) hwif->ide_dma_host_on(drive); else hwif->ide_dma_off_quietly(drive); #endif /* (CONFIG_BLK_DEV_IDEDMA) && !(CONFIG_DMA_NONPCI) */ switch(speed) { case XFER_UDMA_7: drive->id->dma_ultra |= 0x8080; break; case XFER_UDMA_6: drive->id->dma_ultra |= 0x4040; break; case XFER_UDMA_5: drive->id->dma_ultra |= 0x2020; break; case XFER_UDMA_4: drive->id->dma_ultra |= 0x1010; break; case XFER_UDMA_3: drive->id->dma_ultra |= 0x0808; break; case XFER_UDMA_2: drive->id->dma_ultra |= 0x0404; break; case XFER_UDMA_1: drive->id->dma_ultra |= 0x0202; break; case XFER_UDMA_0: drive->id->dma_ultra |= 0x0101; break; case XFER_MW_DMA_2: drive->id->dma_mword |= 0x0404; break; case XFER_MW_DMA_1: drive->id->dma_mword |= 0x0202; break; case XFER_MW_DMA_0: drive->id->dma_mword |= 0x0101; break; case XFER_SW_DMA_2: drive->id->dma_1word |= 0x0404; break; case XFER_SW_DMA_1: drive->id->dma_1word |= 0x0202; break; case XFER_SW_DMA_0: drive->id->dma_1word |= 0x0101; break; default: break; } if (!drive->init_speed) drive->init_speed = speed; drive->current_speed = speed; return error; }
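/*
 * Editor's note: the paired-byte constants ORed into id->dma_ultra,
 * id->dma_mword and id->dma_1word above mirror the ATA IDENTIFY layout,
 * where the low byte advertises the supported mode and the high byte marks
 * the mode currently selected (XFER_UDMA_4 -> 0x1010, i.e. bit 4 set in
 * both bytes).  Below is a minimal sketch of that encoding for the UDMA
 * case; the helper name is illustrative and not part of the original code.
 */
static inline u16 udma_id_word(u8 speed)
{
	u8 mode = speed - XFER_UDMA_0;	/* 0..7 */

	/* selected-mode bit in the high byte, supported-mode bit in the low byte */
	return (1 << (mode + 8)) | (1 << mode);
}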
/* * Some localbus EIDE interfaces require a special access sequence * when using 32-bit I/O instructions to transfer data. We call this * the "vlb_sync" sequence, which consists of three successive reads * of the sector count register location, with interrupts disabled * to ensure that the reads all happen together. */ void ata_vlb_sync (ide_drive_t *drive, ide_ioreg_t port) { (void) HWIF(drive)->INB(port); (void) HWIF(drive)->INB(port); (void) HWIF(drive)->INB(port); }
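/*
 * Editor's note: the sync only matters immediately before a burst of
 * 32-bit port transfers.  A caller would look roughly like the sketch
 * below.  This is hedged: it assumes the era's hwif->INSL accessor and the
 * drive->io_32bit flag, and the function name is illustrative -- it is not
 * a verbatim copy of the driver's data-transfer path.
 */
static void example_input_32bit(ide_drive_t *drive, void *buffer, u32 wcount)
{
	if (drive->io_32bit & 2) {
		unsigned long flags;

		/* keep the three sync reads and the data burst together */
		local_irq_save(flags);
		ata_vlb_sync(drive, IDE_NSECTOR_REG);
		HWIF(drive)->INSL(IDE_DATA_REG, buffer, wcount);
		local_irq_restore(flags);
	} else {
		HWIF(drive)->INSL(IDE_DATA_REG, buffer, wcount);
	}
}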
void SELECT_MASK (ide_drive_t *drive, int mask) { if (HWIF(drive)->maskproc) HWIF(drive)->maskproc(drive, mask); }
static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi) { unsigned int unit; unsigned long flags; ide_hwif_t *hwif; ide_hwgroup_t *hwgroup; spin_lock_irqsave(&io_request_lock, flags); hwgroup = HWGROUP(drive); hwif = HWIF(drive); /* We must not reset with running handlers */ if(hwgroup->handler != NULL) BUG(); /* For an ATAPI device, first try an ATAPI SRST. */ if (drive->media != ide_disk && !do_not_try_atapi) { pre_reset(drive); SELECT_DRIVE(drive); udelay (20); hwif->OUTB(WIN_SRST, IDE_COMMAND_REG); hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE; __ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20, NULL); spin_unlock_irqrestore(&io_request_lock, flags); return ide_started; } /* * First, reset any device state data we were maintaining * for any of the drives on this interface. */ for (unit = 0; unit < MAX_DRIVES; ++unit) pre_reset(&hwif->drives[unit]); #if OK_TO_RESET_CONTROLLER if (!IDE_CONTROL_REG) { spin_unlock_irqrestore(&io_request_lock, flags); return ide_stopped; } /* * Note that we also set nIEN while resetting the device, * to mask unwanted interrupts from the interface during the reset. * However, due to the design of PC hardware, this will cause an * immediate interrupt due to the edge transition it produces. * This single interrupt gives us a "fast poll" for drives that * recover from reset very quickly, saving us the first 50ms wait time. */ /* set SRST and nIEN */ hwif->OUTBSYNC(drive, drive->ctl|6,IDE_CONTROL_REG); /* more than enough time */ udelay(10); if (drive->quirk_list == 2) { /* clear SRST and nIEN */ hwif->OUTBSYNC(drive, drive->ctl, IDE_CONTROL_REG); } else { /* clear SRST, leave nIEN */ hwif->OUTBSYNC(drive, drive->ctl|2, IDE_CONTROL_REG); } /* more than enough time */ udelay(10); hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE; __ide_set_handler(drive, &reset_pollfunc, HZ/20, NULL); /* * Some weird controller like resetting themselves to a strange * state when the disks are reset this way. At least, the Winbond * 553 documentation says that */ if (hwif->resetproc != NULL) { hwif->resetproc(drive); } #endif /* OK_TO_RESET_CONTROLLER */ spin_unlock_irqrestore(&io_request_lock, flags); return ide_started; }
static int pdcnew_tune_chipset(ide_drive_t *drive, u8 speed) { ide_hwif_t *hwif = HWIF(drive); u8 adj = (drive->dn & 1) ? 0x08 : 0x00; int err; speed = ide_rate_filter(pdcnew_ratemask(drive), speed); /* * Issue SETFEATURES_XFER to the drive first. PDC202xx hardware will * automatically set the timing registers based on 100 MHz PLL output. */ err = ide_config_drive_speed(drive, speed); /* * As we set up the PLL to output 133 MHz for UltraDMA/133 capable * chips, we must override the default register settings... */ if (max_dma_rate(hwif->pci_dev) == 4) { u8 mode = speed & 0x07; switch (speed) { case XFER_UDMA_6: case XFER_UDMA_5: case XFER_UDMA_4: case XFER_UDMA_3: case XFER_UDMA_2: case XFER_UDMA_1: case XFER_UDMA_0: set_indexed_reg(hwif, 0x10 + adj, udma_timings[mode].reg10); set_indexed_reg(hwif, 0x11 + adj, udma_timings[mode].reg11); set_indexed_reg(hwif, 0x12 + adj, udma_timings[mode].reg12); break; case XFER_MW_DMA_2: case XFER_MW_DMA_1: case XFER_MW_DMA_0: set_indexed_reg(hwif, 0x0e + adj, mwdma_timings[mode].reg0e); set_indexed_reg(hwif, 0x0f + adj, mwdma_timings[mode].reg0f); break; case XFER_PIO_4: case XFER_PIO_3: case XFER_PIO_2: case XFER_PIO_1: case XFER_PIO_0: set_indexed_reg(hwif, 0x0c + adj, pio_timings[mode].reg0c); set_indexed_reg(hwif, 0x0d + adj, pio_timings[mode].reg0d); set_indexed_reg(hwif, 0x13 + adj, pio_timings[mode].reg13); break; default: printk(KERN_ERR "pdc202xx_new: " "Unknown speed %d ignored\n", speed); } } else if (speed == XFER_UDMA_2) { /* Set tHOLD bit to 0 if using UDMA mode 2 */ u8 tmp = get_indexed_reg(hwif, 0x10 + adj); set_indexed_reg(hwif, 0x10 + adj, tmp & 0x7f); } return err; }
static int scc_dma_end(ide_drive_t *drive) { ide_hwif_t *hwif = HWIF(drive); void __iomem *dma_base = (void __iomem *)hwif->dma_base; unsigned long intsts_port = hwif->dma_base + 0x014; u32 reg; int dma_stat, data_loss = 0; static int retry = 0; /* errata A308 workaround: Step5 (check data loss) */ /* We don't check non ide_disk because it is limited to UDMA4 */ if (!(in_be32((void __iomem *)hwif->io_ports.ctl_addr) & ERR_STAT) && drive->media == ide_disk && drive->current_speed > XFER_UDMA_4) { reg = in_be32((void __iomem *)intsts_port); if (!(reg & INTSTS_ACTEINT)) { printk(KERN_WARNING "%s: operation failed (transfer data loss)\n", drive->name); data_loss = 1; if (retry++) { struct request *rq = HWGROUP(drive)->rq; int unit; /* ERROR_RESET and drive->crc_count are needed * to reduce DMA transfer mode in retry process. */ if (rq) rq->errors |= ERROR_RESET; for (unit = 0; unit < MAX_DRIVES; unit++) { ide_drive_t *drive = &hwif->drives[unit]; drive->crc_count++; } } } } while (1) { reg = in_be32((void __iomem *)intsts_port); if (reg & INTSTS_SERROR) { printk(KERN_WARNING "%s: SERROR\n", SCC_PATA_NAME); out_be32((void __iomem *)intsts_port, INTSTS_SERROR|INTSTS_BMSINT); out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS); continue; } if (reg & INTSTS_PRERR) { u32 maea0, maec0; unsigned long ctl_base = hwif->config_data; maea0 = in_be32((void __iomem *)(ctl_base + 0xF50)); maec0 = in_be32((void __iomem *)(ctl_base + 0xF54)); printk(KERN_WARNING "%s: PRERR [addr:%x cmd:%x]\n", SCC_PATA_NAME, maea0, maec0); out_be32((void __iomem *)intsts_port, INTSTS_PRERR|INTSTS_BMSINT); out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS); continue; } if (reg & INTSTS_RERR) { printk(KERN_WARNING "%s: Response Error\n", SCC_PATA_NAME); out_be32((void __iomem *)intsts_port, INTSTS_RERR|INTSTS_BMSINT); out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS); continue; } if (reg & INTSTS_ICERR) { out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS); printk(KERN_WARNING "%s: Illegal Configuration\n", SCC_PATA_NAME); out_be32((void __iomem *)intsts_port, INTSTS_ICERR|INTSTS_BMSINT); continue; } if (reg & INTSTS_BMSINT) { printk(KERN_WARNING "%s: Internal Bus Error\n", SCC_PATA_NAME); out_be32((void __iomem *)intsts_port, INTSTS_BMSINT); ide_do_reset(drive); continue; } if (reg & INTSTS_BMHE) { out_be32((void __iomem *)intsts_port, INTSTS_BMHE); continue; } if (reg & INTSTS_ACTEINT) { out_be32((void __iomem *)intsts_port, INTSTS_ACTEINT); continue; } if (reg & INTSTS_IOIRQS) { out_be32((void __iomem *)intsts_port, INTSTS_IOIRQS); continue; } break; } dma_stat = __scc_dma_end(drive); if (data_loss) dma_stat |= 2; /* emulate DMA error (to retry command) */ return dma_stat; }
static int piix_tune_chipset (ide_drive_t *drive, u8 xferspeed) { ide_hwif_t *hwif = HWIF(drive); struct pci_dev *dev = hwif->pci_dev; u8 maslave = hwif->channel ? 0x42 : 0x40; u8 speed = ide_rate_filter(piix_ratemask(drive), xferspeed); int a_speed = 3 << (drive->dn * 4); int u_flag = 1 << drive->dn; int v_flag = 0x01 << drive->dn; int w_flag = 0x10 << drive->dn; int u_speed = 0; int sitre; u16 reg4042, reg4a; u8 reg48, reg54, reg55; pci_read_config_word(dev, maslave, &reg4042); sitre = (reg4042 & 0x4000) ? 1 : 0; pci_read_config_byte(dev, 0x48, &reg48); pci_read_config_word(dev, 0x4a, &reg4a); pci_read_config_byte(dev, 0x54, &reg54); pci_read_config_byte(dev, 0x55, &reg55); switch(speed) { case XFER_UDMA_4: case XFER_UDMA_2: u_speed = 2 << (drive->dn * 4); break; case XFER_UDMA_5: case XFER_UDMA_3: case XFER_UDMA_1: u_speed = 1 << (drive->dn * 4); break; case XFER_UDMA_0: u_speed = 0 << (drive->dn * 4); break; case XFER_MW_DMA_2: case XFER_MW_DMA_1: case XFER_SW_DMA_2: break; case XFER_PIO_4: case XFER_PIO_3: case XFER_PIO_2: case XFER_PIO_0: break; default: return -1; } if (speed >= XFER_UDMA_0) { if (!(reg48 & u_flag)) pci_write_config_byte(dev, 0x48, reg48 | u_flag); if (speed == XFER_UDMA_5) { pci_write_config_byte(dev, 0x55, (u8) reg55|w_flag); } else { pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag); } if ((reg4a & a_speed) != u_speed) pci_write_config_word(dev, 0x4a, (reg4a & ~a_speed) | u_speed); if (speed > XFER_UDMA_2) { if (!(reg54 & v_flag)) pci_write_config_byte(dev, 0x54, reg54 | v_flag); } else pci_write_config_byte(dev, 0x54, reg54 & ~v_flag); } else { if (reg48 & u_flag) pci_write_config_byte(dev, 0x48, reg48 & ~u_flag); if (reg4a & a_speed) pci_write_config_word(dev, 0x4a, reg4a & ~a_speed); if (reg54 & v_flag) pci_write_config_byte(dev, 0x54, reg54 & ~v_flag); if (reg55 & w_flag) pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag); } piix_tune_drive(drive, piix_dma_2_pio(speed)); return (ide_config_drive_speed(drive, speed)); }
/** * do_drive_get_GTF - get the drive bootup default taskfile settings * @drive: the drive for which the taskfile settings should be retrieved * @gtf_length: number of bytes of _GTF data returned at @gtf_address * @gtf_address: buffer containing _GTF taskfile arrays * * The _GTF method has no input parameters. * It returns a variable number of register set values (registers * hex 1F1..1F7, taskfiles). * The <variable number> is not known in advance, so have ACPI-CA * allocate the buffer as needed and return it, then free it later. * * The returned @gtf_length and @gtf_address are only valid if the * function return value is 0. */ static int do_drive_get_GTF(ide_drive_t *drive, unsigned int *gtf_length, unsigned long *gtf_address, unsigned long *obj_loc) { acpi_status status; struct acpi_buffer output; union acpi_object *out_obj; ide_hwif_t *hwif = HWIF(drive); struct device *dev = hwif->gendev.parent; int err = -ENODEV; int port; *gtf_length = 0; *gtf_address = 0UL; *obj_loc = 0UL; if (ide_noacpi) return 0; if (!dev) { DEBPRINT("no PCI device for %s\n", hwif->name); goto out; } if (!hwif->acpidata) { DEBPRINT("no ACPI data for %s\n", hwif->name); goto out; } port = hwif->channel ? drive->dn - 2: drive->dn; DEBPRINT("ENTER: %s at %s, port#: %d, hard_port#: %d\n", hwif->name, dev->bus_id, port, hwif->channel); if (!drive->present) { DEBPRINT("%s drive %d:%d not present\n", hwif->name, hwif->channel, port); goto out; } /* Get this drive's _ADR info. if not already known. */ if (!drive->acpidata->obj_handle) { drive->acpidata->obj_handle = ide_acpi_drive_get_handle(drive); if (!drive->acpidata->obj_handle) { DEBPRINT("No ACPI object found for %s\n", drive->name); goto out; } } /* Setting up output buffer */ output.length = ACPI_ALLOCATE_BUFFER; output.pointer = NULL; /* ACPI-CA sets this; save/free it later */ /* _GTF has no input parameters */ err = -EIO; status = acpi_evaluate_object(drive->acpidata->obj_handle, "_GTF", NULL, &output); if (ACPI_FAILURE(status)) { printk(KERN_DEBUG "%s: Run _GTF error: status = 0x%x\n", __FUNCTION__, status); goto out; } if (!output.length || !output.pointer) { DEBPRINT("Run _GTF: " "length or ptr is NULL (0x%llx, 0x%p)\n", (unsigned long long)output.length, output.pointer); goto out; } out_obj = output.pointer; if (out_obj->type != ACPI_TYPE_BUFFER) { DEBPRINT("Run _GTF: error: " "expected object type of ACPI_TYPE_BUFFER, " "got 0x%x\n", out_obj->type); err = -ENOENT; kfree(output.pointer); goto out; } if (!out_obj->buffer.length || !out_obj->buffer.pointer || out_obj->buffer.length % REGS_PER_GTF) { printk(KERN_ERR "%s: unexpected GTF length (%d) or addr (0x%p)\n", __FUNCTION__, out_obj->buffer.length, out_obj->buffer.pointer); err = -ENOENT; kfree(output.pointer); goto out; } *gtf_length = out_obj->buffer.length; *gtf_address = (unsigned long)out_obj->buffer.pointer; *obj_loc = (unsigned long)out_obj; DEBPRINT("returning gtf_length=%d, gtf_address=0x%lx, obj_loc=0x%lx\n", *gtf_length, *gtf_address, *obj_loc); err = 0; out: return err; }
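/*
 * Editor's note: on success the buffer at *gtf_address is a flat array of
 * REGS_PER_GTF-byte register sets (registers 0x1f1..0x1f7, command byte
 * last).  A consumer walks it roughly as sketched below; the structure and
 * function names are illustrative, not taken from the driver.
 */
struct gtf_taskfile {			/* hypothetical name */
	u8 tf[REGS_PER_GTF];		/* 0x1f1..0x1f7 */
};

static void example_walk_gtf(unsigned int gtf_length, unsigned long gtf_address)
{
	struct gtf_taskfile *gtf = (struct gtf_taskfile *)gtf_address;
	unsigned int i, count = gtf_length / REGS_PER_GTF;

	for (i = 0; i < count; i++)
		DEBPRINT("GTF %u: command 0x%02x\n", i,
			 gtf[i].tf[REGS_PER_GTF - 1]);
}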
static inline void do_identify (ide_drive_t *drive, u8 cmd) { ide_hwif_t *hwif = HWIF(drive); int bswap = 1; struct hd_driveid *id; id = drive->id; /* read 512 bytes of id info */ hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE); drive->id_read = 1; local_irq_enable(); #ifdef DEBUG printk(KERN_INFO "%s: dumping identify data\n", drive->name); ide_dump_identify((u8 *)id); #endif ide_fix_driveid(id); /* * WIN_IDENTIFY returns little-endian info, * WIN_PIDENTIFY *usually* returns little-endian info. */ if (cmd == WIN_PIDENTIFY) { if ((id->model[0] == 'N' && id->model[1] == 'E') /* NEC */ || (id->model[0] == 'F' && id->model[1] == 'X') /* Mitsumi */ || (id->model[0] == 'P' && id->model[1] == 'i'))/* Pioneer */ /* Vertos drives may still be weird */ bswap ^= 1; } ide_fixstring(id->model, sizeof(id->model), bswap); ide_fixstring(id->fw_rev, sizeof(id->fw_rev), bswap); ide_fixstring(id->serial_no, sizeof(id->serial_no), bswap); /* we depend on this a lot! */ id->model[sizeof(id->model)-1] = '\0'; if (strstr(id->model, "E X A B Y T E N E S T")) goto err_misc; printk(KERN_INFO "%s: %s, ", drive->name, id->model); drive->present = 1; drive->dead = 0; /* * Check for an ATAPI device */ if (cmd == WIN_PIDENTIFY) { u8 type = (id->config >> 8) & 0x1f; printk(KERN_CONT "ATAPI "); switch (type) { case ide_floppy: if (!strstr(id->model, "CD-ROM")) { if (!strstr(id->model, "oppy") && !strstr(id->model, "poyp") && !strstr(id->model, "ZIP")) printk(KERN_CONT "cdrom or floppy?, assuming "); if (drive->media != ide_cdrom) { printk(KERN_CONT "FLOPPY"); drive->removable = 1; break; } } /* Early cdrom models used zero */ type = ide_cdrom; case ide_cdrom: drive->removable = 1; #ifdef CONFIG_PPC /* kludge for Apple PowerBook internal zip */ if (!strstr(id->model, "CD-ROM") && strstr(id->model, "ZIP")) { printk(KERN_CONT "FLOPPY"); type = ide_floppy; break; } #endif printk(KERN_CONT "CD/DVD-ROM"); break; case ide_tape: printk(KERN_CONT "TAPE"); break; case ide_optical: printk(KERN_CONT "OPTICAL"); drive->removable = 1; break; default: printk(KERN_CONT "UNKNOWN (type %d)", type); break; } printk(KERN_CONT " drive\n"); drive->media = type; /* an ATAPI device ignores DRDY */ drive->ready_stat = 0; return; }
/* * ps2_ide_build_dmatable() prepares a dma request. * Returns 0 if all went okay, returns 1 otherwise. */ static int ps2_ide_build_dmatable(int rw, ide_drive_t *drive) { struct request *rq = HWGROUP(drive)->rq; struct buffer_head *bh = rq->bh; unsigned int size, addr; unsigned int count = 0, totalsize = 0; struct ps2_dmatable *t = (struct ps2_dmatable *)HWIF(drive)->dmatable; struct ata_dma_request *req = &t->ata_dma_request; unsigned int iopaddr = t->ata_iop_buffer; #ifdef GATHER_WRITE_DATA unsigned char *dma_buffer = t->dma_buffer; #endif DPRINT("nr_sectors %ld\n", rq->nr_sectors); do { /* * Determine addr and size of next buffer area. We assume that * individual virtual buffers are always composed linearly in * physical memory. For example, we assume that any 8kB buffer * is always composed of two adjacent physical 4kB pages rather * than two possibly non-adjacent physical 4kB pages. */ if (bh == NULL) { /* paging requests have (rq->bh == NULL) */ addr = virt_to_bus (rq->buffer); size = rq->nr_sectors << 9; } else { /* group sequential buffers into one large buffer */ addr = virt_to_bus (bh->b_data); size = bh->b_size; while ((bh = bh->b_reqnext) != NULL) { if ((addr + size) != virt_to_bus (bh->b_data)) break; size += bh->b_size; } } /* * Fill in the dma table. * EE requires 128-bit alignment of all blocks, */ if ((addr & 0x0f)) { printk("%s: misaligned DMA buffer\n", drive->name); return 0; } if (count >= ATA_MAX_ENTRIES) { printk("%s: DMA table too small\n", drive->name); return 0; /* revert to PIO for this request */ } DPRINT("ps2_ide_build_dmatable: %08x->%08x %d\n", addr, iopaddr, size); #if !defined(GATHER_WRITE_DATA) if (rw) { /* write */ req->sdd[count].data = addr; req->sdd[count].addr = iopaddr; } else { /* read */ req->sdd[count].data = iopaddr; req->sdd[count].addr = addr; } req->sdd[count].size = size; req->sdd[count].mode = 0; #else if (rw) { memcpy(dma_buffer, (void *)bus_to_virt(addr), size); dma_buffer += size; } else { req->sdd[count].data = iopaddr; req->sdd[count].addr = addr; req->sdd[count].size = size; req->sdd[count].mode = 0; } #endif iopaddr += size; totalsize += size; count++; } while (bh != NULL); if (totalsize > ATA_BUFFER_SIZE) { printk("%s: DMA buffer too small\n", drive->name); return 0; /* revert to PIO for this request */ } req->count = count; req->size = totalsize; if (!count) printk("%s: empty DMA table?\n", drive->name); return count; }
static int ps2_ide_dmaproc (ide_dma_action_t func, ide_drive_t *drive) { ide_hwif_t *hwif = HWIF(drive); struct ps2_dmatable *t = (struct ps2_dmatable *)hwif->dmatable; struct ata_dma_request *req = &t->ata_dma_request; int ret; #if !defined(GATHER_WRITE_DATA) ps2sif_dmadata_t *sdd; int cnt, i; #endif /* !GATHER_WRITE_DATA */ DPRINT("ps2_ide_dmaproc: %s\n", procfunc[func]); switch (func) { case ide_dma_off: printk("%s: DMA disabled\n", drive->name); case ide_dma_off_quietly: case ide_dma_on: drive->using_dma = (func == ide_dma_on); return 0; case ide_dma_check: /* only ide-disk DMA works... */ drive->using_dma = hwif->autodma && drive->media == ide_disk; /* TODO: always UltraDMA mode 4 */ if (drive->using_dma) { int ide_config_drive_speed (ide_drive_t *drive, byte speed); ide_config_drive_speed(drive, XFER_UDMA_4); } return 0; case ide_dma_read: #ifdef NO_DMA_READ return 1; #endif if (drive->media != ide_disk) return 0; if (!ps2_ide_build_dmatable(0, drive)) return 1; /* try PIO instead of DMA */ req->command = WIN_READDMA; req->devctrl = drive->ctl; drive->waiting_for_dma = 1; ide_set_handler(drive, &ide_dma_intr, WAIT_CMD); /* set nIEN for disable ATA interrupt */ /* (ATA interrupt is enabled in RPC handler) */ OUT_BYTE(drive->ctl|2, hwif->io_ports[IDE_CONTROL_OFFSET]); flush_cache_all(); do { ret = ps2sif_callrpc(&t->cd_ata, SIFNUM_DmaRead, SIF_RPCM_NOWAIT, (void *)req, sizeof(int) * 4 + sizeof(ps2sif_dmadata_t) * req->count, NULL, 0, NULL, NULL); switch (ret) { case 0: break; case -SIF_RPCE_SENDP: break; default: /* restore nIEN */ OUT_BYTE(drive->ctl, hwif->io_ports[IDE_CONTROL_OFFSET]); printk("ps2_ide_dmaproc(read): callrpc failed, result=%d\n", ret); drive->waiting_for_dma = 0; return 1; } } while (ret < 0); return 0; case ide_dma_write: #ifdef NO_DMA_WRITE return 1; #endif if (drive->media != ide_disk) return 0; if (!ps2_ide_build_dmatable(1, drive)) return 1; /* try PIO instead of DMA */ req->command = WIN_WRITEDMA; drive->waiting_for_dma = 1; ide_set_handler(drive, &ide_dma_intr, WAIT_CMD); flush_cache_all(); #if !defined(GATHER_WRITE_DATA) sdd = req->sdd; for (cnt = 0; cnt < req->count; cnt++) { while (ps2sif_setdma(sdd, 1) == 0) { i = 0x010000; while (i--) ; } sdd++; } #else /* GATHER_WRITE_DATA */ req->sdd[0].data = t->dma_buffer; req->sdd[0].addr = t->ata_iop_buffer; req->sdd[0].size = req->size; req->sdd[0].mode = 0; while (ps2sif_setdma(req->sdd, 1) == 0) { i = 0x010000; while (i--) ; } #endif do { ret = ps2sif_callrpc(&t->cd_ata, SIFNUM_DmaWrite, SIF_RPCM_NOWAIT, (void *)req, sizeof(int) * 4, NULL, 0, NULL, NULL); switch (ret) { case 0: break; case -SIF_RPCE_SENDP: break; default: printk("ps2_ide_dmaproc(write): callrpc failed, result=%d\n", ret); drive->waiting_for_dma = 0; return 1; } } while (ret < 0); return 0; case ide_dma_begin: /* TODO */ return 0; case ide_dma_end: /* returns 1 on error, 0 otherwise */ /* disable DMA transfer */ *SPD_R_XFR_CTRL = 0; *SPD_R_IF_CTRL = *SPD_R_IF_CTRL & ~IFC_DMA_EN; /* force break DMA */ if (!(*SPD_R_INTR_STAT & 0x0001)) { unsigned char if_ctrl; if_ctrl = *SPD_R_IF_CTRL; *SPD_R_IF_CTRL = IFC_ATA_RST; udelay(100); *SPD_R_IF_CTRL = if_ctrl; do { ret = ps2sif_callrpc(&t->cd_ata_end, 0, SIF_RPCM_NOWAIT, NULL, 0, NULL, 0, NULL, NULL); switch (ret) { case 0: break; case -SIF_RPCE_SENDP: break; default: printk("ps2_ide_dmaproc(end): callrpc failed, result=%d\n", ret); break; } } while (ret == -SIF_RPCE_SENDP); } drive->waiting_for_dma = 0; return 0; case ide_dma_test_irq: /* returns 1 if dma irq issued, 0 otherwise */ return (*SPD_R_INTR_STAT & 0x0001) ? 1 : 0; default: printk("ps2_ide_dmaproc: unsupported func: %d\n", func); return 1; } }
static int siimage_tune_chipset (ide_drive_t *drive, byte xferspeed) { u8 ultra6[] = { 0x0F, 0x0B, 0x07, 0x05, 0x03, 0x02, 0x01 }; u8 ultra5[] = { 0x0C, 0x07, 0x05, 0x04, 0x02, 0x01 }; u16 dma[] = { 0x2208, 0x10C2, 0x10C1 }; ide_hwif_t *hwif = HWIF(drive); u16 ultra = 0, multi = 0; u8 mode = 0, unit = drive->select.b.unit; u8 speed = ide_rate_filter(siimage_ratemask(drive), xferspeed); unsigned long base = (unsigned long)hwif->hwif_data; u8 scsc = 0, addr_mask = ((hwif->channel) ? ((hwif->mmio) ? 0xF4 : 0x84) : ((hwif->mmio) ? 0xB4 : 0x80)); unsigned long ma = siimage_seldev(drive, 0x08); unsigned long ua = siimage_seldev(drive, 0x0C); if (hwif->mmio) { scsc = hwif->INB(base + 0x4A); mode = hwif->INB(base + addr_mask); multi = hwif->INW(ma); ultra = hwif->INW(ua); } else { pci_read_config_byte(hwif->pci_dev, 0x8A, &scsc); pci_read_config_byte(hwif->pci_dev, addr_mask, &mode); pci_read_config_word(hwif->pci_dev, ma, &multi); pci_read_config_word(hwif->pci_dev, ua, &ultra); } mode &= ~((unit) ? 0x30 : 0x03); ultra &= ~0x3F; scsc = ((scsc & 0x30) == 0x00) ? 0 : 1; scsc = is_sata(hwif) ? 1 : scsc; switch(speed) { case XFER_PIO_4: case XFER_PIO_3: case XFER_PIO_2: case XFER_PIO_1: case XFER_PIO_0: siimage_tuneproc(drive, (speed - XFER_PIO_0)); mode |= ((unit) ? 0x10 : 0x01); break; case XFER_MW_DMA_2: case XFER_MW_DMA_1: case XFER_MW_DMA_0: multi = dma[speed - XFER_MW_DMA_0]; mode |= ((unit) ? 0x20 : 0x02); config_siimage_chipset_for_pio(drive, 0); break; case XFER_UDMA_6: case XFER_UDMA_5: case XFER_UDMA_4: case XFER_UDMA_3: case XFER_UDMA_2: case XFER_UDMA_1: case XFER_UDMA_0: multi = dma[2]; ultra |= ((scsc) ? (ultra6[speed - XFER_UDMA_0]) : (ultra5[speed - XFER_UDMA_0])); mode |= ((unit) ? 0x30 : 0x03); config_siimage_chipset_for_pio(drive, 0); break; default: return 1; } if (hwif->mmio) { hwif->OUTB(mode, base + addr_mask); hwif->OUTW(multi, ma); hwif->OUTW(ultra, ua); } else { pci_write_config_byte(hwif->pci_dev, addr_mask, mode); pci_write_config_word(hwif->pci_dev, ma, multi); pci_write_config_word(hwif->pci_dev, ua, ultra); } return (ide_config_drive_speed(drive, speed)); }
static void idescsi_discard_data (ide_drive_t *drive, unsigned int bcount) { while (bcount--) (void) HWIF(drive)->INB(IDE_DATA_REG); }
/* returns 1 if dma irq issued, 0 otherwise */ static int sgiioc4_ide_dma_test_irq(ide_drive_t * drive) { return sgiioc4_checkirq(HWIF(drive)); }
static void idescsi_output_zeros (ide_drive_t *drive, unsigned int bcount) { while (bcount--) HWIF(drive)->OUTB(0, IDE_DATA_REG); }
/* Creates the scatter gather list, DMA Table */ static unsigned int sgiioc4_build_dma_table(ide_drive_t * drive, struct request *rq, int ddir) { ide_hwif_t *hwif = HWIF(drive); unsigned int *table = hwif->dmatable_cpu; unsigned int count = 0, i = 1; struct scatterlist *sg; hwif->sg_nents = i = ide_build_sglist(drive, rq); if (!i) return 0; /* sglist of length Zero */ sg = hwif->sg_table; while (i && sg_dma_len(sg)) { dma_addr_t cur_addr; int cur_len; cur_addr = sg_dma_address(sg); cur_len = sg_dma_len(sg); while (cur_len) { if (count++ >= IOC4_PRD_ENTRIES) { printk(KERN_WARNING "%s: DMA table too small\n", drive->name); goto use_pio_instead; } else { u32 bcount = 0x10000 - (cur_addr & 0xffff); if (bcount > cur_len) bcount = cur_len; /* put the addr, length in * the IOC4 dma-table format */ *table = 0x0; table++; *table = cpu_to_be32(cur_addr); table++; *table = 0x0; table++; *table = cpu_to_be32(bcount); table++; cur_addr += bcount; cur_len -= bcount; } } sg++; i--; } if (count) { table--; *table |= cpu_to_be32(0x80000000); return count; } use_pio_instead: pci_unmap_sg(hwif->pci_dev, hwif->sg_table, hwif->sg_nents, hwif->sg_dma_direction); return 0; /* revert to PIO for this request */ }
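/*
 * Editor's note: each scatter/gather element above is written as four
 * big-endian 32-bit words, and bit 31 of the final byte-count word is set
 * on the last element to terminate the list.  Expressed as a structure it
 * would look roughly like this; the name and layout labels are
 * illustrative, not a definition taken from the driver.
 */
struct ioc4_prd_entry {			/* hypothetical name */
	__be32 reserved0;
	__be32 addr;			/* cpu_to_be32(cur_addr) */
	__be32 reserved1;
	__be32 bcount;			/* cpu_to_be32(length); bit 31 = end of table */
};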
static inline void do_identify (ide_drive_t *drive, u8 cmd) { ide_hwif_t *hwif = HWIF(drive); int bswap = 1; struct hd_driveid *id; /* called with interrupts disabled! */ id = drive->id; /* read 512 bytes of id info */ hwif->ata_input_data(drive, id, SECTOR_WORDS); drive->id_read = 1; local_irq_enable(); ide_fix_driveid(id); if (!drive->forced_lun) drive->last_lun = id->last_lun & 0x7; #if defined (CONFIG_SCSI_EATA_DMA) || defined (CONFIG_SCSI_EATA_PIO) || defined (CONFIG_SCSI_EATA) /* * EATA SCSI controllers do a hardware ATA emulation: * Ignore them if there is a driver for them available. */ if ((id->model[0] == 'P' && id->model[1] == 'M') || (id->model[0] == 'S' && id->model[1] == 'K')) { printk("%s: EATA SCSI HBA %.10s\n", drive->name, id->model); goto err_misc; } #endif /* CONFIG_SCSI_EATA_DMA || CONFIG_SCSI_EATA_PIO */ /* * WIN_IDENTIFY returns little-endian info, * WIN_PIDENTIFY *usually* returns little-endian info. */ if (cmd == WIN_PIDENTIFY) { if ((id->model[0] == 'N' && id->model[1] == 'E') /* NEC */ || (id->model[0] == 'F' && id->model[1] == 'X') /* Mitsumi */ || (id->model[0] == 'P' && id->model[1] == 'i'))/* Pioneer */ /* Vertos drives may still be weird */ bswap ^= 1; } ide_fixstring(id->model, sizeof(id->model), bswap); ide_fixstring(id->fw_rev, sizeof(id->fw_rev), bswap); ide_fixstring(id->serial_no, sizeof(id->serial_no), bswap); if (strstr(id->model, "E X A B Y T E N E S T")) goto err_misc; /* we depend on this a lot! */ id->model[sizeof(id->model)-1] = '\0'; printk("%s: %s, ", drive->name, id->model); drive->present = 1; drive->dead = 0; /* * Check for an ATAPI device */ if (cmd == WIN_PIDENTIFY) { u8 type = (id->config >> 8) & 0x1f; printk("ATAPI "); #ifdef CONFIG_BLK_DEV_PDC4030 if (hwif->channel == 1 && hwif->chipset == ide_pdc4030) { printk(" -- not supported on 2nd Promise port\n"); goto err_misc; } #endif /* CONFIG_BLK_DEV_PDC4030 */ switch (type) { case ide_floppy: if (!strstr(id->model, "CD-ROM")) { if (!strstr(id->model, "oppy") && !strstr(id->model, "poyp") && !strstr(id->model, "ZIP")) printk("cdrom or floppy?, assuming "); if (drive->media != ide_cdrom) { printk ("FLOPPY"); drive->removable = 1; break; } } /* Early cdrom models used zero */ type = ide_cdrom; case ide_cdrom: drive->removable = 1; #ifdef CONFIG_PPC /* kludge for Apple PowerBook internal zip */ if (!strstr(id->model, "CD-ROM") && strstr(id->model, "ZIP")) { printk ("FLOPPY"); type = ide_floppy; break; } #endif printk ("CD/DVD-ROM"); break; case ide_tape: printk ("TAPE"); break; case ide_optical: printk ("OPTICAL"); drive->removable = 1; break; default: printk("UNKNOWN (type %d)", type); break; } printk (" drive\n"); drive->media = type; return; }
static void piix_set_dma_mode(ide_drive_t *drive, const u8 speed) { ide_hwif_t *hwif = HWIF(drive); struct pci_dev *dev = to_pci_dev(hwif->dev); u8 maslave = hwif->channel ? 0x42 : 0x40; int a_speed = 3 << (drive->dn * 4); int u_flag = 1 << drive->dn; int v_flag = 0x01 << drive->dn; int w_flag = 0x10 << drive->dn; int u_speed = 0; int sitre; u16 reg4042, reg4a; u8 reg48, reg54, reg55; pci_read_config_word(dev, maslave, &reg4042); sitre = (reg4042 & 0x4000) ? 1 : 0; pci_read_config_byte(dev, 0x48, &reg48); pci_read_config_word(dev, 0x4a, &reg4a); pci_read_config_byte(dev, 0x54, &reg54); pci_read_config_byte(dev, 0x55, &reg55); if (speed >= XFER_UDMA_0) { u8 udma = speed - XFER_UDMA_0; u_speed = min_t(u8, 2 - (udma & 1), udma) << (drive->dn * 4); if (!(reg48 & u_flag)) pci_write_config_byte(dev, 0x48, reg48 | u_flag); if (speed == XFER_UDMA_5) { pci_write_config_byte(dev, 0x55, (u8) reg55|w_flag); } else { pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag); } if ((reg4a & a_speed) != u_speed) pci_write_config_word(dev, 0x4a, (reg4a & ~a_speed) | u_speed); if (speed > XFER_UDMA_2) { if (!(reg54 & v_flag)) pci_write_config_byte(dev, 0x54, reg54 | v_flag); } else pci_write_config_byte(dev, 0x54, reg54 & ~v_flag); } else { const u8 mwdma_to_pio[] = { 0, 3, 4 }; u8 pio; if (reg48 & u_flag) pci_write_config_byte(dev, 0x48, reg48 & ~u_flag); if (reg4a & a_speed) pci_write_config_word(dev, 0x4a, reg4a & ~a_speed); if (reg54 & v_flag) pci_write_config_byte(dev, 0x54, reg54 & ~v_flag); if (reg55 & w_flag) pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag); if (speed >= XFER_MW_DMA_0) pio = mwdma_to_pio[speed - XFER_MW_DMA_0]; else pio = 2; /* only SWDMA2 is allowed */ piix_set_pio_mode(drive, pio); } }
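/*
 * Editor's note: the min_t() expression above compresses the explicit
 * per-mode switch used by the older piix_tune_chipset() earlier in this
 * section.  Worked through: UDMA0 -> min(2, 0) = 0, UDMA1/3/5 ->
 * 2 - 1 = 1, UDMA2/4 -> 2, i.e. the same 0/1/2 field values the switch
 * assigned, shifted into the same per-drive nibble of register 0x4a.
 */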
/* Main tune procedure, called from tuneproc. */ static void opti621_tune_drive (ide_drive_t *drive, u8 pio) { /* primary and secondary drives share some registers, * so we have to program both drives */ unsigned long flags; u8 pio1 = 0, pio2 = 0; pio_clocks_t first, second; int ax, drdy; u8 cycle1, cycle2, misc; ide_hwif_t *hwif = HWIF(drive); /* sets drive->drive_data for both drives */ compute_pios(drive, pio); pio1 = hwif->drives[0].drive_data; pio2 = hwif->drives[1].drive_data; compute_clocks(pio1, &first); compute_clocks(pio2, &second); /* ax = max(a1,a2) */ ax = (first.address_time < second.address_time) ? second.address_time : first.address_time; drdy = 2; /* DRDY is default 2 (by OPTi Databook) */ cycle1 = ((first.data_time-1)<<4) | (first.recovery_time-2); cycle2 = ((second.data_time-1)<<4) | (second.recovery_time-2); misc = READ_PREFETCH | ((ax-1)<<4) | ((drdy-2)<<1); #ifdef OPTI621_DEBUG printk("%s: master: address: %d, data: %d, " "recovery: %d, drdy: %d [clk]\n", hwif->name, ax, first.data_time, first.recovery_time, drdy); printk("%s: slave: address: %d, data: %d, " "recovery: %d, drdy: %d [clk]\n", hwif->name, ax, second.data_time, second.recovery_time, drdy); #endif spin_lock_irqsave(&ide_lock, flags); reg_base = hwif->io_ports[IDE_DATA_OFFSET]; /* allow Register-B */ hwif->OUTB(0xc0, reg_base+CNTRL_REG); /* hmm, setupvic.exe does this ;-) */ hwif->OUTB(0xff, reg_base+5); /* if reads 0xff, adapter not exist? */ (void) hwif->INB(reg_base+CNTRL_REG); /* if reads 0xc0, no interface exist? */ read_reg(hwif, CNTRL_REG); /* read version, probably 0 */ read_reg(hwif, STRAP_REG); /* program primary drive */ /* select Index-0 for Register-A */ write_reg(hwif, 0, MISC_REG); /* set read cycle timings */ write_reg(hwif, cycle1, READ_REG); /* set write cycle timings */ write_reg(hwif, cycle1, WRITE_REG); /* program secondary drive */ /* select Index-1 for Register-B */ write_reg(hwif, 1, MISC_REG); /* set read cycle timings */ write_reg(hwif, cycle2, READ_REG); /* set write cycle timings */ write_reg(hwif, cycle2, WRITE_REG); /* use Register-A for drive 0 */ /* use Register-B for drive 1 */ write_reg(hwif, 0x85, CNTRL_REG); /* set address setup, DRDY timings, */ /* and read prefetch for both drives */ write_reg(hwif, misc, MISC_REG); spin_unlock_irqrestore(&ide_lock, flags); }
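/*
 * Editor's note, worked example of the register packing above (values
 * chosen purely for illustration): a data pulse of 3 clocks and a recovery
 * of 4 clocks give cycle1 = ((3-1)<<4) | (4-2) = 0x22; an address setup of
 * 2 clocks with the default DRDY of 2 gives
 * misc = READ_PREFETCH | ((2-1)<<4) | ((2-2)<<1) = READ_PREFETCH | 0x10.
 */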
/* * This routine writes the prepared setup/active/recovery counts * for a drive into the cmd646 chipset registers to active them. */ static void program_drive_counts (ide_drive_t *drive, int setup_count, int active_count, int recovery_count) { unsigned long flags; struct pci_dev *dev = HWIF(drive)->pci_dev; ide_drive_t *drives = HWIF(drive)->drives; u8 temp_b; static const u8 setup_counts[] = {0x40, 0x40, 0x40, 0x80, 0, 0xc0}; static const u8 recovery_counts[] = {15, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 0}; static const u8 arttim_regs[2][2] = { { ARTTIM0, ARTTIM1 }, { ARTTIM23, ARTTIM23 } }; static const u8 drwtim_regs[2][2] = { { DRWTIM0, DRWTIM1 }, { DRWTIM2, DRWTIM3 } }; int channel = (int) HWIF(drive)->channel; int slave = (drives != drive); /* Is this really the best way to determine this?? */ cmdprintk("program_drive_count parameters = s(%d),a(%d),r(%d),p(%d)\n", setup_count, active_count, recovery_count, drive->present); /* * Set up address setup count registers. * Primary interface has individual count/timing registers for * each drive. Secondary interface has one common set of registers, * for address setup so we merge these timings, using the slowest * value. */ if (channel) { drive->drive_data = setup_count; setup_count = IDE_MAX(drives[0].drive_data, drives[1].drive_data); cmdprintk("Secondary interface, setup_count = %d\n", setup_count); } /* * Convert values to internal chipset representation */ setup_count = (setup_count > 5) ? 0xc0 : (int) setup_counts[setup_count]; active_count &= 0xf; /* Remember, max value is 16 */ recovery_count = (int) recovery_counts[recovery_count]; cmdprintk("Final values = %d,%d,%d\n", setup_count, active_count, recovery_count); /* * Now that everything is ready, program the new timings */ local_irq_save(flags); /* * Program the address_setup clocks into ARTTIM reg, * and then the active/recovery counts into the DRWTIM reg */ (void) pci_read_config_byte(dev, arttim_regs[channel][slave], &temp_b); (void) pci_write_config_byte(dev, arttim_regs[channel][slave], ((u8) setup_count) | (temp_b & 0x3f)); (void) pci_write_config_byte(dev, drwtim_regs[channel][slave], (u8) ((active_count << 4) | recovery_count)); cmdprintk ("Write %x to %x\n", ((u8) setup_count) | (temp_b & 0x3f), arttim_regs[channel][slave]); cmdprintk ("Write %x to %x\n", (u8) ((active_count << 4) | recovery_count), drwtim_regs[channel][slave]); local_irq_restore(flags); }
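/*
 * Editor's note, worked example of the conversion above: a requested
 * setup_count of 3 indexes setup_counts[] to 0x80, which lands in the top
 * two bits of the ARTTIM register once the old value is masked with 0x3f;
 * active_count 10 and recovery_count 5 pack into DRWTIM as
 * (10 << 4) | recovery_counts[5] = (10 << 4) | 4 = 0xa4.
 */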
void SELECT_DRIVE (ide_drive_t *drive) { if (HWIF(drive)->selectproc) HWIF(drive)->selectproc(drive); HWIF(drive)->OUTB(drive->select.all, IDE_SELECT_REG); }
static int cmd64x_tune_chipset (ide_drive_t *drive, u8 xferspeed) { ide_hwif_t *hwif = HWIF(drive); struct pci_dev *dev = hwif->pci_dev; u8 unit = (drive->select.b.unit & 0x01); u8 regU = 0, pciU = (hwif->channel) ? UDIDETCR1 : UDIDETCR0; u8 regD = 0, pciD = (hwif->channel) ? BMIDESR1 : BMIDESR0; u8 speed = ide_rate_filter(cmd64x_ratemask(drive), xferspeed); if (speed > XFER_PIO_4) { (void) pci_read_config_byte(dev, pciD, &regD); (void) pci_read_config_byte(dev, pciU, &regU); regD &= ~(unit ? 0x40 : 0x20); regU &= ~(unit ? 0xCA : 0x35); (void) pci_write_config_byte(dev, pciD, regD); (void) pci_write_config_byte(dev, pciU, regU); (void) pci_read_config_byte(dev, pciD, &regD); (void) pci_read_config_byte(dev, pciU, &regU); } switch(speed) { case XFER_UDMA_5: regU |= (unit ? 0x0A : 0x05); break; case XFER_UDMA_4: regU |= (unit ? 0x4A : 0x15); break; case XFER_UDMA_3: regU |= (unit ? 0x8A : 0x25); break; case XFER_UDMA_2: regU |= (unit ? 0x42 : 0x11); break; case XFER_UDMA_1: regU |= (unit ? 0x82 : 0x21); break; case XFER_UDMA_0: regU |= (unit ? 0xC2 : 0x31); break; case XFER_MW_DMA_2: regD |= (unit ? 0x40 : 0x10); break; case XFER_MW_DMA_1: regD |= (unit ? 0x80 : 0x20); break; case XFER_MW_DMA_0: regD |= (unit ? 0xC0 : 0x30); break; case XFER_SW_DMA_2: regD |= (unit ? 0x40 : 0x10); break; case XFER_SW_DMA_1: regD |= (unit ? 0x80 : 0x20); break; case XFER_SW_DMA_0: regD |= (unit ? 0xC0 : 0x30); break; case XFER_PIO_4: cmd64x_tuneproc(drive, 4); break; case XFER_PIO_3: cmd64x_tuneproc(drive, 3); break; case XFER_PIO_2: cmd64x_tuneproc(drive, 2); break; case XFER_PIO_1: cmd64x_tuneproc(drive, 1); break; case XFER_PIO_0: cmd64x_tuneproc(drive, 0); break; default: return 1; } if (speed > XFER_PIO_4) { (void) pci_write_config_byte(dev, pciU, regU); regD |= (unit ? 0x40 : 0x20); (void) pci_write_config_byte(dev, pciD, regD); } return (ide_config_drive_speed(drive, speed)); }
void QUIRK_LIST (ide_drive_t *drive) { if (HWIF(drive)->quirkproc) drive->quirk_list = HWIF(drive)->quirkproc(drive); }
/* * Set a new transfer mode at the drive */ int cs5530_set_xfer_mode (ide_drive_t *drive, byte mode) { int i, error = 1; byte stat; ide_hwif_t *hwif = HWIF(drive); printk("%s: cs5530_set_xfer_mode(%s)\n", drive->name, strmode(mode)); /* * If this is a DMA mode setting, then turn off all DMA bits. * We will set one of them back on afterwards, if all goes well. * * Not sure why this is needed (it looks very silly), * but other IDE chipset drivers also do this fiddling. ???? -ml */ switch (mode) { case XFER_UDMA_4: case XFER_UDMA_3: case XFER_UDMA_2: case XFER_UDMA_1: case XFER_UDMA_0: case XFER_MW_DMA_2: case XFER_MW_DMA_1: case XFER_MW_DMA_0: case XFER_SW_DMA_2: case XFER_SW_DMA_1: case XFER_SW_DMA_0: drive->id->dma_ultra &= ~0xFF00; drive->id->dma_mword &= ~0x0F00; drive->id->dma_1word &= ~0x0F00; } /* * Select the drive, and issue the SETFEATURES command */ disable_irq(hwif->irq); udelay(1); SELECT_DRIVE(HWIF(drive), drive); udelay(1); if (IDE_CONTROL_REG) OUT_BYTE(drive->ctl | 2, IDE_CONTROL_REG); OUT_BYTE(mode, IDE_NSECTOR_REG); OUT_BYTE(SETFEATURES_XFER, IDE_FEATURE_REG); OUT_BYTE(WIN_SETFEATURES, IDE_COMMAND_REG); udelay(1); /* spec allows drive 400ns to assert "BUSY" */ /* * Wait for drive to become non-BUSY */ if ((stat = GET_STAT()) & BUSY_STAT) { unsigned long flags, timeout; __save_flags(flags); /* local CPU only */ ide__sti(); /* local CPU only -- for jiffies */ timeout = jiffies + WAIT_CMD; while ((stat = GET_STAT()) & BUSY_STAT) { if (0 < (signed long)(jiffies - timeout)) break; } __restore_flags(flags); /* local CPU only */ } /* * Allow status to settle, then read it again. * A few rare drives vastly violate the 400ns spec here, * so we'll wait up to 10usec for a "good" status * rather than expensively fail things immediately. */ for (i = 0; i < 10; i++) { udelay(1); if (OK_STAT((stat = GET_STAT()), DRIVE_READY, BUSY_STAT|DRQ_STAT|ERR_STAT)) { error = 0; break; } } enable_irq(hwif->irq); /* * Turn dma bit on if all is okay */ if (error) { (void) ide_dump_status(drive, "cs5530_set_xfer_mode", stat); } else { switch (mode) { case XFER_UDMA_4: drive->id->dma_ultra |= 0x1010; break; case XFER_UDMA_3: drive->id->dma_ultra |= 0x0808; break; case XFER_UDMA_2: drive->id->dma_ultra |= 0x0404; break; case XFER_UDMA_1: drive->id->dma_ultra |= 0x0202; break; case XFER_UDMA_0: drive->id->dma_ultra |= 0x0101; break; case XFER_MW_DMA_2: drive->id->dma_mword |= 0x0404; break; case XFER_MW_DMA_1: drive->id->dma_mword |= 0x0202; break; case XFER_MW_DMA_0: drive->id->dma_mword |= 0x0101; break; case XFER_SW_DMA_2: drive->id->dma_1word |= 0x0404; break; case XFER_SW_DMA_1: drive->id->dma_1word |= 0x0202; break; case XFER_SW_DMA_0: drive->id->dma_1word |= 0x0101; break; } } return error; }
int ide_ata66_check (ide_drive_t *drive, ide_task_t *args) { /* SATA has no cable restrictions */ if (HWIF(drive)->sata) return 0; if ((args->tfRegister[IDE_COMMAND_OFFSET] == WIN_SETFEATURES) && (args->tfRegister[IDE_SECTOR_OFFSET] > XFER_UDMA_2) && (args->tfRegister[IDE_FEATURE_OFFSET] == SETFEATURES_XFER)) { #ifndef CONFIG_IDEDMA_IVB if ((drive->id->hw_config & 0x6000) == 0) { #else /* !CONFIG_IDEDMA_IVB */ if (((drive->id->hw_config & 0x2000) == 0) || ((drive->id->hw_config & 0x4000) == 0)) { #endif /* CONFIG_IDEDMA_IVB */ printk("%s: Speed warnings UDMA 3/4/5 is not " "functional.\n", drive->name); return 1; } if (!HWIF(drive)->udma_four) { printk("%s: Speed warnings UDMA 3/4/5 is not " "functional.\n", HWIF(drive)->name); return 1; } } return 0; } EXPORT_SYMBOL(ide_ata66_check); /* * Backside of HDIO_DRIVE_CMD call of SETFEATURES_XFER. * 1 : Safe to update drive->id DMA registers. * 0 : OOPs not allowed. */ int set_transfer (ide_drive_t *drive, ide_task_t *args) { if ((args->tfRegister[IDE_COMMAND_OFFSET] == WIN_SETFEATURES) && (args->tfRegister[IDE_SECTOR_OFFSET] >= XFER_SW_DMA_0) && (args->tfRegister[IDE_FEATURE_OFFSET] == SETFEATURES_XFER) && (drive->id->dma_ultra || drive->id->dma_mword || drive->id->dma_1word)) return 1; return 0; } EXPORT_SYMBOL(set_transfer); u8 ide_auto_reduce_xfer (ide_drive_t *drive) { if (!drive->crc_count) return drive->current_speed; drive->crc_count = 0; switch(drive->current_speed) { case XFER_UDMA_7: return XFER_UDMA_6; case XFER_UDMA_6: return XFER_UDMA_5; case XFER_UDMA_5: return XFER_UDMA_4; case XFER_UDMA_4: return XFER_UDMA_3; case XFER_UDMA_3: return XFER_UDMA_2; case XFER_UDMA_2: return XFER_UDMA_1; case XFER_UDMA_1: return XFER_UDMA_0; /* * OOPS we do not goto non Ultra DMA modes * without iCRC's available we force * the system to PIO and make the user * invoke the ATA-1 ATA-2 DMA modes. */ case XFER_UDMA_0: default: return XFER_PIO_4; } } EXPORT_SYMBOL(ide_auto_reduce_xfer); /* * Update the */ int ide_driveid_update (ide_drive_t *drive) { ide_hwif_t *hwif = HWIF(drive); struct hd_driveid *id; #if 0 id = kmalloc(SECTOR_WORDS*4, GFP_ATOMIC); if (!id) return 0; taskfile_lib_get_identify(drive, (char *)&id); ide_fix_driveid(id); if (id) { drive->id->dma_ultra = id->dma_ultra; drive->id->dma_mword = id->dma_mword; drive->id->dma_1word = id->dma_1word; /* anything more ? */ kfree(id); } return 1; #else /* * Re-read drive->id for possible DMA mode * change (copied from ide-probe.c) */ unsigned long timeout, flags; SELECT_MASK(drive, 1); if (IDE_CONTROL_REG) hwif->OUTB(drive->ctl,IDE_CONTROL_REG); ide_delay_50ms(); hwif->OUTB(WIN_IDENTIFY, IDE_COMMAND_REG); timeout = jiffies + WAIT_WORSTCASE; do { if (time_after(jiffies, timeout)) { SELECT_MASK(drive, 0); return 0; /* drive timed-out */ } ide_delay_50ms(); /* give drive a breather */ } while (hwif->INB(IDE_ALTSTATUS_REG) & BUSY_STAT); ide_delay_50ms(); /* wait for IRQ and DRQ_STAT */ if (!OK_STAT(hwif->INB(IDE_STATUS_REG),DRQ_STAT,BAD_R_STAT)) { SELECT_MASK(drive, 0); printk("%s: CHECK for good STATUS\n", drive->name); return 0; } local_irq_save(flags); SELECT_MASK(drive, 0); id = kmalloc(SECTOR_WORDS*4, GFP_ATOMIC); if (!id) { local_irq_restore(flags); return 0; } ata_input_data(drive, id, SECTOR_WORDS); (void) hwif->INB(IDE_STATUS_REG); /* clear drive IRQ */ local_irq_enable(); local_irq_restore(flags); ide_fix_driveid(id); drive->id->dma_ultra = id->dma_ultra; drive->id->dma_mword = id->dma_mword; drive->id->dma_1word = id->dma_1word; /* anything more ? */ kfree(id); return 1; #endif }
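/*
 * Editor's note: ide_auto_reduce_xfer() above only chooses the next-slower
 * UDMA mode after repeated CRC errors; something still has to program that
 * mode.  A minimal sketch of the intent, assuming the generic
 * ide_config_drive_speed() shown earlier in this section (in the real error
 * path the change goes through the chipset-specific speedproc instead, and
 * the function name here is illustrative):
 */
static void example_downgrade_after_crc(ide_drive_t *drive)
{
	u8 new_speed = ide_auto_reduce_xfer(drive);

	if (new_speed != drive->current_speed)
		(void) ide_config_drive_speed(drive, new_speed);
}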
/* * Some localbus EIDE interfaces require a special access sequence * when using 32-bit I/O instructions to transfer data. We call this * the "vlb_sync" sequence, which consists of three successive reads * of the sector count register location, with interrupts disabled * to ensure that the reads all happen together. */ static void ata_vlb_sync(ide_drive_t *drive, unsigned long port) { (void) HWIF(drive)->INB(port); (void) HWIF(drive)->INB(port); (void) HWIF(drive)->INB(port); }
static void cmd640_tune_drive(ide_drive_t *drive, byte pio_mode) { int interface_number; int drive_number; int clock_time; /* ns */ int max_pio; int mc_time, av_time, ds_time; struct hd_driveid* id; int readahead; /* there is a global named read_ahead */ if (pio_mode != 255) { cmd640_set_mode(drive, pio_mode); return; } interface_number = HWIF(drive)->index; drive_number = drive->select.b.unit; clock_time = 1000/bus_speed; id = drive->id; if ((max_pio = ide_scan_pio_blacklist(id->model)) != -1) { ds_time = pio_timings[max_pio].ds_time; } else { max_pio = id->tPIO; ds_time = pio_timings[max_pio].ds_time; if (id->field_valid & 2) { if ((id->capability & 8) && (id->eide_pio_modes & 7)) { if (id->eide_pio_modes & 4) max_pio = 5; else if (id->eide_pio_modes & 2) max_pio = 4; else max_pio = 3; ds_time = id->eide_pio_iordy; } else { ds_time = id->eide_pio; } if (ds_time == 0) ds_time = pio_timings[max_pio].ds_time; } /* * Conservative "downgrade" */ if (max_pio < 4 && max_pio != 0) { max_pio -= 1; ds_time = pio_timings[max_pio].ds_time; } } mc_time = pio_timings[max_pio].mc_time; av_time = pio_timings[max_pio].av_time; cmd640_timings_to_clocks(mc_time, av_time, ds_time, clock_time, interface_number*2 + drive_number); set_pio_mode(interface_number, drive_number, max_pio); cmd640_set_timing(interface_number, drive_number); /* * Disable (or set) readahead mode */ readahead = 0; if (cmd640_chip_version > 1) { /* Mmmm.. probably should be > 2 ?? */ readahead = known_drive_readahead(id->model); if (readahead == -1) readahead = 1; /* Mmmm.. probably be 0 ?? */ set_readahead_mode(readahead, interface_number, drive_number); } printk ("Mode and Timing set to PIO%d, Readahead is %s\n", max_pio, readahead ? "enabled" : "disabled"); }
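/*
 * Editor's note, worked example of the clock conversion above: with a
 * bus_speed of 33 (MHz), clock_time = 1000/33 = 30 ns per clock (integer
 * division).  That per-clock figure is what cmd640_timings_to_clocks() is
 * handed, presumably to turn the nanosecond mc/av/ds timings into whole
 * clock counts for the selected interface and drive.
 */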
void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err) { ide_hwif_t *hwif = HWIF(drive); unsigned long flags; struct request *rq; spin_lock_irqsave(&ide_lock, flags); rq = HWGROUP(drive)->rq; spin_unlock_irqrestore(&ide_lock, flags); if (rq->flags & REQ_DRIVE_CMD) { u8 *args = (u8 *) rq->buffer; if (rq->errors == 0) rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT); if (args) { args[0] = stat; args[1] = err; args[2] = hwif->INB(IDE_NSECTOR_REG); } } else if (rq->flags & REQ_DRIVE_TASK) { u8 *args = (u8 *) rq->buffer; if (rq->errors == 0) rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT); if (args) { args[0] = stat; args[1] = err; args[2] = hwif->INB(IDE_NSECTOR_REG); args[3] = hwif->INB(IDE_SECTOR_REG); args[4] = hwif->INB(IDE_LCYL_REG); args[5] = hwif->INB(IDE_HCYL_REG); args[6] = hwif->INB(IDE_SELECT_REG); } } else if (rq->flags & REQ_DRIVE_TASKFILE) { ide_task_t *args = (ide_task_t *) rq->special; if (rq->errors == 0) rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT); if (args) { if (args->tf_in_flags.b.data) { u16 data = hwif->INW(IDE_DATA_REG); args->tfRegister[IDE_DATA_OFFSET] = (data) & 0xFF; args->hobRegister[IDE_DATA_OFFSET] = (data >> 8) & 0xFF; } args->tfRegister[IDE_ERROR_OFFSET] = err; /* be sure we're looking at the low order bits */ hwif->OUTB(drive->ctl & ~0x80, IDE_CONTROL_REG); args->tfRegister[IDE_NSECTOR_OFFSET] = hwif->INB(IDE_NSECTOR_REG); args->tfRegister[IDE_SECTOR_OFFSET] = hwif->INB(IDE_SECTOR_REG); args->tfRegister[IDE_LCYL_OFFSET] = hwif->INB(IDE_LCYL_REG); args->tfRegister[IDE_HCYL_OFFSET] = hwif->INB(IDE_HCYL_REG); args->tfRegister[IDE_SELECT_OFFSET] = hwif->INB(IDE_SELECT_REG); args->tfRegister[IDE_STATUS_OFFSET] = stat; if (drive->addressing == 1) { hwif->OUTB(drive->ctl|0x80, IDE_CONTROL_REG); args->hobRegister[IDE_FEATURE_OFFSET] = hwif->INB(IDE_FEATURE_REG); args->hobRegister[IDE_NSECTOR_OFFSET] = hwif->INB(IDE_NSECTOR_REG); args->hobRegister[IDE_SECTOR_OFFSET] = hwif->INB(IDE_SECTOR_REG); args->hobRegister[IDE_LCYL_OFFSET] = hwif->INB(IDE_LCYL_REG); args->hobRegister[IDE_HCYL_OFFSET] = hwif->INB(IDE_HCYL_REG); } }