/*
 * Stop the bus-master DMA engine for @drive and report whether the
 * transfer completed cleanly.  Returns 0 for a good ending status
 * (interrupt set, no error, engine idle), non-zero otherwise.
 */
static int cmd64x_ide_dma_end (ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct pci_dev *dev = hwif->pci_dev;
	u8 dma_cmd = 0, dma_stat = 0;

	drive->waiting_for_dma = 0;

	/* Halt the engine: clear the start/stop bit in the command register. */
	dma_cmd = hwif->INB(hwif->dma_command);
	hwif->OUTB(dma_cmd & ~1, hwif->dma_command);

	/* Latch the ending status, then ack the INTR and ERROR bits. */
	dma_stat = hwif->INB(hwif->dma_status);
	hwif->OUTB(dma_stat | 6, hwif->dma_status);

	if (cmd64x_alt_dma_status(dev)) {
		/*
		 * This chip reports the DMA interrupt in a PCI config
		 * register as well; read it and write the channel's
		 * interrupt bit back to acknowledge it.
		 */
		u8 irq_mask = hwif->channel ? ARTTIM23_INTR_CH1 : CFR_INTR_CH0;
		u8 irq_reg  = hwif->channel ? ARTTIM2 : CFR;
		u8 irq_stat = 0;

		(void) pci_read_config_byte(dev, irq_reg, &irq_stat);
		(void) pci_write_config_byte(dev, irq_reg, irq_stat | irq_mask);
	}

	/* Tear down the scatter/gather mappings for this request. */
	ide_destroy_dmatable(drive);

	/* Good status is exactly INTR set, ERROR clear, engine stopped. */
	return (dma_stat & 7) != 4;
}
/*
 * Stops the IOC4 DMA Engine.
 *
 * Returns 0 on a clean shutdown, 1 if the engine would not stop or the
 * end-of-DMA indication never arrived.  Always tears down the DMA table.
 */
static int sgiioc4_ide_dma_end(ide_drive_t * drive)
{
	u32 ioc4_dma, bc_dev, bc_mem, num, valid = 0, cnt = 0;
	ide_hwif_t *hwif = HWIF(drive);
	u64 dma_base = hwif->dma_base;
	int dma_stat = 0;
	unsigned long *ending_dma = (unsigned long *) hwif->dma_base2;

	/* Request the engine stop, then poll for the STOP bit to drop. */
	hwif->OUTL(IOC4_S_DMA_STOP, dma_base + IOC4_DMA_CTRL * 4);

	ioc4_dma = sgiioc4_ide_dma_stop(hwif, dma_base);

	if (ioc4_dma & IOC4_S_DMA_STOP) {
		/* Engine refused to stop — report failure to the caller. */
		printk(KERN_ERR
		       "%s(%s): IOC4 DMA STOP bit is still 1 :"
		       "ioc4_dma_reg 0x%x\n",
		       __FUNCTION__, drive->name, ioc4_dma);
		dma_stat = 1;
	}

	/*
	 * The IOC4 will DMA 1's to the ending dma area to indicate that
	 * previous data DMA is complete.  This is necessary because of relaxed
	 * ordering between register reads and DMA writes on the Altix.
	 */
	/* Poll up to 200 times for any non-zero word in the 16-word area. */
	while ((cnt++ < 200) && (!valid)) {
		for (num = 0; num < 16; num++) {
			if (ending_dma[num]) {
				valid = 1;
				break;
			}
		}
		udelay(1);
	}
	if (!valid) {
		printk(KERN_ERR "%s(%s) : DMA incomplete\n", __FUNCTION__,
		       drive->name);
		dma_stat = 1;
	}

	/*
	 * Cross-check the residual byte counts on the device and memory
	 * sides; warn if the device side ran ahead of memory by more than
	 * the 8-byte slack the hardware allows.
	 */
	bc_dev = hwif->INL(dma_base + IOC4_BC_DEV * 4);
	bc_mem = hwif->INL(dma_base + IOC4_BC_MEM * 4);

	if ((bc_dev & 0x01FF) || (bc_mem & 0x1FF)) {
		if (bc_dev > bc_mem + 8) {
			printk(KERN_ERR
			       "%s(%s): WARNING!! byte_count_dev %d "
			       "!= byte_count_mem %d\n",
			       __FUNCTION__, drive->name, bc_dev, bc_mem);
		}
	}

	drive->waiting_for_dma = 0;
	ide_destroy_dmatable(drive);

	return dma_stat;
}
/*
 * Stops the IOC4 DMA Engine.
 *
 * Older variant: polls the control register inline with xide_delay()
 * back-off rather than a helper.  Returns 0 on a clean stop, 1 if the
 * STOP bit never cleared.  Always tears down the DMA table.
 */
static int sgiioc4_ide_dma_end(ide_drive_t * drive)
{
	u32 ioc4_dma, bc_dev, bc_mem, num, valid = 0, cnt = 0;
	ide_hwif_t *hwif = HWIF(drive);
	uint64_t dma_base = hwif->dma_base;
	int dma_stat = 0, count;
	unsigned long *ending_dma = (unsigned long *) hwif->dma_base2;

	/* Request the stop, then poll (with increasing delay) for the
	 * STOP bit to clear, giving up after count reaches 100. */
	hwif->OUTL(IOC4_S_DMA_STOP, dma_base + IOC4_DMA_CTRL * 4);

	count = 0;
	do {
		xide_delay(count);
		ioc4_dma = hwif->INL(dma_base + IOC4_DMA_CTRL * 4);
		count += 10;
	} while ((ioc4_dma & IOC4_S_DMA_STOP) && (count < 100));

	if (ioc4_dma & IOC4_S_DMA_STOP) {
		printk(KERN_ERR
		       "sgiioc4_stopdma(%s): IOC4 DMA STOP bit is still 1 : ioc4_dma_reg 0x%x\n",
		       drive->name, ioc4_dma);
		dma_stat = 1;
	}

	/*
	 * Wait for the controller to write the end-of-DMA marker into the
	 * 16-word ending_dma area — any set bit in any word counts.
	 * (The `& (~0ul)` mask is a no-op; the test is simply non-zero.)
	 */
	if (ending_dma) {
		do {
			for (num = 0; num < 16; num++) {
				if (ending_dma[num] & (~0ul)) {
					valid = 1;
					break;
				}
			}
			xide_delay(cnt);
		} while ((cnt++ < 100) && (!valid));
	}

	if (!valid)
		printk(KERN_INFO "sgiioc4_ide_dma_end(%s) : Stale DMA Data in Memory\n",
		       drive->name);

	/* Cross-check residual byte counts on the device vs. memory side;
	 * warn if the device side is more than 8 bytes ahead. */
	bc_dev = hwif->INL(dma_base + IOC4_BC_DEV * 4);
	bc_mem = hwif->INL(dma_base + IOC4_BC_MEM * 4);
	if ((bc_dev & 0x01FF) || (bc_mem & 0x1FF)) {
		if (bc_dev > bc_mem + 8) {
			printk(KERN_ERR
			       "sgiioc4_ide_dma_end(%s) : WARNING!!! byte_count_at_dev %d != byte_count_at_mem %d\n",
			       drive->name, bc_dev, bc_mem);
		}
	}

	drive->waiting_for_dma = 0;
	ide_destroy_dmatable(drive);

	return dma_stat;
}
/*
 * End a DMA transfer on CMD646 revision 1 hardware.
 *
 * NOTE: unlike the generic sequence, the status register is sampled
 * *before* the engine is stopped — this ordering is deliberate for
 * this chip revision and must be preserved.
 * Returns 0 for a good ending status, non-zero otherwise.
 */
static int cmd646_1_ide_dma_end (ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	u8 cmd = 0, stat = 0;

	drive->waiting_for_dma = 0;

	/* Sample ending status first (chip quirk), then halt the engine. */
	stat = hwif->INB(hwif->dma_status);
	cmd = hwif->INB(hwif->dma_command);
	hwif->OUTB(cmd & ~1, hwif->dma_command);

	/* Acknowledge the INTR and ERROR bits (write-1-to-clear). */
	hwif->OUTB(stat | 6, hwif->dma_status);

	/* Release the scatter/gather mappings for this request. */
	ide_destroy_dmatable(drive);

	/* Good status = INTR set, ERROR clear, engine stopped. */
	return (stat & 7) != 4;
}
/*
 * End a DMA transfer on CMD646 revision 1 hardware (port-I/O API).
 *
 * Status is read *before* the engine is stopped — this ordering is the
 * point of this revision-specific variant and must not be changed.
 * Returns 0 for a good ending status, non-zero otherwise.
 */
static int cmd646_1_dma_end(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	u8 cmd = 0, stat = 0;

	drive->waiting_for_dma = 0;

	/* Chip quirk: latch ending status before halting the engine. */
	stat = inb(hwif->dma_base + ATA_DMA_STATUS);
	cmd = inb(hwif->dma_base + ATA_DMA_CMD);
	outb(cmd & ~1, hwif->dma_base + ATA_DMA_CMD);

	/* Acknowledge the INTR and ERROR bits (write-1-to-clear). */
	outb(stat | 6, hwif->dma_base + ATA_DMA_STATUS);

	/* Release the scatter/gather mappings for this request. */
	ide_destroy_dmatable(drive);

	/* Good status = INTR set, ERROR clear, engine stopped. */
	return (stat & 7) != 4;
}
/*
 * Stop the SCC bus-master DMA engine and collect the ending status.
 *
 * Returns 0 on a clean completion; otherwise 0x10 ORed with the raw
 * status byte so the caller can see what went wrong.
 */
static int __scc_dma_end(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 cmd, stat;

	drive->waiting_for_dma = 0;

	/* Halt the engine: clear the start/stop bit in the command register. */
	cmd = scc_ide_inb(hwif->dma_base);
	scc_ide_outb(cmd & ~1, hwif->dma_base);

	/* Latch the ending status and ack the INTR/ERROR bits. */
	stat = scc_ide_inb(hwif->dma_base + 4);
	scc_ide_outb(stat | 6, hwif->dma_base + 4);

	/* Release the scatter/gather mappings for this request. */
	ide_destroy_dmatable(drive);

	hwif->dma = 0;
	/* Make the dma flag update visible before returning. */
	wmb();

	/* Good status = INTR set, ERROR clear, engine stopped. */
	return (stat & 7) != 4 ? (0x10 | stat) : 0;
}
/*
 * Creates the scatter gather list, DMA Table.
 *
 * Builds the IOC4-format PRD table (four 32-bit big-endian words per
 * entry: 0, address, 0, byte count) from the request's sglist, splitting
 * each segment so no entry crosses a 64 KiB boundary.  Marks the last
 * entry with the end-of-table bit.  Returns the number of entries built,
 * or 0 to make the caller fall back to PIO.
 */
static unsigned int
sgiioc4_build_dma_table(ide_drive_t * drive, struct request *rq, int ddir)
{
	ide_hwif_t *hwif = HWIF(drive);
	unsigned int *table = hwif->dmatable_cpu;
	unsigned int count = 0, i = 1;
	struct scatterlist *sg;

	hwif->sg_nents = i = ide_build_sglist(drive, rq);

	if (!i)
		return 0;	/* sglist of length Zero */

	sg = hwif->sg_table;
	while (i && sg_dma_len(sg)) {
		dma_addr_t cur_addr;
		int cur_len;
		cur_addr = sg_dma_address(sg);
		cur_len = sg_dma_len(sg);

		while (cur_len) {
			if (count++ >= IOC4_PRD_ENTRIES) {
				/* Table full — abandon DMA for this request. */
				printk(KERN_WARNING
				       "%s: DMA table too small\n",
				       drive->name);
				goto use_pio_instead;
			} else {
				/* Clamp this entry so it does not cross a
				 * 64 KiB boundary. */
				u32 bcount =
				    0x10000 - (cur_addr & 0xffff);

				if (bcount > cur_len)
					bcount = cur_len;

				/* put the addr, length in
				 * the IOC4 dma-table format */
				*table = 0x0;
				table++;
				*table = cpu_to_be32(cur_addr);
				table++;
				*table = 0x0;
				table++;

				*table = cpu_to_be32(bcount);
				table++;

				cur_addr += bcount;
				cur_len -= bcount;
			}
		}

		sg = sg_next(sg);
		i--;
	}

	if (count) {
		/* Back-patch the end-of-table bit into the last entry's
		 * byte-count word (table points one past it here). */
		table--;
		*table |= cpu_to_be32(0x80000000);
		return count;
	}

	/* count == 0 falls through to the PIO path below. */
use_pio_instead:
	ide_destroy_dmatable(drive);

	return 0;		/* revert to PIO for this request */
}