static unsigned int inic_qc_issue(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; /* ATA IRQ doesn't wait for DMA transfer completion and vice * versa. Mask IRQ selectively to detect command completion. * Without it, ATA DMA read command can cause data corruption. * * Something similar might be needed for ATAPI writes. I * tried a lot of combinations but couldn't find the solution. */ if (qc->tf.protocol == ATA_PROT_DMA && !(qc->tf.flags & ATA_TFLAG_WRITE)) inic_set_pirq_mask(ap, PIRQ_MASK_DMA_READ); else inic_set_pirq_mask(ap, PIRQ_MASK_OTHER); /* Issuing a command to yet uninitialized port locks up the * controller. Most of the time, this happens for the first * command after reset which are ATA and ATAPI IDENTIFYs. * Fast fail if stat is 0x7f or 0xff for those commands. */ if (unlikely(qc->tf.command == ATA_CMD_ID_ATA || qc->tf.command == ATA_CMD_ID_ATAPI)) { u8 stat = ata_chk_status(ap); if (stat == 0x7f || stat == 0xff) return AC_ERR_HSM; } return ata_qc_issue_prot(qc); }
static void svia_noop_freeze(struct ata_port *ap)
{
	/*
	 * Some VIA controllers choke when ATA_NIEN is manipulated in
	 * certain ways, so never touch the control register here.
	 * Reading the status register and clearing the BMDMA IRQ is
	 * sufficient to ack any pending interrupt.
	 */
	ata_chk_status(ap);
	ata_bmdma_irq_clear(ap);
}
static void inic_thaw(struct ata_port *ap) { void __iomem *port_base = inic_port_base(ap); ata_chk_status(ap); writeb(0xff, port_base + PORT_IRQ_STAT); __inic_set_pirq_mask(ap, PIRQ_MASK_OTHER); readb(port_base + PORT_IRQ_STAT); /* flush */ }
static void inic_host_intr(struct ata_port *ap) { void __iomem *port_base = inic_port_base(ap); struct ata_eh_info *ehi = &ap->link.eh_info; u8 irq_stat; /* fetch and clear irq */ irq_stat = readb(port_base + PORT_IRQ_STAT); writeb(irq_stat, port_base + PORT_IRQ_STAT); if (likely(!(irq_stat & PIRQ_ERR))) { struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) { ata_chk_status(ap); /* clear ATA interrupt */ return; } if (likely(ata_host_intr(ap, qc))) return; ata_chk_status(ap); /* clear ATA interrupt */ ata_port_printk(ap, KERN_WARNING, "unhandled " "interrupt, irq_stat=%x\n", irq_stat); return; } /* error */ ata_ehi_push_desc(ehi, "irq_stat=0x%x", irq_stat); if (irq_stat & (PIRQ_OFFLINE | PIRQ_ONLINE)) { ata_ehi_hotplugged(ehi); ata_port_freeze(ap); } else ata_port_abort(ap); }
static void scc_bmdma_freeze(struct ata_port *ap)
{
	/* mask device interrupts via nIEN and remember the new ctl */
	ap->ctl |= ATA_NIEN;
	ap->last_ctl = ap->ctl;
	out_be32(ap->ioaddr.ctl_addr, ap->ctl);

	/*
	 * Under certain circumstances some controllers raise an IRQ
	 * on ATA_NIEN manipulation, and many fail to mask an already
	 * pending IRQ when ATA_NIEN is asserted — so clear it here.
	 */
	ata_chk_status(ap);
	ap->ops->irq_clear(ap);
}