/* Per-channel DMA interrupt handler.
 *
 * Runs only when the device is configured for one IRQ per DMA channel,
 * so scheduling uses the channel's own napi context rather than the
 * device-wide one in the private data.
 */
static irqreturn_t xlgmac_dma_isr(int irq, void *data)
{
	struct xlgmac_channel *channel = data;

	/* Someone else already owns the poll; nothing to do here */
	if (!napi_schedule_prep(&channel->napi))
		return IRQ_HANDLED;

	/* Quiesce this channel's Tx/Rx interrupts while polling runs */
	disable_irq_nosync(channel->dma_irq);

	/* Hand the work to the napi poll loop */
	__napi_schedule_irqoff(&channel->napi);

	return IRQ_HANDLED;
}
/* The RDC interrupt handler.
 *
 * Saves and masks the interrupt-enable register (MIER), reads MISR
 * (which clears the pending status on read), and schedules NAPI for
 * Rx/Tx work.  The MMIO access order here is load-bearing: MIER must
 * be masked before MISR is read, and MIER is restored on every exit
 * path — with the Rx/Tx bits left masked when NAPI was scheduled, so
 * the poll routine can re-enable them when it finishes.
 */
static irqreturn_t r6040_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct r6040_private *lp = netdev_priv(dev);
	void __iomem *ioaddr = lp->base;
	u16 misr, status;

	/* Save MIER */
	misr = ioread16(ioaddr + MIER);
	/* Mask off RDC MAC interrupt */
	iowrite16(MSK_INT, ioaddr + MIER);
	/* Read MISR status and clear */
	status = ioread16(ioaddr + MISR);

	/* 0x0000/0xffff: not our interrupt (or hardware gone) — decline it */
	if (status == 0x0000 || status == 0xffff) {
		/* Restore RDC MAC interrupt */
		iowrite16(misr, ioaddr + MIER);
		return IRQ_NONE;
	}

	/* RX interrupt request */
	if (status & (RX_INTS | TX_INTS)) {
		if (status & RX_NO_DESC) {
			/* RX descriptor unavailable */
			dev->stats.rx_dropped++;
			dev->stats.rx_missed_errors++;
		}

		if (status & RX_FIFO_FULL)
			dev->stats.rx_fifo_errors++;

		if (likely(napi_schedule_prep(&lp->napi))) {
			/* Mask off RX interrupt */
			misr &= ~(RX_INTS | TX_INTS);
			__napi_schedule_irqoff(&lp->napi);
		}
	}

	/* Restore RDC MAC interrupt */
	iowrite16(misr, ioaddr + MIER);

	return IRQ_HANDLED;
}
/* Device-wide interrupt handler.
 *
 * Decodes DMA_ISR to find the interrupting channel(s), updates the
 * driver's error/event counters from each channel's status register,
 * schedules the shared NAPI context when not in per-channel IRQ mode,
 * and dispatches MAC-level MMC Tx/Rx counter interrupts.  Always
 * returns IRQ_HANDLED, even when DMA_ISR reads zero.
 */
static irqreturn_t xlgmac_isr(int irq, void *data)
{
	unsigned int dma_isr, dma_ch_isr, mac_isr;
	struct xlgmac_pdata *pdata = data;
	struct xlgmac_channel *channel;
	struct xlgmac_hw_ops *hw_ops;
	unsigned int i, ti, ri;

	hw_ops = &pdata->hw_ops;

	/* The DMA interrupt status register also reports MAC and MTL
	 * interrupts. So for polling mode, we just need to check for
	 * this register to be non-zero
	 */
	dma_isr = readl(pdata->mac_regs + DMA_ISR);
	if (!dma_isr)
		return IRQ_HANDLED;

	netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);

	/* One status bit per channel in DMA_ISR; skip idle channels */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!(dma_isr & (1 << i)))
			continue;

		channel = pdata->channel_head + i;

		dma_ch_isr = readl(XLGMAC_DMA_REG(channel, DMA_CH_SR));
		netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
			  i, dma_ch_isr);

		/* The TI or RI interrupt bits may still be set even if using
		 * per channel DMA interrupts. Check to be sure those are not
		 * enabled before using the private data napi structure.
		 */
		ti = XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TI_POS,
					 DMA_CH_SR_TI_LEN);
		ri = XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RI_POS,
					 DMA_CH_SR_RI_LEN);
		if (!pdata->per_channel_irq && (ti || ri)) {
			if (napi_schedule_prep(&pdata->napi)) {
				/* Disable Tx and Rx interrupts */
				xlgmac_disable_rx_tx_ints(pdata);

				pdata->stats.napi_poll_isr++;
				/* Turn on polling */
				__napi_schedule_irqoff(&pdata->napi);
			}
		}

		/* Fold the remaining status bits into the driver counters */
		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TPS_POS,
					DMA_CH_SR_TPS_LEN))
			pdata->stats.tx_process_stopped++;

		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RPS_POS,
					DMA_CH_SR_RPS_LEN))
			pdata->stats.rx_process_stopped++;

		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TBU_POS,
					DMA_CH_SR_TBU_LEN))
			pdata->stats.tx_buffer_unavailable++;

		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RBU_POS,
					DMA_CH_SR_RBU_LEN))
			pdata->stats.rx_buffer_unavailable++;

		/* Restart the device on a Fatal Bus Error */
		if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_FBE_POS,
					DMA_CH_SR_FBE_LEN)) {
			pdata->stats.fatal_bus_error++;
			schedule_work(&pdata->restart_work);
		}

		/* Clear all interrupt signals */
		writel(dma_ch_isr, XLGMAC_DMA_REG(channel, DMA_CH_SR));
	}

	/* MAC-level interrupt: dispatch MMC Tx/Rx counter handlers */
	if (XLGMAC_GET_REG_BITS(dma_isr, DMA_ISR_MACIS_POS,
				DMA_ISR_MACIS_LEN)) {
		mac_isr = readl(pdata->mac_regs + MAC_ISR);

		if (XLGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCTXIS_POS,
					MAC_ISR_MMCTXIS_LEN))
			hw_ops->tx_mmc_int(pdata);

		if (XLGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCRXIS_POS,
					MAC_ISR_MMCRXIS_LEN))
			hw_ops->rx_mmc_int(pdata);
	}

	return IRQ_HANDLED;
}