/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
    struct net_device *dev = (struct net_device *)dev_instance;
    struct tulip_private *tp = netdev_priv(dev);
    void __iomem *ioaddr = tp->base_addr;
    int csr5;
    int missed;
    int rx = 0;
    int tx = 0;
    int oi = 0;
    int maxrx = RX_RING_SIZE;
    int maxtx = TX_RING_SIZE;
    int maxoi = TX_RING_SIZE;
#ifdef CONFIG_TULIP_NAPI
    int rxd = 0;
#else
    int entry;
#endif
    unsigned int work_count = tulip_max_interrupt_work;
    unsigned int handled = 0;

    /* Let's see whether the interrupt really is for us */
    csr5 = ioread32(ioaddr + CSR5);

    if (tp->flags & HAS_PHY_IRQ)
        handled = phy_interrupt(dev);

    if ((csr5 & (NormalIntr | AbnormalIntr)) == 0)
        return IRQ_RETVAL(handled);

    tp->nir++;

    do {
#ifdef CONFIG_TULIP_NAPI
        if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
            rxd++;
            /* Mask RX intrs and add the device to the poll list. */
            iowrite32(tulip_tbl[tp->chip_id].valid_intrs & ~RxPollInt,
                      ioaddr + CSR7);
            netif_rx_schedule(dev);

            if (!(csr5 & ~(AbnormalIntr | NormalIntr | RxPollInt | TPLnkPass)))
                break;
        }

        /* Acknowledge the interrupt sources we handle here ASAP;
           the poll function does the Rx and RxNoBuf acking. */
        iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);
#else
        /* Acknowledge all of the current interrupt sources ASAP. */
        iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);

        if (csr5 & (RxIntr | RxNoBuf)) {
            rx += tulip_rx(dev);
            tulip_refill_rx(dev);
        }
#endif /* CONFIG_TULIP_NAPI */

        if (tulip_debug > 4)
            printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
                   dev->name, csr5, ioread32(ioaddr + CSR5));

        if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
            unsigned int dirty_tx;

            spin_lock(&tp->lock);

            for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
                 dirty_tx++) {
                int entry = dirty_tx % TX_RING_SIZE;
                int status = le32_to_cpu(tp->tx_ring[entry].status);

                if (status < 0)
                    break;  /* It still has not been Txed */

                /* Check for Rx filter setup frames. */
                if (tp->tx_buffers[entry].skb == NULL) {
                    /* test because dummy frames not mapped */
                    if (tp->tx_buffers[entry].mapping)
                        pci_unmap_single(tp->pdev,
                                         tp->tx_buffers[entry].mapping,
                                         sizeof(tp->setup_frame),
                                         PCI_DMA_TODEVICE);
                    continue;
                }

                if (status & 0x8000) {
                    /* There was a major error, log it. */
#ifndef final_version
                    if (tulip_debug > 1)
                        printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
                               dev->name, status);
#endif
                    tp->stats.tx_errors++;
                    if (status & 0x4104)
                        tp->stats.tx_aborted_errors++;
                    if (status & 0x0C00)
                        tp->stats.tx_carrier_errors++;
                    if (status & 0x0200)
                        tp->stats.tx_window_errors++;
                    if (status & 0x0002)
                        tp->stats.tx_fifo_errors++;
                    if ((status & 0x0080) && tp->full_duplex == 0)
                        tp->stats.tx_heartbeat_errors++;
                } else {
                    tp->stats.tx_bytes += tp->tx_buffers[entry].skb->len;
                    tp->stats.collisions += (status >> 3) & 15;
                    tp->stats.tx_packets++;
                }

                pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
                                 tp->tx_buffers[entry].skb->len,
                                 PCI_DMA_TODEVICE);

                /* Free the original skb. */
                dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
                tp->tx_buffers[entry].skb = NULL;
                tp->tx_buffers[entry].mapping = 0;
                tx++;
            }

#ifndef final_version
            if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
                printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
                       dev->name, dirty_tx, tp->cur_tx);
                dirty_tx += TX_RING_SIZE;
            }
#endif

            if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
                netif_wake_queue(dev);

            tp->dirty_tx = dirty_tx;
            if (csr5 & TxDied) {
                if (tulip_debug > 2)
                    printk(KERN_WARNING "%s: The transmitter stopped."
                           " CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
                           dev->name, csr5, ioread32(ioaddr + CSR6), tp->csr6);
                tulip_restart_rxtx(tp);
            }
            spin_unlock(&tp->lock);
        }

        /* Log errors. */
        if (csr5 & AbnormalIntr) {  /* Abnormal error summary bit. */
            if (csr5 == 0xffffffff)
                break;
            if (csr5 & TxJabber)
                tp->stats.tx_errors++;
            if (csr5 & TxFIFOUnderflow) {
                if ((tp->csr6 & 0xC000) != 0xC000)
                    tp->csr6 += 0x4000;      /* Bump up the Tx threshold */
                else
                    tp->csr6 |= 0x00200000;  /* Store-n-forward. */
                /* Restart the transmit process. */
                tulip_restart_rxtx(tp);
                iowrite32(0, ioaddr + CSR1);
            }
            if (csr5 & (RxDied | RxNoBuf)) {
                if (tp->flags & COMET_MAC_ADDR) {
                    iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
                    iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
                }
            }
            if (csr5 & RxDied) {  /* Missed a Rx frame. */
                tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
                tp->stats.rx_errors++;
                tulip_start_rxtx(tp);
            }
            /*
             * NB: t21142_lnk_change() does a del_timer_sync(), so be careful
             * if this call is ever done under the spinlock.
             */
            if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
                if (tp->link_change)
                    (tp->link_change)(dev, csr5);
            }
            if (csr5 & SytemError) {
                int error = (csr5 >> 23) & 7;
                /* oops, we hit a PCI error.  The code produced corresponds
                 * to the reason:
                 *  0 - parity error
                 *  1 - master abort
                 *  2 - target abort
                 * Note that on parity error, we should do a software reset
                 * of the chip to get it back into a sane state (according
                 * to the 21142/3 docs that is).
                 *   -- rmk
                 */
                printk(KERN_ERR "%s: (%lu) System Error occurred (%d)\n",
                       dev->name, tp->nir, error);
            }
            /* Clear all error sources, including undocumented ones! */
            iowrite32(0x0800f7ba, ioaddr + CSR5);
            oi++;
        }
static int tulip_rx(/*RTnet*/struct rtnet_device *rtdev, nanosecs_t *time_stamp)
{
    struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
    int entry = tp->cur_rx % RX_RING_SIZE;
    int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
    int received = 0;

    if (tulip_debug > 4)
        /*RTnet*/rtdm_printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n",
                             entry, tp->rx_ring[entry].status);

    /* If we own the next entry, it is a new packet. Send it up. */
    while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
        s32 status = le32_to_cpu(tp->rx_ring[entry].status);

        if (tulip_debug > 5)
            /*RTnet*/rtdm_printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
                                 rtdev->name, entry, status);
        if (--rx_work_limit < 0)
            break;
        if ((status & 0x38008300) != 0x0300) {
            if ((status & 0x38000300) != 0x0300) {
                /* Ignore earlier buffers. */
                if ((status & 0xffff) != 0x7fff) {
                    if (tulip_debug > 1)
                        /*RTnet*/rtdm_printk(KERN_WARNING "%s: Oversized Ethernet frame "
                                             "spanned multiple buffers, status %8.8x!\n",
                                             rtdev->name, status);
                    tp->stats.rx_length_errors++;
                }
            } else if (status & RxDescFatalErr) {
                /* There was a fatal error. */
                if (tulip_debug > 2)
                    /*RTnet*/rtdm_printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
                                         rtdev->name, status);
                tp->stats.rx_errors++;  /* end of a packet. */
                if (status & 0x0890)
                    tp->stats.rx_length_errors++;
                if (status & 0x0004)
                    tp->stats.rx_frame_errors++;
                if (status & 0x0002)
                    tp->stats.rx_crc_errors++;
                if (status & 0x0001)
                    tp->stats.rx_fifo_errors++;
            }
        } else {
            /* Omit the four octet CRC from the length. */
            short pkt_len = ((status >> 16) & 0x7ff) - 4;
            struct /*RTnet*/rtskb *skb;

#ifndef final_version
            if (pkt_len > 1518) {
                /*RTnet*/rtdm_printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
                                     rtdev->name, pkt_len, pkt_len);
                pkt_len = 1518;
                tp->stats.rx_length_errors++;
            }
#endif

#if 0 /*RTnet*/
            /* Check if the packet is long enough to accept without copying
               to a minimally-sized skbuff. */
            if (pkt_len < tulip_rx_copybreak
                && (skb = /*RTnet*/dev_alloc_rtskb(pkt_len + 2)) != NULL) {
                skb->rtdev = rtdev;
                /*RTnet*/rtskb_reserve(skb, 2);  /* 16 byte align the IP header */
                pci_dma_sync_single(tp->pdev, tp->rx_buffers[entry].mapping,
                                    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
                //eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
                //                 pkt_len, 0);
                memcpy(rtskb_put(skb, pkt_len),
                       tp->rx_buffers[entry].skb->tail, pkt_len);
#else
                memcpy(/*RTnet*/rtskb_put(skb, pkt_len),
                       tp->rx_buffers[entry].skb->tail, pkt_len);
#endif
            } else {  /* Pass up the skb already on the Rx ring. */
#endif /*RTnet*/
            {
                char *temp = /*RTnet*/rtskb_put(skb = tp->rx_buffers[entry].skb,
                                                pkt_len);

#ifndef final_version
                if (tp->rx_buffers[entry].mapping !=
                    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
                    /*RTnet*/rtdm_printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
                                         "do not match in tulip_rx: %08x vs. %08x ? / %p.\n",
                                         rtdev->name,
                                         le32_to_cpu(tp->rx_ring[entry].buffer1),
                                         tp->rx_buffers[entry].mapping, temp);/*RTnet*/
                }
#endif

                pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
                                 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

                tp->rx_buffers[entry].skb = NULL;
                tp->rx_buffers[entry].mapping = 0;
            }
            skb->protocol = /*RTnet*/rt_eth_type_trans(skb, rtdev);
            skb->time_stamp = *time_stamp;
            /*RTnet*/rtnetif_rx(skb);

            tp->stats.rx_packets++;
            tp->stats.rx_bytes += pkt_len;
        }
        received++;
        entry = (++tp->cur_rx) % RX_RING_SIZE;
    }

    return received;
}
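/*
 * Editor's note: tulip_rx() above relies on the standard descriptor
 * ownership handshake: the chip clears DescOwned in a descriptor's
 * status word once it has filled the buffer, and the driver hands the
 * descriptor back by setting DescOwned again after refilling it.  A
 * minimal sketch of the give-back side follows; it is illustrative
 * only, a simplified stand-in for tulip_refill_rx(), with buffer
 * allocation elided.
 */
#if 0 /* illustrative sketch */
    for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
        int entry = tp->dirty_rx % RX_RING_SIZE;

        /* ... allocate and DMA-map a fresh buffer for this entry ... */

        /* Returning ownership must be the last store: once DescOwned
         * is set, the chip may DMA into the buffer at any time. */
        tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
    }
#endif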
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
int tulip_interrupt(rtdm_irq_t *irq_handle)
{
    nanosecs_t time_stamp = rtdm_clock_read();/*RTnet*/
    struct rtnet_device *rtdev =
        rtdm_irq_get_arg(irq_handle, struct rtnet_device);/*RTnet*/
    struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
    long ioaddr = rtdev->base_addr;
    unsigned int csr5;
    int entry;
    int missed;
    int rx = 0;
    int tx = 0;
    int oi = 0;
    int maxrx = RX_RING_SIZE;
    int maxtx = TX_RING_SIZE;
    int maxoi = TX_RING_SIZE;
    unsigned int work_count = tulip_max_interrupt_work;

    /* Let's see whether the interrupt really is for us */
    csr5 = inl(ioaddr + CSR5);

    if ((csr5 & (NormalIntr | AbnormalIntr)) == 0) {
        rtdm_printk("%s: unexpected IRQ!\n", rtdev->name);
        return 0;
    }

    tp->nir++;

    do {
        /* Acknowledge all of the current interrupt sources ASAP. */
        outl(csr5 & 0x0001ffff, ioaddr + CSR5);

        if (tulip_debug > 4)
            /*RTnet*/rtdm_printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
                                 rtdev->name, csr5, inl(rtdev->base_addr + CSR5));

        if (csr5 & (RxIntr | RxNoBuf)) {
            rx += tulip_rx(rtdev, &time_stamp);
            tulip_refill_rx(rtdev);
        }

        if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
            unsigned int dirty_tx;

            rtdm_lock_get(&tp->lock);

            for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
                 dirty_tx++) {
                int entry = dirty_tx % TX_RING_SIZE;
                int status = le32_to_cpu(tp->tx_ring[entry].status);

                if (status < 0)
                    break;  /* It still has not been Txed */

                /* Check for Rx filter setup frames. */
                if (tp->tx_buffers[entry].skb == NULL) {
                    /* test because dummy frames not mapped */
                    if (tp->tx_buffers[entry].mapping)
                        pci_unmap_single(tp->pdev,
                                         tp->tx_buffers[entry].mapping,
                                         sizeof(tp->setup_frame),
                                         PCI_DMA_TODEVICE);
                    continue;
                }

                if (status & 0x8000) {
                    /* There was a major error, log it. */
#ifndef final_version
                    if (tulip_debug > 1)
                        /*RTnet*/rtdm_printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
                                             rtdev->name, status);
#endif
                    tp->stats.tx_errors++;
                    if (status & 0x4104)
                        tp->stats.tx_aborted_errors++;
                    if (status & 0x0C00)
                        tp->stats.tx_carrier_errors++;
                    if (status & 0x0200)
                        tp->stats.tx_window_errors++;
                    if (status & 0x0002)
                        tp->stats.tx_fifo_errors++;
                    if ((status & 0x0080) && tp->full_duplex == 0)
                        tp->stats.tx_heartbeat_errors++;
                } else {
                    tp->stats.tx_bytes += tp->tx_buffers[entry].skb->len;
                    tp->stats.collisions += (status >> 3) & 15;
                    tp->stats.tx_packets++;
                }

                pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
                                 tp->tx_buffers[entry].skb->len,
                                 PCI_DMA_TODEVICE);

                /* Free the original skb. */
                /*RTnet*/dev_kfree_rtskb(tp->tx_buffers[entry].skb);
                tp->tx_buffers[entry].skb = NULL;
                tp->tx_buffers[entry].mapping = 0;
                tx++;
                rtnetif_tx(rtdev);
            }

#ifndef final_version
            if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
                /*RTnet*/rtdm_printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
                                     rtdev->name, dirty_tx, tp->cur_tx);
                dirty_tx += TX_RING_SIZE;
            }
#endif

            if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
                /*RTnet*/rtnetif_wake_queue(rtdev);

            tp->dirty_tx = dirty_tx;
            if (csr5 & TxDied) {
                if (tulip_debug > 2)
                    /*RTnet*/rtdm_printk(KERN_WARNING "%s: The transmitter stopped."
                                         " CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
                                         rtdev->name, csr5,
                                         inl(ioaddr + CSR6), tp->csr6);
                tulip_restart_rxtx(tp);
            }
            rtdm_lock_put(&tp->lock);
        }

        /* Log errors. */
        if (csr5 & AbnormalIntr) {  /* Abnormal error summary bit. */
            if (csr5 == 0xffffffff)
                break;
#if 0 /*RTnet*/
            if (csr5 & TxJabber)
                tp->stats.tx_errors++;
            if (csr5 & TxFIFOUnderflow) {
                if ((tp->csr6 & 0xC000) != 0xC000)
                    tp->csr6 += 0x4000;      /* Bump up the Tx threshold */
                else
                    tp->csr6 |= 0x00200000;  /* Store-n-forward. */
                /* Restart the transmit process. */
                tulip_restart_rxtx(tp);
                outl(0, ioaddr + CSR1);
            }
            if (csr5 & (RxDied | RxNoBuf)) {
                if (tp->flags & COMET_MAC_ADDR) {
                    outl(tp->mc_filter[0], ioaddr + 0xAC);
                    outl(tp->mc_filter[1], ioaddr + 0xB0);
                }
            }
            if (csr5 & RxDied) {  /* Missed a Rx frame. */
                tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
                tp->stats.rx_errors++;
                tulip_start_rxtx(tp);
            }
            /*
             * NB: t21142_lnk_change() does a del_timer_sync(), so be careful
             * if this call is ever done under the spinlock.
             */
            if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
                if (tp->link_change)
                    (tp->link_change)(rtdev, csr5);
            }
            if (csr5 & SytemError) {
                int error = (csr5 >> 23) & 7;
                /* oops, we hit a PCI error.  The code produced corresponds
                 * to the reason:
                 *  0 - parity error
                 *  1 - master abort
                 *  2 - target abort
                 * Note that on parity error, we should do a software reset
                 * of the chip to get it back into a sane state (according
                 * to the 21142/3 docs that is).
                 *   -- rmk
                 */
                /*RTnet*/rtdm_printk(KERN_ERR "%s: (%lu) System Error occurred (%d)\n",
                                     rtdev->name, tp->nir, error);
            }
#endif /*RTnet*/
            /*RTnet*/rtdm_printk(KERN_ERR "%s: Error detected, "
                                 "device may not work any more (csr5=%08x)!\n",
                                 rtdev->name, csr5);
            /* Clear all error sources, including undocumented ones! */
            outl(0x0800f7ba, ioaddr + CSR5);
            oi++;
        }
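/*
 * Editor's note: the #if 0 /*RTnet*/ block above preserves the mainline
 * underflow recovery strategy that the RTnet port compiled out: on
 * TxFIFOUnderflow, raise the CSR6 transmit threshold step by step, and
 * fall back to store-and-forward once the threshold is maxed out.  A
 * minimal sketch of that escalation as a pure function follows; it is
 * illustrative only, and next_csr6_on_underflow is a hypothetical name.
 */
#if 0 /* illustrative sketch */
static u32 next_csr6_on_underflow(u32 csr6)
{
    if ((csr6 & 0xC000) != 0xC000)
        return csr6 + 0x4000;       /* bump up the Tx threshold */
    return csr6 | 0x00200000;       /* threshold maxed: store-and-forward */
}
#endif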
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static void tulip_interrupt(int irq, struct pt_regs *regs)
{
    struct device *dev = (struct device *)(irq2dev_map[irq]);
    struct tulip_private *lp;
    int csr5, ioaddr, boguscnt = 10;

    if (dev == NULL) {
        printk("tulip_interrupt(): irq %d for unknown device.\n", irq);
        return;
    }

    ioaddr = dev->base_addr;
    lp = (struct tulip_private *)dev->priv;
    if (dev->interrupt)
        printk("%s: Re-entering the interrupt handler.\n", dev->name);
    dev->interrupt = 1;

    do {
        csr5 = inl(ioaddr + CSR5);
        /* Acknowledge all of the current interrupt sources ASAP. */
        outl(csr5 & 0x0001ffff, ioaddr + CSR5);

        if (tulip_debug > 4)
            printk("%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
                   dev->name, csr5, inl(dev->base_addr + CSR5));

        if ((csr5 & 0x00018000) == 0)
            break;

        if (csr5 & 0x0040)  /* Rx interrupt */
            tulip_rx(dev);

        if (csr5 & 0x0001) {  /* Tx-done interrupt */
            int dirty_tx = lp->dirty_tx;

            while (dirty_tx < lp->cur_tx) {
                int entry = dirty_tx % TX_RING_SIZE;
                int status = lp->tx_ring[entry].status;

                if (status < 0)
                    break;  /* It still hasn't been Txed */

                if (status & 0x8000) {
                    /* There was a major error, log it. */
                    lp->stats.tx_errors++;
                    if (status & 0x4104) lp->stats.tx_aborted_errors++;
                    if (status & 0x0C00) lp->stats.tx_carrier_errors++;
                    if (status & 0x0200) lp->stats.tx_window_errors++;
                    if (status & 0x0002) lp->stats.tx_fifo_errors++;
                    if (status & 0x0080) lp->stats.tx_heartbeat_errors++;
#ifdef ETHER_STATS
                    if (status & 0x0100) lp->stats.collisions16++;
#endif
                } else {
#ifdef ETHER_STATS
                    if (status & 0x0001) lp->stats.tx_deferred++;
#endif
                    lp->stats.collisions += (status >> 3) & 15;
                    lp->stats.tx_packets++;
                }

                /* Free the original skb. */
                dev_kfree_skb(lp->tx_skbuff[entry], FREE_WRITE);
                dirty_tx++;
            }

#ifndef final_version
            if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
                printk("out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
                       dirty_tx, lp->cur_tx, lp->tx_full);
                dirty_tx += TX_RING_SIZE;
            }
#endif

            if (lp->tx_full && dev->tbusy
                && dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) {
                /* The ring is no longer full, clear tbusy. */
                lp->tx_full = 0;
                dev->tbusy = 0;
                mark_bh(NET_BH);
            }

            lp->dirty_tx = dirty_tx;
        }

        /* Log errors. */
        if (csr5 & 0x8000) {  /* Abnormal error summary bit. */
            if (csr5 & 0x0008) lp->stats.tx_errors++;  /* Tx babble. */
            if (csr5 & 0x0100) {  /* Missed a Rx frame. */
                lp->stats.rx_errors++;
                lp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
            }
            if (csr5 & 0x0800) {
                printk("%s: Something Wicked happened! %8.8x.\n",
                       dev->name, csr5);
                /* Hmmmmm, it's not clear what to do here. */
            }
        }

        if (--boguscnt < 0) {
            printk("%s: Too much work at interrupt, csr5=0x%8.8x.\n",
                   dev->name, csr5);
            /* Clear all interrupt sources. */
            outl(0x0001ffff, ioaddr + CSR5);
            break;
        }
    } while (1);
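/*
 * Editor's note: all three handler generations share the same bounded
 * service loop: read CSR5, acknowledge what was seen, service the Rx,
 * Tx, and error bits, and give up after a fixed work budget (boguscnt
 * here, tulip_max_interrupt_work in the later versions) so a stuck
 * interrupt source cannot livelock the CPU.  A minimal sketch of the
 * shared pattern follows; it is illustrative only.
 */
#if 0 /* illustrative sketch */
    int budget = 10;  /* boguscnt / tulip_max_interrupt_work */

    do {
        csr5 = inl(ioaddr + CSR5);
        outl(csr5 & 0x0001ffff, ioaddr + CSR5);  /* ack what we saw */

        if ((csr5 & 0x00018000) == 0)
            break;  /* no normal/abnormal summary bit: nothing pending */

        /* ... service Rx, Tx completion, and error bits ... */

        if (--budget < 0) {
            /* Give up: ack everything and let the next interrupt
             * (or a timer) pick up the remaining work. */
            outl(0x0001ffff, ioaddr + CSR5);
            break;
        }
    } while (1);
#endif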