/**
 * Complete bulk IN transfer
 *
 * @v ep		USB endpoint
 * @v iobuf		I/O buffer
 * @v rc		Completion status code
 */
static void smsc95xx_in_complete ( struct usb_endpoint *ep,
				   struct io_buffer *iobuf, int rc ) {
	struct smsc95xx_device *smsc95xx =
		container_of ( ep, struct smsc95xx_device, usbnet.in );
	struct net_device *netdev = smsc95xx->netdev;
	struct smsc95xx_rx_header *header;

	/* Profile completions */
	profile_start ( &smsc95xx_in_profiler );

	/* Ignore packets cancelled when the endpoint closes */
	if ( ! ep->open ) {
		free_iob ( iobuf );
		return;
	}

	/* Record USB errors against the network device */
	if ( rc != 0 ) {
		DBGC ( smsc95xx, "SMSC95XX %p bulk IN failed: %s\n",
		       smsc95xx, strerror ( rc ) );
		goto err;
	}

	/* Sanity check */
	if ( iob_len ( iobuf ) < ( sizeof ( *header ) + 4 /* CRC */ ) ) {
		DBGC ( smsc95xx, "SMSC95XX %p underlength bulk IN\n",
		       smsc95xx );
		DBGC_HDA ( smsc95xx, 0, iobuf->data, iob_len ( iobuf ) );
		rc = -EINVAL;
		goto err;
	}

	/* Strip header and CRC */
	header = iobuf->data;
	iob_pull ( iobuf, sizeof ( *header ) );
	iob_unput ( iobuf, 4 /* CRC */ );

	/* Check for errors */
	if ( header->command & cpu_to_le32 ( SMSC95XX_RX_RUNT |
					     SMSC95XX_RX_LATE |
					     SMSC95XX_RX_CRC ) ) {
		DBGC ( smsc95xx, "SMSC95XX %p receive error (%08x):\n",
		       smsc95xx, le32_to_cpu ( header->command ) );
		DBGC_HDA ( smsc95xx, 0, iobuf->data, iob_len ( iobuf ) );
		rc = -EIO;
		goto err;
	}

	/* Hand off to network stack */
	netdev_rx ( netdev, iob_disown ( iobuf ) );

	profile_stop ( &smsc95xx_in_profiler );
	return;

 err:
	/* Hand off to network stack */
	netdev_rx_err ( netdev, iob_disown ( iobuf ), rc );
}
/**
 * e1000e_process_rx_packets - process received packets
 *
 * @v netdev	network interface device structure
 **/
static void e1000e_process_rx_packets ( struct net_device *netdev ) {
	struct e1000_adapter *adapter = netdev_priv ( netdev );
	uint32_t i;
	uint32_t rx_status;
	uint32_t rx_len;
	uint32_t rx_err;
	struct e1000_rx_desc *rx_curr_desc;

	/* Process received packets */
	while ( 1 ) {

		i = adapter->rx_curr;

		rx_curr_desc = ( void * ) ( adapter->rx_base ) +
			       ( i * sizeof ( *adapter->rx_base ) );

		rx_status = rx_curr_desc->status;

		DBG2 ( "Before DD Check RX_status: %#08x\n", rx_status );

		if ( ! ( rx_status & E1000_RXD_STAT_DD ) )
			break;

		if ( adapter->rx_iobuf[i] == NULL )
			break;

		DBG ( "E1000_RCTL = %#08x\n",
		      E1000_READ_REG ( &adapter->hw, E1000_RCTL ) );

		rx_len = rx_curr_desc->length;

		DBG ( "Received packet, rx_curr: %d rx_status: %#08x "
		      "rx_len: %d\n", i, rx_status, rx_len );

		rx_err = rx_curr_desc->errors;

		iob_put ( adapter->rx_iobuf[i], rx_len );

		if ( rx_err & E1000_RXD_ERR_FRAME_ERR_MASK ) {
			netdev_rx_err ( netdev, adapter->rx_iobuf[i],
					-EINVAL );
			DBG ( "e1000_poll: Corrupted packet received!"
			      " rx_err: %#08x\n", rx_err );
		} else {
			/* Add this packet to the receive queue. */
			netdev_rx ( netdev, adapter->rx_iobuf[i] );
		}
		adapter->rx_iobuf[i] = NULL;

		memset ( rx_curr_desc, 0, sizeof ( *rx_curr_desc ) );

		adapter->rx_curr = ( adapter->rx_curr + 1 ) % NUM_RX_DESC;
	}
}
static void b44_process_rx_packets(struct b44_private *bp)
{
	struct io_buffer *iob;	/* received data */
	struct rx_header *rh;
	u32 pending, i;
	u16 len;

	pending = pending_rx_index(bp);

	for (i = bp->rx_cur; i != pending; i = ring_next(i)) {
		iob = bp->rx_iobuf[i];
		if (iob == NULL)
			break;

		rh = iob->data;
		len = le16_to_cpu(rh->len);

		/*
		 * Guard against incompletely written RX descriptors.
		 * Without this, things can get really slow!
		 */
		if (len == 0)
			break;

		/* Discard CRC that is generated by the card */
		len -= 4;

		/* Check for invalid packets and errors */
		if (len > RX_PKT_BUF_SZ - RX_PKT_OFFSET ||
		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
			DBG("rx error len=%d flags=%04x\n", len,
			    le16_to_cpu(rh->flags));
			rh->len = 0;
			rh->flags = 0;
			/* Ownership of the buffer passes to the network
			 * stack; drop our reference to it */
			bp->rx_iobuf[i] = NULL;
			netdev_rx_err(bp->netdev, iob, -EINVAL);
			continue;
		}

		/* Clear RX descriptor */
		rh->len = 0;
		rh->flags = 0;
		bp->rx_iobuf[i] = NULL;

		/* Hand off the IO buffer to the network stack */
		iob_reserve(iob, RX_PKT_OFFSET);
		iob_put(iob, len);
		netdev_rx(bp->netdev, iob);
	}

	bp->rx_cur = i;
	b44_rx_refill(bp, pending_rx_index(bp));
}
/**
 * Poll for received packets
 *
 * @v netdev		Network device
 */
static void nii_poll_rx ( struct net_device *netdev ) {
	struct nii_nic *nii = netdev->priv;
	PXE_CPB_RECEIVE cpb;
	PXE_DB_RECEIVE db;
	unsigned int quota;
	int stat;
	int rc;

	/* Retrieve up to NII_RX_QUOTA packets */
	for ( quota = NII_RX_QUOTA ; quota ; quota-- ) {

		/* Allocate buffer, if required */
		if ( ! nii->rxbuf ) {
			nii->rxbuf = alloc_iob ( nii->mtu );
			if ( ! nii->rxbuf ) {
				/* Leave for next poll */
				break;
			}
		}

		/* Construct parameter block */
		memset ( &cpb, 0, sizeof ( cpb ) );
		cpb.BufferAddr = virt_to_bus ( nii->rxbuf->data );
		cpb.BufferLen = iob_tailroom ( nii->rxbuf );

		/* Issue command */
		if ( ( stat = nii_issue_cpb_db ( nii, PXE_OPCODE_RECEIVE,
						 &cpb, sizeof ( cpb ),
						 &db, sizeof ( db ) ) ) < 0 ) {

			/* PXE_STATCODE_NO_DATA is just the usual "no
			 * packet" status indicator; ignore it.
			 */
			if ( stat == -PXE_STATCODE_NO_DATA )
				break;

			/* Anything else is an error */
			rc = -EIO_STAT ( stat );
			DBGC ( nii, "NII %s could not receive: %s\n",
			       nii->dev.name, strerror ( rc ) );
			netdev_rx_err ( netdev, NULL, rc );
			break;
		}

		/* Hand off to network stack */
		iob_put ( nii->rxbuf, db.FrameLen );
		netdev_rx ( netdev, nii->rxbuf );
		nii->rxbuf = NULL;
	}
}
/**
 * Complete bulk IN transfer
 *
 * @v ep		USB endpoint
 * @v iobuf		I/O buffer
 * @v rc		Completion status code
 */
static void dm96xx_in_complete ( struct usb_endpoint *ep,
				 struct io_buffer *iobuf, int rc ) {
	struct dm96xx_device *dm96xx =
		container_of ( ep, struct dm96xx_device, usbnet.in );
	struct net_device *netdev = dm96xx->netdev;
	struct dm96xx_rx_header *header;

	/* Ignore packets cancelled when the endpoint closes */
	if ( ! ep->open ) {
		free_iob ( iobuf );
		return;
	}

	/* Record USB errors against the network device */
	if ( rc != 0 ) {
		DBGC ( dm96xx, "DM96XX %p bulk IN failed: %s\n",
		       dm96xx, strerror ( rc ) );
		goto err;
	}

	/* Sanity check */
	if ( iob_len ( iobuf ) < ( sizeof ( *header ) + 4 /* CRC */ ) ) {
		DBGC ( dm96xx, "DM96XX %p underlength bulk IN\n", dm96xx );
		DBGC_HDA ( dm96xx, 0, iobuf->data, iob_len ( iobuf ) );
		rc = -EINVAL;
		goto err;
	}

	/* Strip header and CRC */
	header = iobuf->data;
	iob_pull ( iobuf, sizeof ( *header ) );
	iob_unput ( iobuf, 4 /* CRC */ );

	/* Check status */
	if ( header->rsr & ~DM96XX_RSR_MF ) {
		DBGC ( dm96xx, "DM96XX %p receive error %02x:\n",
		       dm96xx, header->rsr );
		DBGC_HDA ( dm96xx, 0, iobuf->data, iob_len ( iobuf ) );
		rc = -EIO;
		goto err;
	}

	/* Hand off to network stack */
	netdev_rx ( netdev, iob_disown ( iobuf ) );
	return;

 err:
	/* Hand off to network stack */
	netdev_rx_err ( netdev, iob_disown ( iobuf ), rc );
}
/**
 * Poll for received packets
 *
 * @v netdev		Network device
 */
static void legacy_poll ( struct net_device *netdev ) {
	struct nic *nic = netdev->priv;
	struct io_buffer *iobuf;

	iobuf = alloc_iob ( ETH_FRAME_LEN );
	if ( ! iobuf )
		return;

	nic->packet = iobuf->data;
	if ( nic->nic_op->poll ( nic, 1 ) ) {
		DBG ( "Received %d bytes\n", nic->packetlen );
		iob_put ( iobuf, nic->packetlen );
		netdev_rx ( netdev, iobuf );
	} else {
		free_iob ( iobuf );
	}
}
/**
 * Poll for received packets
 *
 * @v netdev		Network device
 */
static void snpnet_poll_rx ( struct net_device *netdev ) {
	struct snp_nic *snp = netdev->priv;
	UINTN len;
	unsigned int quota;
	EFI_STATUS efirc;
	int rc;

	/* Retrieve up to SNP_RX_QUOTA packets */
	for ( quota = SNP_RX_QUOTA ; quota ; quota-- ) {

		/* Allocate buffer, if required */
		if ( ! snp->rxbuf ) {
			snp->rxbuf = alloc_iob ( snp->mtu );
			if ( ! snp->rxbuf ) {
				/* Leave for next poll */
				break;
			}
		}

		/* Receive packet */
		len = iob_tailroom ( snp->rxbuf );
		if ( ( efirc = snp->snp->Receive ( snp->snp, NULL, &len,
						   snp->rxbuf->data, NULL,
						   NULL, NULL ) ) != 0 ) {

			/* EFI_NOT_READY is just the usual "no packet"
			 * status indication; ignore it.
			 */
			if ( efirc == EFI_NOT_READY )
				break;

			/* Anything else is an error */
			rc = -EEFI ( efirc );
			DBGC ( snp, "SNP %s could not receive: %s\n",
			       netdev->name, strerror ( rc ) );
			netdev_rx_err ( netdev, NULL, rc );
			break;
		}

		/* Hand off to network stack */
		iob_put ( snp->rxbuf, len );
		netdev_rx ( netdev, snp->rxbuf );
		snp->rxbuf = NULL;
	}
}
/** Poll for new packets */
static void af_packet_nic_poll ( struct net_device *netdev ) {
	struct af_packet_nic * nic = netdev->priv;
	struct pollfd pfd;
	struct io_buffer * iobuf;
	int r;

	pfd.fd = nic->fd;
	pfd.events = POLLIN;

	if (linux_poll(&pfd, 1, 0) == -1) {
		DBGC(nic, "af_packet %p poll failed (%s)\n",
		     nic, linux_strerror(linux_errno));
		return;
	}
	if ((pfd.revents & POLLIN) == 0)
		return;

	/* At this point we know there is at least one new packet to be read */

	iobuf = alloc_iob(RX_BUF_SIZE);
	if (! iobuf)
		goto allocfail;

	while ((r = linux_read(nic->fd, iobuf->data, RX_BUF_SIZE)) > 0) {
		DBGC2(nic, "af_packet %p read %d bytes\n", nic, r);

		iob_put(iobuf, r);
		netdev_rx(netdev, iobuf);

		iobuf = alloc_iob(RX_BUF_SIZE);
		if (! iobuf)
			goto allocfail;
	}

	free_iob(iobuf);
	return;

allocfail:
	DBGC(nic, "af_packet %p alloc_iob failed\n", nic);
}
/**
 * Poll for received packets
 *
 * @v netdev		Network device
 */
static void rhine_poll_rx ( struct net_device *netdev ) {
	struct rhine_nic *rhn = netdev->priv;
	struct rhine_descriptor *desc;
	struct io_buffer *iobuf;
	unsigned int rx_idx;
	uint32_t des0;
	size_t len;

	/* Check for received packets */
	while ( rhn->rx.cons != rhn->rx.prod ) {

		/* Get next receive descriptor */
		rx_idx = ( rhn->rx.cons % RHINE_RXDESC_NUM );
		desc = &rhn->rx.desc[rx_idx];

		/* Stop if descriptor is still in use */
		if ( desc->des0 & cpu_to_le32 ( RHINE_DES0_OWN ) )
			return;

		/* Populate I/O buffer */
		iobuf = rhn->rx_iobuf[rx_idx];
		rhn->rx_iobuf[rx_idx] = NULL;
		des0 = le32_to_cpu ( desc->des0 );
		len = ( RHINE_DES0_GETSIZE ( des0 ) - 4 /* strip CRC */ );
		iob_put ( iobuf, len );

		/* Hand off to network stack */
		if ( des0 & RHINE_RDES0_RXOK ) {
			DBGC2 ( rhn, "RHINE %p RX %d complete (length %zd)\n",
				rhn, rx_idx, len );
			netdev_rx ( netdev, iobuf );
		} else {
			DBGC ( rhn, "RHINE %p RX %d error (length %zd, DES0 "
			       "%08x)\n", rhn, rx_idx, len, des0 );
			netdev_rx_err ( netdev, iobuf, -EIO );
		}
		rhn->rx.cons++;
	}
}
/**
 * Poll for received packets
 *
 * @v netdev		Network device
 */
static void myson_poll_rx ( struct net_device *netdev ) {
	struct myson_nic *myson = netdev->priv;
	struct myson_descriptor *rx;
	struct io_buffer *iobuf;
	unsigned int rx_idx;
	size_t len;

	/* Check for received packets */
	while ( myson->rx.cons != myson->rx.prod ) {

		/* Get next receive descriptor */
		rx_idx = ( myson->rx.cons % MYSON_NUM_RX_DESC );
		rx = &myson->rx.desc[rx_idx];

		/* Stop if descriptor is still in use */
		if ( rx->status & cpu_to_le32 ( MYSON_RX_STAT_OWN ) )
			return;

		/* Populate I/O buffer */
		iobuf = myson->rx_iobuf[rx_idx];
		myson->rx_iobuf[rx_idx] = NULL;
		len = MYSON_RX_STAT_FLNG ( le32_to_cpu ( rx->status ) );
		iob_put ( iobuf, len - 4 /* strip CRC */ );

		/* Hand off to network stack */
		if ( rx->status & cpu_to_le32 ( MYSON_RX_STAT_ES ) ) {
			DBGC ( myson, "MYSON %p RX %d error (length %zd, "
			       "status %08x)\n", myson, rx_idx, len,
			       le32_to_cpu ( rx->status ) );
			netdev_rx_err ( netdev, iobuf, -EIO );
		} else {
			DBGC2 ( myson, "MYSON %p RX %d complete (length "
				"%zd)\n", myson, rx_idx, len );
			netdev_rx ( netdev, iobuf );
		}
		myson->rx.cons++;
	}
}
/**
 * Poll for received packets
 *
 * @v netdev		Network device
 */
static void netfront_poll_rx ( struct net_device *netdev ) {
	struct netfront_nic *netfront = netdev->priv;
	struct xen_device *xendev = netfront->xendev;
	struct netif_rx_response *response;
	struct io_buffer *iobuf;
	int status;
	size_t len;
	int rc;

	/* Consume any unconsumed responses */
	while ( RING_HAS_UNCONSUMED_RESPONSES ( &netfront->rx_fring ) ) {

		/* Get next response */
		response = RING_GET_RESPONSE ( &netfront->rx_fring,
					       netfront->rx_fring.rsp_cons++ );

		/* Retrieve from descriptor ring */
		iobuf = netfront_pull ( netfront, &netfront->rx,
					response->id );
		status = response->status;
		if ( status >= 0 ) {
			len = status;
			iob_reserve ( iobuf, response->offset );
			iob_put ( iobuf, len );
			DBGC2 ( netfront, "NETFRONT %s RX id %d complete "
				"%#08lx+%zx\n", xendev->key, response->id,
				virt_to_phys ( iobuf->data ), len );
			netdev_rx ( netdev, iobuf );
		} else {
			rc = -EIO_NETIF_RSP ( status );
			DBGC2 ( netfront, "NETFRONT %s RX id %d error %d: "
				"%s\n", xendev->key, response->id, status,
				strerror ( rc ) );
			netdev_rx_err ( netdev, iobuf, rc );
		}
	}
}
/**
 * e1000_poll - Poll for received packets
 *
 * @v netdev	Network device
 */
static void e1000_poll ( struct net_device *netdev ) {
	struct e1000_adapter *adapter = netdev_priv ( netdev );
	struct e1000_hw *hw = &adapter->hw;

	uint32_t icr;
	uint32_t tx_status;
	uint32_t rx_status;
	uint32_t rx_len;
	uint32_t rx_err;
	struct e1000_tx_desc *tx_curr_desc;
	struct e1000_rx_desc *rx_curr_desc;
	uint32_t i;

	DBGP ( "e1000_poll\n" );

	/* Acknowledge interrupts */
	icr = E1000_READ_REG ( hw, ICR );
	if ( ! icr )
		return;

	DBG ( "e1000_poll: intr_status = %#08x\n", icr );

	/* Check status of transmitted packets */
	while ( ( i = adapter->tx_head ) != adapter->tx_tail ) {

		tx_curr_desc = ( void * ) ( adapter->tx_base ) +
			       ( i * sizeof ( *adapter->tx_base ) );

		tx_status = tx_curr_desc->upper.data;

		/* if the packet at tx_head is not owned by hardware
		 * it is for us */
		if ( ! ( tx_status & E1000_TXD_STAT_DD ) )
			break;

		DBG ( "Sent packet. tx_head: %d tx_tail: %d tx_status: "
		      "%#08x\n", adapter->tx_head, adapter->tx_tail,
		      tx_status );

		if ( tx_status & ( E1000_TXD_STAT_EC | E1000_TXD_STAT_LC |
				   E1000_TXD_STAT_TU ) ) {
			netdev_tx_complete_err ( netdev,
						 adapter->tx_iobuf[i],
						 -EINVAL );
			DBG ( "Error transmitting packet, tx_status: "
			      "%#08x\n", tx_status );
		} else {
			netdev_tx_complete ( netdev, adapter->tx_iobuf[i] );
			DBG ( "Success transmitting packet, tx_status: "
			      "%#08x\n", tx_status );
		}

		/* Decrement count of used descriptors, clear this
		 * descriptor */
		adapter->tx_fill_ctr--;
		memset ( tx_curr_desc, 0, sizeof ( *tx_curr_desc ) );

		adapter->tx_head = ( adapter->tx_head + 1 ) % NUM_TX_DESC;
	}

	/* Process received packets */
	while ( 1 ) {

		i = adapter->rx_curr;

		rx_curr_desc = ( void * ) ( adapter->rx_base ) +
			       ( i * sizeof ( *adapter->rx_base ) );

		rx_status = rx_curr_desc->status;

		DBG2 ( "Before DD Check RX_status: %#08x\n", rx_status );

		if ( ! ( rx_status & E1000_RXD_STAT_DD ) )
			break;

		if ( adapter->rx_iobuf[i] == NULL )
			break;

		DBG ( "RCTL = %#08x\n",
		      E1000_READ_REG ( &adapter->hw, RCTL ) );

		rx_len = rx_curr_desc->length;

		DBG ( "Received packet, rx_curr: %d rx_status: %#08x "
		      "rx_len: %d\n", i, rx_status, rx_len );

		rx_err = rx_curr_desc->errors;

		iob_put ( adapter->rx_iobuf[i], rx_len );

		if ( rx_err & E1000_RXD_ERR_FRAME_ERR_MASK ) {
			netdev_rx_err ( netdev, adapter->rx_iobuf[i],
					-EINVAL );
			DBG ( "e1000_poll: Corrupted packet received!"
			      " rx_err: %#08x\n", rx_err );
		} else {
			/* Add this packet to the receive queue. */
			netdev_rx ( netdev, adapter->rx_iobuf[i] );
		}
		adapter->rx_iobuf[i] = NULL;

		memset ( rx_curr_desc, 0, sizeof ( *rx_curr_desc ) );

		adapter->rx_curr = ( adapter->rx_curr + 1 ) % NUM_RX_DESC;
	}

	e1000_refill_rx_ring ( adapter );
}
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np;
	long ioaddr;
	int boguscnt = max_interrupt_work;

	ioaddr = dev->base_addr;
	np = dev->priv;
	spin_lock(&np->lock);

	do {
		int intr_status = readw(ioaddr + IntrStatus);
		writew(intr_status & (IntrRxDone | IntrRxDMADone |
				      IntrPCIErr | IntrDrvRqst | IntrTxDone |
				      IntrTxDMADone | StatsMax | LinkChange),
		       ioaddr + IntrStatus);

		if (debug > 4)
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
			       dev->name, intr_status);

		if (intr_status == 0)
			break;

		if (intr_status & (IntrRxDone|IntrRxDMADone))
			netdev_rx(dev);

		if (intr_status & IntrTxDone) {
			int boguscnt = 32;
			int tx_status = readw(ioaddr + TxStatus);
			while (tx_status & 0x80) {
				if (debug > 4)
					printk("%s: Transmit status is %2.2x.\n",
					       dev->name, tx_status);
				if (tx_status & 0x1e) {
					np->stats.tx_errors++;
					if (tx_status & 0x10)
						np->stats.tx_fifo_errors++;
#ifdef ETHER_STATS
					if (tx_status & 0x08)
						np->stats.collisions16++;
#else
					if (tx_status & 0x08)
						np->stats.collisions++;
#endif
					if (tx_status & 0x04)
						np->stats.tx_fifo_errors++;
					if (tx_status & 0x02)
						np->stats.tx_window_errors++;
					/* This reset has not been verified! */
					if (tx_status & 0x10) {
						/* Reset the Tx. */
						writew(0x001c, ioaddr + ASICCtrl + 2);
#if 0 /* Do we need to reset the Tx pointer here? */
						writel(np->tx_ring_dma +
						       np->dirty_tx*sizeof(*np->tx_ring),
						       dev->base_addr + TxListPtr);
#endif
					}
					if (tx_status & 0x1e)
						/* Restart the Tx. */
						writew(TxEnable, ioaddr + MACCtrl1);
				}
				/* Yup, this is a documentation bug.  It cost me *hours*. */
				writew(0, ioaddr + TxStatus);
				tx_status = readb(ioaddr + TxStatus);
				if (--boguscnt < 0)
					break;
			}
		}

		for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
			int entry = np->dirty_tx % TX_RING_SIZE;
			struct sk_buff *skb;

			if ( ! (np->tx_ring[entry].status & 0x00010000))
				break;
			skb = np->tx_skbuff[entry];
			/* Free the original skb. */
			pci_unmap_single(np->pci_dev,
					 np->tx_ring[entry].frag[0].addr,
					 skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_irq(skb);
			np->tx_skbuff[entry] = 0;
		}
		if (np->tx_full &&
		    np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
			/* The ring is no longer full, clear tbusy. */
			np->tx_full = 0;
			netif_wake_queue(dev);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (IntrDrvRqst | IntrPCIErr | LinkChange |
				   StatsMax))
			netdev_error(dev, intr_status);

		if (--boguscnt < 0) {
			get_stats(dev);
			if (debug > 1)
				printk(KERN_WARNING "%s: Too much work at interrupt, "
				       "status=0x%4.4x / 0x%4.4x.\n",
				       dev->name, intr_status,
				       readw(ioaddr + IntrClear));
			/* Re-enable us in 3.2msec. */
			writew(0, ioaddr + IntrEnable);
			writew(1000, ioaddr + DownCounter);
			writew(IntrDrvRqst, ioaddr + IntrEnable);
			break;
		}
	} while (1);

	if (debug > 3)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
		       dev->name, readw(ioaddr + IntrStatus));

	spin_unlock(&np->lock);
}
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int work_limit = max_interrupt_work;

	spin_lock(&np->lock);

	do {
		u32 intr_status = readl(ioaddr + IntrStatus);

		/* Acknowledge all of the current interrupt sources ASAP. */
		writel(intr_status & 0x001ffff, ioaddr + IntrStatus);

		if (debug > 4)
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
			       dev->name, intr_status);

		if ((intr_status & (NormalIntr|AbnormalIntr)) == 0)
			break;

		if (intr_status & (IntrRxDone | RxNoBuf))
			netdev_rx(dev);

		for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
			int entry = np->dirty_tx % TX_RING_SIZE;
			int tx_status = le32_to_cpu(np->tx_ring[entry].status);

			if (tx_status < 0)
				break;
			if (tx_status & 0x8000) {	/* There was an error, log it. */
#ifndef final_version
				if (debug > 1)
					printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
					       dev->name, tx_status);
#endif
				np->stats.tx_errors++;
				if (tx_status & 0x0104)
					np->stats.tx_aborted_errors++;
				if (tx_status & 0x0C80)
					np->stats.tx_carrier_errors++;
				if (tx_status & 0x0200)
					np->stats.tx_window_errors++;
				if (tx_status & 0x0002)
					np->stats.tx_fifo_errors++;
				if ((tx_status & 0x0080) && np->full_duplex == 0)
					np->stats.tx_heartbeat_errors++;
#ifdef ETHER_STATS
				if (tx_status & 0x0100)
					np->stats.collisions16++;
#endif
			} else {
#ifdef ETHER_STATS
				if (tx_status & 0x0001)
					np->stats.tx_deferred++;
#endif
				np->stats.tx_bytes += np->tx_skbuff[entry]->len;
				np->stats.collisions += (tx_status >> 3) & 15;
				np->stats.tx_packets++;
			}
			/* Free the original skb. */
			pci_unmap_single(np->pdev, np->tx_addr[entry],
					 np->tx_skbuff[entry]->len,
					 PCI_DMA_TODEVICE);
			np->tx_q_bytes -= np->tx_skbuff[entry]->len;
			dev_kfree_skb_irq(np->tx_skbuff[entry]);
			np->tx_skbuff[entry] = 0;
		}
		if (np->tx_full &&
		    np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4 &&
		    np->tx_q_bytes < TX_BUG_FIFO_LIMIT) {
			/* The ring is no longer full, clear tbusy. */
			np->tx_full = 0;
			netif_wake_queue(dev);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (AbnormalIntr | TxFIFOUnderflow |
				   IntrPCIErr | TimerInt | IntrTxStopped))
			netdev_error(dev, intr_status);

		if (--work_limit < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, "
			       "status=0x%4.4x.\n", dev->name, intr_status);
			/* Set the timer to re-enable the other interrupts
			   after 10*82usec ticks. */
			writel(AbnormalIntr | TimerInt, ioaddr + IntrEnable);
			writel(10, ioaddr + GPTimer);
			break;
		}
	} while (1);

	if (debug > 3)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
		       dev->name, (int)readl(ioaddr + IntrStatus));

	spin_unlock(&np->lock);
}
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct device *dev = (struct device *)dev_instance;
	struct netdev_private *np;
	long ioaddr, boguscnt = max_interrupt_work;

	ioaddr = dev->base_addr;
	np = (struct netdev_private *)dev->priv;
#if defined(__i386__)
	/* A lock to prevent simultaneous entry bug on Intel SMP machines. */
	if (test_and_set_bit(0, (void*)&dev->interrupt)) {
		printk(KERN_ERR "%s: SMP simultaneous entry of an interrupt handler.\n",
		       dev->name);
		dev->interrupt = 0;	/* Avoid halting machine. */
		return;
	}
#else
	if (dev->interrupt) {
		printk(KERN_ERR "%s: Re-entering the interrupt handler.\n",
		       dev->name);
		return;
	}
	dev->interrupt = 1;
#endif

	do {
		u32 intr_status = readw(ioaddr + IntrStatus);

		/* Acknowledge all of the current interrupt sources ASAP. */
		writew(intr_status & 0xffff, ioaddr + IntrStatus);

		if (debug > 4)
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
			       dev->name, intr_status);

		if (intr_status == 0)
			break;

		if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
				   IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf))
			netdev_rx(dev);

		for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
			int entry = np->dirty_tx % TX_RING_SIZE;
			int txstatus;

			if (np->tx_ring[entry].tx_own)
				break;
			txstatus = np->tx_ring[entry].tx_status;
			if (debug > 6)
				printk(KERN_DEBUG " Tx scavenge %d status %4.4x.\n",
				       entry, txstatus);
			if (txstatus & 0x8000) {
				if (debug > 1)
					printk(KERN_DEBUG "%s: Transmit error, Tx status %4.4x.\n",
					       dev->name, txstatus);
				np->stats.tx_errors++;
				if (txstatus & 0x0400)
					np->stats.tx_carrier_errors++;
				if (txstatus & 0x0200)
					np->stats.tx_window_errors++;
				if (txstatus & 0x0100)
					np->stats.tx_aborted_errors++;
				if (txstatus & 0x0080)
					np->stats.tx_heartbeat_errors++;
				if (txstatus & 0x0002)
					np->stats.tx_fifo_errors++;
#ifdef ETHER_STATS
				if (txstatus & 0x0100)
					np->stats.collisions16++;
#endif
				/* Transmitter restarted in 'abnormal' handler. */
			} else {
#ifdef ETHER_STATS
				if (txstatus & 0x0001)
					np->stats.tx_deferred++;
#endif
				np->stats.collisions += (txstatus >> 3) & 15;
#if defined(NETSTATS_VER2)
				np->stats.tx_bytes += np->tx_ring[entry].desc_length & 0x7ff;
#endif
				np->stats.tx_packets++;
			}
			/* Free the original skb. */
			dev_free_skb(np->tx_skbuff[entry]);
			np->tx_skbuff[entry] = 0;
		}
		if (np->tx_full && dev->tbusy &&
		    np->cur_tx - np->dirty_tx < TX_RING_SIZE - 4) {
			/* The ring is no longer full, clear tbusy. */
			np->tx_full = 0;
			clear_bit(0, (void*)&dev->tbusy);
			mark_bh(NET_BH);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (IntrPCIErr | IntrLinkChange |
				   IntrMIIChange | IntrStatsMax |
				   IntrTxAbort | IntrTxUnderrun))
			netdev_error(dev, intr_status);

		if (--boguscnt < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, "
			       "status=0x%4.4x.\n", dev->name, intr_status);
			break;
		}
	} while (1);

	if (debug > 3)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
		       dev->name, readw(ioaddr + IntrStatus));

#if defined(__i386__)
	clear_bit(0, (void*)&dev->interrupt);
#else
	dev->interrupt = 0;
#endif
	return;
}
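/* A minimal sketch of the receive-poll pattern shared by the iPXE
 * drivers above, shown here for reference. The "mynic" device, its ring
 * size and its descriptor accessors are hypothetical placeholders;
 * alloc_iob(), iob_put(), netdev_rx() and netdev_rx_err() are the real
 * iPXE I/O buffer and network device APIs used throughout this section.
 */
#include <errno.h>
#include <ipxe/iobuf.h>
#include <ipxe/netdevice.h>

#define MYNIC_NUM_RX_DESC 8		/* hypothetical ring size */

/* Hypothetical per-device state: a producer/consumer RX ring */
struct mynic {
	unsigned int rx_cons;
	unsigned int rx_prod;
	struct io_buffer *rx_iobuf[MYNIC_NUM_RX_DESC];
};

/* Hypothetical descriptor accessors; these would read hardware
 * descriptors in a real driver. Declarations only, for the sketch.
 */
static int mynic_rx_done ( struct mynic *nic, unsigned int rx_idx );
static size_t mynic_rx_len ( struct mynic *nic, unsigned int rx_idx );
static int mynic_rx_error ( struct mynic *nic, unsigned int rx_idx );

/**
 * Poll for received packets (sketch)
 *
 * @v netdev		Network device
 */
static void mynic_poll_rx ( struct net_device *netdev ) {
	struct mynic *nic = netdev->priv;
	struct io_buffer *iobuf;
	unsigned int rx_idx;
	size_t len;

	/* Check for received packets */
	while ( nic->rx_cons != nic->rx_prod ) {

		/* Stop if the next descriptor is still owned by hardware */
		rx_idx = ( nic->rx_cons % MYNIC_NUM_RX_DESC );
		if ( ! mynic_rx_done ( nic, rx_idx ) )
			return;

		/* Detach the I/O buffer from the ring and set its length */
		iobuf = nic->rx_iobuf[rx_idx];
		nic->rx_iobuf[rx_idx] = NULL;
		len = mynic_rx_len ( nic, rx_idx );
		iob_put ( iobuf, len );

		/* Hand off to the network stack: netdev_rx() and
		 * netdev_rx_err() both take ownership of the buffer, so
		 * the driver must not touch it afterwards.
		 */
		if ( mynic_rx_error ( nic, rx_idx ) ) {
			netdev_rx_err ( netdev, iobuf, -EIO );
		} else {
			netdev_rx ( netdev, iobuf );
		}
		nic->rx_cons++;
	}
}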