static int greth_poll(struct napi_struct *napi, int budget)
{
	struct greth_private *greth;
	int work_done = 0;
	unsigned long flags;
	u32 mask, ctrl;

	greth = container_of(napi, struct greth_private, napi);

restart_txrx_poll:
	if (netif_queue_stopped(greth->netdev)) {
		if (greth->gbit_mac)
			greth_clean_tx_gbit(greth->netdev);
		else
			greth_clean_tx(greth->netdev);
	}

	if (greth->gbit_mac) {
		work_done += greth_rx_gbit(greth->netdev, budget - work_done);
	} else {
		work_done += greth_rx(greth->netdev, budget - work_done);
	}

	if (work_done < budget) {

		spin_lock_irqsave(&greth->devlock, flags);

		ctrl = GRETH_REGLOAD(greth->regs->control);
		if (netif_queue_stopped(greth->netdev)) {
			GRETH_REGSAVE(greth->regs->control,
				      ctrl | GRETH_TXI | GRETH_RXI);
			mask = GRETH_INT_RX | GRETH_INT_RE |
			       GRETH_INT_TX | GRETH_INT_TE;
		} else {
			GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_RXI);
			mask = GRETH_INT_RX | GRETH_INT_RE;
		}

		if (GRETH_REGLOAD(greth->regs->status) & mask) {
			GRETH_REGSAVE(greth->regs->control, ctrl);
			spin_unlock_irqrestore(&greth->devlock, flags);
			goto restart_txrx_poll;
		} else {
			__napi_complete(napi);
			spin_unlock_irqrestore(&greth->devlock, flags);
		}
	}

	return work_done;
}
static int xenvif_poll(struct napi_struct *napi, int budget)
{
	struct xenvif *vif = container_of(napi, struct xenvif, napi);
	int work_done;

	/* This vif is rogue; we pretend there is nothing to do
	 * for this vif to deschedule it from NAPI. The interface
	 * will be turned off in thread context later.
	 */
	if (unlikely(vif->disabled)) {
		napi_complete(napi);
		return 0;
	}

	work_done = xenvif_tx_action(vif, budget);

	if (work_done < budget) {
		int more_to_do = 0;
		unsigned long flags;

		/* It is necessary to disable IRQ before calling
		 * RING_HAS_UNCONSUMED_REQUESTS. Otherwise we might
		 * lose an event from the frontend.
		 *
		 * Consider:
		 *   RING_HAS_UNCONSUMED_REQUESTS
		 *   <frontend generates event to trigger napi_schedule>
		 *   __napi_complete
		 *
		 * This handler is still in scheduled state so the
		 * event has no effect at all. After __napi_complete
		 * this handler is descheduled and cannot get
		 * scheduled again. We lose the event in this case and
		 * the ring will be completely stalled.
		 */
		local_irq_save(flags);

		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
		if (!more_to_do)
			__napi_complete(napi);

		local_irq_restore(flags);
	}

	return work_done;
}
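/*
 * Illustrative sketch (not from any of the drivers in this file): a minimal,
 * device-agnostic version of the completion pattern that the comment in
 * xenvif_poll() describes.  The foo_* names, the foo_priv layout and both
 * helper functions are hypothetical placeholders; only container_of(),
 * local_irq_save()/local_irq_restore() and __napi_complete() are the real
 * kernel APIs used by the surrounding functions.
 */
#include <linux/netdevice.h>
#include <linux/irqflags.h>

struct foo_priv {
	struct napi_struct napi;
	/* ... hypothetical device state ... */
};

static int foo_process_rx(struct foo_priv *fp, int budget);	/* hypothetical */
static bool foo_more_work_pending(struct foo_priv *fp);		/* hypothetical */

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *fp = container_of(napi, struct foo_priv, napi);
	int work_done = foo_process_rx(fp, budget);

	if (work_done < budget) {
		unsigned long flags;

		local_irq_save(flags);
		/* Re-check for pending work with interrupts disabled: an
		 * event that fires between the check and __napi_complete()
		 * would otherwise arrive while this handler is still marked
		 * scheduled and be lost, stalling the queue.
		 */
		if (!foo_more_work_pending(fp))
			__napi_complete(napi);
		local_irq_restore(flags);
	}

	return work_done;
}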
static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
{
	struct amd8111e_priv *lp = container_of(napi, struct amd8111e_priv, napi);
	struct net_device *dev = lp->amd8111e_net_dev;
	int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK;
	void __iomem *mmio = lp->mmio;
	struct sk_buff *skb, *new_skb;
	int min_pkt_len, status;
	unsigned int intr0;
	int num_rx_pkt = 0;
	short pkt_len;
#if AMD8111E_VLAN_TAG_USED
	short vtag;
#endif
	int rx_pkt_limit = budget;
	unsigned long flags;

	do {
		/* Process receive packets until the budget is used up or
		 * the ring runs dry.
		 */
		while (1) {
			status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
			if (status & OWN_BIT)
				break;

			if (status & ERR_BIT) {
				/* Reset the descriptor flags and skip it. */
				lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
				goto err_next_pkt;
			}
			/* Check for start-of-packet and end-of-packet bits. */
			if (!((status & STP_BIT) && (status & ENP_BIT))) {
				lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
				goto err_next_pkt;
			}
			pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;

#if AMD8111E_VLAN_TAG_USED
			vtag = status & TT_MASK;
			/* The MAC strips the VLAN tag from tagged frames. */
			if (vtag != 0)
				min_pkt_len = MIN_PKT_LEN - 4;
			else
#endif
				min_pkt_len = MIN_PKT_LEN;

			if (pkt_len < min_pkt_len) {
				lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
				lp->drv_rx_errors++;
				goto err_next_pkt;
			}
			if (--rx_pkt_limit < 0)
				goto rx_not_empty;
			new_skb = netdev_alloc_skb(dev, lp->rx_buff_len);
			if (!new_skb) {
				/* If allocation fails, reuse the old skb. */
				lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
				lp->drv_rx_errors++;
				goto err_next_pkt;
			}

			skb_reserve(new_skb, 2);
			skb = lp->rx_skbuff[rx_index];
			pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[rx_index],
					 lp->rx_buff_len - 2, PCI_DMA_FROMDEVICE);
			skb_put(skb, pkt_len);
			lp->rx_skbuff[rx_index] = new_skb;
			lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
								   new_skb->data,
								   lp->rx_buff_len - 2,
								   PCI_DMA_FROMDEVICE);

			skb->protocol = eth_type_trans(skb, dev);

#if AMD8111E_VLAN_TAG_USED
			if (vtag == TT_VLAN_TAGGED) {
				u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info);
				__vlan_hwaccel_put_tag(skb, vlan_tag);
			}
#endif
			netif_receive_skb(skb);
			/* Update rx coalescing statistics. */
			lp->coal_conf.rx_packets++;
			lp->coal_conf.rx_bytes += pkt_len;
			num_rx_pkt++;

err_next_pkt:
			/* Hand the descriptor back to the hardware. */
			lp->rx_ring[rx_index].buff_phy_addr =
				cpu_to_le32(lp->rx_dma_addr[rx_index]);
			lp->rx_ring[rx_index].buff_count =
				cpu_to_le16(lp->rx_buff_len - 2);
			wmb();
			lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
			rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
		}
		/* Re-check the interrupt status register for packets that
		 * arrived in the meantime, and ack them.
		 */
		intr0 = readl(mmio + INT0);
		writel(intr0 & RINT0, mmio + INT0);
	} while (intr0 & RINT0);

	if (rx_pkt_limit > 0) {
		/* The receive ring is empty: complete NAPI and re-enable
		 * receive interrupts under the driver lock.
		 */
		spin_lock_irqsave(&lp->lock, flags);
		__napi_complete(napi);
		writel(VAL0 | RINTEN0, mmio + INTEN0);
		writel(VAL2 | RDMD0, mmio + CMD0);
		spin_unlock_irqrestore(&lp->lock, flags);
	}

rx_not_empty:
	return num_rx_pkt;
}
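/*
 * Illustrative sketch (not from any of the drivers in this file): the
 * spinlock-protected variant seen in greth_poll() and amd8111e_rx_poll()
 * above, where completing NAPI and re-enabling the device interrupt mask
 * happen together under the driver lock, so a packet arriving after
 * completion raises a fresh interrupt instead of being missed.  The bar_*
 * names, the register layout and the helper function are hypothetical.
 */
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/io.h>

struct bar_priv {
	struct napi_struct napi;
	spinlock_t lock;
	void __iomem *irq_enable_reg;	/* hypothetical interrupt-enable register */
};

static int bar_process_rx(struct bar_priv *bp, int budget);	/* hypothetical */

static int bar_poll(struct napi_struct *napi, int budget)
{
	struct bar_priv *bp = container_of(napi, struct bar_priv, napi);
	int work_done = bar_process_rx(bp, budget);

	if (work_done < budget) {
		unsigned long flags;

		spin_lock_irqsave(&bp->lock, flags);
		__napi_complete(napi);
		/* Unmask the device interrupt only after completing NAPI so
		 * that new work reschedules the poll via the IRQ handler.
		 */
		writel(1, bp->irq_enable_reg);
		spin_unlock_irqrestore(&bp->lock, flags);
	}

	return work_done;
}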