static void greth_clean_tx_gbit(struct net_device *dev) { struct greth_private *greth; struct greth_bd *bdp, *bdp_last_frag; struct sk_buff *skb; u32 stat; int nr_frags, i; greth = netdev_priv(dev); while (greth->tx_free < GRETH_TXBD_NUM) { skb = greth->tx_skbuff[greth->tx_last]; nr_frags = skb_shinfo(skb)->nr_frags; /* We only clean fully completed SKBs */ bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags); GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX); mb(); stat = greth_read_bd(&bdp_last_frag->stat); if (stat & GRETH_BD_EN) break; greth->tx_skbuff[greth->tx_last] = NULL; greth_update_tx_stats(dev, stat); dev->stats.tx_bytes += skb->len; bdp = greth->tx_bd_base + greth->tx_last; greth->tx_last = NEXT_TX(greth->tx_last); dma_unmap_single(greth->dev, greth_read_bd(&bdp->addr), skb_headlen(skb), DMA_TO_DEVICE); for (i = 0; i < nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; bdp = greth->tx_bd_base + greth->tx_last; dma_unmap_page(greth->dev, greth_read_bd(&bdp->addr), frag->size, DMA_TO_DEVICE); greth->tx_last = NEXT_TX(greth->tx_last); } greth->tx_free += nr_frags+1; dev_kfree_skb(skb); } if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS+1))) netif_wake_queue(dev); }
/*
 * Reclaim completed TX descriptors on the gigabit TX ring.
 *
 * Runs concurrently with the transmit path.  The ring position is advanced
 * on a local copy (tx_last) while SKBs are unmapped and freed; it is
 * published to greth->tx_last only once, after a wmb(), so the xmit side
 * never observes a half-updated ring.  Wakes the queue when enough
 * descriptors (> MAX_SKB_FRAGS + 1) have been freed for a worst-case SKB.
 */
static void greth_clean_tx_gbit(struct net_device *dev)
{
	struct greth_private *greth;
	struct greth_bd *bdp, *bdp_last_frag;
	/* skb stays NULL if no descriptor was reclaimed; gates the publish below */
	struct sk_buff *skb = NULL;
	u32 stat;
	int nr_frags, i;
	u16 tx_last;

	greth = netdev_priv(dev);
	tx_last = greth->tx_last;

	while (tx_last != greth->tx_next) {

		skb = greth->tx_skbuff[tx_last];

		nr_frags = skb_shinfo(skb)->nr_frags;

		/* We only clean fully completed SKBs: check the descriptor of
		 * the LAST fragment; if hardware still owns it, stop. */
		bdp_last_frag = greth->tx_bd_base + SKIP_TX(tx_last, nr_frags);

		/* Ack TX interrupts, then mb() so the ack is ordered before
		 * re-reading the descriptor status — a completion raised in
		 * between is not lost. */
		GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
		mb();
		stat = greth_read_bd(&bdp_last_frag->stat);

		if (stat & GRETH_BD_EN)
			break;

		greth->tx_skbuff[tx_last] = NULL;

		greth_update_tx_stats(dev, stat);
		dev->stats.tx_bytes += skb->len;

		bdp = greth->tx_bd_base + tx_last;

		tx_last = NEXT_TX(tx_last);

		/* Unmap the linear head of the SKB ... */
		dma_unmap_single(greth->dev,
				 greth_read_bd(&bdp->addr),
				 skb_headlen(skb),
				 DMA_TO_DEVICE);

		/* ... and each paged fragment, one descriptor per fragment. */
		for (i = 0; i < nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			bdp = greth->tx_bd_base + tx_last;

			dma_unmap_page(greth->dev,
				       greth_read_bd(&bdp->addr),
				       skb_frag_size(frag),
				       DMA_TO_DEVICE);

			tx_last = NEXT_TX(tx_last);
		}
		dev_kfree_skb(skb);
	}
	if (skb) { /* skb is set only if the above while loop was entered */
		/* Make unmap/free effects visible before publishing the new
		 * ring position to the concurrently-running transmit path. */
		wmb();
		greth->tx_last = tx_last;

		if (netif_queue_stopped(dev) &&
		    (greth_num_free_bds(tx_last, greth->tx_next) >
		     (MAX_SKB_FRAGS+1)))
			netif_wake_queue(dev);
	}
}