/* reclaim TX descriptors that the hardware has finished sending */
static void ag71xx_tx_packets(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	unsigned int sent;

	DBG("%s: processing TX ring\n", ag->dev->name);

	sent = 0;
	while (ring->dirty != ring->curr) {
		unsigned int i = ring->dirty % AG71XX_TX_RING_SIZE;
		struct ag71xx_desc *desc = &ring->descs[i];
		struct sk_buff *skb = ring->buf[i].skb;

		if (!ag71xx_desc_empty(desc))
			break;

		/* acknowledge the per-packet TX status */
		ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);

		ag->dev->stats.tx_bytes += skb->len;
		ag->dev->stats.tx_packets++;

		dev_kfree_skb_any(skb);
		ring->buf[i].skb = NULL;

		ring->dirty++;
		sent++;
	}

	DBG("%s: %d packets sent out\n", ag->dev->name, sent);

	/* restart the queue once enough descriptors are free again */
	if ((ring->curr - ring->dirty) < AG71XX_TX_THRES_WAKEUP)
		netif_wake_queue(ag->dev);
}
/* drop everything still pending in the TX ring, e.g. when the device is stopped */
static void ag71xx_ring_tx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	struct net_device *dev = ag->dev;

	while (ring->curr != ring->dirty) {
		u32 i = ring->dirty % AG71XX_TX_RING_SIZE;

		if (!ag71xx_desc_empty(&ring->descs[i])) {
			/* the packet was never sent; count it as an error */
			ring->descs[i].ctrl = 0;
			dev->stats.tx_errors++;
		}

		if (ring->buf[i].skb)
			dev_kfree_skb_any(ring->buf[i].skb);

		ring->buf[i].skb = NULL;

		ring->dirty++;
	}

	/* flush descriptors */
	wmb();
}
/* receive up to 'limit' packets and hand them to the network stack */
static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
{
	struct net_device *dev = ag->dev;
	struct ag71xx_ring *ring = &ag->rx_ring;
	int done = 0;

	DBG("%s: rx packets, limit=%d, curr=%u, dirty=%u\n",
	    dev->name, limit, ring->curr, ring->dirty);

	while (done < limit) {
		unsigned int i = ring->curr % AG71XX_RX_RING_SIZE;
		struct ag71xx_desc *desc = &ring->descs[i];
		struct sk_buff *skb;
		int pktlen;

		if (ag71xx_desc_empty(desc))
			break;

		if ((ring->dirty + AG71XX_RX_RING_SIZE) == ring->curr) {
			ag71xx_assert(0);
			break;
		}

		/* acknowledge the per-packet RX status */
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);

		skb = ring->buf[i].skb;
		pktlen = ag71xx_desc_pktlen(desc);
		pktlen -= ETH_FCS_LEN;

		skb_put(skb, pktlen);

		skb->dev = dev;
		skb->ip_summed = CHECKSUM_NONE;

		dev->last_rx = jiffies;
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += pktlen;

		if (ag71xx_remove_ar8216_header(ag, skb) != 0) {
			dev->stats.rx_dropped++;
			kfree_skb(skb);
		} else {
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
		}

		ring->buf[i].skb = NULL;
		done++;

		ring->curr++;
	}

	ag71xx_ring_rx_refill(ag);

	DBG("%s: rx finish, curr=%u, dirty=%u, done=%d\n",
	    dev->name, ring->curr, ring->dirty, done);

	return done;
}
/* queue one packet for transmission */
static int ag71xx_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	struct ag71xx_ring *ring = &ag->tx_ring;
	struct ag71xx_desc *desc;
	unsigned long flags;
	int i;

	i = ring->curr % AG71XX_TX_RING_SIZE;
	desc = &ring->descs[i];

	spin_lock_irqsave(&ag->lock, flags);
	pdata->ddr_flush();
	spin_unlock_irqrestore(&ag->lock, flags);

	if (!ag71xx_desc_empty(desc))
		goto err_drop;

	ag71xx_add_ar8216_header(ag, skb);

	if (skb->len <= 0) {
		DBG("%s: packet len is too small\n", ag->dev->name);
		goto err_drop;
	}

	dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE);

	ring->buf[i].skb = skb;

	/* setup descriptor fields */
	desc->data = virt_to_phys(skb->data);
	desc->ctrl = (skb->len & DESC_PKTLEN_M);

	/* flush descriptor */
	wmb();

	ring->curr++;
	if (ring->curr == (ring->dirty + AG71XX_TX_THRES_STOP)) {
		DBG("%s: tx queue full\n", ag->dev->name);
		netif_stop_queue(dev);
	}

	DBG("%s: packet injected into TX queue\n", ag->dev->name);

	/* enable TX engine */
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE);

	dev->trans_start = jiffies;

	return 0;

err_drop:
	dev->stats.tx_dropped++;

	dev_kfree_skb(skb);
	return 0;
}
/* queue one packet for transmission (netdev_tx_t variant) */
static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
					  struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);
	struct ag71xx_ring *ring = &ag->tx_ring;
	struct ag71xx_desc *desc;
	dma_addr_t dma_addr;
	int i;

	i = ring->curr % AG71XX_TX_RING_SIZE;
	desc = ring->buf[i].desc;

	if (!ag71xx_desc_empty(desc))
		goto err_drop;

	if (ag71xx_has_ar8216(ag))
		ag71xx_add_ar8216_header(ag, skb);

	if (skb->len <= 0) {
		DBG("%s: packet len is too small\n", ag->dev->name);
		goto err_drop;
	}

	dma_addr = dma_map_single(&dev->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);

	ring->buf[i].skb = skb;

	/* setup descriptor fields */
	desc->data = (u32) dma_addr;
	desc->ctrl = (skb->len & DESC_PKTLEN_M);

	/* flush descriptor */
	wmb();

	ring->curr++;
	if (ring->curr == (ring->dirty + AG71XX_TX_THRES_STOP)) {
		DBG("%s: tx queue full\n", ag->dev->name);
		netif_stop_queue(dev);
	}

	DBG("%s: packet injected into TX queue\n", ag->dev->name);

	/* enable TX engine */
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE);

	return NETDEV_TX_OK;

err_drop:
	dev->stats.tx_dropped++;

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
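/*
 * Hook-up sketch (not part of the original driver code shown above): the
 * netdev_tx_t variant of ag71xx_hard_start_xmit would normally be wired
 * into the kernel through struct net_device_ops. The ops structure name
 * and the ag71xx_open/ag71xx_stop handlers referenced here are assumed to
 * exist elsewhere in the driver; only .ndo_start_xmit points at the
 * function above.
 */
static const struct net_device_ops ag71xx_netdev_ops = {
	.ndo_open		= ag71xx_open,		/* assumed helper */
	.ndo_stop		= ag71xx_stop,		/* assumed helper */
	.ndo_start_xmit		= ag71xx_hard_start_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

/* in the probe path: dev->netdev_ops = &ag71xx_netdev_ops; */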
/* receive up to 'limit' packets (variant with the AG71XX_NAPI_TX compile-time option) */
static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
{
	struct net_device *dev = ag->dev;
	struct ag71xx_ring *ring = &ag->rx_ring;
#ifndef AG71XX_NAPI_TX
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	unsigned long flags;
#endif
	int done = 0;

#ifndef AG71XX_NAPI_TX
	spin_lock_irqsave(&ag->lock, flags);
	ar71xx_ddr_flush(pdata->flush_reg);
	spin_unlock_irqrestore(&ag->lock, flags);
#endif

	DBG("%s: rx packets, limit=%d, curr=%u, dirty=%u\n",
	    dev->name, limit, ring->curr, ring->dirty);

	while (done < limit) {
		unsigned int i = ring->curr % AG71XX_RX_RING_SIZE;
		struct ag71xx_desc *desc = &ring->descs[i];
		struct sk_buff *skb;
		int pktlen;

		if (ag71xx_desc_empty(desc))
			break;

		if ((ring->dirty + AG71XX_RX_RING_SIZE) == ring->curr) {
			ag71xx_assert(0);
			break;
		}

		skb = ring->buf[i].skb;
		pktlen = ag71xx_desc_pktlen(desc);
		pktlen -= ETH_FCS_LEN;

		/* TODO: move it into the refill function */
		dma_cache_wback_inv((unsigned long)skb->data, pktlen);

		skb_put(skb, pktlen);

		skb->dev = dev;
		skb->protocol = eth_type_trans(skb, dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		netif_receive_skb(skb);

		dev->last_rx = jiffies;
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += pktlen;

		ring->buf[i].skb = NULL;
		done++;

		/* acknowledge the per-packet RX status */
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);

		ring->curr++;
		if ((ring->curr - ring->dirty) > (AG71XX_RX_RING_SIZE / 4))
			ag71xx_ring_rx_refill(ag);
	}

	ag71xx_ring_rx_refill(ag);

	DBG("%s: rx finish, curr=%u, dirty=%u, done=%d\n",
	    dev->name, ring->curr, ring->dirty, done);

	return done;
}
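/*
 * Minimal NAPI poll sketch tying the RX and TX completion paths together.
 * This is an illustration, not the driver's actual poll handler: it assumes
 * 'struct ag71xx' embeds a 'struct napi_struct napi' member and uses the
 * modern napi_complete() API; the interrupt re-enable write is left as a
 * placeholder because the exact register and mask depend on the hardware.
 */
static int ag71xx_poll(struct napi_struct *napi, int budget)
{
	struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
	int done;

	/* reclaim finished TX descriptors first, then pull in RX packets */
	ag71xx_tx_packets(ag);
	done = ag71xx_rx_packets(ag, budget);

	if (done < budget) {
		/* all pending work handled: leave polling mode ... */
		napi_complete(napi);
		/* ... and re-enable the RX/TX interrupts here */
	}

	return done;
}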