static int
ramips_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct raeth_priv *re = netdev_priv(dev);
	struct raeth_tx_info *txi, *txi_next;
	struct ramips_tx_dma *txd, *txd_next;
	unsigned long tx;
	unsigned int tx_next;
	dma_addr_t mapped_addr;

	/* Pad runt frames up to the hardware minimum, if one is set. */
	if (re->plat->min_pkt_len) {
		if (skb->len < re->plat->min_pkt_len) {
			if (skb_padto(skb, re->plat->min_pkt_len)) {
				printk(KERN_ERR
				       "ramips_eth: skb_padto failed\n");
				/* skb_padto() already freed the skb */
				return NETDEV_TX_OK;
			}
			skb_put(skb, re->plat->min_pkt_len - skb->len);
		}
	}

	dev->trans_start = jiffies;
	mapped_addr = dma_map_single(&re->netdev->dev, skb->data, skb->len,
				     DMA_TO_DEVICE);

	spin_lock(&re->page_lock);

	tx = ramips_fe_trr(RAETH_REG_TX_CTX_IDX0);
	tx_next = (tx + 1) % NUM_TX_DESC;

	txi = &re->tx_info[tx];
	txd = txi->tx_desc;
	txi_next = &re->tx_info[tx_next];
	txd_next = txi_next->tx_desc;

	/* Drop the packet if the current or the next descriptor is
	 * still in use (ring full). */
	if ((txi->tx_skb) || (txi_next->tx_skb) ||
	    !(txd->txd2 & TX_DMA_DONE) ||
	    !(txd_next->txd2 & TX_DMA_DONE))
		goto out;

	txi->tx_skb = skb;

	txd->txd1 = (unsigned int) mapped_addr;
	/* Make the buffer address visible before the descriptor is
	 * handed back to the hardware. */
	wmb();
	txd->txd2 = TX_DMA_LSO | TX_DMA_PLEN0(skb->len);
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	ramips_fe_twr(tx_next, RAETH_REG_TX_CTX_IDX0);
	netdev_sent_queue(dev, skb->len);
	spin_unlock(&re->page_lock);

	return NETDEV_TX_OK;

 out:
	spin_unlock(&re->page_lock);
	dev->stats.tx_dropped++;
	kfree_skb(skb);

	return NETDEV_TX_OK;
}
static int
ramips_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct raeth_priv *priv = netdev_priv(dev);
	unsigned long tx;
	unsigned int tx_next;
	unsigned int mapped_addr;
	unsigned long flags;

	/* Pad runt frames up to the hardware minimum, if one is set. */
	if (priv->plat->min_pkt_len) {
		if (skb->len < priv->plat->min_pkt_len) {
			if (skb_padto(skb, priv->plat->min_pkt_len)) {
				printk(KERN_ERR
				       "ramips_eth: skb_padto failed\n");
				/* skb_padto() already freed the skb */
				return NETDEV_TX_OK;
			}
			skb_put(skb, priv->plat->min_pkt_len - skb->len);
		}
	}

	dev->trans_start = jiffies;
	mapped_addr = (unsigned int) dma_map_single(NULL, skb->data, skb->len,
						    DMA_TO_DEVICE);
	dma_sync_single_for_device(NULL, mapped_addr, skb->len, DMA_TO_DEVICE);

	spin_lock_irqsave(&priv->page_lock, flags);

	tx = ramips_fe_rr(RAMIPS_TX_CTX_IDX0);
	if (tx == NUM_TX_DESC - 1)
		tx_next = 0;
	else
		tx_next = tx + 1;

	/* Drop the packet if the current or the next descriptor is
	 * still in use (ring full). */
	if ((priv->tx_skb[tx]) || (priv->tx_skb[tx_next]) ||
	    !(priv->tx[tx].txd2 & TX_DMA_DONE) ||
	    !(priv->tx[tx_next].txd2 & TX_DMA_DONE))
		goto out;

	priv->tx[tx].txd1 = mapped_addr;
	priv->tx[tx].txd2 &= ~(TX_DMA_PLEN0_MASK | TX_DMA_DONE);
	priv->tx[tx].txd2 |= TX_DMA_PLEN0(skb->len);
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	priv->tx_skb[tx] = skb;
	/* Make the descriptor update visible before the DMA engine is
	 * told about the new tail index. */
	wmb();
	ramips_fe_wr(tx_next, RAMIPS_TX_CTX_IDX0);
	spin_unlock_irqrestore(&priv->page_lock, flags);

	return NETDEV_TX_OK;

 out:
	spin_unlock_irqrestore(&priv->page_lock, flags);
	dev->stats.tx_dropped++;
	kfree_skb(skb);

	return NETDEV_TX_OK;
}