void fs_init_bds(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t __iomem *bdp;
	struct sk_buff *skb;
	int i;

	fs_cleanup_bds(dev);

	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->tx_free = fep->tx_ring;
	fep->cur_rx = fep->rx_bd_base;

	/*
	 * Initialize the receive buffer descriptors.
	 */
	for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
		skb = dev_alloc_skb(ENET_RX_FRSIZE);
		if (skb == NULL) {
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s Memory squeeze, unable to allocate skb\n",
			       dev->name);
			break;
		}
		skb_align(skb, ENET_RX_ALIGN);
		fep->rx_skbuff[i] = skb;
		CBDW_BUFADDR(bdp,
			dma_map_single(fep->dev, skb->data,
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);	/* zero */
		CBDW_SC(bdp, BD_ENET_RX_EMPTY |
			((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
	}

	/*
	 * If we failed, fill up the remainder.
	 */
	for (; i < fep->rx_ring; i++, bdp++) {
		fep->rx_skbuff[i] = NULL;
		CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
	}

	/*
	 * ...and the same for transmit.
	 */
	for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
		fep->tx_skbuff[i] = NULL;
		CBDW_BUFADDR(bdp, 0);
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP);
	}
}
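/*
 * Sketch only: fs_init_bds() above relies on fs_cleanup_bds() to release
 * any buffers mapped on a previous pass. A minimal version of that
 * counterpart would have to unmap each RX buffer and free its skb,
 * roughly as below. This is an illustration of what the cleanup has to
 * undo, not the driver's actual fs_cleanup_bds() implementation.
 */
static void fs_cleanup_bds_sketch(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t __iomem *bdp;
	struct sk_buff *skb;
	int i;

	for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
		skb = fep->rx_skbuff[i];
		if (skb == NULL)
			continue;

		/* Undo the dma_map_single() done in fs_init_bds(). */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				 DMA_FROM_DEVICE);
		fep->rx_skbuff[i] = NULL;
		dev_kfree_skb(skb);
	}
}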
/*
 * MPC5121 FEC requires 4-byte alignment for the TX data buffer!
 */
static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
					       struct sk_buff *skb)
{
	struct sk_buff *new_skb;

	/* Alloc new skb */
	new_skb = netdev_alloc_skb(dev, skb->len + 4);
	if (!new_skb)
		return NULL;

	/* Make sure new skb is properly aligned */
	skb_align(new_skb, 4);

	/* Copy data to new skb ... */
	skb_copy_from_linear_data(skb, new_skb->data, skb->len);
	skb_put(new_skb, skb->len);

	/* ... and free an old one */
	dev_kfree_skb_any(skb);

	return new_skb;
}
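/*
 * Usage sketch (assumption, not taken from this file): the copy-and-realign
 * helper above is meant to be called from the transmit path when the skb's
 * linear data is not 4-byte aligned. The function name below is made up
 * for illustration; only tx_skb_align_workaround() itself is real. Note
 * the helper does not free the original skb on allocation failure, so the
 * caller still owns it in that case.
 */
static int fs_enet_xmit_align_sketch(struct sk_buff *skb,
				     struct net_device *dev)
{
	/* The MPC5121 FEC cannot DMA an unaligned TX buffer, so copy it. */
	if (((unsigned long)skb->data) & 0x3) {
		struct sk_buff *aligned = tx_skb_align_workaround(dev, skb);

		if (!aligned) {
			/* Helper failed without freeing the original skb;
			 * drop it here to avoid a leak in this sketch. */
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		skb = aligned;
	}

	/* ... continue with the normal BD ring setup for skb ... */
	return NETDEV_TX_OK;
}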
/* NAPI receive function */
static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
{
	struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi);
	struct net_device *dev = fep->ndev;
	const struct fs_platform_info *fpi = fep->fpi;
	cbd_t __iomem *bdp;
	struct sk_buff *skb, *skbn, *skbt;
	int received = 0;
	u16 pkt_len, sc;
	int curidx;

	if (budget <= 0)
		return received;

	/*
	 * First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	/* clear RX status bits for napi */
	(*fep->ops->napi_clear_rx_event)(dev);

	while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
		curidx = bdp - fep->rx_bd_base;

		/*
		 * Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((sc & BD_ENET_RX_LAST) == 0)
			dev_warn(fep->dev, "rcv is not +last\n");

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
			  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			fep->stats.rx_errors++;
			/* Frame too long or too short. */
			if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
				fep->stats.rx_length_errors++;
			/* Frame alignment */
			if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				fep->stats.rx_frame_errors++;
			/* CRC Error */
			if (sc & BD_ENET_RX_CR)
				fep->stats.rx_crc_errors++;
			/* FIFO overrun */
			if (sc & BD_ENET_RX_OV)
				fep->stats.rx_crc_errors++;

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			skbn = skb;
		} else {
			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			/*
			 * Process the incoming frame.
			 */
			fep->stats.rx_packets++;
			pkt_len = CBDR_DATLEN(bdp) - 4;	/* remove CRC */
			fep->stats.rx_bytes += pkt_len + 4;

			if (pkt_len <= fpi->rx_copybreak) {
				/* +2 to make IP header L1 cache aligned */
				skbn = netdev_alloc_skb(dev, pkt_len + 2);
				if (skbn != NULL) {
					skb_reserve(skbn, 2);	/* align IP header */
					skb_copy_from_linear_data(skb,
						skbn->data, pkt_len);
					/* swap */
					skbt = skb;
					skb = skbn;
					skbn = skbt;
				}
			} else {
				skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);

				if (skbn)
					skb_align(skbn, ENET_RX_ALIGN);
			}

			if (skbn != NULL) {
				skb_put(skb, pkt_len);	/* Make room */
				skb->protocol = eth_type_trans(skb, dev);
				received++;
				netif_receive_skb(skb);
			} else {
				fep->stats.rx_dropped++;
				skbn = skb;
			}
		}

		fep->rx_skbuff[curidx] = skbn;
		CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
			     L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
			     DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

		/*
		 * Update BD pointer to next entry.
		 */
		if ((sc & BD_ENET_RX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->rx_bd_base;

		(*fep->ops->rx_bd_done)(dev);

		if (received >= budget)
			break;
	}

	fep->cur_rx = bdp;

	if (received < budget) {
		/* done */
		napi_complete(napi);
		(*fep->ops->napi_enable_rx)(dev);
	}

	return received;
}
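/*
 * Wiring sketch (assumption, not taken from this file): a poll function
 * like fs_enet_rx_napi() is registered with netif_napi_add() at probe
 * time and scheduled from the RX interrupt handler. The function names
 * below are made up; a napi_disable_rx op is assumed to exist as the
 * counterpart of the napi_enable_rx op used above, and the legacy
 * netif_napi_add() signature taking an explicit weight is assumed to
 * match the kernel era of this driver.
 */
static irqreturn_t fs_enet_rx_irq_sketch(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct fs_enet_private *fep = netdev_priv(dev);

	if (napi_schedule_prep(&fep->napi)) {
		/* Mask RX events until fs_enet_rx_napi() completes and
		 * re-enables them via the napi_enable_rx op. */
		(*fep->ops->napi_disable_rx)(dev);
		__napi_schedule(&fep->napi);
	}

	return IRQ_HANDLED;
}

static void fs_enet_napi_register_sketch(struct fs_enet_private *fep)
{
	netif_napi_add(fep->ndev, &fep->napi, fs_enet_rx_napi, 64);
}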