/* RX tasklet handler: drain up to 16 completed descriptors per run, then
 * either reschedule itself or re-enable the RX delay interrupt. */
static void
ramips_eth_rx_hw(unsigned long ptr)
{
	struct net_device *dev = (struct net_device *) ptr;
	struct raeth_priv *priv = netdev_priv(dev);
	int rx;
	int max_rx = 16;

	while (max_rx) {
		struct sk_buff *rx_skb, *new_skb;
		int pktlen;

		rx = (ramips_fe_rr(RAMIPS_RX_CALC_IDX0) + 1) % NUM_RX_DESC;
		if (!(priv->rx[rx].rxd2 & RX_DMA_DONE))
			break;
		max_rx--;

		rx_skb = priv->rx_skb[rx];
		pktlen = RX_DMA_PLEN0(priv->rx[rx].rxd2);

		new_skb = netdev_alloc_skb(dev, MAX_RX_LENGTH + NET_IP_ALIGN);
		/* Reuse the buffer on allocation failures */
		if (new_skb) {
			dma_addr_t dma_addr;

			dma_unmap_single(&priv->netdev->dev, priv->rx_dma[rx],
					 MAX_RX_LENGTH, DMA_FROM_DEVICE);

			skb_put(rx_skb, pktlen);
			rx_skb->dev = dev;
			rx_skb->protocol = eth_type_trans(rx_skb, dev);
			rx_skb->ip_summed = CHECKSUM_NONE;
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pktlen;
			netif_rx(rx_skb);

			priv->rx_skb[rx] = new_skb;
			skb_reserve(new_skb, NET_IP_ALIGN);

			dma_addr = dma_map_single(&priv->netdev->dev,
						  new_skb->data, MAX_RX_LENGTH,
						  DMA_FROM_DEVICE);
			priv->rx_dma[rx] = dma_addr;
			priv->rx[rx].rxd1 = (unsigned int) dma_addr;
		} else {
			dev->stats.rx_dropped++;
		}

		priv->rx[rx].rxd2 &= ~RX_DMA_DONE;
		/* The descriptor update must be visible before the index write
		 * hands it back to the DMA engine. */
		wmb();
		ramips_fe_wr(rx, RAMIPS_RX_CALC_IDX0);
	}

	if (max_rx == 0)
		tasklet_schedule(&priv->rx_tasklet);
	else
		ramips_fe_int_enable(RAMIPS_RX_DLY_INT);
}
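The `unsigned long ptr` argument follows the classic tasklet calling convention: the handler is registered with the `struct net_device` pointer cast to an integer. A minimal sketch of that registration, assuming it happens in the driver's open/init path (the call site is not shown in this section):

	/* Illustrative only: the old tasklet_init() API passes an unsigned long,
	 * which the handler casts back to struct net_device *. */
	tasklet_init(&priv->rx_tasklet, ramips_eth_rx_hw, (unsigned long) dev);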
/* Variant of the same handler that treats allocation failure as fatal
 * (BUG_ON) and sets the skb length directly instead of using skb_put(). */
static void
ramips_eth_rx_hw(unsigned long ptr)
{
	struct net_device *dev = (struct net_device *) ptr;
	struct raeth_priv *priv = netdev_priv(dev);
	int rx;
	int max_rx = 16;

	while (max_rx) {
		struct sk_buff *rx_skb, *new_skb;

		rx = (ramips_fe_rr(RAMIPS_RX_CALC_IDX0) + 1) % NUM_RX_DESC;
		if (!(priv->rx[rx].rxd2 & RX_DMA_DONE))
			break;
		max_rx--;

		rx_skb = priv->rx_skb[rx];
		rx_skb->len = RX_DMA_PLEN0(priv->rx[rx].rxd2);
		rx_skb->dev = dev;
		rx_skb->protocol = eth_type_trans(rx_skb, dev);
		rx_skb->ip_summed = CHECKSUM_NONE;
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += rx_skb->len;
		netif_rx(rx_skb);

		new_skb = netdev_alloc_skb(dev, MAX_RX_LENGTH + 2);
		priv->rx_skb[rx] = new_skb;
		/* Allocation failure is fatal here; the variant above reuses
		 * the old buffer instead. */
		BUG_ON(!new_skb);
		skb_reserve(new_skb, 2);
		priv->rx[rx].rxd1 = dma_map_single(NULL, new_skb->data,
						   MAX_RX_LENGTH + 2,
						   DMA_FROM_DEVICE);
		priv->rx[rx].rxd2 &= ~RX_DMA_DONE;
		wmb();
		ramips_fe_wr(rx, RAMIPS_RX_CALC_IDX0);
	}

	if (max_rx == 0)
		tasklet_schedule(&priv->rx_tasklet);
	else
		ramips_fe_wr(ramips_fe_rr(RAMIPS_FE_INT_ENABLE) | RAMIPS_RX_DLY_INT,
			     RAMIPS_FE_INT_ENABLE);
}
/* MT7620-specific RX descriptor reset: the assignment writes the buffer
 * length into the PLEN0 field and zeroes the rest of word 2, including the
 * DMA-done bit, handing the descriptor back to the hardware. */
static void
mt7620_rx_dma(struct fe_priv *priv, int idx, int len)
{
	priv->rx_dma[idx].rxd2 = RX_DMA_PLEN0(len);
}
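For context, a minimal sketch of how a refill path might use such a helper; fe_rx_refill and its exact call site are assumptions for illustration, not code from this driver:

	/* Hypothetical caller: publish the new buffer address, then let the
	 * helper rewrite rxd2, which also clears the DMA-done bit and returns
	 * the descriptor to the hardware. */
	static void fe_rx_refill(struct fe_priv *priv, int idx, dma_addr_t dma_addr)
	{
		priv->rx_dma[idx].rxd1 = (unsigned int) dma_addr;
		wmb();	/* address must be visible before ownership flips */
		mt7620_rx_dma(priv, idx, MAX_RX_LENGTH);
	}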
/* Refactored variant of the RX handler: per-descriptor state lives in
 * struct raeth_rx_info, and the CPU index register is read once before
 * the loop rather than on every iteration. */
static void
ramips_eth_rx_hw(unsigned long ptr)
{
	struct net_device *dev = (struct net_device *) ptr;
	struct raeth_priv *re = netdev_priv(dev);
	int rx;
	int max_rx = 16;

	rx = ramips_fe_trr(RAETH_REG_RX_CALC_IDX0);

	while (max_rx) {
		struct raeth_rx_info *rxi;
		struct ramips_rx_dma *rxd;
		struct sk_buff *rx_skb, *new_skb;
		int pktlen;

		rx = (rx + 1) % NUM_RX_DESC;

		rxi = &re->rx_info[rx];
		rxd = rxi->rx_desc;
		if (!(rxd->rxd2 & RX_DMA_DONE))
			break;

		rx_skb = rxi->rx_skb;
		pktlen = RX_DMA_PLEN0(rxd->rxd2);

		new_skb = ramips_alloc_skb(re);
		/* Reuse the buffer on allocation failures */
		if (new_skb) {
			dma_addr_t dma_addr;

			dma_unmap_single(&re->netdev->dev, rxi->rx_dma,
					 MAX_RX_LENGTH, DMA_FROM_DEVICE);

			skb_put(rx_skb, pktlen);
			rx_skb->dev = dev;
			rx_skb->protocol = eth_type_trans(rx_skb, dev);
			rx_skb->ip_summed = CHECKSUM_NONE;
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pktlen;
			netif_rx(rx_skb);

			rxi->rx_skb = new_skb;

			dma_addr = dma_map_single(&re->netdev->dev,
						  new_skb->data, MAX_RX_LENGTH,
						  DMA_FROM_DEVICE);
			rxi->rx_dma = dma_addr;
			rxd->rxd1 = (unsigned int) dma_addr;
			wmb();
		} else {
			dev->stats.rx_dropped++;
		}

		rxd->rxd2 = RX_DMA_LSO;
		ramips_fe_twr(rx, RAETH_REG_RX_CALC_IDX0);
		max_rx--;
	}

	if (max_rx == 0)
		tasklet_schedule(&re->rx_tasklet);
	else
		ramips_fe_int_enable(RX_DLY_INT);
}
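The tail of the handler pairs with the driver's interrupt path: the ISR is expected to mask RX_DLY_INT before scheduling the tasklet, and the tasklet re-enables it only when it drained the ring without exhausting its budget of 16. A hedged sketch of that wiring; the handler name, ramips_fe_int_disable() and RAETH_REG_FE_INT_STATUS are assumptions, not taken from this section:

	static irqreturn_t ramips_eth_irq(int irq, void *dev_id)
	{
		struct net_device *dev = dev_id;
		struct raeth_priv *re = netdev_priv(dev);
		u32 status = ramips_fe_trr(RAETH_REG_FE_INT_STATUS);

		if (status & RX_DLY_INT) {
			/* Mask RX delay interrupts; ramips_eth_rx_hw()
			 * re-enables them once the ring is drained. */
			ramips_fe_int_disable(RX_DLY_INT);
			tasklet_schedule(&re->rx_tasklet);
		}

		/* Ack the handled interrupt sources. */
		ramips_fe_twr(status, RAETH_REG_FE_INT_STATUS);

		return IRQ_HANDLED;
	}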