static int mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget) { struct sk_buff *skb; unsigned char *data; int len; int done = 0; bool more; while (done < budget) { u32 info; data = mt76_dma_dequeue(dev, q, false, &len, &info, &more); if (!data) break; if (q->rx_head) { mt76_add_fragment(dev, q, data, len, more); continue; } skb = build_skb(data, q->buf_size); if (!skb) { skb_free_frag(data); continue; } skb_reserve(skb, q->buf_offset); if (skb->tail + len > skb->end) { dev_kfree_skb(skb); continue; } if (q == &dev->q_rx[MT_RXQ_MCU]) { u32 * rxfce = (u32 *) skb->cb; *rxfce = info; } __skb_put(skb, len); done++; if (more) { q->rx_head = skb; continue; } dev->drv->rx_skb(dev, q - dev->q_rx, skb); } mt76_dma_rx_fill(dev, q, true); return done; }
static int mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb) { struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN]; u8 *data = sg_virt(&urb->sg[0]); int data_len, len, nsgs = 1; struct sk_buff *skb; if (!test_bit(MT76_STATE_INITIALIZED, &dev->state)) return 0; len = mt76u_get_rx_entry_len(data, urb->actual_length); if (len < 0) return 0; skb = build_skb(data, q->buf_size); if (!skb) return 0; data_len = min_t(int, len, urb->sg[0].length - MT_DMA_HDR_LEN); skb_reserve(skb, MT_DMA_HDR_LEN); if (skb->tail + data_len > skb->end) { dev_kfree_skb(skb); return 1; } __skb_put(skb, data_len); len -= data_len; while (len > 0) { data_len = min_t(int, len, urb->sg[nsgs].length); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, sg_page(&urb->sg[nsgs]), urb->sg[nsgs].offset, data_len, q->buf_size); len -= data_len; nsgs++; } dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb); return nsgs; }
/*
 * Process completed RX descriptors between ring->start and the hardware's
 * current status pointer, passing good frames to NAPI GRO. Returns the
 * number of frames handled (bounded by @weight).
 *
 * For each slot a replacement buffer is allocated *before* the old one is
 * consumed, so the descriptor can always be re-armed; on allocation
 * failure the slot is poisoned instead, and the poison pattern is caught
 * on a later pass.
 */
static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring, int weight)
{
	u32 end_slot;
	int handled = 0;

	/* Convert the hardware status pointer into a slot index relative to
	 * this ring's base.
	 */
	end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot -= ring->index_base;
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot /= sizeof(struct bgmac_dma_desc);

	while (ring->start != end_slot) {
		struct device *dma_dev = bgmac->core->dma_dev;
		struct bgmac_slot_info *slot = &ring->slots[ring->start];
		struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;
		struct sk_buff *skb;
		void *buf = slot->buf;
		dma_addr_t dma_addr = slot->dma_addr;
		u16 len, flags;

		/* do { ... } while (0) so error paths can `break` straight to
		 * the per-slot descriptor re-arm below.
		 */
		do {
			/* Prepare new skb as replacement; on failure poison the
			 * current buffer and retry this slot on a later poll.
			 */
			if (bgmac_dma_rx_skb_for_slot(bgmac, slot)) {
				bgmac_dma_rx_poison_buf(dma_dev, slot);
				break;
			}

			/* Unmap buffer to make it accessible to the CPU */
			dma_unmap_single(dma_dev, dma_addr, BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);

			/* Get info from the header */
			len = le16_to_cpu(rx->len);
			flags = le16_to_cpu(rx->flags);

			/* Check for poison and drop or pass the packet */
			if (len == 0xdead && flags == 0xbeef) {
				bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n", ring->start);
				put_page(virt_to_head_page(buf));
				break;
			}

			/* Length sanity check against the allocation size. */
			if (len > BGMAC_RX_ALLOC_SIZE) {
				bgmac_err(bgmac, "Found oversized packet at slot %d, DMA issue!\n", ring->start);
				put_page(virt_to_head_page(buf));
				break;
			}

			/* Omit CRC. */
			len -= ETH_FCS_LEN;

			skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE);
			if (unlikely(!skb)) {
				bgmac_err(bgmac, "build_skb failed\n");
				put_page(virt_to_head_page(buf));
				break;
			}
			/* Expose header + payload, then strip the RX header so
			 * skb->data points at the Ethernet frame.
			 */
			skb_put(skb, BGMAC_RX_FRAME_OFFSET + BGMAC_RX_BUF_OFFSET + len);
			skb_pull(skb, BGMAC_RX_FRAME_OFFSET + BGMAC_RX_BUF_OFFSET);

			skb_checksum_none_assert(skb);
			skb->protocol = eth_type_trans(skb, bgmac->net_dev);
			napi_gro_receive(&bgmac->napi, skb);
			handled++;
		} while (0);

		/* Re-arm the descriptor with the (possibly replaced) buffer. */
		bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);

		if (++ring->start >= BGMAC_RX_RING_SLOTS)
			ring->start = 0;

		if (handled >= weight) /* Should never be greater */
			break;
	}

	/* Tell the hardware how far we have consumed. */
	bgmac_dma_rx_update_index(bgmac, ring);

	return handled;
}
/*
 * NAPI RX poll: consume received buffers until the budget is exhausted,
 * the hardware count runs dry, or a zero-length (end-of-run) descriptor
 * is seen. Each consumed slot is refilled with a fresh fragment before
 * advancing. Returns the number of packets delivered.
 */
static int hip04_rx_poll(struct napi_struct *napi, int budget)
{
	struct hip04_priv *priv = container_of(napi, struct hip04_priv, napi);
	struct net_device *ndev = priv->ndev;
	struct net_device_stats *stats = &ndev->stats;
	unsigned int cnt = hip04_recv_cnt(priv);
	struct rx_desc *desc;
	struct sk_buff *skb;
	unsigned char *buf;
	bool last = false;
	dma_addr_t phys;
	int rx = 0;
	int tx_remaining;
	u16 len;
	u32 err;

	while (cnt && !last) {
		buf = priv->rx_buf[priv->rx_head];
		skb = build_skb(buf, priv->rx_buf_size);
		if (unlikely(!skb)) {
			net_dbg_ratelimited("build_skb failed\n");
			/* NOTE(review): jumping to refill here overwrites
			 * rx_buf/rx_phys for this slot while the old fragment is
			 * still DMA-mapped — the old mapping and fragment appear
			 * to be leaked. Verify against the full driver.
			 */
			goto refill;
		}

		dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head],
				 RX_BUF_SIZE, DMA_FROM_DEVICE);
		priv->rx_phys[priv->rx_head] = 0;

		/* The descriptor header lives at the start of the RX buffer;
		 * fields are big-endian.
		 */
		desc = (struct rx_desc *)skb->data;
		len = be16_to_cpu(desc->pkt_len);
		err = be32_to_cpu(desc->pkt_err);

		if (0 == len) {
			/* Zero length marks the end of the received run. */
			dev_kfree_skb_any(skb);
			last = true;
		} else if ((err & RX_PKT_ERR) || (len >= GMAC_MAX_PKT_LEN)) {
			/* Hardware-flagged error or implausible length: drop. */
			dev_kfree_skb_any(skb);
			stats->rx_dropped++;
			stats->rx_errors++;
		} else {
			/* Skip pad/align area so data points at the frame. */
			skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
			skb_put(skb, len);
			skb->protocol = eth_type_trans(skb, ndev);
			napi_gro_receive(&priv->napi, skb);
			stats->rx_packets++;
			stats->rx_bytes += len;
			rx++;
		}

refill:
		/* Replace the consumed buffer and hand the slot back to HW.
		 * On allocation/mapping failure we bail out without completing
		 * NAPI, so poll will be invoked again.
		 */
		buf = netdev_alloc_frag(priv->rx_buf_size);
		if (!buf)
			goto done;
		phys = dma_map_single(&ndev->dev, buf, RX_BUF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, phys))
			goto done;
		priv->rx_buf[priv->rx_head] = buf;
		priv->rx_phys[priv->rx_head] = phys;
		hip04_set_recv_desc(priv, phys);

		priv->rx_head = RX_NEXT(priv->rx_head);
		if (rx >= budget)
			goto done;

		/* Re-read the hardware count when the cached one is used up. */
		if (--cnt == 0)
			cnt = hip04_recv_cnt(priv);
	}

	if (!(priv->reg_inten & RCV_INT)) {
		/* enable rx interrupt */
		priv->reg_inten |= RCV_INT;
		writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
	}
	napi_complete(napi);
done:
	/* clean up tx descriptors and start a new timer if necessary */
	tx_remaining = hip04_tx_reclaim(ndev, false);
	if (rx < budget && tx_remaining)
		hip04_start_tx_timer(priv);

	return rx;
}