void mt76u_buf_free(struct mt76u_buf *buf)
{
	struct urb *urb = buf->urb;
	int i;

	for (i = 0; i < urb->num_sgs; i++)
		skb_free_frag(sg_virt(&urb->sg[i]));
	usb_free_urb(buf->urb);
}
static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
	struct sk_buff *skb;
	unsigned char *data;
	int len;
	int done = 0;
	bool more;

	while (done < budget) {
		u32 info;

		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
		if (!data)
			break;

		/* Continuation of a multi-buffer frame: append and move on. */
		if (q->rx_head) {
			mt76_add_fragment(dev, q, data, len, more);
			continue;
		}

		skb = build_skb(data, q->buf_size);
		if (!skb) {
			/* build_skb() failed, the fragment is still ours. */
			skb_free_frag(data);
			continue;
		}

		skb_reserve(skb, q->buf_offset);
		if (skb->tail + len > skb->end) {
			/* Reported length overruns the buffer: drop. */
			dev_kfree_skb(skb);
			continue;
		}

		if (q == &dev->q_rx[MT_RXQ_MCU]) {
			u32 *rxfce = (u32 *)skb->cb;

			*rxfce = info;
		}

		__skb_put(skb, len);
		done++;

		/* More fragments follow: park the skb until they arrive. */
		if (more) {
			q->rx_head = skb;
			continue;
		}

		dev->drv->rx_skb(dev, q - dev->q_rx, skb);
	}

	mt76_dma_rx_fill(dev, q, true);

	return done;
}
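/*
 * Hypothetical sketch (not driver code): the fragment-ownership rule
 * mt76_dma_rx_process() follows. Until build_skb() succeeds, the caller
 * owns the page fragment and must release it with skb_free_frag();
 * afterwards the skb owns it, so later error paths free the skb and
 * never the raw fragment.
 */
#include <linux/skbuff.h>

static struct sk_buff *rx_frag_to_skb_sketch(void *data, unsigned int buf_size)
{
	struct sk_buff *skb = build_skb(data, buf_size);

	if (!skb) {
		/* build_skb() failed: the fragment is still ours to free */
		skb_free_frag(data);
		return NULL;
	}

	/* success: the fragment now belongs to the skb */
	return skb;
}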
static void hip04_free_ring(struct net_device *ndev, struct device *d)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	int i;

	/* RX buffers are bare page fragments; TX entries are full sk_buffs. */
	for (i = 0; i < RX_DESC_NUM; i++)
		if (priv->rx_buf[i])
			skb_free_frag(priv->rx_buf[i]);

	for (i = 0; i < TX_DESC_NUM; i++)
		if (priv->tx_skb[i])
			dev_kfree_skb_any(priv->tx_skb[i]);

	dma_free_coherent(d, TX_DESC_NUM * sizeof(struct tx_desc),
			  priv->tx_desc, priv->tx_desc_dma);
}
static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
	void *buf;
	bool more;

	spin_lock_bh(&q->lock);
	do {
		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
		if (!buf)
			break;

		skb_free_frag(buf);
	} while (1);
	spin_unlock_bh(&q->lock);
}
static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, bool napi)
{
	dma_addr_t addr;
	void *buf;
	int frames = 0;
	int len = SKB_WITH_OVERHEAD(q->buf_size);
	int offset = q->buf_offset;
	int idx;
	void *(*alloc)(unsigned int fragsz);

	/* napi_alloc_frag() may only be called from softirq/NAPI context. */
	if (napi)
		alloc = napi_alloc_frag;
	else
		alloc = netdev_alloc_frag;

	spin_lock_bh(&q->lock);

	while (q->queued < q->ndesc - 1) {
		struct mt76_queue_buf qbuf;

		buf = alloc(q->buf_size);
		if (!buf)
			break;

		addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev->dev, addr)) {
			skb_free_frag(buf);
			break;
		}

		qbuf.addr = addr + offset;
		qbuf.len = len - offset;
		idx = mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
		frames++;
	}

	if (frames)
		mt76_dma_kick_queue(dev, q);

	spin_unlock_bh(&q->lock);

	return frames;
}
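/*
 * Hypothetical sketch of the allocation/free pairing mt76_dma_rx_fill()
 * relies on: buffers from the page-frag allocators (napi_alloc_frag() in
 * NAPI context, netdev_alloc_frag() elsewhere) must be returned with
 * skb_free_frag(), exactly as the dma_mapping_error() path above does.
 */
#include <linux/skbuff.h>

static void frag_pairing_sketch(unsigned int fragsz)
{
	void *buf = netdev_alloc_frag(fragsz);	/* page-frag allocation */

	if (!buf)
		return;

	skb_free_frag(buf);			/* the matching release */
}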
static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
		 int nsgs, int len, int sglen)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	struct urb *urb = buf->urb;
	int i;

	spin_lock_bh(&q->rx_page_lock);
	for (i = 0; i < nsgs; i++) {
		struct page *page;
		void *data;
		int offset;

		data = page_frag_alloc(&q->rx_page, len, GFP_ATOMIC);
		if (!data)
			break;

		page = virt_to_head_page(data);
		offset = data - page_address(page);
		sg_set_page(&urb->sg[i], page, sglen, offset);
	}
	spin_unlock_bh(&q->rx_page_lock);

	if (i < nsgs) {
		int j;

		/* Short fill: free leftover entries past the requested count. */
		for (j = nsgs; j < urb->num_sgs; j++)
			skb_free_frag(sg_virt(&urb->sg[j]));
		urb->num_sgs = i;
	}

	urb->num_sgs = max_t(int, i, urb->num_sgs);
	buf->len = urb->num_sgs * sglen;
	sg_init_marker(urb->sg, urb->num_sgs);

	return i ? : -ENOMEM;
}
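/*
 * Hypothetical sketch mirroring the partial-fill path above: scatterlist
 * entries filled from the page-frag pool are released by mapping each
 * entry back to the virtual address page_frag_alloc() returned (sg_virt()
 * yields page_address(sg_page(sg)) + sg->offset) and handing that to
 * skb_free_frag().
 */
#include <linux/scatterlist.h>
#include <linux/skbuff.h>

static void rx_sg_free_sketch(struct scatterlist *sg, unsigned int num_sgs)
{
	unsigned int i;

	for (i = 0; i < num_sgs; i++)
		skb_free_frag(sg_virt(&sg[i]));
}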