/*
 * Reclaim completed TX descriptors on a DMA queue. With @flush set, every
 * queued entry is torn down; otherwise only entries the hardware has already
 * consumed (up to dma_idx) are completed.
 */
static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
{
	struct mt76_queue *q = &dev->q_tx[qid];
	struct mt76_queue_entry entry;
	bool wake = false;
	int last;

	if (!q->ndesc)
		return;

	spin_lock_bh(&q->lock);
	if (flush)
		last = -1;
	else
		last = ioread32(&q->regs->dma_idx);

	while (q->queued && q->tail != last) {
		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
		if (entry.schedule)
			q->swq_queued--;

		if (entry.skb)
			dev->drv->tx_complete_skb(dev, q, &entry, flush);

		if (entry.txwi) {
			mt76_put_txwi(dev, entry.txwi);
			wake = true;
		}

		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;

		/* re-read the hardware index in case more frames completed
		 * while we were reaping
		 */
		if (!flush && q->tail == last)
			last = ioread32(&q->regs->dma_idx);
	}

	if (!flush)
		mt76_txq_schedule(dev, q);
	else
		mt76_dma_sync_idx(dev, q);

	/* only data ACs are flow-controlled; wake once the queue has room */
	wake = wake && qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;

	spin_unlock_bh(&q->lock);

	if (wake)
		ieee80211_wake_queue(dev->hw, qid);
}
/*
 * USB TX completion tasklet: reap finished URBs on each AC queue, kick the
 * TX scheduler, and wake flow-controlled mac80211 queues.
 */
static void mt76u_tx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct mt76u_buf *buf;
	struct mt76_queue *q;
	bool wake;
	int i;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = &dev->q_tx[i];

		spin_lock_bh(&q->lock);
		while (true) {
			buf = &q->entry[q->head].ubuf;
			if (!buf->done || !q->queued)
				break;

			dev->drv->tx_complete_skb(dev, q,
						  &q->entry[q->head],
						  false);

			if (q->entry[q->head].schedule) {
				q->entry[q->head].schedule = false;
				q->swq_queued--;
			}

			q->head = (q->head + 1) % q->ndesc;
			q->queued--;
		}
		mt76_txq_schedule(dev, q);
		wake = i < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
		if (!q->queued)
			wake_up(&dev->tx_wait);

		spin_unlock_bh(&q->lock);

		/* schedule the delayed TX stats readout unless one is
		 * already pending
		 */
		if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
			ieee80211_queue_delayed_work(dev->hw,
						     &dev->usb.stat_work,
						     msecs_to_jiffies(10));

		if (wake)
			ieee80211_wake_queue(dev->hw, i);
	}
}