Example 1
/* Only bother starting a queue on an active virtual wiphy */
bool ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue)
{
	struct ieee80211_hw *hw = sc->pri_wiphy->hw;
	unsigned int i;
	bool txq_started = false;

	spin_lock_bh(&sc->wiphy_lock);

	/* Start the primary wiphy */
	if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE) {
		ieee80211_wake_queue(hw, skb_queue);
		txq_started = true;
		goto unlock;
	}

	/* Now start the secondary wiphy queues */
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		struct ath_wiphy *aphy = sc->sec_wiphy[i];
		if (!aphy)
			continue;
		if (aphy->state != ATH_WIPHY_ACTIVE)
			continue;

		hw = aphy->hw;
		ieee80211_wake_queue(hw, skb_queue);
		txq_started = true;
		break;
	}

unlock:
	spin_unlock_bh(&sc->wiphy_lock);
	return txq_started;
}
Example 2
static void mt7601u_complete_tx(struct urb *urb)
{
	struct mt7601u_tx_queue *q = urb->context;
	struct mt7601u_dev *dev = q->dev;
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&dev->tx_lock, flags);

	if (mt7601u_urb_has_error(urb))
		dev_err(dev->dev, "Error: TX urb failed:%d\n", urb->status);
	if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
		goto out;

	skb = q->e[q->start].skb;
	trace_mt_tx_dma_done(dev, skb);

	mt7601u_tx_status(dev, skb);

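	/* Wake the mac80211 queue once ring usage drops back to the 7/8 watermark */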
	if (q->used == q->entries - q->entries / 8)
		ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb));

	q->start = (q->start + 1) % q->entries;
	q->used--;

	if (urb->status)
		goto out;

	set_bit(MT7601U_STATE_MORE_STATS, &dev->state);
	if (!test_and_set_bit(MT7601U_STATE_READING_STATS, &dev->state))
		queue_delayed_work(dev->stat_wq, &dev->stat_work,
				   msecs_to_jiffies(10));
out:
	spin_unlock_irqrestore(&dev->tx_lock, flags);
}
Example 3
File: dma.c Project: Lyude/linux
static void mt76x0_complete_tx(struct urb *urb)
{
	struct mt76x0_tx_queue *q = urb->context;
	struct mt76x0_dev *dev = q->dev;
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&dev->tx_lock, flags);

	if (mt76x0_urb_has_error(urb))
		dev_err(dev->mt76.dev, "Error: TX urb failed:%d\n", urb->status);
	if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
		goto out;

	skb = q->e[q->start].skb;
	trace_mt76x0_tx_dma_done(&dev->mt76, skb);

	__skb_queue_tail(&dev->tx_skb_done, skb);
	tasklet_schedule(&dev->tx_tasklet);

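	/* Wake the mac80211 queue once ring usage drops back to the 7/8 watermark */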
	if (q->used == q->entries - q->entries / 8)
		ieee80211_wake_queue(dev->mt76.hw, skb_get_queue_mapping(skb));

	q->start = (q->start + 1) % q->entries;
	q->used--;
out:
	spin_unlock_irqrestore(&dev->tx_lock, flags);
}
Example 4
static void rtl8180_handle_tx(struct ieee80211_hw *dev, unsigned int prio)
{
	struct rtl8180_priv *priv = dev->priv;
	struct rtl8180_tx_ring *ring = &priv->tx_ring[prio];

	while (skb_queue_len(&ring->queue)) {
		struct rtl8180_tx_desc *entry = &ring->desc[ring->idx];
		struct sk_buff *skb;
		struct ieee80211_tx_info *info;
		u32 flags = le32_to_cpu(entry->flags);

		if (flags & RTL818X_TX_DESC_FLAG_OWN)
			return;

		ring->idx = (ring->idx + 1) % ring->entries;
		skb = __skb_dequeue(&ring->queue);
		pci_unmap_single(priv->pdev, le32_to_cpu(entry->tx_buf),
				 skb->len, PCI_DMA_TODEVICE);

		info = IEEE80211_SKB_CB(skb);
		ieee80211_tx_info_clear_status(info);

		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
		    (flags & RTL818X_TX_DESC_FLAG_TX_OK))
			info->flags |= IEEE80211_TX_STAT_ACK;

		info->status.rates[0].count = (flags & 0xFF) + 1;

		ieee80211_tx_status_irqsafe(dev, skb);
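		/* Wake the mac80211 queue once two descriptors are free again */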
		if (ring->entries - skb_queue_len(&ring->queue) == 2)
			ieee80211_wake_queue(dev, prio);
	}
}
Example 5
void b43_pio_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	struct b43_pio_txqueue *q;
	struct b43_pio_txpacket *pack = NULL;
	unsigned int total_len;
	struct ieee80211_tx_info *info;

	q = parse_cookie(dev, status->cookie, &pack);
	if (unlikely(!q))
		return;
	B43_WARN_ON(!pack);

	info = IEEE80211_SKB_CB(pack->skb);

	b43_fill_txstatus_report(dev, info, status);

	total_len = pack->skb->len + b43_txhdr_size(dev);
	total_len = roundup(total_len, 4);
	q->buffer_used -= total_len;
	q->free_packet_slots += 1;

	ieee80211_tx_status(dev->wl->hw, pack->skb);
	pack->skb = NULL;
	list_add(&pack->list, &q->packets_list);

	if (q->stopped) {
		ieee80211_wake_queue(dev->wl->hw, q->queue_prio);
		q->stopped = 0;
	}
}
Example 6
static inline void __cw1200_queue_unlock(struct cw1200_queue *queue)
{
	struct cw1200_queue_stats *stats = queue->stats;
	BUG_ON(!queue->tx_locked_cnt);
	if (--queue->tx_locked_cnt == 0) {
		pr_debug("[TX] Queue %d is unlocked.\n",
			 queue->queue_id);
		ieee80211_wake_queue(stats->priv->hw, queue->queue_id);
	}
}
Example 7
static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
{
	struct mt76_queue *q = &dev->q_tx[qid];
	struct mt76_queue_entry entry;
	bool wake = false;
	int last;

	if (!q->ndesc)
		return;

	spin_lock_bh(&q->lock);
	if (flush)
		last = -1;
	else
		last = ioread32(&q->regs->dma_idx);

	while (q->queued && q->tail != last) {
		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
		if (entry.schedule)
			q->swq_queued--;

		if (entry.skb)
			dev->drv->tx_complete_skb(dev, q, &entry, flush);

		if (entry.txwi) {
			mt76_put_txwi(dev, entry.txwi);
			wake = true;
		}

		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;

		if (!flush && q->tail == last)
			last = ioread32(&q->regs->dma_idx);
	}

	if (!flush)
		mt76_txq_schedule(dev, q);
	else
		mt76_dma_sync_idx(dev, q);

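	/* Only wake data (AC) queues, and only once at least 8 descriptors are free */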
	wake = wake && qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
	spin_unlock_bh(&q->lock);

	if (wake)
		ieee80211_wake_queue(dev->hw, qid);
}
Example 8
void pcie_tx_skbs_ndp(unsigned long data)
{
	struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
	struct mwl_priv *priv = hw->priv;
	struct pcie_priv *pcie_priv = priv->hif.priv;
	int num = SYSADPT_TX_WMM_QUEUES;
	struct sk_buff *tx_skb;
	int rc;

	while (num--) {
		while (skb_queue_len(&pcie_priv->txq[num]) > 0) {
			if (pcie_priv->desc_data_ndp.tx_desc_busy_cnt >=
			    (MAX_TX_RING_SEND_SIZE - 1)) {
				pcie_tx_done_ndp(hw);
				break;
			}

			tx_skb = skb_dequeue(&pcie_priv->txq[num]);

			rc = pcie_tx_skb_ndp(priv, tx_skb);
			if (rc) {
				pcie_tx_done_ndp(hw);
				if (rc == -EAGAIN)
					skb_queue_head(&pcie_priv->txq[num],
						       tx_skb);
				break;
			}

			if (++pcie_priv->tx_done_cnt > TXDONE_THRESHOLD) {
				pcie_tx_done_ndp(hw);
				pcie_priv->tx_done_cnt = 0;
			}
		}

		if (skb_queue_len(&pcie_priv->txq[num]) <
		    pcie_priv->txq_wake_threshold) {
			int queue;

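			/* The driver's txq indices are the reverse of the mac80211 queue numbers */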
			queue = SYSADPT_TX_WMM_QUEUES - num - 1;
			if (ieee80211_queue_stopped(hw, queue))
				ieee80211_wake_queue(hw, queue);
		}
	}

	pcie_priv->is_tx_schedule = false;
}
Example 9
static void adm8211_interrupt_tci(struct ieee80211_hw *dev)
{
	struct adm8211_priv *priv = dev->priv;
	unsigned int dirty_tx;

	spin_lock(&priv->lock);

	for (dirty_tx = priv->dirty_tx; priv->cur_tx - dirty_tx; dirty_tx++) {
		unsigned int entry = dirty_tx % priv->tx_ring_size;
		u32 status = le32_to_cpu(priv->tx_ring[entry].status);
		struct ieee80211_tx_info *txi;
		struct adm8211_tx_ring_info *info;
		struct sk_buff *skb;

		if (status & TDES0_CONTROL_OWN ||
		    !(status & TDES0_CONTROL_DONE))
			break;

		info = &priv->tx_buffers[entry];
		skb = info->skb;
		txi = IEEE80211_SKB_CB(skb);

		/* TODO: check TDES0_STATUS_TUF and TDES0_STATUS_TRO */

		pci_unmap_single(priv->pdev, info->mapping,
				 info->skb->len, PCI_DMA_TODEVICE);

		ieee80211_tx_info_clear_status(txi);

		skb_pull(skb, sizeof(struct adm8211_tx_hdr));
		memcpy(skb_push(skb, info->hdrlen), skb->cb, info->hdrlen);
		if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK) &&
		    !(status & TDES0_STATUS_ES))
			txi->flags |= IEEE80211_TX_STAT_ACK;

		ieee80211_tx_status_irqsafe(dev, skb);

		info->skb = NULL;
	}

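	/* Wake queue 0 once there is room in the TX ring again */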
	if (priv->cur_tx - dirty_tx < priv->tx_ring_size - 2)
		ieee80211_wake_queue(dev, 0);

	priv->dirty_tx = dirty_tx;
	spin_unlock(&priv->lock);
}
Example 10
static void mt76u_tx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct mt76u_buf *buf;
	struct mt76_queue *q;
	bool wake;
	int i;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = &dev->q_tx[i];

		spin_lock_bh(&q->lock);
		while (true) {
			buf = &q->entry[q->head].ubuf;
			if (!buf->done || !q->queued)
				break;

			dev->drv->tx_complete_skb(dev, q,
						  &q->entry[q->head],
						  false);

			if (q->entry[q->head].schedule) {
				q->entry[q->head].schedule = false;
				q->swq_queued--;
			}

			q->head = (q->head + 1) % q->ndesc;
			q->queued--;
		}
		mt76_txq_schedule(dev, q);
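		/* Wake the queue once at least 8 descriptors are free again */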
		wake = i < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
		if (!q->queued)
			wake_up(&dev->tx_wait);

		spin_unlock_bh(&q->lock);

		if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
			ieee80211_queue_delayed_work(dev->hw,
						     &dev->usb.stat_work,
						     msecs_to_jiffies(10));

		if (wake)
			ieee80211_wake_queue(dev->hw, i);
	}
}
Example 11
/*
 * Interrupt functions.
 */
static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev, const int queue)
{
	struct data_ring *ring = rt2x00lib_get_ring(rt2x00dev, queue);
	struct data_entry *entry;
	struct data_desc *txd;
	u32 word;
	int tx_status;
	int retry;

	while (!rt2x00_ring_empty(ring)) {
		entry = rt2x00_get_data_entry_done(ring);
		txd = entry->priv;
		rt2x00_desc_read(txd, 0, &word);

		if (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) ||
		    !rt2x00_get_field32(word, TXD_W0_VALID))
			break;

		/*
		 * Obtain the status about this packet.
		 */
		tx_status = rt2x00_get_field32(word, TXD_W0_RESULT);
		retry = rt2x00_get_field32(word, TXD_W0_RETRY_COUNT);

		rt2x00lib_txdone(entry, tx_status, retry);

		/*
		 * Make this entry available for reuse.
		 */
		entry->flags = 0;
		rt2x00_set_field32(&word, TXD_W0_VALID, 0);
		rt2x00_desc_write(txd, 0, word);
		rt2x00_ring_index_done_inc(ring);
	}

	/*
	 * If the data ring was full before the txdone handler
	 * we must make sure the packet queue in the mac80211 stack
	 * is reenabled when the txdone handler has finished.
	 */
	entry = ring->entry;
	if (!rt2x00_ring_full(ring))
		ieee80211_wake_queue(rt2x00dev->hw,
				     entry->tx_status.control.queue);
}
Example 12
/*
 * TX data handlers.
 */
static void rt2x00usb_interrupt_txdone(struct urb *urb)
{
	struct queue_entry *entry = (struct queue_entry *)urb->context;
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct queue_entry_priv_usb_tx *priv_tx = entry->priv_data;
	struct txdone_entry_desc txdesc;
	__le32 *txd = (__le32 *)entry->skb->data;
	u32 word;

	if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) ||
	    !__test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
		return;

	rt2x00_desc_read(txd, 0, &word);

	/*
	 * Remove the descriptor data from the buffer.
	 */
	skb_pull(entry->skb, entry->queue->desc_size);

	/*
	 * Obtain the status about this packet.
	 */
	txdesc.status = !urb->status ? TX_SUCCESS : TX_FAIL_RETRY;
	txdesc.retry = 0;
	txdesc.control = &priv_tx->control;

	rt2x00lib_txdone(entry, &txdesc);

	/*
	 * Make this entry available for reuse.
	 */
	entry->flags = 0;
	rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE);

	/*
	 * If the data queue was full before the txdone handler
	 * we must make sure the packet queue in the mac80211 stack
	 * is reenabled when the txdone handler has finished.
	 */
	if (!rt2x00queue_full(entry->queue))
		ieee80211_wake_queue(rt2x00dev->hw, priv_tx->control.queue);
}
Example 13
static void rt2x00usb_watchdog_reset_tx(struct data_queue *queue)
{
	struct queue_entry_priv_usb *entry_priv;
	unsigned short threshold = queue->threshold;

	WARNING(queue->rt2x00dev, "TX queue %d timed out, invoke reset", queue->qid);

	/*
	 * Temporarily disable the TX queue, this will force mac80211
	 * to use the other queues until this queue has been restored.
	 *
	 * Set the queue threshold to the queue limit. This prevents the
	 * queue from being enabled during the txdone handler.
	 */
	queue->threshold = queue->limit;
	ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);

	/*
	 * Reset all currently uploaded TX frames.
	 */
	while (!rt2x00queue_empty(queue)) {
		entry_priv = rt2x00queue_get_entry(queue, Q_INDEX_DONE)->priv_data;
		usb_kill_urb(entry_priv->urb);

		/*
		 * We need a short delay here to wait for
		 * the URB to be cancelled and the tx_done handler to be invoked.
		 */
		udelay(200);
	}

	/*
	 * The queue has been reset, and mac80211 is allowed to use the
	 * queue again.
	 */
	queue->threshold = threshold;
	ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
}
Example 14
static void rt2x00usb_watchdog_tx_dma(struct data_queue *queue)
{
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	unsigned short threshold = queue->threshold;

	WARNING(queue->rt2x00dev, "TX queue %d DMA timed out,"
		" invoke forced reset", queue->qid);

	/*
	 * Temporarily disable the TX queue, this will force mac80211
	 * to use the other queues until this queue has been restored.
	 *
	 * Set the queue threshold to the queue limit. This prevents the
	 * queue from being enabled during the txdone handler.
	 */
	queue->threshold = queue->limit;
	ieee80211_stop_queue(rt2x00dev->hw, queue->qid);

	/*
	 * Kill all entries in the queue, afterwards we need to
	 * wait a bit for all URBs to be cancelled.
	 */
	rt2x00usb_kill_tx_queue(queue);

	/*
	 * In case a driver has overridden the txdone_work
	 * function, we invoke the TX done handling through it.
	 */
	rt2x00dev->txdone_work.func(&rt2x00dev->txdone_work);

	/*
	 * The queue has been reset, and mac80211 is allowed to use the
	 * queue again.
	 */
	queue->threshold = threshold;
	ieee80211_wake_queue(rt2x00dev->hw, queue->qid);
}
Example 15
static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));

	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];

	while (skb_queue_len(&ring->queue)) {
		struct rtl_tx_desc *entry = &ring->desc[ring->idx];
		struct sk_buff *skb;
		struct ieee80211_tx_info *info;

		u8 own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) entry, true,
							  HW_DESC_OWN);

		/*
		 * A beacon packet will only use the first
		 * descriptor by default, and the OWN bit may not
		 * be cleared by the hardware.
		 */
		if (own)
			return;
		ring->idx = (ring->idx + 1) % ring->entries;

		skb = __skb_dequeue(&ring->queue);
		pci_unmap_single(rtlpci->pdev,
				 rtlpriv->cfg->ops->
					     get_desc((u8 *) entry, true,
						      HW_DESC_TXBUFF_ADDR),
				 skb->len, PCI_DMA_TODEVICE);

		RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_TRACE,
			 ("new ring->idx:%d, "
			  "free: skb_queue_len:%d, free: seq:%x\n",
			  ring->idx,
			  skb_queue_len(&ring->queue),
			  *(u16 *) (skb->data + 22)));

		info = IEEE80211_SKB_CB(skb);
		ieee80211_tx_info_clear_status(info);

		info->flags |= IEEE80211_TX_STAT_ACK;
		/*info->status.rates[0].count = 1; */

		ieee80211_tx_status_irqsafe(hw, skb);

		if ((ring->entries - skb_queue_len(&ring->queue))
				== 2) {

			RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
					("more desc left, wake "
					 "skb_queue@%d, ring->idx = %d, "
					 "skb_queue_len = %d\n",
					 prio, ring->idx,
					 skb_queue_len(&ring->queue)));

			ieee80211_wake_queue(hw, skb_get_queue_mapping(skb));
		}

		skb = NULL;
	}

	if (((rtlpriv->link_info.num_rx_inperiod +
		rtlpriv->link_info.num_tx_inperiod) > 8) ||
		(rtlpriv->link_info.num_rx_inperiod > 2)) {
		rtl_lps_leave(hw);
	}
}
Example 16
void rt2x00lib_txdone(struct queue_entry *entry,
		      struct txdone_entry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	enum data_queue_qid qid = skb_get_queue_mapping(entry->skb);

	/*
	 * Unmap the skb.
	 */
	rt2x00queue_unmap_skb(rt2x00dev, entry->skb);

	/*
	 * Send frame to debugfs immediately, after this call is completed
	 * we are going to overwrite the skb->cb array.
	 */
	rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TXDONE, entry->skb);

	/*
	 * Update TX statistics.
	 */
	rt2x00dev->link.qual.tx_success +=
	    test_bit(TXDONE_SUCCESS, &txdesc->flags);
	rt2x00dev->link.qual.tx_failed +=
	    test_bit(TXDONE_FAILURE, &txdesc->flags);

	/*
	 * Initialize TX status
	 */
	memset(&tx_info->status, 0, sizeof(tx_info->status));
	tx_info->status.ack_signal = 0;
	tx_info->status.excessive_retries =
	    test_bit(TXDONE_EXCESSIVE_RETRY, &txdesc->flags);
	tx_info->status.retry_count = txdesc->retry;

	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		if (test_bit(TXDONE_SUCCESS, &txdesc->flags))
			tx_info->flags |= IEEE80211_TX_STAT_ACK;
		else if (test_bit(TXDONE_FAILURE, &txdesc->flags))
			rt2x00dev->low_level_stats.dot11ACKFailureCount++;
	}

	if (tx_info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) {
		if (test_bit(TXDONE_SUCCESS, &txdesc->flags))
			rt2x00dev->low_level_stats.dot11RTSSuccessCount++;
		else if (test_bit(TXDONE_FAILURE, &txdesc->flags))
			rt2x00dev->low_level_stats.dot11RTSFailureCount++;
	}

	/*
	 * Only send the status report to mac80211 when TX status was
	 * requested by it. If this was an extra frame coming through
	 * a mac80211 library call (RTS/CTS) then we should not send the
	 * status report back.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)
		ieee80211_tx_status_irqsafe(rt2x00dev->hw, entry->skb);
	else
		dev_kfree_skb_irq(entry->skb);

	/*
	 * Make this entry available for reuse.
	 */
	entry->skb = NULL;
	entry->flags = 0;

	rt2x00dev->ops->lib->init_txentry(rt2x00dev, entry);

	__clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
	rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE);

	/*
	 * If the data queue was below the threshold before the txdone
	 * handler we must make sure the packet queue in the mac80211 stack
	 * is reenabled when the txdone handler has finished.
	 */
	if (!rt2x00queue_threshold(entry->queue))
		ieee80211_wake_queue(rt2x00dev->hw, qid);
}
Example 17
void rt2x00lib_txdone(struct queue_entry *entry,
		      struct txdone_entry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
	enum data_queue_qid qid = skb_get_queue_mapping(entry->skb);
	unsigned int header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
	u8 rate_idx, rate_flags, retry_rates;
	u8 skbdesc_flags = skbdesc->flags;
	unsigned int i;
	bool success;

	/*
	 * Unmap the skb.
	 */
	rt2x00queue_unmap_skb(rt2x00dev, entry->skb);

	/*
	 * Remove the L2 padding which was added when the frame was queued.
	 */
	if (test_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags))
		rt2x00queue_remove_l2pad(entry->skb, header_length);

	/*
	 * If the IV/EIV data was stripped from the frame before it was
	 * passed to the hardware, we should now reinsert it again because
	 * mac80211 will expect the same data to be present in the
	 * frame as it was passed to us.
	 */
	if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags))
		rt2x00crypto_tx_insert_iv(entry->skb, header_length);

	/*
	 * Send frame to debugfs immediately, after this call is completed
	 * we are going to overwrite the skb->cb array.
	 */
	rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TXDONE, entry->skb);

	/*
	 * Determine if the frame has been successfully transmitted.
	 */
	success =
	    test_bit(TXDONE_SUCCESS, &txdesc->flags) ||
	    test_bit(TXDONE_UNKNOWN, &txdesc->flags) ||
	    test_bit(TXDONE_FALLBACK, &txdesc->flags);

	/*
	 * Update TX statistics.
	 */
	rt2x00dev->link.qual.tx_success += success;
	rt2x00dev->link.qual.tx_failed += !success;

	rate_idx = skbdesc->tx_rate_idx;
	rate_flags = skbdesc->tx_rate_flags;
	retry_rates = test_bit(TXDONE_FALLBACK, &txdesc->flags) ?
	    (txdesc->retry + 1) : 1;

	/*
	 * Initialize TX status
	 */
	memset(&tx_info->status, 0, sizeof(tx_info->status));
	tx_info->status.ack_signal = 0;

	/*
	 * The frame was sent with retries: the hardware tried
	 * different rates to send out the frame, lowering the
	 * rate by one step at each retry.
	 */
	for (i = 0; i < retry_rates && i < IEEE80211_TX_MAX_RATES; i++) {
		tx_info->status.rates[i].idx = rate_idx - i;
		tx_info->status.rates[i].flags = rate_flags;
		tx_info->status.rates[i].count = 1;
	}
	if (i < (IEEE80211_TX_MAX_RATES - 1))
		tx_info->status.rates[i].idx = -1; /* terminate */

	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		if (success)
			tx_info->flags |= IEEE80211_TX_STAT_ACK;
		else
			rt2x00dev->low_level_stats.dot11ACKFailureCount++;
	}

	if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
		if (success)
			rt2x00dev->low_level_stats.dot11RTSSuccessCount++;
		else
			rt2x00dev->low_level_stats.dot11RTSFailureCount++;
	}

	/*
	 * Only send the status report to mac80211 when it's a frame
	 * that originated in mac80211. If this was an extra frame coming
	 * through a mac80211 library call (RTS/CTS) then we should not
	 * send the status report back.
	 */
	if (!(skbdesc_flags & SKBDESC_NOT_MAC80211))
		ieee80211_tx_status_irqsafe(rt2x00dev->hw, entry->skb);
	else
		dev_kfree_skb_irq(entry->skb);

	/*
	 * Make this entry available for reuse.
	 */
	entry->skb = NULL;
	entry->flags = 0;

	rt2x00dev->ops->lib->clear_entry(entry);

	clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
	rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE);

	/*
	 * If the data queue was below the threshold before the txdone
	 * handler we must make sure the packet queue in the mac80211 stack
	 * is reenabled when the txdone handler has finished.
	 */
	if (!rt2x00queue_threshold(entry->queue))
		ieee80211_wake_queue(rt2x00dev->hw, qid);
}
Example 18
void b43_dma_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	const struct b43_dma_ops *ops;
	struct b43_dmaring *ring;
	struct b43_dmadesc_meta *meta;
	static const struct b43_txstatus fake; /* filled with 0 */
	const struct b43_txstatus *txstat;
	int slot, firstused;
	bool frame_succeed;
	int skip;
	static u8 err_out1, err_out2;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43_WARN_ON(!ring->tx);

	/* Sanity check: TX packets are processed in-order on one ring.
	 * Check if the slot deduced from the cookie really is the first
	 * used slot. */
	firstused = ring->current_slot - ring->used_slots + 1;
	if (firstused < 0)
		firstused = ring->nr_slots + firstused;

	skip = 0;
	if (unlikely(slot != firstused)) {
		/* This possibly is a firmware bug and will result in
		 * malfunction, memory leaks and/or stall of DMA functionality.
		 */
		if (slot == next_slot(ring, next_slot(ring, firstused))) {
			/* If a single header/data pair was missed, skip over
			 * the first two slots in an attempt to recover.
			 */
			slot = firstused;
			skip = 2;
			if (!err_out1) {
				/* Report the error once. */
				b43dbg(dev->wl,
				       "Skip on DMA ring %d slot %d.\n",
				       ring->index, slot);
				err_out1 = 1;
			}
		} else {
			/* More than a single header/data pair were missed.
			 * Report this error once.
			 */
			if (!err_out2)
				b43dbg(dev->wl,
				       "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
				       ring->index, firstused, slot);
			err_out2 = 1;
			return;
		}
	}

	ops = ring->ops;
	while (1) {
		B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
		/* get meta - ignore returned value */
		ops->idx2desc(ring, slot, &meta);

		if (b43_dma_ptr_is_poisoned(meta->skb)) {
			b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
			       "on ring %d\n",
			       slot, firstused, ring->index);
			break;
		}

		if (meta->skb) {
			struct b43_private_tx_info *priv_info =
			     b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));

			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 b43_txhdr_size(dev), 1);
		}

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;

			if (unlikely(!meta->skb)) {
				/* This is a scatter-gather fragment of a frame,
				 * so the skb pointer must not be NULL.
				 */
				b43dbg(dev->wl, "TX status unexpected NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}

			info = IEEE80211_SKB_CB(meta->skb);

			/*
			 * Call back to inform the ieee80211 subsystem about
			 * the status of the transmission. When skipping over
			 * a missed TX status report, use a status structure
			 * filled with zeros to indicate that the frame was not
			 * sent (frame_count 0) and not acknowledged
			 */
			if (unlikely(skip))
				txstat = &fake;
			else
				txstat = status;

			frame_succeed = b43_fill_txstatus_report(dev, info,
								 txstat);
#ifdef CPTCFG_B43_DEBUG
			if (frame_succeed)
				ring->nr_succeed_tx_packets++;
			else
				ring->nr_failed_tx_packets++;
			ring->nr_total_packet_tries += status->frame_count;
#endif /* DEBUG */
			ieee80211_tx_status(dev->wl->hw, meta->skb);

			/* skb will be freed by ieee80211_tx_status().
			 * Poison our pointer. */
			meta->skb = B43_DMA_PTR_POISON;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			if (unlikely(meta->skb)) {
				b43dbg(dev->wl, "TX status unexpected non-NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment && !skip) {
			/* This is the last scatter-gather
			 * fragment of the frame. We are done. */
			break;
		}
		slot = next_slot(ring, slot);
		if (skip > 0)
			--skip;
	}
	if (ring->stopped) {
		B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
		ring->stopped = false;
	}

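	/* If the driver's internal queue for this priority had been stopped,
	 * just clear the flag; the TX work queued below will drain it. */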
	if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
		dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
	} else {
		/* If the driver queue is running wake the corresponding
		 * mac80211 queue. */
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
		}
	}
	/* Add work to the queue. */
	ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
}
Example 19
/**
 * rsi_core_qos_processor() - This function is used to determine the wmm queue
 *			      based on the backoff procedure. Data packets are
 *			      dequeued from the selected hal queue and sent to
 *			      the lower layers.
 * @common: Pointer to the driver private structure.
 *
 * Return: None.
 */
void rsi_core_qos_processor(struct rsi_common *common)
{
	struct rsi_hw *adapter = common->priv;
	struct sk_buff *skb;
	unsigned long tstamp_1, tstamp_2;
	u8 q_num;
	int status;

	tstamp_1 = jiffies;
	while (1) {
		q_num = rsi_core_determine_hal_queue(common);
		rsi_dbg(DATA_TX_ZONE,
			"%s: Queue number = %d\n", __func__, q_num);

		if (q_num == INVALID_QUEUE) {
			rsi_dbg(DATA_TX_ZONE, "%s: No More Pkt\n", __func__);
			break;
		}
		if (common->hibernate_resume)
			break;

		mutex_lock(&common->tx_lock);

		status = adapter->check_hw_queue_status(adapter, q_num);
		if (status <= 0) {
			mutex_unlock(&common->tx_lock);
			break;
		}

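		/* Re-enable the mac80211 AC queue once this data soft queue drains below the watermark */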
		if ((q_num < MGMT_SOFT_Q) &&
		    ((skb_queue_len(&common->tx_queue[q_num])) <=
		      MIN_DATA_QUEUE_WATER_MARK)) {
			if (ieee80211_queue_stopped(adapter->hw, WME_AC(q_num)))
				ieee80211_wake_queue(adapter->hw,
						     WME_AC(q_num));
		}

		skb = rsi_core_dequeue_pkt(common, q_num);
		if (skb == NULL) {
			rsi_dbg(ERR_ZONE, "skb null\n");
			mutex_unlock(&common->tx_lock);
			break;
		}
		if (q_num == MGMT_BEACON_Q) {
			status = rsi_send_pkt_to_bus(common, skb);
			dev_kfree_skb(skb);
		} else {
#ifdef CONFIG_RSI_COEX
			if (common->coex_mode > 1) {
				status = rsi_coex_send_pkt(common, skb,
							   RSI_WLAN_Q);
			} else {
#endif
				if (q_num == MGMT_SOFT_Q)
					status = rsi_send_mgmt_pkt(common, skb);
				else
					status = rsi_send_data_pkt(common, skb);
#ifdef CONFIG_RSI_COEX
			}
#endif
		}

		if (status) {
			mutex_unlock(&common->tx_lock);
			break;
		}

		common->tx_stats.total_tx_pkt_send[q_num]++;

		tstamp_2 = jiffies;
		mutex_unlock(&common->tx_lock);

		if (time_after(tstamp_2, tstamp_1 + (300 * HZ) / 1000))
			schedule();
	}
}
Example 20
void b43_dma_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	const struct b43_dma_ops *ops;
	struct b43_dmaring *ring;
	struct b43_dmadesc_meta *meta;
	int slot, firstused;
	bool frame_succeed;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43_WARN_ON(!ring->tx);

	/* Sanity check: TX packets are processed in-order on one ring.
	 * Check if the slot deduced from the cookie really is the first
	 * used slot. */
	firstused = ring->current_slot - ring->used_slots + 1;
	if (firstused < 0)
		firstused = ring->nr_slots + firstused;
	if (unlikely(slot != firstused)) {
		/* This possibly is a firmware bug and will result in
		 * malfunction, memory leaks and/or stall of DMA functionality. */
		b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. "
		       "Expected %d, but got %d\n",
		       ring->index, firstused, slot);
		return;
	}

	ops = ring->ops;
	while (1) {
		B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
		/* get meta - ignore returned value */
		ops->idx2desc(ring, slot, &meta);

		if (b43_dma_ptr_is_poisoned(meta->skb)) {
			b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
			       "on ring %d\n",
			       slot, firstused, ring->index);
			break;
		}
		if (meta->skb) {
			struct b43_private_tx_info *priv_info =
				b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));

			unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 b43_txhdr_size(dev), 1);
		}

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;

			if (unlikely(!meta->skb)) {
				/* This is a scatter-gather fragment of a frame, so
				 * the skb pointer must not be NULL. */
				b43dbg(dev->wl, "TX status unexpected NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}

			info = IEEE80211_SKB_CB(meta->skb);

			/*
			 * Call back to inform the ieee80211 subsystem about
			 * the status of the transmission.
			 */
			frame_succeed = b43_fill_txstatus_report(dev, info, status);
#ifdef CONFIG_B43_DEBUG
			if (frame_succeed)
				ring->nr_succeed_tx_packets++;
			else
				ring->nr_failed_tx_packets++;
			ring->nr_total_packet_tries += status->frame_count;
#endif /* DEBUG */
			ieee80211_tx_status(dev->wl->hw, meta->skb);

			/* skb will be freed by ieee80211_tx_status().
			 * Poison our pointer. */
			meta->skb = B43_DMA_PTR_POISON;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			if (unlikely(meta->skb)) {
				b43dbg(dev->wl, "TX status unexpected non-NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment) {
			/* This is the last scatter-gather
			 * fragment of the frame. We are done. */
			break;
		}
		slot = next_slot(ring, slot);
	}
	if (ring->stopped) {
		B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		ring->stopped = 0;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
		}
	}
}