Example #1
static inline void __cw1200_queue_lock(struct cw1200_queue *queue)
{
	struct cw1200_queue_stats *stats = queue->stats;
	if (queue->tx_locked_cnt++ == 0) {
		pr_debug("[TX] Queue %d is locked.\n",
			 queue->queue_id);
		ieee80211_stop_queue(stats->priv->hw, queue->queue_id);
	}
}
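The lock is reference-counted: only the 0 -> 1 transition stops the mac80211 queue, so the matching unlock should wake it only on the 1 -> 0 transition. A minimal sketch of that counterpart, assuming the same cw1200_queue fields as above:

static inline void __cw1200_queue_unlock(struct cw1200_queue *queue)
{
	struct cw1200_queue_stats *stats = queue->stats;
	BUG_ON(!queue->tx_locked_cnt);
	/* Wake the queue only when the last holder drops the lock. */
	if (--queue->tx_locked_cnt == 0) {
		pr_debug("[TX] Queue %d is unlocked.\n",
			 queue->queue_id);
		ieee80211_wake_queue(stats->priv->hw, queue->queue_id);
	}
}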
Example #2
/* Go ahead and propagate information to all virtual wiphys, it won't hurt */
void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue)
{
	struct ieee80211_hw *hw = sc->pri_wiphy->hw;
	unsigned int i;

	spin_lock_bh(&sc->wiphy_lock);

	/* Stop the primary wiphy */
	ieee80211_stop_queue(hw, skb_queue);

	/* Now stop the secondary wiphy queues */
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		struct ath_wiphy *aphy = sc->sec_wiphy[i];
		if (!aphy)
			continue;
		hw = aphy->hw;
		ieee80211_stop_queue(hw, skb_queue);
	}
	spin_unlock_bh(&sc->wiphy_lock);
}
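The wake path mirrors this function one-for-one, substituting ieee80211_wake_queue under the same wiphy_lock. A sketch under that assumption:

void ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue)
{
	struct ieee80211_hw *hw = sc->pri_wiphy->hw;
	unsigned int i;

	spin_lock_bh(&sc->wiphy_lock);

	/* Wake the primary wiphy, then every registered secondary wiphy. */
	ieee80211_wake_queue(hw, skb_queue);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		struct ath_wiphy *aphy = sc->sec_wiphy[i];
		if (!aphy)
			continue;
		ieee80211_wake_queue(aphy->hw, skb_queue);
	}
	spin_unlock_bh(&sc->wiphy_lock);
}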
Example #3
static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
				 struct sk_buff *skb, u8 ep)
{
	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
	unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep]);
	struct mt7601u_dma_buf_tx *e;
	struct mt7601u_tx_queue *q = &dev->tx_q[ep];
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev->tx_lock, flags);

	if (WARN_ON(q->entries <= q->used)) {
		ret = -ENOSPC;
		goto out;
	}

	e = &q->e[q->end];
	e->skb = skb;
	usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
			  mt7601u_complete_tx, q);
	ret = usb_submit_urb(e->urb, GFP_ATOMIC);
	if (ret) {
		/* Special-handle ENODEV from TX urb submission because it will
		 * often be the first ENODEV we see after device is removed.
		 */
		if (ret == -ENODEV)
			set_bit(MT7601U_STATE_REMOVED, &dev->state);
		else
			dev_err(dev->dev, "Error: TX urb submit failed:%d\n",
				ret);
		goto out;
	}

	q->end = (q->end + 1) % q->entries;
	q->used++;

	if (q->used >= q->entries)
		ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
out:
	spin_unlock_irqrestore(&dev->tx_lock, flags);

	return ret;
}
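On completion, the driver releases the ring slot under the same tx_lock and wakes the queue if the submit path had stopped it. A hedged sketch of that handler, assuming the queue struct carries a device back-pointer (q->dev) and a q->start consumer index, with urb->status checks and TX-status bookkeeping elided:

static void mt7601u_complete_tx(struct urb *urb)
{
	struct mt7601u_tx_queue *q = urb->context;
	struct mt7601u_dev *dev = q->dev;	/* assumed back-pointer */
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&dev->tx_lock, flags);

	skb = q->e[q->start].skb;
	q->e[q->start].skb = NULL;

	/* A slot is about to free up; undo the stop from the submit
	 * path if the ring was full. */
	if (q->used == q->entries)
		ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb));

	q->start = (q->start + 1) % q->entries;
	q->used--;

	spin_unlock_irqrestore(&dev->tx_lock, flags);

	/* Hand the skb back to mac80211; the real handler first strips
	 * the driver's DMA header and fills in the TX status. */
	ieee80211_tx_status_irqsafe(dev->hw, skb);
}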
Example #4
static void rt2x00usb_watchdog_reset_tx(struct data_queue *queue)
{
	struct queue_entry_priv_usb *entry_priv;
	unsigned short threshold = queue->threshold;

	WARNING(queue->rt2x00dev, "TX queue %d timed out, invoke reset", queue->qid);

	/*
	 * Temporarily disable the TX queue, this will force mac80211
	 * to use the other queues until this queue has been restored.
	 *
	 * Set the queue threshold to the queue limit. This prevents the
	 * queue from being enabled during the txdone handler.
	 */
	queue->threshold = queue->limit;
	ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);

	/*
	 * Reset all currently uploaded TX frames.
	 */
	while (!rt2x00queue_empty(queue)) {
		entry_priv = rt2x00queue_get_entry(queue, Q_INDEX_DONE)->priv_data;
		usb_kill_urb(entry_priv->urb);

		/*
	 * We need a short delay here to wait for the URB
	 * to be canceled and for the tx_done handler to be invoked.
		 */
		udelay(200);
	}

	/*
	 * The queue has been reset, and mac80211 is allowed to use the
	 * queue again.
	 */
	queue->threshold = threshold;
	ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
}
Example #5
int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
			       struct sta_info *sta, u16 tid)
{
	int i;

	/* XXX: currently broken due to cb/requeue use */
	return -EPERM;

	/* prepare the filter and save it for the SW queue
	 * matching the received HW queue */

	if (!local->hw.ampdu_queues)
		return -EPERM;

	/* try to get a Qdisc from the pool */
	for (i = local->hw.queues; i < ieee80211_num_queues(&local->hw); i++)
		if (!test_and_set_bit(i, local->queue_pool)) {
			ieee80211_stop_queue(local_to_hw(local), i);
			sta->tid_to_tx_q[tid] = i;

			/* If there are already pending packets
			 * on this tid, we first need to drain them
			 * on the previous queue,
			 * since HT requires strict ordering */
#ifdef CONFIG_MAC80211_HT_DEBUG
			if (net_ratelimit()) {
				DECLARE_MAC_BUF(mac);
				printk(KERN_DEBUG "allocated aggregation queue"
					" %d tid %d addr %s pool=0x%lX\n",
					i, tid, print_mac(mac, sta->sta.addr),
					local->queue_pool[0]);
			}
#endif /* CONFIG_MAC80211_HT_DEBUG */
			return 0;
		}

	return -EAGAIN;
}
Example #6
static void rt2x00usb_watchdog_tx_dma(struct data_queue *queue)
{
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	unsigned short threshold = queue->threshold;

	WARNING(queue->rt2x00dev, "TX queue %d DMA timed out,"
		" invoke forced reset", queue->qid);

	/*
	 * Temporarily disable the TX queue, this will force mac80211
	 * to use the other queues until this queue has been restored.
	 *
	 * Set the queue threshold to the queue limit. This prevents the
	 * queue from being enabled during the txdone handler.
	 */
	queue->threshold = queue->limit;
	ieee80211_stop_queue(rt2x00dev->hw, queue->qid);

	/*
	 * Kill all entries in the queue, afterwards we need to
	 * wait a bit for all URBs to be cancelled.
	 */
	rt2x00usb_kill_tx_queue(queue);

	/*
	 * In case a driver has overridden the txdone_work
	 * function, we invoke TX done handling through it.
	 */
	rt2x00dev->txdone_work.func(&rt2x00dev->txdone_work);

	/*
	 * The queue has been reset, and mac80211 is allowed to use the
	 * queue again.
	 */
	queue->threshold = threshold;
	ieee80211_wake_queue(rt2x00dev->hw, queue->qid);
}
Example #7
static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct rtl8192_tx_ring *ring;
	struct rtl_tx_desc *pdesc;
	u8 idx;
	unsigned int queue_index, hw_queue;
	unsigned long flags;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
	__le16 fc = hdr->frame_control;
	u8 *pda_addr = hdr->addr1;
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	/* ssn */
	u8 *qc = NULL;
	u8 tid = 0;
	u16 seq_number = 0;
	u8 own;
	u8 temp_one = 1;

	if (ieee80211_is_mgmt(fc))
		rtl_tx_mgmt_proc(hw, skb);
	rtl_action_proc(hw, skb, true);

	queue_index = skb_get_queue_mapping(skb);
	hw_queue = _rtl_mac_to_hwqueue(fc, queue_index);

	if (is_multicast_ether_addr(pda_addr))
		rtlpriv->stats.txbytesmulticast += skb->len;
	else if (is_broadcast_ether_addr(pda_addr))
		rtlpriv->stats.txbytesbroadcast += skb->len;
	else
		rtlpriv->stats.txbytesunicast += skb->len;

	spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);

	ring = &rtlpci->tx_ring[hw_queue];
	if (hw_queue != BEACON_QUEUE)
		idx = (ring->idx + skb_queue_len(&ring->queue)) %
				ring->entries;
	else
		idx = 0;

	pdesc = &ring->desc[idx];
	own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
			true, HW_DESC_OWN);

	if ((own == 1) && (hw_queue != BEACON_QUEUE)) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
			 ("No more TX desc@%d, ring->idx = %d,"
			  "idx = %d, skb_queue_len = 0x%d\n",
			  hw_queue, ring->idx, idx,
			  skb_queue_len(&ring->queue)));

		spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
		return skb->len;
	}

	/*
	 *if(ieee80211_is_nullfunc(fc)) {
	 *      spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
	 *      return 1;
	 *}
	 */

	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;

		seq_number = mac->tids[tid].seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		/*
		 *hdr->seq_ctrl = hdr->seq_ctrl &
		 *cpu_to_le16(IEEE80211_SCTL_FRAG);
		 *hdr->seq_ctrl |= cpu_to_le16(seq_number);
		 */

		seq_number += 1;
	}

	if (ieee80211_is_data(fc))
		rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);

	rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
					info, skb, hw_queue);

	__skb_queue_tail(&ring->queue, skb);

	rtlpriv->cfg->ops->set_desc((u8 *) pdesc, true,
				    HW_DESC_OWN, (u8 *)&temp_one);

	if (!ieee80211_has_morefrags(hdr->frame_control)) {
		if (qc)
			mac->tids[tid].seq_number = seq_number;
	}

	if ((ring->entries - skb_queue_len(&ring->queue)) < 2 &&
	    hw_queue != BEACON_QUEUE) {

		RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
			 ("less desc left, stop skb_queue@%d, "
			  "ring->idx = %d,"
			  "idx = %d, skb_queue_len = 0x%d\n",
			  hw_queue, ring->idx, idx,
			  skb_queue_len(&ring->queue)));

		ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
	}

	spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);

	rtlpriv->cfg->ops->tx_polling(hw, hw_queue);

	return 0;
}
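The stop above is undone from the TX-done interrupt path: as the hardware releases descriptors, the driver reclaims them and wakes the queue once the watermark is cleared. A hedged sketch of that drain loop, reusing the ring layout above; DMA unmapping, status fill and irq_th_lock handling are elided:

static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];

	while (skb_queue_len(&ring->queue)) {
		struct rtl_tx_desc *entry = &ring->desc[ring->idx];
		struct sk_buff *skb;

		/* Stop reclaiming as soon as the hardware still owns
		 * the descriptor. */
		if (rtlpriv->cfg->ops->get_desc((u8 *)entry, true,
						HW_DESC_OWN))
			return;

		skb = __skb_dequeue(&ring->queue);
		ring->idx = (ring->idx + 1) % ring->entries;

		/* Mirror of the stop condition in rtl_pci_tx(): wake the
		 * queue once two descriptors are free again. */
		if ((ring->entries - skb_queue_len(&ring->queue)) == 2 &&
		    prio != BEACON_QUEUE)
			ieee80211_wake_queue(hw, skb_get_queue_mapping(skb));

		ieee80211_tx_status_irqsafe(hw, skb);
	}
}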
Example #8
int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
{
	struct b43_dmaring *ring;
	struct ieee80211_hdr *hdr;
	int err = 0;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	hdr = (struct ieee80211_hdr *)skb->data;
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* The multicast ring will be sent after the DTIM */
		ring = dev->dma.tx_ring_mcast;
		/* Set the more-data bit. Ucode will clear it on
		 * the last frame for us. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		ring = select_ring_by_priority(
			dev, skb_get_queue_mapping(skb));
	}

	B43_WARN_ON(!ring->tx);

	if (unlikely(ring->stopped)) {
		/* We get here only because of a bug in mac80211.
		 * Because of a race, one packet may be queued after
		 * the queue is stopped, thus we got called when we shouldn't.
		 * For now, just refuse the transmit. */
		if (b43_debug(dev, B43_DBG_DMAVERBOSE))
			b43err(dev->wl, "Packet after queue stopped\n");
		err = -ENOSPC;
		goto out;
	}

	if (unlikely(WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME))) {
		/* If we get here, we have a real error with the queue
		 * full, but queues not stopped. */
		b43err(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out;
	}

	/* Assign the queue number to the ring (if not already done before)
	 * so TX status handling can use it. The queue to ring mapping is
	 * static, so we don't need to store it per frame. */
	ring->queue_prio = skb_get_queue_mapping(skb);

	err = dma_tx_fragment(ring, skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		ieee80211_free_txskb(dev->wl->hw, skb);
		err = 0;
		goto out;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "DMA tx mapping failure\n");
		goto out;
	}
	if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		unsigned int skb_mapping = skb_get_queue_mapping(skb);
		ieee80211_stop_queue(dev->wl->hw, skb_mapping);
		dev->wl->tx_queue_stopped[skb_mapping] = 1;
		ring->stopped = true;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
		}
	}
out:

	return err;
}
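The stop is undone from the TX status handler: once a full frame's worth of slots is free again, both stopped flags are cleared and mac80211 is woken. A sketch of that unwind as a hypothetical helper, using the same fields as above:

static void b43_dma_wake_if_drained(struct b43_wldev *dev,
				    struct b43_dmaring *ring)
{
	unsigned int prio = ring->queue_prio;

	/* Called from the TX status handler after slots were freed. */
	if (!ring->stopped || free_slots(ring) < TX_SLOTS_PER_FRAME)
		return;

	ring->stopped = false;
	dev->wl->tx_queue_stopped[prio] = 0;
	ieee80211_wake_queue(dev->wl->hw, prio);
	if (b43_debug(dev, B43_DBG_DMAVERBOSE))
		b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
}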
Example #9
int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	enum data_queue_qid qid = skb_get_queue_mapping(skb);
	struct data_queue *queue;

	/*
	 * Mac80211 might be calling this function while we are trying
	 * to remove the device or perhaps suspending it.
	 * Note that we can only stop the TX queues inside the TX path
	 * due to possible race conditions in mac80211.
	 */
	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
		goto exit_fail;

	/*
	 * Determine which queue to put packet on.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
	    test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags))
		queue = rt2x00queue_get_queue(rt2x00dev, QID_ATIM);
	else
		queue = rt2x00queue_get_queue(rt2x00dev, qid);
	if (unlikely(!queue)) {
		ERROR(rt2x00dev,
		      "Attempt to send packet over invalid queue %d.\n"
		      "Please file bug report to %s.\n", qid, DRV_PROJECT);
		goto exit_fail;
	}

	/*
	 * If CTS/RTS is required, create and queue that frame first.
	 * Make sure we have at least enough entries available to send
	 * this CTS/RTS frame as well as the data frame.
	 * Note that when the driver has set the set_rts_threshold()
	 * callback function it doesn't need software generation of
	 * either RTS or CTS-to-self frame and handles everything
	 * inside the hardware.
	 */
	if ((tx_info->control.rates[0].flags & (IEEE80211_TX_RC_USE_RTS_CTS |
						IEEE80211_TX_RC_USE_CTS_PROTECT)) &&
	    !rt2x00dev->ops->hw->set_rts_threshold) {
		if (rt2x00queue_available(queue) <= 1)
			goto exit_fail;

		if (rt2x00mac_tx_rts_cts(rt2x00dev, queue, skb))
			goto exit_fail;
	}

	if (rt2x00queue_write_tx_frame(queue, skb, false))
		goto exit_fail;

	if (rt2x00queue_threshold(queue))
		ieee80211_stop_queue(rt2x00dev->hw, qid);

	return NETDEV_TX_OK;

 exit_fail:
	ieee80211_stop_queue(rt2x00dev->hw, qid);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
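The threshold test near the end is just a watermark on free entries. A minimal sketch of rt2x00queue_threshold(), assuming rt2x00queue_available() reports the number of free entries as in the CTS/RTS check above:

static inline int rt2x00queue_threshold(struct data_queue *queue)
{
	/* Stop feeding this queue once fewer free entries remain than
	 * the configured threshold. */
	return rt2x00queue_available(queue) < queue->threshold;
}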
Example #10
/**
 * rsi_core_xmit() - This function transmits the packets received from mac80211
 * @common: Pointer to the driver private structure.
 * @skb: Pointer to the socket buffer structure.
 *
 * Return: None.
 */
void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb)
{
	struct rsi_hw *adapter = common->priv;
	struct ieee80211_tx_info *info;
	struct skb_info *tx_params;
	struct ieee80211_hdr *wh = NULL;
	struct ieee80211_vif *vif;
	u8 q_num, tid = 0;
	struct rsi_sta *rsta = NULL;

	if ((!skb) || (!skb->len)) {
		rsi_dbg(ERR_ZONE, "%s: Null skb/zero Length packet\n",
			__func__);
		goto xmit_fail;
	}
	if (common->fsm_state != FSM_MAC_INIT_DONE) {
		rsi_dbg(ERR_ZONE, "%s: FSM state not open\n", __func__);
		goto xmit_fail;
	}
	if (common->wow_flags & RSI_WOW_ENABLED) {
		rsi_dbg(ERR_ZONE,
			"%s: Blocking Tx_packets when WOWLAN is enabled\n",
			__func__);
		goto xmit_fail;
	}

	info = IEEE80211_SKB_CB(skb);
	tx_params = (struct skb_info *)info->driver_data;
	wh = (struct ieee80211_hdr *)&skb->data[0];
	tx_params->sta_id = 0;

	vif = rsi_get_vif(adapter, wh->addr2);
	if (!vif)
		goto xmit_fail;
	tx_params->vif = vif;
	tx_params->vap_id = ((struct vif_priv *)vif->drv_priv)->vap_id;
	if ((ieee80211_is_mgmt(wh->frame_control)) ||
	    (ieee80211_is_ctl(wh->frame_control)) ||
	    (ieee80211_is_qos_nullfunc(wh->frame_control))) {
		if (ieee80211_is_assoc_req(wh->frame_control) ||
		    ieee80211_is_reassoc_req(wh->frame_control)) {
			struct ieee80211_bss_conf *bss = &vif->bss_conf;

			common->eapol4_confirm = false;
			rsi_hal_send_sta_notify_frame(common,
						      RSI_IFTYPE_STATION,
						      STA_CONNECTED, bss->bssid,
						      bss->qos, bss->aid, 0,
						      vif);
		}

		q_num = MGMT_SOFT_Q;
		skb->priority = q_num;

		if (rsi_prepare_mgmt_desc(common, skb)) {
			rsi_dbg(ERR_ZONE, "Failed to prepare desc\n");
			goto xmit_fail;
		}
	} else {
		if (ieee80211_is_data_qos(wh->frame_control)) {
			u8 *qos = ieee80211_get_qos_ctl(wh);

			tid = *qos & IEEE80211_QOS_CTL_TID_MASK;
			skb->priority = TID_TO_WME_AC(tid);
		} else {
			tid = IEEE80211_NONQOS_TID;
			skb->priority = BE_Q;
		}

		q_num = skb->priority;
		tx_params->tid = tid;

		if (((vif->type == NL80211_IFTYPE_AP) ||
		     (vif->type == NL80211_IFTYPE_P2P_GO)) &&
		    (!is_broadcast_ether_addr(wh->addr1)) &&
		    (!is_multicast_ether_addr(wh->addr1))) {
			rsta = rsi_find_sta(common, wh->addr1);
			if (!rsta)
				goto xmit_fail;
			tx_params->sta_id = rsta->sta_id;
		} else {
			tx_params->sta_id = 0;
		}

		if (rsta) {
			/* Start aggregation if not done for this tid */
			if (!rsta->start_tx_aggr[tid]) {
				rsta->start_tx_aggr[tid] = true;
				ieee80211_start_tx_ba_session(rsta->sta,
							      tid, 0);
			}
		}
		if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
			q_num = MGMT_SOFT_Q;
			skb->priority = q_num;
		}
		if (rsi_prepare_data_desc(common, skb)) {
			rsi_dbg(ERR_ZONE, "Failed to prepare data desc\n");
			goto xmit_fail;
		}
	}

	if ((q_num < MGMT_SOFT_Q) &&
	    ((skb_queue_len(&common->tx_queue[q_num]) + 1) >=
	     DATA_QUEUE_WATER_MARK)) {
		rsi_dbg(ERR_ZONE, "%s: sw queue full\n", __func__);
		if (!ieee80211_queue_stopped(adapter->hw, WME_AC(q_num)))
			ieee80211_stop_queue(adapter->hw, WME_AC(q_num));
		rsi_set_event(&common->tx_thread.event);
		goto xmit_fail;
	}

	rsi_core_queue_pkt(common, skb);
	rsi_dbg(DATA_TX_ZONE, "%s: ===> Scheduling TX thread <===\n", __func__);
	rsi_set_event(&common->tx_thread.event);

	return;

xmit_fail:
	rsi_dbg(ERR_ZONE, "%s: Failed to queue packet\n", __func__);
	/* Dropping pkt here */
	ieee80211_free_txskb(common->priv->hw, skb);
}
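The AC stopped here is woken again from the TX thread once the soft queue drains. A hedged sketch of that counterpart; the helper name and the MIN_DATA_QUEUE_WATER_MARK low-water mark are assumptions mirroring the DATA_QUEUE_WATER_MARK check above:

static void rsi_check_queue_wakeup(struct rsi_common *common, u8 q_num)
{
	struct rsi_hw *adapter = common->priv;

	if (q_num >= MGMT_SOFT_Q)
		return;

	/* Mirror of the stop condition in rsi_core_xmit(): wake the AC
	 * once the software queue drains below the low-water mark. */
	if ((skb_queue_len(&common->tx_queue[q_num]) <
	     MIN_DATA_QUEUE_WATER_MARK) &&
	    ieee80211_queue_stopped(adapter->hw, WME_AC(q_num)))
		ieee80211_wake_queue(adapter->hw, WME_AC(q_num));
}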
Example #11
static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct rtl8180_priv *priv = dev->priv;
	struct rtl8180_tx_ring *ring;
	struct rtl8180_tx_desc *entry;
	unsigned long flags;
	unsigned int idx, prio;
	dma_addr_t mapping;
	u32 tx_flags;
	u8 rc_flags;
	u16 plcp_len = 0;
	__le16 rts_duration = 0;

	prio = skb_get_queue_mapping(skb);
	ring = &priv->tx_ring[prio];

	mapping = pci_map_single(priv->pdev, skb->data,
				 skb->len, PCI_DMA_TODEVICE);

	tx_flags = RTL818X_TX_DESC_FLAG_OWN | RTL818X_TX_DESC_FLAG_FS |
		   RTL818X_TX_DESC_FLAG_LS |
		   (ieee80211_get_tx_rate(dev, info)->hw_value << 24) |
		   skb->len;

	if (priv->r8185)
		tx_flags |= RTL818X_TX_DESC_FLAG_DMA |
			    RTL818X_TX_DESC_FLAG_NO_ENC;

	rc_flags = info->control.rates[0].flags;
	if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
		tx_flags |= RTL818X_TX_DESC_FLAG_RTS;
		tx_flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
	} else if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
		tx_flags |= RTL818X_TX_DESC_FLAG_CTS;
		tx_flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
	}

	if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS)
		rts_duration = ieee80211_rts_duration(dev, priv->vif, skb->len,
						      info);

	if (!priv->r8185) {
		unsigned int remainder;

		plcp_len = DIV_ROUND_UP(16 * (skb->len + 4),
				(ieee80211_get_tx_rate(dev, info)->bitrate * 2) / 10);
		remainder = (16 * (skb->len + 4)) %
			    ((ieee80211_get_tx_rate(dev, info)->bitrate * 2) / 10);
		if (remainder > 0 && remainder <= 6)
			plcp_len |= 1 << 15;
	}

	spin_lock_irqsave(&priv->lock, flags);
	idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries;
	entry = &ring->desc[idx];

	entry->rts_duration = rts_duration;
	entry->plcp_len = cpu_to_le16(plcp_len);
	entry->tx_buf = cpu_to_le32(mapping);
	entry->frame_len = cpu_to_le32(skb->len);
	entry->flags2 = info->control.rates[1].idx >= 0 ?
		ieee80211_get_alt_retry_rate(dev, info, 0)->bitrate << 4 : 0;
	entry->retry_limit = info->control.rates[0].count;
	entry->flags = cpu_to_le32(tx_flags);
	__skb_queue_tail(&ring->queue, skb);
	if (ring->entries - skb_queue_len(&ring->queue) < 2)
		ieee80211_stop_queue(dev, skb_get_queue_mapping(skb));
	spin_unlock_irqrestore(&priv->lock, flags);

	rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING, (1 << (prio + 4)));

	return 0;
}
Example #12
int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
{
	struct b43_pio_txqueue *q;
	struct ieee80211_hdr *hdr;
	unsigned int hdrlen, total_len;
	int err = 0;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	hdr = (struct ieee80211_hdr *)skb->data;

	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* The multicast queue will be sent after the DTIM. */
		q = dev->pio.tx_queue_mcast;
		/* Set the frame More-Data bit. Ucode will clear it
		 * for us on the last frame. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		q = select_queue_by_priority(dev, skb_get_queue_mapping(skb));
	}

	hdrlen = b43_txhdr_size(dev);
	total_len = roundup(skb->len + hdrlen, 4);

	if (unlikely(total_len > q->buffer_size)) {
		err = -ENOBUFS;
		b43dbg(dev->wl, "PIO: TX packet longer than queue.\n");
		goto out;
	}
	if (unlikely(q->free_packet_slots == 0)) {
		err = -ENOBUFS;
		b43warn(dev->wl, "PIO: TX packet overflow.\n");
		goto out;
	}
	B43_WARN_ON(q->buffer_used > q->buffer_size);

	if (total_len > (q->buffer_size - q->buffer_used)) {
		/* Not enough memory on the queue. */
		err = -EBUSY;
		ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
		q->stopped = 1;
		goto out;
	}

	/* Assign the queue number to the ring (if not already done before)
	 * so TX status handling can use it. The mac80211-queue to b43-queue
	 * mapping is static, so we don't need to store it per frame. */
	q->queue_prio = skb_get_queue_mapping(skb);

	err = pio_tx_frame(q, skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		err = 0;
		goto out;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "PIO transmission failure\n");
		goto out;
	}
	q->nr_tx_packets++;

	B43_WARN_ON(q->buffer_used > q->buffer_size);
	if (((q->buffer_size - q->buffer_used) < roundup(2 + 2 + 6, 4)) ||
	    (q->free_packet_slots == 0)) {
		/* The queue is full. */
		ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
		q->stopped = 1;
	}

out:
	return err;
}
Example #13
int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
{
	struct b43_dmaring *ring;
	struct ieee80211_hdr *hdr;
	int err = 0;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	hdr = (struct ieee80211_hdr *)skb->data;
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* The multicast ring will be sent after the DTIM */
		ring = dev->dma.tx_ring_mcast;
		/* Set the more-data bit. Ucode will clear it on
		 * the last frame for us. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		ring = select_ring_by_priority(
			dev, skb_get_queue_mapping(skb));
	}

	B43_WARN_ON(!ring->tx);

	if (unlikely(ring->stopped)) {
		if (b43_debug(dev, B43_DBG_DMAVERBOSE))
			b43err(dev->wl, "Packet after queue stopped\n");
		err = -ENOSPC;
		goto out;
	}

	if (unlikely(WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME))) {
		b43err(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out;
	}

	ring->queue_prio = skb_get_queue_mapping(skb);

	err = dma_tx_fragment(ring, skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		ieee80211_free_txskb(dev->wl->hw, skb);
		err = 0;
		goto out;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "DMA tx mapping failure\n");
		goto out;
	}
	if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		unsigned int skb_mapping = skb_get_queue_mapping(skb);
		ieee80211_stop_queue(dev->wl->hw, skb_mapping);
		dev->wl->tx_queue_stopped[skb_mapping] = 1;
		ring->stopped = true;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
		}
	}
out:

	return err;
}
Example #14
int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
{
	struct b43_pio_txqueue *q;
	struct ieee80211_hdr *hdr;
	unsigned int hdrlen, total_len;
	int err = 0;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	hdr = (struct ieee80211_hdr *)skb->data;

	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* The multicast queue will be sent after the DTIM. */
		q = dev->pio.tx_queue_mcast;
		/* Set the frame More-Data bit. Ucode will clear it
		 * for us on the last frame. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		q = select_queue_by_priority(dev, skb_get_queue_mapping(skb));
	}

	hdrlen = b43_txhdr_size(dev);
	total_len = roundup(skb->len + hdrlen, 4);

	if (unlikely(total_len > q->buffer_size)) {
		err = -ENOBUFS;
		b43dbg(dev->wl, "PIO: TX packet longer than queue.\n");
		goto out;
	}
	if (unlikely(q->free_packet_slots == 0)) {
		err = -ENOBUFS;
		b43warn(dev->wl, "PIO: TX packet overflow.\n");
		goto out;
	}
	B43_WARN_ON(q->buffer_used > q->buffer_size);

	if (total_len > (q->buffer_size - q->buffer_used)) {
		/* Not enough memory on the queue. */
		err = -EBUSY;
		ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
		q->stopped = true;
		goto out;
	}

	q->queue_prio = skb_get_queue_mapping(skb);

	err = pio_tx_frame(q, skb);
	if (unlikely(err == -ENOKEY)) {
		dev_kfree_skb_any(skb);
		err = 0;
		goto out;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "PIO transmission failure\n");
		goto out;
	}

	B43_WARN_ON(q->buffer_used > q->buffer_size);
	if (((q->buffer_size - q->buffer_used) < roundup(2 + 2 + 6, 4)) ||
	    (q->free_packet_slots == 0)) {
		/* The queue is full. */
		ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
		q->stopped = true;
	}

out:
	return err;
}
Example #15
/**
 * rsi_core_xmit() - This function transmits the packets received from mac80211
 * @common: Pointer to the driver private structure.
 * @skb: Pointer to the socket buffer structure.
 *
 * Return: None.
 */
void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb)
{
	struct rsi_hw *adapter = common->priv;
	struct ieee80211_tx_info *info;
	struct skb_info *tx_params;
	struct ieee80211_hdr *tmp_hdr = NULL;
	u8 q_num, tid = 0;

	if ((!skb) || (!skb->len)) {
		rsi_dbg(ERR_ZONE, "%s: Null skb/zero Length packet\n",
			__func__);
		goto xmit_fail;
	}
	info = IEEE80211_SKB_CB(skb);
	tx_params = (struct skb_info *)info->driver_data;
	tmp_hdr = (struct ieee80211_hdr *)&skb->data[0];

	if (common->fsm_state != FSM_MAC_INIT_DONE) {
		rsi_dbg(ERR_ZONE, "%s: FSM state not open\n", __func__);
		goto xmit_fail;
	}

	if ((ieee80211_is_mgmt(tmp_hdr->frame_control)) ||
	    (ieee80211_is_ctl(tmp_hdr->frame_control)) ||
	    (ieee80211_is_qos_nullfunc(tmp_hdr->frame_control))) {
		q_num = MGMT_SOFT_Q;
		skb->priority = q_num;
	} else {
		if (ieee80211_is_data_qos(tmp_hdr->frame_control)) {
			tid = (skb->data[24] & IEEE80211_QOS_TID);
			skb->priority = TID_TO_WME_AC(tid);
		} else {
			tid = IEEE80211_NONQOS_TID;
			skb->priority = BE_Q;
		}
		q_num = skb->priority;
		tx_params->tid = tid;
		tx_params->sta_id = 0;
	}

	if ((q_num != MGMT_SOFT_Q) &&
	    ((skb_queue_len(&common->tx_queue[q_num]) + 1) >=
	     DATA_QUEUE_WATER_MARK)) {
		rsi_dbg(ERR_ZONE, "%s: sw queue full\n", __func__);
		if (!ieee80211_queue_stopped(adapter->hw, WME_AC(q_num)))
			ieee80211_stop_queue(adapter->hw, WME_AC(q_num));
		rsi_set_event(&common->tx_thread.event);
		goto xmit_fail;
	}

	rsi_core_queue_pkt(common, skb);
	rsi_dbg(DATA_TX_ZONE, "%s: ===> Scheduling TX thread <===\n", __func__);
	rsi_set_event(&common->tx_thread.event);

	return;

xmit_fail:
	rsi_dbg(ERR_ZONE, "%s: Failed to queue packet\n", __func__);
	/* Dropping pkt here */
	ieee80211_free_txskb(common->priv->hw, skb);
}