Example #1
/* caller must hold wl->mutex */
void wl1271_tx_flush(struct wl1271 *wl)
{
	int i;
	struct sk_buff *skb;
	struct ieee80211_tx_info *info;

	/* TX failure */
/* 	control->flags = 0; FIXME */

	while ((skb = skb_dequeue(&wl->tx_queue))) {
		info = IEEE80211_SKB_CB(skb);

		wl1271_debug(DEBUG_TX, "flushing skb 0x%p", skb);

		if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS))
			continue;

		ieee80211_tx_status(wl->hw, skb);
	}

	for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
		if (wl->tx_frames[i] != NULL) {
			skb = wl->tx_frames[i];
			info = IEEE80211_SKB_CB(skb);

			if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS))
				continue;

			ieee80211_tx_status(wl->hw, skb);
			wl->tx_frames[i] = NULL;
		}
}
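A recurring pattern across these examples: drain a pending-frame queue and hand each skb back through ieee80211_tx_status(), which consumes (frees) it. Below is a minimal sketch of that pattern; struct my_dev and my_dev_flush are invented names, not taken from any driver above.

#include <net/mac80211.h>

/* Invented driver-private state; not from any example above. */
struct my_dev {
	struct ieee80211_hw *hw;
	struct sk_buff_head tx_queue;
};

static void my_dev_flush(struct my_dev *dev)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *info;

	while ((skb = skb_dequeue(&dev->tx_queue))) {
		info = IEEE80211_SKB_CB(skb);

		/* Report "not acknowledged": reset the status area and
		 * terminate the rate chain before handing the skb back. */
		ieee80211_tx_info_clear_status(info);
		info->status.rates[0].idx = -1;

		/* mac80211 consumes (frees) the skb here. */
		ieee80211_tx_status(dev->hw, skb);
	}
}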
Example #2
static void wl1271_ps_filter_frames(struct wl1271 *wl, u8 hlid)
{
	int i;
	struct sk_buff *skb;
	struct ieee80211_tx_info *info;
	unsigned long flags;
	int filtered[NUM_TX_QUEUES];

	/* filter all frames currently in the low level queues for this hlid */
	for (i = 0; i < NUM_TX_QUEUES; i++) {
		filtered[i] = 0;
		while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
			filtered[i]++;

			if (WARN_ON(wl12xx_is_dummy_packet(wl, skb)))
				continue;

			info = IEEE80211_SKB_CB(skb);
			info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
			info->status.rates[0].idx = -1;
			ieee80211_tx_status(wl->hw, skb);
		}
	}

	spin_lock_irqsave(&wl->wl_lock, flags);
	for (i = 0; i < NUM_TX_QUEUES; i++)
		wl->tx_queue_count[i] -= filtered[i];
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	wl1271_handle_tx_low_watermark(wl);
}
Example #3
File: main.c Project: dnlove/maica
static void ieee80211_tasklet_handler(unsigned long data)
{
	struct ieee80211_local *local = (struct ieee80211_local *) data;
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&local->skb_queue)) ||
	       (skb = skb_dequeue(&local->skb_queue_unreliable))) {
		switch (skb->pkt_type) {
		case IEEE80211_RX_MSG:
			/* Clear skb->pkt_type so as not to confuse the
			 * kernel netstack. */
			skb->pkt_type = 0;
			ieee80211_rx(local_to_hw(local), skb);
			break;
		case IEEE80211_TX_STATUS_MSG:
			skb->pkt_type = 0;
			ieee80211_tx_status(local_to_hw(local), skb);
			break;
		default:
			WARN(1, "mac80211: Packet is of unknown type %d\n",
			     skb->pkt_type);
			dev_kfree_skb(skb);
			break;
		}
	}
}
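ieee80211_tx_status() may not be called from hardware interrupt context, which is why mac80211 itself defers the work to the tasklet above. A driver completing frames in its ISR can let mac80211 do the deferral instead; a hedged sketch, with my_isr_complete_tx as an invented name:

#include <net/mac80211.h>

/* Sketch: completing a frame directly from a hard-IRQ handler.
 * my_isr_complete_tx and its arguments are illustrative. */
static void my_isr_complete_tx(struct ieee80211_hw *hw,
			       struct sk_buff *skb, bool acked)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	ieee80211_tx_info_clear_status(info);
	if (acked)
		info->flags |= IEEE80211_TX_STAT_ACK;

	/* The _irqsafe variant queues the skb internally and lets
	 * mac80211's own tasklet do the real processing, so it is
	 * safe to call in hardware interrupt context. */
	ieee80211_tx_status_irqsafe(hw, skb);
}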
Example #4
void b43_pio_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	struct b43_pio_txqueue *q;
	struct b43_pio_txpacket *pack = NULL;
	unsigned int total_len;
	struct ieee80211_tx_info *info;

	q = parse_cookie(dev, status->cookie, &pack);
	if (unlikely(!q))
		return;
	B43_WARN_ON(!pack);

	info = IEEE80211_SKB_CB(pack->skb);

	b43_fill_txstatus_report(dev, info, status);

	total_len = pack->skb->len + b43_txhdr_size(dev);
	total_len = roundup(total_len, 4);
	q->buffer_used -= total_len;
	q->free_packet_slots += 1;

	ieee80211_tx_status(dev->wl->hw, pack->skb);
	pack->skb = NULL;
	list_add(&pack->list, &q->packets_list);

	if (q->stopped) {
		ieee80211_wake_queue(dev->wl->hw, q->queue_prio);
		q->stopped = 0;
	}
}
Example #5
static void wl1271_tx_complete_packet(struct wl1271 *wl,
				      struct wl1271_tx_hw_res_descr *result)
{

	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	u32 header_len;
	int id = result->id;

	/* check that the id is valid */
	if (id >= TX_HW_RESULT_QUEUE_LEN || wl->tx_frames[id] == NULL) {
		wl1271_warning("TX result illegal id: %d", id);
		return;
	}

	skb = wl->tx_frames[id];
	info = IEEE80211_SKB_CB(skb);

	/* update packet status */
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		if (result->status == TX_SUCCESS)
			info->flags |= IEEE80211_TX_STAT_ACK;
		if (result->status & TX_RETRY_EXCEEDED) {
			/* FIXME */
			/* info->status.excessive_retries = 1; */
			wl->stats.excessive_retries++;
		}
	}

	/* FIXME */
	/* info->status.retry_count = result->ack_failures; */
	wl->stats.retry_count += result->ack_failures;

	/* get header len */
	if (info->control.hw_key &&
	    info->control.hw_key->alg == ALG_TKIP)
		header_len = WL1271_TKIP_IV_SPACE +
			sizeof(struct wl1271_tx_hw_descr);
	else
		header_len = sizeof(struct wl1271_tx_hw_descr);

	wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
		     " status 0x%x",
		     result->id, skb, result->ack_failures,
		     result->rate_class_index, result->status);

	/* remove private header from packet */
	skb_pull(skb, header_len);

	/* return the packet to the stack */
	ieee80211_tx_status(wl->hw, skb);
	wl->tx_frames[result->id] = NULL;
}
Example #6
File: tx.c Project: 020gzh/linux
void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	mt7601u_tx_skb_remove_dma_overhead(skb, info);

	ieee80211_tx_info_clear_status(info);
	info->status.rates[0].idx = -1;
	info->flags |= IEEE80211_TX_STAT_ACK;

	spin_lock(&dev->mac_lock);
	ieee80211_tx_status(dev->hw, skb);
	spin_unlock(&dev->mac_lock);
}
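The spinlock here only serializes the status path; the deeper point is that mac80211 exposes three variants of the same report for different calling contexts. A short sketch of picking the right one (my_report_from_worker is an invented name):

#include <net/mac80211.h>

/* Context rules for the three reporting variants:
 *   ieee80211_tx_status()         - softirq/tasklet context
 *   ieee80211_tx_status_ni()      - process context (disables BHs itself)
 *   ieee80211_tx_status_irqsafe() - hardware interrupt context */
static void my_report_from_worker(struct ieee80211_hw *hw,
				  struct sk_buff *skb)
{
	/* Running from a workqueue (process context), so use the _ni
	 * variant instead of wrapping the plain call in BH locking. */
	ieee80211_tx_status_ni(hw, skb);
}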
Example #7
static void wl1271_ps_filter_frames(struct wl1271 *wl, u8 hlid)
{
	int i, filtered = 0;
	struct sk_buff *skb;
	struct ieee80211_tx_info *info;
	unsigned long flags;

	/* filter all frames currently in the low level queues for this hlid */
	for (i = 0; i < NUM_TX_QUEUES; i++) {
		while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
			info = IEEE80211_SKB_CB(skb);
			info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
			info->status.rates[0].idx = -1;
			ieee80211_tx_status(wl->hw, skb);
			filtered++;
		}
	}

	spin_lock_irqsave(&wl->wl_lock, flags);
	wl->tx_queue_count -= filtered;
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	wl1271_handle_tx_low_watermark(wl);
}
Example #8
void rt2x00lib_txdone(struct queue_entry *entry,
		      struct txdone_entry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
	unsigned int header_length, i;
	u8 rate_idx, rate_flags, retry_rates;
	u8 skbdesc_flags = skbdesc->flags;
	bool success;

	/*
	 * Unmap the skb.
	 */
	rt2x00queue_unmap_skb(entry);

	/*
	 * Remove the extra tx headroom from the skb.
	 */
	skb_pull(entry->skb, rt2x00dev->ops->extra_tx_headroom);

	/*
	 * Signal that the TX descriptor is no longer in the skb.
	 */
	skbdesc->flags &= ~SKBDESC_DESC_IN_SKB;

	/*
	 * Determine the length of 802.11 header.
	 */
	header_length = ieee80211_get_hdrlen_from_skb(entry->skb);

	/*
	 * Remove the L2 padding that was added during transmission.
	 */
	if (test_bit(REQUIRE_L2PAD, &rt2x00dev->cap_flags))
		rt2x00queue_remove_l2pad(entry->skb, header_length);

	/*
	 * If the IV/EIV data was stripped from the frame before it was
	 * passed to the hardware, we should now reinsert it again because
	 * mac80211 will expect the same data to be present in the
	 * frame as it was passed to us.
	 */
	if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags))
		rt2x00crypto_tx_insert_iv(entry->skb, header_length);

	/*
	 * Send the frame to debugfs immediately; after this call completes
	 * we are going to overwrite the skb->cb array.
	 */
	rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TXDONE, entry->skb);

	/*
	 * Determine if the frame has been successfully transmitted.
	 */
	success =
	    test_bit(TXDONE_SUCCESS, &txdesc->flags) ||
	    test_bit(TXDONE_UNKNOWN, &txdesc->flags);

	/*
	 * Update TX statistics.
	 */
	rt2x00dev->link.qual.tx_success += success;
	rt2x00dev->link.qual.tx_failed += !success;

	rate_idx = skbdesc->tx_rate_idx;
	rate_flags = skbdesc->tx_rate_flags;
	retry_rates = test_bit(TXDONE_FALLBACK, &txdesc->flags) ?
	    (txdesc->retry + 1) : 1;

	/*
	 * Initialize TX status
	 */
	memset(&tx_info->status, 0, sizeof(tx_info->status));
	tx_info->status.ack_signal = 0;

	/*
	 * The frame was sent with retries: the hardware tried
	 * different rates, lowering the rate one step at each
	 * retry until the lowest rate was reached.
	 */
	for (i = 0; i < retry_rates && i < IEEE80211_TX_MAX_RATES; i++) {
		tx_info->status.rates[i].idx = rate_idx - i;
		tx_info->status.rates[i].flags = rate_flags;

		if (rate_idx - i == 0) {
			/*
			 * The lowest rate (index 0) was used until the
			 * number of max retries was reached.
			 */
			tx_info->status.rates[i].count = retry_rates - i;
			i++;
			break;
		}
		tx_info->status.rates[i].count = 1;
	}
	if (i < (IEEE80211_TX_MAX_RATES - 1))
		tx_info->status.rates[i].idx = -1; /* terminate */

	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		if (success)
			tx_info->flags |= IEEE80211_TX_STAT_ACK;
		else
			rt2x00dev->low_level_stats.dot11ACKFailureCount++;
	}

	/*
	 * Every single frame has its own tx status, hence report
	 * every frame as an A-MPDU of size 1.
	 *
	 * TODO: if we can find out how many frames were aggregated
	 * by the hw we could provide the real ampdu_len to mac80211
	 * which would allow the rc algorithm to better decide on
	 * which rates are suitable.
	 */
	if (test_bit(TXDONE_AMPDU, &txdesc->flags) ||
	    tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
		tx_info->status.ampdu_len = 1;
		tx_info->status.ampdu_ack_len = success ? 1 : 0;

		if (!success)
			tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
	}

	if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
		if (success)
			rt2x00dev->low_level_stats.dot11RTSSuccessCount++;
		else
			rt2x00dev->low_level_stats.dot11RTSFailureCount++;
	}

	/*
	 * Only send the status report to mac80211 when it's a frame
	 * that originated in mac80211. If this was an extra frame coming
	 * through a mac80211 library call (RTS/CTS) then we should not
	 * send the status report back.
	 */
	if (!(skbdesc_flags & SKBDESC_NOT_MAC80211)) {
		if (test_bit(REQUIRE_TASKLET_CONTEXT, &rt2x00dev->cap_flags))
			ieee80211_tx_status(rt2x00dev->hw, entry->skb);
		else
			ieee80211_tx_status_ni(rt2x00dev->hw, entry->skb);
	} else
		dev_kfree_skb_any(entry->skb);

	/*
	 * Make this entry available for reuse.
	 */
	entry->skb = NULL;
	entry->flags = 0;

	rt2x00dev->ops->lib->clear_entry(entry);

	rt2x00queue_index_inc(entry, Q_INDEX_DONE);

	/*
	 * If the data queue was below the threshold before the txdone
	 * handler we must make sure the packet queue in the mac80211 stack
	 * is reenabled when the txdone handler has finished. This has to be
	 * serialized with rt2x00mac_tx(), otherwise we could wake up the
	 * queue before it was stopped.
	 */
	spin_lock_bh(&entry->queue->tx_lock);
	if (!rt2x00queue_threshold(entry->queue))
		rt2x00queue_unpause_queue(entry->queue);
	spin_unlock_bh(&entry->queue->tx_lock);
}
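The rate-chain loop in the middle of rt2x00lib_txdone() encodes a general mac80211 contract: status.rates[] lists each rate tried, and the first unused slot is terminated with idx = -1. A standalone sketch of that contract, assuming first_idx and tries come from the device and that the status area was already cleared:

#include <net/mac80211.h>

/* Sketch: fill the rate-retry chain for a frame that was tried
 * `tries` times, stepping the rate index down one step per retry.
 * Assumes info->status was cleared beforehand (e.g. via
 * ieee80211_tx_info_clear_status()). */
static void my_fill_rate_chain(struct ieee80211_tx_info *info,
			       u8 first_idx, u8 tries)
{
	int i;

	for (i = 0; i < tries && i < IEEE80211_TX_MAX_RATES; i++) {
		info->status.rates[i].idx = first_idx - i;
		info->status.rates[i].count = 1;

		if (first_idx - i == 0) {
			/* The lowest rate was reached; all remaining
			 * tries happened at index 0. */
			info->status.rates[i].count = tries - i;
			i++;
			break;
		}
	}
	if (i < IEEE80211_TX_MAX_RATES)
		info->status.rates[i].idx = -1; /* terminate the chain */
}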
Example #9
void b43_dma_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	const struct b43_dma_ops *ops;
	struct b43_dmaring *ring;
	struct b43_dmadesc_meta *meta;
	static const struct b43_txstatus fake; /* filled with 0 */
	const struct b43_txstatus *txstat;
	int slot, firstused;
	bool frame_succeed;
	int skip;
	static u8 err_out1, err_out2;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43_WARN_ON(!ring->tx);

	/* Sanity check: TX packets are processed in-order on one ring.
	 * Check if the slot deduced from the cookie really is the first
	 * used slot. */
	firstused = ring->current_slot - ring->used_slots + 1;
	if (firstused < 0)
		firstused = ring->nr_slots + firstused;

	skip = 0;
	if (unlikely(slot != firstused)) {
		/* This possibly is a firmware bug and will result in
		 * malfunction, memory leaks and/or stall of DMA functionality.
		 */
		if (slot == next_slot(ring, next_slot(ring, firstused))) {
			/* If a single header/data pair was missed, skip over
			 * the first two slots in an attempt to recover.
			 */
			slot = firstused;
			skip = 2;
			if (!err_out1) {
				/* Report the error once. */
				b43dbg(dev->wl,
				       "Skip on DMA ring %d slot %d.\n",
				       ring->index, slot);
				err_out1 = 1;
			}
		} else {
			/* More than a single header/data pair were missed.
			 * Report this error once.
			 */
			if (!err_out2)
				b43dbg(dev->wl,
				       "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
				       ring->index, firstused, slot);
			err_out2 = 1;
			return;
		}
	}

	ops = ring->ops;
	while (1) {
		B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
		/* get meta - ignore returned value */
		ops->idx2desc(ring, slot, &meta);

		if (b43_dma_ptr_is_poisoned(meta->skb)) {
			b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
			       "on ring %d\n",
			       slot, firstused, ring->index);
			break;
		}

		if (meta->skb) {
			struct b43_private_tx_info *priv_info =
			     b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));

			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 b43_txhdr_size(dev), 1);
		}

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;

			if (unlikely(!meta->skb)) {
				/* This is a scatter-gather fragment of a frame,
				 * so the skb pointer must not be NULL.
				 */
				b43dbg(dev->wl, "TX status unexpected NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}

			info = IEEE80211_SKB_CB(meta->skb);

			/*
			 * Call back to inform the ieee80211 subsystem about
			 * the status of the transmission. When skipping over
			 * a missed TX status report, use a status structure
			 * filled with zeros to indicate that the frame was not
			 * sent (frame_count 0) and not acknowledged.
			 */
			if (unlikely(skip))
				txstat = &fake;
			else
				txstat = status;

			frame_succeed = b43_fill_txstatus_report(dev, info,
								 txstat);
#ifdef CPTCFG_B43_DEBUG
			if (frame_succeed)
				ring->nr_succeed_tx_packets++;
			else
				ring->nr_failed_tx_packets++;
			ring->nr_total_packet_tries += status->frame_count;
#endif /* DEBUG */
			ieee80211_tx_status(dev->wl->hw, meta->skb);

			/* skb will be freed by ieee80211_tx_status().
			 * Poison our pointer. */
			meta->skb = B43_DMA_PTR_POISON;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			if (unlikely(meta->skb)) {
				b43dbg(dev->wl, "TX status unexpected non-NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}
		}

		/* Everything is unmapped and freed, so the slot is not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment && !skip) {
			/* This is the last scatter-gather
			 * fragment of the frame. We are done. */
			break;
		}
		slot = next_slot(ring, slot);
		if (skip > 0)
			--skip;
	}
	if (ring->stopped) {
		B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
		ring->stopped = false;
	}

	if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
		dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
	} else {
		/* If the driver queue is running wake the corresponding
		 * mac80211 queue. */
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
		}
	}
	/* Add work to the queue. */
	ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
}
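Both b43 handlers (this example and Example #14) start from the same ring arithmetic: recover the oldest in-use slot and compare it with the slot decoded from the status cookie. A sketch of just that computation; struct my_ring is an invented stand-in for b43_dmaring:

struct my_ring {
	int nr_slots;		/* ring capacity */
	int current_slot;	/* most recently filled slot */
	int used_slots;		/* number of slots in flight */
};

static int my_ring_first_used(const struct my_ring *ring)
{
	int firstused = ring->current_slot - ring->used_slots + 1;

	/* Wrap around if the in-flight span crosses slot 0. */
	if (firstused < 0)
		firstused += ring->nr_slots;
	return firstused;
}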
Example #10
void ath9k_tx_tasklet(unsigned long data)
{
	struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
	struct ieee80211_sta *sta;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct sk_buff *skb = NULL;
	__le16 fc;

	while ((skb = skb_dequeue(&priv->tx_queue)) != NULL) {

		hdr = (struct ieee80211_hdr *) skb->data;
		fc = hdr->frame_control;
		tx_info = IEEE80211_SKB_CB(skb);

		memset(&tx_info->status, 0, sizeof(tx_info->status));

		rcu_read_lock();

		sta = ieee80211_find_sta(priv->vif, hdr->addr1);
		if (!sta) {
			rcu_read_unlock();
			ieee80211_tx_status(priv->hw, skb);
			continue;
		}

		/* Check if we need to start aggregation */

		if (sta && conf_is_ht(&priv->hw->conf) &&
		    !(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
			if (ieee80211_is_data_qos(fc)) {
				u8 *qc, tid;
				struct ath9k_htc_sta *ista;

				qc = ieee80211_get_qos_ctl(hdr);
				tid = qc[0] & 0xf;
				ista = (struct ath9k_htc_sta *)sta->drv_priv;

				if (ath9k_htc_check_tx_aggr(priv, ista, tid)) {
					ieee80211_start_tx_ba_session(sta, tid);
					spin_lock_bh(&priv->tx_lock);
					ista->tid_state[tid] = AGGR_PROGRESS;
					spin_unlock_bh(&priv->tx_lock);
				}
			}
		}

		rcu_read_unlock();

		/* Send status to mac80211 */
		ieee80211_tx_status(priv->hw, skb);
	}

	/* Wake TX queues if needed */
	spin_lock_bh(&priv->tx_lock);
	if (priv->tx_queues_stop) {
		priv->tx_queues_stop = false;
		spin_unlock_bh(&priv->tx_lock);
		ath_print(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
			  "Waking up TX queues\n");
		ieee80211_wake_queues(priv->hw);
		return;
	}
	spin_unlock_bh(&priv->tx_lock);
}
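Note the RCU discipline around ieee80211_find_sta() above: the returned station pointer is only valid inside the read-side critical section. A minimal sketch of such a lookup (my_peer_exists is an invented helper):

#include <net/mac80211.h>

/* Sketch: look up the station a frame was addressed to, under RCU.
 * The sta pointer must not be dereferenced after rcu_read_unlock(). */
static bool my_peer_exists(struct ieee80211_vif *vif, const u8 *addr)
{
	struct ieee80211_sta *sta;
	bool found;

	rcu_read_lock();
	sta = ieee80211_find_sta(vif, addr);
	found = (sta != NULL);
	rcu_read_unlock();

	return found;
}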
Example #11
void pcie_tx_xmit_ndp(struct ieee80211_hw *hw,
		      struct ieee80211_tx_control *control,
		      struct sk_buff *skb)
{
	struct mwl_priv *priv = hw->priv;
	struct pcie_priv *pcie_priv = priv->hif.priv;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_key_conf *k_conf;
	struct mwl_vif *mwl_vif;
	int index;
	struct ieee80211_sta *sta;
	struct mwl_sta *sta_info;
	struct ieee80211_hdr *wh;
	u8 *da;
	u16 qos;
	u8 tid = 0;
	struct mwl_ampdu_stream *stream = NULL;
	u16 tx_que_priority;
	bool mgmtframe = false;
	struct ieee80211_mgmt *mgmt;
	bool eapol_frame = false;
	bool start_ba_session = false;
	struct pcie_tx_ctrl_ndp *tx_ctrl;

	tx_info = IEEE80211_SKB_CB(skb);
	k_conf = tx_info->control.hw_key;
	mwl_vif = mwl_dev_get_vif(tx_info->control.vif);
	index = skb_get_queue_mapping(skb);
	sta = control->sta;
	sta_info = sta ? mwl_dev_get_sta(sta) : NULL;

	wh = (struct ieee80211_hdr *)skb->data;

	if (ieee80211_is_data_qos(wh->frame_control))
		qos = le16_to_cpu(*((__le16 *)ieee80211_get_qos_ctl(wh)));
	else
		qos = 0xFFFF;

	if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
		index = IEEE80211_AC_VO;
		eapol_frame = true;
	}

	if (ieee80211_is_mgmt(wh->frame_control)) {
		mgmtframe = true;
		mgmt = (struct ieee80211_mgmt *)skb->data;
	}

	if (mgmtframe) {
		u16 capab;

		if (unlikely(ieee80211_is_action(wh->frame_control) &&
			     mgmt->u.action.category == WLAN_CATEGORY_BACK &&
			     mgmt->u.action.u.addba_req.action_code ==
			     WLAN_ACTION_ADDBA_REQ)) {
			capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
			tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
			index = utils_tid_to_ac(tid);
		}

		if (unlikely(ieee80211_is_assoc_req(wh->frame_control)))
			utils_add_basic_rates(hw->conf.chandef.chan->band, skb);

		if (ieee80211_is_probe_req(wh->frame_control) ||
		    ieee80211_is_probe_resp(wh->frame_control))
			tx_que_priority = PROBE_RESPONSE_TXQNUM;
		else {
			if ((
			    (mwl_vif->macid == SYSADPT_NUM_OF_AP) &&
			    (!ieee80211_has_protected(wh->frame_control) ||
			    (ieee80211_has_protected(wh->frame_control) &&
			    ieee80211_is_auth(wh->frame_control)))
			    ) ||
			    !sta ||
			    ieee80211_is_auth(wh->frame_control) ||
			    ieee80211_is_assoc_req(wh->frame_control) ||
			    ieee80211_is_assoc_resp(wh->frame_control))
				tx_que_priority = MGMT_TXQNUM;
			else {
				if (is_multicast_ether_addr(wh->addr1) &&
				    (mwl_vif->macid != SYSADPT_NUM_OF_AP))
					tx_que_priority = mwl_vif->macid *
						SYSADPT_MAX_TID;
				else
					tx_que_priority = SYSADPT_MAX_TID *
						(sta_info->stnid +
						QUEUE_STAOFFSET) + 6;
			}
		}

		if (ieee80211_is_assoc_resp(wh->frame_control) ||
		    ieee80211_is_reassoc_resp(wh->frame_control)) {
			struct sk_buff *ack_skb;
			struct ieee80211_tx_info *ack_info;

			ack_skb = skb_copy(skb, GFP_ATOMIC);
			ack_info = IEEE80211_SKB_CB(ack_skb);
			pcie_tx_prepare_info(priv, 0, ack_info);
			ieee80211_tx_status(hw, ack_skb);
		}

		pcie_tx_encapsulate_frame(priv, skb, k_conf, NULL);
	} else {
Example #12
void pcie_tx_done_ndp(struct ieee80211_hw *hw)
{
	struct mwl_priv *priv = hw->priv;
	struct pcie_priv *pcie_priv = priv->hif.priv;
	struct pcie_desc_data_ndp *desc = &pcie_priv->desc_data_ndp;
	u32 tx_done_head, tx_done_tail;
	struct tx_ring_done *ptx_ring_done;
	u32 index;
	struct sk_buff *skb;
	struct ieee80211_tx_info *tx_info;
	struct pcie_tx_ctrl_ndp *tx_ctrl;
	struct pcie_dma_data *dma_data;
	u16 hdrlen;

	spin_lock_bh(&pcie_priv->tx_desc_lock);

	tx_done_head = readl(pcie_priv->iobase1 +
			     MACREG_REG_TXDONEHEAD);
	tx_done_tail = desc->tx_done_tail & (MAX_TX_RING_DONE_SIZE - 1);
	tx_done_head &= (MAX_TX_RING_DONE_SIZE - 1);

	while (tx_done_head != tx_done_tail) {
		ptx_ring_done = &desc->ptx_ring_done[tx_done_tail];

		index = le32_to_cpu(ptx_ring_done->user);
		ptx_ring_done->user = 0;
		if (index >= MAX_TX_RING_SEND_SIZE) {
			wiphy_err(hw->wiphy,
				  "corruption for index of buffer\n");
			break;
		}
		skb = desc->tx_vbuflist[index];
		if (!skb) {
			wiphy_err(hw->wiphy,
				  "buffer is NULL for tx done ring\n");
			break;
		}
		pci_unmap_single(pcie_priv->pdev,
				 desc->pphys_tx_buflist[index],
				 skb->len,
				 PCI_DMA_TODEVICE);
		desc->pphys_tx_buflist[index] = 0;
		desc->tx_vbuflist[index] = NULL;

		tx_info = IEEE80211_SKB_CB(skb);
		tx_ctrl = (struct pcie_tx_ctrl_ndp *)
			tx_info->status.status_driver_data;

		if (tx_ctrl->flags & TX_CTRL_TYPE_DATA) {
			dev_kfree_skb_any(skb);
			goto bypass_ack;
		} else {
			/* Remove H/W dma header */
			dma_data = (struct pcie_dma_data *)skb->data;

			if (ieee80211_is_assoc_resp(
			    dma_data->wh.frame_control) ||
			    ieee80211_is_reassoc_resp(
			    dma_data->wh.frame_control)) {
				dev_kfree_skb_any(skb);
				goto bypass_ack;
			}
			hdrlen = ieee80211_hdrlen(
				dma_data->wh.frame_control);
			memmove(dma_data->data - hdrlen,
				&dma_data->wh, hdrlen);
			skb_pull(skb, sizeof(*dma_data) - hdrlen);
		}

		pcie_tx_prepare_info(priv, 0, tx_info);
		ieee80211_tx_status(hw, skb);

bypass_ack:
		if (++tx_done_tail >= MAX_TX_RING_DONE_SIZE)
			tx_done_tail = 0;
		desc->tx_desc_busy_cnt--;
	}

	writel(tx_done_tail, pcie_priv->iobase1 +
	       MACREG_REG_TXDONETAIL);
	desc->tx_done_tail = tx_done_tail;

	spin_unlock_bh(&pcie_priv->tx_desc_lock);
}
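Stripped of the unmapping and header fixups, the loop above is a classic done-ring walk: the device advances a head index as it completes frames, and the driver consumes entries from its tail up to that head, then publishes the new tail. A sketch under invented register offsets and field names:

#include <linux/io.h>

#define MY_RING_SIZE	 256	/* must be a power of two for the masking */
#define MY_REG_DONE_HEAD 0x0100	/* illustrative offsets */
#define MY_REG_DONE_TAIL 0x0104

struct my_done_dev {
	void __iomem *iobase;
	u32 done_tail;
};

static void my_complete_entry(struct my_done_dev *dev, u32 index)
{
	/* Unmap the entry's DMA buffer and hand its skb back via
	 * ieee80211_tx_status(); elided in this sketch. */
}

static void my_drain_done_ring(struct my_done_dev *dev)
{
	u32 head = readl(dev->iobase + MY_REG_DONE_HEAD) & (MY_RING_SIZE - 1);
	u32 tail = dev->done_tail & (MY_RING_SIZE - 1);

	while (tail != head) {
		my_complete_entry(dev, tail);
		if (++tail >= MY_RING_SIZE)
			tail = 0;
	}

	/* Tell the device how far we have consumed. */
	writel(tail, dev->iobase + MY_REG_DONE_TAIL);
	dev->done_tail = tail;
}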
Example #13
static inline int pcie_tx_skb_ndp(struct mwl_priv *priv,
				  struct sk_buff *tx_skb)
{
	struct pcie_priv *pcie_priv = priv->hif.priv;
	struct pcie_desc_data_ndp *desc = &pcie_priv->desc_data_ndp;
	u32 tx_send_tail;
	u32 tx_send_head_new;
	struct ieee80211_tx_info *tx_info;
	struct pcie_tx_ctrl_ndp *tx_ctrl;
	struct pcie_tx_desc_ndp *pnext_tx_desc;
	struct ieee80211_hdr *wh;
	u32 ctrl = 0;
	dma_addr_t dma;

	spin_lock_bh(&pcie_priv->tx_desc_lock);

	tx_send_tail = desc->tx_sent_tail;
	tx_send_head_new = desc->tx_sent_head;

	if (((tx_send_head_new + 1) & (MAX_NUM_TX_DESC-1)) == tx_send_tail) {
		/* Update the tx_send_tail */
		tx_send_tail = readl(pcie_priv->iobase1 +
				     MACREG_REG_TXSEDNTAIL);
		desc->tx_sent_tail = tx_send_tail;

		if (((tx_send_head_new + 1) & (MAX_NUM_TX_DESC-1)) ==
		    tx_send_tail) {
			spin_unlock_bh(&pcie_priv->tx_desc_lock);
			return -EAGAIN;
		}
	}

	tx_info = IEEE80211_SKB_CB(tx_skb);
	tx_ctrl = (struct pcie_tx_ctrl_ndp *)tx_info->status.status_driver_data;
	pnext_tx_desc = &desc->ptx_ring[tx_send_head_new];

	if (tx_ctrl->flags & TX_CTRL_TYPE_DATA) {
		wh = (struct ieee80211_hdr *)tx_skb->data;

		skb_pull(tx_skb, tx_ctrl->hdrlen);
		ether_addr_copy(pnext_tx_desc->u.sa,
				ieee80211_get_SA(wh));
		ether_addr_copy(pnext_tx_desc->u.da,
				ieee80211_get_DA(wh));

		if (tx_ctrl->flags & TX_CTRL_EAPOL)
			ctrl = TXRING_CTRL_TAG_EAP << TXRING_CTRL_TAG_SHIFT;
		if (tx_ctrl->flags & TX_CTRL_TCP_ACK) {
			pnext_tx_desc->tcp_dst_src =
				cpu_to_le32(tx_ctrl->tcp_dst_src);
			pnext_tx_desc->tcp_sn = cpu_to_le32(tx_ctrl->tcp_sn);
			ctrl = TXRING_CTRL_TAG_TCP_ACK << TXRING_CTRL_TAG_SHIFT;
		}
		ctrl |= (((tx_ctrl->tx_que_priority & TXRING_CTRL_QID_MASK) <<
			TXRING_CTRL_QID_SHIFT) |
			((tx_skb->len & TXRING_CTRL_LEN_MASK) <<
			TXRING_CTRL_LEN_SHIFT));
	} else {
		/* Assign the rate code; use the legacy 6 Mbps rate. */
		pnext_tx_desc->u.rate_code = cpu_to_le16(RATECODE_TYPE_LEGACY +
			(0 << RATECODE_MCS_SHIFT) + RATECODE_BW_20MHZ);
		pnext_tx_desc->u.max_retry = 5;

		ctrl = (((tx_ctrl->tx_que_priority & TXRING_CTRL_QID_MASK) <<
			TXRING_CTRL_QID_SHIFT) |
			(((tx_skb->len - sizeof(struct pcie_dma_data)) &
			TXRING_CTRL_LEN_MASK) << TXRING_CTRL_LEN_SHIFT) |
			(TXRING_CTRL_TAG_MGMT << TXRING_CTRL_TAG_SHIFT));
	}

	dma = pci_map_single(pcie_priv->pdev, tx_skb->data,
			     tx_skb->len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pcie_priv->pdev, dma)) {
		dev_kfree_skb_any(tx_skb);
		wiphy_err(priv->hw->wiphy,
			  "failed to map pci memory!\n");
		spin_unlock_bh(&pcie_priv->tx_desc_lock);
		return -EIO;
	}

	pnext_tx_desc->data = cpu_to_le32(dma);
	pnext_tx_desc->ctrl = cpu_to_le32(ctrl);
	pnext_tx_desc->user = cpu_to_le32(pcie_tx_set_skb(priv, tx_skb, dma));

	if ((tx_ctrl->flags & TX_CTRL_TYPE_DATA) &&
	    (tx_ctrl->rate != 0)) {
		skb_push(tx_skb, tx_ctrl->hdrlen);
		skb_get(tx_skb);
		pcie_tx_prepare_info(priv, tx_ctrl->rate, tx_info);
		tx_ctrl->flags |= TX_CTRL_TYPE_DATA;
		ieee80211_tx_status(priv->hw, tx_skb);
	}

	if (++tx_send_head_new >= MAX_NUM_TX_DESC)
		tx_send_head_new = 0;
	desc->tx_sent_head = tx_send_head_new;
	wmb(); /*Data Memory Barrier*/
	writel(tx_send_head_new, pcie_priv->iobase1 + MACREG_REG_TXSENDHEAD);
	desc->tx_desc_busy_cnt++;

	spin_unlock_bh(&pcie_priv->tx_desc_lock);

	return 0;
}
Example #14
void b43_dma_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	const struct b43_dma_ops *ops;
	struct b43_dmaring *ring;
	struct b43_dmadesc_meta *meta;
	int slot, firstused;
	bool frame_succeed;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43_WARN_ON(!ring->tx);

	/* Sanity check: TX packets are processed in-order on one ring.
	 * Check if the slot deduced from the cookie really is the first
	 * used slot. */
	firstused = ring->current_slot - ring->used_slots + 1;
	if (firstused < 0)
		firstused = ring->nr_slots + firstused;
	if (unlikely(slot != firstused)) {
		/* This possibly is a firmware bug and will result in
		 * malfunction, memory leaks and/or stall of DMA functionality. */
		b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. "
		       "Expected %d, but got %d\n",
		       ring->index, firstused, slot);
		return;
	}

	ops = ring->ops;
	while (1) {
		B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
		/* get meta - ignore returned value */
		ops->idx2desc(ring, slot, &meta);

		if (b43_dma_ptr_is_poisoned(meta->skb)) {
			b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
			       "on ring %d\n",
			       slot, firstused, ring->index);
			break;
		}
		if (meta->skb) {
			struct b43_private_tx_info *priv_info =
				b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));

			unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 b43_txhdr_size(dev), 1);
		}

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;

			if (unlikely(!meta->skb)) {
				/* This is a scatter-gather fragment of a frame, so
				 * the skb pointer must not be NULL. */
				b43dbg(dev->wl, "TX status unexpected NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}

			info = IEEE80211_SKB_CB(meta->skb);

			/*
			 * Call back to inform the ieee80211 subsystem about
			 * the status of the transmission.
			 */
			frame_succeed = b43_fill_txstatus_report(dev, info, status);
#ifdef CONFIG_B43_DEBUG
			if (frame_succeed)
				ring->nr_succeed_tx_packets++;
			else
				ring->nr_failed_tx_packets++;
			ring->nr_total_packet_tries += status->frame_count;
#endif /* DEBUG */
			ieee80211_tx_status(dev->wl->hw, meta->skb);

			/* skb will be freed by ieee80211_tx_status().
			 * Poison our pointer. */
			meta->skb = B43_DMA_PTR_POISON;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			if (unlikely(meta->skb)) {
				b43dbg(dev->wl, "TX status unexpected non-NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}
		}

		/* Everything is unmapped and freed, so the slot is not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment) {
			/* This is the last scatter-gather
			 * fragment of the frame. We are done. */
			break;
		}
		slot = next_slot(ring, slot);
	}
	if (ring->stopped) {
		B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		ring->stopped = 0;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
		}
	}
}
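Finally, a pattern shared by Examples #4, #9, #10 and #14: once completions free up ring space, a driver that had stopped a mac80211 queue for back-pressure wakes it again. A minimal sketch with assumed per-queue fields:

#include <net/mac80211.h>

/* Invented per-queue state standing in for the drivers' own
 * (b43_dmaring, ath9k_htc_priv, ...). */
struct my_txq {
	bool stopped;		/* did we call ieee80211_stop_queue()? */
	int queue_prio;		/* mac80211 queue index */
	int free_slots;		/* room left in the hardware ring */
	int threshold;		/* wake once free_slots exceeds this */
};

static void my_maybe_wake(struct ieee80211_hw *hw, struct my_txq *q)
{
	if (q->stopped && q->free_slots > q->threshold) {
		ieee80211_wake_queue(hw, q->queue_prio);
		q->stopped = false;
	}
}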