Example no. 1
static struct sk_buff *carl9170_rx_copy_data(u8 *buf, int len)
{
	struct sk_buff *skb;
	int reserved = 0;
	struct ieee80211_hdr *hdr = (void *) buf;

	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		reserved += NET_IP_ALIGN;

		if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
			reserved += NET_IP_ALIGN;
	}

	if (ieee80211_has_a4(hdr->frame_control))
		reserved += NET_IP_ALIGN;

	/* the 2-byte shifts above cancel pairwise, so keep only the net
	 * misalignment and add 32 bytes of headroom
	 */
	reserved = 32 + (reserved & NET_IP_ALIGN);

	skb = dev_alloc_skb(len + reserved);
	if (likely(skb)) {
		skb_reserve(skb, reserved);
		memcpy(skb_put(skb, len), buf, len);
	}

	return skb;
}
Example no. 2
/*
 * Sets most of the Tx cmd's fields
 */
void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
			struct iwl_tx_cmd *tx_cmd,
			struct ieee80211_tx_info *info, u8 sta_id)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	__le16 fc = hdr->frame_control;
	u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
	u32 len = skb->len + FCS_LEN;
	u8 ac;

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		tx_flags |= TX_CMD_FLG_ACK;
	else
		tx_flags &= ~TX_CMD_FLG_ACK;

	if (ieee80211_is_probe_resp(fc))
		tx_flags |= TX_CMD_FLG_TSF;

	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
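		/* the low four bits of the QoS control field carry the TID */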
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
	} else if (ieee80211_is_back_req(fc)) {
		struct ieee80211_bar *bar = (void *)skb->data;
		u16 control = le16_to_cpu(bar->control);

		tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
		tx_cmd->tid_tspec = (control &
				     IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
			IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
		WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);
	} else {
		tx_cmd->tid_tspec = IWL_TID_NON_QOS;
		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
			tx_flags |= TX_CMD_FLG_SEQ_CTL;
		else
			tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
	}

	/* ... snippet truncated; the function continues as in
	 * Example no. 22 below ...
	 */
}
Example no. 3
void ieee80211_select_queue(struct ieee80211_local *local, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	u16 queue;
	u8 tid;

	queue = classify80211(local, skb);
	if (unlikely(queue >= local->hw.queues))
		queue = local->hw.queues - 1;

	/*
	 * Now we know the 1d priority, fill in the QoS header if
	 * there is one (and we haven't done this before).
	 */
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *p = ieee80211_get_qos_ctl(hdr);
		u8 ack_policy = 0;
		tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
		if (unlikely(local->wifi_wme_noack_test))
			ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK <<
					QOS_CONTROL_ACK_POLICY_SHIFT;
		/* qos header is 2 bytes, second reserved */
		*p++ = ack_policy | tid;
		*p = 0;
	}

#if 0 /* Not in RHEL5... */
	skb_set_queue_mapping(skb, queue);
#else
	ieee80211_set_queue_mapping(skb, queue);
#endif
}
Example no. 4
static unsigned int _rtl_rx_get_padding(struct ieee80211_hdr *hdr,
					unsigned int len)
{
#if NET_IP_ALIGN != 0
	unsigned int padding = 0;
#endif

	/* make function no-op when possible */
	if (NET_IP_ALIGN == 0 || len < sizeof(*hdr))
		return 0;

#if NET_IP_ALIGN != 0
	/* alignment calculation as in lbtf_rx() / carl9170_rx_copy_data() */
	/* TODO: deduplicate common code, define helper function instead? */
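	/* the QoS control field (2 bytes), an A4 address (6 bytes) and an
	 * A-MSDU subframe header (14 bytes) each change the payload
	 * alignment by 2 mod 4, so XORing with NET_IP_ALIGN tracks the
	 * net misalignment
	 */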

	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);

		padding ^= NET_IP_ALIGN;

		/* Input might be invalid, avoid accessing memory outside
		 * the buffer.
		 */
		if ((unsigned long)qc - (unsigned long)hdr < len &&
		    *qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
			padding ^= NET_IP_ALIGN;
	}

	if (ieee80211_has_a4(hdr->frame_control))
		padding ^= NET_IP_ALIGN;

	return padding;
#endif
}
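
A possible answer to the TODO above: a minimal sketch of a shared helper, assuming the name ieee80211_rx_pad_len (hypothetical, not an existing mac80211 API), that lbtf, carl9170 and rtlwifi could all call:

static inline unsigned int ieee80211_rx_pad_len(struct ieee80211_hdr *hdr,
						unsigned int len)
{
	unsigned int padding = 0;

	/* no-op on architectures that need no IP header alignment */
	if (NET_IP_ALIGN == 0 || len < sizeof(*hdr))
		return 0;

	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);

		padding ^= NET_IP_ALIGN;

		/* input might be invalid, avoid reading past the buffer */
		if ((unsigned long)qc - (unsigned long)hdr < len &&
		    *qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
			padding ^= NET_IP_ALIGN;
	}

	if (ieee80211_has_a4(hdr->frame_control))
		padding ^= NET_IP_ALIGN;

	return padding;
}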
Example no. 5
/*
 * handle build REPLY_TX command notification.
 */
static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
				      struct sk_buff *skb,
				      struct iwl_tx_cmd *tx_cmd,
				      struct ieee80211_tx_info *info,
				      struct ieee80211_hdr *hdr, u8 sta_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		tx_flags |= TX_CMD_FLG_ACK_MSK;
	else
		tx_flags &= ~TX_CMD_FLG_ACK_MSK;

	if (ieee80211_is_probe_resp(fc))
		tx_flags |= TX_CMD_FLG_TSF_MSK;
	else if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
	else if (info->band == IEEE80211_BAND_2GHZ &&
		 cfg(priv)->bt_params &&
		 cfg(priv)->bt_params->advanced_bt_coexist &&
		 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
		 ieee80211_is_reassoc_req(fc) ||
		 skb->protocol == cpu_to_be16(ETH_P_PAE)))
		tx_flags |= TX_CMD_FLG_IGNORE_BT;


	tx_cmd->sta_id = sta_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		else
			tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	}

	iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags);

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}
Example no. 6
File: wme.c Project: 7799/linux
/**
 * ieee80211_set_qos_hdr - Fill in the QoS header if there is one.
 *
 * @sdata: local subif
 * @skb: packet to be updated
 */
void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
			   struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 *p;
	u8 ack_policy, tid;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return;

	p = ieee80211_get_qos_ctl(hdr);
	tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;

	/* preserve EOSP bit */
	ack_policy = *p & IEEE80211_QOS_CTL_EOSP;

	if (is_multicast_ether_addr(hdr->addr1) ||
	    sdata->noack_map & BIT(tid)) {
		ack_policy |= IEEE80211_QOS_CTL_ACK_POLICY_NOACK;
		info->flags |= IEEE80211_TX_CTL_NO_ACK;
	}

	/* qos header is 2 bytes */
	*p++ = ack_policy | tid;
	if (ieee80211_vif_is_mesh(&sdata->vif)) {
		/* preserve RSPI and Mesh PS Level bit */
		*p &= ((IEEE80211_QOS_CTL_RSPI |
			IEEE80211_QOS_CTL_MESH_PS_LEVEL) >> 8);

		/* Nulls don't have a mesh header (frame body) */
		if (!ieee80211_is_qos_nullfunc(hdr->frame_control))
			*p |= (IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT >> 8);
	} else {
		*p = 0;
	}
}
Example no. 7
/* This function only applies for first msdu in an msdu chain */
static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
{
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
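		/* bit 7 of the first QoS control byte is the A-MSDU
		 * present flag (IEEE80211_QOS_CTL_A_MSDU_PRESENT)
		 */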
		if (qc[0] & 0x80)
			return true;
	}
	return false;
}
Example no. 8
/*
 * returns true if a packet outside BA session is a duplicate and
 * should be dropped
 */
static bool iwl_mvm_is_nonagg_dup(struct ieee80211_sta *sta, int queue,
				  struct ieee80211_rx_status *rx_status,
				  struct ieee80211_hdr *hdr,
				  struct iwl_rx_mpdu_desc *desc)
{
	struct iwl_mvm_sta *mvm_sta;
	struct iwl_mvm_rxq_dup_data *dup_data;
	u8 baid, tid, sub_frame_idx;

	if (WARN_ON(IS_ERR_OR_NULL(sta)))
		return false;

	baid = (le32_to_cpu(desc->reorder_data) &
		IWL_RX_MPDU_REORDER_BAID_MASK) >>
		IWL_RX_MPDU_REORDER_BAID_SHIFT;

	if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
		return false;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	dup_data = &mvm_sta->dup_data[queue];

	/*
	 * Drop duplicate 802.11 retransmissions
	 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
	 */
	if (ieee80211_is_ctl(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1)) {
		rx_status->flag |= RX_FLAG_DUP_VALIDATED;
		return false;
	}

	if (ieee80211_is_data_qos(hdr->frame_control))
		/* frame has qos control */
		tid = *ieee80211_get_qos_ctl(hdr) &
			IEEE80211_QOS_CTL_TID_MASK;
	else
		tid = IWL_MAX_TID_COUNT;

	/* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */
	sub_frame_idx = desc->amsdu_info & IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;

	if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
		     dup_data->last_seq[tid] == hdr->seq_ctrl &&
		     dup_data->last_sub_frame[tid] >= sub_frame_idx))
		return true;

	dup_data->last_seq[tid] = hdr->seq_ctrl;
	dup_data->last_sub_frame[tid] = sub_frame_idx;

	rx_status->flag |= RX_FLAG_DUP_VALIDATED;

	return false;
}
Example no. 9
static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw,
				   struct ieee80211_sta *sta,
				   struct sk_buff *skb,
				   u16 hw_queue)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct rtl_tx_desc *pdesc = NULL;
	struct rtl_tcb_desc tcb_desc;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
	__le16 fc = hdr->frame_control;
	u8 *pda_addr = hdr->addr1;
	/* ssn */
	u8 *qc = NULL;
	u8 tid = 0;
	u16 seq_number = 0;

	memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
	if (ieee80211_is_auth(fc)) {
		RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n");
		rtl_ips_nic_on(hw);
	}

	if (rtlpriv->psc.sw_ps_enabled) {
		if (ieee80211_is_data(fc) && !ieee80211_is_nullfunc(fc) &&
		    !ieee80211_has_pm(fc))
			hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
	}

	rtl_action_proc(hw, skb, true);
	if (is_multicast_ether_addr(pda_addr))
		rtlpriv->stats.txbytesmulticast += skb->len;
	else if (is_broadcast_ether_addr(pda_addr))
		rtlpriv->stats.txbytesbroadcast += skb->len;
	else
		rtlpriv->stats.txbytesunicast += skb->len;
	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
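		/* advance the per-TID sequence number kept in bits 4-15
		 * of the sequence control field
		 */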
		seq_number = (le16_to_cpu(hdr->seq_ctrl) &
			     IEEE80211_SCTL_SEQ) >> 4;
		seq_number += 1;
		seq_number <<= 4;
	}
	rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, NULL, info, sta, skb,
					hw_queue, &tcb_desc);
	if (!ieee80211_has_morefrags(hdr->frame_control)) {
		if (qc)
			mac->tids[tid].seq_number = seq_number;
	}
	if (ieee80211_is_data(fc))
		rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
}
Example no. 10
/*
 *	increment traffic load value for tid and also remove
 *	any old values if passed the certain time period
 */
static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data,
			   struct ieee80211_hdr *hdr)
{
	u32 curr_time = jiffies_to_msecs(jiffies);
	u32 time_diff;
	s32 index;
	struct iwl_traffic_load *tl = NULL;
	u8 tid;

	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & 0xf;
	} else
		return IWL_MAX_TID_COUNT;

	if (unlikely(tid >= IWL_MAX_TID_COUNT))
		return IWL_MAX_TID_COUNT;

	tl = &lq_data->load[tid];

	curr_time -= curr_time % TID_ROUND_VALUE;

	/* Happens only for the first packet. Initialize the data */
	if (!(tl->queue_count)) {
		tl->total = 1;
		tl->time_stamp = curr_time;
		tl->queue_count = 1;
		tl->head = 0;
		tl->packet_count[0] = 1;
		return IWL_MAX_TID_COUNT;
	}

	/* milliseconds elapsed since this TID's window started,
	 * safe across counter wrap-around
	 */
	time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
	index = time_diff / TID_QUEUE_CELL_SPACING;

	/* The history is too long: remove data that is older than */
	/* TID_MAX_TIME_DIFF */
	if (index >= TID_QUEUE_MAX_SIZE)
		rs_tl_rm_old_stats(tl, curr_time);

	index = (tl->head + index) % TID_QUEUE_MAX_SIZE;
	tl->packet_count[index] = tl->packet_count[index] + 1;
	tl->total = tl->total + 1;

	if ((index + 1) > tl->queue_count)
		tl->queue_count = index + 1;

	return tid;
}
Example no. 11
void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;

	/* Fill in the QoS header if there is one. */
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *p = ieee80211_get_qos_ctl(hdr);
		u8 ack_policy = 0, tid;

		tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;

		if (unlikely(local->wifi_wme_noack_test))
			ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK <<
					QOS_CONTROL_ACK_POLICY_SHIFT;
		/* qos header is 2 bytes, second reserved */
		*p++ = ack_policy | tid;
		*p = 0;
	}
}
Example no. 12
/**
 * ieee80211_set_qos_hdr - Fill in the QoS header if there is one.
 *
 * @sdata: local subif
 * @skb: packet to be updated
 */
void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
			   struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
	u8 flags;
	u8 *p;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return;

	p = ieee80211_get_qos_ctl(hdr);

	/* set up the first byte */

	/*
	 * preserve everything but the TID and ACK policy
	 * (which we both write here)
	 */
	flags = *p & ~(IEEE80211_QOS_CTL_TID_MASK |
		       IEEE80211_QOS_CTL_ACK_POLICY_MASK);

	if (is_multicast_ether_addr(hdr->addr1) ||
	    sdata->noack_map & BIT(tid)) {
		flags |= IEEE80211_QOS_CTL_ACK_POLICY_NOACK;
		info->flags |= IEEE80211_TX_CTL_NO_ACK;
	}

	*p = flags | tid;

	/* set up the second byte */
	p++;

	if (ieee80211_vif_is_mesh(&sdata->vif)) {
		/* preserve RSPI and Mesh PS Level bit */
		*p &= ((IEEE80211_QOS_CTL_RSPI |
			IEEE80211_QOS_CTL_MESH_PS_LEVEL) >> 8);

		/* Nulls don't have a mesh header (frame body) */
		if (!ieee80211_is_qos_nullfunc(hdr->frame_control))
			*p |= (IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT >> 8);
	} else {
		*p = 0;
	}
}
Example no. 13
static
struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
					    struct iwl_txq *txq,
					    struct iwl_device_cmd *dev_cmd,
					    struct sk_buff *skb,
					    struct iwl_cmd_meta *out_meta)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
	struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
	int len, hdr_len;
	bool amsdu;

	/* There must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);

	memset(tfd, 0, sizeof(*tfd));

	if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
		len = sizeof(struct iwl_tx_cmd_gen2);
	else
		len = sizeof(struct iwl_tx_cmd_gen3);

	amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
			(*ieee80211_get_qos_ctl(hdr) &
			 IEEE80211_QOS_CTL_A_MSDU_PRESENT);

	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	/*
	 * Only build A-MSDUs here if doing so by GSO, otherwise it may be
	 * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been
	 * built in the higher layers already.
	 */
	if (amsdu && skb_shinfo(skb)->gso_size)
		return iwl_pcie_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
						    out_meta, hdr_len, len);

	return iwl_pcie_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
				      hdr_len, len, !amsdu);
}
Example no. 14
static void
minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	u16 tid;

	if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO)
		return;

	if (unlikely(!ieee80211_is_data_qos(hdr->frame_control)))
		return;

	if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))
		return;

	tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
	if (likely(sta->ampdu_mlme.tid_tx[tid]))
		return;

	/* no TX aggregation session on this TID yet: request one
	 * (the last argument is the session timeout in TUs)
	 */
	ieee80211_start_tx_ba_session(pubsta, tid, 5000);
}
Example no. 15
static void michael_mic_hdr(struct michael_mic_ctx *mctx, const u8 *key,
			    struct ieee80211_hdr *hdr)
{
	u8 *da, *sa, tid;

	da = ieee80211_get_DA(hdr);
	sa = ieee80211_get_SA(hdr);
	if (ieee80211_is_data_qos(hdr->frame_control))
		tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
	else
		tid = 0;

	mctx->l = get_unaligned_le32(key);
	mctx->r = get_unaligned_le32(key + 4);

	
	michael_block(mctx, get_unaligned_le32(da));
	michael_block(mctx, get_unaligned_le16(&da[4]) |
			    (get_unaligned_le16(sa) << 16));
	michael_block(mctx, get_unaligned_le32(&sa[2]));
	michael_block(mctx, tid);
}
Example no. 16
File: wme.c Project: 7799/linux
/* Indicate which queue to use for this fully formed 802.11 frame */
u16 ieee80211_select_queue_80211(struct ieee80211_sub_if_data *sdata,
				 struct sk_buff *skb,
				 struct ieee80211_hdr *hdr)
{
	struct ieee80211_local *local = sdata->local;
	u8 *p;

	if (local->hw.queues < IEEE80211_NUM_ACS)
		return 0;

	/* non-data frames get the highest 802.1d priority, which maps to AC_VO */
	if (!ieee80211_is_data(hdr->frame_control)) {
		skb->priority = 7;
		return ieee802_1d_to_ac[skb->priority];
	}
	if (!ieee80211_is_data_qos(hdr->frame_control)) {
		skb->priority = 0;
		return ieee802_1d_to_ac[skb->priority];
	}

	p = ieee80211_get_qos_ctl(hdr);
	skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;

	return ieee80211_downgrade_queue(sdata, skb);
}
Example no. 17
/**
 * rsi_core_xmit() - This function transmits the packets received from mac80211
 * @common: Pointer to the driver private structure.
 * @skb: Pointer to the socket buffer structure.
 *
 * Return: None.
 */
void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb)
{
	struct rsi_hw *adapter = common->priv;
	struct ieee80211_tx_info *info;
	struct skb_info *tx_params;
	struct ieee80211_hdr *wh = NULL;
	struct ieee80211_vif *vif;
	u8 q_num, tid = 0;
	struct rsi_sta *rsta = NULL;

	if ((!skb) || (!skb->len)) {
		rsi_dbg(ERR_ZONE, "%s: Null skb/zero Length packet\n",
			__func__);
		goto xmit_fail;
	}
	if (common->fsm_state != FSM_MAC_INIT_DONE) {
		rsi_dbg(ERR_ZONE, "%s: FSM state not open\n", __func__);
		goto xmit_fail;
	}
	if (common->wow_flags & RSI_WOW_ENABLED) {
		rsi_dbg(ERR_ZONE,
			"%s: Blocking Tx_packets when WOWLAN is enabled\n",
			__func__);
		goto xmit_fail;
	}

	info = IEEE80211_SKB_CB(skb);
	tx_params = (struct skb_info *)info->driver_data;
	wh = (struct ieee80211_hdr *)&skb->data[0];
	tx_params->sta_id = 0;

	vif = rsi_get_vif(adapter, wh->addr2);
	if (!vif)
		goto xmit_fail;
	tx_params->vif = vif;
	tx_params->vap_id = ((struct vif_priv *)vif->drv_priv)->vap_id;
	if ((ieee80211_is_mgmt(wh->frame_control)) ||
	    (ieee80211_is_ctl(wh->frame_control)) ||
	    (ieee80211_is_qos_nullfunc(wh->frame_control))) {
		if (ieee80211_is_assoc_req(wh->frame_control) ||
		    ieee80211_is_reassoc_req(wh->frame_control)) {
			struct ieee80211_bss_conf *bss = &vif->bss_conf;

			common->eapol4_confirm = false;
			rsi_hal_send_sta_notify_frame(common,
						      RSI_IFTYPE_STATION,
						      STA_CONNECTED, bss->bssid,
						      bss->qos, bss->aid, 0,
						      vif);
		}

		q_num = MGMT_SOFT_Q;
		skb->priority = q_num;

		if (rsi_prepare_mgmt_desc(common, skb)) {
			rsi_dbg(ERR_ZONE, "Failed to prepare desc\n");
			goto xmit_fail;
		}
	} else {
		if (ieee80211_is_data_qos(wh->frame_control)) {
			u8 *qos = ieee80211_get_qos_ctl(wh);

			tid = *qos & IEEE80211_QOS_CTL_TID_MASK;
			skb->priority = TID_TO_WME_AC(tid);
		} else {
			tid = IEEE80211_NONQOS_TID;
			skb->priority = BE_Q;
		}

		q_num = skb->priority;
		tx_params->tid = tid;

		if (((vif->type == NL80211_IFTYPE_AP) ||
		     (vif->type == NL80211_IFTYPE_P2P_GO)) &&
		    (!is_broadcast_ether_addr(wh->addr1)) &&
		    (!is_multicast_ether_addr(wh->addr1))) {
			rsta = rsi_find_sta(common, wh->addr1);
			if (!rsta)
				goto xmit_fail;
			tx_params->sta_id = rsta->sta_id;
		} else {
			tx_params->sta_id = 0;
		}

		if (rsta) {
			/* Start aggregation if not done for this tid */
			if (!rsta->start_tx_aggr[tid]) {
				rsta->start_tx_aggr[tid] = true;
				ieee80211_start_tx_ba_session(rsta->sta,
							      tid, 0);
			}
		}
		if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
			q_num = MGMT_SOFT_Q;
			skb->priority = q_num;
		}
		if (rsi_prepare_data_desc(common, skb)) {
			rsi_dbg(ERR_ZONE, "Failed to prepare data desc\n");
			goto xmit_fail;
		}
	}

	if ((q_num < MGMT_SOFT_Q) &&
	    ((skb_queue_len(&common->tx_queue[q_num]) + 1) >=
	     DATA_QUEUE_WATER_MARK)) {
		rsi_dbg(ERR_ZONE, "%s: sw queue full\n", __func__);
		if (!ieee80211_queue_stopped(adapter->hw, WME_AC(q_num)))
			ieee80211_stop_queue(adapter->hw, WME_AC(q_num));
		rsi_set_event(&common->tx_thread.event);
		goto xmit_fail;
	}

	rsi_core_queue_pkt(common, skb);
	rsi_dbg(DATA_TX_ZONE, "%s: ===> Scheduling TX thread <===\n", __func__);
	rsi_set_event(&common->tx_thread.event);

	return;

xmit_fail:
	rsi_dbg(ERR_ZONE, "%s: Failed to queue packet\n", __func__);
	/* Dropping pkt here */
	ieee80211_free_txskb(common->priv->hw, skb);
}
Example no. 18
/*
 * Sets most of the Tx cmd's fields
 */
static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
			       struct iwl_tx_cmd *tx_cmd,
			       struct ieee80211_tx_info *info, u8 sta_id)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	__le16 fc = hdr->frame_control;
	u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
	u32 len = skb->len + FCS_LEN;

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		tx_flags |= TX_CMD_FLG_ACK;
	else
		tx_flags &= ~TX_CMD_FLG_ACK;

	if (ieee80211_is_probe_resp(fc))
		tx_flags |= TX_CMD_FLG_TSF;
	else if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;

	/* High prio packet (wrt. BT coex) if it is EAPOL, MCAST or MGMT */
	if (info->band == IEEE80211_BAND_2GHZ &&
	    (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO ||
	     is_multicast_ether_addr(hdr->addr1) ||
	     ieee80211_is_back_req(fc) || ieee80211_is_mgmt(fc)))
		tx_flags |= TX_CMD_FLG_BT_DIS;

	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
	} else {
		tx_cmd->tid_tspec = IWL_TID_NON_QOS;
		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
			tx_flags |= TX_CMD_FLG_SEQ_CTL;
		else
			tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
	}

	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->pm_frame_timeout = cpu_to_le16(2);

		/* The spec allows Action frames in A-MPDU, we don't support
		 * it
		 */
		WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
	} else if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
		tx_cmd->pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->pm_frame_timeout = 0;
	}

	if (info->flags & IEEE80211_TX_CTL_AMPDU)
		tx_flags |= TX_CMD_FLG_PROT_REQUIRE;

	if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
	    !is_multicast_ether_addr(ieee80211_get_DA(hdr)))
		tx_flags |= TX_CMD_FLG_PROT_REQUIRE;

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = cpu_to_le32(tx_flags);
	/* Total # bytes to be transmitted */
	tx_cmd->len = cpu_to_le16((u16)skb->len);
	tx_cmd->next_frame_len = 0;
	tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
	tx_cmd->sta_id = sta_id;
}
Example no. 19
/*
 * Builds the Tx cmd for this skb and hands it to the transport layer
 */
int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
		   struct ieee80211_sta *sta)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_mvm_sta *mvmsta;
	struct iwl_device_cmd *dev_cmd;
	struct iwl_tx_cmd *tx_cmd;
	__le16 fc;
	u16 seq_number = 0;
	u8 tid = IWL_MAX_TID_COUNT;
	u8 txq_id = info->hw_queue;
	bool is_data_qos = false, is_ampdu = false;

	mvmsta = (void *)sta->drv_priv;
	fc = hdr->frame_control;

	if (WARN_ON_ONCE(!mvmsta))
		return -1;

	if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
		return -1;

	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, sta, mvmsta->sta_id);
	if (!dev_cmd)
		goto drop;

	tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
	/* From now on, we cannot access info->control */

	spin_lock(&mvmsta->lock);

	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
		u8 *qc = NULL;
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
			goto drop_unlock_sta;

		seq_number = mvmsta->tid_data[tid].seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		seq_number += 0x10;
		is_data_qos = true;
		is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
	}

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, ieee80211_hdrlen(fc));

	WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);

	if (is_ampdu) {
		if (WARN_ON_ONCE(mvmsta->tid_data[tid].state != IWL_AGG_ON))
			goto drop_unlock_sta;
		txq_id = mvmsta->tid_data[tid].txq_id;
	}

	IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
		     tid, txq_id, seq_number);

	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
		goto drop_unlock_sta;

	if (is_data_qos && !ieee80211_has_morefrags(fc))
		mvmsta->tid_data[tid].seq_number = seq_number;

	spin_unlock(&mvmsta->lock);

	if (txq_id < mvm->first_agg_queue)
		atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);

	return 0;

drop_unlock_sta:
	iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
	spin_unlock(&mvmsta->lock);
drop:
	return -1;
}
Example no. 20
static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
					struct ieee80211_hdr *hdr,
					u16 len,
					u32 ampdu_status,
					struct iwl_rx_mem_buffer *rxb,
					struct ieee80211_rx_status *stats)
{
	struct sk_buff *skb;
	int ret = 0;
	__le16 fc = hdr->frame_control;

	/* We only process data packets if the interface is open */
	if (unlikely(!priv->is_open)) {
		IWL_DEBUG_DROP_LIMIT(priv,
		    "Dropping packet while interface is not open.\n");
		return;
	}

	/* In case of HW accelerated crypto and bad decryption, drop */
	if (!priv->cfg->mod_params->sw_crypto &&
	    iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
		return;

	skb = alloc_skb(IWL_LINK_HDR_MAX * 2, GFP_ATOMIC);
	if (!skb) {
		IWL_ERR(priv, "alloc_skb failed\n");
		return;
	}

	skb_reserve(skb, IWL_LINK_HDR_MAX);
	skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);

	/* mac80211 currently doesn't support paged SKBs. Convert to a
	 * linear SKB for management frames and for data frames that
	 * require software decryption or software defragmentation. */
	if (ieee80211_is_mgmt(fc) ||
	    ieee80211_has_protected(fc) ||
	    ieee80211_has_morefrags(fc) ||
	    le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG ||
	    (ieee80211_is_data_qos(fc) &&
	     *ieee80211_get_qos_ctl(hdr) &
	     IEEE80211_QOS_CONTROL_A_MSDU_PRESENT))
		ret = skb_linearize(skb);
	else
		ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
			 0 : -ENOMEM;

	if (ret) {
		kfree_skb(skb);
		goto out;
	}

	/*
	 * XXX: We cannot touch the page and its virtual memory (hdr) after
	 * here. It might have already been freed by the above skb change.
	 */

	iwl_update_stats(priv, false, fc, len);
	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	ieee80211_rx(priv->hw, skb);
 out:
	priv->alloc_rxb_page--;
	rxb->page = NULL;
}
Example no. 21
static void _rtl_usb_rx_process_agg(struct ieee80211_hw *hw,
                                    struct sk_buff *skb)
{
    struct rtl_priv *rtlpriv = rtl_priv(hw);
    u8 *rxdesc = skb->data;
    struct ieee80211_hdr *hdr;
    bool unicast = false;
    __le16 fc;
    struct ieee80211_rx_status rx_status = {0};
    struct rtl_stats stats = {
        .signal = 0,
        .rate = 0,
    };

    skb_pull(skb, RTL_RX_DESC_SIZE);
    rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb);
    skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift));
    hdr = (struct ieee80211_hdr *)(skb->data);
    fc = hdr->frame_control;
    if (!stats.crc) {
        memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));

        if (is_broadcast_ether_addr(hdr->addr1)) {
            /*TODO*/;
        } else if (is_multicast_ether_addr(hdr->addr1)) {
            /*TODO*/
        } else {
            unicast = true;
            rtlpriv->stats.rxbytesunicast +=  skb->len;
        }

        if (ieee80211_is_data(fc)) {
            rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);

            if (unicast)
                rtlpriv->link_info.num_rx_inperiod++;
        }
        /* static bcn for roaming */
        rtl_beacon_statistic(hw, skb);
    }
}

static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw,
                                      struct sk_buff *skb)
{
    struct rtl_priv *rtlpriv = rtl_priv(hw);
    u8 *rxdesc = skb->data;
    struct ieee80211_hdr *hdr;
    bool unicast = false;
    __le16 fc;
    struct ieee80211_rx_status rx_status = {0};
    struct rtl_stats stats = {
        .signal = 0,
        .rate = 0,
    };

    skb_pull(skb, RTL_RX_DESC_SIZE);
    rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb);
    skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift));
    hdr = (struct ieee80211_hdr *)(skb->data);
    fc = hdr->frame_control;
    if (!stats.crc) {
        memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));

        if (is_broadcast_ether_addr(hdr->addr1)) {
            /*TODO*/;
        } else if (is_multicast_ether_addr(hdr->addr1)) {
            /*TODO*/
        } else {
            unicast = true;
            rtlpriv->stats.rxbytesunicast +=  skb->len;
        }

        if (ieee80211_is_data(fc)) {
            rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);

            if (unicast)
                rtlpriv->link_info.num_rx_inperiod++;
        }

        /* static bcn for roaming */
        rtl_beacon_statistic(hw, skb);

        if (likely(rtl_action_proc(hw, skb, false)))
            ieee80211_rx(hw, skb);
        else
            dev_kfree_skb_any(skb);
    }
}

static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
{
    struct sk_buff *_skb;
    struct sk_buff_head rx_queue;
    struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));

    skb_queue_head_init(&rx_queue);
    if (rtlusb->usb_rx_segregate_hdl)
        rtlusb->usb_rx_segregate_hdl(hw, skb, &rx_queue);
    WARN_ON(skb_queue_empty(&rx_queue));
    while (!skb_queue_empty(&rx_queue)) {
        _skb = skb_dequeue(&rx_queue);
        _rtl_usb_rx_process_agg(hw, _skb);
        ieee80211_rx(hw, _skb);
    }
}

#define __RX_SKB_MAX_QUEUED	64

static void _rtl_rx_work(unsigned long param)
{
    struct rtl_usb *rtlusb = (struct rtl_usb *)param;
    struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf);
    struct sk_buff *skb;

    while ((skb = skb_dequeue(&rtlusb->rx_queue))) {
        if (unlikely(IS_USB_STOP(rtlusb))) {
            dev_kfree_skb_any(skb);
            continue;
        }

        if (likely(!rtlusb->usb_rx_segregate_hdl)) {
            _rtl_usb_rx_process_noagg(hw, skb);
        } else {
            /* TO DO */
            _rtl_rx_pre_process(hw, skb);
            pr_err("rx agg not supported\n");
        }
    }
}

static unsigned int _rtl_rx_get_padding(struct ieee80211_hdr *hdr,
                                        unsigned int len)
{
#if NET_IP_ALIGN != 0
    unsigned int padding = 0;
#endif

    /* make function no-op when possible */
    if (NET_IP_ALIGN == 0 || len < sizeof(*hdr))
        return 0;

#if NET_IP_ALIGN != 0
    /* alignment calculation as in lbtf_rx() / carl9170_rx_copy_data() */
    /* TODO: deduplicate common code, define helper function instead? */

    if (ieee80211_is_data_qos(hdr->frame_control)) {
        u8 *qc = ieee80211_get_qos_ctl(hdr);

        padding ^= NET_IP_ALIGN;

        /* Input might be invalid, avoid accessing memory outside
         * the buffer.
         */
        if ((unsigned long)qc - (unsigned long)hdr < len &&
                *qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
            padding ^= NET_IP_ALIGN;
    }

    if (ieee80211_has_a4(hdr->frame_control))
        padding ^= NET_IP_ALIGN;

    return padding;
#endif
}

#define __RADIO_TAP_SIZE_RSV	32

static void _rtl_rx_completed(struct urb *_urb)
{
    struct rtl_usb *rtlusb = (struct rtl_usb *)_urb->context;
    struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf);
    struct rtl_priv *rtlpriv = rtl_priv(hw);
    int err = 0;

    if (unlikely(IS_USB_STOP(rtlusb)))
        goto free;

    if (likely(0 == _urb->status)) {
        unsigned int padding;
        struct sk_buff *skb;
        unsigned int qlen;
        unsigned int size = _urb->actual_length;
        struct ieee80211_hdr *hdr;

        if (size < RTL_RX_DESC_SIZE + sizeof(struct ieee80211_hdr)) {
            RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
                     "Too short packet from bulk IN! (len: %d)\n",
                     size);
            goto resubmit;
        }

        qlen = skb_queue_len(&rtlusb->rx_queue);
        if (qlen >= __RX_SKB_MAX_QUEUED) {
            RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
                     "Pending RX skbuff queue full! (qlen: %d)\n",
                     qlen);
            goto resubmit;
        }

        hdr = (void *)(_urb->transfer_buffer + RTL_RX_DESC_SIZE);
        padding = _rtl_rx_get_padding(hdr, size - RTL_RX_DESC_SIZE);

        skb = dev_alloc_skb(size + __RADIO_TAP_SIZE_RSV + padding);
        if (!skb) {
            RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
                     "Can't allocate skb for bulk IN!\n");
            goto resubmit;
        }

        _rtl_install_trx_info(rtlusb, skb, rtlusb->in_ep);

        /* Make sure the payload data is 4 byte aligned. */
        skb_reserve(skb, padding);

        /* reserve some space for mac80211's radiotap */
        skb_reserve(skb, __RADIO_TAP_SIZE_RSV);

        memcpy(skb_put(skb, size), _urb->transfer_buffer, size);

        skb_queue_tail(&rtlusb->rx_queue, skb);
        tasklet_schedule(&rtlusb->rx_work_tasklet);

        goto resubmit;
    }

    switch (_urb->status) {
    /* disconnect */
    case -ENOENT:
    case -ECONNRESET:
    case -ENODEV:
    case -ESHUTDOWN:
        goto free;
    default:
        break;
    }

resubmit:
    usb_anchor_urb(_urb, &rtlusb->rx_submitted);
    err = usb_submit_urb(_urb, GFP_ATOMIC);
    if (unlikely(err)) {
        usb_unanchor_urb(_urb);
        goto free;
    }
    return;

free:
    /* On some architectures, usb_free_coherent must not be called from
     * hardirq context. Queue urb to cleanup list.
     */
    usb_anchor_urb(_urb, &rtlusb->rx_cleanup_urbs);
}

#undef __RADIO_TAP_SIZE_RSV

static void _rtl_usb_cleanup_rx(struct ieee80211_hw *hw)
{
    struct rtl_priv *rtlpriv = rtl_priv(hw);
    struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
    struct urb *urb;

    usb_kill_anchored_urbs(&rtlusb->rx_submitted);

    tasklet_kill(&rtlusb->rx_work_tasklet);
    cancel_work_sync(&rtlpriv->works.lps_change_work);

    flush_workqueue(rtlpriv->works.rtl_wq);
    destroy_workqueue(rtlpriv->works.rtl_wq);

    skb_queue_purge(&rtlusb->rx_queue);

    while ((urb = usb_get_from_anchor(&rtlusb->rx_cleanup_urbs))) {
        usb_free_coherent(urb->dev, urb->transfer_buffer_length,
                          urb->transfer_buffer, urb->transfer_dma);
        usb_free_urb(urb);
    }
}

static int _rtl_usb_receive(struct ieee80211_hw *hw)
{
    struct urb *urb;
    int err;
    int i;
    struct rtl_priv *rtlpriv = rtl_priv(hw);
    struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));

    WARN_ON(0 == rtlusb->rx_urb_num);
    /* 1600 == 1514 + max WLAN header + rtk info */
    WARN_ON(rtlusb->rx_max_size < 1600);

    for (i = 0; i < rtlusb->rx_urb_num; i++) {
        err = -ENOMEM;
        urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!urb) {
            RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
                     "Failed to alloc URB!!\n");
            goto err_out;
        }

        err = _rtl_prep_rx_urb(hw, rtlusb, urb, GFP_KERNEL);
        if (err < 0) {
            RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
                     "Failed to prep_rx_urb!!\n");
            usb_free_urb(urb);
            goto err_out;
        }

        usb_anchor_urb(urb, &rtlusb->rx_submitted);
        err = usb_submit_urb(urb, GFP_KERNEL);
        if (err)
            goto err_out;
        usb_free_urb(urb);
    }
    return 0;

err_out:
    usb_kill_anchored_urbs(&rtlusb->rx_submitted);
    _rtl_usb_cleanup_rx(hw);
    return err;
}

static int rtl_usb_start(struct ieee80211_hw *hw)
{
    int err;
    struct rtl_priv *rtlpriv = rtl_priv(hw);
    struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
    struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));

    err = rtlpriv->cfg->ops->hw_init(hw);
    if (!err) {
        rtl_init_rx_config(hw);

        /* Enable software */
        SET_USB_START(rtlusb);
        /* should after adapter start and interrupt enable. */
        set_hal_start(rtlhal);

        /* Start bulk IN */
        err = _rtl_usb_receive(hw);
    }

    return err;
}
Example no. 22
/*
 * Sets most of the Tx cmd's fields
 */
static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
			       struct iwl_tx_cmd *tx_cmd,
			       struct ieee80211_tx_info *info, u8 sta_id)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	__le16 fc = hdr->frame_control;
	u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
	u32 len = skb->len + FCS_LEN;
	u8 ac;

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		tx_flags |= TX_CMD_FLG_ACK;
	else
		tx_flags &= ~TX_CMD_FLG_ACK;

	if (ieee80211_is_probe_resp(fc))
		tx_flags |= TX_CMD_FLG_TSF;
	else if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;

	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
	} else {
		tx_cmd->tid_tspec = IWL_TID_NON_QOS;
		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
			tx_flags |= TX_CMD_FLG_SEQ_CTL;
		else
			tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
	}

	/* tid_tspec will default to 0 = BE when QOS isn't enabled */
	ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
	tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) <<
			TX_CMD_FLG_BT_PRIO_POS;

	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->pm_frame_timeout = cpu_to_le16(2);

		/* The spec allows Action frames in A-MPDU, we don't support
		 * it
		 */
		WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
	} else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
		tx_cmd->pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->pm_frame_timeout = 0;
	}

	if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
	    !is_multicast_ether_addr(ieee80211_get_DA(hdr)))
		tx_flags |= TX_CMD_FLG_PROT_REQUIRE;

	if ((mvm->fw->ucode_capa.capa[0] &
	     IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) &&
	    ieee80211_action_contains_tpc(skb))
		tx_flags |= TX_CMD_FLG_WRITE_TX_POWER;

	tx_cmd->tx_flags = cpu_to_le32(tx_flags);
	/* Total # bytes to be transmitted */
	tx_cmd->len = cpu_to_le16((u16)skb->len);
	tx_cmd->next_frame_len = 0;
	tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
	tx_cmd->sta_id = sta_id;
}
Example no. 23
int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = tx_info->control.sta;
	struct ath9k_htc_sta *ista;
	struct ath9k_htc_tx_ctl tx_ctl;
	enum htc_endpoint_id epid;
	u16 qnum;
	__le16 fc;
	u8 *tx_fhdr;
	u8 sta_idx, vif_idx;

	hdr = (struct ieee80211_hdr *) skb->data;
	fc = hdr->frame_control;

	if (tx_info->control.vif &&
			(struct ath9k_htc_vif *) tx_info->control.vif->drv_priv)
		vif_idx = ((struct ath9k_htc_vif *)
				tx_info->control.vif->drv_priv)->index;
	else
		vif_idx = priv->nvifs;

	if (sta) {
		ista = (struct ath9k_htc_sta *) sta->drv_priv;
		sta_idx = ista->index;
	} else {
		sta_idx = 0;
	}

	memset(&tx_ctl, 0, sizeof(struct ath9k_htc_tx_ctl));

	if (ieee80211_is_data(fc)) {
		struct tx_frame_hdr tx_hdr;
		u8 *qc;

		memset(&tx_hdr, 0, sizeof(struct tx_frame_hdr));

		tx_hdr.node_idx = sta_idx;
		tx_hdr.vif_idx = vif_idx;

		if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
			tx_ctl.type = ATH9K_HTC_AMPDU;
			tx_hdr.data_type = ATH9K_HTC_AMPDU;
		} else {
			tx_ctl.type = ATH9K_HTC_NORMAL;
			tx_hdr.data_type = ATH9K_HTC_NORMAL;
		}

		if (ieee80211_is_data_qos(fc)) {
			qc = ieee80211_get_qos_ctl(hdr);
			tx_hdr.tidno = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		}

		/* Check for RTS protection */
		if (priv->hw->wiphy->rts_threshold != (u32) -1)
			if (skb->len > priv->hw->wiphy->rts_threshold)
				tx_hdr.flags |= ATH9K_HTC_TX_RTSCTS;

		/* CTS-to-self */
		if (!(tx_hdr.flags & ATH9K_HTC_TX_RTSCTS) &&
		    (priv->op_flags & OP_PROTECT_ENABLE))
			tx_hdr.flags |= ATH9K_HTC_TX_CTSONLY;

		tx_hdr.key_type = ath9k_cmn_get_hw_crypto_keytype(skb);
		if (tx_hdr.key_type == ATH9K_KEY_TYPE_CLEAR)
			tx_hdr.keyix = (u8) ATH9K_TXKEYIX_INVALID;
		else
			tx_hdr.keyix = tx_info->control.hw_key->hw_key_idx;

		tx_fhdr = skb_push(skb, sizeof(tx_hdr));
		memcpy(tx_fhdr, (u8 *) &tx_hdr, sizeof(tx_hdr));

		qnum = skb_get_queue_mapping(skb);

		switch (qnum) {
		case 0:
			TX_QSTAT_INC(WME_AC_VO);
			epid = priv->data_vo_ep;
			break;
		case 1:
			TX_QSTAT_INC(WME_AC_VI);
			epid = priv->data_vi_ep;
			break;
		case 2:
			TX_QSTAT_INC(WME_AC_BE);
			epid = priv->data_be_ep;
			break;
		case 3:
		default:
			TX_QSTAT_INC(WME_AC_BK);
			epid = priv->data_bk_ep;
			break;
		}
	} else {
		struct tx_mgmt_hdr mgmt_hdr;

		memset(&mgmt_hdr, 0, sizeof(struct tx_mgmt_hdr));

		tx_ctl.type = ATH9K_HTC_NORMAL;

		mgmt_hdr.node_idx = sta_idx;
		mgmt_hdr.vif_idx = vif_idx;
		mgmt_hdr.tidno = 0;
		mgmt_hdr.flags = 0;

		mgmt_hdr.key_type = ath9k_cmn_get_hw_crypto_keytype(skb);
		if (mgmt_hdr.key_type == ATH9K_KEY_TYPE_CLEAR)
			mgmt_hdr.keyix = (u8) ATH9K_TXKEYIX_INVALID;
		else
			mgmt_hdr.keyix = tx_info->control.hw_key->hw_key_idx;

		tx_fhdr = skb_push(skb, sizeof(mgmt_hdr));
		memcpy(tx_fhdr, (u8 *) &mgmt_hdr, sizeof(mgmt_hdr));
		epid = priv->mgmt_ep;
	}

	return htc_send(priv->htc, skb, epid, &tx_ctl);
}
Example no. 24
static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *scratch,
				int encrypted)
{
	__le16 mask_fc;
	int a4_included;
	u8 qos_tid;
	u8 *b_0, *aad;
	u16 data_len, len_a;
	unsigned int hdrlen;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	b_0 = scratch + 3 * AES_BLOCK_LEN;
	aad = scratch + 4 * AES_BLOCK_LEN;

	/*
	 * Mask FC: zero subtype b4 b5 b6
	 * Retry, PwrMgt, MoreData; set Protected
	 */
	mask_fc = hdr->frame_control;
	mask_fc &= ~cpu_to_le16(0x0070 | IEEE80211_FCTL_RETRY |
				IEEE80211_FCTL_PM | IEEE80211_FCTL_MOREDATA);
	mask_fc |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);

	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	/* AAD covers the MAC header minus the 2-byte duration field */
	len_a = hdrlen - 2;
	a4_included = ieee80211_has_a4(hdr->frame_control);

	if (ieee80211_is_data_qos(hdr->frame_control))
		qos_tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
	else
		qos_tid = 0;

	data_len = skb->len - hdrlen - CCMP_HDR_LEN;
	if (encrypted)
		data_len -= CCMP_MIC_LEN;

	/* First block, b_0 */
	b_0[0] = 0x59; /* flags: Adata: 1, M: 011, L: 001 */
	/* Nonce: QoS Priority | A2 | PN */
	b_0[1] = qos_tid;
	memcpy(&b_0[2], hdr->addr2, ETH_ALEN);
	memcpy(&b_0[8], pn, CCMP_PN_LEN);
	/* l(m) */
	put_unaligned_be16(data_len, &b_0[14]);

	/* AAD (extra authenticate-only data) / masked 802.11 header
	 * FC | A1 | A2 | A3 | SC | [A4] | [QC] */
	put_unaligned_be16(len_a, &aad[0]);
	put_unaligned(mask_fc, (__le16 *)&aad[2]);
	memcpy(&aad[4], &hdr->addr1, 3 * ETH_ALEN);

	/* Mask Seq#, leave Frag# */
	aad[22] = *((u8 *) &hdr->seq_ctrl) & 0x0f;
	aad[23] = 0;

	if (a4_included) {
		memcpy(&aad[24], hdr->addr4, ETH_ALEN);
		aad[30] = qos_tid;
		aad[31] = 0;
	} else {
		memset(&aad[24], 0, ETH_ALEN + IEEE80211_QOS_CTL_LEN);
		aad[24] = qos_tid;
	}
}
Example no. 25
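/*
 * Return the TID carried in the QoS control field; only meaningful for
 * QoS data frames, so callers must check ieee80211_is_data_qos() first.
 */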
static inline u16 get_tid_h(struct ieee80211_hdr *hdr)
{
	return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK;
}
Example no. 26
int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = tx_info->control.sta;
	struct ieee80211_vif *vif = tx_info->control.vif;
	struct ath9k_htc_sta *ista;
	struct ath9k_htc_vif *avp;
	struct ath9k_htc_tx_ctl tx_ctl;
	enum htc_endpoint_id epid;
	u16 qnum;
	__le16 fc;
	u8 *tx_fhdr;
	u8 sta_idx, vif_idx;

	hdr = (struct ieee80211_hdr *) skb->data;
	fc = hdr->frame_control;

	/*
	 * Find out on which interface this packet has to be
	 * sent out.
	 */
	if (vif) {
		avp = (struct ath9k_htc_vif *) vif->drv_priv;
		vif_idx = avp->index;
	} else {
		if (!priv->ah->is_monitoring) {
			ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
				"VIF is null, but no monitor interface !\n");
			return -EINVAL;
		}

		vif_idx = priv->mon_vif_idx;
	}

	/*
	 * Find out which station this packet is destined for.
	 */
	if (sta) {
		ista = (struct ath9k_htc_sta *) sta->drv_priv;
		sta_idx = ista->index;
	} else {
		sta_idx = priv->vif_sta_pos[vif_idx];
	}

	memset(&tx_ctl, 0, sizeof(struct ath9k_htc_tx_ctl));

	if (ieee80211_is_data(fc)) {
		struct tx_frame_hdr tx_hdr;
		u32 flags = 0;
		u8 *qc;

		memset(&tx_hdr, 0, sizeof(struct tx_frame_hdr));

		tx_hdr.node_idx = sta_idx;
		tx_hdr.vif_idx = vif_idx;

		if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
			tx_ctl.type = ATH9K_HTC_AMPDU;
			tx_hdr.data_type = ATH9K_HTC_AMPDU;
		} else {
			tx_ctl.type = ATH9K_HTC_NORMAL;
			tx_hdr.data_type = ATH9K_HTC_NORMAL;
		}

		if (ieee80211_is_data_qos(fc)) {
			qc = ieee80211_get_qos_ctl(hdr);
			tx_hdr.tidno = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		}

		/* Check for RTS protection */
		if (priv->hw->wiphy->rts_threshold != (u32) -1)
			if (skb->len > priv->hw->wiphy->rts_threshold)
				flags |= ATH9K_HTC_TX_RTSCTS;

		/* CTS-to-self */
		if (!(flags & ATH9K_HTC_TX_RTSCTS) &&
		    (vif && vif->bss_conf.use_cts_prot))
			flags |= ATH9K_HTC_TX_CTSONLY;

		tx_hdr.flags = cpu_to_be32(flags);
		tx_hdr.key_type = ath9k_cmn_get_hw_crypto_keytype(skb);
		if (tx_hdr.key_type == ATH9K_KEY_TYPE_CLEAR)
			tx_hdr.keyix = (u8) ATH9K_TXKEYIX_INVALID;
		else
			tx_hdr.keyix = tx_info->control.hw_key->hw_key_idx;

		tx_fhdr = skb_push(skb, sizeof(tx_hdr));
		memcpy(tx_fhdr, (u8 *) &tx_hdr, sizeof(tx_hdr));

		qnum = skb_get_queue_mapping(skb);

		switch (qnum) {
		case 0:
			TX_QSTAT_INC(WME_AC_VO);
			epid = priv->data_vo_ep;
			break;
		case 1:
			TX_QSTAT_INC(WME_AC_VI);
			epid = priv->data_vi_ep;
			break;
		case 2:
			TX_QSTAT_INC(WME_AC_BE);
			epid = priv->data_be_ep;
			break;
		case 3:
		default:
			TX_QSTAT_INC(WME_AC_BK);
			epid = priv->data_bk_ep;
			break;
		}
	} else {
		struct tx_mgmt_hdr mgmt_hdr;

		memset(&mgmt_hdr, 0, sizeof(struct tx_mgmt_hdr));

		tx_ctl.type = ATH9K_HTC_NORMAL;

		mgmt_hdr.node_idx = sta_idx;
		mgmt_hdr.vif_idx = vif_idx;
		mgmt_hdr.tidno = 0;
		mgmt_hdr.flags = 0;

		mgmt_hdr.key_type = ath9k_cmn_get_hw_crypto_keytype(skb);
		if (mgmt_hdr.key_type == ATH9K_KEY_TYPE_CLEAR)
			mgmt_hdr.keyix = (u8) ATH9K_TXKEYIX_INVALID;
		else
			mgmt_hdr.keyix = tx_info->control.hw_key->hw_key_idx;

		tx_fhdr = skb_push(skb, sizeof(mgmt_hdr));
		memcpy(tx_fhdr, (u8 *) &mgmt_hdr, sizeof(mgmt_hdr));
		epid = priv->mgmt_ep;
	}

	return htc_send(priv->htc, skb, epid, &tx_ctl);
}
Example no. 27
static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
				   int queue, struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb);
	struct iwl_mvm_key_pn *ptk_pn;
	u8 tid, keyidx;
	u8 pn[IEEE80211_CCMP_PN_LEN];
	u8 *extiv;

	/* do PN checking */

	/* multicast and non-data only arrives on default queue */
	if (!ieee80211_is_data(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1))
		return 0;

	/* do not check PN for open AP */
	if (!(stats->flag & RX_FLAG_DECRYPTED))
		return 0;

	/*
	 * avoid checking for default queue - we don't want to replicate
	 * all the logic that's necessary for checking the PN on fragmented
	 * frames, leave that to mac80211
	 */
	if (queue == 0)
		return 0;

	/* if we are here - this for sure is either CCMP or GCMP */
	if (IS_ERR_OR_NULL(sta)) {
		IWL_ERR(mvm,
			"expected hw-decrypted unicast frame for station\n");
		return -1;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	extiv = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control);
	keyidx = extiv[3] >> 6;

	ptk_pn = rcu_dereference(mvmsta->ptk_pn[keyidx]);
	if (!ptk_pn)
		return -1;

	if (ieee80211_is_data_qos(hdr->frame_control))
		tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
	else
		tid = 0;

	/* we don't use HCCA/802.11 QoS TSPECs, so drop such frames */
	if (tid >= IWL_MAX_TID_COUNT)
		return -1;

	/* load pn */
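	/* the CCMP/GCMP extended IV bytes are PN0 PN1 rsvd keyid PN2 PN3
	 * PN4 PN5; reorder into big-endian pn[] so memcmp() compares the
	 * counters numerically
	 */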
	pn[0] = extiv[7];
	pn[1] = extiv[6];
	pn[2] = extiv[5];
	pn[3] = extiv[4];
	pn[4] = extiv[1];
	pn[5] = extiv[0];

	if (memcmp(pn, ptk_pn->q[queue].pn[tid],
		   IEEE80211_CCMP_PN_LEN) <= 0)
		return -1;

	memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
	stats->flag |= RX_FLAG_PN_VALIDATED;

	return 0;
}
Example no. 28
void pcie_tx_xmit_ndp(struct ieee80211_hw *hw,
		      struct ieee80211_tx_control *control,
		      struct sk_buff *skb)
{
	struct mwl_priv *priv = hw->priv;
	struct pcie_priv *pcie_priv = priv->hif.priv;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_key_conf *k_conf;
	struct mwl_vif *mwl_vif;
	int index;
	struct ieee80211_sta *sta;
	struct mwl_sta *sta_info;
	struct ieee80211_hdr *wh;
	u8 *da;
	u16 qos;
	u8 tid = 0;
	struct mwl_ampdu_stream *stream = NULL;
	u16 tx_que_priority;
	bool mgmtframe = false;
	struct ieee80211_mgmt *mgmt;
	bool eapol_frame = false;
	bool start_ba_session = false;
	struct pcie_tx_ctrl_ndp *tx_ctrl;

	tx_info = IEEE80211_SKB_CB(skb);
	k_conf = tx_info->control.hw_key;
	mwl_vif = mwl_dev_get_vif(tx_info->control.vif);
	index = skb_get_queue_mapping(skb);
	sta = control->sta;
	sta_info = sta ? mwl_dev_get_sta(sta) : NULL;

	wh = (struct ieee80211_hdr *)skb->data;

	if (ieee80211_is_data_qos(wh->frame_control))
		qos = le16_to_cpu(*((__le16 *)ieee80211_get_qos_ctl(wh)));
	else
		qos = 0xFFFF;

	if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
		index = IEEE80211_AC_VO;
		eapol_frame = true;
	}

	if (ieee80211_is_mgmt(wh->frame_control)) {
		mgmtframe = true;
		mgmt = (struct ieee80211_mgmt *)skb->data;
	}

	if (mgmtframe) {
		u16 capab;

		if (unlikely(ieee80211_is_action(wh->frame_control) &&
			     mgmt->u.action.category == WLAN_CATEGORY_BACK &&
			     mgmt->u.action.u.addba_req.action_code ==
			     WLAN_ACTION_ADDBA_REQ)) {
			capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
			tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
			index = utils_tid_to_ac(tid);
		}

		if (unlikely(ieee80211_is_assoc_req(wh->frame_control)))
			utils_add_basic_rates(hw->conf.chandef.chan->band, skb);

		if (ieee80211_is_probe_req(wh->frame_control) ||
		    ieee80211_is_probe_resp(wh->frame_control))
			tx_que_priority = PROBE_RESPONSE_TXQNUM;
		else {
			if ((
			    (mwl_vif->macid == SYSADPT_NUM_OF_AP) &&
			    (!ieee80211_has_protected(wh->frame_control) ||
			    (ieee80211_has_protected(wh->frame_control) &&
			    ieee80211_is_auth(wh->frame_control)))
			    ) ||
			    !sta ||
			    ieee80211_is_auth(wh->frame_control) ||
			    ieee80211_is_assoc_req(wh->frame_control) ||
			    ieee80211_is_assoc_resp(wh->frame_control))
				tx_que_priority = MGMT_TXQNUM;
			else {
				if (is_multicast_ether_addr(wh->addr1) &&
				    (mwl_vif->macid != SYSADPT_NUM_OF_AP))
					tx_que_priority = mwl_vif->macid *
						SYSADPT_MAX_TID;
				else
					tx_que_priority = SYSADPT_MAX_TID *
						(sta_info->stnid +
						QUEUE_STAOFFSET) + 6;
			}
		}

		if (ieee80211_is_assoc_resp(wh->frame_control) ||
		    ieee80211_is_reassoc_resp(wh->frame_control)) {
			struct sk_buff *ack_skb;
			struct ieee80211_tx_info *ack_info;

			ack_skb = skb_copy(skb, GFP_ATOMIC);
			ack_info = IEEE80211_SKB_CB(ack_skb);
			pcie_tx_prepare_info(priv, 0, ack_info);
			ieee80211_tx_status(hw, ack_skb);
		}

		pcie_tx_encapsulate_frame(priv, skb, k_conf, NULL);
	} else {
		/* ... the data-frame path and the rest of the function are
		 * truncated in this snippet ...
		 */
	}
}
Example no. 29
static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct rtl8192_tx_ring *ring;
	struct rtl_tx_desc *pdesc;
	u8 idx;
	unsigned int queue_index, hw_queue;
	unsigned long flags;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
	__le16 fc = hdr->frame_control;
	u8 *pda_addr = hdr->addr1;
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	/*ssn */
	u8 *qc = NULL;
	u8 tid = 0;
	u16 seq_number = 0;
	u8 own;
	u8 temp_one = 1;

	if (ieee80211_is_mgmt(fc))
		rtl_tx_mgmt_proc(hw, skb);
	rtl_action_proc(hw, skb, true);

	queue_index = skb_get_queue_mapping(skb);
	hw_queue = _rtl_mac_to_hwqueue(fc, queue_index);

	if (is_multicast_ether_addr(pda_addr))
		rtlpriv->stats.txbytesmulticast += skb->len;
	else if (is_broadcast_ether_addr(pda_addr))
		rtlpriv->stats.txbytesbroadcast += skb->len;
	else
		rtlpriv->stats.txbytesunicast += skb->len;

	spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);

	ring = &rtlpci->tx_ring[hw_queue];
	if (hw_queue != BEACON_QUEUE)
		idx = (ring->idx + skb_queue_len(&ring->queue)) %
				ring->entries;
	else
		idx = 0;

	pdesc = &ring->desc[idx];
	own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
			true, HW_DESC_OWN);

	if ((own == 1) && (hw_queue != BEACON_QUEUE)) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
			 ("No more TX desc@%d, ring->idx = %d,"
			  "idx = %d, skb_queue_len = 0x%d\n",
			  hw_queue, ring->idx, idx,
			  skb_queue_len(&ring->queue)));

		spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
		return skb->len;
	}

	/*
	 *if(ieee80211_is_nullfunc(fc)) {
	 *      spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
	 *      return 1;
	 *}
	 */

	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;

		seq_number = mac->tids[tid].seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		/*
		 *hdr->seq_ctrl = hdr->seq_ctrl &
		 *cpu_to_le16(IEEE80211_SCTL_FRAG);
		 *hdr->seq_ctrl |= cpu_to_le16(seq_number);
		 */

		seq_number += 1;
	}

	if (ieee80211_is_data(fc))
		rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);

	rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
					info, skb, hw_queue);

	__skb_queue_tail(&ring->queue, skb);

	rtlpriv->cfg->ops->set_desc((u8 *) pdesc, true,
				    HW_DESC_OWN, (u8 *)&temp_one);

	if (!ieee80211_has_morefrags(hdr->frame_control)) {
		if (qc)
			mac->tids[tid].seq_number = seq_number;
	}

	if ((ring->entries - skb_queue_len(&ring->queue)) < 2 &&
	    hw_queue != BEACON_QUEUE) {

		RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
			 ("less desc left, stop skb_queue@%d, "
			  "ring->idx = %d,"
			  "idx = %d, skb_queue_len = 0x%d\n",
			  hw_queue, ring->idx, idx,
			  skb_queue_len(&ring->queue)));

		ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
	}

	spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);

	rtlpriv->cfg->ops->tx_polling(hw, hw_queue);

	return 0;
}
Example no. 30
void ath9k_tx_tasklet(unsigned long data)
{
	struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
	struct ieee80211_sta *sta;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct sk_buff *skb = NULL;
	__le16 fc;

	while ((skb = skb_dequeue(&priv->tx_queue)) != NULL) {

		hdr = (struct ieee80211_hdr *) skb->data;
		fc = hdr->frame_control;
		tx_info = IEEE80211_SKB_CB(skb);

		memset(&tx_info->status, 0, sizeof(tx_info->status));

		rcu_read_lock();

		sta = ieee80211_find_sta(priv->vif, hdr->addr1);
		if (!sta) {
			rcu_read_unlock();
			ieee80211_tx_status(priv->hw, skb);
			continue;
		}

		/* Check if we need to start aggregation */

		if (sta && conf_is_ht(&priv->hw->conf) &&
		    !(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
			if (ieee80211_is_data_qos(fc)) {
				u8 *qc, tid;
				struct ath9k_htc_sta *ista;

				qc = ieee80211_get_qos_ctl(hdr);
				tid = qc[0] & 0xf;
				ista = (struct ath9k_htc_sta *)sta->drv_priv;

				if (ath9k_htc_check_tx_aggr(priv, ista, tid)) {
					ieee80211_start_tx_ba_session(sta, tid);
					spin_lock_bh(&priv->tx_lock);
					ista->tid_state[tid] = AGGR_PROGRESS;
					spin_unlock_bh(&priv->tx_lock);
				}
			}
		}

		rcu_read_unlock();

		/* Send status to mac80211 */
		ieee80211_tx_status(priv->hw, skb);
	}

	/* Wake TX queues if needed */
	spin_lock_bh(&priv->tx_lock);
	if (priv->tx_queues_stop) {
		priv->tx_queues_stop = false;
		spin_unlock_bh(&priv->tx_lock);
		ath_print(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
			  "Waking up TX queues\n");
		ieee80211_wake_queues(priv->hw);
		return;
	}
	spin_unlock_bh(&priv->tx_lock);
}