Example #1
void wcn36xx_fill_tx_bd(struct wcn36xx *wcn, struct wcn36xx_tx_bd *bd,
			u8 broadcast, u8 encrypt, struct ieee80211_hdr *hdr,
			bool tx_compl)
{
	bd->dpu_rf = WCN36XX_BMU_WQ_TX;
	bd->pdu.tid   = WCN36XX_TID;
	bd->pdu.reserved3 = 0xd;

	if (broadcast) {
		/* broadcast */
		bd->ub = 1;
		bd->queue_id = WCN36XX_TX_B_WQ_ID;

		/* default rate for broadcast */
		if (ieee80211_is_mgmt(hdr->frame_control))
			bd->bd_rate = (wcn->band == IEEE80211_BAND_5GHZ) ?
				WCN36XX_BD_RATE_CTRL :
				WCN36XX_BD_RATE_MGMT;
		/* No ACK needed: not unicast */
		bd->ack_policy = 1;
	} else {
		bd->queue_id = WCN36XX_TX_U_WQ_ID;
		/* default rate for unicast */
		bd->ack_policy = 0;
		if (ieee80211_is_data(hdr->frame_control))
			bd->bd_rate = WCN36XX_BD_RATE_DATA;
		else if (ieee80211_is_mgmt(hdr->frame_control))
			bd->bd_rate = (wcn->band == IEEE80211_BAND_5GHZ) ?
				WCN36XX_BD_RATE_CTRL :
				WCN36XX_BD_RATE_MGMT;
		else if (ieee80211_is_ctl(hdr->frame_control))
			bd->bd_rate = WCN36XX_BD_RATE_CTRL;
		else
			wcn36xx_warn("frame control type unknown");
	}

	if (ieee80211_is_data(hdr->frame_control)) {
		bd->dpu_sign = wcn->current_vif->ucast_dpu_signature;
		bd->queue_id = 0;
		bd->sta_index = wcn->current_vif->sta_index;
		bd->dpu_desc_idx = wcn->current_vif->dpu_desc_index;
	} else {
		bd->sta_index = wcn->current_vif->self_sta_index;
		bd->dpu_desc_idx = wcn->current_vif->self_dpu_desc_index;
	}

	bd->dpu_ne = encrypt;
	bd->tx_comp = tx_compl;

	buff_to_be((u32 *)bd, sizeof(*bd)/sizeof(u32));
	bd->tx_bd_sign = 0xbdbdbdbd;
}
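Note: every example on this page leans on mac80211's frame-control predicates. As a reference point, here is a minimal user-space sketch (not driver code) of the mask-and-compare behind ieee80211_is_mgmt()/ieee80211_is_ctl()/ieee80211_is_data(); the constants mirror include/linux/ieee80211.h, and the little-endian frame_control is treated as a plain host-order uint16_t purely for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IEEE80211_FCTL_FTYPE	0x000c
#define IEEE80211_FTYPE_MGMT	0x0000
#define IEEE80211_FTYPE_CTL	0x0004
#define IEEE80211_FTYPE_DATA	0x0008

/* The same mask-and-compare the kernel inlines perform on frame_control. */
static bool is_mgmt(uint16_t fc)
{
	return (fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT;
}

static bool is_ctl(uint16_t fc)
{
	return (fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL;
}

static bool is_data(uint16_t fc)
{
	return (fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA;
}

int main(void)
{
	uint16_t beacon_fc = 0x0080;	/* type = mgmt, subtype = beacon */

	printf("mgmt=%d ctl=%d data=%d\n",
	       is_mgmt(beacon_fc), is_ctl(beacon_fc), is_data(beacon_fc));
	return 0;
}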
Example #2
/*
 * Build the basic fields of the REPLY_TX command.
 */
static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
				      struct sk_buff *skb,
				      struct iwl_tx_cmd *tx_cmd,
				      struct ieee80211_tx_info *info,
				      struct ieee80211_hdr *hdr, u8 sta_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		tx_flags |= TX_CMD_FLG_ACK_MSK;
	else
		tx_flags &= ~TX_CMD_FLG_ACK_MSK;

	if (ieee80211_is_probe_resp(fc))
		tx_flags |= TX_CMD_FLG_TSF_MSK;
	else if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
	else if (info->band == IEEE80211_BAND_2GHZ &&
		 cfg(priv)->bt_params &&
		 cfg(priv)->bt_params->advanced_bt_coexist &&
		 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
		 ieee80211_is_reassoc_req(fc) ||
		 skb->protocol == cpu_to_be16(ETH_P_PAE)))
		tx_flags |= TX_CMD_FLG_IGNORE_BT;

	tx_cmd->sta_id = sta_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		else
			tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	}

	iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags);

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}
Example #3
static u8 _rtl92ce_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
{
	__le16 fc = rtl_get_fc(skb);

	if (unlikely(ieee80211_is_beacon(fc)))
		return QSLT_BEACON;
	if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc))
		return QSLT_MGNT;

	return skb->priority;
}
Example #4
static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);

	if (!is_eth && ieee80211_is_mgmt(hdr->frame_control))
		return HTT_DATA_TX_EXT_TID_MGMT;
	else if (cb->flags & ATH10K_SKB_F_QOS)
		return skb->priority % IEEE80211_QOS_CTL_TID_MASK;
	else
		return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
}
Example #5
ieee80211_rx_result
ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	int hdrlen;
	struct ieee80211_key *key = rx->key;
	struct sk_buff *skb = rx->skb;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	u8 pn[CCMP_PN_LEN];
	int data_len;
	int queue;

	hdrlen = ieee80211_hdrlen(hdr->frame_control);

	if (!ieee80211_is_data(hdr->frame_control) &&
	    !ieee80211_is_robust_mgmt_frame(hdr))
		return RX_CONTINUE;

	data_len = skb->len - hdrlen - CCMP_HDR_LEN - CCMP_MIC_LEN;
	if (!rx->sta || data_len < 0)
		return RX_DROP_UNUSABLE;

	ccmp_hdr2pn(pn, skb->data + hdrlen);

	queue = ieee80211_is_mgmt(hdr->frame_control) ?
		NUM_RX_DATA_QUEUES : rx->queue;

	if (memcmp(pn, key->u.ccmp.rx_pn[queue], CCMP_PN_LEN) <= 0) {
		key->u.ccmp.replays++;
		return RX_DROP_UNUSABLE;
	}

	if (!(status->flag & RX_FLAG_DECRYPTED)) {
		/* hardware didn't decrypt/verify MIC */
		ccmp_special_blocks(skb, pn, key->u.ccmp.rx_crypto_buf, 1);

		if (ieee80211_aes_ccm_decrypt(
			    key->u.ccmp.tfm, key->u.ccmp.rx_crypto_buf,
			    skb->data + hdrlen + CCMP_HDR_LEN, data_len,
			    skb->data + skb->len - CCMP_MIC_LEN,
			    skb->data + hdrlen + CCMP_HDR_LEN))
			return RX_DROP_UNUSABLE;
	}

	memcpy(key->u.ccmp.rx_pn[queue], pn, CCMP_PN_LEN);

	/* Remove CCMP header and MIC */
	skb_trim(skb, skb->len - CCMP_MIC_LEN);
	memmove(skb->data + CCMP_HDR_LEN, skb->data, hdrlen);
	skb_pull(skb, CCMP_HDR_LEN);

	return RX_CONTINUE;
}
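A note on the replay check above: it works because ccmp_hdr2pn() stores the 48-bit packet number most-significant byte first, so a plain memcmp() over the six bytes compares the counters numerically. A stand-alone sketch of that property (the MSB-first byte order is the assumption here):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CCMP_PN_LEN 6

int main(void)
{
	/* PNs stored big-endian (MSB first), as ccmp_hdr2pn() lays them out. */
	uint8_t last_pn[CCMP_PN_LEN] = { 0, 0, 0, 0, 1, 0x00 };	/* 0x000000000100 */
	uint8_t rx_pn[CCMP_PN_LEN]   = { 0, 0, 0, 0, 0, 0xff };	/* 0x0000000000ff */

	/* rx_pn <= last_pn means the frame is a replay. */
	if (memcmp(rx_pn, last_pn, CCMP_PN_LEN) <= 0)
		printf("replay detected\n");
	return 0;
}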
Example #6
ieee80211_rx_result
ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx)
{
	struct sk_buff *skb = rx->skb;
	struct ieee80211_key *key = rx->key;
	struct ieee80211_mmie *mmie;
	u8 aad[20], mic[8], ipn[6];
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (!ieee80211_is_mgmt(hdr->frame_control))
		return RX_CONTINUE;

	if ((rx->status->flag & RX_FLAG_DECRYPTED) &&
	    (rx->status->flag & RX_FLAG_IV_STRIPPED))
		return RX_CONTINUE;

	if (skb->len < 24 + sizeof(*mmie))
		return RX_DROP_UNUSABLE;

	mmie = (struct ieee80211_mmie *)
		(skb->data + skb->len - sizeof(*mmie));
	if (mmie->element_id != WLAN_EID_MMIE ||
	    mmie->length != sizeof(*mmie) - 2)
		return RX_DROP_UNUSABLE; /* Invalid MMIE */

	bip_ipn_swap(ipn, mmie->sequence_number);

	if (memcmp(ipn, key->u.aes_cmac.rx_pn, 6) <= 0) {
		key->u.aes_cmac.replays++;
		return RX_DROP_UNUSABLE;
	}

	if (!(rx->status->flag & RX_FLAG_DECRYPTED)) {
		/* hardware didn't decrypt/verify MIC */
		bip_aad(skb, aad);
		ieee80211_aes_cmac(key->u.aes_cmac.tfm,
				   key->u.aes_cmac.rx_crypto_buf, aad,
				   skb->data + 24, skb->len - 24, mic);
		if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) {
			key->u.aes_cmac.icverrors++;
			return RX_DROP_UNUSABLE;
		}
	}

	memcpy(key->u.aes_cmac.rx_pn, ipn, 6);

	/* Remove MMIE */
	skb_trim(skb, skb->len - sizeof(*mmie));

	return RX_CONTINUE;
}
Example #7
u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
			enum nl80211_iftype type)
{
	__le16 fc = hdr->frame_control;

	/* drop ACK/CTS frames and incorrect hdr len (ctrl) */
	if (len < 16)
		return NULL;

	if (ieee80211_is_data(fc)) {
		if (len < 24) /* drop incorrect hdr len (data) */
			return NULL;

		if (ieee80211_has_a4(fc))
			return NULL;
		if (ieee80211_has_tods(fc))
			return hdr->addr1;
		if (ieee80211_has_fromds(fc))
			return hdr->addr2;

		return hdr->addr3;
	}

	if (ieee80211_is_mgmt(fc)) {
		if (len < 24) /* drop incorrect hdr len (mgmt) */
			return NULL;
		return hdr->addr3;
	}

	if (ieee80211_is_ctl(fc)) {
		if (ieee80211_is_pspoll(fc))
			return hdr->addr1;

		if (ieee80211_is_back_req(fc)) {
			switch (type) {
			case NL80211_IFTYPE_STATION:
				return hdr->addr2;
			case NL80211_IFTYPE_AP:
			case NL80211_IFTYPE_AP_VLAN:
				return hdr->addr1;
			default:
				break; /* fall through to the return */
			}
		}
	}

	return NULL;
}
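The data-frame branch above is the classic ToDS/FromDS address table. A simplified user-space sketch of the same selection, using a hypothetical hdr struct and host-order fc for clarity:

#include <stdint.h>
#include <stdio.h>

#define FCTL_TODS	0x0100	/* mirrors IEEE80211_FCTL_TODS */
#define FCTL_FROMDS	0x0200	/* mirrors IEEE80211_FCTL_FROMDS */

struct hdr {
	uint16_t fc;
	uint8_t addr1[6], addr2[6], addr3[6];
};

static const uint8_t *data_bssid(const struct hdr *h)
{
	if ((h->fc & (FCTL_TODS | FCTL_FROMDS)) == (FCTL_TODS | FCTL_FROMDS))
		return NULL;		/* four-address frame: no single BSSID */
	if (h->fc & FCTL_TODS)
		return h->addr1;	/* headed to the AP */
	if (h->fc & FCTL_FROMDS)
		return h->addr2;	/* coming from the AP */
	return h->addr3;		/* IBSS/direct link */
}

int main(void)
{
	struct hdr h = { .fc = FCTL_TODS, .addr1 = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff } };

	printf("bssid starts with %02x\n", data_bssid(&h)[0]);
	return 0;
}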
Example #8
static enum r92su_rx_control_t
r92su_rx_handle_mgmt(struct r92su *r92su, struct sk_buff *skb,
		     struct r92su_bss_priv *bss_priv)
{
	struct ieee80211_hdr *i3e = (void *)skb->data;

	if (ieee80211_is_mgmt(i3e->frame_control)) {
#if 0
		cfg80211_rx_mgmt(&r92su->wdev,
				 r92su->current_channel->center_freq, 0,
				 skb->data, skb->len, 0, GFP_ATOMIC);
#endif
		dev_kfree_skb_any(skb);
		return RX_QUEUE;
	}
	return RX_CONTINUE;
}
Example #9
static u8 _rtl92se_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 skb_queue)
{
	__le16 fc = rtl_get_fc(skb);

	if (unlikely(ieee80211_is_beacon(fc)))
		return QSLT_BEACON;
	if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc))
		return QSLT_MGNT;
	if (ieee80211_is_nullfunc(fc))
		return QSLT_HIGH;

	/* Kernel commit 1bf4bbb4024dcdab changed EAPOL packets to use
	 * queue V0 at priority 7; however, the RTL8192SE appears to have
	 * that queue at priority 6
	 */
	if (skb->priority == 7)
		return QSLT_VO;
	return skb->priority;
}
Example #10
static unsigned int _rtl_mac_to_hwqueue(__le16 fc,
		unsigned int mac80211_queue_index)
{
	unsigned int hw_queue_index;

	if (unlikely(ieee80211_is_beacon(fc))) {
		hw_queue_index = BEACON_QUEUE;
		goto out;
	}

	if (ieee80211_is_mgmt(fc)) {
		hw_queue_index = MGNT_QUEUE;
		goto out;
	}

	switch (mac80211_queue_index) {
	case 0:
		hw_queue_index = VO_QUEUE;
		break;
	case 1:
		hw_queue_index = VI_QUEUE;
		break;
	case 2:
		hw_queue_index = BE_QUEUE;
		break;
	case 3:
		hw_queue_index = BK_QUEUE;
		break;
	default:
		hw_queue_index = BE_QUEUE;
		RT_ASSERT(false, ("QSLT_BE queue, skb_queue:%d\n",
				  mac80211_queue_index));
		break;
	}

out:
	return hw_queue_index;
}
Example #11
File: coex.c Project: Lyude/linux
u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
			   struct ieee80211_tx_info *info, u8 ac)
{
	__le16 fc = hdr->frame_control;
	bool mplut_enabled = iwl_mvm_is_mplut_supported(mvm);

	if (info->band != NL80211_BAND_2GHZ)
		return 0;

	if (unlikely(mvm->bt_tx_prio))
		return mvm->bt_tx_prio - 1;

	if (likely(ieee80211_is_data(fc))) {
		if (likely(ieee80211_is_data_qos(fc))) {
			switch (ac) {
			case IEEE80211_AC_BE:
				return mplut_enabled ? 1 : 0;
			case IEEE80211_AC_VI:
				return mplut_enabled ? 2 : 3;
			case IEEE80211_AC_VO:
				return 3;
			default:
				return 0;
			}
		} else if (is_multicast_ether_addr(hdr->addr1)) {
			return 3;
		} else
			return 0;
	} else if (ieee80211_is_mgmt(fc)) {
		return ieee80211_is_disassoc(fc) ? 0 : 3;
	} else if (ieee80211_is_ctl(fc)) {
		/* ignore cfend and cfendack frames as we never send those */
		return 3;
	}

	return 0;
}
Example #12
int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
		  struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct ath10k_hif_sg_item sg_items[2];
	struct ath10k_htt_txbuf *txbuf;
	struct htt_data_tx_desc_frag *frags;
	bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
	int prefetch_len;
	int res;
	u8 flags0 = 0;
	u16 msdu_id, flags1 = 0;
	u16 freq = 0;
	u32 frags_paddr = 0;
	u32 txbuf_paddr;
	struct htt_msdu_ext_desc *ext_desc = NULL;
	bool limit_mgmt_desc = false;
	bool is_probe_resp = false;

	if (unlikely(ieee80211_is_mgmt(hdr->frame_control)) &&
	    ar->hw_params.max_probe_resp_desc_thres) {
		limit_mgmt_desc = true;

		if (ieee80211_is_probe_resp(hdr->frame_control))
			is_probe_resp = true;
	}

	res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);
	if (res)
		goto err;

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	spin_unlock_bh(&htt->tx_lock);
	if (res < 0)
		goto err_tx_dec;

	msdu_id = res;

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	txbuf = &htt->txbuf.vaddr[msdu_id];
	txbuf_paddr = htt->txbuf.paddr +
		      (sizeof(struct ath10k_htt_txbuf) * msdu_id);

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	} else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
		   txmode == ATH10K_HW_TXRX_RAW &&
		   ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_msdu_id;
	}

	if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
		freq = ar->scan.roc_freq;

	switch (txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		/* fall through */
	case ATH10K_HW_TXRX_ETHERNET:
		if (ar->hw_params.continuous_frag_desc) {
			memset(&htt->frag_desc.vaddr[msdu_id], 0,
			       sizeof(struct htt_msdu_ext_desc));
			frags = (struct htt_data_tx_desc_frag *)
				&htt->frag_desc.vaddr[msdu_id].frags;
			ext_desc = &htt->frag_desc.vaddr[msdu_id];
			frags[0].tword_addr.paddr_lo =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].tword_addr.paddr_hi = 0;
			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);

			frags_paddr =  htt->frag_desc.paddr +
				(sizeof(struct htt_msdu_ext_desc) * msdu_id);
		} else {
			frags = txbuf->frags;
			frags[0].dword_addr.paddr =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
			frags[1].dword_addr.paddr = 0;
			frags[1].dword_addr.len = 0;

			frags_paddr = txbuf_paddr;
		}
		flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

		frags_paddr = skb_cb->paddr;
		break;
	}

	/* Normally all commands go through HTC which manages tx credits for
	 * each endpoint and notifies when tx is completed.
	 *
	 * HTT endpoint is creditless so there's no need to care about HTC
	 * flags. In that case it is trivial to fill the HTC header here.
	 *
	 * MSDU transmission is considered completed upon HTT event. This
	 * implies no relevant resources can be freed until after the event is
	 * received. That's why the HTC tx completion handler itself is ignored:
	 * transfer_context is set to NULL for all sg items.
	 *
	 * There is simply no point in pushing HTT TX_FRM through HTC tx path
	 * as it's a waste of resources. By bypassing HTC it is possible to
	 * avoid extra memory allocations, compress data structures and thus
	 * improve performance. */

	txbuf->htc_hdr.eid = htt->eid;
	txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
					   sizeof(txbuf->cmd_tx) +
					   prefetch_len);
	txbuf->htc_hdr.flags = 0;

	if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
		if (ar->hw_params.continuous_frag_desc)
			ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
	}

	/* Prevent firmware from sending up tx inspection requests. There's
	 * nothing ath10k can do with frames requested for inspection so force
	 * it to simply relay a regular tx completion with discard status.
	 */
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

	txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	txbuf->cmd_tx.flags0 = flags0;
	txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
	txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
	txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
	txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
	if (ath10k_mac_tx_frm_has_freq(ar)) {
		txbuf->cmd_tx.offchan_tx.peerid =
				__cpu_to_le16(HTT_INVALID_PEERID);
		txbuf->cmd_tx.offchan_tx.freq =
				__cpu_to_le16(freq);
	} else {
		txbuf->cmd_tx.peerid =
				__cpu_to_le32(HTT_INVALID_PEERID);
	}

	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
		   flags0, flags1, msdu->len, msdu_id, frags_paddr,
		   (u32)skb_cb->paddr, vdev_id, tid, freq);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
			msdu->data, msdu->len);
	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

	sg_items[0].transfer_id = 0;
	sg_items[0].transfer_context = NULL;
	sg_items[0].vaddr = &txbuf->htc_hdr;
	sg_items[0].paddr = txbuf_paddr +
			    sizeof(txbuf->frags);
	sg_items[0].len = sizeof(txbuf->htc_hdr) +
			  sizeof(txbuf->cmd_hdr) +
			  sizeof(txbuf->cmd_tx);

	sg_items[1].transfer_id = 0;
	sg_items[1].transfer_context = NULL;
	sg_items[1].vaddr = msdu->data;
	sg_items[1].paddr = skb_cb->paddr;
	sg_items[1].len = prefetch_len;

	res = ath10k_hif_tx_sg(htt->ar,
			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
			       sg_items, ARRAY_SIZE(sg_items));
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
err:
	return res;
}
Example #13
static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_key *key = tx->key;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int hdrlen, len, tail;
	u8 *pos, *pn;
	int i;
	bool skip_hw;

	skip_hw = (tx->key->conf.flags & IEEE80211_KEY_FLAG_SW_MGMT) &&
		ieee80211_is_mgmt(hdr->frame_control);

	if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) &&
	    !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) &&
	    !skip_hw) {
		/* hwaccel - with no need for preallocated room for CCMP
		 * header or MIC fields */
		info->control.hw_key = &tx->key->conf;
		return 0;
	}

	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	len = skb->len - hdrlen;

	if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
		tail = 0;
	else
		tail = CCMP_MIC_LEN;

	if (WARN_ON(skb_tailroom(skb) < tail ||
		    skb_headroom(skb) < CCMP_HDR_LEN))
		return -1;

	pos = skb_push(skb, CCMP_HDR_LEN);
	memmove(pos, pos + CCMP_HDR_LEN, hdrlen);
	hdr = (struct ieee80211_hdr *) pos;
	pos += hdrlen;

	/* PN = PN + 1 */
	pn = key->u.ccmp.tx_pn;

	for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
		pn[i]++;
		if (pn[i])
			break;
	}

	ccmp_pn2hdr(pos, pn, key->conf.keyidx);

	if ((key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && !skip_hw) {
		/* hwaccel - with preallocated room for CCMP header */
		info->control.hw_key = &tx->key->conf;
		return 0;
	}

	pos += CCMP_HDR_LEN;
	ccmp_special_blocks(skb, pn, key->u.ccmp.tx_crypto_buf, 0);
	ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, key->u.ccmp.tx_crypto_buf, pos, len,
				  pos, skb_put(skb, CCMP_MIC_LEN));

	return 0;
}
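The "PN = PN + 1" loop above increments a counter stored MSB-first: it bumps the least-significant (last) byte and only keeps walking toward index 0 while a byte wraps to zero. A stand-alone sketch of that carry propagation:

#include <stdint.h>
#include <stdio.h>

#define CCMP_PN_LEN 6

static void pn_increment(uint8_t pn[CCMP_PN_LEN])
{
	int i;

	for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
		pn[i]++;
		if (pn[i])	/* no wrap, so the carry stops here */
			break;
	}
}

int main(void)
{
	uint8_t pn[CCMP_PN_LEN] = { 0, 0, 0, 0, 0, 0xff };

	pn_increment(pn);	/* 00 00 00 00 00 ff -> 00 00 00 00 01 00 */
	printf("%02x %02x %02x %02x %02x %02x\n",
	       pn[0], pn[1], pn[2], pn[3], pn[4], pn[5]);
	return 0;
}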
Example #14
void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
				   struct txentry_desc *txdesc,
				   const struct rt2x00_rate *hwrate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;

	if (tx_info->control.sta)
		txdesc->mpdu_density =
		    tx_info->control.sta->ht_cap.ampdu_density;
	else
		txdesc->mpdu_density = 0;

	txdesc->ba_size = 7;

	txdesc->stbc =
	    (tx_info->flags & IEEE80211_TX_CTL_STBC) >> IEEE80211_TX_CTL_STBC_SHIFT;

	/*
	 * If IEEE80211_TX_RC_MCS is set txrate->idx just contains the
	 * mcs rate to be used
	 */
	if (txrate->flags & IEEE80211_TX_RC_MCS) {
		txdesc->mcs = txrate->idx;
	} else {
		txdesc->mcs = rt2x00_get_rate_mcs(hwrate->mcs);
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->mcs |= 0x08;
	}

	/*
	 * Convert flags
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
		__set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);

	/*
	 * Determine HT Mix/Greenfield rate mode
	 */
	if (txrate->flags & IEEE80211_TX_RC_MCS)
		txdesc->rate_mode = RATE_MODE_HT_MIX;
	if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
		txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
	if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
		__set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);
	if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
		__set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);

	/*
	 * Determine IFS values
	 * - Use TXOP_BACKOFF for management frames
	 * - Use TXOP_SIFS for fragment bursts
	 * - Use TXOP_HTTXOP for everything else
	 *
	 * Note: rt2800 devices won't use CTS protection (if used)
	 * for frames not transmitted with TXOP_HTTXOP
	 */
	if (ieee80211_is_mgmt(hdr->frame_control))
		txdesc->txop = TXOP_BACKOFF;
	else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
		txdesc->txop = TXOP_SIFS;
	else
		txdesc->txop = TXOP_HTTXOP;
}
Example #15
static inline void mwl_tx_skb(struct mwl_priv *priv, int desc_num,
			      struct sk_buff *tx_skb)
{
	struct ieee80211_tx_info *tx_info;
	struct mwl_tx_ctrl *tx_ctrl;
	struct mwl_tx_hndl *tx_hndl;
	struct mwl_tx_desc *tx_desc;
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct mwl_vif *mwl_vif;
	struct ieee80211_key_conf *k_conf;
	bool ccmp = false;
	struct mwl_dma_data *dma_data;
	struct ieee80211_hdr *wh;
	dma_addr_t dma;

	if (WARN_ON(!tx_skb))
		return;

	tx_info = IEEE80211_SKB_CB(tx_skb);
	tx_ctrl = (struct mwl_tx_ctrl *)&tx_info->status;
	sta = (struct ieee80211_sta *)tx_ctrl->sta;
	vif = (struct ieee80211_vif *)tx_ctrl->vif;
	mwl_vif = mwl_dev_get_vif(vif);
	k_conf = (struct ieee80211_key_conf *)tx_ctrl->k_conf;

	mwl_tx_encapsulate_frame(priv, tx_skb, k_conf, &ccmp);

	dma_data = (struct mwl_dma_data *)tx_skb->data;
	wh = &dma_data->wh;

	if (ieee80211_is_data(wh->frame_control) ||
	    (ieee80211_is_mgmt(wh->frame_control) &&
	    ieee80211_has_protected(wh->frame_control) &&
	    !is_multicast_ether_addr(wh->addr1))) {
		if (is_multicast_ether_addr(wh->addr1)) {
			if (ccmp) {
				mwl_tx_insert_ccmp_hdr(dma_data->data,
						       mwl_vif->keyidx,
						       mwl_vif->iv16,
						       mwl_vif->iv32);
				INCREASE_IV(mwl_vif->iv16, mwl_vif->iv32);
			}
		} else {
			if (ccmp) {
				if (vif->type == NL80211_IFTYPE_STATION) {
					mwl_tx_insert_ccmp_hdr(dma_data->data,
							       mwl_vif->keyidx,
							       mwl_vif->iv16,
							       mwl_vif->iv32);
					INCREASE_IV(mwl_vif->iv16,
						    mwl_vif->iv32);
				} else {
					struct mwl_sta *sta_info;

					sta_info = mwl_dev_get_sta(sta);

					mwl_tx_insert_ccmp_hdr(dma_data->data,
							       0,
							       sta_info->iv16,
							       sta_info->iv32);
					INCREASE_IV(sta_info->iv16,
						    sta_info->iv32);
				}
			}
		}
	}

	tx_hndl = priv->desc_data[desc_num].pnext_tx_hndl;
	tx_hndl->psk_buff = tx_skb;
	tx_desc = tx_hndl->pdesc;
	tx_desc->tx_priority = tx_ctrl->tx_priority;
	tx_desc->qos_ctrl = cpu_to_le16(tx_ctrl->qos_ctrl);
	tx_desc->pkt_len = cpu_to_le16(tx_skb->len);
	tx_desc->packet_info = 0;
	tx_desc->data_rate = 0;
	tx_desc->type = tx_ctrl->type;
	tx_desc->xmit_control = tx_ctrl->xmit_control;
	tx_desc->sap_pkt_info = 0;
	dma = pci_map_single(priv->pdev, tx_skb->data,
			     tx_skb->len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(priv->pdev, dma)) {
		dev_kfree_skb_any(tx_skb);
		wiphy_err(priv->hw->wiphy,
			  "failed to map pci memory!\n");
		return;
	}
	tx_desc->pkt_ptr = cpu_to_le32(dma);
	tx_desc->status = cpu_to_le32(EAGLE_TXD_STATUS_FW_OWNED);
	/* make sure all the memory transactions done by cpu were completed */
	wmb();	/* data memory barrier */
	writel(MACREG_H2ARIC_BIT_PPA_READY,
	       priv->iobase1 + MACREG_REG_H2A_INTERRUPT_EVENTS);
	priv->desc_data[desc_num].pnext_tx_hndl = tx_hndl->pnext;
	priv->fw_desc_cnt[desc_num]++;
}
Example #16
static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *scratch,
				int encrypted)
{
	__le16 mask_fc;
	int a4_included, mgmt;
	u8 qos_tid;
	u8 *b_0, *aad;
	u16 data_len, len_a;
	unsigned int hdrlen;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	b_0 = scratch + 3 * AES_BLOCK_LEN;
	aad = scratch + 4 * AES_BLOCK_LEN;

	/*
	 * Mask FC: zero subtype b4 b5 b6 (if not mgmt)
	 * Retry, PwrMgt, MoreData; set Protected
	 */
	mgmt = ieee80211_is_mgmt(hdr->frame_control);
	mask_fc = hdr->frame_control;
	mask_fc &= ~cpu_to_le16(IEEE80211_FCTL_RETRY |
				IEEE80211_FCTL_PM | IEEE80211_FCTL_MOREDATA);
	if (!mgmt)
		mask_fc &= ~cpu_to_le16(0x0070);
	mask_fc |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);

	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	len_a = hdrlen - 2;
	a4_included = ieee80211_has_a4(hdr->frame_control);

	if (ieee80211_is_data_qos(hdr->frame_control))
		qos_tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
	else
		qos_tid = 0;

	data_len = skb->len - hdrlen - CCMP_HDR_LEN;
	if (encrypted)
		data_len -= CCMP_MIC_LEN;

	/* First block, b_0 */
	b_0[0] = 0x59; /* flags: Adata: 1, M: 011, L: 001 */
	/* Nonce: Nonce Flags | A2 | PN
	 * Nonce Flags: Priority (b0..b3) | Management (b4) | Reserved (b5..b7)
	 */
	b_0[1] = qos_tid | (mgmt << 4);
	memcpy(&b_0[2], hdr->addr2, ETH_ALEN);
	memcpy(&b_0[8], pn, CCMP_PN_LEN);
	/* l(m) */
	put_unaligned_be16(data_len, &b_0[14]);

	/* AAD (extra authenticate-only data) / masked 802.11 header
	 * FC | A1 | A2 | A3 | SC | [A4] | [QC] */
	put_unaligned_be16(len_a, &aad[0]);
	put_unaligned(mask_fc, (__le16 *)&aad[2]);
	memcpy(&aad[4], &hdr->addr1, 3 * ETH_ALEN);

	/* Mask Seq#, leave Frag# */
	aad[22] = *((u8 *) &hdr->seq_ctrl) & 0x0f;
	aad[23] = 0;

	if (a4_included) {
		memcpy(&aad[24], hdr->addr4, ETH_ALEN);
		aad[30] = qos_tid;
		aad[31] = 0;
	} else {
		memset(&aad[24], 0, ETH_ALEN + IEEE80211_QOS_CTL_LEN);
		aad[24] = qos_tid;
	}
}
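The first CCM block assembled above has a fixed 16-byte layout: the flags byte 0x59, a 13-byte nonce (priority/management flags, A2, PN), then the big-endian message length. A stand-alone sketch of that packing, with made-up address and PN values:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CCMP_PN_LEN 6
#define ETH_ALEN 6

static void build_b0(uint8_t b_0[16], uint8_t qos_tid, int mgmt,
		     const uint8_t *a2, const uint8_t *pn, uint16_t data_len)
{
	b_0[0] = 0x59;			/* flags: Adata: 1, M: 011, L: 001 */
	b_0[1] = qos_tid | (mgmt << 4);	/* nonce flags byte */
	memcpy(&b_0[2], a2, ETH_ALEN);	/* transmitter address */
	memcpy(&b_0[8], pn, CCMP_PN_LEN);
	b_0[14] = data_len >> 8;	/* l(m), big-endian */
	b_0[15] = data_len & 0xff;
}

int main(void)
{
	uint8_t b_0[16];
	const uint8_t a2[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	const uint8_t pn[CCMP_PN_LEN] = { 0, 0, 0, 0, 0, 1 };
	int i;

	build_b0(b_0, 5, 0, a2, pn, 100);
	for (i = 0; i < 16; i++)
		printf("%02x ", b_0[i]);
	printf("\n");
	return 0;
}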
Example #17
/**
 * rsi_core_xmit() - This function transmits the packets received from mac80211
 * @common: Pointer to the driver private structure.
 * @skb: Pointer to the socket buffer structure.
 *
 * Return: None.
 */
void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb)
{
	struct rsi_hw *adapter = common->priv;
	struct ieee80211_tx_info *info;
	struct skb_info *tx_params;
	struct ieee80211_hdr *wh = NULL;
	struct ieee80211_vif *vif;
	u8 q_num, tid = 0;
	struct rsi_sta *rsta = NULL;

	if ((!skb) || (!skb->len)) {
		rsi_dbg(ERR_ZONE, "%s: Null skb/zero Length packet\n",
			__func__);
		goto xmit_fail;
	}
	if (common->fsm_state != FSM_MAC_INIT_DONE) {
		rsi_dbg(ERR_ZONE, "%s: FSM state not open\n", __func__);
		goto xmit_fail;
	}
	if (common->wow_flags & RSI_WOW_ENABLED) {
		rsi_dbg(ERR_ZONE,
			"%s: Blocking Tx_packets when WOWLAN is enabled\n",
			__func__);
		goto xmit_fail;
	}

	info = IEEE80211_SKB_CB(skb);
	tx_params = (struct skb_info *)info->driver_data;
	wh = (struct ieee80211_hdr *)&skb->data[0];
	tx_params->sta_id = 0;

	vif = rsi_get_vif(adapter, wh->addr2);
	if (!vif)
		goto xmit_fail;
	tx_params->vif = vif;
	tx_params->vap_id = ((struct vif_priv *)vif->drv_priv)->vap_id;
	if ((ieee80211_is_mgmt(wh->frame_control)) ||
	    (ieee80211_is_ctl(wh->frame_control)) ||
	    (ieee80211_is_qos_nullfunc(wh->frame_control))) {
		if (ieee80211_is_assoc_req(wh->frame_control) ||
		    ieee80211_is_reassoc_req(wh->frame_control)) {
			struct ieee80211_bss_conf *bss = &vif->bss_conf;

			common->eapol4_confirm = false;
			rsi_hal_send_sta_notify_frame(common,
						      RSI_IFTYPE_STATION,
						      STA_CONNECTED, bss->bssid,
						      bss->qos, bss->aid, 0,
						      vif);
		}

		q_num = MGMT_SOFT_Q;
		skb->priority = q_num;

		if (rsi_prepare_mgmt_desc(common, skb)) {
			rsi_dbg(ERR_ZONE, "Failed to prepare desc\n");
			goto xmit_fail;
		}
	} else {
		if (ieee80211_is_data_qos(wh->frame_control)) {
			u8 *qos = ieee80211_get_qos_ctl(wh);

			tid = *qos & IEEE80211_QOS_CTL_TID_MASK;
			skb->priority = TID_TO_WME_AC(tid);
		} else {
			tid = IEEE80211_NONQOS_TID;
			skb->priority = BE_Q;
		}

		q_num = skb->priority;
		tx_params->tid = tid;

		if (((vif->type == NL80211_IFTYPE_AP) ||
		     (vif->type == NL80211_IFTYPE_P2P_GO)) &&
		    (!is_broadcast_ether_addr(wh->addr1)) &&
		    (!is_multicast_ether_addr(wh->addr1))) {
			rsta = rsi_find_sta(common, wh->addr1);
			if (!rsta)
				goto xmit_fail;
			tx_params->sta_id = rsta->sta_id;
		} else {
			tx_params->sta_id = 0;
		}

		if (rsta) {
			/* Start aggregation if not done for this tid */
			if (!rsta->start_tx_aggr[tid]) {
				rsta->start_tx_aggr[tid] = true;
				ieee80211_start_tx_ba_session(rsta->sta,
							      tid, 0);
			}
		}
		if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
			q_num = MGMT_SOFT_Q;
			skb->priority = q_num;
		}
		if (rsi_prepare_data_desc(common, skb)) {
			rsi_dbg(ERR_ZONE, "Failed to prepare data desc\n");
			goto xmit_fail;
		}
	}

	if ((q_num < MGMT_SOFT_Q) &&
	    ((skb_queue_len(&common->tx_queue[q_num]) + 1) >=
	     DATA_QUEUE_WATER_MARK)) {
		rsi_dbg(ERR_ZONE, "%s: sw queue full\n", __func__);
		if (!ieee80211_queue_stopped(adapter->hw, WME_AC(q_num)))
			ieee80211_stop_queue(adapter->hw, WME_AC(q_num));
		rsi_set_event(&common->tx_thread.event);
		goto xmit_fail;
	}

	rsi_core_queue_pkt(common, skb);
	rsi_dbg(DATA_TX_ZONE, "%s: ===> Scheduling TX thread <===\n", __func__);
	rsi_set_event(&common->tx_thread.event);

	return;

xmit_fail:
	rsi_dbg(ERR_ZONE, "%s: Failed to queue packet\n", __func__);
	/* Dropping pkt here */
	ieee80211_free_txskb(common->priv->hw, skb);
}
Example #18
/*
 * il_update_stats() records all MGMT, CTRL and DATA packets for both
 * TX and RX. Use debugfs to display the tx/rx stats.
 */
void
il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len)
{
	struct traffic_stats *stats;

	if (is_tx)
		stats = &il->tx_stats;
	else
		stats = &il->rx_stats;

	if (ieee80211_is_mgmt(fc)) {
		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
			stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
			stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
			stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
			stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
			stats->mgmt[MANAGEMENT_PROBE_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
			stats->mgmt[MANAGEMENT_PROBE_RESP]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_BEACON):
			stats->mgmt[MANAGEMENT_BEACON]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ATIM):
			stats->mgmt[MANAGEMENT_ATIM]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
			stats->mgmt[MANAGEMENT_DISASSOC]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_AUTH):
			stats->mgmt[MANAGEMENT_AUTH]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
			stats->mgmt[MANAGEMENT_DEAUTH]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ACTION):
			stats->mgmt[MANAGEMENT_ACTION]++;
			break;
		}
	} else if (ieee80211_is_ctl(fc)) {
		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
			stats->ctrl[CONTROL_BACK_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_BACK):
			stats->ctrl[CONTROL_BACK]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
			stats->ctrl[CONTROL_PSPOLL]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_RTS):
			stats->ctrl[CONTROL_RTS]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_CTS):
			stats->ctrl[CONTROL_CTS]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ACK):
			stats->ctrl[CONTROL_ACK]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_CFEND):
			stats->ctrl[CONTROL_CFEND]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
			stats->ctrl[CONTROL_CFENDACK]++;
			break;
		}
	} else {
		/* data */
		stats->data_cnt++;
		stats->data_bytes += len;
	}
}
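The switch above stays in little-endian wire order on both sides: fc is masked with cpu_to_le16(IEEE80211_FCTL_STYPE) and every case label is a cpu_to_le16() constant, so fc itself is never byte-swapped. A host-order sketch of the same subtype dispatch (values mirror include/linux/ieee80211.h):

#include <stdint.h>
#include <stdio.h>

#define FCTL_STYPE		0x00f0	/* mirrors IEEE80211_FCTL_STYPE */
#define STYPE_PROBE_RESP	0x0050	/* mirrors IEEE80211_STYPE_PROBE_RESP */
#define STYPE_BEACON		0x0080	/* mirrors IEEE80211_STYPE_BEACON */

int main(void)
{
	uint16_t fc = 0x0080;	/* management beacon */

	switch (fc & FCTL_STYPE) {
	case STYPE_PROBE_RESP:
		printf("probe response\n");
		break;
	case STYPE_BEACON:
		printf("beacon\n");
		break;
	default:
		printf("other subtype\n");
		break;
	}
	return 0;
}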
Example #19
/*
 * Sets most of the Tx cmd's fields
 */
static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
			       struct iwl_tx_cmd *tx_cmd,
			       struct ieee80211_tx_info *info, u8 sta_id)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	__le16 fc = hdr->frame_control;
	u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
	u32 len = skb->len + FCS_LEN;

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		tx_flags |= TX_CMD_FLG_ACK;
	else
		tx_flags &= ~TX_CMD_FLG_ACK;

	if (ieee80211_is_probe_resp(fc))
		tx_flags |= TX_CMD_FLG_TSF;
	else if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;

	/* High prio packet (wrt. BT coex) if it is EAPOL, MCAST or MGMT */
	if (info->band == IEEE80211_BAND_2GHZ &&
	    (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO ||
	     is_multicast_ether_addr(hdr->addr1) ||
	     ieee80211_is_back_req(fc) || ieee80211_is_mgmt(fc)))
		tx_flags |= TX_CMD_FLG_BT_DIS;

	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
	} else {
		tx_cmd->tid_tspec = IWL_TID_NON_QOS;
		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
			tx_flags |= TX_CMD_FLG_SEQ_CTL;
		else
			tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
	}

	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->pm_frame_timeout = cpu_to_le16(2);

		/* The spec allows Action frames in A-MPDU, but we don't
		 * support that.
		 */
		WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
	} else if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
		tx_cmd->pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->pm_frame_timeout = 0;
	}

	if (info->flags & IEEE80211_TX_CTL_AMPDU)
		tx_flags |= TX_CMD_FLG_PROT_REQUIRE;

	if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
	    !is_multicast_ether_addr(ieee80211_get_DA(hdr)))
		tx_flags |= TX_CMD_FLG_PROT_REQUIRE;

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = cpu_to_le32(tx_flags);
	/* Total # bytes to be transmitted */
	tx_cmd->len = cpu_to_le16((u16)skb->len);
	tx_cmd->next_frame_len = 0;
	tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
	tx_cmd->sta_id = sta_id;
}
Example #20
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct ath10k_hif_sg_item sg_items[2];
	struct htt_data_tx_desc_frag *frags;
	u8 vdev_id = skb_cb->vdev_id;
	u8 tid = skb_cb->htt.tid;
	int prefetch_len;
	int res;
	u8 flags0 = 0;
	u16 msdu_id, flags1 = 0;
	dma_addr_t paddr;
	u32 frags_paddr;
	bool use_frags;

	res = ath10k_htt_tx_inc_pending(htt);
	if (res)
		goto err;

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt);
	if (res < 0) {
		spin_unlock_bh(&htt->tx_lock);
		goto err_tx_dec;
	}
	msdu_id = res;
	htt->pending_tx[msdu_id] = msdu;
	spin_unlock_bh(&htt->tx_lock);

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	/* Since HTT 3.0 there is no separate mgmt tx command. However, in the
	 * case of mgmt tx using TX_FRM there is no tx fragment list. Instead
	 * of a fragment list, the host driver specifies the frame pointer
	 * directly. */
	use_frags = htt->target_version_major < 3 ||
		    !ieee80211_is_mgmt(hdr->frame_control);

	skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
					   &paddr);
	if (!skb_cb->htt.txbuf)
		goto err_free_msdu_id;
	skb_cb->htt.txbuf_paddr = paddr;

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res)
		goto err_free_txbuf;

	if (likely(use_frags)) {
		frags = skb_cb->htt.txbuf->frags;

		frags[0].paddr = __cpu_to_le32(skb_cb->paddr);
		frags[0].len = __cpu_to_le32(msdu->len);
		frags[1].paddr = 0;
		frags[1].len = 0;

		flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);

		frags_paddr = skb_cb->htt.txbuf_paddr;
	} else {
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);

		frags_paddr = skb_cb->paddr;
	}

	/* Normally all commands go through HTC which manages tx credits for
	 * each endpoint and notifies when tx is completed.
	 *
	 * HTT endpoint is creditless so there's no need to care about HTC
	 * flags. In that case it is trivial to fill the HTC header here.
	 *
	 * MSDU transmission is considered completed upon HTT event. This
	 * implies no relevant resources can be freed until after the event is
	 * received. That's why the HTC tx completion handler itself is ignored:
	 * transfer_context is set to NULL for all sg items.
	 *
	 * There is simply no point in pushing HTT TX_FRM through HTC tx path
	 * as it's a waste of resources. By bypassing HTC it is possible to
	 * avoid extra memory allocations, compress data structures and thus
	 * improve performance. */

	skb_cb->htt.txbuf->htc_hdr.eid = htt->eid;
	skb_cb->htt.txbuf->htc_hdr.len = __cpu_to_le16(
			sizeof(skb_cb->htt.txbuf->cmd_hdr) +
			sizeof(skb_cb->htt.txbuf->cmd_tx) +
			prefetch_len);
	skb_cb->htt.txbuf->htc_hdr.flags = 0;

	if (!ieee80211_has_protected(hdr->frame_control))
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;

	/* Prevent firmware from sending up tx inspection requests. There's
	 * nothing ath10k can do with frames requested for inspection so force
	 * it to simply relay a regular tx completion with discard status.
	 */
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

	skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	skb_cb->htt.txbuf->cmd_tx.flags0 = flags0;
	skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
	skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
	skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
	skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
	skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le16(HTT_INVALID_PEERID);
	skb_cb->htt.txbuf->cmd_tx.freq = __cpu_to_le16(skb_cb->htt.freq);

	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
		   flags0, flags1, msdu->len, msdu_id, frags_paddr,
		   (u32)skb_cb->paddr, vdev_id, tid, skb_cb->htt.freq);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
			msdu->data, msdu->len);
	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

	sg_items[0].transfer_id = 0;
	sg_items[0].transfer_context = NULL;
	sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr;
	sg_items[0].paddr = skb_cb->htt.txbuf_paddr +
			    sizeof(skb_cb->htt.txbuf->frags);
	sg_items[0].len = sizeof(skb_cb->htt.txbuf->htc_hdr) +
			  sizeof(skb_cb->htt.txbuf->cmd_hdr) +
			  sizeof(skb_cb->htt.txbuf->cmd_tx);

	sg_items[1].transfer_id = 0;
	sg_items[1].transfer_context = NULL;
	sg_items[1].vaddr = msdu->data;
	sg_items[1].paddr = skb_cb->paddr;
	sg_items[1].len = prefetch_len;

	res = ath10k_hif_tx_sg(htt->ar,
			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
			       sg_items, ARRAY_SIZE(sg_items));
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txbuf:
	dma_pool_free(htt->tx_pool,
		      skb_cb->htt.txbuf,
		      skb_cb->htt.txbuf_paddr);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	htt->pending_tx[msdu_id] = NULL;
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt);
err:
	return res;
}
Example #21
static void
mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_sta *sta;
	struct mt76_wcid *wcid = status->wcid;
	bool ps;
	int i;

	if (ieee80211_is_pspoll(hdr->frame_control) && !wcid) {
		sta = ieee80211_find_sta_by_ifaddr(dev->hw, hdr->addr2, NULL);
		if (sta)
			wcid = status->wcid = (struct mt76_wcid *) sta->drv_priv;
	}

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *) wcid, struct ieee80211_sta, drv_priv);

	if (status->signal <= 0)
		ewma_signal_add(&wcid->rssi, -status->signal);

	wcid->inactive_count = 0;

	if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
		return;

	if (ieee80211_is_pspoll(hdr->frame_control)) {
		ieee80211_sta_pspoll(sta);
		return;
	}

	if (ieee80211_has_morefrags(hdr->frame_control) ||
		!(ieee80211_is_mgmt(hdr->frame_control) ||
		  ieee80211_is_data(hdr->frame_control)))
		return;

	ps = ieee80211_has_pm(hdr->frame_control);

	if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
		   ieee80211_is_qos_nullfunc(hdr->frame_control)))
		ieee80211_sta_uapsd_trigger(sta, status->tid);

	if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
		return;

	if (ps)
		set_bit(MT_WCID_FLAG_PS, &wcid->flags);
	else
		clear_bit(MT_WCID_FLAG_PS, &wcid->flags);

	dev->drv->sta_ps(dev, sta, ps);
	ieee80211_sta_ps_transition(sta, ps);

	if (ps)
		return;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct mt76_txq *mtxq;

		if (!sta->txq[i])
			continue;

		mtxq = (struct mt76_txq *) sta->txq[i]->drv_priv;
		if (!skb_queue_empty(&mtxq->retry_q))
			ieee80211_schedule_txq(dev->hw, sta->txq[i]);
	}
}
Example #22
/*
 * Sets most of the Tx cmd's fields
 */
static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
			       struct iwl_tx_cmd *tx_cmd,
			       struct ieee80211_tx_info *info, u8 sta_id)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	__le16 fc = hdr->frame_control;
	u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
	u32 len = skb->len + FCS_LEN;
	u8 ac;

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		tx_flags |= TX_CMD_FLG_ACK;
	else
		tx_flags &= ~TX_CMD_FLG_ACK;

	if (ieee80211_is_probe_resp(fc))
		tx_flags |= TX_CMD_FLG_TSF;
	else if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;

	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
	} else {
		tx_cmd->tid_tspec = IWL_TID_NON_QOS;
		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
			tx_flags |= TX_CMD_FLG_SEQ_CTL;
		else
			tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
	}

	/* tid_tspec will default to 0 = BE when QOS isn't enabled */
	ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
	tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) <<
			TX_CMD_FLG_BT_PRIO_POS;

	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->pm_frame_timeout = cpu_to_le16(2);

		/* The spec allows Action frames in A-MPDU, but we don't
		 * support that.
		 */
		WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
	} else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
		tx_cmd->pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->pm_frame_timeout = 0;
	}

	if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
	    !is_multicast_ether_addr(ieee80211_get_DA(hdr)))
		tx_flags |= TX_CMD_FLG_PROT_REQUIRE;

	if ((mvm->fw->ucode_capa.capa[0] &
	     IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) &&
	    ieee80211_action_contains_tpc(skb))
		tx_flags |= TX_CMD_FLG_WRITE_TX_POWER;

	tx_cmd->tx_flags = cpu_to_le32(tx_flags);
	/* Total # bytes to be transmitted */
	tx_cmd->len = cpu_to_le16((u16)skb->len);
	tx_cmd->next_frame_len = 0;
	tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
	tx_cmd->sta_id = sta_id;
}
Example #23
void pcie_tx_xmit_ndp(struct ieee80211_hw *hw,
		      struct ieee80211_tx_control *control,
		      struct sk_buff *skb)
{
	struct mwl_priv *priv = hw->priv;
	struct pcie_priv *pcie_priv = priv->hif.priv;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_key_conf *k_conf;
	struct mwl_vif *mwl_vif;
	int index;
	struct ieee80211_sta *sta;
	struct mwl_sta *sta_info;
	struct ieee80211_hdr *wh;
	u8 *da;
	u16 qos;
	u8 tid = 0;
	struct mwl_ampdu_stream *stream = NULL;
	u16 tx_que_priority;
	bool mgmtframe = false;
	struct ieee80211_mgmt *mgmt;
	bool eapol_frame = false;
	bool start_ba_session = false;
	struct pcie_tx_ctrl_ndp *tx_ctrl;

	tx_info = IEEE80211_SKB_CB(skb);
	k_conf = tx_info->control.hw_key;
	mwl_vif = mwl_dev_get_vif(tx_info->control.vif);
	index = skb_get_queue_mapping(skb);
	sta = control->sta;
	sta_info = sta ? mwl_dev_get_sta(sta) : NULL;

	wh = (struct ieee80211_hdr *)skb->data;

	if (ieee80211_is_data_qos(wh->frame_control))
		qos = le16_to_cpu(*((__le16 *)ieee80211_get_qos_ctl(wh)));
	else
		qos = 0xFFFF;

	if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
		index = IEEE80211_AC_VO;
		eapol_frame = true;
	}

	if (ieee80211_is_mgmt(wh->frame_control)) {
		mgmtframe = true;
		mgmt = (struct ieee80211_mgmt *)skb->data;
	}

	if (mgmtframe) {
		u16 capab;

		if (unlikely(ieee80211_is_action(wh->frame_control) &&
			     mgmt->u.action.category == WLAN_CATEGORY_BACK &&
			     mgmt->u.action.u.addba_req.action_code ==
			     WLAN_ACTION_ADDBA_REQ)) {
			capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
			tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
			index = utils_tid_to_ac(tid);
		}

		if (unlikely(ieee80211_is_assoc_req(wh->frame_control)))
			utils_add_basic_rates(hw->conf.chandef.chan->band, skb);

		if (ieee80211_is_probe_req(wh->frame_control) ||
		    ieee80211_is_probe_resp(wh->frame_control))
			tx_que_priority = PROBE_RESPONSE_TXQNUM;
		else {
			if (((mwl_vif->macid == SYSADPT_NUM_OF_AP) &&
			     (!ieee80211_has_protected(wh->frame_control) ||
			      (ieee80211_has_protected(wh->frame_control) &&
			       ieee80211_is_auth(wh->frame_control)))) ||
			    !sta ||
			    ieee80211_is_auth(wh->frame_control) ||
			    ieee80211_is_assoc_req(wh->frame_control) ||
			    ieee80211_is_assoc_resp(wh->frame_control))
				tx_que_priority = MGMT_TXQNUM;
			else {
				if (is_multicast_ether_addr(wh->addr1) &&
				    (mwl_vif->macid != SYSADPT_NUM_OF_AP))
					tx_que_priority = mwl_vif->macid *
						SYSADPT_MAX_TID;
				else
					tx_que_priority = SYSADPT_MAX_TID *
						(sta_info->stnid +
						QUEUE_STAOFFSET) + 6;
			}
		}

		if (ieee80211_is_assoc_resp(wh->frame_control) ||
		    ieee80211_is_reassoc_resp(wh->frame_control)) {
			struct sk_buff *ack_skb;
			struct ieee80211_tx_info *ack_info;

			ack_skb = skb_copy(skb, GFP_ATOMIC);
			ack_info = IEEE80211_SKB_CB(ack_skb);
			pcie_tx_prepare_info(priv, 0, ack_info);
			ieee80211_tx_status(hw, ack_skb);
		}

		pcie_tx_encapsulate_frame(priv, skb, k_conf, NULL);
	} else {
Example #24
/**
 * rsi_core_xmit() - This function transmits the packets received from mac80211
 * @common: Pointer to the driver private structure.
 * @skb: Pointer to the socket buffer structure.
 *
 * Return: None.
 */
void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb)
{
	struct rsi_hw *adapter = common->priv;
	struct ieee80211_tx_info *info;
	struct skb_info *tx_params;
	struct ieee80211_hdr *tmp_hdr = NULL;
	u8 q_num, tid = 0;

	if ((!skb) || (!skb->len)) {
		rsi_dbg(ERR_ZONE, "%s: Null skb/zero Length packet\n",
			__func__);
		goto xmit_fail;
	}
	info = IEEE80211_SKB_CB(skb);
	tx_params = (struct skb_info *)info->driver_data;
	tmp_hdr = (struct ieee80211_hdr *)&skb->data[0];

	if (common->fsm_state != FSM_MAC_INIT_DONE) {
		rsi_dbg(ERR_ZONE, "%s: FSM state not open\n", __func__);
		goto xmit_fail;
	}

	if ((ieee80211_is_mgmt(tmp_hdr->frame_control)) ||
	    (ieee80211_is_ctl(tmp_hdr->frame_control)) ||
	    (ieee80211_is_qos_nullfunc(tmp_hdr->frame_control))) {
		q_num = MGMT_SOFT_Q;
		skb->priority = q_num;
	} else {
		if (ieee80211_is_data_qos(tmp_hdr->frame_control)) {
			tid = (skb->data[24] & IEEE80211_QOS_TID);
			skb->priority = TID_TO_WME_AC(tid);
		} else {
			tid = IEEE80211_NONQOS_TID;
			skb->priority = BE_Q;
		}
		q_num = skb->priority;
		tx_params->tid = tid;
		tx_params->sta_id = 0;
	}

	if ((q_num != MGMT_SOFT_Q) &&
	    ((skb_queue_len(&common->tx_queue[q_num]) + 1) >=
	     DATA_QUEUE_WATER_MARK)) {
		rsi_dbg(ERR_ZONE, "%s: sw queue full\n", __func__);
		if (!ieee80211_queue_stopped(adapter->hw, WME_AC(q_num)))
			ieee80211_stop_queue(adapter->hw, WME_AC(q_num));
		rsi_set_event(&common->tx_thread.event);
		goto xmit_fail;
	}

	rsi_core_queue_pkt(common, skb);
	rsi_dbg(DATA_TX_ZONE, "%s: ===> Scheduling TX thread <===\n", __func__);
	rsi_set_event(&common->tx_thread.event);

	return;

xmit_fail:
	rsi_dbg(ERR_ZONE, "%s: Failed to queue packet\n", __func__);
	/* Dropping pkt here */
	ieee80211_free_txskb(common->priv->hw, skb);
}
Example #25
BOOLEAN CFG80211_CheckActionFrameType(
        IN  RTMP_ADAPTER 								 *pAd,
		IN	PUCHAR										 preStr,
		IN	PUCHAR										 pData,
		IN	UINT32                              		 length)
{
	BOOLEAN isP2pFrame = FALSE;
	struct ieee80211_mgmt *mgmt;
	mgmt = (struct ieee80211_mgmt *)pData;
	if (ieee80211_is_mgmt(mgmt->frame_control)) 
	{
		if (ieee80211_is_probe_resp(mgmt->frame_control)) 
		{
			DBGPRINT(RT_DEBUG_INFO, ("CFG80211_PKT: %s ProbeRsp Frame %d\n", preStr, pAd->LatchRfRegs.Channel));
			if (!mgmt->u.probe_resp.timestamp)
			{
				struct timeval tv;
				do_gettimeofday(&tv);
				mgmt->u.probe_resp.timestamp = ((UINT64) tv.tv_sec * 1000000) + tv.tv_usec;
			}
		}
		else if (ieee80211_is_disassoc(mgmt->frame_control))
		{
			DBGPRINT(RT_DEBUG_ERROR, ("CFG80211_PKT: %s DISASSOC Frame\n", preStr));
		}
		else if (ieee80211_is_deauth(mgmt->frame_control))
		{
			DBGPRINT(RT_DEBUG_ERROR, ("CFG80211_PKT: %s Deauth Frame\n", preStr, pAd->LatchRfRegs.Channel));
		}
		else if (ieee80211_is_action(mgmt->frame_control)) 
		{
			PP2P_PUBLIC_FRAME pFrame = (PP2P_PUBLIC_FRAME)pData;
			if ((pFrame->p80211Header.FC.SubType == SUBTYPE_ACTION) &&
			    (pFrame->Category == CATEGORY_PUBLIC) &&
			    (pFrame->Action == ACTION_WIFI_DIRECT))
			{
				isP2pFrame = TRUE;
				switch (pFrame->Subtype)
				{
					case GO_NEGOCIATION_REQ:
						DBGPRINT(RT_DEBUG_ERROR, ("CFG80211_PKT: %s GO_NEGOCIATION_REQ %d\n",
									preStr, pAd->LatchRfRegs.Channel));
						break;	

					case GO_NEGOCIATION_RSP:
						DBGPRINT(RT_DEBUG_ERROR, ("CFG80211_PKT: %s GO_NEGOCIATION_RSP %d\n",
									preStr, pAd->LatchRfRegs.Channel));
						break;

					case GO_NEGOCIATION_CONFIRM:
						DBGPRINT(RT_DEBUG_ERROR, ("CFG80211_PKT: %s GO_NEGOCIATION_CONFIRM %d\n",
									preStr,  pAd->LatchRfRegs.Channel));
						break;

					case P2P_PROVISION_REQ:
						DBGPRINT(RT_DEBUG_ERROR, ("CFG80211_PKT: %s P2P_PROVISION_REQ %d\n", 
									preStr, pAd->LatchRfRegs.Channel));
						break;

					case P2P_PROVISION_RSP:
						DBGPRINT(RT_DEBUG_ERROR, ("CFG80211_PKT: %s P2P_PROVISION_RSP %d\n", 
									preStr, pAd->LatchRfRegs.Channel));
						break;

					case P2P_INVITE_REQ:
						DBGPRINT(RT_DEBUG_ERROR, ("CFG80211_PKT: %s P2P_INVITE_REQ %d\n", 
									preStr, pAd->LatchRfRegs.Channel));
						break;

					case P2P_INVITE_RSP:
						DBGPRINT(RT_DEBUG_ERROR, ("CFG80211_PKT: %s P2P_INVITE_RSP %d\n", 
									preStr, pAd->LatchRfRegs.Channel));
						break;
					case P2P_DEV_DIS_REQ:
                        DBGPRINT(RT_DEBUG_ERROR, ("CFG80211_PKT: %s P2P_DEV_DIS_REQ %d\n",
                                                preStr, pAd->LatchRfRegs.Channel));
						break;						
					case P2P_DEV_DIS_RSP:
                        DBGPRINT(RT_DEBUG_ERROR, ("CFG80211_PKT: %s P2P_DEV_DIS_RSP %d\n",
                                                preStr, pAd->LatchRfRegs.Channel));
                        break;
				}
			}
			else if ((pFrame->p80211Header.FC.SubType == SUBTYPE_ACTION) &&
				 (pFrame->Category == CATEGORY_PUBLIC) &&
				 ((pFrame->Action == ACTION_GAS_INITIAL_REQ) ||
				  (pFrame->Action == ACTION_GAS_INITIAL_RSP) ||
				  (pFrame->Action == ACTION_GAS_COMEBACK_REQ) ||
				  (pFrame->Action == ACTION_GAS_COMEBACK_RSP)))
			{
				isP2pFrame = TRUE;
			}
			else if ((pFrame->Category == CATEGORY_VENDOR_SPECIFIC_WFD) &&
				 RTMPEqualMemory(&pFrame->Octet[1], CFG_P2POUIBYTE, 4))
			{
				isP2pFrame = TRUE;
				switch (pFrame->Subtype)
				{
                    case P2PACT_NOA:
                        DBGPRINT(RT_DEBUG_ERROR, ("CFG80211_PKT: %s P2PACT_NOA %d\n",
                                                  preStr, pAd->LatchRfRegs.Channel));
						break;
					case P2PACT_PERSENCE_REQ:
                        DBGPRINT(RT_DEBUG_ERROR, ("CFG80211_PKT: %s P2PACT_PERSENCE_REQ %d\n",
                                                  preStr, pAd->LatchRfRegs.Channel));
						break;
					case P2PACT_PERSENCE_RSP:
                        DBGPRINT(RT_DEBUG_ERROR, ("CFG80211_PKT: %s P2PACT_PERSENCE_RSP %d\n",
                                                  preStr, pAd->LatchRfRegs.Channel));
						break;
					case P2PACT_GO_DISCOVER_REQ:
                        DBGPRINT(RT_DEBUG_ERROR, ("CFG80211_PKT: %s P2PACT_GO_DISCOVER_REQ %d\n",
                                                  preStr, pAd->LatchRfRegs.Channel));
						break;
				}
			}
			else
			{
				DBGPRINT(RT_DEBUG_INFO, ("CFG80211_PKT: %s ACTION Frame with Channel%d\n", preStr, pAd->LatchRfRegs.Channel));
			}
		}	
		else
		{
			DBGPRINT(RT_DEBUG_ERROR, ("CFG80211_PKT: %s UNKOWN MGMT FRAME TYPE\n", preStr));
		}
	}
	else
	{
		DBGPRINT(RT_DEBUG_ERROR, ("CFG80211_PKT: %s UNKOWN FRAME TYPE\n", preStr));
	}

	return isP2pFrame;
}
Example #26
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct device *dev = htt->ar->dev;
	struct htt_cmd *cmd;
	struct htt_data_tx_desc_frag *tx_frags;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct sk_buff *txdesc = NULL;
	bool use_frags;
	u8 vdev_id = ATH10K_SKB_CB(msdu)->vdev_id;
	u8 tid;
	int prefetch_len, desc_len;
	int msdu_id = -1;
	int res;
	u8 flags0;
	u16 flags1;

	res = ath10k_htt_tx_inc_pending(htt);
	if (res)
		goto err;

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt);
	if (res < 0) {
		spin_unlock_bh(&htt->tx_lock);
		goto err_tx_dec;
	}
	msdu_id = res;
	htt->pending_tx[msdu_id] = msdu;
	spin_unlock_bh(&htt->tx_lock);

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len;

	txdesc = ath10k_htc_alloc_skb(desc_len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}

	/* Since HTT 3.0 there is no separate mgmt tx command. However, in the
	 * case of mgmt tx using TX_FRM there is no tx fragment list. Instead
	 * of a fragment list, the host driver specifies the frame pointer
	 * directly. */
	use_frags = htt->target_version_major < 3 ||
		    !ieee80211_is_mgmt(hdr->frame_control);

	if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) {
		ath10k_warn("htt alignment check failed. dropping packet.\n");
		res = -EIO;
		goto err_free_txdesc;
	}

	if (use_frags) {
		skb_cb->htt.frag_len = sizeof(*tx_frags) * 2;
		skb_cb->htt.pad_len = (unsigned long)msdu->data -
				      round_down((unsigned long)msdu->data, 4);

		skb_push(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
	} else {
		skb_cb->htt.frag_len = 0;
		skb_cb->htt.pad_len = 0;
	}

	res = ath10k_skb_map(dev, msdu);
	if (res)
		goto err_pull_txfrag;

	if (use_frags) {
		dma_sync_single_for_cpu(dev, skb_cb->paddr, msdu->len,
					DMA_TO_DEVICE);

		/* tx fragment list must be terminated with zero-entry */
		tx_frags = (struct htt_data_tx_desc_frag *)msdu->data;
		tx_frags[0].paddr = __cpu_to_le32(skb_cb->paddr +
						  skb_cb->htt.frag_len +
						  skb_cb->htt.pad_len);
		tx_frags[0].len   = __cpu_to_le32(msdu->len -
						  skb_cb->htt.frag_len -
						  skb_cb->htt.pad_len);
		tx_frags[1].paddr = __cpu_to_le32(0);
		tx_frags[1].len   = __cpu_to_le32(0);

		dma_sync_single_for_device(dev, skb_cb->paddr, msdu->len,
					   DMA_TO_DEVICE);
	}

	ath10k_dbg(ATH10K_DBG_HTT, "msdu 0x%llx\n",
		   (unsigned long long) ATH10K_SKB_CB(msdu)->paddr);
	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "msdu: ",
			msdu->data, msdu->len);

	skb_put(txdesc, desc_len);
	cmd = (struct htt_cmd *)txdesc->data;

	tid = ATH10K_SKB_CB(msdu)->htt.tid;

	ath10k_dbg(ATH10K_DBG_HTT, "htt data tx using tid %hhu\n", tid);

	flags0  = 0;
	if (!ieee80211_has_protected(hdr->frame_control))
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
	flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

	if (use_frags)
		flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
	else
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);

	flags1  = 0;
	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;

	cmd->hdr.msg_type        = HTT_H2T_MSG_TYPE_TX_FRM;
	cmd->data_tx.flags0      = flags0;
	cmd->data_tx.flags1      = __cpu_to_le16(flags1);
	cmd->data_tx.len         = __cpu_to_le16(msdu->len -
						 skb_cb->htt.frag_len -
						 skb_cb->htt.pad_len);
	cmd->data_tx.id          = __cpu_to_le16(msdu_id);
	cmd->data_tx.frags_paddr = __cpu_to_le32(skb_cb->paddr);
	cmd->data_tx.peerid      = __cpu_to_le32(HTT_INVALID_PEERID);

	memcpy(cmd->data_tx.prefetch, hdr, prefetch_len);

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	ath10k_skb_unmap(dev, msdu);
err_pull_txfrag:
	skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
err_free_txdesc:
	dev_kfree_skb_any(txdesc);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	htt->pending_tx[msdu_id] = NULL;
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt);
err:
	return res;
}
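The zero-entry terminator written into tx_frags[1] above is the part of this path that is easiest to get wrong. As a reading aid, here is a minimal standalone sketch of the same convention; struct frag_desc is a local stand-in for htt_data_tx_desc_frag (the real fields are __le32 and would need byte-swapping on big-endian hosts), and fill_frag_list is an illustrative name, not an ath10k function.

#include <stdint.h>
#include <string.h>

/* Local stand-in for the fragment descriptor layout assumed above. */
struct frag_desc {
	uint32_t paddr;	/* DMA address of the fragment */
	uint32_t len;	/* fragment length in bytes */
};

/* Build a one-fragment list terminated by an all-zero entry, mirroring
 * the tx_frags[0]/tx_frags[1] setup in ath10k_htt_tx(). */
static void fill_frag_list(struct frag_desc frags[2],
			   uint32_t data_paddr, uint32_t data_len)
{
	frags[0].paddr = data_paddr;
	frags[0].len   = data_len;
	memset(&frags[1], 0, sizeof(frags[1]));	/* zero terminator */
}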
Exemplo n.º 27
0
static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
					struct ieee80211_hdr *hdr,
					u16 len,
					u32 ampdu_status,
					struct iwl_rx_mem_buffer *rxb,
					struct ieee80211_rx_status *stats)
{
	struct sk_buff *skb;
	int ret = 0;
	__le16 fc = hdr->frame_control;

	/* We only process data packets if the interface is open */
	if (unlikely(!priv->is_open)) {
		IWL_DEBUG_DROP_LIMIT(priv,
		    "Dropping packet while interface is not open.\n");
		return;
	}

	/* In case of HW accelerated crypto and bad decryption, drop */
	if (!priv->cfg->mod_params->sw_crypto &&
	    iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
		return;

	skb = alloc_skb(IWL_LINK_HDR_MAX * 2, GFP_ATOMIC);
	if (!skb) {
		IWL_ERR(priv, "alloc_skb failed\n");
		return;
	}

	skb_reserve(skb, IWL_LINK_HDR_MAX);
	skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);

	/* mac80211 currently doesn't support paged SKBs. Convert to a
	 * linear SKB for management frames and for data frames that
	 * require software decryption or software defragmentation. */
	if (ieee80211_is_mgmt(fc) ||
	    ieee80211_has_protected(fc) ||
	    ieee80211_has_morefrags(fc) ||
	    le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG ||
	    (ieee80211_is_data_qos(fc) &&
	     *ieee80211_get_qos_ctl(hdr) &
	     IEEE80211_QOS_CONTROL_A_MSDU_PRESENT))
		ret = skb_linearize(skb);
	else
		ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
			 0 : -ENOMEM;

	if (ret) {
		kfree_skb(skb);
		goto out;
	}

	/*
	 * XXX: We cannot touch the page and its virtual memory (hdr) after
	 * here. It might have already been freed by the above skb change.
	 */

	iwl_update_stats(priv, false, fc, len);
	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	ieee80211_rx(priv->hw, skb);
 out:
	priv->alloc_rxb_page--;
	rxb->page = NULL;
}
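The five-clause test that chooses skb_linearize() over a partial pull is dense, so here is a hedged restatement of the same condition as a standalone predicate. needs_linear_skb is an invented name for illustration, not an iwlwifi or mac80211 API; it assumes the same mac80211 helpers and constants used above.

/* Restates the linearization condition from iwl_pass_packet_to_mac80211():
 * management frames, protected frames (software decryption), fragmented
 * frames (software defragmentation) and A-MSDUs must be linear. */
static bool needs_linear_skb(struct ieee80211_hdr *hdr)
{
	__le16 fc = hdr->frame_control;

	return ieee80211_is_mgmt(fc) ||
	       ieee80211_has_protected(fc) ||
	       ieee80211_has_morefrags(fc) ||
	       (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) ||
	       (ieee80211_is_data_qos(fc) &&
		*ieee80211_get_qos_ctl(hdr) &
		IEEE80211_QOS_CONTROL_A_MSDU_PRESENT);
}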
Exemplo n.º 28
0
static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct rtl8192_tx_ring *ring;
	struct rtl_tx_desc *pdesc;
	u8 idx;
	unsigned int queue_index, hw_queue;
	unsigned long flags;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
	__le16 fc = hdr->frame_control;
	u8 *pda_addr = hdr->addr1;
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	/* sequence number (SSN) */
	u8 *qc = NULL;
	u8 tid = 0;
	u16 seq_number = 0;
	u8 own;
	u8 temp_one = 1;

	if (ieee80211_is_mgmt(fc))
		rtl_tx_mgmt_proc(hw, skb);
	rtl_action_proc(hw, skb, true);

	queue_index = skb_get_queue_mapping(skb);
	hw_queue = _rtl_mac_to_hwqueue(fc, queue_index);

	if (is_multicast_ether_addr(pda_addr))
		rtlpriv->stats.txbytesmulticast += skb->len;
	else if (is_broadcast_ether_addr(pda_addr))
		rtlpriv->stats.txbytesbroadcast += skb->len;
	else
		rtlpriv->stats.txbytesunicast += skb->len;

	spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);

	ring = &rtlpci->tx_ring[hw_queue];
	if (hw_queue != BEACON_QUEUE)
		idx = (ring->idx + skb_queue_len(&ring->queue)) %
				ring->entries;
	else
		idx = 0;

	pdesc = &ring->desc[idx];
	own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
			true, HW_DESC_OWN);

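	/* OWN still set means the hardware has not released this
	 * descriptor yet, i.e. the ring is full; the beacon queue is
	 * exempt because it always reuses entry 0. */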
	if ((own == 1) && (hw_queue != BEACON_QUEUE)) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
			 ("No more TX desc@%d, ring->idx = %d, "
			  "idx = %d, skb_queue_len = %d\n",
			  hw_queue, ring->idx, idx,
			  skb_queue_len(&ring->queue)));

		spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
		return skb->len;
	}

	/*
	 *if(ieee80211_is_nullfunc(fc)) {
	 *      spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
	 *      return 1;
	 *}
	 */

	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;

		seq_number = mac->tids[tid].seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		/*
		 *hdr->seq_ctrl = hdr->seq_ctrl &
		 *cpu_to_le16(IEEE80211_SCTL_FRAG);
		 *hdr->seq_ctrl |= cpu_to_le16(seq_number);
		 */

		seq_number += 1;
	}

	if (ieee80211_is_data(fc))
		rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);

	rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
					info, skb, hw_queue);

	__skb_queue_tail(&ring->queue, skb);

	rtlpriv->cfg->ops->set_desc((u8 *) pdesc, true,
				    HW_DESC_OWN, (u8 *)&temp_one);

	if (!ieee80211_has_morefrags(hdr->frame_control)) {
		if (qc)
			mac->tids[tid].seq_number = seq_number;
	}

	if ((ring->entries - skb_queue_len(&ring->queue)) < 2 &&
	    hw_queue != BEACON_QUEUE) {

		RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
			 ("less desc left, stop skb_queue@%d, "
			  "ring->idx = %d, "
			  "idx = %d, skb_queue_len = %d\n",
			  hw_queue, ring->idx, idx,
			  skb_queue_len(&ring->queue)));

		ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
	}

	spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);

	rtlpriv->cfg->ops->tx_polling(hw, hw_queue);

	return 0;
}
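The ring arithmetic above — the next write slot is the hardware read index plus the number of queued frames, modulo the ring size, with a queue stop once fewer than two descriptors remain — is worth seeing in isolation. A minimal sketch with invented helper names (not rtlwifi API):

/* Next free descriptor slot: hw_idx is the entry the hardware will
 * reclaim next, queue_len is the number of frames still pending. */
static unsigned int next_free_slot(unsigned int hw_idx,
				   unsigned int queue_len,
				   unsigned int entries)
{
	return (hw_idx + queue_len) % entries;
}

/* Mirrors the "fewer than 2 descriptors left" stop condition above. */
static int ring_almost_full(unsigned int queue_len, unsigned int entries)
{
	return (entries - queue_len) < 2;
}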
Exemplo n.º 29
0
static void ccmp_special_blocks(struct ieee80211_hdr *hdr, size_t hdrlen,
				const u64 pn, u8 *b_0, u8 *aad)
{
	__le16 mask_fc;
	int a4_included, mgmt;
	u8 qos_tid;
	u16 len_a;

	/* Mask FC: zero the subtype bits b4 b5 b6 (if not mgmt) and the
	 * Retry, PwrMgt and MoreData flags; set the Protected flag.
	 */
	mgmt = ieee80211_is_mgmt(hdr->frame_control);
	mask_fc = hdr->frame_control;
	mask_fc &= ~cpu_to_le16(IEEE80211_FCTL_RETRY |
				IEEE80211_FCTL_PM | IEEE80211_FCTL_MOREDATA);
	if (!mgmt)
		mask_fc &= ~cpu_to_le16(0x0070);
	mask_fc |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);

	len_a = hdrlen - 2;
	a4_included = ieee80211_has_a4(hdr->frame_control);

	if (ieee80211_is_data_qos(hdr->frame_control))
		qos_tid = *ieee80211_get_qos_ctl(hdr) &
			IEEE80211_QOS_CTL_TID_MASK;
	else
		qos_tid = 0;

	/* In CCM, the initial vectors (IV) used for CTR mode encryption and CBC
	 * mode authentication are not allowed to collide, yet both are derived
	 * from this vector b_0. We only set L := 1 here to indicate that the
	 * data size can be represented in (L+1) bytes. The CCM layer will take
	 * care of storing the data length in the top (L+1) bytes and setting
	 * and clearing the other bits as is required to derive the two IVs.
	 */
	b_0[0] = 0x1;

	/* Nonce: Nonce Flags | A2 | PN
	 * Nonce Flags: Priority (b0..b3) | Management (b4) | Reserved (b5..b7)
	 */
	b_0[1] = qos_tid | (mgmt << 4);
	ether_addr_copy(&b_0[2], hdr->addr2);
	b_0[8]  = pn >> 40;
	b_0[9]  = pn >> 32;
	b_0[10] = pn >> 24;
	b_0[11] = pn >> 16;
	b_0[12] = pn >> 8;
	b_0[13] = pn;

	/* AAD (extra authenticate-only data) / masked 802.11 header
	 * FC | A1 | A2 | A3 | SC | [A4] | [QC]
	 */
	put_unaligned_be16(len_a, &aad[0]);
	put_unaligned(mask_fc, (__le16 *)&aad[2]);
	memcpy(&aad[4], &hdr->addr1, 3 * ETH_ALEN);

	/* Mask Seq#, leave Frag# */
	aad[22] = *((u8 *) &hdr->seq_ctrl) & 0x0f;
	aad[23] = 0;

	if (a4_included) {
		ether_addr_copy(&aad[24], hdr->addr4);
		aad[30] = qos_tid;
		aad[31] = 0;
	} else {
		memset(&aad[24], 0, ETH_ALEN + IEEE80211_QOS_CTL_LEN);
		aad[24] = qos_tid;
	}
}
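The 13-byte CCMP nonce assembled into b_0[1..13] above — one flags byte carrying the TID and the management bit, the 6-byte transmitter address A2, then the 48-bit PN most-significant byte first — reads more easily as a standalone helper. This is an illustrative sketch with plain integer types, not a mac80211 function:

#include <stdint.h>
#include <string.h>

/* Build the CCMP nonce exactly as ccmp_special_blocks() lays it out in
 * b_0: flags byte (TID in b0..b3, management flag in b4), 6-byte A2,
 * then the 48-bit packet number in big-endian order. */
static void ccmp_build_nonce(uint8_t nonce[13], uint8_t qos_tid, int mgmt,
			     const uint8_t a2[6], uint64_t pn)
{
	int i;

	nonce[0] = qos_tid | ((mgmt ? 1 : 0) << 4);
	memcpy(&nonce[1], a2, 6);
	for (i = 0; i < 6; i++)
		nonce[7 + i] = (uint8_t)(pn >> (8 * (5 - i)));
}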