Example #1
/*
 * This function processes the received buffer.
 *
 * The function looks into the RxPD and performs sanity tests on the
 * received buffer to ensure it is a valid packet before processing it
 * further. If the packet is determined to be aggregated, it is
 * de-aggregated accordingly. Non-unicast packets are sent directly to
 * the kernel/upper layers. Unicast packets are handed over to the
 * Rx reordering routine if 11n is enabled.
 *
 * The completion callback is called after processing is complete.
 */
int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
				  struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	int ret = 0;
	struct rxpd *local_rx_pd;
	struct rx_packet_hdr *rx_pkt_hdr;
	u8 ta[ETH_ALEN];
	u16 rx_pkt_type, rx_pkt_offset, rx_pkt_length, seq_num;
	struct mwifiex_sta_node *sta_ptr;

	local_rx_pd = (struct rxpd *) (skb->data);
	rx_pkt_type = le16_to_cpu(local_rx_pd->rx_pkt_type);
	rx_pkt_offset = le16_to_cpu(local_rx_pd->rx_pkt_offset);
	rx_pkt_length = le16_to_cpu(local_rx_pd->rx_pkt_length);
	seq_num = le16_to_cpu(local_rx_pd->seq_num);

	rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_offset;

	if ((rx_pkt_offset + rx_pkt_length) > (u16) skb->len) {
		dev_err(adapter->dev,
			"wrong rx packet: len=%d, rx_pkt_offset=%d, rx_pkt_length=%d\n",
			skb->len, rx_pkt_offset, rx_pkt_length);
		priv->stats.rx_dropped++;
		dev_kfree_skb_any(skb);
		return ret;
	}

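	/* Management frames are handed off for separate processing; the
	 * skb is always consumed (freed) on this path.
	 */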
	if (rx_pkt_type == PKT_TYPE_MGMT) {
		ret = mwifiex_process_mgmt_packet(priv, skb);
		if (ret)
			dev_err(adapter->dev, "Rx of mgmt packet failed");
		dev_kfree_skb_any(skb);
		return ret;
	}

	/*
	 * If 11n Rx reordering does not apply to this packet, or the
	 * packet is not addressed to us, send it directly to the OS.
	 * Don't pass it through Rx reordering.
	 */
	if ((!IS_11N_ENABLED(priv) &&
	     !(ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
	       !(local_rx_pd->flags & MWIFIEX_RXPD_FLAGS_TDLS_PACKET))) ||
	    !ether_addr_equal_unaligned(priv->curr_addr, rx_pkt_hdr->eth803_hdr.h_dest)) {
		mwifiex_process_rx_packet(priv, skb);
		return ret;
	}

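	/* Pick the transmitter address (TA) used to key the Rx reorder
	 * tables: the frame's source address for RA-based queuing and for
	 * TDLS direct-link packets, otherwise the MAC address of the BSS
	 * we are associated with.
	 */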
	if (mwifiex_queuing_ra_based(priv) ||
	    (ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
	     local_rx_pd->flags & MWIFIEX_RXPD_FLAGS_TDLS_PACKET)) {
		memcpy(ta, rx_pkt_hdr->eth803_hdr.h_source, ETH_ALEN);
		if (local_rx_pd->flags & MWIFIEX_RXPD_FLAGS_TDLS_PACKET &&
		    local_rx_pd->priority < MAX_NUM_TID) {
			sta_ptr = mwifiex_get_sta_entry(priv, ta);
			if (sta_ptr)
				sta_ptr->rx_seq[local_rx_pd->priority] =
					      le16_to_cpu(local_rx_pd->seq_num);
		}
	} else {
		if (rx_pkt_type != PKT_TYPE_BAR)
			priv->rx_seq[local_rx_pd->priority] = seq_num;
		memcpy(ta, priv->curr_bss_params.bss_descriptor.mac_address,
		       ETH_ALEN);
	}

	/* Reorder and send to OS */
	ret = mwifiex_11n_rx_reorder_pkt(priv, seq_num, local_rx_pd->priority,
					 ta, (u8) rx_pkt_type, skb);

	if (ret || (rx_pkt_type == PKT_TYPE_BAR))
		dev_kfree_skb_any(skb);

	if (ret)
		priv->stats.rx_dropped++;

	return ret;
}
Example #2
/*
 * This function processes the received buffer.
 *
 * The function looks into the RxPD and performs sanity tests on the
 * received buffer to ensure it is a valid packet before processing it
 * further. If the packet is determined to be aggregated, it is
 * de-aggregated accordingly. Non-unicast packets are sent directly to
 * the kernel/upper layers. Unicast packets are handed over to the
 * Rx reordering routine if 11n is enabled.
 *
 * The completion callback is called after processing is complete.
 */
int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
				  struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	int ret = 0;
	struct rxpd *local_rx_pd;
	struct rx_packet_hdr *rx_pkt_hdr;
	u8 ta[ETH_ALEN];
	u16 rx_pkt_type, rx_pkt_offset, rx_pkt_length, seq_num;

	local_rx_pd = (struct rxpd *) (skb->data);
	rx_pkt_type = le16_to_cpu(local_rx_pd->rx_pkt_type);
	rx_pkt_offset = le16_to_cpu(local_rx_pd->rx_pkt_offset);
	rx_pkt_length = le16_to_cpu(local_rx_pd->rx_pkt_length);
	seq_num = le16_to_cpu(local_rx_pd->seq_num);

	rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_offset;

	if ((rx_pkt_offset + rx_pkt_length) > (u16) skb->len) {
		dev_err(adapter->dev,
			"wrong rx packet: len=%d, rx_pkt_offset=%d, rx_pkt_length=%d\n",
			skb->len, rx_pkt_offset, rx_pkt_length);
		priv->stats.rx_dropped++;
		dev_kfree_skb_any(skb);
		return ret;
	}

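	/* A-MSDU: strip the RxPD, de-aggregate the frame into individual
	 * 802.3 subframes and pass each one up the stack.
	 */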
	if (rx_pkt_type == PKT_TYPE_AMSDU) {
		struct sk_buff_head list;
		struct sk_buff *rx_skb;

		__skb_queue_head_init(&list);

		skb_pull(skb, rx_pkt_offset);
		skb_trim(skb, rx_pkt_length);

		ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
					 priv->wdev->iftype, 0, false);

		while (!skb_queue_empty(&list)) {
			rx_skb = __skb_dequeue(&list);
			ret = mwifiex_recv_packet(priv, rx_skb);
			if (ret == -1)
				dev_err(adapter->dev, "Rx of A-MSDU failed");
		}
		return 0;
	} else if (rx_pkt_type == PKT_TYPE_MGMT) {
		ret = mwifiex_process_mgmt_packet(priv, skb);
		if (ret)
			dev_err(adapter->dev, "Rx of mgmt packet failed");
		dev_kfree_skb_any(skb);
		return ret;
	}

	/*
	 * If 11n is not enabled, or the packet is not a unicast packet
	 * addressed to us, send it directly to the OS. Don't pass it
	 * through Rx reordering.
	 */
	if (!IS_11N_ENABLED(priv) ||
	    !ether_addr_equal_unaligned(priv->curr_addr, rx_pkt_hdr->eth803_hdr.h_dest)) {
		mwifiex_process_rx_packet(priv, skb);
		return ret;
	}

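	/* Pick the transmitter address (TA) used to key the Rx reorder
	 * tables: the frame's source address for RA-based queuing,
	 * otherwise the MAC address of the BSS we are associated with.
	 */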
	if (mwifiex_queuing_ra_based(priv)) {
		memcpy(ta, rx_pkt_hdr->eth803_hdr.h_source, ETH_ALEN);
	} else {
		if (rx_pkt_type != PKT_TYPE_BAR)
			priv->rx_seq[local_rx_pd->priority] = seq_num;
		memcpy(ta, priv->curr_bss_params.bss_descriptor.mac_address,
		       ETH_ALEN);
	}

	/* Reorder and send to OS */
	ret = mwifiex_11n_rx_reorder_pkt(priv, seq_num, local_rx_pd->priority,
					 ta, (u8) rx_pkt_type, skb);

	if (ret || (rx_pkt_type == PKT_TYPE_BAR))
		dev_kfree_skb_any(skb);

	if (ret)
		priv->stats.rx_dropped++;

	return ret;
}