Example #1
/*
 * Packet send completion callback handler.
 *
 * It either frees the buffer directly or forwards it to another
 * completion callback which checks conditions, updates statistics,
 * wakes up stalled traffic queue if required, and then frees the buffer.
 */
int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
				struct sk_buff *skb, int aggr, int status)
{
	struct mwifiex_private *priv;
	struct mwifiex_txinfo *tx_info;
	struct netdev_queue *txq;
	int index;

	if (!skb)
		return 0;

	tx_info = MWIFIEX_SKB_TXCB(skb);
	priv = mwifiex_get_priv_by_id(adapter, tx_info->bss_num,
				      tx_info->bss_type);
	if (!priv)
		goto done;

	if (adapter->iface_type == MWIFIEX_USB)
		adapter->data_sent = false;

	mwifiex_set_trans_start(priv->netdev);
	if (!status) {
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len;
		if (priv->tx_timeout_cnt)
			priv->tx_timeout_cnt = 0;
	} else {
		priv->stats.tx_errors++;
	}

	if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT)
		atomic_dec_return(&adapter->pending_bridged_pkts);

	if (aggr)
		/* For skb_aggr, do not wake up tx queue */
		goto done;

	atomic_dec(&adapter->tx_pending);

	index = mwifiex_1d_to_wmm_queue[skb->priority];
	if (atomic_dec_return(&priv->wmm_tx_pending[index]) < LOW_TX_PENDING) {
		txq = netdev_get_tx_queue(priv->netdev, index);
		if (netif_tx_queue_stopped(txq)) {
			netif_tx_wake_queue(txq);
			dev_dbg(adapter->dev, "wake queue: %d\n", index);
		}
	}
done:
	dev_kfree_skb_any(skb);

	return 0;
}
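Throughout these examples, per-packet driver metadata is carried in the skb control block and retrieved with MWIFIEX_SKB_TXCB(). A minimal sketch of that pattern, with a hypothetical, trimmed-down struct standing in for the driver's struct mwifiex_txinfo:

#include <linux/skbuff.h>

/* Hypothetical stand-in for struct mwifiex_txinfo; the real structure
 * lives in the driver headers and carries more fields.
 */
struct example_txinfo {
	u8 flags;
	u8 bss_num;
	u8 bss_type;
	u8 status_code;
	u16 pkt_len;
};

/* The accessor simply reinterprets skb->cb (48 bytes reserved for the
 * current layer), so the metadata struct must fit inside it.
 */
#define EXAMPLE_SKB_TXCB(skb) ((struct example_txinfo *)((skb)->cb))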
Example #2
/*
 * Adds TxPD to AMSDU header.
 *
 * Each AMSDU packet will contain one TxPD at the beginning,
 * followed by multiple AMSDU subframes.
 */
static void
mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv,
			    struct sk_buff *skb)
{
	struct txpd *local_tx_pd;
	struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
	unsigned int pad;
	int headroom = (priv->adapter->iface_type ==
			MWIFIEX_USB) ? 0 : INTF_HEADER_LEN;

	pad = ((void *)skb->data - sizeof(*local_tx_pd) -
		headroom - NULL) & (MWIFIEX_DMA_ALIGN_SZ - 1);
	skb_push(skb, pad);

	skb_push(skb, sizeof(*local_tx_pd));

	local_tx_pd = (struct txpd *) skb->data;
	memset(local_tx_pd, 0, sizeof(struct txpd));

	/* Original priority has been overwritten */
	local_tx_pd->priority = (u8) skb->priority;
	local_tx_pd->pkt_delay_2ms =
		mwifiex_wmm_compute_drv_pkt_delay(priv, skb);
	local_tx_pd->bss_num = priv->bss_num;
	local_tx_pd->bss_type = priv->bss_type;
	/* The aggregated data starts right after the TxPD and alignment pad */
	local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd) +
						 pad);
	local_tx_pd->tx_pkt_type = cpu_to_le16(PKT_TYPE_AMSDU);
	local_tx_pd->tx_pkt_length = cpu_to_le16(skb->len -
						 sizeof(*local_tx_pd) -
						 pad);

	if (tx_info->flags & MWIFIEX_BUF_FLAG_TDLS_PKT)
		local_tx_pd->flags |= MWIFIEX_TXPD_FLAGS_TDLS_PACKET;

	if (local_tx_pd->tx_control == 0)
		/* TxCtrl set by user or default */
		local_tx_pd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl);

	if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA &&
	    priv->adapter->pps_uapsd_mode) {
		if (mwifiex_check_last_packet_indication(priv)) {
			priv->adapter->tx_lock_flag = true;
			local_tx_pd->flags =
				MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET;
		}
	}
}
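The pad computed at the top of the function makes the eventual front of the buffer (interface header plus TxPD, pushed in front of the AMSDU data) land on an MWIFIEX_DMA_ALIGN_SZ boundary; subtracting NULL is just a way to treat the data pointer as an integer. A stand-alone sketch of the same arithmetic, assuming a power-of-two alignment of 16:

#include <stdint.h>
#include <stdio.h>

#define DMA_ALIGN_SZ 16	/* assumed power-of-two alignment */

/* Return how many pad bytes must be pushed so that
 * data - hdr_len - pad lands on a DMA_ALIGN_SZ boundary.
 */
static unsigned int dma_align_pad(uintptr_t data, size_t hdr_len)
{
	return (data - hdr_len) & (DMA_ALIGN_SZ - 1);
}

int main(void)
{
	uintptr_t data = 0x1007;	/* example skb->data address */
	size_t hdr_len = 4 + 24;	/* e.g. interface header + TxPD */
	unsigned int pad = dma_align_pad(data, hdr_len);

	/* (data - hdr_len - pad) is now a multiple of DMA_ALIGN_SZ */
	printf("pad = %u, aligned start = 0x%lx\n",
	       pad, (unsigned long)(data - hdr_len - pad));
	return 0;
}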
/*
 * Packet send completion callback handler.
 *
 * It either frees the buffer directly or forwards it to another
 * completion callback which checks conditions, updates statistics,
 * wakes up stalled traffic queue if required, and then frees the buffer.
 */
int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
				struct sk_buff *skb, int status)
{
	struct mwifiex_private *priv, *tpriv;
	struct mwifiex_txinfo *tx_info;
	int i;

	if (!skb)
		return 0;

	tx_info = MWIFIEX_SKB_TXCB(skb);
	priv = mwifiex_get_priv_by_id(adapter, tx_info->bss_num,
				      tx_info->bss_type);
	if (!priv)
		goto done;

	mwifiex_set_trans_start(priv->netdev);
	if (!status) {
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len;
	} else {
		priv->stats.tx_errors++;
	}

	if (atomic_dec_return(&adapter->tx_pending) >= LOW_TX_PENDING)
		goto done;

	for (i = 0; i < adapter->priv_num; i++) {

		tpriv = adapter->priv[i];

		if ((GET_BSS_ROLE(tpriv) == MWIFIEX_BSS_ROLE_STA) &&
		    (tpriv->media_connected)) {
			if (netif_queue_stopped(tpriv->netdev))
				mwifiex_wake_up_net_dev_queue(tpriv->netdev,
							      adapter);
		}
	}
done:
	dev_kfree_skb_any(skb);

	return 0;
}
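The wake-up above is one half of a simple flow-control hysteresis: the enqueue side (for instance the bridged-packet path in the last example) stops the net-device queues once tx_pending reaches MAX_TX_PENDING, and this completion path only wakes them again after the backlog has drained below LOW_TX_PENDING. A stand-alone model of that behaviour, with illustrative threshold values rather than the driver's:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative thresholds; the driver defines its own values. */
#define MAX_TX_PENDING 100
#define LOW_TX_PENDING  80

static int tx_pending;
static bool queue_stopped;

static void on_enqueue(void)
{
	if (++tx_pending >= MAX_TX_PENDING)
		queue_stopped = true;	/* stop accepting new packets */
}

static void on_tx_complete(void)
{
	if (--tx_pending < LOW_TX_PENDING && queue_stopped) {
		queue_stopped = false;	/* resume the net queue */
		printf("queue woken at %d pending\n", tx_pending);
	}
}

int main(void)
{
	for (int i = 0; i < 120; i++)
		on_enqueue();
	for (int i = 0; i < 120; i++)
		on_tx_complete();
	return 0;
}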
Example #4
/*
 * CFG802.11 network device handler for data transmission.
 */
static int
mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
	struct sk_buff *new_skb;
	struct mwifiex_txinfo *tx_info;
	bool multicast;

	dev_dbg(priv->adapter->dev, "data: %lu BSS(%d-%d): Data <= kernel\n",
		jiffies, priv->bss_type, priv->bss_num);

	if (priv->adapter->surprise_removed) {
		kfree_skb(skb);
		priv->stats.tx_dropped++;
		return 0;
	}
	if (!skb->len || (skb->len > ETH_FRAME_LEN)) {
		dev_err(priv->adapter->dev, "Tx: bad skb len %d\n", skb->len);
		kfree_skb(skb);
		priv->stats.tx_dropped++;
		return 0;
	}
	if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN) {
		dev_dbg(priv->adapter->dev,
			"data: Tx: insufficient skb headroom %d\n",
			skb_headroom(skb));
		/* Insufficient skb headroom - allocate a new skb */
		new_skb =
			skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
		if (unlikely(!new_skb)) {
			dev_err(priv->adapter->dev, "Tx: cannot allocate new_skb\n");
			kfree_skb(skb);
			priv->stats.tx_dropped++;
			return 0;
		}
		kfree_skb(skb);
		skb = new_skb;
		dev_dbg(priv->adapter->dev, "info: new skb headroomd %d\n",
			skb_headroom(skb));
	}

	tx_info = MWIFIEX_SKB_TXCB(skb);
	memset(tx_info, 0, sizeof(*tx_info));
	tx_info->bss_num = priv->bss_num;
	tx_info->bss_type = priv->bss_type;
	tx_info->pkt_len = skb->len;

	multicast = is_multicast_ether_addr(skb->data);

	if (unlikely(!multicast && skb->sk &&
		     skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS &&
		     priv->adapter->fw_api_ver == MWIFIEX_FW_V15))
		skb = mwifiex_clone_skb_for_tx_status(priv,
						      skb,
					MWIFIEX_BUF_FLAG_EAPOL_TX_STATUS, NULL);

	/* Record the current time the packet was queued; used to
	 * determine the amount of time the packet was queued in
	 * the driver before it was sent to the firmware.
	 * The delay is then sent along with the packet to the
	 * firmware for aggregate delay calculation for stats and
	 * MSDU lifetime expiry.
	 */
	__net_timestamp(skb);

	if (ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
	    priv->bss_type == MWIFIEX_BSS_TYPE_STA &&
	    !ether_addr_equal_unaligned(priv->cfg_bssid, skb->data)) {
		if (priv->adapter->auto_tdls && priv->check_tdls_tx)
			mwifiex_tdls_check_tx(priv, skb);
	}

	mwifiex_queue_tx_pkt(priv, skb);

	return 0;
}
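The timestamp taken by __net_timestamp() above is what the TxPD examples later fold into pkt_delay_2ms via mwifiex_wmm_compute_drv_pkt_delay(). A minimal user-space sketch of such a conversion, assuming the delay is reported as an 8-bit value in 2 ms units and clamped at the maximum:

#include <stdint.h>
#include <stdio.h>

/* Convert a queuing delay in milliseconds into the 8-bit, 2 ms-unit
 * representation assumed for the TxPD pkt_delay_2ms field.
 */
static uint8_t pkt_delay_2ms(uint32_t delay_ms)
{
	uint32_t units = delay_ms >> 1;	/* 2 ms per unit */

	return units > 0xff ? 0xff : (uint8_t)units;
}

int main(void)
{
	printf("%u %u %u\n",
	       pkt_delay_2ms(3),	/* 1 */
	       pkt_delay_2ms(200),	/* 100 */
	       pkt_delay_2ms(2000));	/* 255 (clamped) */
	return 0;
}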
Example #5
/*
 * This function tells firmware to send a NULL data packet.
 *
 * The function creates a NULL data packet with TxPD and sends to the
 * firmware for transmission, with highest priority setting.
 */
int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct txpd *local_tx_pd;
/* sizeof(struct txpd) + Interface specific header */
#define NULL_PACKET_HDR 64
	u32 data_len = NULL_PACKET_HDR;
	struct sk_buff *skb;
	int ret;
	struct mwifiex_txinfo *tx_info = NULL;

	if (adapter->surprise_removed)
		return -1;

	if (!priv->media_connected)
		return -1;

	if (adapter->data_sent)
		return -1;

	skb = dev_alloc_skb(data_len);
	if (!skb)
		return -1;

	tx_info = MWIFIEX_SKB_TXCB(skb);
	tx_info->bss_index = priv->bss_index;
	skb_reserve(skb, sizeof(struct txpd) + INTF_HEADER_LEN);
	skb_push(skb, sizeof(struct txpd));

	local_tx_pd = (struct txpd *) skb->data;
	local_tx_pd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl);
	local_tx_pd->flags = flags;
	local_tx_pd->priority = WMM_HIGHEST_PRIORITY;
	local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd));
	local_tx_pd->bss_num = priv->bss_num;
	local_tx_pd->bss_type = priv->bss_type;

	skb_push(skb, INTF_HEADER_LEN);

	ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
					     skb->data, skb->len, NULL);
	switch (ret) {
	case -EBUSY:
		adapter->data_sent = true;
		/* Fall through to failure handling */
	case -1:
		dev_kfree_skb_any(skb);
		dev_err(adapter->dev, "%s: host_to_card failed: ret=%d\n",
						__func__, ret);
		adapter->dbg.num_tx_host_to_card_failure++;
		break;
	case 0:
		dev_kfree_skb_any(skb);
		dev_dbg(adapter->dev, "data: %s: host_to_card succeeded\n",
						__func__);
		adapter->tx_lock_flag = true;
		break;
	case -EINPROGRESS:
		break;
	default:
		break;
	}

	return ret;
}
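A hypothetical call site for the NULL data packet as used for power-save signalling; the flag names are assumed to be the driver's MWIFIEX_TxPD_POWER_MGMT_* definitions and the error handling is illustrative only:

	/* Assumed driver flags: a NULL data packet that is also the last
	 * packet before the station enters power save.
	 */
	if (mwifiex_send_null_packet(priv,
				     MWIFIEX_TxPD_POWER_MGMT_NULL_PACKET |
				     MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET))
		dev_dbg(priv->adapter->dev,
			"data: NULL packet not sent, try again later\n");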
/*
 * This function fills the TxPD for tx packets.
 *
 * The Tx buffer received by this function should already have the
 * header space allocated for TxPD.
 *
 * This function inserts the TxPD in between interface header and actual
 * data and adjusts the buffer pointers accordingly.
 *
 * The following TxPD fields are set by this function, as required -
 *      - BSS number
 *      - Tx packet length and offset
 *      - Priority
 *      - Packet delay
 *      - Priority specific Tx control
 *      - Flags
 */
void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
				struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct txpd *local_tx_pd;
	struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
	u8 pad;

	if (!skb->len) {
		dev_err(adapter->dev, "Tx: bad packet length: %d\n", skb->len);
		tx_info->status_code = -1;
		return skb->data;
	}

	/* If skb->data is not aligned, add padding */
	pad = (4 - (((void *)skb->data - NULL) & 0x3)) % 4;

	BUG_ON(skb_headroom(skb) < (sizeof(*local_tx_pd) + INTF_HEADER_LEN
				    + pad));
	skb_push(skb, sizeof(*local_tx_pd) + pad);

	local_tx_pd = (struct txpd *) skb->data;
	memset(local_tx_pd, 0, sizeof(struct txpd));
	local_tx_pd->bss_num = priv->bss_num;
	local_tx_pd->bss_type = priv->bss_type;
	local_tx_pd->tx_pkt_length = cpu_to_le16((u16)(skb->len -
						       (sizeof(struct txpd)
							+ pad)));

	local_tx_pd->priority = (u8) skb->priority;
	local_tx_pd->pkt_delay_2ms =
				mwifiex_wmm_compute_drv_pkt_delay(priv, skb);

	if (local_tx_pd->priority <
	    ARRAY_SIZE(priv->wmm.user_pri_pkt_tx_ctrl))
		/*
		 * Set the priority-specific tx_control field; a value of 0
		 * causes the default tx_control to be applied later in this
		 * function.
		 */
		local_tx_pd->tx_control =
			cpu_to_le32(priv->wmm.user_pri_pkt_tx_ctrl[local_tx_pd->
								   priority]);

	if (adapter->pps_uapsd_mode) {
		if (mwifiex_check_last_packet_indication(priv)) {
			adapter->tx_lock_flag = true;
			local_tx_pd->flags =
				MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET;
		}
	}

	/* Offset of actual data */
	local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd) + pad);

	/* make space for INTF_HEADER_LEN */
	skb_push(skb, INTF_HEADER_LEN);

	if (!local_tx_pd->tx_control)
		/* TxCtrl set by user or default */
		local_tx_pd->tx_control = cpu_to_le32(priv->pkt_tx_ctrl);

	return skb->data;
}
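The TxPD fields set above describe the payload relative to the start of the descriptor: tx_pkt_offset skips the TxPD plus the alignment pad, and tx_pkt_length is whatever remains of the buffer. A tiny stand-alone check of that bookkeeping, using illustrative sizes (the real sizeof(struct txpd) is fixed by the firmware interface):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t txpd_sz = 16;		/* assumed sizeof(struct txpd) */
	uint16_t pad = 2;		/* alignment pad computed by the driver */
	uint16_t payload_len = 1400;	/* Ethernet frame from the stack */

	uint16_t skb_len = txpd_sz + pad + payload_len;	/* after skb_push */
	uint16_t tx_pkt_offset = txpd_sz + pad;
	uint16_t tx_pkt_length = skb_len - (txpd_sz + pad);

	printf("offset=%u length=%u (payload=%u)\n",
	       tx_pkt_offset, tx_pkt_length, payload_len);
	return 0;
}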
Example #7
static int mwifiex_usb_prepare_tx_aggr_skb(struct mwifiex_adapter *adapter,
					   struct usb_tx_data_port *port,
					   struct sk_buff **skb_send)
{
	struct sk_buff *skb_aggr, *skb_tmp;
	u8 *payload, pad;
	u16 align = adapter->bus_aggr.tx_aggr_align;
	struct mwifiex_txinfo *tx_info = NULL;
	bool is_txinfo_set = false;

	/* Packets in aggr_list will be sent either in skb_aggr or in the
	 * write complete path, so delete the tx_aggr hold timer.
	 */
	if (port->tx_aggr.timer_cnxt.is_hold_timer_set) {
		del_timer(&port->tx_aggr.timer_cnxt.hold_timer);
		port->tx_aggr.timer_cnxt.is_hold_timer_set = false;
		port->tx_aggr.timer_cnxt.hold_tmo_msecs = 0;
	}

	skb_aggr = mwifiex_alloc_dma_align_buf(port->tx_aggr.aggr_len,
					       GFP_ATOMIC);
	if (!skb_aggr) {
		mwifiex_dbg(adapter, ERROR,
			    "%s: alloc skb_aggr failed\n", __func__);

		while ((skb_tmp = skb_dequeue(&port->tx_aggr.aggr_list)))
			mwifiex_write_data_complete(adapter, skb_tmp, 0, -1);

		port->tx_aggr.aggr_num = 0;
		port->tx_aggr.aggr_len = 0;
		return -EBUSY;
	}

	tx_info = MWIFIEX_SKB_TXCB(skb_aggr);
	memset(tx_info, 0, sizeof(*tx_info));

	while ((skb_tmp = skb_dequeue(&port->tx_aggr.aggr_list))) {
		/* pad so that the next packet header starts aligned */
		pad = (align - (skb_tmp->len & (align - 1))) % align;
		payload = skb_put(skb_aggr, skb_tmp->len + pad);
		memcpy(payload, skb_tmp->data, skb_tmp->len);
		if (skb_queue_empty(&port->tx_aggr.aggr_list)) {
			/* do not pad the last packet */
			*(u16 *)payload = cpu_to_le16(skb_tmp->len);
			*(u16 *)&payload[2] =
				cpu_to_le16(MWIFIEX_TYPE_AGGR_DATA_V2 | 0x80);
			skb_trim(skb_aggr, skb_aggr->len - pad);
		} else {
			/* add aggregation interface header */
			*(u16 *)payload = cpu_to_le16(skb_tmp->len + pad);
			*(u16 *)&payload[2] =
				cpu_to_le16(MWIFIEX_TYPE_AGGR_DATA_V2);
		}

		if (!is_txinfo_set) {
			tx_info->bss_num = MWIFIEX_SKB_TXCB(skb_tmp)->bss_num;
			tx_info->bss_type = MWIFIEX_SKB_TXCB(skb_tmp)->bss_type;
			is_txinfo_set = true;
		}

		port->tx_aggr.aggr_num--;
		port->tx_aggr.aggr_len -= (skb_tmp->len + pad);
		mwifiex_write_data_complete(adapter, skb_tmp, 0, 0);
	}

	tx_info->pkt_len = skb_aggr->len -
			(sizeof(struct txpd) + adapter->intf_hdr_len);
	tx_info->flags |= MWIFIEX_BUF_FLAG_AGGR_PKT;

	port->tx_aggr.aggr_num = 0;
	port->tx_aggr.aggr_len = 0;
	*skb_send = skb_aggr;

	return 0;
}
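Each subframe copied into skb_aggr is prefixed by a 4-byte aggregation header (little-endian length, then type) and padded so the next header starts on a tx_aggr_align boundary; only the last subframe keeps its real, unpadded length. A stand-alone sketch of that padding arithmetic, assuming the alignment is a power of two (the driver takes the actual value from the firmware):

#include <stdint.h>
#include <stdio.h>

/* Pad each aggregated subframe so the next aggregation header starts
 * on an 'align'-byte boundary.
 */
static uint16_t aggr_pad(uint16_t len, uint16_t align)
{
	return (align - (len & (align - 1))) % align;
}

int main(void)
{
	uint16_t align = 4;	/* assumed tx_aggr_align */
	uint16_t lens[] = { 64, 1514, 1517 };

	for (int i = 0; i < 3; i++) {
		uint16_t pad = aggr_pad(lens[i], align);
		printf("len %4u -> pad %u (total %u)\n",
		       lens[i], pad, lens[i] + pad);
	}
	return 0;
}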
Example #8
static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
					 struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct uap_rxpd *uap_rx_pd;
	struct rx_packet_hdr *rx_pkt_hdr;
	struct sk_buff *new_skb;
	struct mwifiex_txinfo *tx_info;
	int hdr_chop;
	struct ethhdr *p_ethhdr;
	struct timeval tv;
	u8 rfc1042_eth_hdr[ETH_ALEN] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };

	uap_rx_pd = (struct uap_rxpd *)(skb->data);
	rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);

	if ((atomic_read(&adapter->pending_bridged_pkts) >=
					     MWIFIEX_BRIDGED_PKTS_THRESHOLD)) {
		dev_err(priv->adapter->dev,
			"Tx: Bridge packet limit reached. Drop packet!\n");
		kfree_skb(skb);
		return;
	}

	if (!memcmp(&rx_pkt_hdr->rfc1042_hdr,
		    rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr))) {
		/* Replace the 802.3 header and rfc1042 (llc/snap) header
		 * with an Ethernet II header: rebuild dst/src right in
		 * front of the snap_type (ethertype), then chop off the
		 * rxpd plus the excess llc/snap bytes.
		 */
		p_ethhdr = (struct ethhdr *)
			((u8 *)(&rx_pkt_hdr->eth803_hdr)
			 + sizeof(rx_pkt_hdr->eth803_hdr)
			 + sizeof(rx_pkt_hdr->rfc1042_hdr)
			 - sizeof(rx_pkt_hdr->eth803_hdr.h_dest)
			 - sizeof(rx_pkt_hdr->eth803_hdr.h_source)
			 - sizeof(rx_pkt_hdr->rfc1042_hdr.snap_type));
		memcpy(p_ethhdr->h_source, rx_pkt_hdr->eth803_hdr.h_source,
		       sizeof(p_ethhdr->h_source));
		memcpy(p_ethhdr->h_dest, rx_pkt_hdr->eth803_hdr.h_dest,
		       sizeof(p_ethhdr->h_dest));
		hdr_chop = (u8 *)p_ethhdr - (u8 *)uap_rx_pd;
	} else {
		/* Chop off the rxpd */
		hdr_chop = (u8 *)&rx_pkt_hdr->eth803_hdr - (u8 *)uap_rx_pd;
	}

	/* Chop off the leading header bytes so that the buffer points
	 * to the start of either the reconstructed EthII frame
	 * or the 802.2/llc/snap frame.
	 */
	skb_pull(skb, hdr_chop);

	if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN) {
		dev_dbg(priv->adapter->dev,
			"data: Tx: insufficient skb headroom %d\n",
			skb_headroom(skb));
		/* Insufficient skb headroom - allocate a new skb */
		new_skb =
			skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
		if (unlikely(!new_skb)) {
			dev_err(priv->adapter->dev,
				"Tx: cannot allocate new_skb\n");
			kfree_skb(skb);
			priv->stats.tx_dropped++;
			return;
		}

		kfree_skb(skb);
		skb = new_skb;
		dev_dbg(priv->adapter->dev, "info: new skb headroom %d\n",
			skb_headroom(skb));
	}

	tx_info = MWIFIEX_SKB_TXCB(skb);
	tx_info->bss_num = priv->bss_num;
	tx_info->bss_type = priv->bss_type;
	tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT;

	do_gettimeofday(&tv);
	skb->tstamp = timeval_to_ktime(tv);
	mwifiex_wmm_add_buf_txqueue(priv, skb);
	atomic_inc(&adapter->tx_pending);
	atomic_inc(&adapter->pending_bridged_pkts);

	if ((atomic_read(&adapter->tx_pending) >= MAX_TX_PENDING)) {
		mwifiex_set_trans_start(priv->netdev);
		mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
	}
	return;
}
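Worked numbers for the header chop above, using illustrative sizes (the real rx_pkt_offset comes from the firmware's uap_rxpd): a 14-byte 802.3 header and an 8-byte LLC/SNAP header precede the payload, so the rebuilt Ethernet II header starts 8 bytes into that region.

#include <stdio.h>

int main(void)
{
	unsigned int rx_pkt_offset = 64;	/* assumed rxpd offset */
	unsigned int eth803_hdr = 14;		/* dst + src + length */
	unsigned int llc_snap = 8;		/* dsap/ssap/ctrl + OUI + type */

	/* The EthII frame rebuilt in place ends where the SNAP type ends,
	 * i.e. its 14 bytes sit immediately before the payload.
	 */
	unsigned int chop_to_ethii = rx_pkt_offset + eth803_hdr + llc_snap - 14;

	printf("chop to EthII frame : %u bytes\n", chop_to_ethii);
	printf("chop to 802.3 frame : %u bytes\n", rx_pkt_offset);
	return 0;
}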