Example #1
static void rtw_check_xmit_resource(_adapter *padapter, _pkt *pkt)
{
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
#if (LINUX_VERSION_CODE>=KERNEL_VERSION(2,6,35))
	u16	queue;

	queue = skb_get_queue_mapping(pkt);
	if (padapter->registrypriv.wifi_spec) {
		/* No free space for Tx, tx_worker is too slow */
		if (pxmitpriv->hwxmits[queue].accnt > WMM_XMIT_THRESHOLD) {
			//DBG_871X("%s(): stop netif_subqueue[%d]\n", __FUNCTION__, queue);
			netif_stop_subqueue(padapter->pnetdev, queue);
		}
	} else {
		if (pxmitpriv->free_xmitframe_cnt <= 4) {
			if (!netif_tx_queue_stopped(netdev_get_tx_queue(padapter->pnetdev, queue)))
				netif_stop_subqueue(padapter->pnetdev, queue);
		}
	}
#else
	if (pxmitpriv->free_xmitframe_cnt <= 4) {
		if (!rtw_netif_queue_stopped(padapter->pnetdev))
			rtw_netif_stop_queue(padapter->pnetdev);
	}
#endif
}
Example #2
static void rtw_check_xmit_resource(struct adapter *padapter, struct sk_buff *pkt)
{
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
	u16	queue;

	queue = skb_get_queue_mapping(pkt);
	if (padapter->registrypriv.wifi_spec) {
		/* No free space for Tx, tx_worker is too slow */
		if (pxmitpriv->hwxmits[queue].accnt > WMM_XMIT_THRESHOLD)
			netif_stop_subqueue(padapter->pnetdev, queue);
	} else {
		if (pxmitpriv->free_xmitframe_cnt <= 4) {
			if (!netif_tx_queue_stopped(netdev_get_tx_queue(padapter->pnetdev, queue)))
				netif_stop_subqueue(padapter->pnetdev, queue);
		}
	}
}
Example #3
/* hfi1_vnic_maybe_stop_tx - stop tx queue if required */
static void hfi1_vnic_maybe_stop_tx(struct hfi1_vnic_vport_info *vinfo,
				    u8 q_idx)
{
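	/* Stop first, then re-check for space: if descriptors were freed in
	 * between, restart immediately. Stopping before the check closes the
	 * race with the completion path that replenishes descriptors. */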
	netif_stop_subqueue(vinfo->netdev, q_idx);
	if (!hfi1_vnic_sdma_write_avail(vinfo, q_idx))
		return;

	netif_start_subqueue(vinfo->netdev, q_idx);
}
Example #4
void _kc_netif_tx_stop_all_queues(struct net_device *netdev)
{
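	/* Compat flavor of netif_tx_stop_all_queues() (the _kc_ prefix
	 * suggests a kernel-compat backport): stop the default queue, then
	 * every Tx subqueue on multiqueue devices. */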
	struct adapter_struct *adapter = netdev_priv(netdev);
	int i;

	netif_stop_queue(netdev);
	if (netif_is_multiqueue(netdev))
		for (i = 0; i < adapter->num_tx_queues; i++)
			netif_stop_subqueue(netdev, i);
}
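A shim like this is usually wired up so callers can keep using the upstream name on every kernel; a minimal sketch of that compat pattern, with the version cutoff assumed rather than taken from this source:

/* Hypothetical compat glue: kernels older than the cutoff lack
 * netif_tx_stop_all_queues(), so route the upstream name to the
 * backported helper above. */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27))
#define netif_tx_stop_all_queues(netdev) _kc_netif_tx_stop_all_queues(netdev)
#endif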
Example #5
static int cpmac_start_xmit(struct sk_buff *skb)
{
	int queue, len, ret;

	lock_s(l1);

	/* BUG (injected deliberately): this notify() belongs at the ***
	 * location below, after the descriptor has been fully written. */
	notify(cond_irq_can_happen);

	if (unlikely(skb_padto(skb, ETH_ZLEN))) {
		ret = NETDEV_TX_OK;
	} else {
		len = max(skb->len, ETH_ZLEN);
		queue = skb_get_queue_mapping(skb); /* was commented out; queue was read uninitialized below */
		netif_stop_subqueue(queue); /* harness stub; the real kernel API is netif_stop_subqueue(dev, queue) */

		if (unlikely(desc_ring[queue].dataflags & CPMAC_OWN)) {
			/* tx dma ring full */
			ret = NETDEV_TX_BUSY;
		} else {
			spin_lock(cplock);
			spin_unlock(cplock);
			desc_ring[queue].dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
			desc_ring[queue].skb = skb;
			desc_ring[queue].data_mapping = dma_map_single(skb->data, len,
								       DMA_TO_DEVICE);
			desc_ring[queue].hw_data = (u32)desc_ring[queue].data_mapping;
			desc_ring[queue].datalen = len;
			desc_ring[queue].buflen = len;
			/* the original driver's reset_pending check, debug dumps,
			 * and CPMAC_TX_PTR doorbell write are elided in this
			 * reduced harness version; see Example #6 */
			ret = NETDEV_TX_OK;
		}
	}
	/* *** the notify() above should be issued here */
	unlock_s(l1);
	return ret;
}
Example #6
static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int queue, len;
	struct cpmac_desc *desc;
	struct cpmac_priv *priv = netdev_priv(dev);

	if (unlikely(atomic_read(&priv->reset_pending)))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_padto(skb, ETH_ZLEN)))
		return NETDEV_TX_OK;

	len = max(skb->len, ETH_ZLEN);
	queue = skb_get_queue_mapping(skb);
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	netif_stop_subqueue(dev, queue);
#else
	netif_stop_queue(dev);
#endif

	desc = &priv->desc_ring[queue];
	if (unlikely(desc->dataflags & CPMAC_OWN)) {
		if (netif_msg_tx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: tx dma ring full\n",
			       dev->name);
		return NETDEV_TX_BUSY;
	}

	spin_lock(&priv->lock);
	dev->trans_start = jiffies;
	spin_unlock(&priv->lock);
	desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
	desc->skb = skb;
	desc->data_mapping = dma_map_single(&dev->dev, skb->data, len,
					    DMA_TO_DEVICE);
	desc->hw_data = (u32)desc->data_mapping;
	desc->datalen = len;
	desc->buflen = len;
	if (unlikely(netif_msg_tx_queued(priv)))
		printk(KERN_DEBUG "%s: sending 0x%p, len=%d\n", dev->name, skb,
		       skb->len);
	if (unlikely(netif_msg_hw(priv)))
		cpmac_dump_desc(dev, desc);
	if (unlikely(netif_msg_pktdata(priv)))
		cpmac_dump_skb(dev, skb);
	cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping);

	return NETDEV_TX_OK;
}
Example #7
static bool rtw_check_xmit_resource(_adapter *padapter, _pkt *pkt)
{
	bool busy = _FALSE;
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
#if (LINUX_VERSION_CODE>=KERNEL_VERSION(2,6,35))
	u16	qidx;

	qidx = skb_get_queue_mapping(pkt);
	if (rtw_os_need_stop_queue(padapter, qidx)) {
		if (DBG_DUMP_OS_QUEUE_CTL)
			DBG_871X(FUNC_ADPT_FMT": netif_stop_subqueue[%d]\n", FUNC_ADPT_ARG(padapter), qidx);
		netif_stop_subqueue(padapter->pnetdev, qidx);
		busy = _TRUE;
	}
#else
	if (rtw_os_need_stop_queue(padapter, 0)) {
		if (DBG_DUMP_OS_QUEUE_CTL)
			DBG_871X(FUNC_ADPT_FMT": netif_stop_queue\n", FUNC_ADPT_ARG(padapter));
		rtw_netif_stop_queue(padapter->pnetdev);
		busy = _TRUE;
	}
#endif
	return busy;
}
Example #8
static int xlgmac_maybe_stop_tx_queue(
			struct xlgmac_channel *channel,
			struct xlgmac_ring *ring,
			unsigned int count)
{
	struct xlgmac_pdata *pdata = channel->pdata;

	if (count > xlgmac_tx_avail_desc(ring)) {
		netif_info(pdata, drv, pdata->netdev,
			   "Tx queue stopped, not enough descriptors available\n");
		netif_stop_subqueue(pdata->netdev, channel->queue_index);
		ring->tx.queue_stopped = 1;

		/* If we haven't notified the hardware because of xmit_more
		 * support, tell it now
		 */
		if (ring->tx.xmit_more)
			pdata->hw_ops.tx_start_xmit(channel, ring);

		return NETDEV_TX_BUSY;
	}

	return 0;
}
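The stop path above is normally paired with a wake on the Tx completion path once descriptors have been reclaimed. A minimal sketch of that counterpart, reusing the channel/ring fields from the example; the function name and threshold parameter are hypothetical, not taken from the driver:

/* Hypothetical wake-side counterpart: called after Tx completions
 * have reclaimed descriptors. */
static void xlgmac_maybe_wake_tx_queue(struct xlgmac_channel *channel,
				       struct xlgmac_ring *ring,
				       unsigned int min_free)
{
	struct xlgmac_pdata *pdata = channel->pdata;

	if (ring->tx.queue_stopped &&
	    xlgmac_tx_avail_desc(ring) >= min_free) {
		ring->tx.queue_stopped = 0;
		netif_wake_subqueue(pdata->netdev, channel->queue_index);
	}
}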
Example #9
int _rtw_xmit_entry(_pkt *pkt, _nic_hdl pnetdev)
{
	_adapter *padapter = (_adapter *)rtw_netdev_priv(pnetdev);
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
#ifdef CONFIG_TX_MCAST2UNI
	struct mlme_priv	*pmlmepriv = &padapter->mlmepriv;
	extern int rtw_mc2u_disable;
#endif	// CONFIG_TX_MCAST2UNI
	s32 res = 0;
#if (LINUX_VERSION_CODE>=KERNEL_VERSION(2,6,35))
	u16 queue;
#endif

_func_enter_;

	RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("+xmit_entry\n"));

	if (rtw_if_up(padapter) == _FALSE) {
		RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("rtw_xmit_entry: rtw_if_up fail\n"));
		#ifdef DBG_TX_DROP_FRAME
		DBG_871X("DBG_TX_DROP_FRAME %s if_up fail\n", __FUNCTION__);
		#endif
		goto drop_packet;
	}

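	/* Back-pressure: if the per-queue accounting shows the HW queue is
	 * more than half of NR_XMITFRAME deep, stop this subqueue and return
	 * NETDEV_TX_BUSY so the stack requeues the skb. */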
#if (LINUX_VERSION_CODE>=KERNEL_VERSION(2,6,35))
	queue = skb_get_queue_mapping(pkt);
	/* No free space for Tx, tx_worker is too slow */
	if (pxmitpriv->hwxmits[queue].accnt > NR_XMITFRAME/2) {
		//DBG_871X("%s(): stop netif_subqueue[%d]\n", __FUNCTION__, queue);
		netif_stop_subqueue(padapter->pnetdev, queue);
		return NETDEV_TX_BUSY;
	}
#endif

#ifdef CONFIG_TX_MCAST2UNI
	if (!rtw_mc2u_disable
	    && check_fwstate(pmlmepriv, WIFI_AP_STATE) == _TRUE
	    && (IP_MCAST_MAC(pkt->data) || ICMPV6_MCAST_MAC(pkt->data))
	    && padapter->registrypriv.wifi_spec == 0)
	{
		if (pxmitpriv->free_xmitframe_cnt > (NR_XMITFRAME / 4)) {
			res = rtw_mlcst2unicst(padapter, pkt);
			if (res == _TRUE) {
				goto exit;
			}
		} else {
			//DBG_871X("Stop M2U(%d, %d)! ", pxmitpriv->free_xmitframe_cnt, pxmitpriv->free_xmitbuf_cnt);
			//DBG_871X("!m2u );
		}
	}
#endif	// CONFIG_TX_MCAST2UNI

	res = rtw_xmit(padapter, &pkt);
	if (res < 0) {
		#ifdef DBG_TX_DROP_FRAME
		DBG_871X("DBG_TX_DROP_FRAME %s rtw_xmit fail\n", __FUNCTION__);
		#endif
		goto drop_packet;
	}

	pxmitpriv->tx_pkts++;
	RT_TRACE(_module_xmit_osdep_c_, _drv_info_, ("rtw_xmit_entry: tx_pkts=%d\n", (u32)pxmitpriv->tx_pkts));
	goto exit;

drop_packet:
	pxmitpriv->tx_drop++;
	rtw_skb_free(pkt);
	RT_TRACE(_module_xmit_osdep_c_, _drv_notice_, ("rtw_xmit_entry: drop, tx_drop=%d\n", (u32)pxmitpriv->tx_drop));

exit:

_func_exit_;

	return 0;
}
Example #10
int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct iwm_priv *iwm = ndev_to_iwm(netdev);
	struct wireless_dev *wdev = iwm_to_wdev(iwm);
	struct iwm_tx_info *tx_info;
	struct iwm_tx_queue *txq;
	struct iwm_sta_info *sta_info;
	u8 *dst_addr, sta_id;
	u16 queue;
	int ret;


	if (!test_bit(IWM_STATUS_ASSOCIATED, &iwm->status)) {
		IWM_DBG_TX(iwm, DBG, "LINK: stop netif_all_queues: "
			   "not associated\n");
		netif_tx_stop_all_queues(netdev);
		goto drop;
	}

	queue = skb_get_queue_mapping(skb);
	BUG_ON(queue >= IWM_TX_DATA_QUEUES); 

	txq = &iwm->txq[queue];

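	/* Back-pressure: if this queue's pending lists are over the limit,
	 * stop the subqueue and return NETDEV_TX_BUSY so the stack requeues
	 * the skb once the worker drains the queue. */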
	if ((skb_queue_len(&txq->queue) > IWM_TX_LIST_SIZE) ||
	    (skb_queue_len(&txq->stopped_queue) > IWM_TX_LIST_SIZE)) {
		IWM_DBG_TX(iwm, DBG, "LINK: stop netif_subqueue[%d]\n", queue);
		netif_stop_subqueue(netdev, queue);
		return NETDEV_TX_BUSY;
	}

	ret = ieee80211_data_from_8023(skb, netdev->dev_addr, wdev->iftype,
				       iwm->bssid, 0);
	if (ret) {
		IWM_ERR(iwm, "build wifi header failed\n");
		goto drop;
	}

	dst_addr = ((struct ieee80211_hdr *)(skb->data))->addr1;

	for (sta_id = 0; sta_id < IWM_STA_TABLE_NUM; sta_id++) {
		sta_info = &iwm->sta_table[sta_id];
		if (sta_info->valid &&
		    !memcmp(dst_addr, sta_info->addr, ETH_ALEN))
			break;
	}

	if (sta_id == IWM_STA_TABLE_NUM) {
		IWM_ERR(iwm, "STA %pM not found in sta_table, Tx ignored\n",
			dst_addr);
		goto drop;
	}

	tx_info = skb_to_tx_info(skb);
	tx_info->sta = sta_id;
	tx_info->color = sta_info->color;
	
	if (sta_info->qos)
		tx_info->tid = skb->priority;
	else
		tx_info->tid = IWM_UMAC_MGMT_TID;

	spin_lock_bh(&iwm->txq[queue].lock);
	skb_queue_tail(&iwm->txq[queue].queue, skb);
	spin_unlock_bh(&iwm->txq[queue].lock);

	queue_work(iwm->txq[queue].wq, &iwm->txq[queue].worker);

	netdev->stats.tx_packets++;
	netdev->stats.tx_bytes += skb->len;
	return NETDEV_TX_OK;

 drop:
	netdev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
Example #11
/*----------------------------------------------------------------------------*/
static int bowHardStartXmit(IN struct sk_buff *prSkb, IN struct net_device *prDev)
{
	P_GLUE_INFO_T prGlueInfo = *((P_GLUE_INFO_T *) netdev_priv(prDev));

	P_QUE_ENTRY_T prQueueEntry = NULL;
	P_QUE_T prTxQueue = NULL;
	UINT_16 u2QueueIdx = 0;
	UINT_8 ucDSAP, ucSSAP, ucControl;
	UINT_8 aucOUI[3];
	PUINT_8 aucLookAheadBuf = NULL;
	UINT_8 ucBssIndex;

	GLUE_SPIN_LOCK_DECLARATION();

	ASSERT(prSkb);
	ASSERT(prDev);
	ASSERT(prGlueInfo);

	aucLookAheadBuf = prSkb->data;

	ucDSAP = *(PUINT_8) &aucLookAheadBuf[ETH_LLC_OFFSET];
	ucSSAP = *(PUINT_8) &aucLookAheadBuf[ETH_LLC_OFFSET + 1];
	ucControl = *(PUINT_8) &aucLookAheadBuf[ETH_LLC_OFFSET + 2];
	aucOUI[0] = *(PUINT_8) &aucLookAheadBuf[ETH_SNAP_OFFSET];
	aucOUI[1] = *(PUINT_8) &aucLookAheadBuf[ETH_SNAP_OFFSET + 1];
	aucOUI[2] = *(PUINT_8) &aucLookAheadBuf[ETH_SNAP_OFFSET + 2];

	if (!(ucDSAP == ETH_LLC_DSAP_SNAP &&
	      ucSSAP == ETH_LLC_SSAP_SNAP &&
	      ucControl == ETH_LLC_CONTROL_UNNUMBERED_INFORMATION &&
	      aucOUI[0] == ETH_SNAP_BT_SIG_OUI_0 &&
	      aucOUI[1] == ETH_SNAP_BT_SIG_OUI_1 &&
	      aucOUI[2] == ETH_SNAP_BT_SIG_OUI_2) || (prSkb->len > 1514)) {
		dev_kfree_skb(prSkb);
		return NETDEV_TX_OK;
	}

	if (prGlueInfo->u4Flag & GLUE_FLAG_HALT) {
		DBGLOG(BOW, TRACE, ("GLUE_FLAG_HALT skip tx\n"));
		dev_kfree_skb(prSkb);
		return NETDEV_TX_OK;
	}

	GLUE_SET_PKT_FLAG_PAL(prSkb);

	ucBssIndex = wlanGetBssIdxByNetInterface(prGlueInfo, NET_DEV_BOW_IDX);

	GLUE_SET_PKT_BSS_IDX(prSkb, ucBssIndex);

	prQueueEntry = (P_QUE_ENTRY_T) GLUE_GET_PKT_QUEUE_ENTRY(prSkb);
	prTxQueue = &prGlueInfo->rTxQueue;

	if (wlanProcessSecurityFrame(prGlueInfo->prAdapter, (P_NATIVE_PACKET) prSkb) == FALSE) {
		GLUE_ACQUIRE_SPIN_LOCK(prGlueInfo, SPIN_LOCK_TX_QUE);
		QUEUE_INSERT_TAIL(prTxQueue, prQueueEntry);
		GLUE_RELEASE_SPIN_LOCK(prGlueInfo, SPIN_LOCK_TX_QUE);


		GLUE_INC_REF_CNT(prGlueInfo->i4TxPendingFrameNum);
		GLUE_INC_REF_CNT(prGlueInfo->ai4TxPendingFrameNumPerQueue[ucBssIndex][u2QueueIdx]);

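		/* Per-queue flow control: once the pending count for this
		 * (BSS, queue) pair reaches the threshold, stop the subqueue;
		 * it is presumably woken again as the Tx thread drains frames. */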
		if (prGlueInfo->ai4TxPendingFrameNumPerQueue[ucBssIndex][u2QueueIdx] >=
		    CFG_TX_STOP_NETIF_PER_QUEUE_THRESHOLD) {
			netif_stop_subqueue(prDev, u2QueueIdx);
		}
	} else {
		GLUE_INC_REF_CNT(prGlueInfo->i4TxPendingSecurityFrameNum);
	}

	kalSetEvent(prGlueInfo);

	/* For Linux, we always return the OK flag, because we free this skb ourselves */
	return NETDEV_TX_OK;
}