Code example #1
File: cpmac.c  Project: AdrianHuang/uclinux-robutest
static void cpmac_end_xmit(struct net_device *dev, int queue)
{
	struct cpmac_desc *desc;
	struct cpmac_priv *priv = netdev_priv(dev);

	desc = &priv->desc_ring[queue];
	cpmac_write(priv->regs, CPMAC_TX_ACK(queue), (u32)desc->mapping);
	if (likely(desc->skb)) {
		spin_lock(&priv->lock);
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += desc->skb->len;
		spin_unlock(&priv->lock);
		dma_unmap_single(&dev->dev, desc->data_mapping, desc->skb->len,
				 DMA_TO_DEVICE);

		if (unlikely(netif_msg_tx_done(priv)))
			printk(KERN_DEBUG "%s: sent 0x%p, len=%d\n", dev->name,
			       desc->skb, desc->skb->len);

		dev_kfree_skb_irq(desc->skb);
		desc->skb = NULL;
		if (__netif_subqueue_stopped(dev, queue))
			netif_wake_subqueue(dev, queue);
	} else {
		if (netif_msg_tx_err(priv) && net_ratelimit())
			printk(KERN_WARNING
			       "%s: end_xmit: spurious interrupt\n", dev->name);
		if (__netif_subqueue_stopped(dev, queue))
			netif_wake_subqueue(dev, queue);
	}
}
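
For context, the netif_wake_subqueue() call above re-enables a transmit subqueue that the driver's transmit path stopped earlier. Below is a minimal, hypothetical sketch of that stop side; it is not taken from the cpmac driver, and the example_* helpers are placeholders, while netif_stop_subqueue(), skb_get_queue_mapping() and the NETDEV_TX_* return codes are the real kernel APIs.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Placeholder helpers standing in for driver-specific ring management;
 * these names are assumptions for this sketch, not real kernel APIs.
 */
static bool example_tx_ring_full(struct net_device *dev, u16 queue)
{
	return false;	/* a real driver would check its TX descriptor ring */
}

static void example_post_to_hw(struct net_device *dev, u16 queue,
			       struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);	/* a real driver would DMA-map and post it */
}

/* Stop the subqueue when its ring fills; a completion handler such as
 * cpmac_end_xmit() above then re-enables it with netif_wake_subqueue().
 */
static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	u16 queue = skb_get_queue_mapping(skb);

	if (example_tx_ring_full(dev, queue)) {
		netif_stop_subqueue(dev, queue);
		return NETDEV_TX_BUSY;
	}

	example_post_to_hw(dev, queue, skb);
	return NETDEV_TX_OK;
}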
Code example #2
File: xmit_linux.c  Project: 020gzh/linux
void rtw_os_pkt_complete23a(struct rtw_adapter *padapter, struct sk_buff *pkt)
{
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
	u16	queue;

	queue = skb_get_queue_mapping(pkt);
	if (padapter->registrypriv.wifi_spec) {
		if (__netif_subqueue_stopped(padapter->pnetdev, queue) &&
		    (pxmitpriv->hwxmits[queue].accnt < WMM_XMIT_THRESHOLD))
			netif_wake_subqueue(padapter->pnetdev, queue);
	} else {
		if (__netif_subqueue_stopped(padapter->pnetdev, queue))
			netif_wake_subqueue(padapter->pnetdev, queue);
	}
	dev_kfree_skb_any(pkt);
}
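
The explicit skb_get_queue_mapping() plus __netif_subqueue_stopped() pair used here is the same check that the skb-based helper netif_subqueue_stopped() performs internally. For reference, the two helpers in <linux/netdevice.h> look roughly like the paraphrase below; this is a simplified sketch from memory, so consult the header of your kernel version rather than copying it.

/* Simplified paraphrase of the <linux/netdevice.h> helpers, for reference only. */
static inline bool __netif_subqueue_stopped(const struct net_device *dev,
					    u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	return netif_tx_queue_stopped(txq);
}

static inline bool netif_subqueue_stopped(const struct net_device *dev,
					  struct sk_buff *skb)
{
	/* The skb variant simply looks up the skb's queue mapping first. */
	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}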
Code example #3
static struct sk_buff *rr_dequeue(struct Qdisc* sch)
{
	struct sk_buff *skb;
	struct prio_sched_data *q = qdisc_priv(sch);
	struct Qdisc *qdisc;
	int bandcount;

	/* Only take one pass through the queues.  If nothing is available,
	 * return nothing.
	 */
	for (bandcount = 0; bandcount < q->bands; bandcount++) {
		/* Check if the target subqueue is available before
		 * pulling an skb.  This way we avoid excessive requeues
		 * for slower queues.  If the queue is stopped, try the
		 * next queue.
		 */
		if (!__netif_subqueue_stopped(sch->dev,
					    (q->mq ? q->curband : 0))) {
			qdisc = q->queues[q->curband];
			skb = qdisc->dequeue(qdisc);
			if (skb) {
				sch->q.qlen--;
				q->curband++;
				if (q->curband >= q->bands)
					q->curband = 0;
				return skb;
			}
		}
		q->curband++;
		if (q->curband >= q->bands)
			q->curband = 0;
	}
	return NULL;
}
Code example #4
static struct sk_buff *
prio_dequeue(struct Qdisc* sch)
{
	struct sk_buff *skb;
	struct prio_sched_data *q = qdisc_priv(sch);
	int prio;
	struct Qdisc *qdisc;

	for (prio = 0; prio < q->bands; prio++) {
		/* Check if the target subqueue is available before
		 * pulling an skb.  This way we avoid excessive requeues
		 * for slower queues.
		 */
		if (!__netif_subqueue_stopped(sch->dev, (q->mq ? prio : 0))) {
			qdisc = q->queues[prio];
			skb = qdisc->dequeue(qdisc);
			if (skb) {
				sch->q.qlen--;
				return skb;
			}
		}
	}
	return NULL;

}
Code example #5
static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct Qdisc *qdisc;
	struct sk_buff *skb;
	int band;

	for (band = 0; band < q->bands; band++) {
		/* cycle through bands to ensure fairness */
		q->curband++;
		if (q->curband >= q->bands)
			q->curband = 0;

		/* Check that target subqueue is available before
		 * pulling an skb to avoid head-of-line blocking.
		 */
		if (!__netif_subqueue_stopped(qdisc_dev(sch), q->curband)) {
			qdisc = q->queues[q->curband];
			skb = qdisc->dequeue(qdisc);
			if (skb) {
				sch->q.qlen--;
				return skb;
			}
		}
	}
	return NULL;

}
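
The band index these dequeue routines pass to __netif_subqueue_stopped() is meaningful because sch_multiq maps each skb to a band by its queue mapping, so band N corresponds to hardware subqueue N. The following is a simplified, hypothetical sketch of that classification step; the real multiq_classify() also consults tc filters and handles errors, and example_multiq_classify() is not an upstream function name.

/* Hypothetical, simplified band selection: classify by the skb's queue
 * mapping, which is why __netif_subqueue_stopped(dev, band) is the right
 * gate on the dequeue side.
 */
static struct Qdisc *example_multiq_classify(struct sk_buff *skb,
					     struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	u32 band = skb_get_queue_mapping(skb);

	if (band >= q->bands)
		band = 0;	/* out-of-range mappings fall back to band 0 */

	return q->queues[band];
}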
Code example #6
static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct Qdisc *qdisc;
	struct sk_buff *skb;
	int band;

	for (band = 0; band < q->bands; band++) {
		/* cycle through bands to ensure fairness */
		q->curband++;
		if (q->curband >= q->bands)
			q->curband = 0;

		/* Check that target subqueue is available before
		 * pulling an skb to avoid head-of-line blocking.
		 */
		if (!__netif_subqueue_stopped(qdisc_dev(sch), q->curband)) {
			qdisc = q->queues[q->curband];
			skb = qdisc->dequeue(qdisc);
			if (skb) {
				sch->q.qlen--;
				return skb;
			}
		}
	}
	return NULL;

}
Code example #7
static struct sk_buff *multiq_peek(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned int curband = q->curband;
	struct Qdisc *qdisc;
	struct sk_buff *skb;
	int band;

	for (band = 0; band < q->bands; band++) {
		/* cycle through bands to ensure fairness */
		curband++;
		if (curband >= q->bands)
			curband = 0;

		/* Check that target subqueue is available before
		 * pulling an skb to avoid head-of-line blocking.
		 */
		if (!__netif_subqueue_stopped(qdisc_dev(sch), curband)) {
			qdisc = q->queues[curband];
			skb = qdisc->ops->peek(qdisc);
			if (skb)
				return skb;
		}
	}
	return NULL;

}
Code example #8
void iwm_tx_worker(struct work_struct *work)
{
	struct iwm_priv *iwm;
	struct iwm_tx_info *tx_info = NULL;
	struct sk_buff *skb;
	int cmdlen, ret;
	struct iwm_tx_queue *txq;
	int pool_id;

	txq = container_of(work, struct iwm_tx_queue, worker);
	iwm = container_of(txq, struct iwm_priv, txq[txq->id]);

	pool_id = queue_to_pool_id(txq->id);

	while (!test_bit(pool_id, &iwm->tx_credit.full_pools_map) &&
	       !skb_queue_empty(&txq->queue)) {

		skb = skb_dequeue(&txq->queue);
		tx_info = skb_to_tx_info(skb);
		cmdlen = IWM_UDMA_HDR_LEN + skb->len;

		IWM_DBG_TX(iwm, DBG, "Tx frame on queue %d: skb: 0x%p, sta: "
			   "%d, color: %d\n", txq->id, skb, tx_info->sta,
			   tx_info->color);

#if !CONFIG_IWM_TX_CONCATENATED
		
		ret = iwm_send_packet(iwm, skb, pool_id);
#else

		if (txq->concat_count + cmdlen > IWM_HAL_CONCATENATE_BUF_SIZE)
			iwm_tx_send_concat_packets(iwm, txq);

		ret = iwm_tx_credit_alloc(iwm, pool_id, cmdlen);
		if (ret) {
			IWM_DBG_TX(iwm, DBG, "not enough tx_credit for queue "
				   "%d, Tx worker stopped\n", txq->id);
			skb_queue_head(&txq->queue, skb);
			break;
		}

		txq->concat_ptr = txq->concat_buf + txq->concat_count;
		iwm_tx_build_packet(iwm, skb, pool_id, txq->concat_ptr);
		txq->concat_count += ALIGN(cmdlen, 16);
#endif
		kfree_skb(skb);
	}

	iwm_tx_send_concat_packets(iwm, txq);

	if (__netif_subqueue_stopped(iwm_to_ndev(iwm), txq->id) &&
	    !test_bit(pool_id, &iwm->tx_credit.full_pools_map) &&
	    (skb_queue_len(&txq->queue) < IWM_TX_LIST_SIZE / 2)) {
		IWM_DBG_TX(iwm, DBG, "LINK: start netif_subqueue[%d]", txq->id);
		netif_wake_subqueue(iwm_to_ndev(iwm), txq->id);
	}
}
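
The wake-up test at the end of this worker only makes sense if the transmit path stopped the subqueue once the per-queue backlog grew too large. A hypothetical mirror of that check is sketched below; example_queue_frame() is not part of the iwm driver, but it reuses iwm_to_ndev(), txq->id and IWM_TX_LIST_SIZE from the example above.

/* Hypothetical enqueue-side mirror of the wake check above: stop the
 * subqueue once the backlog reaches IWM_TX_LIST_SIZE so the worker can
 * wake it again when the queue drains below IWM_TX_LIST_SIZE / 2.
 */
static void example_queue_frame(struct iwm_priv *iwm,
				struct iwm_tx_queue *txq,
				struct sk_buff *skb)
{
	skb_queue_tail(&txq->queue, skb);

	if (skb_queue_len(&txq->queue) >= IWM_TX_LIST_SIZE)
		netif_stop_subqueue(iwm_to_ndev(iwm), txq->id);
}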
Code example #9
File: xmit_linux.c  Project: vampirefo/rtl8723bs
void rtw_os_pkt_complete(_adapter *padapter, _pkt *pkt)
{
	u16	queue;
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;

	queue = skb_get_queue_mapping(pkt);
	if (padapter->registrypriv.wifi_spec) {
		if(__netif_subqueue_stopped(padapter->pnetdev, queue) &&
			(pxmitpriv->hwxmits[queue].accnt < WMM_XMIT_THRESHOLD))
		{
			netif_wake_subqueue(padapter->pnetdev, queue);
		}
	} else {
		if(__netif_subqueue_stopped(padapter->pnetdev, queue))
			netif_wake_subqueue(padapter->pnetdev, queue);
	}

	rtw_skb_free(pkt);
}
Code example #10
File: xmit_linux.c  Project: Adri9102/rtl8188eu
void rtw_os_pkt_complete(struct adapter *padapter, struct sk_buff *pkt)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	u16	queue;
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;

	queue = skb_get_queue_mapping(pkt);
	if (padapter->registrypriv.wifi_spec) {
		if (__netif_subqueue_stopped(padapter->pnetdev, queue) &&
		    (pxmitpriv->hwxmits[queue].accnt < WMM_XMIT_THRESHOLD))
			netif_wake_subqueue(padapter->pnetdev, queue);
	} else {
		if (__netif_subqueue_stopped(padapter->pnetdev, queue))
			netif_wake_subqueue(padapter->pnetdev, queue);
	}
#else
	if (netif_queue_stopped(padapter->pnetdev))
		netif_wake_queue(padapter->pnetdev);
#endif

	dev_kfree_skb_any(pkt);
}
Code example #11
void dump_os_queue(void *sel, _adapter *padapter)
{
	struct net_device *ndev = padapter->pnetdev;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
	int i;

	for (i=0;i<4;i++) {
		DBG_871X_SEL_NL(sel, "os_queue[%d]:%s\n"
			, i, __netif_subqueue_stopped(ndev, i)?"stopped":"waked");
	}
#else
	DBG_871X_SEL_NL(sel, "os_queue:%s\n"
			, netif_queue_stopped(ndev)?"stopped":"waked");
#endif
}
Code example #12
File: xmit_linux.c  Project: 1nfused/RemoteAccess
void rtw_os_pkt_complete(_adapter *padapter, _pkt *pkt)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
	u16	queue;
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;

	queue = skb_get_queue_mapping(pkt);
	if(__netif_subqueue_stopped(padapter->pnetdev, queue) &&
		(pxmitpriv->hwxmits[queue].accnt < NR_XMITFRAME/2))
	{
		netif_wake_subqueue(padapter->pnetdev, queue);
	}
#else
	if (netif_queue_stopped(padapter->pnetdev))
		netif_wake_queue(padapter->pnetdev);
#endif

	rtw_skb_free(pkt);
}
Code example #13
File: lio_core.c  Project: AlexShiLucky/linux
/* Runs in interrupt context. */
static void lio_update_txq_status(struct octeon_device *oct, int iq_num)
{
	struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
	struct net_device *netdev;
	struct lio *lio;

	netdev = oct->props[iq->ifidx].netdev;

	/* This is needed because the first IQ does not have
	 * a netdev associated with it.
	 */
	if (!netdev)
		return;

	lio = GET_LIO(netdev);
	if (__netif_subqueue_stopped(netdev, iq->q_index) &&
	    lio->linfo.link.s.link_up &&
	    (!octnet_iq_is_full(oct, iq_num))) {
		netif_wake_subqueue(netdev, iq->q_index);
		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
					  tx_restart, 1);
	}
}
Code example #14
void iwm_tx_worker(struct work_struct *work)
{
	struct iwm_priv *iwm;
	struct iwm_tx_info *tx_info = NULL;
	struct sk_buff *skb;
	struct iwm_tx_queue *txq;
	struct iwm_sta_info *sta_info;
	struct iwm_tid_info *tid_info;
	int cmdlen, ret, pool_id;

	txq = container_of(work, struct iwm_tx_queue, worker);
	iwm = container_of(txq, struct iwm_priv, txq[txq->id]);

	pool_id = queue_to_pool_id(txq->id);

	while (!test_bit(pool_id, &iwm->tx_credit.full_pools_map) &&
	       !skb_queue_empty(&txq->queue)) {

		spin_lock_bh(&txq->lock);
		skb = skb_dequeue(&txq->queue);
		spin_unlock_bh(&txq->lock);

		tx_info = skb_to_tx_info(skb);
		sta_info = &iwm->sta_table[tx_info->sta];
		if (!sta_info->valid) {
			IWM_ERR(iwm, "Trying to send a frame to unknown STA\n");
			kfree_skb(skb);
			continue;
		}

		tid_info = &sta_info->tid_info[tx_info->tid];

		mutex_lock(&tid_info->mutex);

		if (tid_info->stopped) {
			IWM_DBG_TX(iwm, DBG, "%dx%d stopped\n",
				   tx_info->sta, tx_info->tid);
			spin_lock_bh(&txq->lock);
			skb_queue_tail(&txq->stopped_queue, skb);
			spin_unlock_bh(&txq->lock);

			mutex_unlock(&tid_info->mutex);
			continue;
		}

		cmdlen = IWM_UDMA_HDR_LEN + skb->len;

		IWM_DBG_TX(iwm, DBG, "Tx frame on queue %d: skb: 0x%p, sta: "
			   "%d, color: %d\n", txq->id, skb, tx_info->sta,
			   tx_info->color);

		if (txq->concat_count + cmdlen > IWM_HAL_CONCATENATE_BUF_SIZE)
			iwm_tx_send_concat_packets(iwm, txq);

		ret = iwm_tx_credit_alloc(iwm, pool_id, cmdlen);
		if (ret) {
			IWM_DBG_TX(iwm, DBG, "not enough tx_credit for queue "
				   "%d, Tx worker stopped\n", txq->id);
			spin_lock_bh(&txq->lock);
			skb_queue_head(&txq->queue, skb);
			spin_unlock_bh(&txq->lock);

			mutex_unlock(&tid_info->mutex);
			break;
		}

		txq->concat_ptr = txq->concat_buf + txq->concat_count;
		tid_info->last_seq_num =
			iwm_tx_build_packet(iwm, skb, pool_id, txq->concat_ptr);
		txq->concat_count += ALIGN(cmdlen, 16);

		mutex_unlock(&tid_info->mutex);

		kfree_skb(skb);
	}

	iwm_tx_send_concat_packets(iwm, txq);

	if (__netif_subqueue_stopped(iwm_to_ndev(iwm), txq->id) &&
	    !test_bit(pool_id, &iwm->tx_credit.full_pools_map) &&
	    (skb_queue_len(&txq->queue) < IWM_TX_LIST_SIZE / 2)) {
		IWM_DBG_TX(iwm, DBG, "LINK: start netif_subqueue[%d]", txq->id);
		netif_wake_subqueue(iwm_to_ndev(iwm), txq->id);
	}
}