Example #1
static void ri_tasklet(unsigned long dev) 
{

	struct net_device *_dev = (struct net_device *)dev;
	struct ifb_private *dp = netdev_priv(_dev);
	struct net_device_stats *stats = &dp->stats;
	struct sk_buff *skb;

	dp->st_task_enter++;
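	/* tq empty: grab the xmit lock and move all packets queued on rq over to tq;
	 * if the lock is busy, reschedule the tasklet. */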
	if ((skb = skb_peek(&dp->tq)) == NULL) {
		dp->st_txq_refl_try++;
		if (spin_trylock(&_dev->xmit_lock)) {
			dp->st_rxq_enter++;
			while ((skb = skb_dequeue(&dp->rq)) != NULL) {
				skb_queue_tail(&dp->tq, skb);
				dp->st_rx2tx_tran++;
			}
			spin_unlock(&_dev->xmit_lock);
		} else {
			/* reschedule */
			dp->st_rxq_notenter++;
			goto resched;
		}
	}

	while ((skb = skb_dequeue(&dp->tq)) != NULL) {
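		/* Re-inject the packet according to where it was intercepted: egress frames
		 * go back out via dev_queue_xmit(), ingress frames back into the stack via
		 * netif_rx(); anything else is dropped. */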
		u32 from = G_TC_FROM(skb->tc_verd);

		skb->tc_verd = 0;
		skb->tc_verd = SET_TC_NCLS(skb->tc_verd);
		stats->tx_packets++;
		stats->tx_bytes += skb->len;
		if (from & AT_EGRESS) {
			dp->st_rx_frm_egr++;
			dev_queue_xmit(skb);
		} else if (from & AT_INGRESS) {
			dp->st_rx_frm_ing++;
			netif_rx(skb);
		} else {
			dev_kfree_skb(skb);
			stats->tx_dropped++;
		}
	}

	if (spin_trylock(&_dev->xmit_lock)) {
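		/* Check whether new packets arrived on rq while we were transmitting:
		 * if so, reschedule; otherwise clear tasklet_pending and wake the
		 * stopped device queue. */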
		dp->st_rxq_check++;
		if ((skb = skb_peek(&dp->rq)) == NULL) {
			dp->tasklet_pending = 0;
			if (netif_queue_stopped(_dev))
				netif_wake_queue(_dev);
		} else {
			dp->st_rxq_rsch++;
			spin_unlock(&_dev->xmit_lock);
			goto resched;
		}
		spin_unlock(&_dev->xmit_lock);
	} else {
resched:
		dp->tasklet_pending = 1;
		tasklet_schedule(&dp->ifb_tasklet);
	}

}
Example #2
static void ri_tasklet(unsigned long dev)
{

	struct net_device *_dev = (struct net_device *)dev;
	struct ifb_private *dp = netdev_priv(_dev);
	struct net_device_stats *stats = &_dev->stats;
	struct netdev_queue *txq;
	struct sk_buff *skb;

	txq = netdev_get_tx_queue(_dev, 0);
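	/* tq empty: splice the whole rq onto it under the tx queue lock;
	 * reschedule if the lock is busy. */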
	if ((skb = skb_peek(&dp->tq)) == NULL) {
		if (__netif_tx_trylock(txq)) {
			skb_queue_splice_tail_init(&dp->rq, &dp->tq);
			__netif_tx_unlock(txq);
		} else {
			/* reschedule */
			goto resched;
		}
	}

	while ((skb = __skb_dequeue(&dp->tq)) != NULL) {
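		/* Look up the originating device by skb_iif under RCU and hand the packet
		 * back to it: egress via dev_queue_xmit(), ingress via netif_receive_skb();
		 * drop it if the device is gone. */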
		u32 from = G_TC_FROM(skb->tc_verd);

		skb->tc_verd = 0;
		skb->tc_verd = SET_TC_NCLS(skb->tc_verd);
		stats->tx_packets++;
		stats->tx_bytes += skb->len;

		rcu_read_lock();
		skb->dev = dev_get_by_index_rcu(&init_net, skb->skb_iif);
		if (!skb->dev) {
			rcu_read_unlock();
			dev_kfree_skb(skb);
			stats->tx_dropped++;
			if (skb_queue_len(&dp->tq) != 0)
				goto resched;
			break;
		}
		rcu_read_unlock();
		skb->skb_iif = _dev->ifindex;

		if (from & AT_EGRESS) {
			dev_queue_xmit(skb);
		} else if (from & AT_INGRESS) {
			skb_pull(skb, skb->dev->hard_header_len);
			netif_receive_skb(skb);
		} else
			BUG();
	}

	if (__netif_tx_trylock(txq)) {
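		/* Reschedule if more packets are waiting on rq; otherwise clear
		 * tasklet_pending and wake the stopped device queue. */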
		if ((skb = skb_peek(&dp->rq)) == NULL) {
			dp->tasklet_pending = 0;
			if (netif_queue_stopped(_dev))
				netif_wake_queue(_dev);
		} else {
			__netif_tx_unlock(txq);
			goto resched;
		}
		__netif_tx_unlock(txq);
	} else {
resched:
		dp->tasklet_pending = 1;
		tasklet_schedule(&dp->ifb_tasklet);
	}

}
Example #3
static void ri_tasklet(unsigned long dev)
{

    struct net_device *_dev = (struct net_device *)dev;
    struct ifb_private *dp = netdev_priv(_dev);
    struct net_device_stats *stats = &_dev->stats;
    struct netdev_queue *txq;
    struct sk_buff *skb;

    txq = netdev_get_tx_queue(_dev, 0);
    dp->st_task_enter++;
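    /* tq empty: grab the tx queue lock and move all packets from rq to tq;
     * reschedule if the lock is busy. */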
    if ((skb = skb_peek(&dp->tq)) == NULL) {
        dp->st_txq_refl_try++;
        if (__netif_tx_trylock(txq)) {
            dp->st_rxq_enter++;
            while ((skb = skb_dequeue(&dp->rq)) != NULL) {
                skb_queue_tail(&dp->tq, skb);
                dp->st_rx2tx_tran++;
            }
            __netif_tx_unlock(txq);
        } else {
            /* reschedule */
            dp->st_rxq_notenter++;
            goto resched;
        }
    }

    while ((skb = skb_dequeue(&dp->tq)) != NULL) {
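        /* Resolve the original device from skb->iif, then re-inject: egress via
         * dev_queue_xmit(), ingress via netif_rx() after pulling the link-layer
         * header. */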
        u32 from = G_TC_FROM(skb->tc_verd);

        skb->tc_verd = 0;
        skb->tc_verd = SET_TC_NCLS(skb->tc_verd);
        stats->tx_packets++;
        stats->tx_bytes += skb->len;

        skb->dev = dev_get_by_index(&init_net, skb->iif);
        if (!skb->dev) {
            dev_kfree_skb(skb);
            stats->tx_dropped++;
            break;
        }
        dev_put(skb->dev);
        skb->iif = _dev->ifindex;

        if (from & AT_EGRESS) {
            dp->st_rx_frm_egr++;
            dev_queue_xmit(skb);
        } else if (from & AT_INGRESS) {
            dp->st_rx_frm_ing++;
            skb_pull(skb, skb->dev->hard_header_len);
            netif_rx(skb);
        } else
            BUG();
    }

    if (__netif_tx_trylock(txq)) {
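        /* Reschedule if more packets are waiting on rq; otherwise clear
         * tasklet_pending and wake the stopped device queue. */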
        dp->st_rxq_check++;
        if ((skb = skb_peek(&dp->rq)) == NULL) {
            dp->tasklet_pending = 0;
            if (netif_queue_stopped(_dev))
                netif_wake_queue(_dev);
        } else {
            dp->st_rxq_rsch++;
            __netif_tx_unlock(txq);
            goto resched;
        }
        __netif_tx_unlock(txq);
    } else {
resched:
        dp->tasklet_pending = 1;
        tasklet_schedule(&dp->ifb_tasklet);
    }

}