static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ifb_private *dp = netdev_priv(dev);
        struct net_device_stats *stats = &dev->stats;
        u32 from = G_TC_FROM(skb->tc_verd);

        stats->rx_packets++;
        stats->rx_bytes += skb->len;

        if (!(from & (AT_INGRESS|AT_EGRESS)) || !skb->skb_iif) {
                dev_kfree_skb(skb);
                stats->rx_dropped++;
                return NETDEV_TX_OK;
        }

        if (skb_queue_len(&dp->rq) >= dev->tx_queue_len) {
                netif_stop_queue(dev);
        }

        __skb_queue_tail(&dp->rq, skb);
        if (!dp->tasklet_pending) {
                dp->tasklet_pending = 1;
                tasklet_schedule(&dp->ifb_tasklet);
        }

        return NETDEV_TX_OK;
}
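/*
 * Sketch (assumption, not part of the excerpt above): ifb_xmit() is wired in
 * as the device's transmit handler through net_device_ops, as drivers of this
 * era do. ifb_open()/ifb_close() are the driver's open/stop callbacks and are
 * assumed here.
 */
static const struct net_device_ops ifb_netdev_ops = {
        .ndo_open          = ifb_open,
        .ndo_stop          = ifb_close,
        .ndo_start_xmit    = ifb_xmit,
        .ndo_validate_addr = eth_validate_addr,
};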
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
        struct netem_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;

        if (qdisc_is_throttled(sch))
                return NULL;

tfifo_dequeue:
        skb = qdisc_peek_head(sch);
        if (skb) {
                const struct netem_skb_cb *cb = netem_skb_cb(skb);

                /* if more time remaining? */
                if (cb->time_to_send <= psched_get_time()) {
                        __skb_unlink(skb, &sch->q);
                        sch->qstats.backlog -= qdisc_pkt_len(skb);

#ifdef CONFIG_NET_CLS_ACT
                        /*
                         * If it's at ingress let's pretend the delay is
                         * from the network (tstamp will be updated).
                         */
                        if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
                                skb->tstamp.tv64 = 0;
#endif

                        if (q->qdisc) {
                                int err = qdisc_enqueue(skb, q->qdisc);

                                if (unlikely(err != NET_XMIT_SUCCESS)) {
                                        if (net_xmit_drop_count(err)) {
                                                sch->qstats.drops++;
                                                qdisc_tree_decrease_qlen(sch, 1);
                                        }
                                }
                                goto tfifo_dequeue;
                        }
deliver:
                        qdisc_unthrottled(sch);
                        qdisc_bstats_update(sch, skb);
                        return skb;
                }

                if (q->qdisc) {
                        skb = q->qdisc->ops->dequeue(q->qdisc);
                        if (skb)
                                goto deliver;
                }
                qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
        }

        if (q->qdisc) {
                skb = q->qdisc->ops->dequeue(q->qdisc);
                if (skb)
                        goto deliver;
        }
        return NULL;
}
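/*
 * Sketch of the control-block accessor used by netem_dequeue() above
 * (assumption: the 3.x-era layout, where netem's per-packet data sits after
 * struct qdisc_skb_cb inside skb->cb). time_to_send is the scheduled
 * departure time that netem_dequeue() compares against psched_get_time().
 */
struct netem_skb_cb {
        psched_time_t   time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
        /* Fail the build if skb->cb cannot hold both control blocks. */
        BUILD_BUG_ON(sizeof(skb->cb) <
                     sizeof(struct qdisc_skb_cb) + sizeof(struct netem_skb_cb));
        return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}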
static int ifb_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ifb_private *dp = netdev_priv(dev);
        struct net_device_stats *stats = &dp->stats;
        int ret = 0;
        u32 from = G_TC_FROM(skb->tc_verd);

        stats->tx_packets++;
        stats->tx_bytes += skb->len;

        if (!from || !skb->input_dev) {
dropped:
                dev_kfree_skb(skb);
                stats->rx_dropped++;
                return ret;
        } else {
                /*
                 * note we could be going
                 * ingress -> egress or
                 * egress -> ingress
                 */
                skb->dev = skb->input_dev;
                skb->input_dev = dev;
                if (from & AT_INGRESS) {
                        skb_pull(skb, skb->dev->hard_header_len);
                } else {
                        if (!(from & AT_EGRESS)) {
                                goto dropped;
                        }
                }
        }

        if (skb_queue_len(&dp->rq) >= dev->tx_queue_len) {
                netif_stop_queue(dev);
        }

        dev->trans_start = jiffies;
        skb_queue_tail(&dp->rq, skb);
        if (!dp->tasklet_pending) {
                dp->tasklet_pending = 1;
                tasklet_schedule(&dp->ifb_tasklet);
        }

        return ret;
}
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
        struct netem_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;

        if (qdisc_is_throttled(sch))
                return NULL;

        skb = q->qdisc->ops->peek(q->qdisc);
        if (skb) {
                const struct netem_skb_cb *cb = netem_skb_cb(skb);
                psched_time_t now = psched_get_time();

                /* if more time remaining? */
                if (cb->time_to_send <= now) {
                        skb = qdisc_dequeue_peeked(q->qdisc);
                        if (unlikely(!skb))
                                return NULL;

#ifdef CONFIG_NET_CLS_ACT
                        /*
                         * If it's at ingress let's pretend the delay is
                         * from the network (tstamp will be updated).
                         */
                        if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
                                skb->tstamp.tv64 = 0;
#endif

                        sch->q.qlen--;
                        qdisc_unthrottled(sch);
                        qdisc_bstats_update(sch, skb);
                        return skb;
                }

                qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
        }
        return NULL;
}
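/*
 * Skeleton (assumption): the watchdog armed by qdisc_watchdog_schedule() in
 * both netem_dequeue() variants is bound to the qdisc at init time. When the
 * timer fires it clears the throttled state and reschedules the qdisc so
 * dequeue runs again near cb->time_to_send. Option parsing and child qdisc
 * setup are elided.
 */
static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
        struct netem_sched_data *q = qdisc_priv(sch);

        qdisc_watchdog_init(&q->watchdog, sch);
        /* ... parse netlink options, set up internal/child qdisc ... */
        return 0;
}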
static void ri_tasklet(unsigned long dev)
{
        struct net_device *_dev = (struct net_device *)dev;
        struct ifb_private *dp = netdev_priv(_dev);
        struct net_device_stats *stats = &_dev->stats;
        struct netdev_queue *txq;
        struct sk_buff *skb;

        txq = netdev_get_tx_queue(_dev, 0);
        if ((skb = skb_peek(&dp->tq)) == NULL) {
                if (__netif_tx_trylock(txq)) {
                        skb_queue_splice_tail_init(&dp->rq, &dp->tq);
                        __netif_tx_unlock(txq);
                } else {
                        /* reschedule */
                        goto resched;
                }
        }

        while ((skb = __skb_dequeue(&dp->tq)) != NULL) {
                u32 from = G_TC_FROM(skb->tc_verd);

                skb->tc_verd = 0;
                skb->tc_verd = SET_TC_NCLS(skb->tc_verd);
                stats->tx_packets++;
                stats->tx_bytes += skb->len;

                rcu_read_lock();
                skb->dev = dev_get_by_index_rcu(&init_net, skb->skb_iif);
                if (!skb->dev) {
                        rcu_read_unlock();
                        dev_kfree_skb(skb);
                        stats->tx_dropped++;
                        if (skb_queue_len(&dp->tq) != 0)
                                goto resched;
                        break;
                }
                rcu_read_unlock();
                skb->skb_iif = _dev->ifindex;

                if (from & AT_EGRESS) {
                        dev_queue_xmit(skb);
                } else if (from & AT_INGRESS) {
                        skb_pull(skb, skb->dev->hard_header_len);
                        netif_receive_skb(skb);
                } else
                        BUG();
        }

        if (__netif_tx_trylock(txq)) {
                if ((skb = skb_peek(&dp->rq)) == NULL) {
                        dp->tasklet_pending = 0;
                        if (netif_queue_stopped(_dev))
                                netif_wake_queue(_dev);
                } else {
                        __netif_tx_unlock(txq);
                        goto resched;
                }
                __netif_tx_unlock(txq);
        } else {
resched:
                dp->tasklet_pending = 1;
                tasklet_schedule(&dp->ifb_tasklet);
        }
}
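/*
 * Sketch (assumption, matching the fields the ri_tasklet() versions touch):
 * the ifb private area pairs the rx/tx holding queues with the tasklet, and
 * ifb_open() binds ri_tasklet() to the device via tasklet_init(). The older
 * variants below additionally keep st_* debug counters in this struct.
 */
struct ifb_private {
        struct tasklet_struct   ifb_tasklet;
        int                     tasklet_pending;
        struct sk_buff_head     rq;
        struct sk_buff_head     tq;
};

static int ifb_open(struct net_device *dev)
{
        struct ifb_private *dp = netdev_priv(dev);

        tasklet_init(&dp->ifb_tasklet, ri_tasklet, (unsigned long)dev);
        __skb_queue_head_init(&dp->rq);
        __skb_queue_head_init(&dp->tq);
        netif_start_queue(dev);

        return 0;
}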
static void ri_tasklet(unsigned long dev)
{
        struct net_device *_dev = (struct net_device *)dev;
        struct ifb_private *dp = netdev_priv(_dev);
        struct net_device_stats *stats = &dp->stats;
        struct sk_buff *skb;

        dp->st_task_enter++;
        if ((skb = skb_peek(&dp->tq)) == NULL) {
                dp->st_txq_refl_try++;
                if (spin_trylock(&_dev->xmit_lock)) {
                        dp->st_rxq_enter++;
                        while ((skb = skb_dequeue(&dp->rq)) != NULL) {
                                skb_queue_tail(&dp->tq, skb);
                                dp->st_rx2tx_tran++;
                        }
                        spin_unlock(&_dev->xmit_lock);
                } else {
                        /* reschedule */
                        dp->st_rxq_notenter++;
                        goto resched;
                }
        }

        while ((skb = skb_dequeue(&dp->tq)) != NULL) {
                u32 from = G_TC_FROM(skb->tc_verd);

                skb->tc_verd = 0;
                skb->tc_verd = SET_TC_NCLS(skb->tc_verd);
                stats->tx_packets++;
                stats->tx_bytes += skb->len;

                if (from & AT_EGRESS) {
                        dp->st_rx_frm_egr++;
                        dev_queue_xmit(skb);
                } else if (from & AT_INGRESS) {
                        dp->st_rx_frm_ing++;
                        netif_rx(skb);
                } else {
                        dev_kfree_skb(skb);
                        stats->tx_dropped++;
                }
        }

        if (spin_trylock(&_dev->xmit_lock)) {
                dp->st_rxq_check++;
                if ((skb = skb_peek(&dp->rq)) == NULL) {
                        dp->tasklet_pending = 0;
                        if (netif_queue_stopped(_dev))
                                netif_wake_queue(_dev);
                } else {
                        dp->st_rxq_rsch++;
                        spin_unlock(&_dev->xmit_lock);
                        goto resched;
                }
                spin_unlock(&_dev->xmit_lock);
        } else {
resched:
                dp->tasklet_pending = 1;
                tasklet_schedule(&dp->ifb_tasklet);
        }
}
static void ri_tasklet(unsigned long dev)
{
        struct net_device *_dev = (struct net_device *)dev;
        struct ifb_private *dp = netdev_priv(_dev);
        struct net_device_stats *stats = &_dev->stats;
        struct netdev_queue *txq;
        struct sk_buff *skb;

        txq = netdev_get_tx_queue(_dev, 0);
        dp->st_task_enter++;
        if ((skb = skb_peek(&dp->tq)) == NULL) {
                dp->st_txq_refl_try++;
                if (__netif_tx_trylock(txq)) {
                        dp->st_rxq_enter++;
                        while ((skb = skb_dequeue(&dp->rq)) != NULL) {
                                skb_queue_tail(&dp->tq, skb);
                                dp->st_rx2tx_tran++;
                        }
                        __netif_tx_unlock(txq);
                } else {
                        /* reschedule */
                        dp->st_rxq_notenter++;
                        goto resched;
                }
        }

        while ((skb = skb_dequeue(&dp->tq)) != NULL) {
                u32 from = G_TC_FROM(skb->tc_verd);

                skb->tc_verd = 0;
                skb->tc_verd = SET_TC_NCLS(skb->tc_verd);
                stats->tx_packets++;
                stats->tx_bytes += skb->len;

                skb->dev = dev_get_by_index(&init_net, skb->iif);
                if (!skb->dev) {
                        dev_kfree_skb(skb);
                        stats->tx_dropped++;
                        break;
                }
                dev_put(skb->dev);
                skb->iif = _dev->ifindex;

                if (from & AT_EGRESS) {
                        dp->st_rx_frm_egr++;
                        dev_queue_xmit(skb);
                } else if (from & AT_INGRESS) {
                        dp->st_rx_frm_ing++;
                        skb_pull(skb, skb->dev->hard_header_len);
                        netif_rx(skb);
                } else
                        BUG();
        }

        if (__netif_tx_trylock(txq)) {
                dp->st_rxq_check++;
                if ((skb = skb_peek(&dp->rq)) == NULL) {
                        dp->tasklet_pending = 0;
                        if (netif_queue_stopped(_dev))
                                netif_wake_queue(_dev);
                } else {
                        dp->st_rxq_rsch++;
                        __netif_tx_unlock(txq);
                        goto resched;
                }
                __netif_tx_unlock(txq);
        } else {
resched:
                dp->tasklet_pending = 1;
                tasklet_schedule(&dp->ifb_tasklet);
        }
}