static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct pie_sched_data *q = qdisc_priv(sch);
        bool enqueue = false;

        if (unlikely(qdisc_qlen(sch) >= sch->limit)) {
                q->stats.overlimit++;
                goto out;
        }

        if (!drop_early(sch, &q->params, &q->vars, skb->len)) {
                enqueue = true;
        } else if (q->params.ecn && (q->vars.prob <= MAX_PROB / 10) &&
                   INET_ECN_set_ce(skb)) {
                /* If packet is ecn capable, mark it if drop probability
                 * is lower than 10%, else drop it.
                 */
                q->stats.ecn_mark++;
                enqueue = true;
        }

        /* we can enqueue the packet */
        if (enqueue) {
                q->stats.packets_in++;
                if (qdisc_qlen(sch) > q->stats.maxq)
                        q->stats.maxq = qdisc_qlen(sch);

                return qdisc_enqueue_tail(skb, sch);
        }

out:
        q->stats.dropped++;
        return qdisc_drop(skb, sch);
}
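/*
 * Illustrative user-space sketch (not kernel code) of the enqueue decision
 * above: a packet selected by drop_early() is ECN-marked instead of dropped
 * only while the drop probability stays at or below 10% of MAX_PROB.
 * MAX_PROB and the pie_decision() helper here are hypothetical stand-ins
 * used only to show the branching, not the real PIE state machine.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_PROB 0xffffffffULL

enum pie_action { PIE_ENQUEUE, PIE_MARK, PIE_DROP };

static enum pie_action pie_decision(uint64_t prob, bool ecn_capable)
{
        /* drop_early() equivalent: random draw against the current probability */
        bool selected = ((uint64_t)rand() * MAX_PROB / RAND_MAX) < prob;

        if (!selected)
                return PIE_ENQUEUE;

        /* ECN-capable packets are marked, not dropped, while prob <= 10% */
        if (ecn_capable && prob <= MAX_PROB / 10)
                return PIE_MARK;

        return PIE_DROP;
}

int main(void)
{
        const char *names[] = { "enqueue", "mark", "drop" };
        uint64_t probs[] = { MAX_PROB / 20, MAX_PROB / 5 };

        for (int i = 0; i < 2; i++)
                printf("prob ~%llu%% of max, ecn -> %s\n",
                       (unsigned long long)(probs[i] * 100 / MAX_PROB),
                       names[pie_decision(probs[i], true)]);
        return 0;
}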
/*
 * Transmit one skb, and handle the return status as required. Holding the
 * __QDISC_STATE_RUNNING bit guarantees that only one CPU can execute this
 * function.
 *
 * Returns to the caller:
 *	0  - queue is empty or throttled.
 *	>0 - queue is not empty.
 */
int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
                    struct net_device *dev, struct netdev_queue *txq,
                    spinlock_t *root_lock)
{
        int ret = NETDEV_TX_BUSY;

        /* And release qdisc */
        spin_unlock(root_lock);

        HARD_TX_LOCK(dev, txq, smp_processor_id());
        if (!netif_xmit_frozen_or_stopped(txq))
                ret = dev_hard_start_xmit(skb, dev, txq);

        HARD_TX_UNLOCK(dev, txq);

#ifdef CONFIG_MTK_NET_LOGGING
        if (ret != NETDEV_TX_OK) {
                if (qdisc_qlen(q) < 16) {
                        if (4 == qdisc_qlen(q) % 16)
                                printk(KERN_INFO "[mtk_net][sched]dev_hard_start_xmit ret = %d(%s), txq state = %lu\n",
                                       ret, dev->name, txq->state);
                } else {
                        if (64 == qdisc_qlen(q) % 128)
                                printk(KERN_INFO "[mtk_net][sched]warning: dev_hard_start_xmit ret = %d(%s), txq state = %lu\n",
                                       ret, dev->name, txq->state);
                }
        }
#endif
        spin_lock(root_lock);

        if (dev_xmit_complete(ret)) {
                /* Driver sent out skb successfully or skb was consumed */
                ret = qdisc_qlen(q);
        } else if (ret == NETDEV_TX_LOCKED) {
                /* Driver try lock failed */
                ret = handle_dev_cpu_collision(skb, txq, q);
        } else {
                /* Driver returned NETDEV_TX_BUSY - requeue skb */
                if (unlikely(ret != NETDEV_TX_BUSY))
                        net_warn_ratelimited("BUG %s code %d qlen %d\n",
                                             dev->name, ret, q->q.qlen);

                ret = dev_requeue_skb(skb, q);
        }

        if (ret && netif_xmit_frozen_or_stopped(txq))
                ret = 0;

        return ret;
}
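/*
 * Hedged sketch of how a caller is expected to consume the return value
 * documented above (0 = queue empty or throttled, >0 = more work pending):
 * keep transmitting while the path reports a non-empty queue and a quota
 * remains. try_transmit_one() and the quota value are hypothetical
 * stand-ins for the dequeue + sch_direct_xmit step, not the kernel's own
 * __qdisc_run() loop.
 */
#include <stdio.h>

/* hypothetical: returns the remaining queue length, 0 when empty/throttled */
static int try_transmit_one(int *queued)
{
        if (*queued > 0)
                (*queued)--;
        return *queued;
}

static void run_queue(int queued, int quota)
{
        while (try_transmit_one(&queued)) {
                if (--quota <= 0) {
                        /* out of budget: stop and let the rest be rescheduled */
                        printf("quota exhausted, %d packets left\n", queued);
                        return;
                }
        }
        printf("queue drained or throttled\n");
}

int main(void)
{
        run_queue(5, 64);       /* drains completely */
        run_queue(200, 64);     /* hits the quota and defers */
        return 0;
}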
static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct codel_sched_data *q;

        if (likely(qdisc_qlen(sch) < sch->limit)) {
                if (qdisc_qlen(sch) > 128)
                        skb = skb_reduce_truesize(skb);
                codel_set_enqueue_time(skb);
                return qdisc_enqueue_tail(skb, sch);
        }
        q = qdisc_priv(sch);
        q->drop_overlimit++;
        return qdisc_drop(skb, sch);
}
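/*
 * Illustrative sketch (plain C, not kernel code) of why the enqueue path
 * above stamps each packet with codel_set_enqueue_time(): CoDel's dequeue
 * side compares that stamp with the current time to obtain the packet's
 * sojourn time in the queue. The fake_pkt struct and helpers below are
 * hypothetical, using CLOCK_MONOTONIC for the timestamps.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct fake_pkt {
        uint64_t enqueue_ns;    /* stamped at enqueue time */
};

static uint64_t now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static void set_enqueue_time(struct fake_pkt *p)
{
        p->enqueue_ns = now_ns();
}

static uint64_t sojourn_time_ns(const struct fake_pkt *p)
{
        return now_ns() - p->enqueue_ns;
}

int main(void)
{
        struct timespec wait = { .tv_sec = 0, .tv_nsec = 2 * 1000 * 1000 };
        struct fake_pkt p;

        set_enqueue_time(&p);
        nanosleep(&wait, NULL); /* pretend the packet waited ~2 ms in the queue */
        printf("sojourn time: %llu us\n",
               (unsigned long long)(sojourn_time_ns(&p) / 1000));
        return 0;
}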
static inline int handle_dev_cpu_collision(struct sk_buff *skb,
                                           struct netdev_queue *dev_queue,
                                           struct Qdisc *q)
{
        int ret;

        if (unlikely(dev_queue->xmit_lock_owner == smp_processor_id())) {
                /*
                 * Same CPU holding the lock. It may be a transient
                 * configuration error, when hard_start_xmit() recurses. We
                 * detect it by checking xmit owner and drop the packet when
                 * deadloop is detected. Return OK to try the next skb.
                 */
                kfree_skb(skb);
                if (net_ratelimit())
                        printk(KERN_WARNING "Dead loop on netdevice %s, fix it urgently!\n",
                               dev_queue->dev->name);
                ret = qdisc_qlen(q);
        } else {
                /*
                 * Another cpu is holding lock, requeue & delay xmits for
                 * some time.
                 */
                __get_cpu_var(netdev_rx_stat).cpu_collision++;
                ret = dev_requeue_skb(skb, q);
        }

        return ret;
}
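/*
 * Minimal user-space sketch of the collision check above: if the CPU that
 * already owns the transmit lock re-enters the transmit path, the packet is
 * thrown away to break the recursion; otherwise the contention is only a
 * transient collision and the packet is put back on the queue. The struct
 * and helper below are hypothetical, illustration only.
 */
#include <stdio.h>

struct fake_txq {
        int xmit_lock_owner;    /* CPU id holding the lock, -1 if free */
};

enum collision_result { COLLISION_DROPPED, COLLISION_REQUEUED };

static enum collision_result handle_collision(const struct fake_txq *txq,
                                              int this_cpu)
{
        if (txq->xmit_lock_owner == this_cpu) {
                /* same CPU recursed into xmit: drop to break the dead loop */
                return COLLISION_DROPPED;
        }
        /* another CPU holds the lock: requeue and try again later */
        return COLLISION_REQUEUED;
}

int main(void)
{
        struct fake_txq txq = { .xmit_lock_owner = 2 };

        printf("cpu 2 -> %s\n",
               handle_collision(&txq, 2) == COLLISION_DROPPED ?
               "dropped (dead loop)" : "requeued");
        printf("cpu 0 -> %s\n",
               handle_collision(&txq, 0) == COLLISION_DROPPED ?
               "dropped (dead loop)" : "requeued");
        return 0;
}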
static int generic_qdisc_enqueue(struct mbuf *m, struct Qdisc *qdisc)
{
        struct nm_generic_qdisc *priv = qdisc_priv(qdisc);

        if (unlikely(qdisc_qlen(qdisc) >= priv->limit)) {
                RD(5, "dropping mbuf");

                return qdisc_drop(m, qdisc);
                /* or qdisc_reshape_fail() ? */
        }

        ND(5, "Enqueuing mbuf, len %u", qdisc_qlen(qdisc));

        return qdisc_enqueue_tail(m, qdisc);
}
/*
 * NOTE: Called under dev->queue_lock with locally disabled BH.
 *
 * __LINK_STATE_QDISC_RUNNING guarantees only one CPU can process this
 * device at a time. dev->queue_lock serializes queue accesses for
 * this device AND dev->qdisc pointer itself.
 *
 * netif_tx_lock serializes accesses to device driver.
 *
 * dev->queue_lock and netif_tx_lock are mutually exclusive,
 * if one is grabbed, another must be free.
 *
 * Note, that this procedure can be called by a watchdog timer
 *
 * Returns to the caller:
 *	0  - queue is empty or throttled.
 *	>0 - queue is not empty.
 *
 */
static inline int qdisc_restart(struct net_device *dev)
{
        struct Qdisc *q = dev->qdisc;
        struct sk_buff *skb;
        unsigned lockless;
        int ret;

        /* Dequeue packet */
        if (unlikely((skb = dev_dequeue_skb(dev, q)) == NULL))
                return 0;

        /*
         * When the driver has LLTX set, it does its own locking in
         * start_xmit. These checks are worth it because even uncongested
         * locks can be quite expensive. The driver can do a trylock, as
         * is being done here; in case of lock contention it should return
         * NETDEV_TX_LOCKED and the packet will be requeued.
         */
        lockless = (dev->features & NETIF_F_LLTX);

        if (!lockless && !netif_tx_trylock(dev)) {
                /* Another CPU grabbed the driver tx lock */
                return handle_dev_cpu_collision(skb, dev, q);
        }

        /* And release queue */
        spin_unlock(&dev->queue_lock);

        ret = dev_hard_start_xmit(skb, dev);

        if (!lockless)
                netif_tx_unlock(dev);

        spin_lock(&dev->queue_lock);
        q = dev->qdisc;

        switch (ret) {
        case NETDEV_TX_OK:
                /* Driver sent out skb successfully */
                ret = qdisc_qlen(q);
                break;

        case NETDEV_TX_LOCKED:
                /* Driver try lock failed */
                ret = handle_dev_cpu_collision(skb, dev, q);
                break;

        default:
                /* Driver returned NETDEV_TX_BUSY - requeue skb */
                if (unlikely(ret != NETDEV_TX_BUSY && net_ratelimit()))
                        printk(KERN_WARNING "BUG %s code %d qlen %d\n",
                               dev->name, ret, q->q.qlen);

                ret = dev_requeue_skb(skb, dev, q);
                break;
        }

        return ret;
}
/*
 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 *
 * __QDISC_STATE_RUNNING guarantees only one CPU can process
 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
 * this queue.
 *
 * netif_tx_lock serializes accesses to device driver.
 *
 * qdisc_lock(q) and netif_tx_lock are mutually exclusive,
 * if one is grabbed, another must be free.
 *
 * Note, that this procedure can be called by a watchdog timer
 *
 * Returns to the caller:
 *	0  - queue is empty or throttled.
 *	>0 - queue is not empty.
 *
 */
static inline int qdisc_restart(struct Qdisc *q)
{
        struct netdev_queue *txq;
        int ret = NETDEV_TX_BUSY;
        struct net_device *dev;
        spinlock_t *root_lock;
        struct sk_buff *skb;

        /* Dequeue packet */
        /* Take the next packet to be transmitted off the queue. */
        if (unlikely((skb = dequeue_skb(q)) == NULL))
                return 0;

        root_lock = qdisc_lock(q);

        /* And release qdisc */
        spin_unlock(root_lock);

        dev = qdisc_dev(q);
        txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

        HARD_TX_LOCK(dev, txq, smp_processor_id());
        /* Call dev_hard_start_xmit() to push the packet out over the actual link. */
        if (!netif_tx_queue_stopped(txq) &&
            !netif_tx_queue_frozen(txq))
                ret = dev_hard_start_xmit(skb, dev, txq);
        HARD_TX_UNLOCK(dev, txq);

        spin_lock(root_lock);

        switch (ret) {
        case NETDEV_TX_OK:
                /* Driver sent out skb successfully */
                ret = qdisc_qlen(q);
                break;

        case NETDEV_TX_LOCKED:
                /* Driver try lock failed */
                ret = handle_dev_cpu_collision(skb, txq, q);
                break;

        default:
                /* Driver returned NETDEV_TX_BUSY - requeue skb */
                if (unlikely(ret != NETDEV_TX_BUSY && net_ratelimit()))
                        printk(KERN_WARNING "BUG %s code %d qlen %d\n",
                               dev->name, ret, q->q.qlen);

                ret = dev_requeue_skb(skb, q);
                break;
        }

        if (ret && (netif_tx_queue_stopped(txq) ||
                    netif_tx_queue_frozen(txq)))
                ret = 0;

        return ret;
}
/*
 * Transmit possibly several skbs, and handle the return status as
 * required. Holding the __QDISC_STATE_RUNNING bit guarantees that
 * only one CPU can execute this function.
 *
 * Returns to the caller:
 *	0  - queue is empty or throttled.
 *	>0 - queue is not empty.
 */
int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
                    struct net_device *dev, struct netdev_queue *txq,
                    spinlock_t *root_lock, bool validate)
{
        int ret = NETDEV_TX_BUSY;

        /* And release qdisc */
        spin_unlock(root_lock);

        /* Note that we validate skb (GSO, checksum, ...) outside of locks */
        if (validate)
                skb = validate_xmit_skb_list(skb, dev);

        if (likely(skb)) {
                HARD_TX_LOCK(dev, txq, smp_processor_id());
                if (!netif_xmit_frozen_or_stopped(txq))
                        skb = dev_hard_start_xmit(skb, dev, txq, &ret);

                HARD_TX_UNLOCK(dev, txq);
        } else {
                spin_lock(root_lock);
                return qdisc_qlen(q);
        }
        spin_lock(root_lock);

        if (dev_xmit_complete(ret)) {
                /* Driver sent out skb successfully or skb was consumed */
                ret = qdisc_qlen(q);
        } else {
                /* Driver returned NETDEV_TX_BUSY - requeue skb */
                if (unlikely(ret != NETDEV_TX_BUSY))
                        net_warn_ratelimited("BUG %s code %d qlen %d\n",
                                             dev->name, ret, q->q.qlen);

                ret = dev_requeue_skb(skb, q);
        }

        if (ret && netif_xmit_frozen_or_stopped(txq))
                ret = 0;

        return ret;
}
static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct codel_sched_data *q;

        q = qdisc_priv(sch);
        if (likely(qdisc_qlen(sch) < q->limit)) {
                codel_set_enqueue_time(skb);
                return qdisc_enqueue_tail(skb, sch);
        }
        q->drop_overlimit++;
        return qdisc_drop(skb, sch);
}
/*
 * Transmit one skb, and handle the return status as required. Holding the
 * __QDISC_STATE_RUNNING bit guarantees that only one CPU can execute this
 * function.
 *
 * Returns to the caller:
 *	0  - queue is empty or throttled.
 *	>0 - queue is not empty.
 */
int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
                    struct net_device *dev, struct netdev_queue *txq,
                    spinlock_t *root_lock)
{
        int ret = NETDEV_TX_BUSY;

        /* And release qdisc */
        spin_unlock(root_lock);

        HARD_TX_LOCK(dev, txq, smp_processor_id());
        if (!netif_tx_queue_stopped(txq) &&
            !netif_tx_queue_frozen(txq))
                ret = dev_hard_start_xmit(skb, dev, txq);
        HARD_TX_UNLOCK(dev, txq);

        spin_lock(root_lock);

        switch (ret) {
        case NETDEV_TX_OK:
                /* Driver sent out skb successfully */
                ret = qdisc_qlen(q);
                break;

        case NETDEV_TX_LOCKED:
                /* Driver try lock failed */
                ret = handle_dev_cpu_collision(skb, txq, q);
                break;

        default:
                /* Driver returned NETDEV_TX_BUSY - requeue skb */
                if (unlikely(ret != NETDEV_TX_BUSY && net_ratelimit()))
                        printk(KERN_WARNING "BUG %s code %d qlen %d\n",
                               dev->name, ret, q->q.qlen);

                ret = dev_requeue_skb(skb, q);
                break;
        }

        if (ret && (netif_tx_queue_stopped(txq) ||
                    netif_tx_queue_frozen(txq)))
                ret = 0;

        return ret;
}
/*
 * Transmit one skb, and handle the return status as required. Holding the
 * __QDISC_STATE_RUNNING bit guarantees that only one CPU can execute this
 * function.
 *
 * Returns to the caller:
 *	0  - queue is empty or throttled.
 *	>0 - queue is not empty.
 */
int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
                    struct net_device *dev, struct netdev_queue *txq,
                    spinlock_t *root_lock)
{
        int ret = NETDEV_TX_BUSY;

        if (IS_ERR(skb) || !skb)
                return 0;
        if (IS_ERR(dev) || !dev)
                return 0;

        /* And release qdisc */
        spin_unlock(root_lock);

        HARD_TX_LOCK(dev, txq, smp_processor_id());
        if (!netif_xmit_frozen_or_stopped(txq))
                ret = dev_hard_start_xmit(skb, dev, txq);

        HARD_TX_UNLOCK(dev, txq);

        spin_lock(root_lock);

        if (dev_xmit_complete(ret)) {
                /* Driver sent out skb successfully or skb was consumed */
                ret = qdisc_qlen(q);
        } else if (ret == NETDEV_TX_LOCKED) {
                /* Driver try lock failed */
                ret = handle_dev_cpu_collision(skb, txq, q);
        } else {
                /* Driver returned NETDEV_TX_BUSY - requeue skb */
                if (unlikely(ret != NETDEV_TX_BUSY && net_ratelimit()))
                        pr_warning("BUG %s code %d qlen %d\n",
                                   dev->name, ret, q->q.qlen);

                ret = dev_requeue_skb(skb, q);
        }

        if (ret && netif_xmit_frozen_or_stopped(txq))
                ret = 0;

        return ret;
}
static inline int handle_dev_cpu_collision(struct sk_buff *skb,
                                           struct netdev_queue *dev_queue,
                                           struct Qdisc *q)
{
        int ret;

        if (unlikely(dev_queue->xmit_lock_owner == smp_processor_id())) {
                /* Same CPU already holds the xmit lock: hard_start_xmit()
                 * recursed, so drop the packet to break the dead loop.
                 */
                kfree_skb(skb);
                if (net_ratelimit())
                        printk(KERN_WARNING "Dead loop on netdevice %s, fix it urgently!\n",
                               dev_queue->dev->name);
                ret = qdisc_qlen(q);
        } else {
                /* Another CPU holds the lock: count the collision and
                 * requeue the packet for a later attempt.
                 */
                __get_cpu_var(netdev_rx_stat).cpu_collision++;
                ret = dev_requeue_skb(skb, q);
        }

        return ret;
}
static struct mbuf *
generic_qdisc_dequeue(struct Qdisc *qdisc)
{
        struct mbuf *m = qdisc_dequeue_head(qdisc);

        if (!m) {
                return NULL;
        }

        if (unlikely(m->priority == NM_MAGIC_PRIORITY_TXQE)) {
                /* nm_os_generic_xmit_frame() asked us for an event on this
                 * mbuf. We have to set the priority to the normal TX token,
                 * so that generic_ndo_start_xmit can pass it to the driver. */
                m->priority = NM_MAGIC_PRIORITY_TX;
                ND(5, "Event met, notify %p", m);
                netmap_generic_irq(NA(qdisc_dev(qdisc)),
                                   skb_get_queue_mapping(m), NULL);
        }

        ND(5, "Dequeuing mbuf, len %u", qdisc_qlen(qdisc));

        return m;
}
static struct mbuf *
generic_qdisc_peek(struct Qdisc *qdisc)
{
        RD(5, "Peeking queue, curr len %u", qdisc_qlen(qdisc));
        return skb_peek(&qdisc->q);
}
/*
 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 *
 * __QDISC_STATE_RUNNING guarantees only one CPU can process
 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
 * this queue.
 *
 * netif_tx_lock serializes accesses to device driver.
 *
 * qdisc_lock(q) and netif_tx_lock are mutually exclusive,
 * if one is grabbed, another must be free.
 *
 * Note, that this procedure can be called by a watchdog timer
 *
 * Returns to the caller:
 *	0  - queue is empty or throttled.
 *	>0 - queue is not empty.
 *
 */
static inline int qdisc_restart(struct Qdisc *q)
{
        struct netdev_queue *txq;
        int ret = NETDEV_TX_BUSY;
        struct net_device *dev;
        spinlock_t *root_lock;
        struct sk_buff *skb;

        /* Dequeue packet */
        if (unlikely((skb = dequeue_skb(q)) == NULL))
                return 0;

        root_lock = qdisc_lock(q);

        /* And release qdisc */
        spin_unlock(root_lock);

        dev = qdisc_dev(q);
        txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

        HARD_TX_LOCK(dev, txq, smp_processor_id());
        if (!netif_tx_queue_stopped(txq) &&
            !netif_tx_queue_frozen(txq))
                ret = dev_hard_start_xmit(skb, dev, txq);
        HARD_TX_UNLOCK(dev, txq);

        spin_lock(root_lock);

        switch (ret) {
        case NETDEV_TX_OK:
                /* Driver sent out skb successfully */
                ret = qdisc_qlen(q);
                break;

        case NETDEV_TX_LOCKED:
                /* Driver try lock failed */
                ret = handle_dev_cpu_collision(skb, txq, q);
                break;

        default:
                /* Driver returned NETDEV_TX_BUSY - requeue skb */
                if (unlikely(ret != NETDEV_TX_BUSY && net_ratelimit()))
                        printk(KERN_WARNING "BUG %s code %d qlen %d\n",
                               dev->name, ret, q->q.qlen);

                /* [ENODEV] No such device. An attempt was made to apply an
                 * inappropriate function to a device.
                 */
                if (ret == -ENODEV) {
                        printk(KERN_EMERG "%s: STOP QUEUE. Reason = %d (-ENODEV) (net\\sched\\sch_generic.c 170)\n",
                               dev->name, ret);
                        if (strnicmp((char *)&dev->name, "eth0", 4) == 0) {
                                /* Stop upper layers calling the device
                                 * hard_start_xmit routine. Used for flow
                                 * control when transmit resources are
                                 * unavailable.
                                 */
                                ret = dev_requeue_skb(skb, q);
                                netif_stop_queue(dev);
                        }
                } else {
                        ret = dev_requeue_skb(skb, q);
                }
                break;
        }

        if (ret && (netif_tx_queue_stopped(txq) ||
                    netif_tx_queue_frozen(txq)))
                ret = 0;

        return ret;
}