/*
 * NOTE: Called under dev->queue_lock with locally disabled BH.
 *
 * __LINK_STATE_QDISC_RUNNING guarantees only one CPU can process this
 * device at a time. dev->queue_lock serializes queue accesses for
 * this device AND dev->qdisc pointer itself.
 *
 * netif_tx_lock serializes accesses to the device driver.
 *
 * dev->queue_lock and netif_tx_lock are mutually exclusive:
 * if one is grabbed, the other must be free.
 *
 * Note that this procedure can be called by a watchdog timer.
 *
 * Returns to the caller:
 *	0  - queue is empty or throttled.
 *	>0 - queue is not empty.
 */
static inline int qdisc_restart(struct net_device *dev)
{
	struct Qdisc *q = dev->qdisc;
	struct sk_buff *skb;
	unsigned lockless;
	int ret;

	/* Dequeue packet */
	if (unlikely((skb = dev_dequeue_skb(dev, q)) == NULL))
		return 0;

	/*
	 * When the driver has LLTX set, it does its own locking in
	 * start_xmit. These checks are worth it because even uncongested
	 * locks can be quite expensive. The driver can do a trylock, as
	 * is being done here; in case of lock contention it should return
	 * NETDEV_TX_LOCKED and the packet will be requeued.
	 */
	lockless = (dev->features & NETIF_F_LLTX);

	if (!lockless && !netif_tx_trylock(dev)) {
		/* Another CPU grabbed the driver tx lock */
		return handle_dev_cpu_collision(skb, dev, q);
	}

	/* And release queue */
	spin_unlock(&dev->queue_lock);

	ret = dev_hard_start_xmit(skb, dev);
	if (!lockless)
		netif_tx_unlock(dev);

	spin_lock(&dev->queue_lock);
	q = dev->qdisc;

	switch (ret) {
	case NETDEV_TX_OK:
		/* Driver sent out skb successfully */
		ret = qdisc_qlen(q);
		break;

	case NETDEV_TX_LOCKED:
		/* Driver try lock failed */
		ret = handle_dev_cpu_collision(skb, dev, q);
		break;

	default:
		/* Driver returned NETDEV_TX_BUSY - requeue skb */
		if (unlikely(ret != NETDEV_TX_BUSY && net_ratelimit()))
			printk(KERN_WARNING "BUG %s code %d qlen %d\n",
			       dev->name, ret, q->q.qlen);

		ret = dev_requeue_skb(skb, dev, q);
		break;
	}

	return ret;
}
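This version pushes the dequeue, requeue, and lock-collision handling into small helpers. For context, here is a sketch of those helpers as they appear in sch_generic.c of the same era; treat the exact bodies as illustrative rather than authoritative, since details shift between kernel versions.

static inline int qdisc_qlen(struct Qdisc *q)
{
	return q->q.qlen;
}

static inline int dev_requeue_skb(struct sk_buff *skb, struct net_device *dev,
				  struct Qdisc *q)
{
	/* A partially sent GSO segment list is stashed on dev->gso_skb
	 * so it is retried ahead of the qdisc next time. */
	if (unlikely(skb->next))
		dev->gso_skb = skb;
	else
		q->ops->requeue(skb, q);

	netif_schedule(dev);
	return 0;
}

static inline struct sk_buff *dev_dequeue_skb(struct net_device *dev,
					      struct Qdisc *q)
{
	struct sk_buff *skb;

	/* A stashed GSO remainder takes priority over the qdisc */
	if ((skb = dev->gso_skb))
		dev->gso_skb = NULL;
	else
		skb = q->dequeue(q);

	return skb;
}

static inline int handle_dev_cpu_collision(struct sk_buff *skb,
					   struct net_device *dev,
					   struct Qdisc *q)
{
	int ret;

	if (unlikely(dev->xmit_lock_owner == smp_processor_id())) {
		/*
		 * Same CPU holds the lock: hard_start_xmit() must have
		 * recursed (e.g. a transient configuration error). Drop
		 * the packet to break the dead loop.
		 */
		kfree_skb(skb);
		if (net_ratelimit())
			printk(KERN_WARNING "Dead loop on netdevice %s, "
			       "fix it urgently!\n", dev->name);
		ret = qdisc_qlen(q);
	} else {
		/*
		 * Another CPU holds the driver lock: requeue and let
		 * netif_schedule() retry the transmission later.
		 */
		__get_cpu_var(netdev_rx_stat).cpu_collision++;
		ret = dev_requeue_skb(skb, dev, q);
	}

	return ret;
}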
static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	struct net_device *dev = np->dev;
	struct netpoll_info *npinfo = np->dev->npinfo;

	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		__kfree_skb(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 &&
	    npinfo->poll_owner != smp_processor_id()) {
		unsigned long flags;

		local_irq_save(flags);
		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (netif_tx_trylock(dev)) {
				if (!netif_queue_stopped(dev))
					status = dev->hard_start_xmit(skb, dev);
				netif_tx_unlock(dev);

				if (status == NETDEV_TX_OK)
					break;
			}

			/* tickle device, maybe there is some cleanup */
			netpoll_poll(np);

			udelay(USEC_PER_POLL);
		}
		local_irq_restore(flags);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}
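If the busy-wait loop cannot get the packet out within a tick, the skb is parked on npinfo->txq and tx_work is scheduled to retry from process context. A sketch of that deferred worker, modeled on the netpoll code of this vintage, follows; the body is reconstructed for illustration and may differ between versions. Note that in process context the worker can afford to take the real netif_tx_lock() rather than trylock.

static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		local_irq_disable();
		netif_tx_lock(dev);
		if (netif_queue_stopped(dev) ||
		    dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
			/* Still busy: put the skb back and retry later,
			 * preserving ordering at the head of the queue. */
			skb_queue_head(&npinfo->txq, skb);
			netif_tx_unlock(dev);
			local_irq_enable();

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		netif_tx_unlock(dev);
		local_irq_enable();
	}
}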
/* Kick device.

   Returns:  0  - queue is empty or throttled.
	    >0  - queue is not empty.

   NOTE: Called under dev->queue_lock with locally disabled BH.
*/
static inline int qdisc_restart(struct net_device *dev)
{
	struct Qdisc *q = dev->qdisc;
	struct sk_buff *skb;

	/* Dequeue packet */
	if (((skb = dev->gso_skb)) || ((skb = q->dequeue(q)))) {
		unsigned nolock = (dev->features & NETIF_F_LLTX);

		dev->gso_skb = NULL;

		/*
		 * When the driver has LLTX set it does its own locking
		 * in start_xmit. No need to add additional overhead by
		 * locking again. These checks are worth it because
		 * even uncongested locks can be quite expensive.
		 * The driver can do a trylock like here too; in case
		 * of lock contention it should return NETDEV_TX_LOCKED
		 * and the packet will be requeued.
		 */
		if (!nolock) {
			if (!netif_tx_trylock(dev)) {
			collision:
				/* So, someone grabbed the driver. */

				/* It may be a transient configuration error,
				   when hard_start_xmit() recurses. We detect
				   it by checking xmit owner and drop the
				   packet when a dead loop is detected.
				*/
				if (dev->xmit_lock_owner == smp_processor_id()) {
					kfree_skb(skb);
					if (net_ratelimit())
						printk(KERN_DEBUG "Dead loop on "
						       "netdevice %s, fix it urgently!\n",
						       dev->name);
					goto out;
				}
				__get_cpu_var(netdev_rx_stat).cpu_collision++;
				goto requeue;
			}
		}

		{
			/* And release queue */
			spin_unlock(&dev->queue_lock);

			if (!netif_queue_stopped(dev)) {
				int ret;

				ret = dev_hard_start_xmit(skb, dev);
				if (ret == NETDEV_TX_OK) {
					if (!nolock) {
						netif_tx_unlock(dev);
					}
					spin_lock(&dev->queue_lock);
					q = dev->qdisc;
					goto out;
				}
				if (ret == NETDEV_TX_LOCKED && nolock) {
					spin_lock(&dev->queue_lock);
					q = dev->qdisc;
					goto collision;
				}
			}

			/* NETDEV_TX_BUSY - we need to requeue */
			/* Release the driver */
			if (!nolock) {
				netif_tx_unlock(dev);
			}
			spin_lock(&dev->queue_lock);
			q = dev->qdisc;
		}

		/* Device kicked us out :(
		   This is possible in the following cases:

		   0. driver is locked
		   1. fastroute is enabled
		   2. device cannot determine busy state
		      before start of transmission (f.e. dialout)
		   3. device is buggy (ppp)
		 */

requeue:
		if (unlikely(q == &noop_qdisc))
			kfree_skb(skb);
		else if (skb->next)
			dev->gso_skb = skb;
		else
			q->ops->requeue(skb, q);
		netif_schedule(dev);
	}
	return 0;

out:
	BUG_ON((int) q->q.qlen < 0);
	return q->q.qlen;
}
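Both versions of qdisc_restart above share the same return convention, which only makes sense alongside the caller that loops on it. A sketch of the matching qdisc_run/__qdisc_run pair of this era, reproduced for context (details vary by version): the __LINK_STATE_QDISC_RUNNING bit is what provides the single-CPU guarantee the newer header comment refers to.

/* From include/net/pkt_sched.h: only one CPU can win the
 * test_and_set_bit() race, so only one CPU runs the queue. */
static inline void qdisc_run(struct net_device *dev)
{
	if (!netif_queue_stopped(dev) &&
	    !test_and_set_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
		__qdisc_run(dev);
}

/* From net/sched/sch_generic.c: keep kicking the queue until it is
 * empty (qdisc_restart() returns 0) or the driver stops the queue. */
void __qdisc_run(struct net_device *dev)
{
	do {
		if (!qdisc_restart(dev))
			break;
	} while (!netif_queue_stopped(dev));

	clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
}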
static void ri_tasklet(unsigned long dev)
{
	struct net_device *_dev = (struct net_device *)dev;
	struct ifb_private *dp = netdev_priv(_dev);
	struct net_device_stats *stats = &dp->stats;
	struct sk_buff *skb;

	dp->st_task_enter++;
	if ((skb = skb_peek(&dp->tq)) == NULL) {
		dp->st_txq_refl_try++;
		if (netif_tx_trylock(_dev)) {
			dp->st_rxq_enter++;
			while ((skb = skb_dequeue(&dp->rq)) != NULL) {
				skb_queue_tail(&dp->tq, skb);
				dp->st_rx2tx_tran++;
			}
			netif_tx_unlock(_dev);
		} else {
			/* reschedule */
			dp->st_rxq_notenter++;
			goto resched;
		}
	}

	while ((skb = skb_dequeue(&dp->tq)) != NULL) {
		u32 from = G_TC_FROM(skb->tc_verd);

		skb->tc_verd = 0;
		skb->tc_verd = SET_TC_NCLS(skb->tc_verd);
		stats->tx_packets++;
		stats->tx_bytes += skb->len;

		skb->dev = __dev_get_by_index(skb->iif);
		if (!skb->dev) {
			dev_kfree_skb(skb);
			stats->tx_dropped++;
			break;
		}
		skb->iif = _dev->ifindex;

		if (from & AT_EGRESS) {
			dp->st_rx_frm_egr++;
			dev_queue_xmit(skb);
		} else if (from & AT_INGRESS) {
			dp->st_rx_frm_ing++;
			skb_pull(skb, skb->dev->hard_header_len);
			netif_rx(skb);
		} else
			BUG();
	}

	if (netif_tx_trylock(_dev)) {
		dp->st_rxq_check++;
		if ((skb = skb_peek(&dp->rq)) == NULL) {
			dp->tasklet_pending = 0;
			if (netif_queue_stopped(_dev))
				netif_wake_queue(_dev);
		} else {
			dp->st_rxq_rsch++;
			netif_tx_unlock(_dev);
			goto resched;
		}
		netif_tx_unlock(_dev);
	} else {
resched:
		dp->tasklet_pending = 1;
		tasklet_schedule(&dp->ifb_tasklet);
	}
}
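The tasklet only consumes; packets land on dp->rq from the ifb device's transmit routine, which schedules the tasklet when needed. A minimal sketch of that producer side, modeled on ifb.c of the same era (illustrative only, exact checks differ between versions):

static int ifb_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ifb_private *dp = netdev_priv(dev);
	struct net_device_stats *stats = &dp->stats;
	u32 from = G_TC_FROM(skb->tc_verd);

	stats->rx_packets++;
	stats->rx_bytes += skb->len;

	/* Only packets redirected from tc (ingress or egress) with a
	 * known originating interface are accepted. */
	if (!(from & (AT_INGRESS | AT_EGRESS)) || !skb->iif) {
		dev_kfree_skb(skb);
		stats->rx_dropped++;
		return 0;
	}

	if (skb_queue_len(&dp->rq) >= dev->tx_queue_len)
		netif_stop_queue(dev);

	dev->trans_start = jiffies;
	skb_queue_tail(&dp->rq, skb);

	/* tasklet_pending avoids redundant scheduling; ri_tasklet()
	 * clears it once both queues have drained. */
	if (!dp->tasklet_pending) {
		dp->tasklet_pending = 1;
		tasklet_schedule(&dp->ifb_tasklet);
	}

	return 0;
}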