/*
 * dev_deactivate - stop transmissions on a device
 * @dev: device being deactivated
 *
 * Swaps the device's qdisc for &noop_qdisc under the queue lock (so any
 * concurrent enqueue sees the no-op discipline), resets and thereby drains
 * the old qdisc, frees a deferred GSO skb if one was parked on the device,
 * stops the TX watchdog, and finally waits until every in-flight transmit
 * path has left the device before returning.
 */
void dev_deactivate(struct net_device *dev)
{
	struct Qdisc *qdisc;
	struct sk_buff *skb;

	/* Publish noop_qdisc atomically w.r.t. enqueuers taking queue_lock. */
	spin_lock_bh(&dev->queue_lock);
	qdisc = dev->qdisc;
	dev->qdisc = &noop_qdisc;

	qdisc_reset(qdisc);

	/* Steal the deferred GSO skb under the lock; free it after unlock. */
	skb = dev->gso_skb;
	dev->gso_skb = NULL;
	spin_unlock_bh(&dev->queue_lock);

	kfree_skb(skb);

	dev_watchdog_down(dev);

	/* Wait for outstanding dev_queue_xmit calls. */
	synchronize_rcu();

	/* Wait for outstanding qdisc_run calls. */
	while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
		yield();
}
/*
 * dev_deactivate - stop transmissions on a multiqueue device
 * @dev: device being deactivated
 *
 * Points every TX queue's qdisc (and the device's rx_queue qdisc) at
 * &noop_qdisc via dev_deactivate_queue, stops the TX watchdog, then waits
 * until all concurrent transmit paths and qdisc runs have drained.
 */
void dev_deactivate(struct net_device *dev)
{
	/* Replace each per-queue qdisc with the no-op discipline. */
	netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
	dev_deactivate_queue(dev, &dev->rx_queue, &noop_qdisc);

	dev_watchdog_down(dev);

	/* Wait for outstanding qdisc-less dev_queue_xmit calls. */
	synchronize_rcu();

	/* Wait for outstanding qdisc_run calls. */
	while (some_qdisc_is_busy(dev))
		yield();
}
/**
 * dev_deactivate_many - deactivate transmissions on several devices
 * @head: list of devices to deactivate
 *
 * This function returns only when all outstanding transmissions
 * have completed, unless all devices are in dismantle phase.
 */
void dev_deactivate_many(struct list_head *head)
{
	struct net_device *dev;
	bool sync_needed = false;

	list_for_each_entry(dev, head, close_list) {
		/* Swap every TX qdisc (and any ingress qdisc) for noop. */
		netdev_for_each_tx_queue(dev, dev_deactivate_queue,
					 &noop_qdisc);
		if (dev_ingress_queue(dev))
			dev_deactivate_queue(dev, dev_ingress_queue(dev),
					     &noop_qdisc);

		dev_watchdog_down(dev);
		/* Devices not being dismantled still need a later sync. */
		sync_needed |= !dev->dismantle;
	}
	/*
	 * NOTE(review): the function body appears truncated in this chunk —
	 * the synchronization/wait logic that consumes sync_needed (and the
	 * function's closing brace) is not visible here. Confirm against the
	 * full file before relying on this definition.
	 */
/*
 * dev_deactivate - stop transmissions on a device
 * @dev: device being deactivated
 *
 * Installs &noop_qdisc under the queue lock, resets the old qdisc, frees
 * any deferred GSO skb, stops the watchdog, and waits for in-flight
 * transmit/qdisc-run activity to finish. Unlike earlier variants, the
 * RUNNING bit is re-checked *inside* the queue lock so that all effects
 * of the final queue run are guaranteed visible before returning; seeing
 * the bit set at that point would indicate a bug (hence WARN_ON_ONCE).
 */
void dev_deactivate(struct net_device *dev)
{
	struct Qdisc *qdisc;
	struct sk_buff *skb;
	int running;

	/* Publish noop_qdisc atomically w.r.t. enqueuers taking queue_lock. */
	spin_lock_bh(&dev->queue_lock);
	qdisc = dev->qdisc;
	dev->qdisc = &noop_qdisc;

	qdisc_reset(qdisc);

	/* Steal the deferred GSO skb under the lock; free it after unlock. */
	skb = dev->gso_skb;
	dev->gso_skb = NULL;
	spin_unlock_bh(&dev->queue_lock);

	kfree_skb(skb);

	dev_watchdog_down(dev);

	/* Wait for outstanding qdisc-less dev_queue_xmit calls. */
	synchronize_rcu();

	/* Wait for outstanding qdisc_run calls. */
	do {
		while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
			yield();

		/*
		 * Double-check inside queue lock to ensure that all effects
		 * of the queue run are visible when we return.
		 */
		spin_lock_bh(&dev->queue_lock);
		running = test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
		spin_unlock_bh(&dev->queue_lock);

		/*
		 * The running flag should never be set at this point because
		 * we've already set dev->qdisc to noop_qdisc *inside* the same
		 * pair of spin locks. That is, if any qdisc_run starts after
		 * our initial test it should see the noop_qdisc and then
		 * clear the RUNNING bit before dropping the queue lock. So
		 * if it is set here then we've found a bug.
		 */
	} while (WARN_ON_ONCE(running));
}
/*
 * dev_deactivate - stop transmissions on a device (early variant)
 *
 * @dev: device being deactivated
 *
 * Swaps the device's qdisc for &noop_qdisc under the queue lock, resets
 * the old qdisc, stops the watchdog, then spins until the device is no
 * longer scheduled for transmission and the xmit lock is free.
 *
 * NOTE(review): unlike the later variants in this file, this version has
 * no RCU synchronization and no gso_skb handling — presumably it predates
 * both; confirm against the file's history before reusing this pattern.
 */
void dev_deactivate(struct net_device *dev)
{
	struct Qdisc *qdisc;

	/* Publish noop_qdisc atomically w.r.t. enqueuers taking queue_lock. */
	spin_lock_bh(&dev->queue_lock);
	qdisc = dev->qdisc;
	dev->qdisc = &noop_qdisc;

	qdisc_reset(qdisc);

	spin_unlock_bh(&dev->queue_lock);

	dev_watchdog_down(dev);

	/* Wait until the device is no longer queued for softirq TX. */
	while (test_bit(__LINK_STATE_SCHED, &dev->state))
		yield();

	/* Let any current transmitter holding xmit_lock finish. */
	spin_unlock_wait(&dev->xmit_lock);
}