static int prio_tune(struct Qdisc *sch, struct nlattr *opt) { struct prio_sched_data *q = qdisc_priv(sch); struct tc_prio_qopt *qopt; int i; if (nla_len(opt) < sizeof(*qopt)) return -EINVAL; qopt = nla_data(opt); if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2) return -EINVAL; for (i = 0; i <= TC_PRIO_MAX; i++) { if (qopt->priomap[i] >= qopt->bands) return -EINVAL; } sch_tree_lock(sch); q->bands = qopt->bands; memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1); for (i = q->bands; i < TCQ_PRIO_BANDS; i++) { struct Qdisc *child = q->queues[i]; q->queues[i] = &noop_qdisc; if (child != &noop_qdisc) { qdisc_tree_decrease_qlen(child, child->q.qlen); qdisc_destroy(child); } } sch_tree_unlock(sch); for (i = 0; i < q->bands; i++) { if (q->queues[i] == &noop_qdisc) { struct Qdisc *child, *old; child = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, TC_H_MAKE(sch->handle, i + 1)); if (child) { sch_tree_lock(sch); old = q->queues[i]; q->queues[i] = child; if (old != &noop_qdisc) { qdisc_tree_decrease_qlen(old, old->q.qlen); qdisc_destroy(old); } sch_tree_unlock(sch); } } } return 0; }
/*
 * multiq_tune - (re)configure the multiq scheduler.
 * @sch: the multiq qdisc
 * @opt: netlink attribute carrying a struct tc_multiq_qopt
 *
 * Returns 0 on success, -EOPNOTSUPP if the device is not multiqueue,
 * -EINVAL on a short netlink message.
 */
static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct tc_multiq_qopt *qopt;
	int i;

	/* multiq only makes sense on top of a real multiqueue device. */
	if (!netif_is_multiqueue(qdisc_dev(sch)))
		return -EOPNOTSUPP;
	if (nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = nla_data(opt);

	/* The band count always tracks the device's active TX queues;
	 * any user-supplied value is overwritten in place (so a later
	 * dump of this attribute reflects the effective value).
	 */
	qopt->bands = qdisc_dev(sch)->real_num_tx_queues;

	sch_tree_lock(sch);
	q->bands = qopt->bands;
	/* Retire bands beyond the new count: detach the child, account
	 * its queued packets/bytes up the tree, then free it.
	 */
	for (i = q->bands; i < q->max_bands; i++) {
		if (q->queues[i] != &noop_qdisc) {
			struct Qdisc *child = q->queues[i];

			q->queues[i] = &noop_qdisc;
			qdisc_tree_reduce_backlog(child, child->q.qlen,
						  child->qstats.backlog);
			qdisc_destroy(child);
		}
	}

	sch_tree_unlock(sch);

	/* Populate newly enabled bands with default pfifo children.
	 * Allocation happens unlocked; the actual swap is done under
	 * the tree lock.  If allocation fails, the band simply stays
	 * on noop_qdisc.
	 */
	for (i = 0; i < q->bands; i++) {
		if (q->queues[i] == &noop_qdisc) {
			struct Qdisc *child, *old;

			child = qdisc_create_dflt(sch->dev_queue,
						  &pfifo_qdisc_ops,
						  TC_H_MAKE(sch->handle,
							    i + 1));
			if (child) {
				sch_tree_lock(sch);
				old = q->queues[i];
				q->queues[i] = child;

				/* child comes from qdisc_create_dflt() and
				 * can never actually be noop_qdisc here.
				 */
				if (child != &noop_qdisc)
					qdisc_hash_add(child, true);
				if (old != &noop_qdisc) {
					qdisc_tree_reduce_backlog(old,
								  old->q.qlen,
								  old->qstats.backlog);
					qdisc_destroy(old);
				}
				sch_tree_unlock(sch);
			}
		}
	}
	return 0;
}
/*
 * prio_tune - apply a new band count and priority-to-band map.
 * @sch: the prio qdisc being (re)configured
 * @opt: netlink attribute carrying a struct tc_prio_qopt
 *
 * Returns 0 on success, -EINVAL on malformed options, -ENOMEM if a
 * default child qdisc cannot be allocated.  All new children are
 * allocated before any state is committed, so a failure leaves the
 * qdisc exactly as it was.
 */
static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	struct Qdisc *queues[TCQ_PRIO_BANDS];
	int oldbands = q->bands, i;
	struct tc_prio_qopt *qopt;

	if (nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2)
		return -EINVAL;

	/* Every priority must map to a band inside the new range. */
	for (i = 0; i <= TC_PRIO_MAX; i++) {
		if (qopt->priomap[i] >= qopt->bands)
			return -EINVAL;
	}

	/* Before commit, make sure we can allocate all new qdiscs */
	for (i = oldbands; i < qopt->bands; i++) {
		queues[i] = qdisc_create_dflt(sch->dev_queue,
					      &pfifo_qdisc_ops,
					      TC_H_MAKE(sch->handle, i + 1));
		if (!queues[i]) {
			/* Unwind what we already allocated and bail
			 * without having touched qdisc state.
			 */
			while (i > oldbands)
				qdisc_destroy(queues[--i]);
			return -ENOMEM;
		}
	}

	sch_tree_lock(sch);
	q->bands = qopt->bands;
	memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);

	/* If shrinking, account each retired child's backlog up the
	 * tree before freeing it.
	 */
	for (i = q->bands; i < oldbands; i++) {
		struct Qdisc *child = q->queues[i];

		qdisc_tree_reduce_backlog(child, child->q.qlen,
					  child->qstats.backlog);
		qdisc_destroy(child);
	}

	/* Install the pre-allocated children for new bands and expose
	 * them in the qdisc hash for dumps/lookups.
	 */
	for (i = oldbands; i < q->bands; i++) {
		q->queues[i] = queues[i];
		if (q->queues[i] != &noop_qdisc)
			qdisc_hash_add(q->queues[i], true);
	}

	sch_tree_unlock(sch);
	return 0;
}
/*
 * netem_init - set up a netem qdisc instance.
 * @sch: the qdisc being initialized
 * @opt: mandatory rtnetlink options (netem cannot run unconfigured)
 *
 * Creates an inner default pfifo child to hold delayed packets, arms
 * the watchdog timer state, then applies the user options via
 * netem_change().  Returns 0 on success, -EINVAL without options,
 * -ENOMEM if the child cannot be created, or netem_change()'s error.
 */
static int netem_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	skb_queue_head_init(&q->delayed);
	init_timer(&q->timer);
	q->timer.function = netem_watchdog;
	q->timer.data = (unsigned long) sch;
	q->counter = 0;

	/* Inner child queue that actually holds packets. */
	q->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
	if (!q->qdisc) {
		pr_debug("netem: qdisc create failed\n");
		return -ENOMEM;
	}

	ret = netem_change(sch, opt);
	if (ret) {
		/* Options were bad: release the child we just made. */
		pr_debug("netem: change failed\n");
		qdisc_destroy(q->qdisc);
	}
	return ret;
}
/*
 * tbf_destroy - tear down a TBF qdisc.
 *
 * The watchdog is cancelled first so it cannot fire against a child
 * that is being freed; only then is the inner qdisc destroyed.
 */
static void tbf_destroy(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
}
/*
 * tbf_create_dflt_qdisc - build a default bfifo child for TBF.
 * @sch:   parent TBF qdisc (supplies device and handle)
 * @limit: queue limit to program into the new fifo
 *
 * Returns the configured child qdisc, or NULL if allocation or the
 * fifo's change() operation fails.
 */
static struct Qdisc *tbf_create_dflt_qdisc(struct Qdisc *sch, u32 limit)
{
	struct Qdisc *fifo;
	struct rtattr *opt;
	int err;

	fifo = qdisc_create_dflt(sch->dev, &bfifo_qdisc_ops,
				 TC_H_MAKE(sch->handle, 1));
	if (fifo == NULL)
		return NULL;

	opt = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
	if (opt == NULL)
		goto err_destroy;

	/* Hand-build the rtattr the fifo's change() op expects. */
	opt->rta_type = RTM_NEWQDISC;
	opt->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt));
	((struct tc_fifo_qopt *)RTA_DATA(opt))->limit = limit;

	err = fifo->ops->change(fifo, opt);
	kfree(opt);
	if (err == 0)
		return fifo;

err_destroy:
	qdisc_destroy(fifo);
	return NULL;
}
/*
 * netem_destroy - tear down a netem qdisc.
 *
 * The timer is synchronously cancelled first (it references sch), then
 * the inner child qdisc and the delay-distribution table are freed.
 */
static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	del_timer_sync(&q->timer);
	qdisc_destroy(q->qdisc);
	kfree(q->delay_dist);
}
/*
 * dev_shutdown - detach and destroy a device's qdiscs at unregister time.
 *
 * Under the qdisc tree lock, both the active and sleeping root qdisc
 * pointers are parked on noop_qdisc before the old root is destroyed,
 * so concurrent transmit paths always see a valid (no-op) qdisc.  The
 * ingress qdisc, if configured, is detached and destroyed the same way.
 */
void dev_shutdown(struct net_device *dev)
{
	struct Qdisc *qdisc;

	qdisc_lock_tree(dev);
	qdisc = dev->qdisc_sleeping;
	dev->qdisc = &noop_qdisc;
	dev->qdisc_sleeping = &noop_qdisc;
	qdisc_destroy(qdisc);
#if defined(CONFIG_NET_SCH_INGRESS) || defined(CONFIG_NET_SCH_INGRESS_MODULE)
	if ((qdisc = dev->qdisc_ingress) != NULL) {
		dev->qdisc_ingress = NULL;
		qdisc_destroy(qdisc);
	}
#endif
	/* By now the watchdog should have been stopped by the caller. */
	BUG_TRAP(!timer_pending(&dev->watchdog_timer));
	qdisc_unlock_tree(dev);
}
/*
 * prio_tune - apply a new band count and priority-to-band map (rtattr era).
 * @sch: the prio qdisc being (re)configured
 * @opt: rtnetlink attribute carrying a struct tc_prio_qopt
 *
 * Returns 0 on success or -EINVAL on malformed options.  Note that the
 * second loop walks the priomap, so only bands actually referenced by
 * some priority get a default child; unreferenced bands stay on
 * noop_qdisc until a child is grafted explicitly.
 */
static int prio_tune(struct Qdisc *sch, struct rtattr *opt)
{
	struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
	struct tc_prio_qopt *qopt = RTA_DATA(opt);
	int i;

	if (opt->rta_len < RTA_LENGTH(sizeof(*qopt)))
		return -EINVAL;
	if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2)
		return -EINVAL;

	/* Every priority must map to a band inside the new range. */
	for (i = 0; i <= TC_PRIO_MAX; i++) {
		if (qopt->priomap[i] >= qopt->bands)
			return -EINVAL;
	}

	sch_tree_lock(sch);
	q->bands = qopt->bands;
	memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);

	/* Retire bands beyond the new count, swapping noop_qdisc in
	 * atomically before destroying the old child.
	 */
	for (i = q->bands; i < TCQ_PRIO_BANDS; i++) {
		struct Qdisc *child = xchg(&q->queues[i], &noop_qdisc);
		if (child != &noop_qdisc)
			qdisc_destroy(child);
	}
	sch_tree_unlock(sch);

	/* Give each band referenced by the priomap a default pfifo.
	 * Allocation is unlocked; the swap happens under the tree lock.
	 * A failed allocation simply leaves the band on noop_qdisc.
	 */
	for (i = 0; i <= TC_PRIO_MAX; i++) {
		int band = q->prio2band[i];
		if (q->queues[band] == &noop_qdisc) {
			struct Qdisc *child;
			child = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
			if (child) {
				sch_tree_lock(sch);
				child = xchg(&q->queues[band], child);
				if (child != &noop_qdisc)
					qdisc_destroy(child);
				sch_tree_unlock(sch);
			}
		}
	}
	return 0;
}
/*
 * prio_destroy - tear down a prio qdisc.
 *
 * Releases the classifier chain first, then destroys every active
 * band's child qdisc.
 */
static void prio_destroy(struct Qdisc *sch)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int band;

	tcf_destroy_chain(&q->filter_list);
	for (band = 0; band < q->bands; band++)
		qdisc_destroy(q->queues[band]);
}
/*
 * prio_destroy - tear down a prio qdisc (pre-tcf era).
 *
 * Destroys every active band's child and parks the slot back on
 * noop_qdisc, then drops the module use count.
 */
static void prio_destroy(struct Qdisc* sch)
{
	struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
	int band;

	for (band = 0; band < q->bands; band++) {
		qdisc_destroy(q->queues[band]);
		q->queues[band] = &noop_qdisc;
	}
	MOD_DEC_USE_COUNT;
}
/*
 * multiq_destroy - tear down a multiq qdisc (tcf_block era).
 *
 * Releases the classifier block, destroys each band's child, then
 * frees the band array itself.
 */
static void multiq_destroy(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	int i;

	tcf_block_put(q->block);
	for (i = 0; i < q->bands; i++)
		qdisc_destroy(q->queues[i]);

	kfree(q->queues);
}
/*
 * multiq_destroy - tear down a multiq qdisc (filter-chain era).
 *
 * Releases the classifier chain, destroys each band's child qdisc,
 * then frees the dynamically allocated band array.
 */
static void multiq_destroy(struct Qdisc *sch)
{
	int band;
	struct multiq_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	for (band = 0; band < q->bands; band++)
		qdisc_destroy(q->queues[band]);
	kfree(q->queues);
}
static void mq_destroy(struct Qdisc *sch) { struct net_device *dev = qdisc_dev(sch); struct mq_sched *priv = qdisc_priv(sch); unsigned int ntx; if (!priv->qdiscs) return; for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++) qdisc_destroy(priv->qdiscs[ntx]); kfree(priv->qdiscs); }
/*
 * tbf_destroy - tear down a TBF qdisc (timer era).
 *
 * Stops the watchdog timer, releases the rate tables (peak table is
 * optional), then destroys the inner child qdisc.
 */
static void tbf_destroy(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	del_timer(&q->wd_timer);

	if (q->P_tab)
		qdisc_put_rtab(q->P_tab);
	if (q->R_tab)
		qdisc_put_rtab(q->R_tab);

	qdisc_destroy(q->qdisc);
}
/*
 * dev_shutdown - detach and destroy a device's qdisc at unregister time
 * (ancient struct device era).
 *
 * Bottom halves are held off while both qdisc pointers are parked on
 * noop_qdisc and the old sleeping root is destroyed, so the transmit
 * path never observes a half-torn-down qdisc.
 */
void dev_shutdown(struct device *dev)
{
	struct Qdisc *qdisc;

	start_bh_atomic();
	qdisc = dev->qdisc_sleeping;
	dev->qdisc = &noop_qdisc;
	dev->qdisc_sleeping = &noop_qdisc;
	qdisc_destroy(qdisc);
	/* All per-device qdiscs should already have been unlinked. */
	BUG_TRAP(dev->qdisc_list == NULL);
	dev->qdisc_list = NULL;
	end_bh_atomic();
}
/*
 * shutdown_scheduler_queue - replace one TX queue's qdisc with a default.
 * @dev:            owning device (unused here, kept for the callback ABI)
 * @dev_queue:      the TX queue being shut down
 * @_qdisc_default: qdisc to park on the queue (typically noop/noqueue)
 *
 * Publishes the default qdisc via RCU before destroying the old one so
 * concurrent readers never see a freed pointer.
 */
static void shutdown_scheduler_queue(struct net_device *dev, struct netdev_queue *dev_queue, void *_qdisc_default)
{
	struct Qdisc *old = dev_queue->qdisc_sleeping;
	struct Qdisc *dflt = _qdisc_default;

	if (!old)
		return;

	rcu_assign_pointer(dev_queue->qdisc, dflt);
	dev_queue->qdisc_sleeping = dflt;
	qdisc_destroy(old);
}
/* Release Qdisc resources */
static void wfq_destroy(struct Qdisc *sch)
{
	struct wfq_sched_data *q = qdisc_priv(sch);
	int i;

	/* Destroy each per-queue child; the loop stops at the first
	 * slot with a NULL qdisc — assumes queues are populated
	 * contiguously from index 0 (NOTE(review): confirm against
	 * wfq_init).
	 */
	if (likely(q->queues)) {
		for (i = 0; i < wfq_max_queues && (q->queues[i]).qdisc; i++)
			qdisc_destroy((q->queues[i]).qdisc);
	}

	qdisc_watchdog_cancel(&q->watchdog);
	/* Debug/diagnostic output on teardown. */
	printk(KERN_INFO "destroy sch_wfq on %s\n", sch->dev_queue->dev->name);
	print_wfq_sched_data(sch);
}
/*
 * qdisc_create_dflt - allocate and default-initialize a qdisc.
 * @dev: device the qdisc will be attached to
 * @ops: qdisc operations describing the discipline to create
 *
 * Runs the discipline's init() with NULL options if one is provided.
 * Returns the ready qdisc, or NULL on allocation or init failure.
 */
struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops)
{
	struct Qdisc *sch = qdisc_alloc(dev, ops);

	if (IS_ERR(sch))
		return NULL;

	if (ops->init && ops->init(sch, NULL) != 0) {
		qdisc_destroy(sch);
		return NULL;
	}
	return sch;
}
/*
 * tbf_destroy - tear down a TBF qdisc (ASF-patched variant).
 *
 * Cancels the watchdog, releases the optional rate tables, destroys
 * the inner child, and finally removes the ASF egress/hardware shaper
 * hook when that support is compiled in.
 * NOTE(review): _tbf_del_hook() runs after qdisc_destroy(); presumably
 * it does not touch the child qdisc — confirm against the ASF code.
 */
static void tbf_destroy(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);

	if (q->P_tab)
		qdisc_put_rtab(q->P_tab);
	if (q->R_tab)
		qdisc_put_rtab(q->R_tab);

	qdisc_destroy(q->qdisc);
#if defined(CONFIG_ASF_EGRESS_SHAPER) || defined(CONFIG_ASF_HW_SHAPER)
	_tbf_del_hook(sch);
#endif
}
static void mq_attach(struct Qdisc *sch) { struct net_device *dev = qdisc_dev(sch); struct mq_sched *priv = qdisc_priv(sch); struct Qdisc *qdisc; unsigned int ntx; for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { qdisc = priv->qdiscs[ntx]; qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc); if (qdisc) qdisc_destroy(qdisc); } kfree(priv->qdiscs); priv->qdiscs = NULL; }
/*
 * mqprio_destroy - tear down an mqprio root qdisc.
 *
 * Destroys every per-queue child still held in the private array
 * (the array may be NULL if init failed, or NULL-terminated early),
 * then clears the device's traffic-class configuration.
 */
static void mqprio_destroy(struct Qdisc *sch)
{
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	unsigned int i;

	if (priv->qdiscs) {
		for (i = 0;
		     i < dev->num_tx_queues && priv->qdiscs[i];
		     i++)
			qdisc_destroy(priv->qdiscs[i]);
		kfree(priv->qdiscs);
	}

	netdev_set_num_tc(dev, 0);
}
/*
 * qdisc_create_dflt - allocate and default-initialize a qdisc.
 * @dev:      device the qdisc will be attached to
 * @ops:      qdisc operations describing the discipline to create
 * @parentid: classid of the parent this qdisc hangs under
 *
 * Points the qdisc's stats lock at the device queue lock, records the
 * parent, and runs the discipline's init() with NULL options if one is
 * provided.  Returns the ready qdisc, or NULL on failure.
 */
struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops, unsigned int parentid)
{
	struct Qdisc *sch;

	sch = qdisc_alloc(dev, ops);
	if (IS_ERR(sch))
		goto errout;
	sch->stats_lock = &dev->queue_lock;
	sch->parent = parentid;

	if (!ops->init || ops->init(sch, NULL) == 0)
		return sch;

	/* init() failed: release the allocation. */
	qdisc_destroy(sch);
errout:
	return NULL;
}
static void prio_destroy(struct Qdisc* sch) { int prio; struct prio_sched_data *q = (struct prio_sched_data *)sch->data; struct tcf_proto *tp; while ((tp = q->filter_list) != NULL) { q->filter_list = tp->next; tcf_destroy(tp); } for (prio=0; prio<q->bands; prio++) { qdisc_destroy(q->queues[prio]); q->queues[prio] = &noop_qdisc; } MOD_DEC_USE_COUNT; }
/*
 * mq_attach - graft the pre-allocated per-queue children onto the device.
 *
 * Each child built at init time is grafted onto its TX queue and the
 * previously attached qdisc, if any, is destroyed.  Children on queues
 * that are currently active are also registered on the qdisc list so
 * they are visible to dumps.  Ownership passes to the queues, so the
 * temporary array is released.
 */
static void mq_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc, *old;
	unsigned int ntx;

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (old)
			qdisc_destroy(old);
#ifdef CONFIG_NET_SCHED
		/* Only currently-active queues are listed for dumps. */
		if (ntx < dev->real_num_tx_queues)
			qdisc_list_add(qdisc);
#endif
	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}
/*
 * qdisc_create_dflt - allocate and default-initialize a qdisc.
 * @dev_queue: TX queue the qdisc will be attached to
 * @ops:       qdisc operations describing the discipline to create
 * @parentid:  classid of the parent this qdisc hangs under
 *
 * Takes a reference on the discipline's module, allocates the qdisc,
 * and runs its init() with NULL options.  Returns the ready qdisc, or
 * NULL on failure.
 *
 * Fix: the previous version leaked the module reference when
 * qdisc_alloc() failed — on that path qdisc_destroy(), which normally
 * drops the reference, is never reached, so it must be dropped here.
 */
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue, const struct Qdisc_ops *ops, unsigned int parentid)
{
	struct Qdisc *sch;

	if (!try_module_get(ops->owner))
		return NULL;

	sch = qdisc_alloc(dev_queue, ops);
	if (IS_ERR(sch)) {
		/* Allocation failed before ownership of the module ref
		 * transferred to the qdisc; release it or it leaks.
		 */
		module_put(ops->owner);
		return NULL;
	}
	sch->parent = parentid;

	if (!ops->init || ops->init(sch, NULL) == 0)
		return sch;

	/* init() failed: qdisc_destroy() releases the module ref too. */
	qdisc_destroy(sch);
	return NULL;
}
/*
 * nm_os_catch_qdisc - install or remove netmap's per-queue qdiscs.
 * @gna:       the generic netmap adapter for the interface
 * @intercept: nonzero to install netmap qdiscs, zero to restore noop
 *
 * When intercepting, every real TX queue gets a freshly created
 * generic_qdisc configured (via its change() op) with its queue index;
 * when releasing, NULL is grafted so the stack falls back to defaults.
 * The device is deactivated around the swap if it is up.
 *
 * Returns 0 on success.  NOTE(review): failure paths are inconsistent —
 * allocation failure returns positive ENOMEM while qdisc setup failure
 * returns -1 (after recursively uninstalling) — confirm what callers
 * expect before changing either.
 */
static int nm_os_catch_qdisc(struct netmap_generic_adapter *gna, int intercept)
{
	struct netmap_adapter *na = &gna->up.up;
	struct ifnet *ifp = netmap_generic_getifp(gna);
	struct nm_generic_qdisc *qdiscopt = NULL;
	struct Qdisc *fqdisc = NULL;
	struct nlattr *nla = NULL;
	struct netdev_queue *txq;
	unsigned int i;

	/* Nothing to do unless qdisc-based TX interception is enabled. */
	if (!gna->txqdisc) {
		return 0;
	}

	if (intercept) {
		/* Hand-build the netlink attribute later passed to each
		 * qdisc's change() op to program its queue index/limit.
		 */
		nla = kmalloc(nla_attr_size(sizeof(*qdiscopt)), GFP_KERNEL);
		if (!nla) {
			D("Failed to allocate netlink attribute");
			return ENOMEM;
		}
		nla->nla_type = RTM_NEWQDISC;
		nla->nla_len = nla_attr_size(sizeof(*qdiscopt));
		qdiscopt = (struct nm_generic_qdisc *)nla_data(nla);
		memset(qdiscopt, 0, sizeof(*qdiscopt));
		qdiscopt->limit = na->num_tx_desc;
	}

	if (ifp->flags & IFF_UP) {
		dev_deactivate(ifp);
	}

	/* Replace the current qdiscs with our own. */
	for (i = 0; i < ifp->real_num_tx_queues; i++) {
		struct Qdisc *nqdisc = NULL;
		struct Qdisc *oqdisc;
		int err;

		txq = netdev_get_tx_queue(ifp, i);

		if (intercept) {
			/* This takes a refcount to netmap module, alloc the
			 * qdisc and calls the init() op with NULL netlink
			 * attribute. */
			nqdisc = qdisc_create_dflt(
#ifndef NETMAP_LINUX_QDISC_CREATE_DFLT_3ARGS
					ifp,
#endif /* NETMAP_LINUX_QDISC_CREATE_DFLT_3ARGS */
					txq, &generic_qdisc_ops,
					TC_H_UNSPEC);
			if (!nqdisc) {
				D("Failed to create qdisc");
				goto qdisc_create;
			}
			/* Remember the first child; it becomes ifp->qdisc. */
			fqdisc = fqdisc ?: nqdisc;

			/* Call the change() op passing a valid netlink
			 * attribute. This is used to set the queue idx. */
			qdiscopt->qidx = i;
			err = nqdisc->ops->change(nqdisc, nla);
			if (err) {
				D("Failed to init qdisc");
				goto qdisc_create;
			}
		}

		oqdisc = dev_graft_qdisc(txq, nqdisc);
		/* We can call this also with
		 * odisc == &noop_qdisc, since the noop
		 * qdisc has the TCQ_F_BUILTIN flag set,
		 * and so qdisc_destroy will skip it. */
		qdisc_destroy(oqdisc);
	}

	kfree(nla);

	/* Swap the device-level root qdisc pointer as well. */
	if (ifp->qdisc) {
		qdisc_destroy(ifp->qdisc);
	}
	if (intercept) {
		atomic_inc(&fqdisc->refcnt);
		ifp->qdisc = fqdisc;
	} else {
		ifp->qdisc = &noop_qdisc;
	}
	if (ifp->flags & IFF_UP) {
		dev_activate(ifp);
	}

	return 0;

qdisc_create:
	if (nla) {
		kfree(nla);
	}
	/* Roll back any qdiscs installed so far by uninstalling. */
	nm_os_catch_qdisc(gna, 0);

	return -1;
}
/*
 * tbf_change - apply new token-bucket parameters (rtattr era).
 * @sch: the TBF qdisc being (re)configured
 * @opt: nested rtnetlink options (TCA_TBF_PARMS plus optional tables)
 *
 * Parses and validates the rate (and optional peak-rate) tables,
 * derives the maximum packet size the buckets can admit, optionally
 * replaces the inner child with a new bfifo sized to the limit, and
 * commits everything under the tree lock.  Returns 0 on success or
 * -EINVAL on any parse/validation failure.
 */
static int tbf_change(struct Qdisc* sch, struct rtattr *opt)
{
	int err = -EINVAL;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct rtattr *tb[TCA_TBF_PTAB];
	struct tc_tbf_qopt *qopt;
	struct qdisc_rate_table *rtab = NULL;
	struct qdisc_rate_table *ptab = NULL;
	struct Qdisc *child = NULL;
	int max_size, n;

	if (rtattr_parse_nested(tb, TCA_TBF_PTAB, opt) ||
	    tb[TCA_TBF_PARMS-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_TBF_PARMS-1]) < sizeof(*qopt))
		goto done;

	qopt = RTA_DATA(tb[TCA_TBF_PARMS-1]);
	rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_RTAB-1]);
	if (rtab == NULL)
		goto done;

	/* A peak rate only makes sense above the base rate; otherwise
	 * ptab stays NULL and we fail below.
	 */
	if (qopt->peakrate.rate) {
		if (qopt->peakrate.rate > qopt->rate.rate)
			ptab = qdisc_get_rtab(&qopt->peakrate, tb[TCA_TBF_PTAB-1]);
		if (ptab == NULL)
			goto done;
	}

	/* Largest packet whose token cost fits the configured buffer. */
	for (n = 0; n < 256; n++)
		if (rtab->data[n] > qopt->buffer)
			break;
	max_size = (n << qopt->rate.cell_log) - 1;
	if (ptab) {
		int size;

		/* The peak bucket (mtu) may constrain it further. */
		for (n = 0; n < 256; n++)
			if (ptab->data[n] > qopt->mtu)
				break;
		size = (n << qopt->peakrate.cell_log) - 1;
		if (size < max_size)
			max_size = size;
	}
	if (max_size < 0)
		goto done;

	/* A nonzero limit gets a fresh bfifo child sized to match. */
	if (qopt->limit > 0) {
		if ((child = tbf_create_dflt_qdisc(sch, qopt->limit)) == NULL)
			goto done;
	}

	sch_tree_lock(sch);
	if (child) {
		/* Account the old child's queue up the tree, then swap. */
		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
		qdisc_destroy(xchg(&q->qdisc, child));
	}
	q->limit = qopt->limit;
	q->mtu = qopt->mtu;
	q->max_size = max_size;
	q->buffer = qopt->buffer;
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	/* Swap in the new tables; the old ones are released below. */
	rtab = xchg(&q->R_tab, rtab);
	ptab = xchg(&q->P_tab, ptab);
	sch_tree_unlock(sch);
	err = 0;
done:
	if (rtab)
		qdisc_put_rtab(rtab);
	if (ptab)
		qdisc_put_rtab(ptab);
	return err;
}
/*
 * tbf_change - apply new token-bucket parameters (nlattr era).
 * @sch: the TBF qdisc being (re)configured
 * @opt: nested netlink options (TCA_TBF_PARMS plus optional tables)
 *
 * Parses and validates the rate (and optional peak-rate) tables,
 * derives the maximum packet size the buckets can admit, resizes or
 * creates the inner fifo child, and commits everything under the tree
 * lock.  Returns 0 on success or a negative errno.
 */
static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
{
	int err;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_TBF_PTAB + 1];
	struct tc_tbf_qopt *qopt;
	struct qdisc_rate_table *rtab = NULL;
	struct qdisc_rate_table *ptab = NULL;
	struct Qdisc *child = NULL;
	int max_size, n;

	err = nla_parse_nested(tb, TCA_TBF_PTAB, opt, tbf_policy);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_TBF_PARMS] == NULL)
		goto done;

	qopt = nla_data(tb[TCA_TBF_PARMS]);
	rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_RTAB]);
	if (rtab == NULL)
		goto done;

	/* A peak rate only makes sense above the base rate; otherwise
	 * ptab stays NULL and we fail below.
	 */
	if (qopt->peakrate.rate) {
		if (qopt->peakrate.rate > qopt->rate.rate)
			ptab = qdisc_get_rtab(&qopt->peakrate, tb[TCA_TBF_PTAB]);
		if (ptab == NULL)
			goto done;
	}

	/* Largest packet whose token cost fits the configured buffer. */
	for (n = 0; n < 256; n++)
		if (rtab->data[n] > qopt->buffer)
			break;
	max_size = (n << qopt->rate.cell_log) - 1;
	if (ptab) {
		int size;

		/* The peak bucket (mtu) may constrain it further. */
		for (n = 0; n < 256; n++)
			if (ptab->data[n] > qopt->mtu)
				break;
		size = (n << qopt->peakrate.cell_log) - 1;
		if (size < max_size)
			max_size = size;
	}
	if (max_size < 0)
		goto done;

	/* Resize an existing child in place; otherwise create a fresh
	 * bfifo sized to the limit (only when a limit is requested).
	 */
	if (q->qdisc != &noop_qdisc) {
		err = fifo_set_limit(q->qdisc, qopt->limit);
		if (err)
			goto done;
	} else if (qopt->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
		if (IS_ERR(child)) {
			err = PTR_ERR(child);
			goto done;
		}
	}

	sch_tree_lock(sch);
	if (child) {
		/* Account the old child's queue up the tree, then swap. */
		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
	}
	q->limit = qopt->limit;
	q->mtu = qopt->mtu;
	q->max_size = max_size;
	q->buffer = qopt->buffer;
	q->tokens = q->buffer;
	q->ptokens = q->mtu;

	/* Swap in the new tables; the old ones are released below. */
	swap(q->R_tab, rtab);
	swap(q->P_tab, ptab);

	sch_tree_unlock(sch);
	err = 0;
done:
	if (rtab)
		qdisc_put_rtab(rtab);
	if (ptab)
		qdisc_put_rtab(ptab);
	return err;
}
static int prio_tune(struct Qdisc *sch, struct nlattr *opt) { struct prio_sched_data *q = qdisc_priv(sch); struct tc_prio_qopt *qopt; struct nlattr *tb[TCA_PRIO_MAX + 1] = {0}; int err; int i; qopt = nla_data(opt); if (nla_len(opt) < sizeof(*qopt)) return -1; if (nla_len(opt) >= sizeof(*qopt) + sizeof(struct nlattr)) { err = nla_parse_nested(tb, TCA_PRIO_MAX, (struct nlattr *) (qopt + 1), NULL); if (err < 0) return err; } q->bands = qopt->bands; /* If we're multiqueue, make sure the number of incoming bands * matches the number of queues on the device we're associating with. * If the number of bands requested is zero, then set q->bands to * dev->egress_subqueue_count. Also, the root qdisc must be the * only one that is enabled for multiqueue, since it's the only one * that interacts with the underlying device. */ q->mq = nla_get_flag(tb[TCA_PRIO_MQ]); if (q->mq) { if (sch->parent != TC_H_ROOT) return -EINVAL; if (netif_is_multiqueue(sch->dev)) { if (q->bands == 0) q->bands = sch->dev->egress_subqueue_count; else if (q->bands != sch->dev->egress_subqueue_count) return -EINVAL; } else return -EOPNOTSUPP; } if (q->bands > TCQ_PRIO_BANDS || q->bands < 2) return -EINVAL; for (i=0; i<=TC_PRIO_MAX; i++) { if (qopt->priomap[i] >= q->bands) return -EINVAL; } sch_tree_lock(sch); memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1); for (i=q->bands; i<TCQ_PRIO_BANDS; i++) { struct Qdisc *child = xchg(&q->queues[i], &noop_qdisc); if (child != &noop_qdisc) { qdisc_tree_decrease_qlen(child, child->q.qlen); qdisc_destroy(child); } } sch_tree_unlock(sch); for (i=0; i<q->bands; i++) { if (q->queues[i] == &noop_qdisc) { struct Qdisc *child; child = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, TC_H_MAKE(sch->handle, i + 1)); if (child) { sch_tree_lock(sch); child = xchg(&q->queues[i], child); if (child != &noop_qdisc) { qdisc_tree_decrease_qlen(child, child->q.qlen); qdisc_destroy(child); } sch_tree_unlock(sch); } } } return 0; }