/*
 * Configure the GRED virtual queue for drop parameter 'dp', creating it
 * from *prealloc if it does not exist yet.  The function never allocates
 * memory itself, so it is safe to call under the qdisc tree lock; the
 * caller preallocates and frees any unused buffer afterwards.
 */
static inline int gred_change_vq(struct Qdisc *sch, int dp,
                                 struct tc_gred_qopt *ctl, int prio,
                                 u8 *stab, u32 max_P,
                                 struct gred_sched_data **prealloc)
{
        struct gred_sched *table = qdisc_priv(sch);
        struct gred_sched_data *q = table->tab[dp];

        if (!q) {
                /* Take ownership of the caller's preallocated VQ. */
                table->tab[dp] = q = *prealloc;
                *prealloc = NULL;
                if (!q)
                        return -ENOMEM;
        }

        q->DP = dp;
        q->prio = prio;
        q->limit = ctl->limit;

        if (q->backlog == 0)
                red_end_of_idle_period(&q->vars);

        red_set_parms(&q->parms,
                      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
                      ctl->Scell_log, stab, max_P);
        red_set_vars(&q->vars);
        return 0;
}
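/*
 * A minimal sketch (illustrative name, not the kernel's actual
 * gred_change()) of the caller-side pattern the prealloc argument
 * assumes: allocate with GFP_KERNEL before taking sch_tree_lock(), hand
 * the buffer to gred_change_vq(), and free whatever was not consumed
 * once the lock is dropped.
 */
static int gred_change_vq_caller_sketch(struct Qdisc *sch, int dp,
                                        struct tc_gred_qopt *ctl, int prio,
                                        u8 *stab, u32 max_P)
{
        struct gred_sched_data *prealloc;
        int err;

        prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL); /* may sleep */

        sch_tree_lock(sch);
        err = gred_change_vq(sch, dp, ctl, prio, stab, max_P, &prealloc);
        sch_tree_unlock(sch);

        kfree(prealloc); /* NULL (a no-op) if gred_change_vq() consumed it */
        return err;
}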
/*
 * Legacy rtattr-based configuration path for the RED qdisc: the nested
 * TCA_RED_PARMS and TCA_RED_STAB attributes are validated by hand before
 * the qdisc state is updated under the tree lock.
 */
static int red_change(struct Qdisc *sch, struct rtattr *opt)
{
        struct red_sched_data *q = qdisc_priv(sch);
        struct rtattr *tb[TCA_RED_MAX];
        struct tc_red_qopt *ctl;

        if (opt == NULL || rtattr_parse_nested(tb, TCA_RED_MAX, opt))
                return -EINVAL;

        if (tb[TCA_RED_PARMS-1] == NULL ||
            RTA_PAYLOAD(tb[TCA_RED_PARMS-1]) < sizeof(*ctl) ||
            tb[TCA_RED_STAB-1] == NULL ||
            RTA_PAYLOAD(tb[TCA_RED_STAB-1]) < RED_STAB_SIZE)
                return -EINVAL;

        ctl = RTA_DATA(tb[TCA_RED_PARMS-1]);

        sch_tree_lock(sch);
        q->flags = ctl->flags;
        q->limit = ctl->limit;

        red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
                      ctl->Plog, ctl->Scell_log,
                      RTA_DATA(tb[TCA_RED_STAB-1]));

        if (skb_queue_empty(&sch->q))
                red_end_of_idle_period(&q->parms);

        sch_tree_unlock(sch);
        return 0;
}
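/*
 * Hedged sketch: later kernels replaced the hand-rolled RTA_PAYLOAD
 * checks above with a declarative nla_policy table roughly like the one
 * below (modeled on later sch_red.c; names and details may differ per
 * version), letting nla_parse_nested() enforce the minimum lengths.
 */
static const struct nla_policy red_policy_sketch[TCA_RED_MAX + 1] = {
        [TCA_RED_PARMS] = { .len = sizeof(struct tc_red_qopt) },
        [TCA_RED_STAB]  = { .len = RED_STAB_SIZE },
        [TCA_RED_MAX_P] = { .type = NLA_U32 },
};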
/*
 * Earlier gred_change_vq() variant: allocates the per-VQ struct inline
 * with GFP_KERNEL.  Its caller holds sch_tree_lock(), so the allocation
 * could sleep while a spinlock is held; the preallocation scheme in the
 * variant above closes exactly that hole.
 */
static inline int gred_change_vq(struct Qdisc *sch, int dp,
                                 struct tc_gred_qopt *ctl, int prio, u8 *stab)
{
        struct gred_sched *table = qdisc_priv(sch);
        struct gred_sched_data *q;

        if (table->tab[dp] == NULL) {
                table->tab[dp] = kzalloc(sizeof(*q), GFP_KERNEL);
                if (table->tab[dp] == NULL)
                        return -ENOMEM;
        }

        q = table->tab[dp];
        q->DP = dp;
        q->prio = prio;
        q->limit = ctl->limit;

        if (q->backlog == 0)
                red_end_of_idle_period(&q->parms);

        red_set_parms(&q->parms,
                      ctl->qth_min, ctl->qth_max, ctl->Wlog,
                      ctl->Plog, ctl->Scell_log, stab);
        return 0;
}
/*
 * Later gred_change_vq() variant: validates the RED parameters up front
 * and reports failures through extack, lets a freshly created VQ inherit
 * the table-wide RED flags, and clamps the per-VQ limit to the
 * qdisc-wide sch->limit.
 */
static inline int gred_change_vq(struct Qdisc *sch, int dp,
                                 struct tc_gred_qopt *ctl, int prio,
                                 u8 *stab, u32 max_P,
                                 struct gred_sched_data **prealloc,
                                 struct netlink_ext_ack *extack)
{
        struct gred_sched *table = qdisc_priv(sch);
        struct gred_sched_data *q = table->tab[dp];

        if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog)) {
                NL_SET_ERR_MSG_MOD(extack, "invalid RED parameters");
                return -EINVAL;
        }

        if (!q) {
                table->tab[dp] = q = *prealloc;
                *prealloc = NULL;
                if (!q)
                        return -ENOMEM;
                /* New VQs inherit the table-wide per-VQ RED flags. */
                q->red_flags = table->red_flags & GRED_VQ_RED_FLAGS;
        }

        q->DP = dp;
        q->prio = prio;
        if (ctl->limit > sch->limit)
                q->limit = sch->limit;
        else
                q->limit = ctl->limit;

        if (q->backlog == 0)
                red_end_of_idle_period(&q->vars);

        red_set_parms(&q->parms,
                      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
                      ctl->Scell_log, stab, max_P);
        red_set_vars(&q->vars);
        return 0;
}
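/*
 * A hedged sketch of the kind of validation red_check_params() performs
 * (modeled on include/net/red.h; the exact checks and bounds vary across
 * kernel versions): the thresholds must be ordered, and qth_min/qth_max
 * shifted by Wlog must still fit in the 32-bit average-queue arithmetic.
 */
static inline bool red_check_params_sketch(u32 qth_min, u32 qth_max, u8 Wlog)
{
        if (fls(qth_min) + Wlog > 32)
                return false;
        if (fls(qth_max) + Wlog > 32)
                return false;
        if (qth_max < qth_min)
                return false;
        return true;
}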
/*
 * SFQ reconfiguration: accepts both the legacy tc_sfq_qopt layout and
 * the extended tc_sfq_qopt_v1 (which adds depth, head-drop and optional
 * RED parameters), distinguishing them by attribute length.
 */
static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
{
        struct sfq_sched_data *q = qdisc_priv(sch);
        struct tc_sfq_qopt *ctl = nla_data(opt);
        struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
        unsigned int qlen;
        struct red_parms *p = NULL;

        if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
                return -EINVAL;
        /* The v1 layout extends the legacy one; detect it by length. */
        if (opt->nla_len >= nla_attr_size(sizeof(*ctl_v1)))
                ctl_v1 = nla_data(opt);
        if (ctl->divisor &&
            (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
                return -EINVAL;
        /* Allocate new RED state before taking the lock; kmalloc may sleep. */
        if (ctl_v1 && ctl_v1->qth_min) {
                p = kmalloc(sizeof(*p), GFP_KERNEL);
                if (!p)
                        return -ENOMEM;
        }
        sch_tree_lock(sch);
        if (ctl->quantum) {
                q->quantum = ctl->quantum;
                q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
        }
        q->perturb_period = ctl->perturb_period * HZ;
        if (ctl->flows)
                q->maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS);
        if (ctl->divisor) {
                q->divisor = ctl->divisor;
                q->maxflows = min_t(u32, q->maxflows, q->divisor);
        }
        if (ctl_v1) {
                if (ctl_v1->depth)
                        q->maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH);
                if (p) {
                        /* Swap in the new parms; the displaced ones are
                         * freed after the lock is released. */
                        swap(q->red_parms, p);
                        red_set_parms(q->red_parms,
                                      ctl_v1->qth_min, ctl_v1->qth_max,
                                      ctl_v1->Wlog,
                                      ctl_v1->Plog, ctl_v1->Scell_log,
                                      NULL,
                                      ctl_v1->max_P);
                }
                q->flags = ctl_v1->flags;
                q->headdrop = ctl_v1->headdrop;
        }
        if (ctl->limit) {
                q->limit = min_t(u32, ctl->limit, q->maxdepth * q->maxflows);
                q->maxflows = min_t(u32, q->maxflows, q->limit);
        }
        /* Drop packets until the queue fits the (possibly smaller) limit. */
        qlen = sch->q.qlen;
        while (sch->q.qlen > q->limit)
                sfq_drop(sch);
        qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);

        del_timer(&q->perturb_timer);
        if (q->perturb_period) {
                mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
                q->perturbation = prandom_u32();
        }
        sch_tree_unlock(sch);
        kfree(p); /* old red_parms after swap, or the unused allocation */
        return 0;
}
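/*
 * Sketch of the replace-under-lock idiom sfq_change() uses for
 * q->red_parms (illustrative helper, not from the kernel; initialization
 * of the new parms is omitted): allocate outside the lock, swap the
 * pointer inside it, and free the displaced object after unlocking.
 * kfree(NULL) being a no-op keeps the error handling trivial.
 */
static int replace_parms_sketch(struct Qdisc *sch, struct red_parms **slot)
{
        struct red_parms *p = kmalloc(sizeof(*p), GFP_KERNEL);

        if (!p)
                return -ENOMEM;
        sch_tree_lock(sch);
        swap(*slot, p); /* p now holds the old parms (or NULL) */
        sch_tree_unlock(sch);
        kfree(p);
        return 0;
}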
/*
 * CHOKe reconfiguration: parses the nested attributes, resizes the
 * packet ring to the next power of two above the new limit (migrating
 * queued skbs and dropping any overflow), then installs the RED
 * parameters.
 */
static int choke_change(struct Qdisc *sch, struct nlattr *opt)
{
        struct choke_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_CHOKE_MAX + 1];
        const struct tc_red_qopt *ctl;
        int err;
        struct sk_buff **old = NULL;
        unsigned int mask;
        u32 max_P;

        if (opt == NULL)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_CHOKE_MAX, opt, choke_policy);
        if (err < 0)
                return err;

        if (tb[TCA_CHOKE_PARMS] == NULL ||
            tb[TCA_CHOKE_STAB] == NULL)
                return -EINVAL;

        max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0;

        ctl = nla_data(tb[TCA_CHOKE_PARMS]);

        if (ctl->limit > CHOKE_MAX_QUEUE)
                return -EINVAL;

        /* Ring size is a power of two so indexing can use a mask. */
        mask = roundup_pow_of_two(ctl->limit + 1) - 1;
        if (mask != q->tab_mask) {
                struct sk_buff **ntab;

                ntab = kcalloc(mask + 1, sizeof(struct sk_buff *), GFP_KERNEL);
                if (!ntab)
                        ntab = vzalloc((mask + 1) * sizeof(struct sk_buff *));
                if (!ntab)
                        return -ENOMEM;

                sch_tree_lock(sch);
                old = q->tab;
                if (old) {
                        unsigned int oqlen = sch->q.qlen, tail = 0;

                        /* Migrate queued skbs; drop what no longer fits. */
                        while (q->head != q->tail) {
                                struct sk_buff *skb = q->tab[q->head];

                                q->head = (q->head + 1) & q->tab_mask;
                                if (!skb)
                                        continue;
                                if (tail < mask) {
                                        ntab[tail++] = skb;
                                        continue;
                                }
                                sch->qstats.backlog -= qdisc_pkt_len(skb);
                                --sch->q.qlen;
                                qdisc_drop(skb, sch);
                        }
                        qdisc_tree_decrease_qlen(sch, oqlen - sch->q.qlen);
                        q->head = 0;
                        q->tail = tail;
                }

                q->tab_mask = mask;
                q->tab = ntab;
        } else
                sch_tree_lock(sch);

        q->flags = ctl->flags;
        q->limit = ctl->limit;

        red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
                      ctl->Plog, ctl->Scell_log,
                      nla_data(tb[TCA_CHOKE_STAB]),
                      max_P);
        red_set_vars(&q->vars);

        if (q->head == q->tail)
                red_end_of_idle_period(&q->vars);

        sch_tree_unlock(sch);
        choke_free(old); /* free the displaced ring outside the lock */
        return 0;
}
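/*
 * choke_free() pairs with the kcalloc()/vzalloc() fallback above: the
 * table may come from either allocator, so the free path must match.
 * A sketch of that helper as it appeared in sch_choke.c of this era
 * (modern kernels can simply call kvfree()):
 */
static void choke_free(void *addr)
{
        if (addr) {
                if (is_vmalloc_addr(addr))
                        vfree(addr);
                else
                        kfree(addr);
        }
}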