static int dsmark_qdisc_msg_parser(struct rtnl_qdisc *qdisc) { int err; struct nlattr *tb[TCA_DSMARK_MAX + 1]; struct rtnl_dsmark_qdisc *dsmark; err = tca_parse(tb, TCA_DSMARK_MAX, (struct rtnl_tca *) qdisc, dsmark_policy); if (err < 0) return err; dsmark = dsmark_qdisc_alloc(qdisc); if (!dsmark) return nl_errno(ENOMEM); if (tb[TCA_DSMARK_INDICES]) { dsmark->qdm_indices = nla_get_u16(tb[TCA_DSMARK_INDICES]); dsmark->qdm_mask |= SCH_DSMARK_ATTR_INDICES; } if (tb[TCA_DSMARK_DEFAULT_INDEX]) { dsmark->qdm_default_index = nla_get_u16(tb[TCA_DSMARK_DEFAULT_INDEX]); dsmark->qdm_mask |= SCH_DSMARK_ATTR_DEFAULT_INDEX; } if (tb[TCA_DSMARK_SET_TC_INDEX]) { dsmark->qdm_set_tc_index = 1; dsmark->qdm_mask |= SCH_DSMARK_ATTR_SET_TC_INDEX; } return 0; }
static int htb_class_msg_parser(struct rtnl_tc *tc, void *data) { struct nlattr *tb[TCA_HTB_MAX + 1]; struct rtnl_htb_class *htb = data; int err; if ((err = tca_parse(tb, TCA_HTB_MAX, tc, htb_policy)) < 0) return err; if (tb[TCA_HTB_PARMS]) { struct tc_htb_opt opts; nla_memcpy(&opts, tb[TCA_HTB_PARMS], sizeof(opts)); htb->ch_prio = opts.prio; rtnl_copy_ratespec(&htb->ch_rate, &opts.rate); rtnl_copy_ratespec(&htb->ch_ceil, &opts.ceil); htb->ch_rbuffer = rtnl_tc_calc_bufsize(opts.buffer, opts.rate.rate); htb->ch_cbuffer = rtnl_tc_calc_bufsize(opts.cbuffer, opts.ceil.rate); htb->ch_quantum = opts.quantum; htb->ch_level = opts.level; rtnl_tc_set_mpu(tc, htb->ch_rate.rs_mpu); rtnl_tc_set_overhead(tc, htb->ch_rate.rs_overhead); htb->ch_mask = (SCH_HTB_HAS_PRIO | SCH_HTB_HAS_RATE | SCH_HTB_HAS_CEIL | SCH_HTB_HAS_RBUFFER | SCH_HTB_HAS_CBUFFER | SCH_HTB_HAS_QUANTUM | SCH_HTB_HAS_LEVEL); } return 0; }
static int skbedit_msg_parser(struct rtnl_tc *tc, void *data) { struct rtnl_skbedit *u = data; struct nlattr *tb[TCA_SKBEDIT_MAX + 1]; int err; err = tca_parse(tb, TCA_SKBEDIT_MAX, tc, skbedit_policy); if (err < 0) return err; if (!tb[TCA_SKBEDIT_PARMS]) return -NLE_MISSING_ATTR; u->s_flags = 0; if (tb[TCA_SKBEDIT_PRIORITY] != NULL) { u->s_flags |= SKBEDIT_F_PRIORITY; u->s_prio = nla_get_u32(tb[TCA_SKBEDIT_PRIORITY]); } if (tb[TCA_SKBEDIT_QUEUE_MAPPING] != NULL) { u->s_flags |= SKBEDIT_F_QUEUE_MAPPING; u->s_queue_mapping = nla_get_u16(tb[TCA_SKBEDIT_QUEUE_MAPPING]); } if (tb[TCA_SKBEDIT_MARK] != NULL) { u->s_flags |= SKBEDIT_F_MARK; u->s_mark = nla_get_u32(tb[TCA_SKBEDIT_MARK]); } return 0; }
static int cbq_msg_parser(struct rtnl_tca *tca) { struct nlattr *tb[TCA_CBQ_MAX + 1]; struct rtnl_cbq *cbq; int err; err = tca_parse(tb, TCA_CBQ_MAX, tca, cbq_policy); if (err < 0) return err; cbq = cbq_alloc(tca); if (!cbq) return nl_errno(ENOMEM); nla_memcpy(&cbq->cbq_lss, tb[TCA_CBQ_LSSOPT], sizeof(cbq->cbq_lss)); nla_memcpy(&cbq->cbq_rate, tb[TCA_CBQ_RATE], sizeof(cbq->cbq_rate)); nla_memcpy(&cbq->cbq_wrr, tb[TCA_CBQ_WRROPT], sizeof(cbq->cbq_wrr)); nla_memcpy(&cbq->cbq_fopt, tb[TCA_CBQ_FOPT], sizeof(cbq->cbq_fopt)); nla_memcpy(&cbq->cbq_ovl, tb[TCA_CBQ_OVL_STRATEGY], sizeof(cbq->cbq_ovl)); nla_memcpy(&cbq->cbq_police, tb[TCA_CBQ_POLICE], sizeof(cbq->cbq_police)); return 0; }
static int red_msg_parser(struct rtnl_tc *tc, void *data) { struct nlattr *tb[TCA_RED_MAX+1]; struct rtnl_red *red = data; struct tc_red_qopt *opts; int err; if (!(tc->ce_mask & TCA_ATTR_OPTS)) return 0; err = tca_parse(tb, TCA_RED_MAX, tc, red_policy); if (err < 0) return err; if (!tb[TCA_RED_PARMS]) return -NLE_MISSING_ATTR; opts = nla_data(tb[TCA_RED_PARMS]); red->qr_limit = opts->limit; red->qr_qth_min = opts->qth_min; red->qr_qth_max = opts->qth_max; red->qr_flags = opts->flags; red->qr_wlog = opts->Wlog; red->qr_plog = opts->Plog; red->qr_scell_log = opts->Scell_log; red->qr_mask = (RED_ATTR_LIMIT | RED_ATTR_QTH_MIN | RED_ATTR_QTH_MAX | RED_ATTR_FLAGS | RED_ATTR_WLOG | RED_ATTR_PLOG | RED_ATTR_SCELL_LOG); return 0; }
/*
 * Parse the netlink options of a cgroup classifier into its private data.
 *
 * @cls: classifier object whose TCA options are to be parsed.
 *
 * Returns 0 on success or a negative libnl error code.
 */
static int cgroup_msg_parser(struct rtnl_cls *cls)
{
        struct rtnl_cgroup *cg = rtnl_cls_data(cls);
        struct nlattr *tb[TCA_CGROUP_MAX + 1];
        int err;

        err = tca_parse(tb, TCA_CGROUP_MAX, (struct rtnl_tc *) cls,
                        cgroup_policy);
        if (err < 0)
                return err;

        if (tb[TCA_CGROUP_EMATCHES]) {
                err = rtnl_ematch_parse_attr(tb[TCA_CGROUP_EMATCHES],
                                             &cg->cg_ematch);
                if (err < 0)
                        return err;
                cg->cg_mask |= CGROUP_ATTR_EMATCH;
        }

        /* TODO: TCA_CGROUP_ACT, TCA_CGROUP_POLICE */

        return 0;
}
static int mirred_msg_parser(struct rtnl_tc *tc, void *data) { struct rtnl_mirred *u = data; struct nlattr *tb[TCA_MIRRED_MAX + 1]; int err; err = tca_parse(tb, TCA_MIRRED_MAX, tc, mirred_policy); if (err < 0) return err; if (!tb[TCA_MIRRED_PARMS]) return -NLE_MISSING_ATTR; nla_memcpy(&u->m_parm, tb[TCA_MIRRED_PARMS], sizeof(u->m_parm)); return 0; }
static int gact_msg_parser(struct rtnl_tc *tc, void *data) { struct rtnl_gact *u = data; struct nlattr *tb[TCA_GACT_MAX + 1]; int err; err = tca_parse(tb, TCA_GACT_MAX, tc, gact_policy); if (err < 0) return err; if (!tb[TCA_GACT_PARMS]) return -NLE_MISSING_ATTR; nla_memcpy(&u->g_parm, tb[TCA_GACT_PARMS], sizeof(u->g_parm)); return 0; }
static int fq_codel_msg_parser(struct rtnl_tc *tc, void *data) { struct rtnl_fq_codel *fq_codel = data; struct nlattr *tb[TCA_FQ_CODEL_MAX + 1]; int err; err = tca_parse(tb, TCA_FQ_CODEL_MAX, tc, fq_codel_policy); if (err < 0) return err; if (tb[TCA_FQ_CODEL_TARGET]) { fq_codel->fq_target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]); fq_codel->fq_mask |= SCH_FQ_CODEL_ATTR_TARGET; } if (tb[TCA_FQ_CODEL_INTERVAL]) { fq_codel->fq_interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]); fq_codel->fq_mask |= SCH_FQ_CODEL_ATTR_INTERVAL; } if (tb[TCA_FQ_CODEL_LIMIT]) { fq_codel->fq_limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]); fq_codel->fq_mask |= SCH_FQ_CODEL_ATTR_LIMIT; } if (tb[TCA_FQ_CODEL_QUANTUM]) { fq_codel->fq_quantum = nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]); fq_codel->fq_mask |= SCH_FQ_CODEL_ATTR_QUANTUM; } if (tb[TCA_FQ_CODEL_FLOWS]) { fq_codel->fq_flows = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]); fq_codel->fq_mask |= SCH_FQ_CODEL_ATTR_FLOWS; } if (tb[TCA_FQ_CODEL_ECN]) { fq_codel->fq_ecn = nla_get_u32(tb[TCA_FQ_CODEL_ECN]); fq_codel->fq_mask |= SCH_FQ_CODEL_ATTR_ECN; } return 0; }
static int tbf_msg_parser(struct rtnl_qdisc *q) { int err; struct nlattr *tb[TCA_TBF_MAX + 1]; struct rtnl_tbf *tbf; err = tca_parse(tb, TCA_TBF_MAX, (struct rtnl_tca *) q, tbf_policy); if (err < 0) return err; tbf = tbf_qdisc(q); if (!tbf) return nl_errno(ENOMEM); if (tb[TCA_TBF_PARMS]) { struct tc_tbf_qopt opts; int bufsize; nla_memcpy(&opts, tb[TCA_TBF_PARMS], sizeof(opts)); tbf->qt_limit = opts.limit; tbf->qt_mpu = opts.rate.mpu; rtnl_copy_ratespec(&tbf->qt_rate, &opts.rate); tbf->qt_rate_txtime = opts.buffer; bufsize = rtnl_tc_calc_bufsize(nl_ticks2us(opts.buffer), opts.rate.rate); tbf->qt_rate_bucket = bufsize; rtnl_copy_ratespec(&tbf->qt_peakrate, &opts.peakrate); tbf->qt_peakrate_txtime = opts.mtu; bufsize = rtnl_tc_calc_bufsize(nl_ticks2us(opts.mtu), opts.peakrate.rate); tbf->qt_peakrate_bucket = bufsize; tbf->qt_mask = (TBF_ATTR_LIMIT | TBF_ATTR_MPU | TBF_ATTR_RATE | TBF_ATTR_PEAKRATE); } return 0; }
static int htb_qdisc_msg_parser(struct rtnl_tc *tc, void *data) { struct nlattr *tb[TCA_HTB_MAX + 1]; struct rtnl_htb_qdisc *htb = data; int err; if ((err = tca_parse(tb, TCA_HTB_MAX, tc, htb_policy)) < 0) return err; if (tb[TCA_HTB_INIT]) { struct tc_htb_glob opts; nla_memcpy(&opts, tb[TCA_HTB_INIT], sizeof(opts)); htb->qh_rate2quantum = opts.rate2quantum; htb->qh_defcls = opts.defcls; htb->qh_direct_pkts = opts.direct_pkts; htb->qh_mask = (SCH_HTB_HAS_RATE2QUANTUM | SCH_HTB_HAS_DEFCLS); } return 0; }
static int tbf_msg_parser(struct rtnl_tc *tc, void *data) { struct nlattr *tb[TCA_TBF_MAX + 1]; struct rtnl_tbf *tbf = data; int err; if ((err = tca_parse(tb, TCA_TBF_MAX, tc, tbf_policy)) < 0) return err; if (tb[TCA_TBF_PARMS]) { struct tc_tbf_qopt opts; int bufsize; nla_memcpy(&opts, tb[TCA_TBF_PARMS], sizeof(opts)); tbf->qt_limit = opts.limit; rtnl_copy_ratespec(&tbf->qt_rate, &opts.rate); tbf->qt_rate_txtime = opts.buffer; bufsize = rtnl_tc_calc_bufsize(nl_ticks2us(opts.buffer), opts.rate.rate); tbf->qt_rate_bucket = bufsize; rtnl_copy_ratespec(&tbf->qt_peakrate, &opts.peakrate); tbf->qt_peakrate_txtime = opts.mtu; bufsize = rtnl_tc_calc_bufsize(nl_ticks2us(opts.mtu), opts.peakrate.rate); tbf->qt_peakrate_bucket = bufsize; rtnl_tc_set_mpu(tc, tbf->qt_rate.rs_mpu); rtnl_tc_set_overhead(tc, tbf->qt_rate.rs_overhead); tbf->qt_mask = (TBF_ATTR_LIMIT | TBF_ATTR_RATE | TBF_ATTR_PEAKRATE); } return 0; }
static int fw_msg_parser(struct rtnl_cls *cls) { struct rtnl_fw *f = rtnl_cls_data(cls); struct nlattr *tb[TCA_FW_MAX + 1]; int err; err = tca_parse(tb, TCA_FW_MAX, (struct rtnl_tca *) cls, fw_policy); if (err < 0) return err; if (tb[TCA_FW_CLASSID]) { f->cf_classid = nla_get_u32(tb[TCA_FW_CLASSID]); f->cf_mask |= FW_ATTR_CLASSID; } if (tb[TCA_FW_ACT]) { f->cf_act = nl_data_alloc_attr(tb[TCA_FW_ACT]); if (!f->cf_act) return -NLE_NOMEM; f->cf_mask |= FW_ATTR_ACTION; } if (tb[TCA_FW_POLICE]) { f->cf_police = nl_data_alloc_attr(tb[TCA_FW_POLICE]); if (!f->cf_police) return -NLE_NOMEM; f->cf_mask |= FW_ATTR_POLICE; } if (tb[TCA_FW_INDEV]) { nla_strlcpy(f->cf_indev, tb[TCA_FW_INDEV], IFNAMSIZ); f->cf_mask |= FW_ATTR_INDEV; } return 0; }
static int htb_qdisc_msg_parser(struct rtnl_qdisc *qdisc) { int err; struct nlattr *tb[TCA_HTB_MAX + 1]; struct rtnl_htb_qdisc *d; err = tca_parse(tb, TCA_HTB_MAX, (struct rtnl_tca *) qdisc, htb_policy); if (err < 0) return err; d = htb_qdisc(qdisc); if (tb[TCA_HTB_INIT]) { struct tc_htb_glob opts; nla_memcpy(&opts, tb[TCA_HTB_INIT], sizeof(opts)); d->qh_rate2quantum = opts.rate2quantum; d->qh_defcls = opts.defcls; d->qh_mask = (SCH_HTB_HAS_RATE2QUANTUM | SCH_HTB_HAS_DEFCLS); } return 0; }
/*
 * Parse the netlink options of a u32 classifier into its private data.
 *
 * @tc:   generic TC object carrying the attributes.
 * @data: points at the classifier's struct rtnl_u32 storage.
 *
 * Returns 0 on success, -NLE_NOMEM on allocation failure,
 * -NLE_MISSING_ATTR / -NLE_INVAL on inconsistent PCNT data, or another
 * negative libnl error code.
 */
static int u32_msg_parser(struct rtnl_tc *tc, void *data)
{
        struct rtnl_u32 *u = data;
        struct nlattr *tb[TCA_U32_MAX + 1];
        int err;

        err = tca_parse(tb, TCA_U32_MAX, tc, u32_policy);
        if (err < 0)
                return err;

        if (tb[TCA_U32_DIVISOR]) {
                u->cu_divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);
                u->cu_mask |= U32_ATTR_DIVISOR;
        }

        /* The selector is kept as an opaque copy; the PCNT branch below
         * depends on cu_selector having been populated here. */
        if (tb[TCA_U32_SEL]) {
                u->cu_selector = nl_data_alloc_attr(tb[TCA_U32_SEL]);
                if (!u->cu_selector)
                        goto errout_nomem;
                u->cu_mask |= U32_ATTR_SELECTOR;
        }

        if (tb[TCA_U32_MARK]) {
                u->cu_mark = nl_data_alloc_attr(tb[TCA_U32_MARK]);
                if (!u->cu_mark)
                        goto errout_nomem;
                u->cu_mask |= U32_ATTR_MARK;
        }

        if (tb[TCA_U32_HASH]) {
                u->cu_hash = nla_get_u32(tb[TCA_U32_HASH]);
                u->cu_mask |= U32_ATTR_HASH;
        }

        if (tb[TCA_U32_CLASSID]) {
                u->cu_classid = nla_get_u32(tb[TCA_U32_CLASSID]);
                u->cu_mask |= U32_ATTR_CLASSID;
        }

        if (tb[TCA_U32_LINK]) {
                u->cu_link = nla_get_u32(tb[TCA_U32_LINK]);
                u->cu_mask |= U32_ATTR_LINK;
        }

        if (tb[TCA_U32_ACT]) {
                /* NOTE(review): the mask bit is set before the parse; on
                 * a parse error it stays set — confirm callers tolerate
                 * this, or that the object is discarded on error. */
                u->cu_mask |= U32_ATTR_ACTION;
                err = rtnl_act_parse(&u->cu_act, tb[TCA_U32_ACT]);
                if (err)
                        return err;
        }

        if (tb[TCA_U32_POLICE]) {
                u->cu_police = nl_data_alloc_attr(tb[TCA_U32_POLICE]);
                if (!u->cu_police)
                        goto errout_nomem;
                u->cu_mask |= U32_ATTR_POLICE;
        }

        if (tb[TCA_U32_PCNT]) {
                struct tc_u32_sel *sel;
                size_t pcnt_size;

                /* PCNT sizing depends on the selector's key count, so a
                 * selector attribute is mandatory alongside PCNT. */
                if (!tb[TCA_U32_SEL]) {
                        err = -NLE_MISSING_ATTR;
                        goto errout;
                }

                sel = u->cu_selector->d_data;
                /* Expected payload: header plus one 64-bit counter pair
                 * slot per selector key. */
                pcnt_size = sizeof(struct tc_u32_pcnt) +
                            (sel->nkeys * sizeof(uint64_t));
                if (nla_len(tb[TCA_U32_PCNT]) < pcnt_size) {
                        err = -NLE_INVAL;
                        goto errout;
                }

                u->cu_pcnt = nl_data_alloc_attr(tb[TCA_U32_PCNT]);
                if (!u->cu_pcnt)
                        goto errout_nomem;
                u->cu_mask |= U32_ATTR_PCNT;
        }

        if (tb[TCA_U32_INDEV]) {
                nla_strlcpy(u->cu_indev, tb[TCA_U32_INDEV], IFNAMSIZ);
                u->cu_mask |= U32_ATTR_INDEV;
        }

        return 0;

errout_nomem:
        err = -NLE_NOMEM;
errout:
        return err;
}