static int tcf_pedit_init(struct nlattr *nla, struct nlattr *est,
			  struct tc_action *a, int ovr, int bind)
{
	struct nlattr *tb[TCA_PEDIT_MAX + 1];
	struct tc_pedit *parm;
	int ret = 0, err;
	struct tcf_pedit *p;
	struct tcf_common *pc;
	struct tc_pedit_key *keys = NULL;
	int ksize;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_PEDIT_MAX, nla, pedit_policy);
	if (err < 0)
		return err;

	if (tb[TCA_PEDIT_PARMS] == NULL)
		return -EINVAL;
	parm = nla_data(tb[TCA_PEDIT_PARMS]);
	ksize = parm->nkeys * sizeof(struct tc_pedit_key);
	if (nla_len(tb[TCA_PEDIT_PARMS]) < sizeof(*parm) + ksize)
		return -EINVAL;

	pc = tcf_hash_check(parm->index, a, bind, &pedit_hash_info);
	if (!pc) {
		if (!parm->nkeys)
			return -EINVAL;
		pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind,
				     &pedit_idx_gen, &pedit_hash_info);
		if (IS_ERR(pc))
			return PTR_ERR(pc);
		p = to_pedit(pc);
		keys = kmalloc(ksize, GFP_KERNEL);
		if (keys == NULL) {
			kfree(pc);
			return -ENOMEM;
		}
		ret = ACT_P_CREATED;
	} else {
		p = to_pedit(pc);
		if (!ovr) {
			tcf_hash_release(pc, bind, &pedit_hash_info);
			return -EEXIST;
		}
		if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) {
			keys = kmalloc(ksize, GFP_KERNEL);
			if (keys == NULL)
				return -ENOMEM;
		}
	}

	spin_lock_bh(&p->tcf_lock);
	p->tcfp_flags = parm->flags;
	p->tcf_action = parm->action;
	if (keys) {
		kfree(p->tcfp_keys);
		p->tcfp_keys = keys;
		p->tcfp_nkeys = parm->nkeys;
	}
	memcpy(p->tcfp_keys, parm->keys, ksize);
	spin_unlock_bh(&p->tcf_lock);

	if (ret == ACT_P_CREATED)
		tcf_hash_insert(pc, &pedit_hash_info);
	return ret;
}
static int pie_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_PIE_MAX + 1];
	unsigned int qlen;
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_PIE_MAX, opt, pie_policy);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	/* convert from microseconds to pschedtime */
	if (tb[TCA_PIE_TARGET]) {
		/* target is in us */
		u32 target = nla_get_u32(tb[TCA_PIE_TARGET]);

		/* convert to pschedtime */
		q->params.target = PSCHED_NS2TICKS((u64)target * NSEC_PER_USEC);
	}

	/* tupdate is in jiffies */
	if (tb[TCA_PIE_TUPDATE])
		q->params.tupdate = usecs_to_jiffies(nla_get_u32(tb[TCA_PIE_TUPDATE]));

	if (tb[TCA_PIE_LIMIT]) {
		u32 limit = nla_get_u32(tb[TCA_PIE_LIMIT]);

		q->params.limit = limit;
		sch->limit = limit;
	}

	if (tb[TCA_PIE_ALPHA])
		q->params.alpha = nla_get_u32(tb[TCA_PIE_ALPHA]);

	if (tb[TCA_PIE_BETA])
		q->params.beta = nla_get_u32(tb[TCA_PIE_BETA]);

	if (tb[TCA_PIE_ECN])
		q->params.ecn = nla_get_u32(tb[TCA_PIE_ECN]);

	if (tb[TCA_PIE_BYTEMODE])
		q->params.bytemode = nla_get_u32(tb[TCA_PIE_BYTEMODE]);

	/* Drop excess packets if new limit is lower */
	qlen = sch->q.qlen;
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = __skb_dequeue(&sch->q);

		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_drop(skb, sch);
	}
	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);

	sch_tree_unlock(sch);
	return 0;
}
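/*
 * Editor's sketch, not part of the original source: every qdisc in this
 * collection receives time values from userspace in microseconds and
 * converts them to its internal clock on the spot. For PIE the conversion
 * above could be factored into a hypothetical helper like this:
 */
static inline psched_time_t pie_usecs_to_ticks(u32 us)
{
	/* widen to u64 first so large targets do not overflow before scaling */
	return PSCHED_NS2TICKS((u64)us * NSEC_PER_USEC);
}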
static int tcf_ipt_init(struct nlattr *nla, struct nlattr *est,
			struct tc_action *a, int ovr, int bind)
{
	struct nlattr *tb[TCA_IPT_MAX + 1];
	struct tcf_ipt *ipt;
	struct tcf_common *pc;
	struct ipt_entry_target *td, *t;
	char *tname;
	int ret = 0, err;
	u32 hook = 0;
	u32 index = 0;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_IPT_MAX, nla, ipt_policy);
	if (err < 0)
		return err;

	if (tb[TCA_IPT_HOOK] == NULL)
		return -EINVAL;
	if (tb[TCA_IPT_TARG] == NULL)
		return -EINVAL;

	td = (struct ipt_entry_target *)nla_data(tb[TCA_IPT_TARG]);
	if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size)
		return -EINVAL;

	if (tb[TCA_IPT_INDEX] != NULL)
		index = nla_get_u32(tb[TCA_IPT_INDEX]);

	pc = tcf_hash_check(index, a, bind, &ipt_hash_info);
	if (!pc) {
		pc = tcf_hash_create(index, est, a, sizeof(*ipt), bind,
				     &ipt_idx_gen, &ipt_hash_info);
		if (IS_ERR(pc))
			return PTR_ERR(pc);
		ret = ACT_P_CREATED;
	} else {
		if (!ovr) {
			tcf_ipt_release(to_ipt(pc), bind);
			return -EEXIST;
		}
	}
	ipt = to_ipt(pc);

	hook = nla_get_u32(tb[TCA_IPT_HOOK]);

	err = -ENOMEM;
	tname = kmalloc(IFNAMSIZ, GFP_KERNEL);
	if (unlikely(!tname))
		goto err1;
	if (tb[TCA_IPT_TABLE] == NULL ||
	    nla_strlcpy(tname, tb[TCA_IPT_TABLE], IFNAMSIZ) >= IFNAMSIZ)
		strcpy(tname, "mangle");

	t = kmemdup(td, td->u.target_size, GFP_KERNEL);
	if (unlikely(!t))
		goto err2;

	err = ipt_init_target(t, tname, hook);
	if (err < 0)
		goto err3;

	spin_lock_bh(&ipt->tcf_lock);
	if (ret != ACT_P_CREATED) {
		ipt_destroy_target(ipt->tcfi_t);
		kfree(ipt->tcfi_tname);
		kfree(ipt->tcfi_t);
	}
	ipt->tcfi_tname = tname;
	ipt->tcfi_t = t;
	ipt->tcfi_hook = hook;
	spin_unlock_bh(&ipt->tcf_lock);

	if (ret == ACT_P_CREATED)
		tcf_hash_insert(pc, &ipt_hash_info);
	return ret;

err3:
	kfree(t);
err2:
	kfree(tname);
err1:
	kfree(pc);
	return err;
}
static int fq_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_MAX + 1];
	int err, drop_count = 0;
	unsigned int drop_len = 0;
	u32 fq_log;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FQ_MAX, opt, fq_policy);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	fq_log = q->fq_trees_log;

	if (tb[TCA_FQ_BUCKETS_LOG]) {
		u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]);

		if (nval >= 1 && nval <= ilog2(256*1024))
			fq_log = nval;
		else
			err = -EINVAL;
	}
	if (tb[TCA_FQ_PLIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]);

	if (tb[TCA_FQ_FLOW_PLIMIT])
		q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);

	if (tb[TCA_FQ_QUANTUM]) {
		u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);

		if (quantum > 0)
			q->quantum = quantum;
		else
			err = -EINVAL;
	}

	if (tb[TCA_FQ_INITIAL_QUANTUM])
		q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);

	if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
		pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
				    nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]));

	if (tb[TCA_FQ_FLOW_MAX_RATE])
		q->flow_max_rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);

	if (tb[TCA_FQ_LOW_RATE_THRESHOLD])
		q->low_rate_threshold =
			nla_get_u32(tb[TCA_FQ_LOW_RATE_THRESHOLD]);

	if (tb[TCA_FQ_RATE_ENABLE]) {
		u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);

		if (enable <= 1)
			q->rate_enable = enable;
		else
			err = -EINVAL;
	}

	if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
		u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]);

		q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
	}

	if (tb[TCA_FQ_ORPHAN_MASK])
		q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]);

	if (!err) {
		sch_tree_unlock(sch);
		err = fq_resize(sch, fq_log);
		sch_tree_lock(sch);
	}
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = fq_dequeue(sch);

		if (!skb)
			break;
		drop_len += qdisc_pkt_len(skb);
		rtnl_kfree_skbs(skb, skb);
		drop_count++;
	}
	qdisc_tree_reduce_backlog(sch, drop_count, drop_len);

	sch_tree_unlock(sch);
	return err;
}
static int tcf_simp_init(struct net *net, struct nlattr *nla,
			 struct nlattr *est, struct tc_action **a,
			 int ovr, int bind)
{
	struct tc_action_net *tn = net_generic(net, simp_net_id);
	struct nlattr *tb[TCA_DEF_MAX + 1];
	struct tc_defact *parm;
	struct tcf_defact *d;
	bool exists = false;
	int ret = 0, err;
	char *defdata;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_DEF_MAX, nla, simple_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_DEF_PARMS] == NULL)
		return -EINVAL;

	parm = nla_data(tb[TCA_DEF_PARMS]);
	exists = tcf_hash_check(tn, parm->index, a, bind);
	if (exists && bind)
		return 0;

	if (tb[TCA_DEF_DATA] == NULL) {
		if (exists)
			tcf_hash_release(*a, bind);
		return -EINVAL;
	}

	defdata = nla_data(tb[TCA_DEF_DATA]);

	if (!exists) {
		ret = tcf_hash_create(tn, parm->index, est, a,
				      &act_simp_ops, bind, false);
		if (ret)
			return ret;

		d = to_defact(*a);
		ret = alloc_defdata(d, defdata);
		if (ret < 0) {
			tcf_hash_cleanup(*a, est);
			return ret;
		}
		d->tcf_action = parm->action;
		ret = ACT_P_CREATED;
	} else {
		d = to_defact(*a);

		tcf_hash_release(*a, bind);
		if (!ovr)
			return -EEXIST;

		reset_policy(d, defdata, parm);
	}

	if (ret == ACT_P_CREATED)
		tcf_hash_insert(tn, *a);
	return ret;
}
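/*
 * Editor's note: alloc_defdata() lives in the same file (act_simple.c) and
 * bounds the copied string to SIMP_MAX_DATA. For the era this snippet is
 * from it amounts to roughly the following (a hedged reconstruction, not
 * verbatim source):
 */
static int alloc_defdata(struct tcf_defact *d, char *defdata)
{
	d->tcfd_defdata = kzalloc(SIMP_MAX_DATA, GFP_KERNEL);
	if (unlikely(!d->tcfd_defdata))
		return -ENOMEM;
	/* bounded, NUL-terminating copy of the userspace-supplied string */
	strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
	return 0;
}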
static int tcf_pedit_init(struct net *net, struct nlattr *nla,
			  struct nlattr *est, struct tc_action **a,
			  int ovr, int bind)
{
	struct tc_action_net *tn = net_generic(net, pedit_net_id);
	struct nlattr *tb[TCA_PEDIT_MAX + 1];
	struct nlattr *pattr;
	struct tc_pedit *parm;
	int ret = 0, err;
	struct tcf_pedit *p;
	struct tc_pedit_key *keys = NULL;
	struct tcf_pedit_key_ex *keys_ex;
	int ksize;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_PEDIT_MAX, nla, pedit_policy);
	if (err < 0)
		return err;

	pattr = tb[TCA_PEDIT_PARMS];
	if (!pattr)
		pattr = tb[TCA_PEDIT_PARMS_EX];
	if (!pattr)
		return -EINVAL;

	parm = nla_data(pattr);
	ksize = parm->nkeys * sizeof(struct tc_pedit_key);
	if (nla_len(pattr) < sizeof(*parm) + ksize)
		return -EINVAL;

	keys_ex = tcf_pedit_keys_ex_parse(tb[TCA_PEDIT_KEYS_EX], parm->nkeys);
	if (IS_ERR(keys_ex))
		return PTR_ERR(keys_ex);

	if (!tcf_hash_check(tn, parm->index, a, bind)) {
		/* keys_ex is NULL here when nkeys == 0, nothing to free */
		if (!parm->nkeys)
			return -EINVAL;
		ret = tcf_hash_create(tn, parm->index, est, a,
				      &act_pedit_ops, bind, false);
		if (ret) {
			kfree(keys_ex);
			return ret;
		}
		p = to_pedit(*a);
		keys = kmalloc(ksize, GFP_KERNEL);
		if (keys == NULL) {
			tcf_hash_cleanup(*a, est);
			kfree(keys_ex);
			return -ENOMEM;
		}
		ret = ACT_P_CREATED;
	} else {
		/* nothing owns keys_ex yet; free it on every early return */
		if (bind) {
			kfree(keys_ex);
			return 0;
		}
		tcf_hash_release(*a, bind);
		if (!ovr) {
			kfree(keys_ex);
			return -EEXIST;
		}
		p = to_pedit(*a);
		if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) {
			keys = kmalloc(ksize, GFP_KERNEL);
			if (!keys) {
				kfree(keys_ex);
				return -ENOMEM;
			}
		}
	}

	spin_lock_bh(&p->tcf_lock);
	p->tcfp_flags = parm->flags;
	p->tcf_action = parm->action;
	if (keys) {
		kfree(p->tcfp_keys);
		p->tcfp_keys = keys;
		p->tcfp_nkeys = parm->nkeys;
	}
	memcpy(p->tcfp_keys, parm->keys, ksize);

	kfree(p->tcfp_keys_ex);
	p->tcfp_keys_ex = keys_ex;

	spin_unlock_bh(&p->tcf_lock);

	if (ret == ACT_P_CREATED)
		tcf_hash_insert(tn, *a);
	return ret;
}
static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy);
	if (err < 0)
		return err;
	if (tb[TCA_FQ_CODEL_FLOWS]) {
		if (q->flows)
			return -EINVAL;
		q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
		if (!q->flows_cnt || q->flows_cnt > 65536)
			return -EINVAL;
	}
	sch_tree_lock(sch);

	if (tb[TCA_FQ_CODEL_TARGET]) {
		u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);

		q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_CE_THRESHOLD]) {
		u64 val = nla_get_u32(tb[TCA_FQ_CODEL_CE_THRESHOLD]);

		q->cparams.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_INTERVAL]) {
		u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);

		q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);

	if (tb[TCA_FQ_CODEL_ECN])
		q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);

	if (tb[TCA_FQ_CODEL_QUANTUM])
		q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));

	if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
		/* this is a lower bound, not an upper one: a batch size of 0
		 * would make batch dropping a no-op, so clamp upward */
		q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));

	if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
		q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));

	while (sch->q.qlen > sch->limit ||
	       q->memory_usage > q->memory_limit) {
		struct sk_buff *skb = fq_codel_dequeue(sch);

		q->cstats.drop_len += qdisc_pkt_len(skb);
		kfree_skb(skb);
		q->cstats.drop_count++;
	}
	qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
	q->cstats.drop_count = 0;
	q->cstats.drop_len = 0;

	sch_tree_unlock(sch);
	return 0;
}
static int gred_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt *ctl;
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err, prio = GRED_DEF_PRIO;
	u8 *stab;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL)
		return gred_change_table_def(sch, opt);

	if (tb[TCA_GRED_PARMS] == NULL || tb[TCA_GRED_STAB] == NULL)
		return -EINVAL;

	err = -EINVAL;
	ctl = nla_data(tb[TCA_GRED_PARMS]);
	stab = nla_data(tb[TCA_GRED_STAB]);

	if (ctl->DP >= table->DPs)
		goto errout;

	if (gred_rio_mode(table)) {
		if (ctl->prio == 0) {
			int def_prio = GRED_DEF_PRIO;

			if (table->tab[table->def])
				def_prio = table->tab[table->def]->prio;

			printk(KERN_DEBUG "GRED: DP %u does not have a prio "
			       "setting default to %d\n", ctl->DP, def_prio);

			prio = def_prio;
		} else
			prio = ctl->prio;
	}

	sch_tree_lock(sch);

	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab);
	if (err < 0)
		goto errout_locked;

	if (gred_rio_mode(table)) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}

	err = 0;

errout_locked:
	sch_tree_unlock(sch);
errout:
	return err;
}
static int netlink_msg_handler(struct nl_msg *msg, void *arg)
{
	struct nlmsghdr *hdr;
	struct tcmsg *tcm;
	struct nlattr *attrs[TCA_MAX+1];
	struct nlattr *stat_attrs[TCA_STATS_MAX+1];
	struct qdisc_handler *h;
	char qdisc[IFNAMSIZ] = {0};
	int qdisc_l = 0;
	char ifname[IFNAMSIZ] = {0};
	int ifname_l = 0;
	struct timeval current_time = {0};
	double time;
	char *ret = NULL;
	struct gnet_stats_basic *sb;
	struct gnet_stats_queue *q;
	struct options *opt = arg;
	struct recordset rset = {0};

	hdr = nlmsg_hdr(msg);
	tcm = nlmsg_data(hdr);

	if(!has_iface(opt, tcm->tcm_ifindex))
		return NL_SKIP;

	if((ret = rtnl_link_i2name(opt->cache, tcm->tcm_ifindex,
				   ifname, IFNAMSIZ)) == NULL)
		return NL_SKIP;

	// No length checking in netlink library.
	if((ifname_l = 1 + strnlen(ifname, IFNAMSIZ)) >= IFNAMSIZ) {
		ifname[IFNAMSIZ-1] = '\0';
		ifname_l = IFNAMSIZ;
	}

	gettimeofday(&current_time, NULL);
	time = (double)current_time.tv_usec / 1000000 + (double)current_time.tv_sec;
	add_record_double(&rset, "time", sizeof("time"), time);
	add_record_str(&rset, "iface", sizeof("iface"), ifname, ifname_l);

	nlmsg_parse(hdr, sizeof(*tcm), attrs, TCA_MAX, tca_policy);
	if(attrs[TCA_KIND]) {
		// Clamp to the destination buffer; TCA_KIND comes from the
		// kernel, but nothing guarantees it fits in IFNAMSIZ.
		qdisc_l = nla_len(attrs[TCA_KIND]);
		if(qdisc_l > IFNAMSIZ)
			qdisc_l = IFNAMSIZ;
		memcpy(qdisc, nla_get_string(attrs[TCA_KIND]), qdisc_l);
		qdisc[IFNAMSIZ-1] = '\0';
		add_record_str(&rset, "qdisc", sizeof("qdisc"), qdisc, qdisc_l);
	}
	add_record_hex(&rset, "handle", sizeof("handle"), tcm->tcm_handle >> 16);

	if(attrs[TCA_STATS2]) {
		nla_parse_nested(stat_attrs, TCA_STATS_MAX, attrs[TCA_STATS2],
				 tca_stats_policy);

		if(stat_attrs[TCA_STATS_BASIC]) {
			sb = nla_data(stat_attrs[TCA_STATS_BASIC]);
			add_record_uint(&rset, "bytes", sizeof("bytes"), sb->bytes);
			add_record_uint(&rset, "packets", sizeof("packets"), sb->packets);
		}

		if(stat_attrs[TCA_STATS_QUEUE]) {
			q = nla_data(stat_attrs[TCA_STATS_QUEUE]);
			add_record_uint(&rset, "drops", sizeof("drops"), q->drops);
			add_record_uint(&rset, "qlen", sizeof("qlen"), q->qlen);
			add_record_uint(&rset, "backlog", sizeof("backlog"), q->backlog);
			add_record_uint(&rset, "overlimits", sizeof("overlimits"), q->overlimits);
			add_record_uint(&rset, "requeues", sizeof("requeues"), q->requeues);
		}

		if(stat_attrs[TCA_STATS_APP]) {
			h = find_qdisc_handler(qdisc);
			if(h)
				h->parse_stats(stat_attrs[TCA_STATS_APP], &rset);
		}
	}

	opt->formatter->format(opt->formatter, &rset);
	clear_records(&rset);

	if(!opt->run_length || current_time.tv_sec < opt->start_time.tv_sec + opt->run_length)
		return NL_OK;
	else
		return NL_STOP;
}
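/*
 * Editor's note: libnl also ships a bounded copy helper for string
 * attributes, so the clamp above can be written more directly as a sketch
 * (same IFNAMSIZ assumption as the handler above):
 *
 *	qdisc_l = nla_strlcpy(qdisc, attrs[TCA_KIND], IFNAMSIZ) + 1;
 *
 * nla_strlcpy() always NUL-terminates the destination and returns the
 * length of the source string, mirroring strlcpy().
 */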
	int err, rem_limit, rem_mode;
	int combination_has_p2p = 0, combination_has_mgd = 0;
	static struct nla_policy
	iface_combination_policy[NUM_NL80211_IFACE_COMB] = {
		[NL80211_IFACE_COMB_LIMITS] = { .type = NLA_NESTED },
		[NL80211_IFACE_COMB_MAXNUM] = { .type = NLA_U32 },
		[NL80211_IFACE_COMB_STA_AP_BI_MATCH] = { .type = NLA_FLAG },
		[NL80211_IFACE_COMB_NUM_CHANNELS] = { .type = NLA_U32 },
		[NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS] = { .type = NLA_U32 },
	},
	iface_limit_policy[NUM_NL80211_IFACE_LIMIT] = {
		[NL80211_IFACE_LIMIT_TYPES] = { .type = NLA_NESTED },
		[NL80211_IFACE_LIMIT_MAX] = { .type = NLA_U32 },
	};

	err = nla_parse_nested(tb_comb, MAX_NL80211_IFACE_COMB,
			       nl_combi, iface_combination_policy);
	if (err || !tb_comb[NL80211_IFACE_COMB_LIMITS] ||
	    !tb_comb[NL80211_IFACE_COMB_MAXNUM] ||
	    !tb_comb[NL80211_IFACE_COMB_NUM_CHANNELS])
		return 0; /* broken combination */

	if (tb_comb[NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS])
		info->capa->flags |= WPA_DRIVER_FLAGS_RADAR;

	nla_for_each_nested(nl_limit, tb_comb[NL80211_IFACE_COMB_LIMITS],
			    rem_limit) {
		err = nla_parse_nested(tb_limit, MAX_NL80211_IFACE_LIMIT,
				       nl_limit, iface_limit_policy);
		if (err || !tb_limit[NL80211_IFACE_LIMIT_TYPES])
			return 0; /* broken combination */
static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est,
			    struct tc_action *a, int ovr, int bind)
{
	struct nlattr *tb[TCA_SKBEDIT_MAX + 1];
	struct tc_skbedit *parm;
	struct tcf_skbedit *d;
	struct tcf_common *pc;
	u32 flags = 0, *priority = NULL, *mark = NULL;
	u16 *queue_mapping = NULL;
	int ret = 0, err;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_SKBEDIT_MAX, nla, skbedit_policy);
	if (err < 0)
		return err;

	if (tb[TCA_SKBEDIT_PARMS] == NULL)
		return -EINVAL;

	if (tb[TCA_SKBEDIT_PRIORITY] != NULL) {
		flags |= SKBEDIT_F_PRIORITY;
		priority = nla_data(tb[TCA_SKBEDIT_PRIORITY]);
	}

	if (tb[TCA_SKBEDIT_QUEUE_MAPPING] != NULL) {
		flags |= SKBEDIT_F_QUEUE_MAPPING;
		queue_mapping = nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING]);
	}

	if (tb[TCA_SKBEDIT_MARK] != NULL) {
		flags |= SKBEDIT_F_MARK;
		mark = nla_data(tb[TCA_SKBEDIT_MARK]);
	}

	if (!flags)
		return -EINVAL;

	parm = nla_data(tb[TCA_SKBEDIT_PARMS]);

	pc = tcf_hash_check(parm->index, a, bind, &skbedit_hash_info);
	if (!pc) {
		pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind,
				     &skbedit_idx_gen, &skbedit_hash_info);
		if (IS_ERR(pc))
			return PTR_ERR(pc);

		d = to_skbedit(pc);
		ret = ACT_P_CREATED;
	} else {
		d = to_skbedit(pc);
		if (!ovr) {
			tcf_hash_release(pc, bind, &skbedit_hash_info);
			return -EEXIST;
		}
	}

	spin_lock_bh(&d->tcf_lock);

	d->flags = flags;
	if (flags & SKBEDIT_F_PRIORITY)
		d->priority = *priority;
	if (flags & SKBEDIT_F_QUEUE_MAPPING)
		d->queue_mapping = *queue_mapping;
	if (flags & SKBEDIT_F_MARK)
		d->mark = *mark;

	d->tcf_action = parm->action;

	spin_unlock_bh(&d->tcf_lock);

	if (ret == ACT_P_CREATED)
		tcf_hash_insert(pc, &skbedit_hash_info);
	return ret;
}
static int tipc_nl_compat_name_table_dump(struct tipc_nl_compat_msg *msg,
					  struct nlattr **attrs)
{
	char port_str[27];
	struct tipc_name_table_query *ntq;
	struct nlattr *nt[TIPC_NLA_NAME_TABLE_MAX + 1];
	struct nlattr *publ[TIPC_NLA_PUBL_MAX + 1];
	u32 node, depth, type, lowbound, upbound;
	static const char * const scope_str[] = {"", " zone", " cluster",
						 " node"};
	int err;

	/* check the nested attributes and the parse results before use;
	 * the original dropped both return values on the floor */
	if (!attrs[TIPC_NLA_NAME_TABLE])
		return -EINVAL;

	err = nla_parse_nested(nt, TIPC_NLA_NAME_TABLE_MAX,
			       attrs[TIPC_NLA_NAME_TABLE], NULL);
	if (err)
		return err;

	if (!nt[TIPC_NLA_NAME_TABLE_PUBL])
		return -EINVAL;

	err = nla_parse_nested(publ, TIPC_NLA_PUBL_MAX,
			       nt[TIPC_NLA_NAME_TABLE_PUBL], NULL);
	if (err)
		return err;

	ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);

	depth = ntohl(ntq->depth);
	type = ntohl(ntq->type);
	lowbound = ntohl(ntq->lowbound);
	upbound = ntohl(ntq->upbound);

	if (!(depth & TIPC_NTQ_ALLTYPES) &&
	    (type != nla_get_u32(publ[TIPC_NLA_PUBL_TYPE])))
		return 0;
	if (lowbound && (lowbound > nla_get_u32(publ[TIPC_NLA_PUBL_UPPER])))
		return 0;
	if (upbound && (upbound < nla_get_u32(publ[TIPC_NLA_PUBL_LOWER])))
		return 0;

	tipc_tlv_sprintf(msg->rep, "%-10u ",
			 nla_get_u32(publ[TIPC_NLA_PUBL_TYPE]));

	if (depth == 1)
		goto out;

	tipc_tlv_sprintf(msg->rep, "%-10u %-10u ",
			 nla_get_u32(publ[TIPC_NLA_PUBL_LOWER]),
			 nla_get_u32(publ[TIPC_NLA_PUBL_UPPER]));

	if (depth == 2)
		goto out;

	node = nla_get_u32(publ[TIPC_NLA_PUBL_NODE]);
	sprintf(port_str, "<%u.%u.%u:%u>", tipc_zone(node),
		tipc_cluster(node), tipc_node(node),
		nla_get_u32(publ[TIPC_NLA_PUBL_REF]));

	tipc_tlv_sprintf(msg->rep, "%-26s ", port_str);

	if (depth == 3)
		goto out;

	tipc_tlv_sprintf(msg->rep, "%-10u %s",
			 nla_get_u32(publ[TIPC_NLA_PUBL_REF]),
			 scope_str[nla_get_u32(publ[TIPC_NLA_PUBL_SCOPE])]);
out:
	tipc_tlv_sprintf(msg->rep, "\n");

	return 0;
}
static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
					 struct nlattr **attrs)
{
	char *name;
	struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
	struct nlattr *prop[TIPC_NLA_PROP_MAX + 1];
	struct nlattr *stats[TIPC_NLA_STATS_MAX + 1];
	int err;

	/* check the nested attributes and the parse results before use;
	 * the original dropped all three return values on the floor */
	if (!attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK],
			       NULL);
	if (err)
		return err;

	if (!link[TIPC_NLA_LINK_PROP])
		return -EINVAL;

	err = nla_parse_nested(prop, TIPC_NLA_PROP_MAX,
			       link[TIPC_NLA_LINK_PROP], NULL);
	if (err)
		return err;

	if (!link[TIPC_NLA_LINK_STATS])
		return -EINVAL;

	err = nla_parse_nested(stats, TIPC_NLA_STATS_MAX,
			       link[TIPC_NLA_LINK_STATS], NULL);
	if (err)
		return err;

	name = (char *)TLV_DATA(msg->req);
	if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0)
		return 0;

	tipc_tlv_sprintf(msg->rep, "\nLink <%s>\n",
			 nla_data(link[TIPC_NLA_LINK_NAME]));

	if (link[TIPC_NLA_LINK_BROADCAST]) {
		__fill_bc_link_stat(msg, prop, stats);
		return 0;
	}

	if (link[TIPC_NLA_LINK_ACTIVE])
		tipc_tlv_sprintf(msg->rep, " ACTIVE");
	else if (link[TIPC_NLA_LINK_UP])
		tipc_tlv_sprintf(msg->rep, " STANDBY");
	else
		tipc_tlv_sprintf(msg->rep, " DEFUNCT");

	tipc_tlv_sprintf(msg->rep, " MTU:%u Priority:%u",
			 nla_get_u32(link[TIPC_NLA_LINK_MTU]),
			 nla_get_u32(prop[TIPC_NLA_PROP_PRIO]));

	tipc_tlv_sprintf(msg->rep, " Tolerance:%u ms Window:%u packets\n",
			 nla_get_u32(prop[TIPC_NLA_PROP_TOL]),
			 nla_get_u32(prop[TIPC_NLA_PROP_WIN]));

	tipc_tlv_sprintf(msg->rep,
			 " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
			 nla_get_u32(link[TIPC_NLA_LINK_RX]) -
			 nla_get_u32(stats[TIPC_NLA_STATS_RX_INFO]),
			 nla_get_u32(stats[TIPC_NLA_STATS_RX_FRAGMENTS]),
			 nla_get_u32(stats[TIPC_NLA_STATS_RX_FRAGMENTED]),
			 nla_get_u32(stats[TIPC_NLA_STATS_RX_BUNDLES]),
			 nla_get_u32(stats[TIPC_NLA_STATS_RX_BUNDLED]));

	tipc_tlv_sprintf(msg->rep,
			 " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
			 nla_get_u32(link[TIPC_NLA_LINK_TX]) -
			 nla_get_u32(stats[TIPC_NLA_STATS_TX_INFO]),
			 nla_get_u32(stats[TIPC_NLA_STATS_TX_FRAGMENTS]),
			 nla_get_u32(stats[TIPC_NLA_STATS_TX_FRAGMENTED]),
			 nla_get_u32(stats[TIPC_NLA_STATS_TX_BUNDLES]),
			 nla_get_u32(stats[TIPC_NLA_STATS_TX_BUNDLED]));

	tipc_tlv_sprintf(msg->rep,
			 " TX profile sample:%u packets average:%u octets\n",
			 nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_CNT]),
			 nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_TOT]) /
			 nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT]));

	tipc_tlv_sprintf(msg->rep,
			 " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% ",
			 perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P0]),
			      nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])),
			 perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P1]),
			      nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])),
			 perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P2]),
			      nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])),
			 perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P3]),
			      nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])));

	tipc_tlv_sprintf(msg->rep, "-16384:%u%% -32768:%u%% -66000:%u%%\n",
			 perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P4]),
			      nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])),
			 perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P5]),
			      nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])),
			 perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P6]),
			      nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])));

	tipc_tlv_sprintf(msg->rep,
			 " RX states:%u probes:%u naks:%u defs:%u dups:%u\n",
			 nla_get_u32(stats[TIPC_NLA_STATS_RX_STATES]),
			 nla_get_u32(stats[TIPC_NLA_STATS_RX_PROBES]),
			 nla_get_u32(stats[TIPC_NLA_STATS_RX_NACKS]),
			 nla_get_u32(stats[TIPC_NLA_STATS_RX_DEFERRED]),
			 nla_get_u32(stats[TIPC_NLA_STATS_DUPLICATES]));

	tipc_tlv_sprintf(msg->rep,
			 " TX states:%u probes:%u naks:%u acks:%u dups:%u\n",
			 nla_get_u32(stats[TIPC_NLA_STATS_TX_STATES]),
			 nla_get_u32(stats[TIPC_NLA_STATS_TX_PROBES]),
			 nla_get_u32(stats[TIPC_NLA_STATS_TX_NACKS]),
			 nla_get_u32(stats[TIPC_NLA_STATS_TX_ACKS]),
			 nla_get_u32(stats[TIPC_NLA_STATS_RETRANSMITTED]));

	tipc_tlv_sprintf(msg->rep,
			 " Congestion link:%u Send queue max:%u avg:%u",
			 nla_get_u32(stats[TIPC_NLA_STATS_LINK_CONGS]),
			 nla_get_u32(stats[TIPC_NLA_STATS_MAX_QUEUE]),
			 nla_get_u32(stats[TIPC_NLA_STATS_AVG_QUEUE]));

	return 0;
}
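/*
 * Editor's note: the perc() helper used throughout the dump above is, in
 * the upstream TIPC compat code of this era, essentially a rounded integer
 * percentage (reconstructed here, treat as a sketch). Note it divides by
 * the profile total, so a zero MSG_PROF_TOT counter would be a problem for
 * both this helper and the "average octets" line above.
 */
static inline u32 perc(u32 count, u32 total)
{
	return (count * 100 + (total / 2)) / total;
}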
static int tcf_bpf_init(struct net *net, struct nlattr *nla,
			struct nlattr *est, struct tc_action *act,
			int replace, int bind)
{
	struct tc_action_net *tn = net_generic(net, bpf_net_id);
	struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
	struct tcf_bpf_cfg cfg, old;
	struct tc_act_bpf *parm;
	struct tcf_bpf *prog;
	bool is_bpf, is_ebpf;
	int ret, res = 0;

	if (!nla)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_ACT_BPF_MAX, nla, act_bpf_policy);
	if (ret < 0)
		return ret;

	if (!tb[TCA_ACT_BPF_PARMS])
		return -EINVAL;

	parm = nla_data(tb[TCA_ACT_BPF_PARMS]);

	if (!tcf_hash_check(tn, parm->index, act, bind)) {
		ret = tcf_hash_create(tn, parm->index, est, act,
				      sizeof(*prog), bind, true);
		if (ret < 0)
			return ret;

		res = ACT_P_CREATED;
	} else {
		/* Don't override defaults. */
		if (bind)
			return 0;

		tcf_hash_release(act, bind);
		if (!replace)
			return -EEXIST;
	}

	is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS];
	is_ebpf = tb[TCA_ACT_BPF_FD];

	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf)) {
		ret = -EINVAL;
		goto out;
	}

	memset(&cfg, 0, sizeof(cfg));

	ret = is_bpf ? tcf_bpf_init_from_ops(tb, &cfg) :
		       tcf_bpf_init_from_efd(tb, &cfg);
	if (ret < 0)
		goto out;

	prog = to_bpf(act);
	ASSERT_RTNL();

	if (res != ACT_P_CREATED)
		tcf_bpf_prog_fill_cfg(prog, &old);

	prog->bpf_ops = cfg.bpf_ops;
	prog->bpf_name = cfg.bpf_name;

	if (cfg.bpf_num_ops)
		prog->bpf_num_ops = cfg.bpf_num_ops;
	if (cfg.bpf_fd)
		prog->bpf_fd = cfg.bpf_fd;

	prog->tcf_action = parm->action;
	rcu_assign_pointer(prog->filter, cfg.filter);

	if (res == ACT_P_CREATED) {
		tcf_hash_insert(tn, act);
	} else {
		/* make sure the program being replaced is no longer executing */
		synchronize_rcu();
		tcf_bpf_cfg_cleanup(&old);
	}

	return res;
out:
	if (res == ACT_P_CREATED)
		tcf_hash_cleanup(act, est);

	return ret;
}
static int tcf_mirred_init(struct nlattr *nla, struct nlattr *est,
			   struct tc_action *a, int ovr, int bind)
{
	struct nlattr *tb[TCA_MIRRED_MAX + 1];
	struct tc_mirred *parm;
	struct tcf_mirred *m;
	struct tcf_common *pc;
	struct net_device *dev = NULL;
	int ret = 0, err;
	int ok_push = 0;

	if (nla == NULL)
		return -EINVAL;
	err = nla_parse_nested(tb, TCA_MIRRED_MAX, nla, mirred_policy);
	if (err < 0)
		return err;
	if (tb[TCA_MIRRED_PARMS] == NULL)
		return -EINVAL;
	parm = nla_data(tb[TCA_MIRRED_PARMS]);
	if (parm->ifindex) {
		dev = __dev_get_by_index(&init_net, parm->ifindex);
		if (dev == NULL)
			return -ENODEV;
		switch (dev->type) {
		case ARPHRD_TUNNEL:
		case ARPHRD_TUNNEL6:
		case ARPHRD_SIT:
		case ARPHRD_IPGRE:
		case ARPHRD_VOID:
		case ARPHRD_NONE:
			ok_push = 0;
			break;
		default:
			ok_push = 1;
			break;
		}
	}

	pc = tcf_hash_check(parm->index, a, bind, &mirred_hash_info);
	if (!pc) {
		if (!parm->ifindex)
			return -EINVAL;
		pc = tcf_hash_create(parm->index, est, a, sizeof(*m), bind,
				     &mirred_idx_gen, &mirred_hash_info);
		if (IS_ERR(pc))
			return PTR_ERR(pc);
		ret = ACT_P_CREATED;
	} else {
		if (!ovr) {
			tcf_mirred_release(to_mirred(pc), bind);
			return -EEXIST;
		}
	}
	m = to_mirred(pc);

	spin_lock_bh(&m->tcf_lock);
	m->tcf_action = parm->action;
	m->tcfm_eaction = parm->eaction;
	if (parm->ifindex) {
		m->tcfm_ifindex = parm->ifindex;
		if (ret != ACT_P_CREATED)
			dev_put(m->tcfm_dev);
		m->tcfm_dev = dev;
		dev_hold(dev);
		m->tcfm_ok_push = ok_push;
	}
	spin_unlock_bh(&m->tcf_lock);
	if (ret == ACT_P_CREATED)
		tcf_hash_insert(pc, &mirred_hash_info);

	return ret;
}
static int inet6_parse_protinfo(struct rtnl_link *link, struct nlattr *attr,
				void *data)
{
	struct inet6_data *i6 = data;
	struct nlattr *tb[IFLA_INET6_MAX+1];
	int err;

	err = nla_parse_nested(tb, IFLA_INET6_MAX, attr, inet6_policy);
	if (err < 0)
		return err;

	if (tb[IFLA_INET6_CONF] && nla_len(tb[IFLA_INET6_CONF]) % 4)
		return -EINVAL;
	if (tb[IFLA_INET6_STATS] && nla_len(tb[IFLA_INET6_STATS]) % 8)
		return -EINVAL;
	if (tb[IFLA_INET6_ICMP6STATS] && nla_len(tb[IFLA_INET6_ICMP6STATS]) % 8)
		return -EINVAL;

	if (tb[IFLA_INET6_FLAGS])
		i6->i6_flags = nla_get_u32(tb[IFLA_INET6_FLAGS]);

	if (tb[IFLA_INET6_CACHEINFO])
		nla_memcpy(&i6->i6_cacheinfo, tb[IFLA_INET6_CACHEINFO],
			   sizeof(i6->i6_cacheinfo));

	if (tb[IFLA_INET6_CONF])
		nla_memcpy(&i6->i6_conf, tb[IFLA_INET6_CONF],
			   sizeof(i6->i6_conf));

	if (tb[IFLA_INET6_TOKEN])
		nla_memcpy(&i6->i6_token, tb[IFLA_INET6_TOKEN],
			   sizeof(struct in6_addr));

	if (tb[IFLA_INET6_ADDR_GEN_MODE])
		i6->i6_addr_gen_mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);

	/*
	 * Due to 32bit data alignment, these addresses must be copied to an
	 * aligned location prior to access.
	 */
	if (tb[IFLA_INET6_STATS]) {
		unsigned char *cnt = nla_data(tb[IFLA_INET6_STATS]);
		uint64_t stat;
		int i;
		int len = nla_len(tb[IFLA_INET6_STATS]) / 8;
		const uint8_t *map_stat_id = map_stat_id_from_IPSTATS_MIB_v2;

		if (len < 32 ||
		    (tb[IFLA_INET6_ICMP6STATS] &&
		     nla_len(tb[IFLA_INET6_ICMP6STATS]) < 6)) {
			/* kernel commit 14a196807482e6fc74f15fc03176d5c08880588f
			 * reordered the values. The later commit
			 * 6a5dc9e598fe90160fee7de098fa319665f5253e added values
			 * IPSTATS_MIB_CSUMERRORS/ICMP6_MIB_CSUMERRORS. If the
			 * netlink is shorter than this, assume that the kernel
			 * uses the previous meaning of the enumeration. */
			map_stat_id = map_stat_id_from_IPSTATS_MIB_v1;
		}

		len = min_t(int, __IPSTATS_MIB_MAX, len);
		for (i = 1; i < len; i++) {
			memcpy(&stat, &cnt[i * sizeof(stat)], sizeof(stat));
			rtnl_link_set_stat(link, map_stat_id[i], stat);
		}
	}

	if (tb[IFLA_INET6_ICMP6STATS]) {
		unsigned char *cnt = nla_data(tb[IFLA_INET6_ICMP6STATS]);
		uint64_t stat;
		int i;
		int len = min_t(int, __ICMP6_MIB_MAX,
				nla_len(tb[IFLA_INET6_ICMP6STATS]) / 8);

		for (i = 1; i < len; i++) {
			memcpy(&stat, &cnt[i * sizeof(stat)], sizeof(stat));
			rtnl_link_set_stat(link, RTNL_LINK_ICMP6_INMSGS + i - 1,
					   stat);
		}
	}

	return 0;
}
static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   int ovr, int bind)
{
	struct tc_action_net *tn = net_generic(net, skbmod_net_id);
	struct nlattr *tb[TCA_SKBMOD_MAX + 1];
	struct tcf_skbmod_params *p, *p_old;
	struct tc_skbmod *parm;
	struct tcf_skbmod *d;
	bool exists = false;
	u8 *daddr = NULL;
	u8 *saddr = NULL;
	u16 eth_type = 0;
	u32 lflags = 0;
	int ret = 0, err;

	if (!nla)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_SKBMOD_MAX, nla, skbmod_policy);
	if (err < 0)
		return err;

	if (!tb[TCA_SKBMOD_PARMS])
		return -EINVAL;

	if (tb[TCA_SKBMOD_DMAC]) {
		daddr = nla_data(tb[TCA_SKBMOD_DMAC]);
		lflags |= SKBMOD_F_DMAC;
	}

	if (tb[TCA_SKBMOD_SMAC]) {
		saddr = nla_data(tb[TCA_SKBMOD_SMAC]);
		lflags |= SKBMOD_F_SMAC;
	}

	if (tb[TCA_SKBMOD_ETYPE]) {
		eth_type = nla_get_u16(tb[TCA_SKBMOD_ETYPE]);
		lflags |= SKBMOD_F_ETYPE;
	}

	parm = nla_data(tb[TCA_SKBMOD_PARMS]);
	if (parm->flags & SKBMOD_F_SWAPMAC)
		lflags = SKBMOD_F_SWAPMAC;

	exists = tcf_hash_check(tn, parm->index, a, bind);
	if (exists && bind)
		return 0;

	if (!lflags)
		return -EINVAL;

	if (!exists) {
		ret = tcf_hash_create(tn, parm->index, est, a,
				      &act_skbmod_ops, bind, true);
		if (ret)
			return ret;

		ret = ACT_P_CREATED;
	} else {
		tcf_hash_release(*a, bind);
		if (!ovr)
			return -EEXIST;
	}

	d = to_skbmod(*a);

	ASSERT_RTNL();
	p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL);
	if (unlikely(!p)) {
		if (ovr)
			tcf_hash_release(*a, bind);
		return -ENOMEM;
	}

	p->flags = lflags;
	d->tcf_action = parm->action;

	p_old = rtnl_dereference(d->skbmod_p);

	if (ovr)
		spin_lock_bh(&d->tcf_lock);

	if (lflags & SKBMOD_F_DMAC)
		ether_addr_copy(p->eth_dst, daddr);
	if (lflags & SKBMOD_F_SMAC)
		ether_addr_copy(p->eth_src, saddr);
	if (lflags & SKBMOD_F_ETYPE)
		p->eth_type = htons(eth_type);

	rcu_assign_pointer(d->skbmod_p, p);
	if (ovr)
		spin_unlock_bh(&d->tcf_lock);

	if (p_old)
		kfree_rcu(p_old, rcu);

	if (ret == ACT_P_CREATED)
		tcf_hash_insert(tn, *a);
	return ret;
}
static int tcf_act_police_init(struct net *net, struct nlattr *nla,
			       struct nlattr *est, struct tc_action **a,
			       int ovr, int bind)
{
	int ret = 0, err;
	struct nlattr *tb[TCA_POLICE_MAX + 1];
	struct tc_police *parm;
	struct tcf_police *police;
	struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
	struct tc_action_net *tn = net_generic(net, police_net_id);
	bool exists = false;
	int size;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_POLICE_MAX, nla, police_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_POLICE_TBF] == NULL)
		return -EINVAL;
	size = nla_len(tb[TCA_POLICE_TBF]);
	if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat))
		return -EINVAL;

	parm = nla_data(tb[TCA_POLICE_TBF]);
	exists = tcf_idr_check(tn, parm->index, a, bind);
	if (exists && bind)
		return 0;

	if (!exists) {
		ret = tcf_idr_create(tn, parm->index, NULL, a,
				     &act_police_ops, bind, false);
		if (ret)
			return ret;
		ret = ACT_P_CREATED;
	} else {
		tcf_idr_release(*a, bind);
		if (!ovr)
			return -EEXIST;
	}

	police = to_police(*a);
	if (parm->rate.rate) {
		err = -ENOMEM;
		R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE], NULL);
		if (R_tab == NULL)
			goto failure;

		if (parm->peakrate.rate) {
			P_tab = qdisc_get_rtab(&parm->peakrate,
					       tb[TCA_POLICE_PEAKRATE], NULL);
			if (P_tab == NULL)
				goto failure;
		}
	}

	if (est) {
		err = gen_replace_estimator(&police->tcf_bstats, NULL,
					    &police->tcf_rate_est,
					    &police->tcf_lock,
					    NULL, est);
		if (err)
			goto failure;
	} else if (tb[TCA_POLICE_AVRATE] &&
		   (ret == ACT_P_CREATED ||
		    !gen_estimator_active(&police->tcf_rate_est))) {
		err = -EINVAL;
		goto failure;
	}

	spin_lock_bh(&police->tcf_lock);
	/* No failure allowed after this point */
	police->tcfp_mtu = parm->mtu;
	if (police->tcfp_mtu == 0) {
		police->tcfp_mtu = ~0;
		if (R_tab)
			police->tcfp_mtu = 255 << R_tab->rate.cell_log;
	}
	if (R_tab) {
		police->rate_present = true;
		psched_ratecfg_precompute(&police->rate, &R_tab->rate, 0);
		qdisc_put_rtab(R_tab);
	} else {
		police->rate_present = false;
	}
	if (P_tab) {
		police->peak_present = true;
		psched_ratecfg_precompute(&police->peak, &P_tab->rate, 0);
		qdisc_put_rtab(P_tab);
	} else {
		police->peak_present = false;
	}

	if (tb[TCA_POLICE_RESULT])
		police->tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]);
	police->tcfp_burst = PSCHED_TICKS2NS(parm->burst);
	police->tcfp_toks = police->tcfp_burst;
	if (police->peak_present) {
		police->tcfp_mtu_ptoks = (s64) psched_l2t_ns(&police->peak,
							     police->tcfp_mtu);
		police->tcfp_ptoks = police->tcfp_mtu_ptoks;
	}
	police->tcf_action = parm->action;

	if (tb[TCA_POLICE_AVRATE])
		police->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]);

	spin_unlock_bh(&police->tcf_lock);
	if (ret != ACT_P_CREATED)
		return ret;

	police->tcfp_t_c = ktime_get_ns();
	tcf_idr_insert(tn, *a);

	return ret;

failure:
	qdisc_put_rtab(P_tab);
	qdisc_put_rtab(R_tab);
	if (ret == ACT_P_CREATED)
		tcf_idr_cleanup(*a, est);
	return err;
}
static int choke_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CHOKE_MAX + 1];
	const struct tc_red_qopt *ctl;
	int err;
	struct sk_buff **old = NULL;
	unsigned int mask;
	u32 max_P;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CHOKE_MAX, opt, choke_policy);
	if (err < 0)
		return err;

	if (tb[TCA_CHOKE_PARMS] == NULL ||
	    tb[TCA_CHOKE_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_CHOKE_PARMS]);

	if (ctl->limit > CHOKE_MAX_QUEUE)
		return -EINVAL;

	mask = roundup_pow_of_two(ctl->limit + 1) - 1;
	if (mask != q->tab_mask) {
		struct sk_buff **ntab;

		ntab = kcalloc(mask + 1, sizeof(struct sk_buff *),
			       GFP_KERNEL | __GFP_NOWARN);
		if (!ntab)
			ntab = vzalloc((mask + 1) * sizeof(struct sk_buff *));
		if (!ntab)
			return -ENOMEM;

		sch_tree_lock(sch);
		old = q->tab;
		if (old) {
			unsigned int oqlen = sch->q.qlen, tail = 0;

			while (q->head != q->tail) {
				struct sk_buff *skb = q->tab[q->head];

				q->head = (q->head + 1) & q->tab_mask;
				if (!skb)
					continue;
				if (tail < mask) {
					ntab[tail++] = skb;
					continue;
				}
				qdisc_qstats_backlog_dec(sch, skb);
				--sch->q.qlen;
				qdisc_drop(skb, sch);
			}
			qdisc_tree_decrease_qlen(sch, oqlen - sch->q.qlen);
			q->head = 0;
			q->tail = tail;
		}

		q->tab_mask = mask;
		q->tab = ntab;
	} else
		sch_tree_lock(sch);

	q->flags = ctl->flags;
	q->limit = ctl->limit;

	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      nla_data(tb[TCA_CHOKE_STAB]),
		      max_P);
	red_set_vars(&q->vars);

	if (q->head == q->tail)
		red_end_of_idle_period(&q->vars);

	sch_tree_unlock(sch);
	choke_free(old);
	return 0;
}
static struct tcf_pedit_key_ex *tcf_pedit_keys_ex_parse(struct nlattr *nla,
							u8 n)
{
	struct tcf_pedit_key_ex *keys_ex;
	struct tcf_pedit_key_ex *k;
	const struct nlattr *ka;
	int err = -EINVAL;
	int rem;

	if (!nla || !n)
		return NULL;

	keys_ex = kcalloc(n, sizeof(*k), GFP_KERNEL);
	if (!keys_ex)
		return ERR_PTR(-ENOMEM);

	k = keys_ex;

	nla_for_each_nested(ka, nla, rem) {
		struct nlattr *tb[TCA_PEDIT_KEY_EX_MAX + 1];

		if (!n) {
			err = -EINVAL;
			goto err_out;
		}
		n--;

		if (nla_type(ka) != TCA_PEDIT_KEY_EX) {
			err = -EINVAL;
			goto err_out;
		}

		err = nla_parse_nested(tb, TCA_PEDIT_KEY_EX_MAX, ka,
				       pedit_key_ex_policy);
		if (err)
			goto err_out;

		if (!tb[TCA_PEDIT_KEY_EX_HTYPE] ||
		    !tb[TCA_PEDIT_KEY_EX_CMD]) {
			err = -EINVAL;
			goto err_out;
		}

		k->htype = nla_get_u16(tb[TCA_PEDIT_KEY_EX_HTYPE]);
		k->cmd = nla_get_u16(tb[TCA_PEDIT_KEY_EX_CMD]);

		if (k->htype > TCA_PEDIT_HDR_TYPE_MAX ||
		    k->cmd > TCA_PEDIT_CMD_MAX) {
			err = -EINVAL;
			goto err_out;
		}

		k++;
	}

	if (n)
		goto err_out;

	return keys_ex;

err_out:
	kfree(keys_ex);
	return ERR_PTR(err);
}
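/*
 * Editor's sketch, not part of the original source: the wire layout the
 * parser above expects, built from userspace with libnl. The attribute and
 * enum names come from the tc_pedit uapi header; treat the exact constants
 * as an assumption of this sketch rather than a verified recipe.
 */
static int put_one_pedit_key_ex(struct nl_msg *msg)
{
	struct nlattr *keys, *key;

	/* TCA_PEDIT_KEYS_EX is a nest of TCA_PEDIT_KEY_EX entries,
	 * one per key announced in parm->nkeys */
	keys = nla_nest_start(msg, TCA_PEDIT_KEYS_EX);
	if (!keys)
		return -NLE_NOMEM;

	key = nla_nest_start(msg, TCA_PEDIT_KEY_EX);
	if (!key)
		return -NLE_NOMEM;
	/* one extended key: operate on the IPv4 header with SET semantics */
	nla_put_u16(msg, TCA_PEDIT_KEY_EX_HTYPE, TCA_PEDIT_KEY_EX_HDR_TYPE_IP4);
	nla_put_u16(msg, TCA_PEDIT_KEY_EX_CMD, TCA_PEDIT_KEY_EX_CMD_SET);
	nla_nest_end(msg, key);

	nla_nest_end(msg, keys);
	return 0;
}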
struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est,
				    char *name, int ovr, int bind)
{
	struct tc_action *a;
	struct tc_action_ops *a_o;
	char act_name[IFNAMSIZ];
	struct nlattr *tb[TCA_ACT_MAX+1];
	struct nlattr *kind;
	int err;

	if (name == NULL) {
		err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL);
		if (err < 0)
			goto err_out;
		err = -EINVAL;
		kind = tb[TCA_ACT_KIND];
		if (kind == NULL)
			goto err_out;
		if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ)
			goto err_out;
	} else {
		err = -EINVAL;
		if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ)
			goto err_out;
	}

	a_o = tc_lookup_action_n(act_name);
	if (a_o == NULL) {
#ifdef CONFIG_MODULES
		rtnl_unlock();
		request_module("act_%s", act_name);
		rtnl_lock();

		a_o = tc_lookup_action_n(act_name);

		/* We dropped the RTNL semaphore in order to
		 * perform the module load.  So, even if we
		 * succeeded in loading the module we have to
		 * tell the caller to replay the request.  We
		 * indicate this using -EAGAIN.
		 */
		if (a_o != NULL) {
			err = -EAGAIN;
			goto err_mod;
		}
#endif
		err = -ENOENT;
		goto err_out;
	}

	err = -ENOMEM;
	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (a == NULL)
		goto err_mod;

	/* backward compatibility for policer */
	if (name == NULL)
		err = a_o->init(tb[TCA_ACT_OPTIONS], est, a, ovr, bind);
	else
		err = a_o->init(nla, est, a, ovr, bind);
	if (err < 0)
		goto err_free;

	/* module count goes up only when brand new policy is created
	 * if it exists and is only bound to in a_o->init() then
	 * ACT_P_CREATED is not returned (a zero is).
	 */
	if (err != ACT_P_CREATED)
		module_put(a_o->owner);
	a->ops = a_o;

	return a;

err_free:
	kfree(a);
err_mod:
	module_put(a_o->owner);
err_out:
	return ERR_PTR(err);
}
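/*
 * Editor's sketch, not part of the original source: the -EAGAIN described
 * in the comment above is consumed further up the call chain, which simply
 * replays the whole request once the module is loaded. A minimal retry
 * wrapper over any such initializer looks like this (hypothetical helper):
 */
static int replay_on_eagain(int (*do_request)(void *ctx), void *ctx)
{
	int ret;

	do {
		/* do_request() eventually reaches tcf_action_init_1(); on
		 * -EAGAIN the module is now loaded, so one more pass
		 * resolves the action ops without dropping RTNL again */
		ret = do_request(ctx);
	} while (ret == -EAGAIN);

	return ret;
}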
static int ipvs_services_parse_cb(struct nl_msg *msg, void *arg)
{
	struct nlmsghdr *nlh = nlmsg_hdr(msg);
	struct nlattr *attrs[IPVS_CMD_ATTR_MAX + 1];
	struct nlattr *svc_attrs[IPVS_SVC_ATTR_MAX + 1];
	struct ip_vs_get_services **getp = (struct ip_vs_get_services **)arg;
	struct ip_vs_get_services *get = (struct ip_vs_get_services *)*getp;
	struct ip_vs_get_services *tmp;
	struct ip_vs_flags flags;
	int i = get->num_services;

	if (genlmsg_parse(nlh, 0, attrs, IPVS_CMD_ATTR_MAX, ipvs_cmd_policy) != 0)
		return -1;

	if (!attrs[IPVS_CMD_ATTR_SERVICE])
		return -1;

	if (nla_parse_nested(svc_attrs, IPVS_SVC_ATTR_MAX,
			     attrs[IPVS_CMD_ATTR_SERVICE], ipvs_service_policy))
		return -1;

	memset(&(get->entrytable[i]), 0, sizeof(get->entrytable[i]));

	if (!(svc_attrs[IPVS_SVC_ATTR_AF] &&
	      (svc_attrs[IPVS_SVC_ATTR_FWMARK] ||
	       (svc_attrs[IPVS_SVC_ATTR_PROTOCOL] &&
		svc_attrs[IPVS_SVC_ATTR_ADDR] &&
		svc_attrs[IPVS_SVC_ATTR_PORT])) &&
	      svc_attrs[IPVS_SVC_ATTR_SCHED_NAME] &&
	      svc_attrs[IPVS_SVC_ATTR_NETMASK] &&
	      svc_attrs[IPVS_SVC_ATTR_TIMEOUT] &&
	      svc_attrs[IPVS_SVC_ATTR_FLAGS]))
		return -1;

	get->entrytable[i].af = nla_get_u16(svc_attrs[IPVS_SVC_ATTR_AF]);

	if (svc_attrs[IPVS_SVC_ATTR_FWMARK])
		get->entrytable[i].fwmark = nla_get_u32(svc_attrs[IPVS_SVC_ATTR_FWMARK]);
	else {
		get->entrytable[i].protocol =
			nla_get_u16(svc_attrs[IPVS_SVC_ATTR_PROTOCOL]);
		memcpy(&(get->entrytable[i].addr),
		       nla_data(svc_attrs[IPVS_SVC_ATTR_ADDR]),
		       sizeof(get->entrytable[i].addr));
		get->entrytable[i].port = nla_get_u16(svc_attrs[IPVS_SVC_ATTR_PORT]);
	}

	/* strncpy() does not guarantee termination; force it */
	strncpy(get->entrytable[i].sched_name,
		nla_get_string(svc_attrs[IPVS_SVC_ATTR_SCHED_NAME]),
		IP_VS_SCHEDNAME_MAXLEN);
	get->entrytable[i].sched_name[IP_VS_SCHEDNAME_MAXLEN - 1] = '\0';
	get->entrytable[i].netmask = nla_get_u32(svc_attrs[IPVS_SVC_ATTR_NETMASK]);
	get->entrytable[i].timeout = nla_get_u32(svc_attrs[IPVS_SVC_ATTR_TIMEOUT]);
	nla_memcpy(&flags, svc_attrs[IPVS_SVC_ATTR_FLAGS], sizeof(flags));
	get->entrytable[i].flags = flags.flags & flags.mask;

	if (ipvs_parse_stats(&(get->entrytable[i].stats),
			     svc_attrs[IPVS_SVC_ATTR_STATS]) != 0)
		return -1;

	get->entrytable[i].num_dests = 0;

	i++;
	get->num_services = i;
	/* grow through a temporary so the old block is not lost (and later
	 * dereferenced as NULL) if realloc() fails */
	tmp = realloc(get, sizeof(*get) +
		      sizeof(ipvs_service_entry_t) * (get->num_services + 1));
	if (!tmp)
		return -1;
	get = tmp;
	*getp = get;
	return 0;
}
static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
{
	struct sk_buff *skb;
	unsigned char *b;
	struct nlmsghdr *nlh;
	struct tcamsg *t;
	struct netlink_callback dcb;
	struct nlattr *nest;
	struct nlattr *tb[TCA_ACT_MAX+1];
	struct nlattr *kind;
	struct tc_action *a = create_a(0);
	int err = -ENOMEM;

	if (a == NULL) {
		pr_debug("tca_action_flush: couldnt create tc_action\n");
		return err;
	}

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb) {
		pr_debug("tca_action_flush: failed skb alloc\n");
		kfree(a);
		return err;
	}

	b = skb_tail_pointer(skb);

	err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	kind = tb[TCA_ACT_KIND];
	a->ops = tc_lookup_action(kind);
	if (a->ops == NULL)
		goto err_out;

	nlh = NLMSG_PUT(skb, pid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t));
	t = NLMSG_DATA(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto nla_put_failure;

	err = a->ops->walk(skb, &dcb, RTM_DELACTION, a);
	if (err < 0)
		goto nla_put_failure;
	if (err == 0)
		goto noflush_out;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	nlh->nlmsg_flags |= NLM_F_ROOT;
	module_put(a->ops->owner);
	kfree(a);
	err = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC,
			     n->nlmsg_flags&NLM_F_ECHO);
	if (err > 0)
		return 0;

	return err;

nla_put_failure:
nlmsg_failure:
	module_put(a->ops->owner);
err_out:
noflush_out:
	kfree_skb(skb);
	kfree(a);
	return err;
}
int rtnl_tc_msg_parse(struct nlmsghdr *n, struct rtnl_tc *tc)
{
	struct nl_cache *link_cache;
	struct rtnl_tc_ops *ops;
	struct nlattr *tb[TCA_MAX + 1];
	char kind[TCKINDSIZ];
	struct tcmsg *tm;
	int err;

	tc->ce_msgtype = n->nlmsg_type;

	err = nlmsg_parse(n, sizeof(*tm), tb, TCA_MAX, tc_policy);
	if (err < 0)
		return err;

	if (tb[TCA_KIND] == NULL)
		return -NLE_MISSING_ATTR;

	nla_strlcpy(kind, tb[TCA_KIND], sizeof(kind));
	rtnl_tc_set_kind(tc, kind);

	tm = nlmsg_data(n);
	tc->tc_family = tm->tcm_family;
	tc->tc_ifindex = tm->tcm_ifindex;
	tc->tc_handle = tm->tcm_handle;
	tc->tc_parent = tm->tcm_parent;
	tc->tc_info = tm->tcm_info;

	tc->ce_mask |= (TCA_ATTR_FAMILY | TCA_ATTR_IFINDEX | TCA_ATTR_HANDLE |
			TCA_ATTR_PARENT | TCA_ATTR_INFO);

	if (tb[TCA_OPTIONS]) {
		tc->tc_opts = nl_data_alloc_attr(tb[TCA_OPTIONS]);
		if (!tc->tc_opts)
			return -NLE_NOMEM;
		tc->ce_mask |= TCA_ATTR_OPTS;
	}

	if (tb[TCA_STATS2]) {
		struct nlattr *tbs[TCA_STATS_MAX + 1];

		err = nla_parse_nested(tbs, TCA_STATS_MAX, tb[TCA_STATS2],
				       tc_stats2_policy);
		if (err < 0)
			return err;

		if (tbs[TCA_STATS_BASIC]) {
			struct gnet_stats_basic *bs;

			bs = nla_data(tbs[TCA_STATS_BASIC]);
			tc->tc_stats[RTNL_TC_BYTES] = bs->bytes;
			tc->tc_stats[RTNL_TC_PACKETS] = bs->packets;
		}

		if (tbs[TCA_STATS_RATE_EST]) {
			struct gnet_stats_rate_est *re;

			re = nla_data(tbs[TCA_STATS_RATE_EST]);
			tc->tc_stats[RTNL_TC_RATE_BPS] = re->bps;
			tc->tc_stats[RTNL_TC_RATE_PPS] = re->pps;
		}

		if (tbs[TCA_STATS_QUEUE]) {
			struct gnet_stats_queue *q;

			q = nla_data(tbs[TCA_STATS_QUEUE]);
			tc->tc_stats[RTNL_TC_QLEN] = q->qlen;
			tc->tc_stats[RTNL_TC_BACKLOG] = q->backlog;
			tc->tc_stats[RTNL_TC_DROPS] = q->drops;
			tc->tc_stats[RTNL_TC_REQUEUES] = q->requeues;
			tc->tc_stats[RTNL_TC_OVERLIMITS] = q->overlimits;
		}

		tc->ce_mask |= TCA_ATTR_STATS;

		if (tbs[TCA_STATS_APP]) {
			tc->tc_xstats = nl_data_alloc_attr(tbs[TCA_STATS_APP]);
			if (tc->tc_xstats == NULL)
				return -NLE_NOMEM;
		} else
			goto compat_xstats;
	} else {
		if (tb[TCA_STATS]) {
			struct tc_stats *st = nla_data(tb[TCA_STATS]);

			tc->tc_stats[RTNL_TC_BYTES] = st->bytes;
			tc->tc_stats[RTNL_TC_PACKETS] = st->packets;
			tc->tc_stats[RTNL_TC_RATE_BPS] = st->bps;
			tc->tc_stats[RTNL_TC_RATE_PPS] = st->pps;
			tc->tc_stats[RTNL_TC_QLEN] = st->qlen;
			tc->tc_stats[RTNL_TC_BACKLOG] = st->backlog;
			tc->tc_stats[RTNL_TC_DROPS] = st->drops;
			tc->tc_stats[RTNL_TC_OVERLIMITS] = st->overlimits;

			tc->ce_mask |= TCA_ATTR_STATS;
		}

compat_xstats:
		if (tb[TCA_XSTATS]) {
			tc->tc_xstats = nl_data_alloc_attr(tb[TCA_XSTATS]);
			if (tc->tc_xstats == NULL)
				return -NLE_NOMEM;
			tc->ce_mask |= TCA_ATTR_XSTATS;
		}
	}

	ops = rtnl_tc_get_ops(tc);
	if (ops && ops->to_msg_parser) {
		void *data = rtnl_tc_data(tc);

		if (!data)
			return -NLE_NOMEM;

		err = ops->to_msg_parser(tc, data);
		if (err < 0)
			return err;
	}

	if ((link_cache = __nl_cache_mngt_require("route/link"))) {
		struct rtnl_link *link;

		if ((link = rtnl_link_get(link_cache, tc->tc_ifindex))) {
			rtnl_tc_set_link(tc, link);

			/* rtnl_tc_set_link incs refcnt */
			rtnl_link_put(link);
		}
	}

	return 0;
}
static int tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid,
			 int event)
{
	int i, ret;
	struct nlattr *tb[TCA_ACT_MAX_PRIO+1];
	struct tc_action *head = NULL, *act, *act_prev = NULL;

	ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL);
	if (ret < 0)
		return ret;

	if (event == RTM_DELACTION && n->nlmsg_flags&NLM_F_ROOT) {
		if (tb[1] != NULL)
			return tca_action_flush(tb[1], n, pid);
		else
			return -EINVAL;
	}

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_get_1(tb[i], n, pid);
		if (IS_ERR(act)) {
			ret = PTR_ERR(act);
			goto err;
		}
		act->order = i;

		if (head == NULL)
			head = act;
		else
			act_prev->next = act;
		act_prev = act;
	}

	if (event == RTM_GETACTION)
		ret = act_get_notify(pid, n, head, event);
	else { /* delete */
		struct sk_buff *skb;

		skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
		if (!skb) {
			ret = -ENOBUFS;
			goto err;
		}

		if (tca_get_fill(skb, head, pid, n->nlmsg_seq, 0, event,
				 0, 1) <= 0) {
			kfree_skb(skb);
			ret = -EINVAL;
			goto err;
		}

		/* now do the delete */
		tcf_action_destroy(head, 0);
		ret = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC,
				     n->nlmsg_flags&NLM_F_ECHO);
		if (ret > 0)
			return 0;
		return ret;
	}
err:
	cleanup_a(head);
	return ret;
}
static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
{
	int err;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_TBF_PTAB + 1];
	struct tc_tbf_qopt *qopt;
	struct qdisc_rate_table *rtab = NULL;
	struct qdisc_rate_table *ptab = NULL;
	struct Qdisc *child = NULL;
	int max_size, n;

	err = nla_parse_nested(tb, TCA_TBF_PTAB, opt, tbf_policy);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_TBF_PARMS] == NULL)
		goto done;

	qopt = nla_data(tb[TCA_TBF_PARMS]);
	rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_RTAB]);
	if (rtab == NULL)
		goto done;

	if (qopt->peakrate.rate) {
		if (qopt->peakrate.rate > qopt->rate.rate)
			ptab = qdisc_get_rtab(&qopt->peakrate, tb[TCA_TBF_PTAB]);
		if (ptab == NULL)
			goto done;
	}

	for (n = 0; n < 256; n++)
		if (rtab->data[n] > qopt->buffer)
			break;
	max_size = (n << qopt->rate.cell_log) - 1;
	if (ptab) {
		int size;

		for (n = 0; n < 256; n++)
			if (ptab->data[n] > qopt->mtu)
				break;
		size = (n << qopt->peakrate.cell_log) - 1;
		if (size < max_size)
			max_size = size;
	}
	if (max_size < 0)
		goto done;

	if (qopt->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
		if (IS_ERR(child)) {
			err = PTR_ERR(child);
			goto done;
		}
	}

	sch_tree_lock(sch);
	if (child) {
		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
	}
	q->limit = qopt->limit;
	q->mtu = qopt->mtu;
	q->max_size = max_size;
	q->buffer = qopt->buffer;
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	swap(q->R_tab, rtab);
	swap(q->P_tab, ptab);
	sch_tree_unlock(sch);
	err = 0;
done:
	if (rtab)
		qdisc_put_rtab(rtab);
	if (ptab)
		qdisc_put_rtab(ptab);
	return err;
}
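/*
 * Editor's note on the max_size search above (a worked example under the
 * usual rate-table convention, which this sketch assumes): rtab->data[n]
 * holds the time needed to transmit a packet of up to
 * ((n + 1) << cell_log) bytes at the configured rate. The loop stops at
 * the first slot whose transmission time exceeds the token buffer, so
 * every size strictly below (n << cell_log) still conforms, hence
 * max_size = (n << cell_log) - 1. For example, with cell_log = 3 and the
 * loop stopping at n = 190, the largest conforming packet is
 * 190 * 8 - 1 = 1519 bytes. If even slot 0 is too slow (n == 0),
 * max_size goes negative and the configuration is rejected.
 */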
static int tcf_gact_init(struct net *net, struct nlattr *nla,
			 struct nlattr *est, struct tc_action **a,
			 int ovr, int bind)
{
	struct tc_action_net *tn = net_generic(net, gact_net_id);
	struct nlattr *tb[TCA_GACT_MAX + 1];
	struct tc_gact *parm;
	struct tcf_gact *gact;
	int ret = 0;
	int err;
#ifdef CONFIG_GACT_PROB
	struct tc_gact_p *p_parm = NULL;
#endif

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_GACT_MAX, nla, gact_policy);
	if (err < 0)
		return err;

	if (tb[TCA_GACT_PARMS] == NULL)
		return -EINVAL;
	parm = nla_data(tb[TCA_GACT_PARMS]);

#ifndef CONFIG_GACT_PROB
	if (tb[TCA_GACT_PROB] != NULL)
		return -EOPNOTSUPP;
#else
	if (tb[TCA_GACT_PROB]) {
		p_parm = nla_data(tb[TCA_GACT_PROB]);
		if (p_parm->ptype >= MAX_RAND)
			return -EINVAL;
	}
#endif

	if (!tcf_hash_check(tn, parm->index, a, bind)) {
		ret = tcf_hash_create(tn, parm->index, est, a,
				      &act_gact_ops, bind, true);
		if (ret)
			return ret;
		ret = ACT_P_CREATED;
	} else {
		if (bind) /* dont override defaults */
			return 0;
		tcf_hash_release(*a, bind);
		if (!ovr)
			return -EEXIST;
	}

	gact = to_gact(*a);

	ASSERT_RTNL();
	gact->tcf_action = parm->action;
#ifdef CONFIG_GACT_PROB
	if (p_parm) {
		gact->tcfg_paction = p_parm->paction;
		gact->tcfg_pval = max_t(u16, 1, p_parm->pval);
		/* Make sure tcfg_pval is written before tcfg_ptype
		 * coupled with smp_rmb() in gact_net_rand() & gact_determ()
		 */
		smp_wmb();
		gact->tcfg_ptype = p_parm->ptype;
	}
#endif
	if (ret == ACT_P_CREATED)
		tcf_hash_insert(tn, *a);
	return ret;
}
/**
 * virNetDevVPortProfileGetStatus:
 *
 * tb: top level netlink response attributes + values
 * vf: The virtual function used in the request
 * instanceId: instanceId of the interface (vm uuid in case of 802.1Qbh)
 * is8021Qbg: whether this function is called for 8021Qbg
 * status: pointer to a uint16 where the status will be written into
 *
 * Get the status from the IFLA_PORT_RESPONSE field; Returns 0 in
 * case of success, < 0 otherwise with error having been reported
 */
static int
virNetDevVPortProfileGetStatus(struct nlattr **tb, int32_t vf,
                               const unsigned char *instanceId,
                               bool nltarget_kernel,
                               bool is8021Qbg,
                               uint16_t *status)
{
    int rc = -1;
    struct nlattr *tb_port[IFLA_PORT_MAX + 1] = { NULL, };

    if (vf == PORT_SELF_VF && nltarget_kernel) {
        if (tb[IFLA_PORT_SELF]) {
            if (nla_parse_nested(tb_port, IFLA_PORT_MAX, tb[IFLA_PORT_SELF],
                                 ifla_port_policy)) {
                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                               _("error parsing IFLA_PORT_SELF part"));
                goto cleanup;
            }
        } else {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("IFLA_PORT_SELF is missing"));
            goto cleanup;
        }
    } else {
        if (tb[IFLA_VF_PORTS]) {
            int rem;
            bool found = false;
            struct nlattr *tb_vf_ports = { NULL, };

            nla_for_each_nested(tb_vf_ports, tb[IFLA_VF_PORTS], rem) {
                if (nla_type(tb_vf_ports) != IFLA_VF_PORT) {
                    virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                                   _("error while iterating over "
                                     "IFLA_VF_PORTS part"));
                    goto cleanup;
                }

                if (nla_parse_nested(tb_port, IFLA_PORT_MAX, tb_vf_ports,
                                     ifla_port_policy)) {
                    virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                                   _("error parsing IFLA_VF_PORT part"));
                    goto cleanup;
                }

                if (instanceId &&
                    tb_port[IFLA_PORT_INSTANCE_UUID] &&
                    !memcmp(instanceId,
                            (unsigned char *)
                                RTA_DATA(tb_port[IFLA_PORT_INSTANCE_UUID]),
                            VIR_UUID_BUFLEN) &&
                    tb_port[IFLA_PORT_VF] &&
                    vf == *(uint32_t *)RTA_DATA(tb_port[IFLA_PORT_VF])) {
                    found = true;
                    break;
                }
            }

            if (!found) {
                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                               _("Could not find netlink response with "
                                 "expected parameters"));
                goto cleanup;
            }
        } else {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("IFLA_VF_PORTS is missing"));
            goto cleanup;
        }
    }
int mtk_cfg80211_vendor_set_hotlist(struct wiphy *wiphy,
				    struct wireless_dev *wdev,
				    const void *data, int data_len)
{
	/* WLAN_STATUS rStatus; */
	P_GLUE_INFO_T prGlueInfo = NULL;
	CMD_SET_PSCAN_ADD_HOTLIST_BSSID rCmdPscnAddHotlist;
	INT_32 i4Status = -EINVAL;
	P_PARAM_WIFI_BSSID_HOTLIST prWifiHotlistCmd = NULL;
	UINT_8 flush = 0;
	/* struct nlattr *attr[GSCAN_ATTRIBUTE_SIGNIFICANT_CHANGE_FLUSH + 1]; */
	struct nlattr **attr = NULL;
	struct nlattr *paplist;
	int i, k;
	UINT_32 len_basic, len_aplist;

	ASSERT(wiphy);
	ASSERT(wdev);

	if ((data == NULL) || !data_len)
		goto nla_put_failure;

	DBGLOG(REQ, INFO, "%s for vendor command: data_len=%d \r\n", __func__, data_len);
	for (i = 0; i < 5; i++)
		DBGLOG(REQ, LOUD, "0x%x 0x%x 0x%x 0x%x \r\n",
		       *((UINT_32 *) data + i * 4), *((UINT_32 *) data + i * 4 + 1),
		       *((UINT_32 *) data + i * 4 + 2), *((UINT_32 *) data + i * 4 + 3));

	prWifiHotlistCmd = kalMemAlloc(sizeof(PARAM_WIFI_BSSID_HOTLIST), VIR_MEM_TYPE);
	if (prWifiHotlistCmd == NULL)
		goto nla_put_failure;
	kalMemZero(prWifiHotlistCmd, sizeof(PARAM_WIFI_BSSID_HOTLIST));

	attr = kalMemAlloc(sizeof(struct nlattr *) * (GSCAN_ATTRIBUTE_SIGNIFICANT_CHANGE_FLUSH + 1),
			   VIR_MEM_TYPE);
	if (attr == NULL)
		goto nla_put_failure;
	kalMemZero(attr, sizeof(struct nlattr *) * (GSCAN_ATTRIBUTE_SIGNIFICANT_CHANGE_FLUSH + 1));

	if (nla_parse_nested(attr, GSCAN_ATTRIBUTE_NUM_AP,
			     (struct nlattr *)(data - NLA_HDRLEN), nla_parse_policy) < 0)
		goto nla_put_failure;

	len_basic = 0;
	for (k = GSCAN_ATTRIBUTE_HOTLIST_FLUSH; k <= GSCAN_ATTRIBUTE_NUM_AP; k++) {
		if (attr[k]) {
			switch (k) {
			case GSCAN_ATTRIBUTE_LOST_AP_SAMPLE_SIZE:
				prWifiHotlistCmd->lost_ap_sample_size = nla_get_u32(attr[k]);
				len_basic += NLA_ALIGN(attr[k]->nla_len);
				break;
			case GSCAN_ATTRIBUTE_NUM_AP:
				prWifiHotlistCmd->num_ap = nla_get_u16(attr[k]);
				len_basic += NLA_ALIGN(attr[k]->nla_len);
				DBGLOG(REQ, TRACE, "attr=0x%x, num_ap=%d nla_len=%d, \r\n",
				       *(UINT_32 *) attr[k], prWifiHotlistCmd->num_ap,
				       attr[k]->nla_len);
				break;
			case GSCAN_ATTRIBUTE_HOTLIST_FLUSH:
				flush = nla_get_u8(attr[k]);
				len_basic += NLA_ALIGN(attr[k]->nla_len);
				break;
			}
		}
	}
	paplist = (struct nlattr *)((UINT_8 *) data + len_basic);
	DBGLOG(REQ, TRACE, "+++basic attribute size=%d flush=%d\r\n", len_basic, flush);

	if (paplist->nla_type == GSCAN_ATTRIBUTE_HOTLIST_BSSIDS)
		paplist = (struct nlattr *)((UINT_8 *) paplist + NLA_HDRLEN);

	for (i = 0; i < prWifiHotlistCmd->num_ap; i++) {
		if (nla_parse_nested(attr, GSCAN_ATTRIBUTE_RSSI_HIGH,
				     (struct nlattr *)paplist, nla_parse_policy) < 0)
			goto nla_put_failure;
		paplist = (struct nlattr *)((UINT_8 *) paplist + NLA_HDRLEN);

		/* request.attr_start(i) as nested attribute */
		len_aplist = 0;
		for (k = GSCAN_ATTRIBUTE_BSSID; k <= GSCAN_ATTRIBUTE_RSSI_HIGH; k++) {
			if (attr[k]) {
				switch (k) {
				case GSCAN_ATTRIBUTE_BSSID:
					kalMemCopy(prWifiHotlistCmd->ap[i].bssid,
						   nla_data(attr[k]), sizeof(mac_addr));
					len_aplist += NLA_ALIGN(attr[k]->nla_len);
					break;
				case GSCAN_ATTRIBUTE_RSSI_LOW:
					prWifiHotlistCmd->ap[i].low = nla_get_u32(attr[k]);
					len_aplist += NLA_ALIGN(attr[k]->nla_len);
					break;
				case GSCAN_ATTRIBUTE_RSSI_HIGH:
					prWifiHotlistCmd->ap[i].high = nla_get_u32(attr[k]);
					len_aplist += NLA_ALIGN(attr[k]->nla_len);
					break;
				}
			}
		}
		if (((i + 1) % 4 == 0) || (i == prWifiHotlistCmd->num_ap - 1))
			DBGLOG(REQ, TRACE, "ap[%d], len_aplist=%d\n", i, len_aplist);
		else
			DBGLOG(REQ, TRACE, "ap[%d], len_aplist=%d \t", i, len_aplist);

		paplist = (struct nlattr *)((UINT_8 *) paplist + len_aplist);
	}

	DBGLOG(REQ, TRACE,
	       "flush=%d, lost_ap_sample_size=%d, Hotlist:ap[0].channel=%d low=%d high=%d, ap[1].channel=%d low=%d high=%d",
	       flush, prWifiHotlistCmd->lost_ap_sample_size,
	       prWifiHotlistCmd->ap[0].channel, prWifiHotlistCmd->ap[0].low,
	       prWifiHotlistCmd->ap[0].high, prWifiHotlistCmd->ap[1].channel,
	       prWifiHotlistCmd->ap[1].low, prWifiHotlistCmd->ap[1].high);

	memcpy(&(rCmdPscnAddHotlist.aucMacAddr), &(prWifiHotlistCmd->ap[0].bssid),
	       6 * sizeof(UINT_8));
	rCmdPscnAddHotlist.ucFlags = (UINT_8) TRUE;
	prGlueInfo = (P_GLUE_INFO_T) wiphy_priv(wiphy);
	ASSERT(prGlueInfo);

	kalMemFree(prWifiHotlistCmd, VIR_MEM_TYPE, sizeof(PARAM_WIFI_BSSID_HOTLIST));
	kalMemFree(attr, VIR_MEM_TYPE,
		   sizeof(struct nlattr *) * (GSCAN_ATTRIBUTE_SIGNIFICANT_CHANGE_FLUSH + 1));
	return 0;

nla_put_failure:
	if (prWifiHotlistCmd)
		kalMemFree(prWifiHotlistCmd, VIR_MEM_TYPE, sizeof(PARAM_WIFI_BSSID_HOTLIST));
	if (attr)
		kalMemFree(attr, VIR_MEM_TYPE,
			   sizeof(struct nlattr *) * (GSCAN_ATTRIBUTE_SIGNIFICANT_CHANGE_FLUSH + 1));
	return i4Status;
}