static int inet6_parse_protinfo(struct rtnl_link *link, struct nlattr *attr, void *data) { struct inet6_data *i6 = data; struct nlattr *tb[IFLA_INET6_MAX+1]; int err; err = nla_parse_nested(tb, IFLA_INET6_MAX, attr, inet6_policy); if (err < 0) return err; if (tb[IFLA_INET6_FLAGS]) i6->i6_flags = nla_get_u32(tb[IFLA_INET6_FLAGS]); if (tb[IFLA_INET6_CACHEINFO]) nla_memcpy(&i6->i6_cacheinfo, tb[IFLA_INET6_CACHEINFO], sizeof(i6->i6_cacheinfo)); if (tb[IFLA_INET6_CONF]) nla_memcpy(&i6->i6_conf, tb[IFLA_INET6_CONF], sizeof(i6->i6_conf)); /* * Due to 32bit data alignment, these addresses must be copied to an * aligned location prior to access. */ if (tb[IFLA_INET6_STATS]) { unsigned char *cnt = nla_data(tb[IFLA_INET6_STATS]); uint64_t stat; int i; for (i = 1; i < __IPSTATS_MIB_MAX; i++) { memcpy(&stat, &cnt[i * sizeof(stat)], sizeof(stat)); rtnl_link_set_stat(link, RTNL_LINK_IP6_INPKTS + i - 1, stat); } } if (tb[IFLA_INET6_ICMP6STATS]) { unsigned char *cnt = nla_data(tb[IFLA_INET6_ICMP6STATS]); uint64_t stat; int i; for (i = 1; i < __ICMP6_MIB_MAX; i++) { memcpy(&stat, &cnt[i * sizeof(stat)], sizeof(stat)); rtnl_link_set_stat(link, RTNL_LINK_ICMP6_INMSGS + i - 1, stat); } } return 0; }
static int htb_class_msg_parser(struct rtnl_tc *tc, void *data) { struct nlattr *tb[TCA_HTB_MAX + 1]; struct rtnl_htb_class *htb = data; int err; if ((err = tca_parse(tb, TCA_HTB_MAX, tc, htb_policy)) < 0) return err; if (tb[TCA_HTB_PARMS]) { struct tc_htb_opt opts; nla_memcpy(&opts, tb[TCA_HTB_PARMS], sizeof(opts)); htb->ch_prio = opts.prio; rtnl_copy_ratespec(&htb->ch_rate, &opts.rate); rtnl_copy_ratespec(&htb->ch_ceil, &opts.ceil); htb->ch_rbuffer = rtnl_tc_calc_bufsize(opts.buffer, opts.rate.rate); htb->ch_cbuffer = rtnl_tc_calc_bufsize(opts.cbuffer, opts.ceil.rate); htb->ch_quantum = opts.quantum; htb->ch_level = opts.level; rtnl_tc_set_mpu(tc, htb->ch_rate.rs_mpu); rtnl_tc_set_overhead(tc, htb->ch_rate.rs_overhead); htb->ch_mask = (SCH_HTB_HAS_PRIO | SCH_HTB_HAS_RATE | SCH_HTB_HAS_CEIL | SCH_HTB_HAS_RBUFFER | SCH_HTB_HAS_CBUFFER | SCH_HTB_HAS_QUANTUM | SCH_HTB_HAS_LEVEL); } return 0; }
static int ieee802154_disassociate_req(struct sk_buff *skb, struct genl_info *info) { struct net_device *dev; struct ieee802154_addr addr; int ret = -EINVAL; if ((!info->attrs[IEEE802154_ATTR_DEST_HW_ADDR] && !info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]) || !info->attrs[IEEE802154_ATTR_REASON]) return -EINVAL; dev = ieee802154_nl_get_dev(info); if (!dev) return -ENODEV; if (info->attrs[IEEE802154_ATTR_DEST_HW_ADDR]) { addr.addr_type = IEEE802154_ADDR_LONG; nla_memcpy(addr.hwaddr, info->attrs[IEEE802154_ATTR_DEST_HW_ADDR], IEEE802154_ADDR_LEN); } else { addr.addr_type = IEEE802154_ADDR_SHORT; addr.short_addr = nla_get_u16( info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]); } addr.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev); ret = ieee802154_mlme_ops(dev)->disassoc_req(dev, &addr, nla_get_u8(info->attrs[IEEE802154_ATTR_REASON])); dev_put(dev); return ret; }
static int ieee802154_associate_resp(struct sk_buff *skb, struct genl_info *info) { struct net_device *dev; struct ieee802154_addr addr; int ret = -EOPNOTSUPP; if (!info->attrs[IEEE802154_ATTR_STATUS] || !info->attrs[IEEE802154_ATTR_DEST_HW_ADDR] || !info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]) return -EINVAL; dev = ieee802154_nl_get_dev(info); if (!dev) return -ENODEV; if (!ieee802154_mlme_ops(dev)->assoc_resp) goto out; addr.addr_type = IEEE802154_ADDR_LONG; nla_memcpy(addr.hwaddr, info->attrs[IEEE802154_ATTR_DEST_HW_ADDR], IEEE802154_ADDR_LEN); addr.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev); ret = ieee802154_mlme_ops(dev)->assoc_resp(dev, &addr, nla_get_u16(info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]), nla_get_u8(info->attrs[IEEE802154_ATTR_STATUS])); out: dev_put(dev); return ret; }
/** * ovs_vport_set_upcall_portids - set upcall portids of @vport. * * @vport: vport to modify. * @ids: new configuration, an array of port ids. * * Sets the vport's upcall_portids to @ids. * * Returns 0 if successful, -EINVAL if @ids is zero length or cannot be parsed * as an array of U32. * * Must be called with ovs_mutex. */ int ovs_vport_set_upcall_portids(struct vport *vport, const struct nlattr *ids) { struct vport_portids *old, *vport_portids; if (!nla_len(ids) || nla_len(ids) % sizeof(u32)) return -EINVAL; /* protected read: ovsl_dereference() is rcu_dereference_protected() under ovs_mutex */ old = ovsl_dereference(vport->upcall_portids); vport_portids = kmalloc(sizeof *vport_portids + nla_len(ids), GFP_KERNEL); if (!vport_portids) return -ENOMEM; vport_portids->n_ids = nla_len(ids) / sizeof(u32); vport_portids->rn_ids = reciprocal_value(vport_portids->n_ids); nla_memcpy(vport_portids->ids, ids, nla_len(ids)); rcu_assign_pointer(vport->upcall_portids, vport_portids); if (old) /* wait until all readers of old have finished, then free the memory it points to */ call_rcu(&old->rcu, vport_portids_destroy_rcu_cb); return 0; }
static int nf_nat_ipv6_nlattr_to_range(struct nlattr *tb[], struct nf_nat_range *range) { if (tb[CTA_NAT_V6_MINIP]) { nla_memcpy(&range->min_addr.ip6, tb[CTA_NAT_V6_MINIP], sizeof(struct in6_addr)); range->flags |= NF_NAT_RANGE_MAP_IPS; } if (tb[CTA_NAT_V6_MAXIP]) nla_memcpy(&range->max_addr.ip6, tb[CTA_NAT_V6_MAXIP], sizeof(struct in6_addr)); else range->max_addr = range->min_addr; return 0; }
static int cbq_msg_parser(struct rtnl_tca *tca) { struct nlattr *tb[TCA_CBQ_MAX + 1]; struct rtnl_cbq *cbq; int err; err = tca_parse(tb, TCA_CBQ_MAX, tca, cbq_policy); if (err < 0) return err; cbq = cbq_alloc(tca); if (!cbq) return nl_errno(ENOMEM); nla_memcpy(&cbq->cbq_lss, tb[TCA_CBQ_LSSOPT], sizeof(cbq->cbq_lss)); nla_memcpy(&cbq->cbq_rate, tb[TCA_CBQ_RATE], sizeof(cbq->cbq_rate)); nla_memcpy(&cbq->cbq_wrr, tb[TCA_CBQ_WRROPT], sizeof(cbq->cbq_wrr)); nla_memcpy(&cbq->cbq_fopt, tb[TCA_CBQ_FOPT], sizeof(cbq->cbq_fopt)); nla_memcpy(&cbq->cbq_ovl, tb[TCA_CBQ_OVL_STRATEGY], sizeof(cbq->cbq_ovl)); nla_memcpy(&cbq->cbq_police, tb[TCA_CBQ_POLICE], sizeof(cbq->cbq_police)); return 0; }
static int mirred_msg_parser(struct rtnl_tc *tc, void *data) { struct rtnl_mirred *u = data; struct nlattr *tb[TCA_MIRRED_MAX + 1]; int err; err = tca_parse(tb, TCA_MIRRED_MAX, tc, mirred_policy); if (err < 0) return err; if (!tb[TCA_MIRRED_PARMS]) return -NLE_MISSING_ATTR; nla_memcpy(&u->m_parm, tb[TCA_MIRRED_PARMS], sizeof(u->m_parm)); return 0; }
struct sw_flow_actions *ovs_flow_actions_alloc(const struct nlattr *actions) { int actions_len = nla_len(actions); struct sw_flow_actions *sfa; if (actions_len > MAX_ACTIONS_BUFSIZE) return ERR_PTR(-EINVAL); sfa = kmalloc(sizeof(*sfa) + actions_len, GFP_KERNEL); if (!sfa) return ERR_PTR(-ENOMEM); sfa->actions_len = actions_len; nla_memcpy(sfa->actions, actions, actions_len); return sfa; }
static int ieee802154_associate_req(struct sk_buff *skb, struct genl_info *info) { struct net_device *dev; struct ieee802154_addr addr; u8 page; int ret = -EOPNOTSUPP; if (!info->attrs[IEEE802154_ATTR_CHANNEL] || !info->attrs[IEEE802154_ATTR_COORD_PAN_ID] || (!info->attrs[IEEE802154_ATTR_COORD_HW_ADDR] && !info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]) || !info->attrs[IEEE802154_ATTR_CAPABILITY]) return -EINVAL; dev = ieee802154_nl_get_dev(info); if (!dev) return -ENODEV; if (!ieee802154_mlme_ops(dev)->assoc_req) goto out; if (info->attrs[IEEE802154_ATTR_COORD_HW_ADDR]) { addr.addr_type = IEEE802154_ADDR_LONG; nla_memcpy(addr.hwaddr, info->attrs[IEEE802154_ATTR_COORD_HW_ADDR], IEEE802154_ADDR_LEN); } else { addr.addr_type = IEEE802154_ADDR_SHORT; addr.short_addr = nla_get_u16( info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]); } addr.pan_id = nla_get_u16(info->attrs[IEEE802154_ATTR_COORD_PAN_ID]); if (info->attrs[IEEE802154_ATTR_PAGE]) page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]); else page = 0; ret = ieee802154_mlme_ops(dev)->assoc_req(dev, &addr, nla_get_u8(info->attrs[IEEE802154_ATTR_CHANNEL]), page, nla_get_u8(info->attrs[IEEE802154_ATTR_CAPABILITY])); out: dev_put(dev); return ret; }
static int gact_msg_parser(struct rtnl_tc *tc, void *data) { struct rtnl_gact *u = data; struct nlattr *tb[TCA_GACT_MAX + 1]; int err; err = tca_parse(tb, TCA_GACT_MAX, tc, gact_policy); if (err < 0) return err; if (!tb[TCA_GACT_PARMS]) return -NLE_MISSING_ATTR; nla_memcpy(&u->g_parm, tb[TCA_GACT_PARMS], sizeof(u->g_parm)); return 0; }
static iz_res_t scan_response(struct iz_cmd *cmd, struct genlmsghdr *ghdr, struct nlattr **attrs) { uint8_t status, type; int i; uint8_t edl[27]; if (ghdr->cmd == 21) { printf("Found beacon:\n\tShort addr: %d\n\tPAN id: %d\n", nla_get_u16(attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]), nla_get_u16(attrs[IEEE802154_ATTR_COORD_PAN_ID])); return IZ_CONT_OK; } if (!attrs[IEEE802154_ATTR_DEV_INDEX] || !attrs[IEEE802154_ATTR_STATUS] || !attrs[IEEE802154_ATTR_SCAN_TYPE]) return IZ_STOP_ERR; status = nla_get_u8(attrs[IEEE802154_ATTR_STATUS]); if (status != 0) printf("Scan failed: %02x\n", status); type = nla_get_u8(attrs[IEEE802154_ATTR_SCAN_TYPE]); switch (type) { case IEEE802154_MAC_SCAN_ED: if (!attrs[IEEE802154_ATTR_ED_LIST]) return IZ_STOP_ERR; nla_memcpy(edl, attrs[IEEE802154_ATTR_ED_LIST], 27); printf("ED Scan results:\n"); for (i = 0; i < 27; i++) printf(" Ch%2d --- ED = %02x\n", i, edl[i]); return IZ_STOP_OK; case IEEE802154_MAC_SCAN_ACTIVE: printf("Finished active (beacons) scan...\n"); return IZ_STOP_OK; default: printf("Unsupported scan type: %d\n", type); break; } return IZ_STOP_OK; }
static iz_res_t list_response(struct iz_cmd *cmd, struct genlmsghdr *ghdr, struct nlattr **attrs) { char * dev_name; char * phy_name = NULL; uint32_t dev_index; unsigned char hw_addr[IEEE802154_ADDR_LEN]; uint16_t short_addr; uint16_t pan_id; char * dev_type_str; /* Check for mandatory attributes */ if (!attrs[IEEE802154_ATTR_DEV_NAME] || !attrs[IEEE802154_ATTR_DEV_INDEX] || !attrs[IEEE802154_ATTR_HW_ADDR] || !attrs[IEEE802154_ATTR_SHORT_ADDR] || !attrs[IEEE802154_ATTR_PAN_ID]) return IZ_STOP_ERR; /* Get attribute values from the message */ dev_name = nla_get_string(attrs[IEEE802154_ATTR_DEV_NAME]); dev_index = nla_get_u32(attrs[IEEE802154_ATTR_DEV_INDEX]); nla_memcpy(hw_addr, attrs[IEEE802154_ATTR_HW_ADDR], IEEE802154_ADDR_LEN); short_addr = nla_get_u16(attrs[IEEE802154_ATTR_SHORT_ADDR]); pan_id = nla_get_u16(attrs[IEEE802154_ATTR_PAN_ID]); if (attrs[IEEE802154_ATTR_PHY_NAME]) phy_name = nla_get_string(attrs[IEEE802154_ATTR_PHY_NAME]); /* Display information about interface */ printf("%s\n", dev_name); dev_type_str = "IEEE 802.15.4 MAC interface"; printf(" link: %s\n", dev_type_str); if (phy_name) printf(" phy %s\n", phy_name); printf(" hw %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x", hw_addr[0], hw_addr[1], hw_addr[2], hw_addr[3], hw_addr[4], hw_addr[5], hw_addr[6], hw_addr[7]); printf(" pan 0x%04hx short 0x%04hx\n", pan_id, short_addr); return (cmd->flags & NLM_F_MULTI) ? IZ_CONT_OK : IZ_STOP_OK; }
static int tbf_msg_parser(struct rtnl_qdisc *q) { int err; struct nlattr *tb[TCA_TBF_MAX + 1]; struct rtnl_tbf *tbf; err = tca_parse(tb, TCA_TBF_MAX, (struct rtnl_tca *) q, tbf_policy); if (err < 0) return err; tbf = tbf_qdisc(q); if (!tbf) return nl_errno(ENOMEM); if (tb[TCA_TBF_PARMS]) { struct tc_tbf_qopt opts; int bufsize; nla_memcpy(&opts, tb[TCA_TBF_PARMS], sizeof(opts)); tbf->qt_limit = opts.limit; tbf->qt_mpu = opts.rate.mpu; rtnl_copy_ratespec(&tbf->qt_rate, &opts.rate); tbf->qt_rate_txtime = opts.buffer; bufsize = rtnl_tc_calc_bufsize(nl_ticks2us(opts.buffer), opts.rate.rate); tbf->qt_rate_bucket = bufsize; rtnl_copy_ratespec(&tbf->qt_peakrate, &opts.peakrate); tbf->qt_peakrate_txtime = opts.mtu; bufsize = rtnl_tc_calc_bufsize(nl_ticks2us(opts.mtu), opts.peakrate.rate); tbf->qt_peakrate_bucket = bufsize; tbf->qt_mask = (TBF_ATTR_LIMIT | TBF_ATTR_MPU | TBF_ATTR_RATE | TBF_ATTR_PEAKRATE); } return 0; }
static int htb_qdisc_msg_parser(struct rtnl_tc *tc, void *data) { struct nlattr *tb[TCA_HTB_MAX + 1]; struct rtnl_htb_qdisc *htb = data; int err; if ((err = tca_parse(tb, TCA_HTB_MAX, tc, htb_policy)) < 0) return err; if (tb[TCA_HTB_INIT]) { struct tc_htb_glob opts; nla_memcpy(&opts, tb[TCA_HTB_INIT], sizeof(opts)); htb->qh_rate2quantum = opts.rate2quantum; htb->qh_defcls = opts.defcls; htb->qh_direct_pkts = opts.direct_pkts; htb->qh_mask = (SCH_HTB_HAS_RATE2QUANTUM | SCH_HTB_HAS_DEFCLS); } return 0; }
static int tbf_msg_parser(struct rtnl_tc *tc, void *data) { struct nlattr *tb[TCA_TBF_MAX + 1]; struct rtnl_tbf *tbf = data; int err; if ((err = tca_parse(tb, TCA_TBF_MAX, tc, tbf_policy)) < 0) return err; if (tb[TCA_TBF_PARMS]) { struct tc_tbf_qopt opts; int bufsize; nla_memcpy(&opts, tb[TCA_TBF_PARMS], sizeof(opts)); tbf->qt_limit = opts.limit; rtnl_copy_ratespec(&tbf->qt_rate, &opts.rate); tbf->qt_rate_txtime = opts.buffer; bufsize = rtnl_tc_calc_bufsize(nl_ticks2us(opts.buffer), opts.rate.rate); tbf->qt_rate_bucket = bufsize; rtnl_copy_ratespec(&tbf->qt_peakrate, &opts.peakrate); tbf->qt_peakrate_txtime = opts.mtu; bufsize = rtnl_tc_calc_bufsize(nl_ticks2us(opts.mtu), opts.peakrate.rate); tbf->qt_peakrate_bucket = bufsize; rtnl_tc_set_mpu(tc, tbf->qt_rate.rs_mpu); rtnl_tc_set_overhead(tc, tbf->qt_rate.rs_overhead); tbf->qt_mask = (TBF_ATTR_LIMIT | TBF_ATTR_RATE | TBF_ATTR_PEAKRATE); } return 0; }
static int htb_qdisc_msg_parser(struct rtnl_qdisc *qdisc) { int err; struct nlattr *tb[TCA_HTB_MAX + 1]; struct rtnl_htb_qdisc *d; err = tca_parse(tb, TCA_HTB_MAX, (struct rtnl_tca *) qdisc, htb_policy); if (err < 0) return err; d = htb_qdisc(qdisc); if (tb[TCA_HTB_INIT]) { struct tc_htb_glob opts; nla_memcpy(&opts, tb[TCA_HTB_INIT], sizeof(opts)); d->qh_rate2quantum = opts.rate2quantum; d->qh_defcls = opts.defcls; d->qh_mask = (SCH_HTB_HAS_RATE2QUANTUM | SCH_HTB_HAS_DEFCLS); } return 0; }
/** * ovs_vport_set_upcall_portids - set upcall portids of @vport. * * @vport: vport to modify. * @ids: new configuration, an array of port ids. * * Sets the vport's upcall_portids to @ids. * * Returns 0 if successful, -EINVAL if @ids is zero length or cannot be parsed * as an array of U32. * * Must be called with ovs_mutex. */ int ovs_vport_set_upcall_portids(struct vport *vport, const struct nlattr *ids) { struct vport_portids *old, *vport_portids; if (!nla_len(ids) || nla_len(ids) % sizeof(u32)) return -EINVAL; old = ovsl_dereference(vport->upcall_portids); vport_portids = kmalloc(sizeof(*vport_portids) + nla_len(ids), GFP_KERNEL); if (!vport_portids) return -ENOMEM; vport_portids->n_ids = nla_len(ids) / sizeof(u32); vport_portids->rn_ids = reciprocal_value(vport_portids->n_ids); nla_memcpy(vport_portids->ids, ids, nla_len(ids)); rcu_assign_pointer(vport->upcall_portids, vport_portids); if (old) kfree_rcu(old, rcu); return 0; }
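The setter above publishes a new vport_portids array with rcu_assign_pointer() and retires the old one with kfree_rcu(), so any reader has to go through an RCU dereference and can use the cached reciprocal value to pick one id per packet. The sketch below illustrates what such a read path could look like; the helper name and the use of skb_get_hash() are our assumptions, not the upstream Open vSwitch implementation.

/*
 * Hypothetical RCU read path matching the layout written by
 * ovs_vport_set_upcall_portids(); kernel context assumed
 * (linux/rcupdate.h, linux/reciprocal_div.h, linux/skbuff.h).
 */
static u32 vport_pick_upcall_portid(const struct vport *vport,
				    struct sk_buff *skb)
{
	struct vport_portids *ids;
	u32 hash, bucket;

	/* caller is assumed to hold rcu_read_lock() */
	ids = rcu_dereference(vport->upcall_portids);
	if (!ids || !ids->n_ids)
		return 0;

	/* hash % n_ids, using the reciprocal value cached by the setter */
	hash = skb_get_hash(skb);
	bucket = hash - ids->n_ids * reciprocal_divide(hash, ids->rn_ids);

	return ids->ids[bucket];
}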
static int vlan_parse(struct rtnl_link *link, struct nlattr *data, struct nlattr *xstats) { struct nlattr *tb[IFLA_VLAN_MAX+1]; struct vlan_info *vi; int err; NL_DBG(3, "Parsing VLAN link info"); if ((err = nla_parse_nested(tb, IFLA_VLAN_MAX, data, vlan_policy)) < 0) goto errout; if ((err = vlan_alloc(link)) < 0) goto errout; vi = link->l_info; if (tb[IFLA_VLAN_ID]) { vi->vi_vlan_id = nla_get_u16(tb[IFLA_VLAN_ID]); vi->vi_mask |= VLAN_HAS_ID; } if (tb[IFLA_VLAN_FLAGS]) { struct ifla_vlan_flags flags; nla_memcpy(&flags, tb[IFLA_VLAN_FLAGS], sizeof(flags)); vi->vi_flags = flags.flags; vi->vi_mask |= VLAN_HAS_FLAGS; } if (tb[IFLA_VLAN_INGRESS_QOS]) { struct ifla_vlan_qos_mapping *map; struct nlattr *nla; int remaining; memset(vi->vi_ingress_qos, 0, sizeof(vi->vi_ingress_qos)); nla_for_each_nested(nla, tb[IFLA_VLAN_INGRESS_QOS], remaining) { if (nla_len(nla) < sizeof(*map)) return nl_error(EINVAL, "Malformed mapping"); map = nla_data(nla); if (map->from < 0 || map->from > VLAN_PRIO_MAX) { return nl_error(EINVAL, "VLAN prio %d out of " "range", map->from); } vi->vi_ingress_qos[map->from] = map->to; } vi->vi_mask |= VLAN_HAS_INGRESS_QOS; } if (tb[IFLA_VLAN_EGRESS_QOS]) { struct ifla_vlan_qos_mapping *map; struct nlattr *nla; int remaining, i = 0; nla_for_each_nested(nla, tb[IFLA_VLAN_EGRESS_QOS], remaining) { if (nla_len(nla) < sizeof(*map)) return nl_error(EINVAL, "Malformed mapping"); i++; } /* align to have a little reserve */ vi->vi_egress_size = (i + 32) & ~31; vi->vi_egress_qos = calloc(vi->vi_egress_size, sizeof(*map)); if (vi->vi_egress_qos == NULL) return nl_errno(ENOMEM); i = 0; nla_for_each_nested(nla, tb[IFLA_VLAN_EGRESS_QOS], remaining) { map = nla_data(nla); NL_DBG(4, "Assigning egress qos mapping %d\n", i); vi->vi_egress_qos[i].vm_from = map->from; vi->vi_egress_qos[i++].vm_to = map->to; } vi->vi_negress = i; vi->vi_mask |= VLAN_HAS_EGRESS_QOS; }
static int ieee802154_dump_phy(struct sk_buff *skb, struct netlink_callback *cb) { struct dump_phy_data data = { .cb = cb, .skb = skb, .s_idx = cb->args[0], .idx = 0, }; pr_debug("%s\n", __func__); wpan_phy_for_each(ieee802154_dump_phy_iter, &data); cb->args[0] = data.idx; return skb->len; } static int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info) { struct sk_buff *msg; struct wpan_phy *phy; const char *name; const char *devname; int rc = -ENOBUFS; struct net_device *dev; pr_debug("%s\n", __func__); if (!info->attrs[IEEE802154_ATTR_PHY_NAME]) return -EINVAL; name = nla_data(info->attrs[IEEE802154_ATTR_PHY_NAME]); if (name[nla_len(info->attrs[IEEE802154_ATTR_PHY_NAME]) - 1] != '\0') return -EINVAL; /* phy name should be null-terminated */ if (info->attrs[IEEE802154_ATTR_DEV_NAME]) { devname = nla_data(info->attrs[IEEE802154_ATTR_DEV_NAME]); if (devname[nla_len(info->attrs[IEEE802154_ATTR_DEV_NAME]) - 1] != '\0') return -EINVAL; /* phy name should be null-terminated */ } else { devname = "wpan%d"; } if (strlen(devname) >= IFNAMSIZ) return -ENAMETOOLONG; phy = wpan_phy_find(name); if (!phy) return -ENODEV; msg = ieee802154_nl_new_reply(info, 0, IEEE802154_ADD_IFACE); if (!msg) goto out_dev; if (!phy->add_iface) { rc = -EINVAL; goto nla_put_failure; } if (info->attrs[IEEE802154_ATTR_HW_ADDR] && nla_len(info->attrs[IEEE802154_ATTR_HW_ADDR]) != IEEE802154_ADDR_LEN) { rc = -EINVAL; goto nla_put_failure; } dev = phy->add_iface(phy, devname); if (IS_ERR(dev)) { rc = PTR_ERR(dev); goto nla_put_failure; } if (info->attrs[IEEE802154_ATTR_HW_ADDR]) { struct sockaddr addr; addr.sa_family = ARPHRD_IEEE802154; nla_memcpy(&addr.sa_data, info->attrs[IEEE802154_ATTR_HW_ADDR], IEEE802154_ADDR_LEN); /* * strangely enough, some callbacks (inetdev_event) from * dev_set_mac_address require RTNL_LOCK */ rtnl_lock(); rc = dev_set_mac_address(dev, &addr); rtnl_unlock(); if (rc) goto dev_unregister; } if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) || nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name)) goto nla_put_failure; dev_put(dev); wpan_phy_put(phy); return ieee802154_nl_reply(msg, info); dev_unregister: rtnl_lock(); /* del_iface must be called with RTNL lock */ phy->del_iface(phy, dev); dev_put(dev); rtnl_unlock(); nla_put_failure: nlmsg_free(msg); out_dev: wpan_phy_put(phy); return rc; } static int ieee802154_del_iface(struct sk_buff *skb, struct genl_info *info) { struct sk_buff *msg; struct wpan_phy *phy; const char *name; int rc; struct net_device *dev; pr_debug("%s\n", __func__); if (!info->attrs[IEEE802154_ATTR_DEV_NAME]) return -EINVAL; name = nla_data(info->attrs[IEEE802154_ATTR_DEV_NAME]); if (name[nla_len(info->attrs[IEEE802154_ATTR_DEV_NAME]) - 1] != '\0') return -EINVAL; /* name should be null-terminated */ dev = dev_get_by_name(genl_info_net(info), name); if (!dev) return -ENODEV; phy = ieee802154_mlme_ops(dev)->get_phy(dev); BUG_ON(!phy); rc = -EINVAL; /* phy name is optional, but should be checked if it's given */ if (info->attrs[IEEE802154_ATTR_PHY_NAME]) { struct wpan_phy *phy2; const char *pname = nla_data(info->attrs[IEEE802154_ATTR_PHY_NAME]); if (pname[nla_len(info->attrs[IEEE802154_ATTR_PHY_NAME]) - 1] != '\0') /* name should be null-terminated */ goto out_dev; phy2 = wpan_phy_find(pname); if (!phy2) goto out_dev; if (phy != phy2) { wpan_phy_put(phy2); goto out_dev; } } rc = -ENOBUFS; msg = ieee802154_nl_new_reply(info, 0, IEEE802154_DEL_IFACE); if (!msg) goto out_dev; if (!phy->del_iface) { rc = -EINVAL; goto nla_put_failure; } rtnl_lock(); 
phy->del_iface(phy, dev); /* We don't have device anymore */ dev_put(dev); dev = NULL; rtnl_unlock(); if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) || nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, name)) goto nla_put_failure; wpan_phy_put(phy); return ieee802154_nl_reply(msg, info); nla_put_failure: nlmsg_free(msg); out_dev: wpan_phy_put(phy); if (dev) dev_put(dev); return rc; } static struct genl_ops ieee802154_phy_ops[] = { IEEE802154_DUMP(IEEE802154_LIST_PHY, ieee802154_list_phy, ieee802154_dump_phy), IEEE802154_OP(IEEE802154_ADD_IFACE, ieee802154_add_iface), IEEE802154_OP(IEEE802154_DEL_IFACE, ieee802154_del_iface), }; /* * No need to unregister as family unregistration will do it. */ int nl802154_phy_register(void) { int i; int rc; for (i = 0; i < ARRAY_SIZE(ieee802154_phy_ops); i++) { rc = genl_register_ops(&nl802154_family, &ieee802154_phy_ops[i]); if (rc) return rc; } return 0; }
/** * parse_options - build local/remote addresses from configuration * @attrs: netlink config data * @ub: UDP bearer instance * @local: local bearer IP address/port * @remote: peer or multicast IP/port */ static int parse_options(struct nlattr *attrs[], struct udp_bearer *ub, struct udp_media_addr *local, struct udp_media_addr *remote) { struct nlattr *opts[TIPC_NLA_UDP_MAX + 1]; struct sockaddr_storage sa_local, sa_remote; if (!attrs[TIPC_NLA_BEARER_UDP_OPTS]) goto err; if (nla_parse_nested(opts, TIPC_NLA_UDP_MAX, attrs[TIPC_NLA_BEARER_UDP_OPTS], tipc_nl_udp_policy)) goto err; if (opts[TIPC_NLA_UDP_LOCAL] && opts[TIPC_NLA_UDP_REMOTE]) { nla_memcpy(&sa_local, opts[TIPC_NLA_UDP_LOCAL], sizeof(sa_local)); nla_memcpy(&sa_remote, opts[TIPC_NLA_UDP_REMOTE], sizeof(sa_remote)); } else { err: pr_err("Invalid UDP bearer configuration"); return -EINVAL; } if ((sa_local.ss_family & sa_remote.ss_family) == AF_INET) { struct sockaddr_in *ip4; ip4 = (struct sockaddr_in *)&sa_local; local->proto = htons(ETH_P_IP); local->port = ip4->sin_port; local->ipv4.s_addr = ip4->sin_addr.s_addr; ip4 = (struct sockaddr_in *)&sa_remote; remote->proto = htons(ETH_P_IP); remote->port = ip4->sin_port; remote->ipv4.s_addr = ip4->sin_addr.s_addr; return 0; #if IS_ENABLED(CONFIG_IPV6) } else if ((sa_local.ss_family & sa_remote.ss_family) == AF_INET6) { int atype; struct sockaddr_in6 *ip6; ip6 = (struct sockaddr_in6 *)&sa_local; atype = ipv6_addr_type(&ip6->sin6_addr); if (__ipv6_addr_needs_scope_id(atype) && !ip6->sin6_scope_id) return -EINVAL; local->proto = htons(ETH_P_IPV6); local->port = ip6->sin6_port; memcpy(&local->ipv6, &ip6->sin6_addr, sizeof(struct in6_addr)); ub->ifindex = ip6->sin6_scope_id; ip6 = (struct sockaddr_in6 *)&sa_remote; remote->proto = htons(ETH_P_IPV6); remote->port = ip6->sin6_port; memcpy(&remote->ipv6, &ip6->sin6_addr, sizeof(struct in6_addr)); return 0; #endif } return -EADDRNOTAVAIL; }
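parse_options() above expects TIPC_NLA_BEARER_UDP_OPTS to carry two nested blobs, TIPC_NLA_UDP_LOCAL and TIPC_NLA_UDP_REMOTE, each copied into a struct sockaddr_storage with nla_memcpy(). As a hedged illustration of the producing side, a userspace libnl-3 helper might nest them as shown below; the function name, error handling, and the choice to send full sockaddr_storage blobs are assumptions, not taken from the TIPC tooling.

/*
 * Hypothetical libnl-3 helper producing the nested block consumed by
 * parse_options(); assumes <netlink/netlink.h>, <netlink/attr.h>,
 * <linux/tipc_netlink.h> and <sys/socket.h>.
 */
static int put_udp_bearer_opts(struct nl_msg *msg,
			       const struct sockaddr_storage *local,
			       const struct sockaddr_storage *remote)
{
	struct nlattr *opts;

	opts = nla_nest_start(msg, TIPC_NLA_BEARER_UDP_OPTS);
	if (!opts)
		return -NLE_MSGSIZE;

	/* both addresses travel as sockaddr_storage blobs */
	if (nla_put(msg, TIPC_NLA_UDP_LOCAL, sizeof(*local), local) < 0 ||
	    nla_put(msg, TIPC_NLA_UDP_REMOTE, sizeof(*remote), remote) < 0)
		return -NLE_MSGSIZE;

	nla_nest_end(msg, opts);
	return 0;
}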
static int vlan_parse(struct rtnl_link *link, struct nlattr *data, struct nlattr *xstats) { struct nlattr *tb[IFLA_VLAN_MAX+1]; struct vlan_info *vi; int err; NL_DBG(3, "Parsing VLAN link info\n"); if ((err = nla_parse_nested(tb, IFLA_VLAN_MAX, data, vlan_policy)) < 0) goto errout; if ((err = vlan_alloc(link)) < 0) goto errout; vi = link->l_info; if (tb[IFLA_VLAN_ID]) { vi->vi_vlan_id = nla_get_u16(tb[IFLA_VLAN_ID]); vi->vi_mask |= VLAN_HAS_ID; } if (tb[IFLA_VLAN_PROTOCOL]) { vi->vi_protocol = nla_get_u16(tb[IFLA_VLAN_PROTOCOL]); vi->vi_mask |= VLAN_HAS_PROTOCOL; } if (tb[IFLA_VLAN_FLAGS]) { struct ifla_vlan_flags flags; nla_memcpy(&flags, tb[IFLA_VLAN_FLAGS], sizeof(flags)); vi->vi_flags = flags.flags; vi->vi_mask |= VLAN_HAS_FLAGS; } if (tb[IFLA_VLAN_INGRESS_QOS]) { struct ifla_vlan_qos_mapping *map; struct nlattr *nla; int remaining; vi->vi_ingress_qos_mask = 0; memset(vi->vi_ingress_qos, 0, sizeof(vi->vi_ingress_qos)); nla_for_each_nested(nla, tb[IFLA_VLAN_INGRESS_QOS], remaining) { if (nla_len(nla) < sizeof(*map)) return -NLE_INVAL; map = nla_data(nla); if (map->from > VLAN_PRIO_MAX) { return -NLE_INVAL; } /* Kernel will not explicitly serialize mappings with "to" zero * (although they are implicitly set). * * Thus we only mark those as "set" which are explicitly sent. * That is similar to what we do with the egress map and it preserves * previous behavior before NL_CAPABILITY_RTNL_LINK_VLAN_INGRESS_MAP_CLEAR. * * It matters only when a received object is send back to kernel to modify * the link. */ vi->vi_ingress_qos_mask |= (1 << map->from); vi->vi_ingress_qos[map->from] = map->to; } vi->vi_mask |= VLAN_HAS_INGRESS_QOS; } if (tb[IFLA_VLAN_EGRESS_QOS]) { struct ifla_vlan_qos_mapping *map; struct nlattr *nla; int remaining, i = 0; nla_for_each_nested(nla, tb[IFLA_VLAN_EGRESS_QOS], remaining) { if (nla_len(nla) < sizeof(*map)) return -NLE_INVAL; i++; } /* align to have a little reserve */ vi->vi_egress_size = (i + 32) & ~31; vi->vi_egress_qos = calloc(vi->vi_egress_size, sizeof(*vi->vi_egress_qos)); if (vi->vi_egress_qos == NULL) return -NLE_NOMEM; i = 0; nla_for_each_nested(nla, tb[IFLA_VLAN_EGRESS_QOS], remaining) { map = nla_data(nla); NL_DBG(4, "Assigning egress qos mapping %d\n", i); vi->vi_egress_qos[i].vm_from = map->from; vi->vi_egress_qos[i++].vm_to = map->to; } vi->vi_negress = i; vi->vi_mask |= VLAN_HAS_EGRESS_QOS; }
static int inet6_parse_protinfo(struct rtnl_link *link, struct nlattr *attr, void *data) { struct inet6_data *i6 = data; struct nlattr *tb[IFLA_INET6_MAX+1]; int err; err = nla_parse_nested(tb, IFLA_INET6_MAX, attr, inet6_policy); if (err < 0) return err; if (tb[IFLA_INET6_CONF] && nla_len(tb[IFLA_INET6_CONF]) % 4) return -EINVAL; if (tb[IFLA_INET6_STATS] && nla_len(tb[IFLA_INET6_STATS]) % 8) return -EINVAL; if (tb[IFLA_INET6_ICMP6STATS] && nla_len(tb[IFLA_INET6_ICMP6STATS]) % 8) return -EINVAL; if (tb[IFLA_INET6_FLAGS]) i6->i6_flags = nla_get_u32(tb[IFLA_INET6_FLAGS]); if (tb[IFLA_INET6_CACHEINFO]) nla_memcpy(&i6->i6_cacheinfo, tb[IFLA_INET6_CACHEINFO], sizeof(i6->i6_cacheinfo)); if (tb[IFLA_INET6_CONF]) nla_memcpy(&i6->i6_conf, tb[IFLA_INET6_CONF], sizeof(i6->i6_conf)); if (tb[IFLA_INET6_TOKEN]) nla_memcpy(&i6->i6_token, tb[IFLA_INET6_TOKEN], sizeof(struct in6_addr)); if (tb[IFLA_INET6_ADDR_GEN_MODE]) i6->i6_addr_gen_mode = nla_get_u8 (tb[IFLA_INET6_ADDR_GEN_MODE]); /* * Due to 32bit data alignment, these addresses must be copied to an * aligned location prior to access. */ if (tb[IFLA_INET6_STATS]) { unsigned char *cnt = nla_data(tb[IFLA_INET6_STATS]); uint64_t stat; int i; int len = nla_len(tb[IFLA_INET6_STATS]) / 8; const uint8_t *map_stat_id = map_stat_id_from_IPSTATS_MIB_v2; if (len < 32 || (tb[IFLA_INET6_ICMP6STATS] && nla_len(tb[IFLA_INET6_ICMP6STATS]) < 6)) { /* kernel commit 14a196807482e6fc74f15fc03176d5c08880588f reordered the values. * The later commit 6a5dc9e598fe90160fee7de098fa319665f5253e added values * IPSTATS_MIB_CSUMERRORS/ICMP6_MIB_CSUMERRORS. If the netlink is shorter * than this, assume that the kernel uses the previous meaning of the * enumeration. */ map_stat_id = map_stat_id_from_IPSTATS_MIB_v1; } len = min_t(int, __IPSTATS_MIB_MAX, len); for (i = 1; i < len; i++) { memcpy(&stat, &cnt[i * sizeof(stat)], sizeof(stat)); rtnl_link_set_stat(link, map_stat_id[i], stat); } } if (tb[IFLA_INET6_ICMP6STATS]) { unsigned char *cnt = nla_data(tb[IFLA_INET6_ICMP6STATS]); uint64_t stat; int i; int len = min_t(int, __ICMP6_MIB_MAX, nla_len(tb[IFLA_INET6_ICMP6STATS]) / 8); for (i = 1; i < len; i++) { memcpy(&stat, &cnt[i * sizeof(stat)], sizeof(stat)); rtnl_link_set_stat(link, RTNL_LINK_ICMP6_INMSGS + i - 1, stat); } } return 0; }
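The alignment comment in the parser above is the reason each 64-bit counter is pulled out with memcpy() instead of indexing the attribute payload through a uint64_t pointer: netlink payloads are only guaranteed 4-byte alignment. A minimal standalone illustration of that pattern (the helper name is made up) is:

#include <stdint.h>
#include <string.h>

/*
 * Hypothetical helper: read the i-th 64-bit counter out of a byte buffer
 * that is only guaranteed to be 4-byte aligned, as netlink attribute
 * payloads are. Casting to uint64_t * and indexing would be undefined
 * behaviour on strict-alignment architectures; memcpy() is the portable
 * equivalent of what the parser above does per counter.
 */
static uint64_t read_u64_counter(const unsigned char *buf, int i)
{
	uint64_t v;

	memcpy(&v, buf + (size_t)i * sizeof(v), sizeof(v));
	return v;
}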
int ieee802154_dump_phy(struct sk_buff *skb, struct netlink_callback *cb) { struct dump_phy_data data = { .cb = cb, .skb = skb, .s_idx = cb->args[0], .idx = 0, }; pr_debug("%s\n", __func__); wpan_phy_for_each(ieee802154_dump_phy_iter, &data); cb->args[0] = data.idx; return skb->len; } int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info) { struct sk_buff *msg; struct wpan_phy *phy; const char *name; const char *devname; int rc = -ENOBUFS; struct net_device *dev; int type = __IEEE802154_DEV_INVALID; pr_debug("%s\n", __func__); if (!info->attrs[IEEE802154_ATTR_PHY_NAME]) return -EINVAL; name = nla_data(info->attrs[IEEE802154_ATTR_PHY_NAME]); if (name[nla_len(info->attrs[IEEE802154_ATTR_PHY_NAME]) - 1] != '\0') return -EINVAL; /* phy name should be null-terminated */ if (info->attrs[IEEE802154_ATTR_DEV_NAME]) { devname = nla_data(info->attrs[IEEE802154_ATTR_DEV_NAME]); if (devname[nla_len(info->attrs[IEEE802154_ATTR_DEV_NAME]) - 1] != '\0') return -EINVAL; /* phy name should be null-terminated */ } else { devname = "wpan%d"; } if (strlen(devname) >= IFNAMSIZ) return -ENAMETOOLONG; phy = wpan_phy_find(name); if (!phy) return -ENODEV; msg = ieee802154_nl_new_reply(info, 0, IEEE802154_ADD_IFACE); if (!msg) goto out_dev; if (info->attrs[IEEE802154_ATTR_HW_ADDR] && nla_len(info->attrs[IEEE802154_ATTR_HW_ADDR]) != IEEE802154_ADDR_LEN) { rc = -EINVAL; goto nla_put_failure; } if (info->attrs[IEEE802154_ATTR_DEV_TYPE]) { type = nla_get_u8(info->attrs[IEEE802154_ATTR_DEV_TYPE]); if (type >= __IEEE802154_DEV_MAX) { rc = -EINVAL; goto nla_put_failure; } } dev = rdev_add_virtual_intf_deprecated(wpan_phy_to_rdev(phy), devname, type); if (IS_ERR(dev)) { rc = PTR_ERR(dev); goto nla_put_failure; } dev_hold(dev); if (info->attrs[IEEE802154_ATTR_HW_ADDR]) { struct sockaddr addr; addr.sa_family = ARPHRD_IEEE802154; nla_memcpy(&addr.sa_data, info->attrs[IEEE802154_ATTR_HW_ADDR], IEEE802154_ADDR_LEN); /* strangely enough, some callbacks (inetdev_event) from * dev_set_mac_address require RTNL_LOCK */ rtnl_lock(); rc = dev_set_mac_address(dev, &addr); rtnl_unlock(); if (rc) goto dev_unregister; } if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) || nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name)) goto nla_put_failure; dev_put(dev); wpan_phy_put(phy); return ieee802154_nl_reply(msg, info); dev_unregister: rtnl_lock(); /* del_iface must be called with RTNL lock */ rdev_del_virtual_intf_deprecated(wpan_phy_to_rdev(phy), dev); dev_put(dev); rtnl_unlock(); nla_put_failure: nlmsg_free(msg); out_dev: wpan_phy_put(phy); return rc; } int ieee802154_del_iface(struct sk_buff *skb, struct genl_info *info) { struct sk_buff *msg; struct wpan_phy *phy; const char *name; int rc; struct net_device *dev; pr_debug("%s\n", __func__); if (!info->attrs[IEEE802154_ATTR_DEV_NAME]) return -EINVAL; name = nla_data(info->attrs[IEEE802154_ATTR_DEV_NAME]); if (name[nla_len(info->attrs[IEEE802154_ATTR_DEV_NAME]) - 1] != '\0') return -EINVAL; /* name should be null-terminated */ dev = dev_get_by_name(genl_info_net(info), name); if (!dev) return -ENODEV; phy = dev->ieee802154_ptr->wpan_phy; BUG_ON(!phy); get_device(&phy->dev); rc = -EINVAL; /* phy name is optional, but should be checked if it's given */ if (info->attrs[IEEE802154_ATTR_PHY_NAME]) { struct wpan_phy *phy2; const char *pname = nla_data(info->attrs[IEEE802154_ATTR_PHY_NAME]); if (pname[nla_len(info->attrs[IEEE802154_ATTR_PHY_NAME]) - 1] != '\0') /* name should be null-terminated */ goto out_dev; phy2 = wpan_phy_find(pname); if (!phy2) goto 
out_dev; if (phy != phy2) { wpan_phy_put(phy2); goto out_dev; } } rc = -ENOBUFS; msg = ieee802154_nl_new_reply(info, 0, IEEE802154_DEL_IFACE); if (!msg) goto out_dev; rtnl_lock(); rdev_del_virtual_intf_deprecated(wpan_phy_to_rdev(phy), dev); /* We don't have device anymore */ dev_put(dev); dev = NULL; rtnl_unlock(); if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) || nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, name)) goto nla_put_failure; wpan_phy_put(phy); return ieee802154_nl_reply(msg, info); nla_put_failure: nlmsg_free(msg); out_dev: wpan_phy_put(phy); if (dev) dev_put(dev); return rc; }
static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) { struct ifinfomsg *ifm; struct net_device *dev; int err, send_addr_notify = 0, modified = 0; struct nlattr *tb[IFLA_MAX+1]; char ifname[IFNAMSIZ]; err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy); if (err < 0) goto errout; if (tb[IFLA_IFNAME]) nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); else ifname[0] = '\0'; err = -EINVAL; ifm = nlmsg_data(nlh); if (ifm->ifi_index > 0) dev = dev_get_by_index(ifm->ifi_index); else if (tb[IFLA_IFNAME]) dev = dev_get_by_name(ifname); else goto errout; if (dev == NULL) { err = -ENODEV; goto errout; } if (tb[IFLA_ADDRESS] && nla_len(tb[IFLA_ADDRESS]) < dev->addr_len) goto errout_dev; if (tb[IFLA_BROADCAST] && nla_len(tb[IFLA_BROADCAST]) < dev->addr_len) goto errout_dev; if (tb[IFLA_MAP]) { struct rtnl_link_ifmap *u_map; struct ifmap k_map; if (!dev->set_config) { err = -EOPNOTSUPP; goto errout_dev; } if (!netif_device_present(dev)) { err = -ENODEV; goto errout_dev; } u_map = nla_data(tb[IFLA_MAP]); k_map.mem_start = (unsigned long) u_map->mem_start; k_map.mem_end = (unsigned long) u_map->mem_end; k_map.base_addr = (unsigned short) u_map->base_addr; k_map.irq = (unsigned char) u_map->irq; k_map.dma = (unsigned char) u_map->dma; k_map.port = (unsigned char) u_map->port; err = dev->set_config(dev, &k_map); if (err < 0) goto errout_dev; modified = 1; } if (tb[IFLA_ADDRESS]) { struct sockaddr *sa; int len; if (!dev->set_mac_address) { err = -EOPNOTSUPP; goto errout_dev; } if (!netif_device_present(dev)) { err = -ENODEV; goto errout_dev; } len = sizeof(sa_family_t) + dev->addr_len; sa = kmalloc(len, GFP_KERNEL); if (!sa) { err = -ENOMEM; goto errout_dev; } sa->sa_family = dev->type; memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]), dev->addr_len); err = dev->set_mac_address(dev, sa); kfree(sa); if (err) goto errout_dev; send_addr_notify = 1; modified = 1; } if (tb[IFLA_MTU]) { err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU])); if (err < 0) goto errout_dev; modified = 1; } /* * Interface selected by interface index but interface * name provided implies that a name change has been * requested. */ if (ifm->ifi_index > 0 && ifname[0]) { err = dev_change_name(dev, ifname); if (err < 0) goto errout_dev; modified = 1; } if (tb[IFLA_BROADCAST]) { nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len); send_addr_notify = 1; } if (ifm->ifi_flags || ifm->ifi_change) { unsigned int flags = ifm->ifi_flags; /* bugwards compatibility: ifi_change == 0 is treated as ~0 */ if (ifm->ifi_change) flags = (flags & ifm->ifi_change) | (dev->flags & ~ifm->ifi_change); dev_change_flags(dev, flags); } if (tb[IFLA_TXQLEN]) dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]); if (tb[IFLA_WEIGHT]) dev->weight = nla_get_u32(tb[IFLA_WEIGHT]); if (tb[IFLA_OPERSTATE]) set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE])); if (tb[IFLA_LINKMODE]) { write_lock_bh(&dev_base_lock); dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]); write_unlock_bh(&dev_base_lock); } err = 0; errout_dev: if (err < 0 && modified && net_ratelimit()) printk(KERN_WARNING "A link change request failed with " "some changes committed already. Interface %s may " "have been left with an inconsistent configuration, " "please check.\n", dev->name); if (send_addr_notify) call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); dev_put(dev); errout: return err; }
static int netem_msg_parser(struct rtnl_tc *tc, void *data) { struct rtnl_netem *netem = data; struct tc_netem_qopt *opts; int len, err = 0; if (tc->tc_opts->d_size < sizeof(*opts)) return -NLE_INVAL; opts = (struct tc_netem_qopt *) tc->tc_opts->d_data; netem->qnm_latency = opts->latency; netem->qnm_limit = opts->limit; netem->qnm_loss = opts->loss; netem->qnm_gap = opts->gap; netem->qnm_duplicate = opts->duplicate; netem->qnm_jitter = opts->jitter; netem->qnm_mask = (SCH_NETEM_ATTR_LATENCY | SCH_NETEM_ATTR_LIMIT | SCH_NETEM_ATTR_LOSS | SCH_NETEM_ATTR_GAP | SCH_NETEM_ATTR_DUPLICATE | SCH_NETEM_ATTR_JITTER); len = tc->tc_opts->d_size - sizeof(*opts); if (len > 0) { struct nlattr *tb[TCA_NETEM_MAX+1]; err = nla_parse(tb, TCA_NETEM_MAX, (struct nlattr *) (tc->tc_opts->d_data + sizeof(*opts)), len, netem_policy); if (err < 0) { free(netem); return err; } if (tb[TCA_NETEM_CORR]) { struct tc_netem_corr cor; nla_memcpy(&cor, tb[TCA_NETEM_CORR], sizeof(cor)); netem->qnm_corr.nmc_delay = cor.delay_corr; netem->qnm_corr.nmc_loss = cor.loss_corr; netem->qnm_corr.nmc_duplicate = cor.dup_corr; netem->qnm_mask |= (SCH_NETEM_ATTR_DELAY_CORR | SCH_NETEM_ATTR_LOSS_CORR | SCH_NETEM_ATTR_DUP_CORR); } if (tb[TCA_NETEM_REORDER]) { struct tc_netem_reorder ro; nla_memcpy(&ro, tb[TCA_NETEM_REORDER], sizeof(ro)); netem->qnm_ro.nmro_probability = ro.probability; netem->qnm_ro.nmro_correlation = ro.correlation; netem->qnm_mask |= (SCH_NETEM_ATTR_RO_PROB | SCH_NETEM_ATTR_RO_CORR); } if (tb[TCA_NETEM_CORRUPT]) { struct tc_netem_corrupt corrupt; nla_memcpy(&corrupt, tb[TCA_NETEM_CORRUPT], sizeof(corrupt)); netem->qnm_crpt.nmcr_probability = corrupt.probability; netem->qnm_crpt.nmcr_correlation = corrupt.correlation; netem->qnm_mask |= (SCH_NETEM_ATTR_CORRUPT_PROB | SCH_NETEM_ATTR_CORRUPT_CORR); } /* sch_netem does not currently dump TCA_NETEM_DELAY_DIST */ netem->qnm_dist.dist_data = NULL; netem->qnm_dist.dist_size = 0; } return 0; }
static int ipvs_services_parse_cb(struct nl_msg *msg, void *arg) { struct nlmsghdr *nlh = nlmsg_hdr(msg); struct nlattr *attrs[IPVS_CMD_ATTR_MAX + 1]; struct nlattr *svc_attrs[IPVS_SVC_ATTR_MAX + 1]; struct ip_vs_get_services **getp = (struct ip_vs_get_services **)arg; struct ip_vs_get_services *get = (struct ip_vs_get_services *)*getp; struct ip_vs_flags flags; int i = get->num_services; if (genlmsg_parse(nlh, 0, attrs, IPVS_CMD_ATTR_MAX, ipvs_cmd_policy) != 0) return -1; if (!attrs[IPVS_CMD_ATTR_SERVICE]) return -1; if (nla_parse_nested(svc_attrs, IPVS_SVC_ATTR_MAX, attrs[IPVS_CMD_ATTR_SERVICE], ipvs_service_policy)) return -1; memset(&(get->entrytable[i]), 0, sizeof(get->entrytable[i])); if (!(svc_attrs[IPVS_SVC_ATTR_AF] && (svc_attrs[IPVS_SVC_ATTR_FWMARK] || (svc_attrs[IPVS_SVC_ATTR_PROTOCOL] && svc_attrs[IPVS_SVC_ATTR_ADDR] && svc_attrs[IPVS_SVC_ATTR_PORT])) && svc_attrs[IPVS_SVC_ATTR_SCHED_NAME] && svc_attrs[IPVS_SVC_ATTR_NETMASK] && svc_attrs[IPVS_SVC_ATTR_TIMEOUT] && svc_attrs[IPVS_SVC_ATTR_FLAGS])) return -1; get->entrytable[i].af = nla_get_u16(svc_attrs[IPVS_SVC_ATTR_AF]); if (svc_attrs[IPVS_SVC_ATTR_FWMARK]) get->entrytable[i].fwmark = nla_get_u32(svc_attrs[IPVS_SVC_ATTR_FWMARK]); else { get->entrytable[i].protocol = nla_get_u16(svc_attrs[IPVS_SVC_ATTR_PROTOCOL]); memcpy(&(get->entrytable[i].addr), nla_data(svc_attrs[IPVS_SVC_ATTR_ADDR]), sizeof(get->entrytable[i].addr)); get->entrytable[i].port = nla_get_u16(svc_attrs[IPVS_SVC_ATTR_PORT]); } strncpy(get->entrytable[i].sched_name, nla_get_string(svc_attrs[IPVS_SVC_ATTR_SCHED_NAME]), IP_VS_SCHEDNAME_MAXLEN); if (svc_attrs[IPVS_SVC_ATTR_PE_NAME]) strncpy(get->entrytable[i].pe_name, nla_get_string(svc_attrs[IPVS_SVC_ATTR_PE_NAME]), IP_VS_PENAME_MAXLEN); get->entrytable[i].netmask = nla_get_u32(svc_attrs[IPVS_SVC_ATTR_NETMASK]); get->entrytable[i].timeout = nla_get_u32(svc_attrs[IPVS_SVC_ATTR_TIMEOUT]); nla_memcpy(&flags, svc_attrs[IPVS_SVC_ATTR_FLAGS], sizeof(flags)); get->entrytable[i].flags = flags.flags & flags.mask; if (ipvs_parse_stats(&(get->entrytable[i].stats), svc_attrs[IPVS_SVC_ATTR_STATS]) != 0) return -1; get->entrytable[i].num_dests = 0; i++; get->num_services = i; get = realloc(get, sizeof(*get) + sizeof(ipvs_service_entry_t) * (get->num_services + 1)); *getp = get; return 0; }
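For context, the callback above is the kind of function libnl invokes for every valid reply of a Generic Netlink dump. A hedged sketch of how it might be wired up with libnl-3 follows; the helper name and the assumption that *getp was pre-sized by the caller are ours, and the genl family id is assumed to have been resolved beforehand (e.g. with genl_ctrl_resolve()).

#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/ip_vs.h>

/* Hypothetical libnl-3 wiring for the dump callback above. */
static int ipvs_dump_services(struct nl_sock *sk, int family_id,
			      struct ip_vs_get_services **getp)
{
	struct nl_msg *msg;
	int err;

	msg = nlmsg_alloc();
	if (!msg)
		return -NLE_NOMEM;

	if (!genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family_id, 0,
			 NLM_F_DUMP, IPVS_CMD_GET_SERVICE, IPVS_GENL_VERSION)) {
		nlmsg_free(msg);
		return -NLE_MSGSIZE;
	}

	/* every valid reply runs ipvs_services_parse_cb(), which grows *getp */
	nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM,
			    ipvs_services_parse_cb, getp);

	err = nl_send_auto(sk, msg);
	nlmsg_free(msg);
	if (err < 0)
		return err;

	return nl_recvmsgs_default(sk);
}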
int idiagnl_msg_parse(struct nlmsghdr *nlh, struct idiagnl_msg **result) { struct idiagnl_msg *msg = NULL; struct inet_diag_msg *raw_msg = NULL; struct nl_addr *src = NULL, *dst = NULL; struct nlattr *tb[IDIAG_ATTR_MAX]; int err = 0; msg = idiagnl_msg_alloc(); if (!msg) goto errout_nomem; err = nlmsg_parse(nlh, sizeof(struct inet_diag_msg), tb, IDIAG_ATTR_MAX, ext_policy); if (err < 0) goto errout; raw_msg = nlmsg_data(nlh); msg->idiag_family = raw_msg->idiag_family; msg->idiag_state = raw_msg->idiag_state; msg->idiag_timer = raw_msg->idiag_timer; msg->idiag_retrans = raw_msg->idiag_retrans; msg->idiag_expires = raw_msg->idiag_expires; msg->idiag_rqueue = raw_msg->idiag_rqueue; msg->idiag_wqueue = raw_msg->idiag_wqueue; msg->idiag_uid = raw_msg->idiag_uid; msg->idiag_inode = raw_msg->idiag_inode; msg->idiag_sport = raw_msg->id.idiag_sport; msg->idiag_dport = raw_msg->id.idiag_dport; msg->idiag_ifindex = raw_msg->id.idiag_if; dst = nl_addr_build(raw_msg->idiag_family, raw_msg->id.idiag_dst, sizeof(raw_msg->id.idiag_dst)); if (!dst) goto errout_nomem; err = idiagnl_msg_set_dst(msg, dst); if (err < 0) goto errout; nl_addr_put(dst); src = nl_addr_build(raw_msg->idiag_family, raw_msg->id.idiag_src, sizeof(raw_msg->id.idiag_src)); if (!src) goto errout_nomem; err = idiagnl_msg_set_src(msg, src); if (err < 0) goto errout; nl_addr_put(src); if (tb[IDIAG_ATTR_TOS]) msg->idiag_tos = nla_get_u8(tb[IDIAG_ATTR_TOS]); if (tb[IDIAG_ATTR_TCLASS]) msg->idiag_tclass = nla_get_u8(tb[IDIAG_ATTR_TCLASS]); if (tb[IDIAG_ATTR_SHUTDOWN]) msg->idiag_shutdown = nla_get_u8(tb[IDIAG_ATTR_SHUTDOWN]); if (tb[IDIAG_ATTR_CONG]) msg->idiag_cong = nla_strdup(tb[IDIAG_ATTR_CONG]); if (tb[IDIAG_ATTR_INFO]) nla_memcpy(&msg->idiag_tcpinfo, tb[IDIAG_ATTR_INFO], sizeof(msg->idiag_tcpinfo)); if (tb[IDIAG_ATTR_MEMINFO]) { struct idiagnl_meminfo *minfo = idiagnl_meminfo_alloc(); struct inet_diag_meminfo *raw_minfo = NULL; if (!minfo) goto errout_nomem; raw_minfo = (struct inet_diag_meminfo *) nla_data(tb[IDIAG_ATTR_MEMINFO]); idiagnl_meminfo_set_rmem(minfo, raw_minfo->idiag_rmem); idiagnl_meminfo_set_wmem(minfo, raw_minfo->idiag_wmem); idiagnl_meminfo_set_fmem(minfo, raw_minfo->idiag_fmem); idiagnl_meminfo_set_tmem(minfo, raw_minfo->idiag_tmem); msg->idiag_meminfo = minfo; } if (tb[IDIAG_ATTR_VEGASINFO]) { struct idiagnl_vegasinfo *vinfo = idiagnl_vegasinfo_alloc(); struct tcpvegas_info *raw_vinfo = NULL; if (!vinfo) goto errout_nomem; raw_vinfo = (struct tcpvegas_info *) nla_data(tb[IDIAG_ATTR_VEGASINFO]); idiagnl_vegasinfo_set_enabled(vinfo, raw_vinfo->tcpv_enabled); idiagnl_vegasinfo_set_rttcnt(vinfo, raw_vinfo->tcpv_rttcnt); idiagnl_vegasinfo_set_rtt(vinfo, raw_vinfo->tcpv_rtt); idiagnl_vegasinfo_set_minrtt(vinfo, raw_vinfo->tcpv_minrtt); msg->idiag_vegasinfo = vinfo; } if (tb[IDIAG_ATTR_SKMEMINFO]) nla_memcpy(&msg->idiag_skmeminfo, tb[IDIAG_ATTR_SKMEMINFO], sizeof(msg->idiag_skmeminfo)); *result = msg; return 0; errout: idiagnl_msg_put(msg); return err; errout_nomem: err = -NLE_NOMEM; goto errout; }