static int validate_nla(struct nlattr *nla, int maxtype, struct nla_policy *policy) { struct nla_policy *pt; unsigned int minlen = 0; int type = nla_type(nla); if (type < 0 || type > maxtype) return 0; pt = &policy[type]; if (pt->type > NLA_TYPE_MAX) BUG(); if (pt->minlen) minlen = pt->minlen; else if (pt->type != NLA_UNSPEC) minlen = nla_attr_minlen[pt->type]; if (nla_len(nla) < minlen) return -NLE_RANGE; if (pt->maxlen && nla_len(nla) > pt->maxlen) return -NLE_RANGE; if (pt->type == NLA_STRING) { char *data = nla_data(nla); if (data[nla_len(nla) - 1] != '\0') return -NLE_INVAL; } return 0; }
/**
 * Create attribute index based on a stream of attributes.
 * @arg tb Index array to be filled (maxtype+1 elements).
 * @arg maxtype Maximum attribute type expected and accepted.
 * @arg head Head of attribute stream.
 * @arg len Length of attribute stream.
 * @arg policy Attribute validation policy.
 *
 * Iterates over the stream of attributes and stores a pointer to each
 * attribute in the index array using the attribute type as index to
 * the array. Attribute with a type greater than the maximum type
 * specified will be silently ignored in order to maintain backwards
 * compatibility. If \a policy is not NULL, the attribute will be
 * validated using the specified policy.
 *
 * @see nla_validate
 * @return 0 on success or a negative error code.
 */
int nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, int len, struct nla_policy *policy)
{
	struct nlattr *nla;
	int rem, err;

	/* Start from a clean index; a NULL slot means "attribute absent". */
	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));

	nla_for_each_attr(nla, head, len, rem) {
		int type = nla_type(nla);

		/* Types above maxtype are skipped for forward compatibility.
		 * NOTE(review): type 0 is NOT filtered here, so tb[0] can be
		 * populated — confirm that is intended for this caller. */
		if (type > maxtype)
			continue;

		if (policy) {
			err = validate_nla(nla, maxtype, policy);
			if (err < 0)
				/* NOTE(review): the errout label lies beyond
				 * this excerpt. */
				goto errout;
		}

		/* On duplicates the last occurrence wins; only a debug
		 * message records the earlier one being dropped. */
		if (tb[type])
			NL_DBG(1, "Attribute of type %#x found multiple times in message, "
				"previous attribute is being ignored.\n", type);

		tb[type] = nla;
	}
static int output_userspace(struct datapath *dp, struct sk_buff *skb, const struct nlattr *attr) { struct dp_upcall_info upcall; const struct nlattr *a; int rem; upcall.cmd = OVS_PACKET_CMD_ACTION; upcall.key = &OVS_CB(skb)->flow->key; upcall.userdata = NULL; upcall.pid = 0; for (a = nla_data(attr), rem = nla_len(attr); rem > 0; a = nla_next(a, &rem)) { switch (nla_type(a)) { case OVS_USERSPACE_ATTR_USERDATA: upcall.userdata = a; break; case OVS_USERSPACE_ATTR_PID: upcall.pid = nla_get_u32(a); break; } } return ovs_dp_upcall(dp, skb, &upcall); }
/*
 * Pretty-print a nested list of OVS flow actions to stdout, one token
 * per action, each followed by a single space.
 */
static void output_actions(struct nlattr *parent)
{
	struct nlattr *act;
	int left;

	nla_for_each_nested(act, parent, left) {
		int kind = nla_type(act);

		if (kind == OVS_ACTION_ATTR_OUTPUT) {
			printf("output %d", nla_get_u32(act));
		} else if (kind == OVS_ACTION_ATTR_USERSPACE) {
			printf("pktin");
		} else if (kind == OVS_ACTION_ATTR_POP_VLAN) {
			printf("pop-vlan");
		} else if (kind == OVS_ACTION_ATTR_PUSH_VLAN) {
			struct ovs_action_push_vlan *pv = nla_data(act);

			/* TCI layout: VID in the low 12 bits, PCP in the
			 * top 3 bits. */
			printf("push-vlan { vid=%u pcp=%d }",
			       ntohs(pv->vlan_tci) & 0xfff,
			       ntohs(pv->vlan_tci) >> 13);
		} else if (kind == OVS_ACTION_ATTR_SET) {
			printf("set { ");
			output_key(act);
			printf("}");
		} else {
			printf("?");
		}
		printf(" ");
	}
}
static int execute_set_action(struct sk_buff *skb, const struct nlattr *nested_attr) { int err = 0; switch (nla_type(nested_attr)) { case OVS_KEY_ATTR_PRIORITY: skb->priority = nla_get_u32(nested_attr); break; case OVS_KEY_ATTR_ETHERNET: err = set_eth_addr(skb, nla_data(nested_attr)); break; case OVS_KEY_ATTR_IPV4: err = set_ipv4(skb, nla_data(nested_attr)); break; case OVS_KEY_ATTR_TCP: err = set_tcp(skb, nla_data(nested_attr)); break; case OVS_KEY_ATTR_UDP: err = set_udp(skb, nla_data(nested_attr)); break; } return err; }
/*
 * Validate the virtual-queue list attribute: every nested attribute
 * must be a well-formed TCA_GRED_VQ_ENTRY and the list must not carry
 * trailing bytes. Returns 0 or a negative errno with an extack
 * message set.
 */
static int gred_vqs_validate(struct gred_sched *table, u32 cdp, struct nlattr *vqs, struct netlink_ext_ack *extack)
{
	const struct nlattr *entry;
	int left, ret;

	/* Policy-check the whole nest before walking it. */
	ret = nla_validate_nested_deprecated(vqs, TCA_GRED_VQ_ENTRY_MAX,
					     gred_vqe_policy, extack);
	if (ret < 0)
		return ret;

	nla_for_each_nested(entry, vqs, left) {
		if (nla_type(entry) != TCA_GRED_VQ_ENTRY) {
			NL_SET_ERR_MSG_MOD(extack, "GRED_VQ_LIST can contain only entry attributes");
			return -EINVAL;
		}

		ret = gred_vq_validate(table, cdp, entry, extack);
		if (ret)
			return ret;
	}

	/* Leftover bytes mean a malformed stream. */
	if (left > 0) {
		NL_SET_ERR_MSG_MOD(extack, "Trailing data after parsing virtual queue list");
		return -EINVAL;
	}

	return 0;
}
static int sample(struct datapath *dp, struct sk_buff *skb, const struct nlattr *attr, struct ovs_key_ipv4_tunnel *tun_key) { const struct nlattr *acts_list = NULL; const struct nlattr *a; int rem; for (a = nla_data(attr), rem = nla_len(attr); rem > 0; a = nla_next(a, &rem)) { switch (nla_type(a)) { case OVS_SAMPLE_ATTR_PROBABILITY: if (net_random() >= nla_get_u32(a)) return 0; break; case OVS_SAMPLE_ATTR_ACTIONS: acts_list = a; break; } } return do_execute_actions(dp, skb, nla_data(acts_list), nla_len(acts_list), tun_key, true); }
/*
 * Copy the RTAX_* metrics supplied in the RTA_METRICS attribute stream
 * @mx (@mx_len bytes) into the dst's metrics array.
 *
 * For host routes the existing metrics area is made writable in place;
 * otherwise a fresh zeroed array is allocated and installed on the dst
 * via dst_init_metrics().
 */
static int fib6_commit_metrics(struct dst_entry *dst, struct nlattr *mx, int mx_len)
{
	struct nlattr *nla;
	int remaining;
	u32 *mp;

	if (dst->flags & DST_HOST) {
		mp = dst_metrics_write_ptr(dst);
	} else {
		mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
		if (!mp)
			return -ENOMEM;
		dst_init_metrics(dst, mp, 0);
	}

	nla_for_each_attr(nla, mx, mx_len, remaining) {
		int type = nla_type(nla);

		if (type) {
			/* NOTE(review): erroring out here leaves any metrics
			 * already copied in place on the dst — confirm the
			 * caller discards the route on failure. */
			if (type > RTAX_MAX)
				return -EINVAL;

			/* RTAX_* constants are 1-based; the array is 0-based. */
			mp[type - 1] = nla_get_u32(nla);
		}
	}
	/* NOTE(review): excerpt ends here; the function's final return is
	 * outside this view. */
/**
 * Create attribute index based on a stream of attributes.
 * @arg tb Index array to be filled (maxtype+1 elements).
 * @arg maxtype Maximum attribute type expected and accepted.
 * @arg head Head of attribute stream.
 * @arg len Length of attribute stream.
 * @arg policy Attribute validation policy.
 *
 * Iterates over the stream of attributes and stores a pointer to each
 * attribute in the index array using the attribute type as index to
 * the array. Attribute with a type greater than the maximum type
 * specified will be silently ignored in order to maintain backwards
 * compatibility. If \a policy is not NULL, the attribute will be
 * validated using the specified policy.
 *
 * @see nla_validate
 * @return 0 on success or a negative error code.
 */
int nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, int len, struct nla_policy *policy)
{
	struct nlattr *nla;
	int rem, err;

	/* NULL out the index so absent attributes read back as NULL. */
	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));

	nla_for_each_attr(nla, head, len, rem) {
		int type = nla_type(nla);

		/* Attribute type 0 is illegal; skip it so tb[0] is never
		 * populated with a bogus entry. */
		if (type == 0) {
			NL_DBG(1, "Illegal nla->nla_type == 0\n");
			continue;
		}

		if (type <= maxtype) {
			if (policy) {
				err = validate_nla(nla, maxtype, policy);
				if (err < 0)
					/* NOTE(review): the errout label lies
					 * beyond this excerpt. */
					goto errout;
			}

			/* On duplicates the last occurrence wins. */
			tb[type] = nla;
		}
	}
/*
 * Parse the bridge IFLA_AF_SPEC attribute stream and record the port's
 * VLAN configuration (VLAN bitmap, untagged set, PVID) into the
 * bridge_data passed via @data.
 *
 * IFLA_BRIDGE_VLAN_INFO entries describe either a single VID or a
 * RANGE_BEGIN/RANGE_END pair; the flags of both ends of a range must
 * agree (ignoring the range marker bits themselves).
 *
 * Returns 0 on success or -EINVAL on malformed input.
 */
static int bridge_parse_af_full(struct rtnl_link *link, struct nlattr *attr_full, void *data)
{
	struct bridge_data *bd = data;
	struct bridge_vlan_info *vinfo = NULL;
	uint16_t vid_range_start = 0;
	/* -1 (i.e. 0xffff in uint16_t) marks "no range currently open". */
	uint16_t vid_range_flags = -1;
	struct nlattr *attr;
	int remaining;

	nla_for_each_nested(attr, attr_full, remaining) {
		if (nla_type(attr) != IFLA_BRIDGE_VLAN_INFO)
			continue;

		if (nla_len(attr) != sizeof(struct bridge_vlan_info))
			return -EINVAL;

		vinfo = nla_data(attr);

		/* VID 0 and values at/above VLAN_VID_MASK are invalid. */
		if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK)
			return -EINVAL;

		if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
			/* Remember the start; the range is committed when the
			 * matching RANGE_END entry arrives. */
			vid_range_start = vinfo->vid;
			vid_range_flags = (vinfo->flags ^ BRIDGE_VLAN_INFO_RANGE_BEGIN);
			continue;
		}

		if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_END) {
			/* sanity check the range flags */
			if (vid_range_flags != (vinfo->flags ^ BRIDGE_VLAN_INFO_RANGE_END)) {
				NL_DBG(1, "VLAN range flags differ; can not handle it.\n");
				return -EINVAL;
			}
		} else {
			/* Standalone entry: degenerate range of one VID. */
			vid_range_start = vinfo->vid;
		}

		/* Commit every VID in [vid_range_start, vinfo->vid]. */
		for (; vid_range_start <= vinfo->vid; vid_range_start++) {
			if (vinfo->flags & BRIDGE_VLAN_INFO_PVID)
				bd->vlan_info.pvid = vinfo->vid;

			if (vinfo->flags & BRIDGE_VLAN_INFO_UNTAGGED)
				set_bit(vid_range_start, bd->vlan_info.untagged_bitmap);

			set_bit(vid_range_start, bd->vlan_info.vlan_bitmap);
			bd->ce_mask |= BRIDGE_ATTR_PORT_VLAN;
		}

		/* Range fully handled; reset the open-range marker. */
		vid_range_flags = -1;
	}

	return 0;
}
/*
 * Apply every TCA_GRED_VQ_ENTRY found in the virtual-queue list.
 * Anything else is silently skipped (the list has been validated
 * beforehand).
 */
static void gred_vqs_apply(struct gred_sched *table, struct nlattr *vqs)
{
	const struct nlattr *entry;
	int left;

	nla_for_each_nested(entry, vqs, left) {
		if (nla_type(entry) == TCA_GRED_VQ_ENTRY)
			gred_vq_apply(table, entry);
	}
}
/*
 * Convert the RTA_METRICS attribute stream @fc_mx (@fc_mx_len bytes)
 * into the RTAX_*-indexed @metrics array, validating and clamping the
 * individual values. Returns 0 on success or -EINVAL with an extack
 * message on malformed input.
 */
static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx, int fc_mx_len, u32 *metrics, struct netlink_ext_ack *extack)
{
	bool ecn_ca = false;
	struct nlattr *nla;
	int remaining;

	/* No metrics attribute at all is fine: leave @metrics untouched. */
	if (!fc_mx)
		return 0;

	nla_for_each_attr(nla, fc_mx, fc_mx_len, remaining) {
		int type = nla_type(nla);
		u32 val;

		if (!type)
			continue;
		if (type > RTAX_MAX) {
			NL_SET_ERR_MSG(extack, "Invalid metric type");
			return -EINVAL;
		}

		if (type == RTAX_CC_ALGO) {
			/* The congestion-control algorithm arrives as a name
			 * string and is translated to its numeric key. */
			char tmp[TCP_CA_NAME_MAX];

			nla_strlcpy(tmp, nla, sizeof(tmp));
			val = tcp_ca_get_key_by_name(net, tmp, &ecn_ca);
			if (val == TCP_CA_UNSPEC) {
				NL_SET_ERR_MSG(extack, "Unknown tcp congestion algorithm");
				return -EINVAL;
			}
		} else {
			/* Every other metric must be a plain u32. */
			if (nla_len(nla) != sizeof(u32)) {
				NL_SET_ERR_MSG_ATTR(extack, nla, "Invalid attribute in metrics");
				return -EINVAL;
			}
			val = nla_get_u32(nla);
		}

		/* Clamp metrics with protocol-imposed upper bounds. */
		if (type == RTAX_ADVMSS && val > 65535 - 40)
			val = 65535 - 40;
		if (type == RTAX_MTU && val > 65535 - 15)
			val = 65535 - 15;
		if (type == RTAX_HOPLIMIT && val > 255)
			val = 255;
		if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK)) {
			NL_SET_ERR_MSG(extack, "Unknown flag set in feature mask in metrics attribute");
			return -EINVAL;
		}

		/* RTAX_* constants are 1-based; the array is 0-based. */
		metrics[type - 1] = val;
	}
	/* NOTE(review): excerpt ends inside the function; the remainder
	 * (e.g. ECN handling and the final return) is outside this view. */
/*
 * Configure netem's correlated-loss generator from the nested loss
 * attribute: either a 4-state Markov (GI) model or a Gilbert-Elliot
 * (GE) model. Unknown loss types are rejected with -EINVAL.
 */
static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			/* Size is checked before any dereference of gi. */
			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			/* Load the transition probabilities of the 4-state
			 * model; start in state 1. */
			q->clg.state = 1;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;

			/* Gilbert-Elliot parameters; start in state 1. */
			q->clg.state = 1;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}
	/* NOTE(review): excerpt ends here; the function's final return is
	 * outside this view. */
/*
 * Apply one OVS "set" action. Tunnel-related keys update
 * OVS_CB(skb)->tun_key; other keys rewrite packet fields through the
 * set_* helpers, whose errno is propagated.
 */
static int execute_set_action(struct sk_buff *skb, const struct nlattr *nested_attr, struct ovs_key_ipv4_tunnel *tun_key)
{
	int err = 0;

	switch (nla_type(nested_attr)) {
	case OVS_KEY_ATTR_PRIORITY:
		skb->priority = nla_get_u32(nested_attr);
		break;

	case OVS_KEY_ATTR_TUN_ID:
		/* If we're only using the TUN_ID action, store the value in a
		 * temporary instance of struct ovs_key_ipv4_tunnel on the stack.
		 * If both IPV4_TUNNEL and TUN_ID are being used together we
		 * can't write into the IPV4_TUNNEL action, so make a copy and
		 * write into that version. */
		if (!OVS_CB(skb)->tun_key)
			memset(tun_key, 0, sizeof(*tun_key));
		else if (OVS_CB(skb)->tun_key != tun_key)
			memcpy(tun_key, OVS_CB(skb)->tun_key, sizeof(*tun_key));

		OVS_CB(skb)->tun_key = tun_key;
		OVS_CB(skb)->tun_key->tun_id = nla_get_be64(nested_attr);
		break;

	case OVS_KEY_ATTR_IPV4_TUNNEL:
		/* NOTE(review): stores a pointer straight into the attribute
		 * payload — assumes the actions buffer outlives packet
		 * processing; confirm with the caller. */
		OVS_CB(skb)->tun_key = nla_data(nested_attr);
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, nla_data(nested_attr));
		break;
	}

	return err;
}
static int execute_set_action(struct sk_buff *skb, const struct nlattr *nested_attr, struct ovs_key_ipv4_tunnel *tun_key) { int err = 0; switch (nla_type(nested_attr)) { case OVS_KEY_ATTR_PRIORITY: skb->priority = nla_get_u32(nested_attr); break; case OVS_KEY_ATTR_TUN_ID: if (!OVS_CB(skb)->tun_key) { /* If tun_key is NULL for this skb, assign it to * a value the caller passed in for action processing * and output. This can disappear once we drop support * for setting tun_id outside of tun_key. */ memset(tun_key, 0, sizeof(struct ovs_key_ipv4_tunnel)); OVS_CB(skb)->tun_key = tun_key; } OVS_CB(skb)->tun_key->tun_id = nla_get_be64(nested_attr); OVS_CB(skb)->tun_key->tun_flags |= OVS_FLOW_TNL_F_KEY; break; case OVS_KEY_ATTR_IPV4_TUNNEL: OVS_CB(skb)->tun_key = nla_data(nested_attr); break; case OVS_KEY_ATTR_ETHERNET: err = set_eth_addr(skb, nla_data(nested_attr)); break; case OVS_KEY_ATTR_IPV4: err = set_ipv4(skb, nla_data(nested_attr)); break; case OVS_KEY_ATTR_TCP: err = set_tcp(skb, nla_data(nested_attr)); break; case OVS_KEY_ATTR_UDP: err = set_udp(skb, nla_data(nested_attr)); break; } return err; }
/*
 * First stage of validating an OVS sample action: index its nested
 * attributes into attrs[], rejecting type 0, out-of-range types and
 * duplicates.
 */
static int validate_sample(const struct nlattr *attr, const struct sw_flow_key *key, int depth)
{
	const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
	const struct nlattr *probability, *actions;
	const struct nlattr *a;
	int rem;

	memset(attrs, 0, sizeof(attrs));

	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);

		/* Reject illegal type 0, unknown types and duplicates. */
		if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type])
			return -EINVAL;

		attrs[type] = a;
	}
	/* NOTE(review): excerpt ends here; the checks consuming
	 * "probability" and "actions" are outside this view. */
static int wl_cfgvendor_gscan_get_channel_list(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len) { int err = 0, type, band; struct wl_priv *cfg = wiphy_priv(wiphy); uint16 *reply = NULL; uint32 reply_len = 0, num_channels, mem_needed; struct sk_buff *skb; type = nla_type(data); if (type == GSCAN_ATTRIBUTE_BAND) { band = nla_get_u32(data); } else { return -1; } reply = dhd_dev_pno_get_gscan(wl_to_prmry_ndev(cfg), DHD_PNO_GET_CHANNEL_LIST, &band, &reply_len); if (!reply) { WL_ERR(("Could not get channel list\n")); err = -EINVAL; return err; } num_channels = reply_len/ sizeof(uint32); mem_needed = reply_len + VENDOR_REPLY_OVERHEAD + (ATTRIBUTE_U32_LEN * 2); /* Alloc the SKB for vendor_event */ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_needed); if (unlikely(!skb)) { WL_ERR(("skb alloc failed")); err = -ENOMEM; goto exit; } nla_put_u32(skb, GSCAN_ATTRIBUTE_NUM_CHANNELS, num_channels); nla_put(skb, GSCAN_ATTRIBUTE_CHANNEL_LIST, reply_len, reply); err = cfg80211_vendor_cmd_reply(skb); if (unlikely(err)) WL_ERR(("Vendor Command reply failed ret:%d \n", err)); exit: kfree(reply); return err; }
/*
 * Parse the IFLA_VFINFO_LIST block of the netlink message.
 * Return zero on success and errno else.
 *
 * Each IFLA_VF_INFO entry may carry a MAC and/or a VLAN attribute; the
 * combination found determines vsi->filter_fmt. An entry without at
 * least a VLAN is rejected.
 */
static int vdpnl_vfinfolist(struct nlattr *vfinfolist, struct vdpnl_vsi *vsi)
{
	struct nlattr *le1, *vf[IFLA_VF_MAX + 1];
	int rem;

	if (!vfinfolist) {
		LLDPAD_ERR("%s:IFLA_VFINFO_LIST missing\n", __func__);
		return -EINVAL;
	}

	nla_for_each_nested(le1, vfinfolist, rem) {
		bool have_mac = false, have_vid = false;

		if (nla_type(le1) != IFLA_VF_INFO) {
			LLDPAD_ERR("%s:parsing of IFLA_VFINFO_LIST failed\n", __func__);
			return -EINVAL;
		}

		/* Explode the per-VF nest into the vf[] index. */
		if (nla_parse_nested(vf, IFLA_VF_MAX, le1, ifla_vf_policy)) {
			LLDPAD_ERR("%s:parsing of IFLA_VF_INFO failed\n", __func__);
			return -EINVAL;
		}

		if (vf[IFLA_VF_MAC]) {
			struct ifla_vf_mac *mac = RTA_DATA(vf[IFLA_VF_MAC]);

			/* NOTE(review): always writes the same maclist slot —
			 * confirm vsi->maclist is positioned by the caller
			 * when the list holds more than one VF. */
			memcpy(vsi->maclist->mac, mac->mac, ETH_ALEN);
			have_mac = true;
		}

		if (vf[IFLA_VF_VLAN]) {
			struct ifla_vf_vlan *vlan = RTA_DATA(vf[IFLA_VF_VLAN]);

			vsi->maclist->vlan = vlan->vlan;
			vsi->maclist->qos = vlan->qos;
			have_vid = true;
		}

		LLDPAD_DBG("%s:have_vid:%d have_mac:%d\n", __func__, have_vid, have_mac);

		/* Derive the filter format from what was present. */
		if (have_vid && have_mac)
			vsi->filter_fmt = VDP22_FFMT_MACVID;
		else if (have_vid)
			vsi->filter_fmt = VDP22_FFMT_VID;
		else
			return -EINVAL;
	}
	/* NOTE(review): excerpt ends here; the function's closing return
	 * is outside this view. */
static int wl_cfgvendor_set_nodfs_flag(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len) { int err = 0; struct bcm_cfg80211 *cfg = wiphy_priv(wiphy); int type; u32 nodfs; type = nla_type(data); if (type == ANDR_WIFI_ATTRIBUTE_NODFS_SET) { nodfs = nla_get_u32(data); err = dhd_dev_set_nodfs(bcmcfg_to_prmry_ndev(cfg), nodfs); } else { err = -1; } return err; }
/*
 * Vendor command handler: scan the attribute stream for the GSCAN
 * enable/flush parameters.
 */
static int wl_cfgvendor_initiate_gscan(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
{
	int err = 0;
	struct wl_priv *cfg = wiphy_priv(wiphy);
	int type, tmp = len;
	/* 0xFF = "not supplied"; flush defaults to off. */
	int run = 0xFF;
	int flush = 0;
	const struct nlattr *iter;

	nla_for_each_attr(iter, data, len, tmp) {
		type = nla_type(iter);
		if (type == GSCAN_ATTRIBUTE_ENABLE_FEATURE)
			run = nla_get_u32(iter);
		else if (type == GSCAN_ATTRIBUTE_FLUSH_FEATURE)
			flush = nla_get_u32(iter);
	}
	/* NOTE(review): excerpt ends here; the code acting on "run" and
	 * "flush" is outside this view. */
static int tunnel_key_opts_set(struct nlattr *nla, struct ip_tunnel_info *info, int opts_len, struct netlink_ext_ack *extack) { info->options_len = opts_len; switch (nla_type(nla_data(nla))) { case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE: #if IS_ENABLED(CONFIG_INET) info->key.tun_flags |= TUNNEL_GENEVE_OPT; return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info), opts_len, extack); #else return -EAFNOSUPPORT; #endif default: NL_SET_ERR_MSG(extack, "Cannot set tunnel options for unknown tunnel type"); return -EINVAL; } }
/*
 * Vendor command handler: extract the country code from the
 * ANDR_WIFI_ATTRIBUTE_COUNTRY attribute. Any unknown attribute aborts
 * the parse with the default BCME_ERROR.
 */
static int wl_cfgvendor_set_country(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len)
{
	int err = BCME_ERROR, rem, type;
	char country_code[WLC_CNTRY_BUF_SZ] = {0};
	const struct nlattr *iter;

	nla_for_each_attr(iter, data, len, rem) {
		type = nla_type(iter);
		switch (type) {
		case ANDR_WIFI_ATTRIBUTE_COUNTRY:
			/* Bounded copy. NOTE(review): if the attribute fills
			 * the whole buffer, country_code is not guaranteed to
			 * be NUL-terminated — verify downstream usage. */
			memcpy(country_code, nla_data(iter),
				MIN(nla_len(iter), WLC_CNTRY_BUF_SZ));
			break;
		default:
			WL_ERR(("Unknown type: %d\n", type));
			return err;
		}
	}
	/* NOTE(review): excerpt ends here; applying the country code and
	 * the final return are outside this view. */
/*
 * Convert the RTA_METRICS attribute stream @fc_mx (@fc_mx_len bytes)
 * into the RTAX_*-indexed @metrics array, validating and clamping
 * individual values. Returns 0 on success or -EINVAL.
 */
int ip_metrics_convert(struct net *net, struct nlattr *fc_mx, int fc_mx_len, u32 *metrics)
{
	bool ecn_ca = false;
	struct nlattr *nla;
	int remaining;

	/* Missing metrics attribute: nothing to do. */
	if (!fc_mx)
		return 0;

	nla_for_each_attr(nla, fc_mx, fc_mx_len, remaining) {
		int type = nla_type(nla);
		u32 val;

		if (!type)
			continue;
		if (type > RTAX_MAX)
			return -EINVAL;

		if (type == RTAX_CC_ALGO) {
			/* The congestion-control algorithm is carried as a
			 * name string and mapped to its numeric key. */
			char tmp[TCP_CA_NAME_MAX];

			nla_strlcpy(tmp, nla, sizeof(tmp));
			val = tcp_ca_get_key_by_name(net, tmp, &ecn_ca);
			if (val == TCP_CA_UNSPEC)
				return -EINVAL;
		} else {
			/* Every other metric must be a plain u32. */
			if (nla_len(nla) != sizeof(u32))
				return -EINVAL;
			val = nla_get_u32(nla);
		}

		/* Clamp metrics with protocol-imposed upper bounds. */
		if (type == RTAX_ADVMSS && val > 65535 - 40)
			val = 65535 - 40;
		if (type == RTAX_MTU && val > 65535 - 15)
			val = 65535 - 15;
		if (type == RTAX_HOPLIMIT && val > 255)
			val = 255;
		if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
			return -EINVAL;

		/* RTAX_* constants are 1-based; the array is 0-based. */
		metrics[type - 1] = val;
	}
	/* NOTE(review): excerpt ends inside the function; the remainder is
	 * outside this view. */
/*
 * Copy the tunnel-key encapsulation options nested in @nla into @dst,
 * or only measure them when @dst is NULL. Returns the total options
 * length on success or a negative errno with an extack message.
 */
static int tunnel_key_copy_opts(const struct nlattr *nla, u8 *dst, int dst_len, struct netlink_ext_ack *extack)
{
	const struct nlattr *list = nla_data(nla);
	const struct nlattr *cursor;
	int list_len = nla_len(nla);
	int total = 0;
	int rem, ret;

	/* Policy-check the whole nest up front. */
	ret = nla_validate(list, list_len, TCA_TUNNEL_KEY_ENC_OPTS_MAX,
			   enc_opts_policy, extack);
	if (ret)
		return ret;

	nla_for_each_attr(cursor, list, list_len, rem) {
		int copied;

		if (nla_type(cursor) != TCA_TUNNEL_KEY_ENC_OPTS_GENEVE)
			continue;

		copied = tunnel_key_copy_geneve_opt(cursor, dst, dst_len, extack);
		if (copied < 0)
			return copied;

		total += copied;
		/* Advance the destination window only in copy mode. */
		if (dst) {
			dst_len -= copied;
			dst += copied;
		}
	}

	if (!total) {
		NL_SET_ERR_MSG(extack, "Empty list of tunnel options");
		return -EINVAL;
	}

	if (rem > 0) {
		NL_SET_ERR_MSG(extack, "Trailing data after parsing tunnel key options attributes");
		return -EINVAL;
	}

	return total;
}
/*
 * Strip the DRBD_GENLA_F_MANDATORY flag from every attribute nested in
 * @nla, failing with -EOPNOTSUPP when a flagged attribute's type lies
 * beyond @maxtype (i.e. a mandatory attribute we do not understand).
 * Note that @nla is reused as the loop cursor below.
 */
static int drbd_nla_check_mandatory(int maxtype, struct nlattr *nla)
{
	struct nlattr *head = nla_data(nla);
	int len = nla_len(nla);
	int rem;

	/*
	 * validate_nla (called from nla_parse_nested) ignores attributes
	 * beyond maxtype, and does not understand the DRBD_GENLA_F_MANDATORY flag.
	 * In order to have it validate attributes with the DRBD_GENLA_F_MANDATORY
	 * flag set also, check and remove that flag before calling
	 * nla_parse_nested.
	 */
	nla_for_each_attr(nla, head, len, rem) {
		/* NOTE: mutates the message in place — the flag bit stays
		 * cleared even when we return an error. */
		if (nla->nla_type & DRBD_GENLA_F_MANDATORY) {
			nla->nla_type &= ~DRBD_GENLA_F_MANDATORY;
			if (nla_type(nla) > maxtype)
				return -EOPNOTSUPP;
		}
	}
	return 0;
}
/*
 * Index the flow-key attributes nested in @attr into @a[] and
 * accumulate their presence bits on top of the mask read from *attrsp.
 * With @nz set, attributes whose payload is all zeroes are skipped
 * (used when parsing masks).
 */
static int __parse_flow_nlattrs(const struct nlattr *attr, const struct nlattr *a[], u64 *attrsp, bool nz)
{
	const struct nlattr *nla;
	u64 attrs;
	int rem;

	attrs = *attrsp;

	nla_for_each_nested(nla, attr, rem) {
		u16 type = nla_type(nla);
		int expected_len;

		if (type > OVS_KEY_ATTR_MAX) {
			OVS_NLERR("Unknown key attribute (type=%d, max=%d).\n",
				  type, OVS_KEY_ATTR_MAX);
			return -EINVAL;
		}

		/* Each attribute may appear at most once. */
		if (attrs & (1ULL << type)) {
			OVS_NLERR("Duplicate key attribute (type %d).\n", type);
			return -EINVAL;
		}

		/* -1 in ovs_key_lens means "variable length". */
		expected_len = ovs_key_lens[type];
		if (nla_len(nla) != expected_len && expected_len != -1) {
			OVS_NLERR("Key attribute has unexpected length (type=%d"
				  ", length=%d, expected=%d).\n",
				  type, nla_len(nla), expected_len);
			return -EINVAL;
		}

		if (!nz || !is_all_zero(nla_data(nla), expected_len)) {
			attrs |= 1ULL << type;
			a[type] = nla;
		}
	}
	/* NOTE(review): excerpt ends here; writing attrs back to *attrsp
	 * happens outside this view. */
static int wl_cfgvendor_set_pno_mac_oui(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len) { int err = 0; struct wl_priv *cfg = wiphy_priv(wiphy); int type; uint8 pno_random_mac_oui[DOT11_OUI_LEN]; type = nla_type(data); if (type == ANDR_WIFI_ATTRIBUTE_PNO_RANDOM_MAC_OUI) { memcpy(pno_random_mac_oui, nla_data(data), DOT11_OUI_LEN); err = dhd_dev_pno_set_mac_oui(wl_to_prmry_ndev(cfg), pno_random_mac_oui); if (unlikely(err)) WL_ERR(("Bad OUI, could not set:%d \n", err)); } else { err = -1; } return err; }
/* 用户空间发送数据过来,调用此接口进行处理 */ static int genlnet_msg_handle(struct sk_buff *skb, struct genl_info *info) { char str[MAX_STR_LEN]; void *data; uint32_t data_len; struct nlattr *nla; nla = info->attrs[MSG_CMD]; if (nla == NULL || nla_type(nla) != MSG_CMD) { printk("%s %d\n", __func__, __LINE__); return -1; } g_pid = info->snd_pid; data = nla_data(nla); data_len = nla_len(nla); memcpy(str, data, data_len); printk("%s\n", str); strcpy(str, "From kernel: hello user."); genlnet_msg_send(g_pid, str, strlen(str) + 1); return 0; }
/*
 * Translate the driver's supported-interface-types attribute into
 * capability flags and feature markers in wiphy_info_data.
 */
static void wiphy_info_supported_iftypes(struct wiphy_info_data *info, struct nlattr *tb)
{
	struct nlattr *mode;
	int rem;

	if (tb == NULL)
		return;

	nla_for_each_nested(mode, tb, rem) {
		int iftype = nla_type(mode);

		if (iftype == NL80211_IFTYPE_AP)
			info->capa->flags |= WPA_DRIVER_FLAGS_AP;
		else if (iftype == NL80211_IFTYPE_MESH_POINT)
			info->capa->flags |= WPA_DRIVER_FLAGS_MESH;
		else if (iftype == NL80211_IFTYPE_ADHOC)
			info->capa->flags |= WPA_DRIVER_FLAGS_IBSS;
		else if (iftype == NL80211_IFTYPE_P2P_DEVICE)
			info->capa->flags |= WPA_DRIVER_FLAGS_DEDICATED_P2P_DEVICE;
		else if (iftype == NL80211_IFTYPE_P2P_GO)
			info->p2p_go_supported = 1;
		else if (iftype == NL80211_IFTYPE_P2P_CLIENT)
			info->p2p_client_supported = 1;
		else if (iftype == NL80211_IFTYPE_MONITOR)
			info->monitor_supported = 1;
	}
}
/*
 * Validate a single attribute against @policy. Types outside
 * [1, maxtype] are accepted unchecked (ignored for forward
 * compatibility). Returns 0 on success, -ERANGE for length
 * violations, or -EINVAL for an unterminated NUL string.
 */
static int validate_nla(const struct nlattr *nla, int maxtype, const struct nla_policy *policy)
{
	const struct nla_policy *pt;
	int minlen = 0, attrlen = nla_len(nla), type = nla_type(nla);

	if (type <= 0 || type > maxtype)
		return 0;

	pt = &policy[type];

	BUG_ON(pt->type > NLA_TYPE_MAX);

	switch (pt->type) {
	case NLA_FLAG:
		/* Flags carry no payload at all. */
		if (attrlen > 0)
			return -ERANGE;
		break;

	case NLA_NUL_STRING:
		if (pt->len)
			minlen = min_t(int, attrlen, pt->len + 1);
		else
			minlen = attrlen;
		/* A NUL byte must occur within the first minlen bytes. */
		if (!minlen || memchr(nla_data(nla), '\0', minlen) == NULL)
			return -EINVAL;
		/* fall through */

	case NLA_STRING:
		if (attrlen < 1)
			return -ERANGE;
		if (pt->len) {
			char *buf = nla_data(nla);

			/* A trailing NUL does not count against pt->len. */
			if (buf[attrlen - 1] == '\0')
				attrlen--;
			if (attrlen > pt->len)
				return -ERANGE;
		}
		break;

	case NLA_BINARY:
		if (pt->len && attrlen > pt->len)
			return -ERANGE;
		break;

	case NLA_NESTED_COMPAT:
		/* Fixed header of pt->len bytes, optionally followed by an
		 * aligned nested attribute block whose own header and length
		 * must fit. */
		if (attrlen < pt->len)
			return -ERANGE;
		if (attrlen < NLA_ALIGN(pt->len))
			break;
		if (attrlen < NLA_ALIGN(pt->len) + NLA_HDRLEN)
			return -ERANGE;
		nla = nla_data(nla) + NLA_ALIGN(pt->len);
		if (attrlen < NLA_ALIGN(pt->len) + NLA_HDRLEN + nla_len(nla))
			return -ERANGE;
		break;

	case NLA_NESTED:
		/* a nested attributes is allowed to be empty; if its not,
		 * it must have a size of at least NLA_HDRLEN.
		 */
		if (attrlen == 0)
			break;
		/* fall through to the default minimum-length check */

	default:
		if (pt->len)
			minlen = pt->len;
		else if (pt->type != NLA_UNSPEC)
			minlen = nla_attr_minlen[pt->type];

		if (attrlen < minlen)
			return -ERANGE;
	}
	/* NOTE(review): excerpt ends here; the final "return 0;" is
	 * outside this view. */