static int dccp_to_nlattr(struct sk_buff *skb, struct nlattr *nla, struct nf_conn *ct) { struct nlattr *nest_parms; spin_lock_bh(&ct->lock); nest_parms = nla_nest_start(skb, CTA_PROTOINFO_DCCP | NLA_F_NESTED); if (!nest_parms) goto nla_put_failure; if (nla_put_u8(skb, CTA_PROTOINFO_DCCP_STATE, ct->proto.dccp.state) || nla_put_u8(skb, CTA_PROTOINFO_DCCP_ROLE, ct->proto.dccp.role[IP_CT_DIR_ORIGINAL]) || nla_put_be64(skb, CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ, cpu_to_be64(ct->proto.dccp.handshake_seq))) goto nla_put_failure; nla_nest_end(skb, nest_parms); spin_unlock_bh(&ct->lock); return 0; nla_put_failure: spin_unlock_bh(&ct->lock); return -1; }
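/*
 * Hedged sketch of the receive side for the nest built above: a parser would
 * unpack CTA_PROTOINFO_DCCP with nla_parse_nested() and read the attributes
 * back. This is not the verbatim upstream nlattr_to_dccp(); the policy table,
 * function name, and pre-extack nla_parse_nested() signature are assumptions
 * matching the era of the dump code.
 */
static const struct nla_policy dccp_nla_policy_sketch[CTA_PROTOINFO_DCCP_MAX + 1] = {
	[CTA_PROTOINFO_DCCP_STATE]	   = { .type = NLA_U8 },
	[CTA_PROTOINFO_DCCP_ROLE]	   = { .type = NLA_U8 },
	[CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ] = { .type = NLA_U64 },
};

static int dccp_protoinfo_parse(const struct nlattr *attr, struct nf_conn *ct)
{
	struct nlattr *tb[CTA_PROTOINFO_DCCP_MAX + 1];
	int err;

	err = nla_parse_nested(tb, CTA_PROTOINFO_DCCP_MAX, attr,
			       dccp_nla_policy_sketch);
	if (err < 0)
		return err;

	/* locking against the dump path (ct->lock) elided for brevity */
	if (tb[CTA_PROTOINFO_DCCP_STATE])
		ct->proto.dccp.state = nla_get_u8(tb[CTA_PROTOINFO_DCCP_STATE]);
	if (tb[CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ])
		ct->proto.dccp.handshake_seq = be64_to_cpu(
			nla_get_be64(tb[CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ]));
	return 0;
}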
static inline int ctnetlink_dump_tuples_proto(struct sk_buff *skb, const struct nf_conntrack_tuple *tuple, struct nf_conntrack_l4proto *l4proto) { int ret = 0; struct nlattr *nest_parms; nest_parms = nla_nest_start(skb, CTA_TUPLE_PROTO | NLA_F_NESTED); if (!nest_parms) goto nla_put_failure; NLA_PUT_U8(skb, CTA_PROTO_NUM, tuple->dst.protonum); if (likely(l4proto->tuple_to_nlattr)) ret = l4proto->tuple_to_nlattr(skb, tuple); nla_nest_end(skb, nest_parms); return ret; nla_put_failure: return -1; }
static int dump_nat_seq_adj(struct sk_buff *skb, const struct nf_nat_seq *natseq, int type) { struct nlattr *nest_parms; nest_parms = nla_nest_start(skb, type | NLA_F_NESTED); if (!nest_parms) goto nla_put_failure; NLA_PUT_BE32(skb, CTA_NAT_SEQ_CORRECTION_POS, htonl(natseq->correction_pos)); NLA_PUT_BE32(skb, CTA_NAT_SEQ_OFFSET_BEFORE, htonl(natseq->offset_before)); NLA_PUT_BE32(skb, CTA_NAT_SEQ_OFFSET_AFTER, htonl(natseq->offset_after)); nla_nest_end(skb, nest_parms); return 0; nla_put_failure: return -1; }
static int ctnetlink_dump_counters(struct sk_buff *skb, const struct nf_conn *ct, enum ip_conntrack_dir dir) { enum ctattr_type type = dir ? CTA_COUNTERS_REPLY: CTA_COUNTERS_ORIG; struct nlattr *nest_count; nest_count = nla_nest_start(skb, type | NLA_F_NESTED); if (!nest_count) goto nla_put_failure; NLA_PUT_BE32(skb, CTA_COUNTERS32_PACKETS, htonl(ct->counters[dir].packets)); NLA_PUT_BE32(skb, CTA_COUNTERS32_BYTES, htonl(ct->counters[dir].bytes)); nla_nest_end(skb, nest_count); return 0; nla_put_failure: return -1; }
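/*
 * Hedged sketch: the 32-bit packet/byte counters above wrap quickly on fast
 * links. A 64-bit variant would carry CTA_COUNTERS_PACKETS/CTA_COUNTERS_BYTES
 * as big-endian 64-bit attributes; the function name is illustrative and the
 * pre-padattr nla_put_be64() signature is assumed to match this code's era.
 */
static int ctnetlink_dump_counters64(struct sk_buff *skb, const struct nf_conn *ct,
				     enum ip_conntrack_dir dir)
{
	enum ctattr_type type = dir ? CTA_COUNTERS_REPLY : CTA_COUNTERS_ORIG;
	struct nlattr *nest_count;

	nest_count = nla_nest_start(skb, type | NLA_F_NESTED);
	if (!nest_count)
		return -1;
	if (nla_put_be64(skb, CTA_COUNTERS_PACKETS,
			 cpu_to_be64(ct->counters[dir].packets)) ||
	    nla_put_be64(skb, CTA_COUNTERS_BYTES,
			 cpu_to_be64(ct->counters[dir].bytes)))
		goto nla_put_failure;
	nla_nest_end(skb, nest_count);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest_count);
	return -1;
}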
static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb) { struct fq_codel_sched_data *q = qdisc_priv(sch); struct nlattr *opts; opts = nla_nest_start(skb, TCA_OPTIONS); if (opts == NULL) goto nla_put_failure; if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET, codel_time_to_us(q->cparams.target)) || nla_put_u32(skb, TCA_FQ_CODEL_LIMIT, sch->limit) || nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL, codel_time_to_us(q->cparams.interval)) || nla_put_u32(skb, TCA_FQ_CODEL_ECN, q->cparams.ecn) || nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM, q->quantum) || nla_put_u32(skb, TCA_FQ_CODEL_DROP_BATCH_SIZE, q->drop_batch_size) || nla_put_u32(skb, TCA_FQ_CODEL_MEMORY_LIMIT, q->memory_limit) || nla_put_u32(skb, TCA_FQ_CODEL_FLOWS, q->flows_cnt)) goto nla_put_failure; if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD && nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD, codel_time_to_us(q->cparams.ce_threshold))) goto nla_put_failure; return nla_nest_end(skb, opts); nla_put_failure: return -1; }
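/*
 * Hedged sketch of the matching configuration path: fq_codel_change() parses
 * the same TCA_FQ_CODEL_* attributes out of TCA_OPTIONS before applying them.
 * The policy entries below are assumptions for illustration, not the verbatim
 * upstream table.
 */
static const struct nla_policy fq_codel_policy_sketch[TCA_FQ_CODEL_MAX + 1] = {
	[TCA_FQ_CODEL_TARGET]		= { .type = NLA_U32 },
	[TCA_FQ_CODEL_LIMIT]		= { .type = NLA_U32 },
	[TCA_FQ_CODEL_INTERVAL]		= { .type = NLA_U32 },
	[TCA_FQ_CODEL_ECN]		= { .type = NLA_U32 },
	[TCA_FQ_CODEL_FLOWS]		= { .type = NLA_U32 },
	[TCA_FQ_CODEL_QUANTUM]		= { .type = NLA_U32 },
	[TCA_FQ_CODEL_DROP_BATCH_SIZE]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_MEMORY_LIMIT]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_CE_THRESHOLD]	= { .type = NLA_U32 },
};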
static int tcp_to_nlattr(struct sk_buff *skb, struct nlattr *nla, struct nf_conn *ct) { struct nlattr *nest_parms; struct nf_ct_tcp_flags tmp = {}; spin_lock_bh(&ct->lock); nest_parms = nla_nest_start(skb, CTA_PROTOINFO_TCP | NLA_F_NESTED); if (!nest_parms) goto nla_put_failure; if (nla_put_u8(skb, CTA_PROTOINFO_TCP_STATE, ct->proto.tcp.state) || nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_ORIGINAL, ct->proto.tcp.seen[0].td_scale) || nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_REPLY, ct->proto.tcp.seen[1].td_scale)) goto nla_put_failure; tmp.flags = ct->proto.tcp.seen[0].flags; if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_ORIGINAL, sizeof(struct nf_ct_tcp_flags), &tmp)) goto nla_put_failure; tmp.flags = ct->proto.tcp.seen[1].flags; if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_REPLY, sizeof(struct nf_ct_tcp_flags), &tmp)) goto nla_put_failure; spin_unlock_bh(&ct->lock); nla_nest_end(skb, nest_parms); return 0; nla_put_failure: spin_unlock_bh(&ct->lock); return -1; }
static int tcf_dump_walker(struct tcf_hashinfo *hinfo, struct sk_buff *skb, struct netlink_callback *cb, struct tc_action *a) { struct hlist_head *head; struct tcf_common *p; int err = 0, index = -1, i = 0, s_i = 0, n_i = 0; struct nlattr *nest; spin_lock_bh(&hinfo->lock); s_i = cb->args[0]; for (i = 0; i < (hinfo->hmask + 1); i++) { head = &hinfo->htab[tcf_hash(i, hinfo->hmask)]; hlist_for_each_entry_rcu(p, head, tcfc_head) { index++; if (index < s_i) continue; a->priv = p; a->order = n_i; nest = nla_nest_start(skb, a->order); if (nest == NULL) goto nla_put_failure; err = tcf_action_dump_1(skb, a, 0, 0); if (err < 0) { index--; nlmsg_trim(skb, nest); goto done; } nla_nest_end(skb, nest); n_i++; if (n_i >= TCA_ACT_MAX_PRIO) goto done; } }
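/*
 * The walker above is truncated after the hash-table loop. Based on the
 * upstream act_api.c implementation, it continues roughly as follows
 * (hedged reconstruction):
 */
done:
	spin_unlock_bh(&hinfo->lock);
	if (n_i)
		cb->args[0] += n_i;
	return n_i;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	goto done;
}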
static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb) { struct tbf_sched_data *q = qdisc_priv(sch); struct nlattr *nest; struct tc_tbf_qopt opt; sch->qstats.backlog = q->qdisc->qstats.backlog; nest = nla_nest_start(skb, TCA_OPTIONS); if (nest == NULL) goto nla_put_failure; opt.limit = q->limit; psched_ratecfg_getrate(&opt.rate, &q->rate); if (tbf_peak_present(q)) psched_ratecfg_getrate(&opt.peakrate, &q->peak); else memset(&opt.peakrate, 0, sizeof(opt.peakrate)); opt.mtu = PSCHED_NS2TICKS(q->mtu); opt.buffer = PSCHED_NS2TICKS(q->buffer); if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt)) goto nla_put_failure; if (q->rate.rate_bytes_ps >= (1ULL << 32) && nla_put_u64_64bit(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps, TCA_TBF_PAD)) goto nla_put_failure; if (tbf_peak_present(q) && q->peak.rate_bytes_ps >= (1ULL << 32) && nla_put_u64_64bit(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps, TCA_TBF_PAD)) goto nla_put_failure; return nla_nest_end(skb, nest); nla_put_failure: nla_nest_cancel(skb, nest); return -1; }
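/*
 * Hedged sketch of the consumer's view: the 64-bit rate attribute is only
 * emitted when the legacy 32-bit field in tc_tbf_qopt cannot hold the rate,
 * so a reader combines both sources. Helper name and tb[] layout are
 * illustrative.
 */
static u64 tbf_effective_rate(struct nlattr *tb[TCA_TBF_MAX + 1],
			      const struct tc_tbf_qopt *qopt)
{
	u64 rate = qopt->rate.rate;	/* legacy 32-bit bytes/sec field */

	if (tb[TCA_TBF_RATE64])
		rate = nla_get_u64(tb[TCA_TBF_RATE64]);
	return rate;
}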
static int plt_power_mode(struct nl80211_state *state, struct nl_cb *cb, struct nl_msg *msg, int argc, char **argv) { struct nlattr *key; unsigned int pmode; if (argc != 1) { fprintf(stderr, "%s> Missing arguments\n", __func__); return 2; } if (strcmp(argv[0], "on") == 0) pmode = 1; else if (strcmp(argv[0], "off") == 0) pmode = 0; else { fprintf(stderr, "%s> Invalid parameter\n", __func__); return 2; } key = nla_nest_start(msg, NL80211_ATTR_TESTDATA); if (!key) { fprintf(stderr, "%s> fail to nla_nest_start()\n", __func__); return 1; } NLA_PUT_U32(msg, WL1271_TM_ATTR_CMD_ID, WL1271_TM_CMD_SET_PLT_MODE); NLA_PUT_U32(msg, WL1271_TM_ATTR_PLT_MODE, pmode); nla_nest_end(msg, key); return 0; nla_put_failure: fprintf(stderr, "%s> building message failed\n", __func__); return 2; }
static int vxlan_get_options(const struct vport *vport, struct sk_buff *skb) { struct vxlan_dev *vxlan = netdev_priv(vport->dev); __be16 dst_port = vxlan->cfg.dst_port; if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, ntohs(dst_port))) return -EMSGSIZE; if (vxlan->flags & VXLAN_F_GBP) { struct nlattr *exts; exts = nla_nest_start(skb, OVS_TUNNEL_ATTR_EXTENSION); if (!exts) return -EMSGSIZE; if (vxlan->flags & VXLAN_F_GBP && nla_put_flag(skb, OVS_VXLAN_EXT_GBP)) return -EMSGSIZE; nla_nest_end(skb, exts); } return 0; }
static int tipc_nl_compat_net_set(struct tipc_nl_compat_cmd_doit *cmd, struct sk_buff *skb, struct tipc_nl_compat_msg *msg) { u32 val; struct nlattr *net; val = ntohl(*(__be32 *)TLV_DATA(msg->req)); net = nla_nest_start(skb, TIPC_NLA_NET); if (!net) return -EMSGSIZE; if (msg->cmd == TIPC_CMD_SET_NODE_ADDR) { if (nla_put_u32(skb, TIPC_NLA_NET_ADDR, val)) return -EMSGSIZE; } else if (msg->cmd == TIPC_CMD_SET_NETID) { if (nla_put_u32(skb, TIPC_NLA_NET_ID, val)) return -EMSGSIZE; } nla_nest_end(skb, net); return 0; }
static int ncsi_write_channel_info(struct sk_buff *skb, struct ncsi_dev_priv *ndp, struct ncsi_channel *nc) { struct ncsi_channel_vlan_filter *ncf; struct ncsi_channel_mode *m; struct nlattr *vid_nest; int i; nla_put_u32(skb, NCSI_CHANNEL_ATTR_ID, nc->id); m = &nc->modes[NCSI_MODE_LINK]; nla_put_u32(skb, NCSI_CHANNEL_ATTR_LINK_STATE, m->data[2]); if (nc->state == NCSI_CHANNEL_ACTIVE) nla_put_flag(skb, NCSI_CHANNEL_ATTR_ACTIVE); if (nc == nc->package->preferred_channel) nla_put_flag(skb, NCSI_CHANNEL_ATTR_FORCED); nla_put_u32(skb, NCSI_CHANNEL_ATTR_VERSION_MAJOR, nc->version.version); nla_put_u32(skb, NCSI_CHANNEL_ATTR_VERSION_MINOR, nc->version.alpha2); nla_put_string(skb, NCSI_CHANNEL_ATTR_VERSION_STR, nc->version.fw_name); vid_nest = nla_nest_start(skb, NCSI_CHANNEL_ATTR_VLAN_LIST); if (!vid_nest) return -ENOMEM; ncf = &nc->vlan_filter; i = -1; while ((i = find_next_bit((void *)&ncf->bitmap, ncf->n_vids, i + 1)) < ncf->n_vids) { if (ncf->vids[i]) nla_put_u16(skb, NCSI_CHANNEL_ATTR_VLAN_ID, ncf->vids[i]); } nla_nest_end(skb, vid_nest); return 0; }
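/*
 * Hedged sketch of a userspace (libnl-style) consumer walking the VLAN list
 * nest built above: nla_for_each_nested() iterates the repeated
 * NCSI_CHANNEL_ATTR_VLAN_ID entries. Function name is illustrative.
 */
static void ncsi_print_vlan_list(struct nlattr *vid_list)
{
	struct nlattr *vid;
	int rem;

	nla_for_each_nested(vid, vid_list, rem)
		printf("vlan %u\n", nla_get_u16(vid));
}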
static int handle_fetch_nvs(struct nl80211_state *state, struct nl_cb *cb, struct nl_msg *msg, int argc, char **argv) { void *map = MAP_FAILED; int fd = -1, retval = 0; struct nlattr *key; struct stat filestat; if (argc != 0) return 1; key = nla_nest_start(msg, NL80211_ATTR_TESTDATA); if (!key) goto nla_put_failure; NLA_PUT_U32(msg, WL1271_TM_ATTR_CMD_ID, WL1271_TM_CMD_NVS_PUSH); NLA_PUT_U32(msg, WL1271_TM_ATTR_IE_ID, WL1271_TM_CMD_NVS_PUSH); nla_nest_end(msg, key); goto cleanup; nla_put_failure: retval = -ENOBUFS; cleanup: if (map != MAP_FAILED) munmap(map, filestat.st_size); if (fd >= 0) close(fd); return retval; }
static int put_char_array(struct sk_buff *skb, const char* const* chars, int type, int nested_type) { struct nlattr* nest_attr; int ret = 0; if ( chars ) { int i=0; nest_attr = nla_nest_start(skb, type); if ( !nest_attr ) { ret = -EMSGSIZE; goto failure; } while ( chars[i] ) { // printk("Putting arg\n"); ret = nla_put_string(skb, nested_type, chars[i]); if (ret != 0) goto failure; i++; } // printk("Putting length: %d\n", i); ret = nla_put_u32(skb, DIRECTOR_A_LENGTH, i); // Count of elements if (ret != 0) goto failure; nla_nest_end(skb, nest_attr); } return ret; failure: minfo(ERR1, "Putting of char array has failed: %d", ret); return ret; }
/** * virNetDevMacVLanCreate: * * @ifname: The name the interface is supposed to have; optional parameter * @type: The type of device, i.e., "macvtap", "macvlan" * @macaddress: The MAC address of the device * @srcdev: The name of the 'link' device * @macvlan_mode: The macvlan mode to use * @retry: Pointer to integer that will be '1' upon return if an interface * with the same name already exists and it is worth to try * again with a different name * * Create a macvtap device with the given properties. * * Returns 0 on success, -1 on fatal error. */ int virNetDevMacVLanCreate(const char *ifname, const char *type, const unsigned char *macaddress, const char *srcdev, uint32_t macvlan_mode, int *retry) { int rc = -1; struct nlmsghdr *resp; struct nlmsgerr *err; struct ifinfomsg ifinfo = { .ifi_family = AF_UNSPEC }; int ifindex; unsigned char *recvbuf = NULL; unsigned int recvbuflen; struct nl_msg *nl_msg; struct nlattr *linkinfo, *info_data; if (virNetDevGetIndex(srcdev, &ifindex) < 0) return -1; *retry = 0; nl_msg = nlmsg_alloc_simple(RTM_NEWLINK, NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL); if (!nl_msg) { virReportOOMError(); return -1; } if (nlmsg_append(nl_msg, &ifinfo, sizeof(ifinfo), NLMSG_ALIGNTO) < 0) goto buffer_too_small; if (nla_put_u32(nl_msg, IFLA_LINK, ifindex) < 0) goto buffer_too_small; if (nla_put(nl_msg, IFLA_ADDRESS, VIR_MAC_BUFLEN, macaddress) < 0) goto buffer_too_small; if (ifname && nla_put(nl_msg, IFLA_IFNAME, strlen(ifname)+1, ifname) < 0) goto buffer_too_small; if (!(linkinfo = nla_nest_start(nl_msg, IFLA_LINKINFO))) goto buffer_too_small; if (nla_put(nl_msg, IFLA_INFO_KIND, strlen(type), type) < 0) goto buffer_too_small; if (macvlan_mode > 0) { if (!(info_data = nla_nest_start(nl_msg, IFLA_INFO_DATA))) goto buffer_too_small; if (nla_put(nl_msg, IFLA_MACVLAN_MODE, sizeof(macvlan_mode), &macvlan_mode) < 0) goto buffer_too_small; nla_nest_end(nl_msg, info_data); } nla_nest_end(nl_msg, linkinfo); if (virNetlinkCommand(nl_msg, &recvbuf, &recvbuflen, 0) < 0) { goto cleanup; } if (recvbuflen < NLMSG_LENGTH(0) || recvbuf == NULL) goto malformed_resp; resp = (struct nlmsghdr *)recvbuf; switch (resp->nlmsg_type) { case NLMSG_ERROR: err = (struct nlmsgerr *)NLMSG_DATA(resp); if (resp->nlmsg_len < NLMSG_LENGTH(sizeof(*err))) goto malformed_resp; switch (err->error) { case 0: break; case -EEXIST: *retry = 1; goto cleanup; default: virReportSystemError(-err->error, _("error creating %s type of interface"), type); goto cleanup; } break; case NLMSG_DONE: break; default: goto malformed_resp; } rc = 0; cleanup: nlmsg_free(nl_msg); VIR_FREE(recvbuf); return rc; malformed_resp: virNetDevError(VIR_ERR_INTERNAL_ERROR, "%s", _("malformed netlink response message")); goto cleanup; buffer_too_small: virNetDevError(VIR_ERR_INTERNAL_ERROR, "%s", _("allocated netlink buffer is too small")); goto cleanup; }
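/*
 * Hedged usage sketch for the function above: create a macvtap in VEPA mode
 * on top of eth0, retrying once with a different name on collision. The MAC
 * address and interface names are illustrative.
 */
static int create_example_macvtap(void)
{
	const unsigned char mac[VIR_MAC_BUFLEN] = {
		0x52, 0x54, 0x00, 0x12, 0x34, 0x56
	};
	int retry = 0;
	int rc;

	rc = virNetDevMacVLanCreate("macvtap0", "macvtap", mac, "eth0",
				    MACVLAN_MODE_VEPA, &retry);
	if (rc < 0 && retry)
		rc = virNetDevMacVLanCreate("macvtap1", "macvtap", mac, "eth0",
					    MACVLAN_MODE_VEPA, &retry);
	return rc;
}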
static int handle_wowlan_enable(struct nl80211_state *state, struct nl_cb *cb, struct nl_msg *msg, int argc, char **argv, enum id_input id) { struct nlattr *wowlan, *pattern; struct nl_msg *patterns = NULL; enum { PS_REG, PS_PAT, } parse_state = PS_REG; int err = -ENOBUFS; unsigned char *pat, *mask; size_t patlen; int patnum = 0; wowlan = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS); if (!wowlan) return -ENOBUFS; while (argc) { switch (parse_state) { case PS_REG: if (strcmp(argv[0], "any") == 0) NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_ANY); else if (strcmp(argv[0], "disconnect") == 0) NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_DISCONNECT); else if (strcmp(argv[0], "magic-packet") == 0) NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT); else if (strcmp(argv[0], "gtk-rekey-failure") == 0) NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE); else if (strcmp(argv[0], "eap-identity-request") == 0) NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST); else if (strcmp(argv[0], "4way-handshake") == 0) NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE); else if (strcmp(argv[0], "rfkill-release") == 0) NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE); else if (strcmp(argv[0], "patterns") == 0) { parse_state = PS_PAT; patterns = nlmsg_alloc(); if (!patterns) { err = -ENOMEM; goto nla_put_failure; } } else { err = 1; goto nla_put_failure; } break; case PS_PAT: if (parse_hex_mask(argv[0], &pat, &patlen, &mask)) { err = 1; goto nla_put_failure; } pattern = nla_nest_start(patterns, ++patnum); NLA_PUT(patterns, NL80211_WOWLAN_PKTPAT_MASK, DIV_ROUND_UP(patlen, 8), mask); NLA_PUT(patterns, NL80211_WOWLAN_PKTPAT_PATTERN, patlen, pat); nla_nest_end(patterns, pattern); free(mask); free(pat); break; } argv++; argc--; } if (patterns) nla_put_nested(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN, patterns); nla_nest_end(msg, wowlan); err = 0; nla_put_failure: nlmsg_free(patterns); return err; }
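/*
 * Illustrative invocation for the handler above (hedged; the exact pattern
 * syntax depends on parse_hex_mask, where '-' is commonly a wildcard byte):
 *
 *   iw phy phy0 wowlan enable magic-packet patterns 41:-:42
 */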
static int vlan_put_attrs(struct nl_msg *msg, struct rtnl_link *link) { struct vlan_info *vi = link->l_info; struct nlattr *data; if (!(data = nla_nest_start(msg, IFLA_INFO_DATA))) return nl_errno(ENOBUFS); if (vi->vi_mask & VLAN_HAS_ID) NLA_PUT_U16(msg, IFLA_VLAN_ID, vi->vi_vlan_id); if (vi->vi_mask & VLAN_HAS_FLAGS) { struct ifla_vlan_flags flags = { .flags = vi->vi_flags, .mask = vi->vi_flags_mask, }; NLA_PUT(msg, IFLA_VLAN_FLAGS, sizeof(flags), &flags); } if (vi->vi_mask & VLAN_HAS_INGRESS_QOS) { struct ifla_vlan_qos_mapping map; struct nlattr *qos; int i; if (!(qos = nla_nest_start(msg, IFLA_VLAN_INGRESS_QOS))) goto nla_put_failure; for (i = 0; i <= VLAN_PRIO_MAX; i++) { if (vi->vi_ingress_qos[i]) { map.from = i; map.to = vi->vi_ingress_qos[i]; NLA_PUT(msg, i, sizeof(map), &map); } } nla_nest_end(msg, qos); } if (vi->vi_mask & VLAN_HAS_EGRESS_QOS) { struct ifla_vlan_qos_mapping map; struct nlattr *qos; int i; if (!(qos = nla_nest_start(msg, IFLA_VLAN_EGRESS_QOS))) goto nla_put_failure; for (i = 0; i < vi->vi_negress; i++) { map.from = vi->vi_egress_qos[i].vm_from; map.to = vi->vi_egress_qos[i].vm_to; NLA_PUT(msg, i, sizeof(map), &map); } nla_nest_end(msg, qos); } nla_nest_end(msg, data); nla_put_failure: return 0; } static struct rtnl_link_info_ops vlan_info_ops = { .io_name = "vlan", .io_alloc = vlan_alloc, .io_parse = vlan_parse, .io_dump[NL_DUMP_BRIEF] = vlan_dump_brief, .io_dump[NL_DUMP_FULL] = vlan_dump_full, .io_clone = vlan_clone, .io_put_attrs = vlan_put_attrs, .io_free = vlan_free, }; int rtnl_link_vlan_set_id(struct rtnl_link *link, int id) { struct vlan_info *vi = link->l_info; if (link->l_info_ops != &vlan_info_ops || !link->l_info_ops) return nl_error(EOPNOTSUPP, "Not a VLAN link"); vi->vi_vlan_id = id; vi->vi_mask |= VLAN_HAS_ID; return 0; } int rtnl_link_vlan_get_id(struct rtnl_link *link) { struct vlan_info *vi = link->l_info; if (link->l_info_ops != &vlan_info_ops || !link->l_info_ops) return nl_error(EOPNOTSUPP, "Not a VLAN link"); if (vi->vi_mask & VLAN_HAS_ID) return vi->vi_vlan_id; else return 0; } int rtnl_link_vlan_set_flags(struct rtnl_link *link, unsigned int flags) { struct vlan_info *vi = link->l_info; if (link->l_info_ops != &vlan_info_ops || !link->l_info_ops) return nl_error(EOPNOTSUPP, "Not a VLAN link"); vi->vi_flags_mask |= flags; vi->vi_flags |= flags; vi->vi_mask |= VLAN_HAS_FLAGS; return 0; } int rtnl_link_vlan_unset_flags(struct rtnl_link *link, unsigned int flags) { struct vlan_info *vi = link->l_info; if (link->l_info_ops != &vlan_info_ops || !link->l_info_ops) return nl_error(EOPNOTSUPP, "Not a VLAN link"); vi->vi_flags_mask |= flags; vi->vi_flags &= ~flags; vi->vi_mask |= VLAN_HAS_FLAGS; return 0; } unsigned int rtnl_link_vlan_get_flags(struct rtnl_link *link) { struct vlan_info *vi = link->l_info; if (link->l_info_ops != &vlan_info_ops || !link->l_info_ops) return nl_error(EOPNOTSUPP, "Not a VLAN link"); return vi->vi_flags; } int rtnl_link_vlan_set_ingress_map(struct rtnl_link *link, int from, uint32_t to) { struct vlan_info *vi = link->l_info; if (link->l_info_ops != &vlan_info_ops || !link->l_info_ops) return nl_error(EOPNOTSUPP, "Not a VLAN link"); if (from < 0 || from > VLAN_PRIO_MAX) return nl_error(EINVAL, "Invalid vlan prio 0..%d", VLAN_PRIO_MAX); vi->vi_ingress_qos[from] = to; vi->vi_mask |= VLAN_HAS_INGRESS_QOS; return 0; } uint32_t *rtnl_link_vlan_get_ingress_map(struct rtnl_link *link) { struct vlan_info *vi = link->l_info; if (link->l_info_ops != &vlan_info_ops || !link->l_info_ops) { nl_error(EOPNOTSUPP, "Not a VLAN link"); 
return NULL; } if (vi->vi_mask & VLAN_HAS_INGRESS_QOS) return vi->vi_ingress_qos; else return NULL; } int rtnl_link_vlan_set_egress_map(struct rtnl_link *link, uint32_t from, int to) { struct vlan_info *vi = link->l_info; if (link->l_info_ops != &vlan_info_ops || !link->l_info_ops) return nl_error(EOPNOTSUPP, "Not a VLAN link"); if (to < 0 || to > VLAN_PRIO_MAX) return nl_error(EINVAL, "Invalid vlan prio 0..%d", VLAN_PRIO_MAX); if (vi->vi_negress >= vi->vi_egress_size) { int new_size = vi->vi_egress_size + 32; void *ptr; /* new_size counts elements, so scale by the element size */ ptr = realloc(vi->vi_egress_qos, new_size * sizeof(struct vlan_map)); if (!ptr) return nl_errno(ENOMEM); vi->vi_egress_qos = ptr; vi->vi_egress_size = new_size; } vi->vi_egress_qos[vi->vi_negress].vm_from = from; vi->vi_egress_qos[vi->vi_negress].vm_to = to; vi->vi_negress++; vi->vi_mask |= VLAN_HAS_EGRESS_QOS; return 0; } struct vlan_map *rtnl_link_vlan_get_egress_map(struct rtnl_link *link, int *negress) { struct vlan_info *vi = link->l_info; if (link->l_info_ops != &vlan_info_ops || !link->l_info_ops) { nl_error(EOPNOTSUPP, "Not a VLAN link"); return NULL; } if (negress == NULL) { nl_error(EINVAL, "Require pointer to store negress"); return NULL; } if (vi->vi_mask & VLAN_HAS_EGRESS_QOS) { *negress = vi->vi_negress; return vi->vi_egress_qos; } else { *negress = 0; return NULL; } } static void __init vlan_init(void) { rtnl_link_register_info(&vlan_info_ops); } static void __exit vlan_exit(void) { rtnl_link_unregister_info(&vlan_info_ops); }
static int handle_bitrates(struct nl80211_state *state, struct nl_msg *msg, int argc, char **argv, enum id_input id) { struct nlattr *nl_rates, *nl_band; int i; bool have_legacy_24 = false, have_legacy_5 = false; uint8_t legacy_24[32], legacy_5[32]; int n_legacy_24 = 0, n_legacy_5 = 0; uint8_t *legacy = NULL; int *n_legacy = NULL; bool have_ht_mcs_24 = false, have_ht_mcs_5 = false; bool have_vht_mcs_24 = false, have_vht_mcs_5 = false; uint8_t ht_mcs_24[77], ht_mcs_5[77]; int n_ht_mcs_24 = 0, n_ht_mcs_5 = 0; struct nl80211_txrate_vht txrate_vht_24 = {}; struct nl80211_txrate_vht txrate_vht_5 = {}; uint8_t *mcs = NULL; int *n_mcs = NULL; char *vht_argv_5[VHT_ARGC_MAX] = {}; char *vht_argv_24[VHT_ARGC_MAX] = {}; char **vht_argv = NULL; int vht_argc_5 = 0; int vht_argc_24 = 0; int *vht_argc = NULL; int sgi_24 = 0, sgi_5 = 0, lgi_24 = 0, lgi_5 = 0; enum { S_NONE, S_LEGACY, S_HT, S_VHT, S_GI, } parser_state = S_NONE; for (i = 0; i < argc; i++) { char *end; double tmpd; long tmpl; if (strcmp(argv[i], "legacy-2.4") == 0) { if (have_legacy_24) return 1; parser_state = S_LEGACY; legacy = legacy_24; n_legacy = &n_legacy_24; have_legacy_24 = true; } else if (strcmp(argv[i], "legacy-5") == 0) { if (have_legacy_5) return 1; parser_state = S_LEGACY; legacy = legacy_5; n_legacy = &n_legacy_5; have_legacy_5 = true; } else if (strcmp(argv[i], "ht-mcs-2.4") == 0) { if (have_ht_mcs_24) return 1; parser_state = S_HT; mcs = ht_mcs_24; n_mcs = &n_ht_mcs_24; have_ht_mcs_24 = true; } else if (strcmp(argv[i], "ht-mcs-5") == 0) { if (have_ht_mcs_5) return 1; parser_state = S_HT; mcs = ht_mcs_5; n_mcs = &n_ht_mcs_5; have_ht_mcs_5 = true; } else if (strcmp(argv[i], "vht-mcs-2.4") == 0) { if (have_vht_mcs_24) return 1; parser_state = S_VHT; vht_argv = vht_argv_24; vht_argc = &vht_argc_24; have_vht_mcs_24 = true; } else if (strcmp(argv[i], "vht-mcs-5") == 0) { if (have_vht_mcs_5) return 1; parser_state = S_VHT; vht_argv = vht_argv_5; vht_argc = &vht_argc_5; have_vht_mcs_5 = true; } else if (strcmp(argv[i], "sgi-2.4") == 0) { sgi_24 = 1; parser_state = S_GI; } else if (strcmp(argv[i], "sgi-5") == 0) { sgi_5 = 1; parser_state = S_GI; } else if (strcmp(argv[i], "lgi-2.4") == 0) { lgi_24 = 1; parser_state = S_GI; } else if (strcmp(argv[i], "lgi-5") == 0) { lgi_5 = 1; parser_state = S_GI; } else switch (parser_state) { case S_LEGACY: tmpd = strtod(argv[i], &end); if (*end != '\0') return 1; if (tmpd < 1 || tmpd > 255 * 2) return 1; legacy[(*n_legacy)++] = tmpd * 2; break; case S_HT: tmpl = strtol(argv[i], &end, 0); if (*end != '\0') return 1; if (tmpl < 0 || tmpl > 255) return 1; mcs[(*n_mcs)++] = tmpl; break; case S_VHT: if (*vht_argc >= VHT_ARGC_MAX) return 1; vht_argv[(*vht_argc)++] = argv[i]; break; case S_GI: break; default: return 1; } } if (have_vht_mcs_24) if(!setup_vht(&txrate_vht_24, vht_argc_24, vht_argv_24)) return -EINVAL; if (have_vht_mcs_5) if(!setup_vht(&txrate_vht_5, vht_argc_5, vht_argv_5)) return -EINVAL; if (sgi_5 && lgi_5) return 1; if (sgi_24 && lgi_24) return 1; nl_rates = nla_nest_start(msg, NL80211_ATTR_TX_RATES); if (!nl_rates) goto nla_put_failure; if (have_legacy_24 || have_ht_mcs_24 || have_vht_mcs_24 || sgi_24 || lgi_24) { nl_band = nla_nest_start(msg, NL80211_BAND_2GHZ); if (!nl_band) goto nla_put_failure; if (have_legacy_24) nla_put(msg, NL80211_TXRATE_LEGACY, n_legacy_24, legacy_24); if (have_ht_mcs_24) nla_put(msg, NL80211_TXRATE_HT, n_ht_mcs_24, ht_mcs_24); if (have_vht_mcs_24) nla_put(msg, NL80211_TXRATE_VHT, sizeof(txrate_vht_24), &txrate_vht_24); if (sgi_24) nla_put_u8(msg, 
NL80211_TXRATE_GI, NL80211_TXRATE_FORCE_SGI); if (lgi_24) nla_put_u8(msg, NL80211_TXRATE_GI, NL80211_TXRATE_FORCE_LGI); nla_nest_end(msg, nl_band); } if (have_legacy_5 || have_ht_mcs_5 || have_vht_mcs_5 || sgi_5 || lgi_5) { nl_band = nla_nest_start(msg, NL80211_BAND_5GHZ); if (!nl_band) goto nla_put_failure; if (have_legacy_5) nla_put(msg, NL80211_TXRATE_LEGACY, n_legacy_5, legacy_5); if (have_ht_mcs_5) nla_put(msg, NL80211_TXRATE_HT, n_ht_mcs_5, ht_mcs_5); if (have_vht_mcs_5) nla_put(msg, NL80211_TXRATE_VHT, sizeof(txrate_vht_5), &txrate_vht_5); if (sgi_5) nla_put_u8(msg, NL80211_TXRATE_GI, NL80211_TXRATE_FORCE_SGI); if (lgi_5) nla_put_u8(msg, NL80211_TXRATE_GI, NL80211_TXRATE_FORCE_LGI); nla_nest_end(msg, nl_band); } nla_nest_end(msg, nl_rates); return 0; nla_put_failure: return -ENOBUFS; }
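/*
 * Illustrative invocation for handle_bitrates() above (hedged): restrict the
 * 5 GHz band to two legacy rates plus HT MCS 0-7 with short guard interval:
 *
 *   iw dev wlan0 set bitrates legacy-5 6 9 ht-mcs-5 0 1 2 3 4 5 6 7 sgi-5
 */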
static int fill_ematch_sequence(struct nl_msg *msg, struct nl_list_head *list) { struct rtnl_ematch *e; nl_list_for_each_entry(e, list, e_list) { struct tcf_ematch_hdr match = { .matchid = e->e_id, .kind = e->e_kind, .flags = e->e_flags, }; struct nlattr *attr; int err = 0; if (!(attr = nla_nest_start(msg, e->e_index + 1))) return -NLE_NOMEM; if (nlmsg_append(msg, &match, sizeof(match), 0) < 0) return -NLE_NOMEM; if (e->e_ops->eo_fill) err = e->e_ops->eo_fill(e, msg); else if (e->e_flags & TCF_EM_SIMPLE) err = nlmsg_append(msg, e->e_data, 4, 0); else if (e->e_datalen > 0) err = nlmsg_append(msg, e->e_data, e->e_datalen, 0); NL_DBG(3, "msg %p: added ematch [%d] id=%d kind=%d flags=%d\n", msg, e->e_index, match.matchid, match.kind, match.flags); if (err < 0) return -NLE_NOMEM; nla_nest_end(msg, attr); } nl_list_for_each_entry(e, list, e_list) { if (e->e_kind == TCF_EM_CONTAINER && fill_ematch_sequence(msg, &e->e_childs) < 0) return -NLE_NOMEM; } return 0; } int rtnl_ematch_fill_attr(struct nl_msg *msg, int attrid, struct rtnl_ematch_tree *tree) { struct tcf_ematch_tree_hdr thdr = { .progid = tree->et_progid, }; struct nlattr *list, *topattr; int err, index = 0; /* Assign index number to each ematch to allow for references * to be made while constructing the sequence of matches. */ err = update_container_index(&tree->et_list, &index); if (err < 0) return err; if (!(topattr = nla_nest_start(msg, attrid))) goto nla_put_failure; thdr.nmatches = index; NLA_PUT(msg, TCA_EMATCH_TREE_HDR, sizeof(thdr), &thdr); if (!(list = nla_nest_start(msg, TCA_EMATCH_TREE_LIST))) goto nla_put_failure; if (fill_ematch_sequence(msg, &tree->et_list) < 0) goto nla_put_failure; nla_nest_end(msg, list); nla_nest_end(msg, topattr); return 0; nla_put_failure: return -NLE_NOMEM; } /** @} */ extern int ematch_parse(void *, char **, struct nl_list_head *); int rtnl_ematch_parse_expr(const char *expr, char **errp, struct rtnl_ematch_tree **result) { struct rtnl_ematch_tree *tree; YY_BUFFER_STATE buf = NULL; yyscan_t scanner = NULL; int err; NL_DBG(2, "Parsing ematch expression \"%s\"\n", expr); if (!(tree = rtnl_ematch_tree_alloc(RTNL_EMATCH_PROGID))) return -NLE_FAILURE; if ((err = ematch_lex_init(&scanner)) < 0) { err = -NLE_FAILURE; goto errout; } buf = ematch__scan_string(expr, scanner); if ((err = ematch_parse(scanner, errp, &tree->et_list)) != 0) { ematch__delete_buffer(buf, scanner); err = -NLE_PARSE_ERR; goto errout; } ematch_lex_destroy(scanner); *result = tree; return 0; errout: if (scanner) ematch_lex_destroy(scanner); rtnl_ematch_tree_free(tree); return err; } static const char *layer_txt[] = { [TCF_LAYER_LINK] = "eth", [TCF_LAYER_NETWORK] = "ip", [TCF_LAYER_TRANSPORT] = "tcp", }; char *rtnl_ematch_offset2txt(uint8_t layer, uint16_t offset, char *buf, size_t len) { snprintf(buf, len, "%s+%u", (layer <= TCF_LAYER_MAX) ? layer_txt[layer] : "?", offset); return buf; } static const char *operand_txt[] = { [TCF_EM_OPND_EQ] = "=", [TCF_EM_OPND_LT] = "<", [TCF_EM_OPND_GT] = ">", }; char *rtnl_ematch_opnd2txt(uint8_t opnd, char *buf, size_t len) { snprintf(buf, len, "%s", opnd < ARRAY_SIZE(operand_txt) ? operand_txt[opnd] : "?"); return buf; }
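/*
 * Hedged usage sketch for rtnl_ematch_parse_expr()/rtnl_ematch_fill_attr():
 * parse a textual ematch expression and serialize it into a classifier
 * message. The attribute id (TCA_BASIC_EMATCHES) and error handling are
 * illustrative.
 */
static int attach_ematch_expr(struct nl_msg *msg, const char *expr)
{
	struct rtnl_ematch_tree *tree;
	char *errstr = NULL;
	int err;

	err = rtnl_ematch_parse_expr(expr, &errstr, &tree);
	if (err < 0) {
		fprintf(stderr, "ematch parse failed: %s\n",
			errstr ? errstr : "unknown error");
		free(errstr);
		return err;
	}
	err = rtnl_ematch_fill_attr(msg, TCA_BASIC_EMATCHES, tree);
	rtnl_ematch_tree_free(tree);
	return err;
}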
static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) { struct nlmsghdr *nlh; unsigned char *b = skb_tail_pointer(skb); struct nlattr *nest; struct tc_action_ops *a_o; struct tc_action a; int ret = 0; struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh); struct nlattr *kind = find_dump_kind(cb->nlh); if (kind == NULL) { pr_info("tc_dump_action: action bad kind\n"); return 0; } a_o = tc_lookup_action(kind); if (a_o == NULL) return 0; memset(&a, 0, sizeof(struct tc_action)); a.ops = a_o; if (a_o->walk == NULL) { WARN(1, "tc_dump_action: %s !capable of dumping table\n", a_o->kind); goto out_module_put; } nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, cb->nlh->nlmsg_type, sizeof(*t), 0); if (!nlh) goto out_module_put; t = nlmsg_data(nlh); t->tca_family = AF_UNSPEC; t->tca__pad1 = 0; t->tca__pad2 = 0; nest = nla_nest_start(skb, TCA_ACT_TAB); if (nest == NULL) goto out_module_put; ret = a_o->walk(skb, cb, RTM_GETACTION, &a); if (ret < 0) goto out_module_put; if (ret > 0) { nla_nest_end(skb, nest); ret = skb->len; } else nla_nest_cancel(skb, nest); nlh->nlmsg_len = skb_tail_pointer(skb) - b; if (NETLINK_CB(cb->skb).portid && ret) nlh->nlmsg_flags |= NLM_F_MULTI; module_put(a_o->owner); return skb->len; out_module_put: module_put(a_o->owner); nlmsg_trim(skb, b); return skb->len; }
static int tca_action_flush(struct net *net, struct nlattr *nla, struct nlmsghdr *n, u32 portid) { struct sk_buff *skb; unsigned char *b; struct nlmsghdr *nlh; struct tcamsg *t; struct netlink_callback dcb; struct nlattr *nest; struct nlattr *tb[TCA_ACT_MAX + 1]; struct nlattr *kind; struct tc_action *a = create_a(0); int err = -ENOMEM; if (a == NULL) { pr_debug("tca_action_flush: couldnt create tc_action\n"); return err; } skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (!skb) { pr_debug("tca_action_flush: failed skb alloc\n"); kfree(a); return err; } b = skb_tail_pointer(skb); err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL); if (err < 0) goto err_out; err = -EINVAL; kind = tb[TCA_ACT_KIND]; a->ops = tc_lookup_action(kind); if (a->ops == NULL) goto err_out; nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t), 0); if (!nlh) goto out_module_put; t = nlmsg_data(nlh); t->tca_family = AF_UNSPEC; t->tca__pad1 = 0; t->tca__pad2 = 0; nest = nla_nest_start(skb, TCA_ACT_TAB); if (nest == NULL) goto out_module_put; err = a->ops->walk(skb, &dcb, RTM_DELACTION, a); if (err < 0) goto out_module_put; if (err == 0) goto noflush_out; nla_nest_end(skb, nest); nlh->nlmsg_len = skb_tail_pointer(skb) - b; nlh->nlmsg_flags |= NLM_F_ROOT; module_put(a->ops->owner); kfree(a); err = rtnetlink_send(skb, net, portid, RTNLGRP_TC, n->nlmsg_flags & NLM_F_ECHO); if (err > 0) return 0; return err; out_module_put: module_put(a->ops->owner); err_out: noflush_out: kfree_skb(skb); kfree(a); return err; }
static int choke_dump(struct Qdisc *sch, struct sk_buff *skb) { struct choke_sched_data *q = qdisc_priv(sch); struct nlattr *opts = NULL; struct tc_red_qopt opt = { .limit = q->limit, .flags = q->flags, .qth_min = q->parms.qth_min >> q->parms.Wlog, .qth_max = q->parms.qth_max >> q->parms.Wlog, .Wlog = q->parms.Wlog, .Plog = q->parms.Plog, .Scell_log = q->parms.Scell_log, }; opts = nla_nest_start(skb, TCA_OPTIONS); if (opts == NULL) goto nla_put_failure; NLA_PUT(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt); NLA_PUT_U32(skb, TCA_CHOKE_MAX_P, q->parms.max_P); return nla_nest_end(skb, opts); nla_put_failure: nla_nest_cancel(skb, opts); return -EMSGSIZE; } static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d) { struct choke_sched_data *q = qdisc_priv(sch); struct tc_choke_xstats st = { .early = q->stats.prob_drop + q->stats.forced_drop, .marked = q->stats.prob_mark + q->stats.forced_mark, .pdrop = q->stats.pdrop, .other = q->stats.other, .matched = q->stats.matched, }; return gnet_stats_copy_app(d, &st, sizeof(st)); } static void choke_destroy(struct Qdisc *sch) { struct choke_sched_data *q = qdisc_priv(sch); tcf_destroy_chain(&q->filter_list); choke_free(q->tab); } static struct Qdisc *choke_leaf(struct Qdisc *sch, unsigned long arg) { return NULL; } static unsigned long choke_get(struct Qdisc *sch, u32 classid) { return 0; } static void choke_put(struct Qdisc *q, unsigned long cl) { } static unsigned long choke_bind(struct Qdisc *sch, unsigned long parent, u32 classid) { return 0; } static struct tcf_proto **choke_find_tcf(struct Qdisc *sch, unsigned long cl) { struct choke_sched_data *q = qdisc_priv(sch); if (cl) return NULL; return &q->filter_list; } static int choke_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb, struct tcmsg *tcm) { tcm->tcm_handle |= TC_H_MIN(cl); return 0; } static void choke_walk(struct Qdisc *sch, struct qdisc_walker *arg) { if (!arg->stop) { if (arg->fn(sch, 1, arg) < 0) { arg->stop = 1; return; } arg->count++; } } static const struct Qdisc_class_ops choke_class_ops = { .leaf = choke_leaf, .get = choke_get, .put = choke_put, .tcf_chain = choke_find_tcf, .bind_tcf = choke_bind, .unbind_tcf = choke_put, .dump = choke_dump_class, .walk = choke_walk, }; static struct sk_buff *choke_peek_head(struct Qdisc *sch) { struct choke_sched_data *q = qdisc_priv(sch); return (q->head != q->tail) ? q->tab[q->head] : NULL; } static struct Qdisc_ops choke_qdisc_ops __read_mostly = { .id = "choke", .priv_size = sizeof(struct choke_sched_data), .enqueue = choke_enqueue, .dequeue = choke_dequeue, .peek = choke_peek_head, .drop = choke_drop, .init = choke_init, .destroy = choke_destroy, .reset = choke_reset, .change = choke_change, .dump = choke_dump, .dump_stats = choke_dump_stats, .owner = THIS_MODULE, }; static int __init choke_module_init(void) { return register_qdisc(&choke_qdisc_ops); } static void __exit choke_module_exit(void) { unregister_qdisc(&choke_qdisc_ops); } module_init(choke_module_init) module_exit(choke_module_exit) MODULE_LICENSE("GPL");
/* * Create one netlink message for one interface * Contains port and master info as well as carrier and bridge state. */ static int br_fill_ifinfo(struct sk_buff *skb, const struct net_bridge_port *port, u32 pid, u32 seq, int event, unsigned int flags, u32 filter_mask, const struct net_device *dev) { const struct net_bridge *br; struct ifinfomsg *hdr; struct nlmsghdr *nlh; u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN; if (port) br = port->br; else br = netdev_priv(dev); br_debug(br, "br_fill_info event %d port %s master %s\n", event, dev->name, br->dev->name); nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags); if (nlh == NULL) return -EMSGSIZE; hdr = nlmsg_data(nlh); hdr->ifi_family = AF_BRIDGE; hdr->__ifi_pad = 0; hdr->ifi_type = dev->type; hdr->ifi_index = dev->ifindex; hdr->ifi_flags = dev_get_flags(dev); hdr->ifi_change = 0; if (nla_put_string(skb, IFLA_IFNAME, dev->name) || nla_put_u32(skb, IFLA_MASTER, br->dev->ifindex) || nla_put_u32(skb, IFLA_MTU, dev->mtu) || nla_put_u8(skb, IFLA_OPERSTATE, operstate) || (dev->addr_len && nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) || (dev->ifindex != dev->iflink && nla_put_u32(skb, IFLA_LINK, dev->iflink))) goto nla_put_failure; if (event == RTM_NEWLINK && port) { struct nlattr *nest = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED); if (nest == NULL || br_port_fill_attrs(skb, port) < 0) goto nla_put_failure; nla_nest_end(skb, nest); } /* Check if the VID information is requested */ if (filter_mask & RTEXT_FILTER_BRVLAN) { struct nlattr *af; const struct net_port_vlans *pv; struct bridge_vlan_info vinfo; u16 vid; u16 pvid; if (port) pv = nbp_get_vlan_info(port); else pv = br_get_vlan_info(br); if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) goto done; af = nla_nest_start(skb, IFLA_AF_SPEC); if (!af) goto nla_put_failure; pvid = br_get_pvid(pv); for (vid = find_first_bit(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN); vid < BR_VLAN_BITMAP_LEN; vid = find_next_bit(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN, vid+1)) { vinfo.vid = vid; vinfo.flags = 0; if (vid == pvid) vinfo.flags |= BRIDGE_VLAN_INFO_PVID; if (test_bit(vid, pv->untagged_bitmap)) vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED; if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO, sizeof(vinfo), &vinfo)) goto nla_put_failure; } nla_nest_end(skb, af); } done: return nlmsg_end(skb, nlh); nla_put_failure: nlmsg_cancel(skb, nlh); return -EMSGSIZE; }
static int nfnl_exp_build_tuple(struct nl_msg *msg, const struct nfnl_exp *exp, int cta) { struct nlattr *tuple, *ip, *proto; struct nl_addr *addr; int family; family = nfnl_exp_get_family(exp); int type = exp_get_tuple_attr(cta); if (cta == CTA_EXPECT_NAT) tuple = nla_nest_start(msg, CTA_EXPECT_NAT_TUPLE); else tuple = nla_nest_start(msg, cta); if (!tuple) goto nla_put_failure; ip = nla_nest_start(msg, CTA_TUPLE_IP); if (!ip) goto nla_put_failure; addr = nfnl_exp_get_src(exp, type); if (addr) NLA_PUT_ADDR(msg, family == AF_INET ? CTA_IP_V4_SRC : CTA_IP_V6_SRC, addr); addr = nfnl_exp_get_dst(exp, type); if (addr) NLA_PUT_ADDR(msg, family == AF_INET ? CTA_IP_V4_DST : CTA_IP_V6_DST, addr); nla_nest_end(msg, ip); proto = nla_nest_start(msg, CTA_TUPLE_PROTO); if (!proto) goto nla_put_failure; if (nfnl_exp_test_l4protonum(exp, type)) NLA_PUT_U8(msg, CTA_PROTO_NUM, nfnl_exp_get_l4protonum(exp, type)); if (nfnl_exp_test_ports(exp, type)) { NLA_PUT_U16(msg, CTA_PROTO_SRC_PORT, htons(nfnl_exp_get_src_port(exp, type))); NLA_PUT_U16(msg, CTA_PROTO_DST_PORT, htons(nfnl_exp_get_dst_port(exp, type))); } if (nfnl_exp_test_icmp(exp, type)) { NLA_PUT_U16(msg, CTA_PROTO_ICMP_ID, htons(nfnl_exp_get_icmp_id(exp, type))); NLA_PUT_U8(msg, CTA_PROTO_ICMP_TYPE, nfnl_exp_get_icmp_type(exp, type)); NLA_PUT_U8(msg, CTA_PROTO_ICMP_CODE, nfnl_exp_get_icmp_code(exp, type)); } nla_nest_end(msg, proto); nla_nest_end(msg, tuple); return 0; nla_put_failure: return -NLE_MSGSIZE; }
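/*
 * Hedged sketch of a caller: an expectation message carries the master and
 * expected tuples side by side, each built by the function above. Error
 * handling is trimmed for brevity.
 */
static int nfnl_exp_build_tuples(struct nl_msg *msg, const struct nfnl_exp *exp)
{
	int err;

	err = nfnl_exp_build_tuple(msg, exp, CTA_EXPECT_MASTER);
	if (err < 0)
		return err;
	return nfnl_exp_build_tuple(msg, exp, CTA_EXPECT_TUPLE);
}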
static int choke_dump(struct Qdisc *sch, struct sk_buff *skb) { struct choke_sched_data *q = qdisc_priv(sch); struct nlattr *opts = NULL; struct tc_red_qopt opt = { .limit = q->limit, .flags = q->flags, .qth_min = q->parms.qth_min >> q->parms.Wlog, .qth_max = q->parms.qth_max >> q->parms.Wlog, .Wlog = q->parms.Wlog, .Plog = q->parms.Plog, .Scell_log = q->parms.Scell_log, }; opts = nla_nest_start(skb, TCA_OPTIONS); if (opts == NULL) goto nla_put_failure; if (nla_put(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt) || nla_put_u32(skb, TCA_CHOKE_MAX_P, q->parms.max_P)) goto nla_put_failure; return nla_nest_end(skb, opts); nla_put_failure: nla_nest_cancel(skb, opts); return -EMSGSIZE; } static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d) { struct choke_sched_data *q = qdisc_priv(sch); struct tc_choke_xstats st = { .early = q->stats.prob_drop + q->stats.forced_drop, .marked = q->stats.prob_mark + q->stats.forced_mark, .pdrop = q->stats.pdrop, .other = q->stats.other, .matched = q->stats.matched, }; return gnet_stats_copy_app(d, &st, sizeof(st)); } static void choke_destroy(struct Qdisc *sch) { struct choke_sched_data *q = qdisc_priv(sch); choke_free(q->tab); } static struct sk_buff *choke_peek_head(struct Qdisc *sch) { struct choke_sched_data *q = qdisc_priv(sch); return (q->head != q->tail) ? q->tab[q->head] : NULL; } static struct Qdisc_ops choke_qdisc_ops __read_mostly = { .id = "choke", .priv_size = sizeof(struct choke_sched_data), .enqueue = choke_enqueue, .dequeue = choke_dequeue, .peek = choke_peek_head, .init = choke_init, .destroy = choke_destroy, .reset = choke_reset, .change = choke_change, .dump = choke_dump, .dump_stats = choke_dump_stats, .owner = THIS_MODULE, }; static int __init choke_module_init(void) { return register_qdisc(&choke_qdisc_ops); } static void __exit choke_module_exit(void) { unregister_qdisc(&choke_qdisc_ops); } module_init(choke_module_init) module_exit(choke_module_exit) MODULE_LICENSE("GPL");
static int wl_cfgvendor_gscan_get_batch_results(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int len) { int err = 0; struct wl_priv *cfg = wiphy_priv(wiphy); gscan_results_cache_t *results, *iter; uint32 reply_len, complete = 1; int32 mem_needed, num_results_iter; wifi_gscan_result_t *ptr; uint16 num_scan_ids, num_results; struct sk_buff *skb; struct nlattr *scan_hdr, *complete_flag; err = dhd_dev_wait_batch_results_complete(wl_to_prmry_ndev(cfg)); if (err != BCME_OK) return -EBUSY; err = dhd_dev_pno_lock_access_batch_results(wl_to_prmry_ndev(cfg)); if (err != BCME_OK) { WL_ERR(("Can't obtain lock to access batch results %d\n", err)); return -EBUSY; } results = dhd_dev_pno_get_gscan(wl_to_prmry_ndev(cfg), DHD_PNO_GET_BATCH_RESULTS, NULL, &reply_len); if (!results) { WL_ERR(("No results to send %d\n", err)); err = wl_cfgvendor_send_cmd_reply(wiphy, wl_to_prmry_ndev(cfg), results, 0); if (unlikely(err)) WL_ERR(("Vendor Command reply failed ret:%d \n", err)); dhd_dev_pno_unlock_access_batch_results(wl_to_prmry_ndev(cfg)); return err; } num_scan_ids = reply_len & 0xFFFF; num_results = (reply_len & 0xFFFF0000) >> 16; mem_needed = (num_results * sizeof(wifi_gscan_result_t)) + (num_scan_ids * GSCAN_BATCH_RESULT_HDR_LEN) + VENDOR_REPLY_OVERHEAD + SCAN_RESULTS_COMPLETE_FLAG_LEN; if (mem_needed > (int32)NLMSG_DEFAULT_SIZE) { mem_needed = (int32)NLMSG_DEFAULT_SIZE; complete = 0; } WL_TRACE(("complete %d mem_needed %d max_mem %d\n", complete, mem_needed, (int)NLMSG_DEFAULT_SIZE)); /* Alloc the SKB for vendor_event */ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_needed); if (unlikely(!skb)) { WL_ERR(("skb alloc failed")); dhd_dev_pno_unlock_access_batch_results(wl_to_prmry_ndev(cfg)); return -ENOMEM; } iter = results; complete_flag = nla_reserve(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS_COMPLETE, sizeof(complete)); mem_needed = mem_needed - (SCAN_RESULTS_COMPLETE_FLAG_LEN + VENDOR_REPLY_OVERHEAD); while (iter) { num_results_iter = (mem_needed - GSCAN_BATCH_RESULT_HDR_LEN)/sizeof(wifi_gscan_result_t); if (num_results_iter <= 0 || ((iter->tot_count - iter->tot_consumed) > num_results_iter)) break; scan_hdr = nla_nest_start(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS); /* no more room? we are done then (for now) */ if (scan_hdr == NULL) { complete = 0; break; } nla_put_u32(skb, GSCAN_ATTRIBUTE_SCAN_ID, iter->scan_id); nla_put_u8(skb, GSCAN_ATTRIBUTE_SCAN_FLAGS, iter->flag); num_results_iter = iter->tot_count - iter->tot_consumed; nla_put_u32(skb, GSCAN_ATTRIBUTE_NUM_OF_RESULTS, num_results_iter); if (num_results_iter) { ptr = &iter->results[iter->tot_consumed]; iter->tot_consumed += num_results_iter; nla_put(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS, num_results_iter * sizeof(wifi_gscan_result_t), ptr); } nla_nest_end(skb, scan_hdr); mem_needed -= GSCAN_BATCH_RESULT_HDR_LEN + (num_results_iter * sizeof(wifi_gscan_result_t)); iter = iter->next; } memcpy(nla_data(complete_flag), &complete, sizeof(complete)); dhd_dev_gscan_batch_cache_cleanup(wl_to_prmry_ndev(cfg)); dhd_dev_pno_unlock_access_batch_results(wl_to_prmry_ndev(cfg)); return cfg80211_vendor_cmd_reply(skb); }
static int queue_userspace_packet(int dp_ifindex, struct sk_buff *skb, const struct dp_upcall_info *upcall_info) { struct ovs_header *upcall; struct sk_buff *nskb = NULL; struct sk_buff *user_skb; /* to be queued to userspace */ struct nlattr *nla; unsigned int len; int err; if (vlan_tx_tag_present(skb)) { nskb = skb_clone(skb, GFP_ATOMIC); if (!nskb) return -ENOMEM; nskb = __vlan_put_tag(nskb, vlan_tx_tag_get(nskb)); if (!nskb) return -ENOMEM; nskb->vlan_tci = 0; skb = nskb; } if (nla_attr_size(skb->len) > USHRT_MAX) { err = -EFBIG; goto out; } len = sizeof(struct ovs_header); len += nla_total_size(skb->len); len += nla_total_size(FLOW_BUFSIZE); if (upcall_info->cmd == OVS_PACKET_CMD_ACTION) len += nla_total_size(8); user_skb = genlmsg_new(len, GFP_ATOMIC); if (!user_skb) { err = -ENOMEM; goto out; } upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family, 0, upcall_info->cmd); upcall->dp_ifindex = dp_ifindex; nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY); ovs_flow_to_nlattrs(upcall_info->key, user_skb); nla_nest_end(user_skb, nla); if (upcall_info->userdata) nla_put_u64(user_skb, OVS_PACKET_ATTR_USERDATA, nla_get_u64(upcall_info->userdata)); nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len); skb_copy_and_csum_dev(skb, nla_data(nla)); err = genlmsg_unicast(&init_net, user_skb, upcall_info->pid); out: kfree_skb(nskb); return err; }
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb) { struct gred_sched *table = qdisc_priv(sch); struct nlattr *parms, *opts = NULL; int i; u32 max_p[MAX_DPs]; struct tc_gred_sopt sopt = { .DPs = table->DPs, .def_DP = table->def, .grio = gred_rio_mode(table), .flags = table->red_flags, }; opts = nla_nest_start(skb, TCA_OPTIONS); if (opts == NULL) goto nla_put_failure; nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt); for (i = 0; i < MAX_DPs; i++) { struct gred_sched_data *q = table->tab[i]; max_p[i] = q ? q->parms.max_P : 0; } nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p); parms = nla_nest_start(skb, TCA_GRED_PARMS); if (parms == NULL) goto nla_put_failure; for (i = 0; i < MAX_DPs; i++) { struct gred_sched_data *q = table->tab[i]; struct tc_gred_qopt opt; memset(&opt, 0, sizeof(opt)); if (!q) { /* hack -- fix at some point with proper message This is how we indicate to tc that there is no VQ at this DP */ opt.DP = MAX_DPs + i; goto append_opt; } opt.limit = q->limit; opt.DP = q->DP; opt.backlog = q->backlog; opt.prio = q->prio; opt.qth_min = q->parms.qth_min >> q->parms.Wlog; opt.qth_max = q->parms.qth_max >> q->parms.Wlog; opt.Wlog = q->parms.Wlog; opt.Plog = q->parms.Plog; opt.Scell_log = q->parms.Scell_log; opt.other = q->stats.other; opt.early = q->stats.prob_drop; opt.forced = q->stats.forced_drop; opt.pdrop = q->stats.pdrop; opt.packets = q->packetsin; opt.bytesin = q->bytesin; if (gred_wred_mode(table)) gred_load_wred_set(table, q); opt.qave = red_calc_qavg(&q->parms, &q->vars, q->vars.qavg); append_opt: if (nla_append(skb, sizeof(opt), &opt) < 0) goto nla_put_failure; } nla_nest_end(skb, parms); return nla_nest_end(skb, opts); nla_put_failure: nla_nest_cancel(skb, opts); return -EMSGSIZE; } static void gred_destroy(struct Qdisc *sch) { struct gred_sched *table = qdisc_priv(sch); int i; for (i = 0; i < table->DPs; i++) { if (table->tab[i]) gred_destroy_vq(table->tab[i]); } } static struct Qdisc_ops gred_qdisc_ops __read_mostly = { .id = "gred", .priv_size = sizeof(struct gred_sched), .enqueue = gred_enqueue, .dequeue = gred_dequeue, .peek = qdisc_peek_head, .drop = gred_drop, .init = gred_init, .reset = gred_reset, .destroy = gred_destroy, .change = gred_change, .dump = gred_dump, .owner = THIS_MODULE, }; static int __init gred_module_init(void) { return register_qdisc(&gred_qdisc_ops); } static void __exit gred_module_exit(void) { unregister_qdisc(&gred_qdisc_ops); } module_init(gred_module_init) module_exit(gred_module_exit) MODULE_LICENSE("GPL");
/* Setter */ static int set_interface_meshparam(struct nl80211_state *state, struct nl_cb *cb, struct nl_msg *msg, int argc, char **argv, enum id_input id) { const struct mesh_param_descr *mdescr; struct nlattr *container; uint32_t ret; int err; container = nla_nest_start(msg, NL80211_ATTR_MESH_PARAMS); if (!container) return -ENOBUFS; if (!argc) return 1; while (argc) { const char *name; char *value; _any any; memset(&any, 0, sizeof(_any)); name = argv[0]; value = strchr(name, '='); if (value) { *value = '\0'; value++; argc--; argv++; } else { /* backward compat -- accept w/o '=' */ if (argc < 2) { printf("Must specify a value for %s.\n", name); return 2; } value = argv[1]; argc -= 2; argv += 2; } mdescr = find_mesh_param(name); if (!mdescr) return 2; /* Parse the new value */ ret = mdescr->parse_fn(value, &any); if (ret != 0) { if (mdescr->mesh_param_num == NL80211_MESHCONF_POWER_MODE) printf("%s must be set to active, light or " "deep.\n", mdescr->name); else printf("%s must be set to a number " "between 0 and %u\n", mdescr->name, ret); return 2; } err = mdescr->nla_put_fn(msg, mdescr->mesh_param_num, &any); if (err) return err; } nla_nest_end(msg, container); return err; }
static int join_mesh(struct nl80211_state *state, struct nl_cb *cb, struct nl_msg *msg, int argc, char **argv, enum id_input id) { struct nlattr *container; float rate; int bintval, dtim_period; char *end; if (argc < 1) return 1; NLA_PUT(msg, NL80211_ATTR_MESH_ID, strlen(argv[0]), argv[0]); argc--; argv++; if (argc > 1 && strcmp(argv[0], "mcast-rate") == 0) { argv++; argc--; rate = strtod(argv[0], &end); if (*end != '\0') return 1; NLA_PUT_U32(msg, NL80211_ATTR_MCAST_RATE, (int)(rate * 10)); argv++; argc--; } if (argc > 1 && strcmp(argv[0], "beacon-interval") == 0) { argc--; argv++; bintval = strtoul(argv[0], &end, 10); if (*end != '\0') return 1; NLA_PUT_U32(msg, NL80211_ATTR_BEACON_INTERVAL, bintval); argv++; argc--; } if (argc > 1 && strcmp(argv[0], "dtim-period") == 0) { argc--; argv++; dtim_period = strtoul(argv[0], &end, 10); if (*end != '\0') return 1; NLA_PUT_U32(msg, NL80211_ATTR_DTIM_PERIOD, dtim_period); argv++; argc--; } container = nla_nest_start(msg, NL80211_ATTR_MESH_SETUP); if (!container) return -ENOBUFS; if (argc > 1 && strcmp(argv[0], "vendor_sync") == 0) { argv++; argc--; if (strcmp(argv[0], "on") == 0) NLA_PUT_U8(msg, NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC, 1); else NLA_PUT_U8(msg, NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC, 0); argv++; argc--; } /* parse and put other NL80211_ATTR_MESH_SETUP elements here */ nla_nest_end(msg, container); if (!argc) return 0; return set_interface_meshparam(state, cb, msg, argc, argv, id); nla_put_failure: return -ENOBUFS; }
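/*
 * Illustrative invocation for join_mesh() above (hedged): join "mymesh" with
 * a 500 TU beacon interval, then set one mesh parameter through the trailing
 * set_interface_meshparam() call:
 *
 *   iw dev mesh0 mesh join mymesh beacon-interval 500 mesh_retry_timeout=100
 */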